id
stringlengths
25
30
content
stringlengths
14
942k
max_stars_repo_path
stringlengths
49
55
crossvul-cpp_data_bad_4786_2
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % V V IIIII FFFFF FFFFF % % V V I F F % % V V I FFF FFF % % V V I F F % % V IIIII F F % % % % % % Read/Write Khoros Visualization Image Format % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2016 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % http://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. */ #include "MagickCore/studio.h" #include "MagickCore/attribute.h" #include "MagickCore/blob.h" #include "MagickCore/blob-private.h" #include "MagickCore/cache.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colormap.h" #include "MagickCore/colormap-private.h" #include "MagickCore/colorspace.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/image.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/magick.h" #include "MagickCore/memory_.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/property.h" #include "MagickCore/quantum-private.h" #include "MagickCore/static.h" #include "MagickCore/string_.h" #include "MagickCore/module.h" /* Forward declarations. 
*/ static MagickBooleanType WriteVIFFImage(const ImageInfo *,Image *,ExceptionInfo *); /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I s V I F F % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IsVIFF() returns MagickTrue if the image format type, identified by the % magick string, is VIFF. % % The format of the IsVIFF method is: % % MagickBooleanType IsVIFF(const unsigned char *magick,const size_t length) % % A description of each parameter follows: % % o magick: compare image format pattern against these bytes. % % o length: Specifies the length of the magick string. % */ static MagickBooleanType IsVIFF(const unsigned char *magick,const size_t length) { if (length < 2) return(MagickFalse); if (memcmp(magick,"\253\001",2) == 0) return(MagickTrue); return(MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e a d V I F F I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ReadVIFFImage() reads a Khoros Visualization image file and returns % it. It allocates the memory necessary for the new Image structure and % returns a pointer to the new image. % % The format of the ReadVIFFImage method is: % % Image *ReadVIFFImage(const ImageInfo *image_info, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: Method ReadVIFFImage returns a pointer to the image after % reading. A null image is returned if there is a memory shortage or if % the image cannot be read. % % o image_info: the image info. % % o exception: return any errors or warnings in this structure. 
% */ static Image *ReadVIFFImage(const ImageInfo *image_info, ExceptionInfo *exception) { #define VFF_CM_genericRGB 15 #define VFF_CM_ntscRGB 1 #define VFF_CM_NONE 0 #define VFF_DEP_DECORDER 0x4 #define VFF_DEP_NSORDER 0x8 #define VFF_DES_RAW 0 #define VFF_LOC_IMPLICIT 1 #define VFF_MAPTYP_NONE 0 #define VFF_MAPTYP_1_BYTE 1 #define VFF_MAPTYP_2_BYTE 2 #define VFF_MAPTYP_4_BYTE 4 #define VFF_MAPTYP_FLOAT 5 #define VFF_MAPTYP_DOUBLE 7 #define VFF_MS_NONE 0 #define VFF_MS_ONEPERBAND 1 #define VFF_MS_SHARED 3 #define VFF_TYP_BIT 0 #define VFF_TYP_1_BYTE 1 #define VFF_TYP_2_BYTE 2 #define VFF_TYP_4_BYTE 4 #define VFF_TYP_FLOAT 5 #define VFF_TYP_DOUBLE 9 typedef struct _ViffInfo { unsigned char identifier, file_type, release, version, machine_dependency, reserve[3]; char comment[512]; unsigned int rows, columns, subrows; int x_offset, y_offset; float x_bits_per_pixel, y_bits_per_pixel; unsigned int location_type, location_dimension, number_of_images, number_data_bands, data_storage_type, data_encode_scheme, map_scheme, map_storage_type, map_rows, map_columns, map_subrows, map_enable, maps_per_cycle, color_space_model; } ViffInfo; double min_value, scale_factor, value; Image *image; int bit; MagickBooleanType status; MagickSizeType number_pixels; register ssize_t x; register Quantum *q; register ssize_t i; register unsigned char *p; size_t bytes_per_pixel, max_packets, quantum; ssize_t count, y; unsigned char *pixels; unsigned long lsb_first; ViffInfo viff_info; /* Open image file. 
*/ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); image=AcquireImage(image_info,exception); status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception); if (status == MagickFalse) { image=DestroyImageList(image); return((Image *) NULL); } /* Read VIFF header (1024 bytes). */ count=ReadBlob(image,1,&viff_info.identifier); do { /* Verify VIFF identifier. */ if ((count != 1) || ((unsigned char) viff_info.identifier != 0xab)) ThrowReaderException(CorruptImageError,"NotAVIFFImage"); /* Initialize VIFF image. */ (void) ReadBlob(image,sizeof(viff_info.file_type),&viff_info.file_type); (void) ReadBlob(image,sizeof(viff_info.release),&viff_info.release); (void) ReadBlob(image,sizeof(viff_info.version),&viff_info.version); (void) ReadBlob(image,sizeof(viff_info.machine_dependency), &viff_info.machine_dependency); (void) ReadBlob(image,sizeof(viff_info.reserve),viff_info.reserve); count=ReadBlob(image,512,(unsigned char *) viff_info.comment); viff_info.comment[511]='\0'; if (strlen(viff_info.comment) > 4) (void) SetImageProperty(image,"comment",viff_info.comment,exception); if ((viff_info.machine_dependency == VFF_DEP_DECORDER) || (viff_info.machine_dependency == VFF_DEP_NSORDER)) image->endian=LSBEndian; else image->endian=MSBEndian; viff_info.rows=ReadBlobLong(image); viff_info.columns=ReadBlobLong(image); viff_info.subrows=ReadBlobLong(image); viff_info.x_offset=(int) ReadBlobLong(image); viff_info.y_offset=(int) ReadBlobLong(image); viff_info.x_bits_per_pixel=(float) ReadBlobLong(image); viff_info.y_bits_per_pixel=(float) ReadBlobLong(image); viff_info.location_type=ReadBlobLong(image); viff_info.location_dimension=ReadBlobLong(image); viff_info.number_of_images=ReadBlobLong(image); 
viff_info.number_data_bands=ReadBlobLong(image); viff_info.data_storage_type=ReadBlobLong(image); viff_info.data_encode_scheme=ReadBlobLong(image); viff_info.map_scheme=ReadBlobLong(image); viff_info.map_storage_type=ReadBlobLong(image); viff_info.map_rows=ReadBlobLong(image); viff_info.map_columns=ReadBlobLong(image); viff_info.map_subrows=ReadBlobLong(image); viff_info.map_enable=ReadBlobLong(image); viff_info.maps_per_cycle=ReadBlobLong(image); viff_info.color_space_model=ReadBlobLong(image); for (i=0; i < 420; i++) (void) ReadBlobByte(image); if (EOFBlob(image) != MagickFalse) ThrowReaderException(CorruptImageError,"UnexpectedEndOfFile"); image->columns=viff_info.rows; image->rows=viff_info.columns; image->depth=viff_info.x_bits_per_pixel <= 8 ? 8UL : MAGICKCORE_QUANTUM_DEPTH; /* Verify that we can read this VIFF image. */ number_pixels=(MagickSizeType) viff_info.columns*viff_info.rows; if (number_pixels != (size_t) number_pixels) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); if (number_pixels == 0) ThrowReaderException(CoderError,"ImageColumnOrRowSizeIsNotSupported"); if ((viff_info.number_data_bands < 1) || (viff_info.number_data_bands > 4)) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); if ((viff_info.data_storage_type != VFF_TYP_BIT) && (viff_info.data_storage_type != VFF_TYP_1_BYTE) && (viff_info.data_storage_type != VFF_TYP_2_BYTE) && (viff_info.data_storage_type != VFF_TYP_4_BYTE) && (viff_info.data_storage_type != VFF_TYP_FLOAT) && (viff_info.data_storage_type != VFF_TYP_DOUBLE)) ThrowReaderException(CoderError,"DataStorageTypeIsNotSupported"); if (viff_info.data_encode_scheme != VFF_DES_RAW) ThrowReaderException(CoderError,"DataEncodingSchemeIsNotSupported"); if ((viff_info.map_storage_type != VFF_MAPTYP_NONE) && (viff_info.map_storage_type != VFF_MAPTYP_1_BYTE) && (viff_info.map_storage_type != VFF_MAPTYP_2_BYTE) && (viff_info.map_storage_type != VFF_MAPTYP_4_BYTE) && (viff_info.map_storage_type != 
VFF_MAPTYP_FLOAT) && (viff_info.map_storage_type != VFF_MAPTYP_DOUBLE)) ThrowReaderException(CoderError,"MapStorageTypeIsNotSupported"); if ((viff_info.color_space_model != VFF_CM_NONE) && (viff_info.color_space_model != VFF_CM_ntscRGB) && (viff_info.color_space_model != VFF_CM_genericRGB)) ThrowReaderException(CoderError,"ColorspaceModelIsNotSupported"); if (viff_info.location_type != VFF_LOC_IMPLICIT) ThrowReaderException(CoderError,"LocationTypeIsNotSupported"); if (viff_info.number_of_images != 1) ThrowReaderException(CoderError,"NumberOfImagesIsNotSupported"); if (viff_info.map_rows == 0) viff_info.map_scheme=VFF_MS_NONE; switch ((int) viff_info.map_scheme) { case VFF_MS_NONE: { if (viff_info.number_data_bands < 3) { /* Create linear color ramp. */ if (viff_info.data_storage_type == VFF_TYP_BIT) image->colors=2; else if (viff_info.data_storage_type == VFF_MAPTYP_1_BYTE) image->colors=256UL; else image->colors=image->depth <= 8 ? 256UL : 65536UL; status=AcquireImageColormap(image,image->colors,exception); if (status == MagickFalse) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); } break; } case VFF_MS_ONEPERBAND: case VFF_MS_SHARED: { unsigned char *viff_colormap; /* Allocate VIFF colormap. 
*/ switch ((int) viff_info.map_storage_type) { case VFF_MAPTYP_1_BYTE: bytes_per_pixel=1; break; case VFF_MAPTYP_2_BYTE: bytes_per_pixel=2; break; case VFF_MAPTYP_4_BYTE: bytes_per_pixel=4; break; case VFF_MAPTYP_FLOAT: bytes_per_pixel=4; break; case VFF_MAPTYP_DOUBLE: bytes_per_pixel=8; break; default: bytes_per_pixel=1; break; } image->colors=viff_info.map_columns; if (AcquireImageColormap(image,image->colors,exception) == MagickFalse) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); if (viff_info.map_rows > (viff_info.map_rows*bytes_per_pixel*sizeof(*viff_colormap))) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); viff_colormap=(unsigned char *) AcquireQuantumMemory(image->colors, viff_info.map_rows*bytes_per_pixel*sizeof(*viff_colormap)); if (viff_colormap == (unsigned char *) NULL) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); /* Read VIFF raster colormap. */ count=ReadBlob(image,bytes_per_pixel*image->colors*viff_info.map_rows, viff_colormap); lsb_first=1; if (*(char *) &lsb_first && ((viff_info.machine_dependency != VFF_DEP_DECORDER) && (viff_info.machine_dependency != VFF_DEP_NSORDER))) switch ((int) viff_info.map_storage_type) { case VFF_MAPTYP_2_BYTE: { MSBOrderShort(viff_colormap,(bytes_per_pixel*image->colors* viff_info.map_rows)); break; } case VFF_MAPTYP_4_BYTE: case VFF_MAPTYP_FLOAT: { MSBOrderLong(viff_colormap,(bytes_per_pixel*image->colors* viff_info.map_rows)); break; } default: break; } for (i=0; i < (ssize_t) (viff_info.map_rows*image->colors); i++) { switch ((int) viff_info.map_storage_type) { case VFF_MAPTYP_2_BYTE: value=1.0*((short *) viff_colormap)[i]; break; case VFF_MAPTYP_4_BYTE: value=1.0*((int *) viff_colormap)[i]; break; case VFF_MAPTYP_FLOAT: value=((float *) viff_colormap)[i]; break; case VFF_MAPTYP_DOUBLE: value=((double *) viff_colormap)[i]; break; default: value=1.0*viff_colormap[i]; break; } if (i < (ssize_t) image->colors) { 
image->colormap[i].red=ScaleCharToQuantum((unsigned char) value); image->colormap[i].green= ScaleCharToQuantum((unsigned char) value); image->colormap[i].blue=ScaleCharToQuantum((unsigned char) value); } else if (i < (ssize_t) (2*image->colors)) image->colormap[i % image->colors].green= ScaleCharToQuantum((unsigned char) value); else if (i < (ssize_t) (3*image->colors)) image->colormap[i % image->colors].blue= ScaleCharToQuantum((unsigned char) value); } viff_colormap=(unsigned char *) RelinquishMagickMemory(viff_colormap); break; } default: ThrowReaderException(CoderError,"ColormapTypeNotSupported"); } /* Initialize image structure. */ image->alpha_trait=viff_info.number_data_bands == 4 ? BlendPixelTrait : UndefinedPixelTrait; image->storage_class=(viff_info.number_data_bands < 3 ? PseudoClass : DirectClass); image->columns=viff_info.rows; image->rows=viff_info.columns; if ((image_info->ping != MagickFalse) && (image_info->number_scenes != 0)) if (image->scene >= (image_info->scene+image_info->number_scenes-1)) break; status=SetImageExtent(image,image->columns,image->rows,exception); if (status == MagickFalse) return(DestroyImageList(image)); /* Allocate VIFF pixels. 
*/ switch ((int) viff_info.data_storage_type) { case VFF_TYP_2_BYTE: bytes_per_pixel=2; break; case VFF_TYP_4_BYTE: bytes_per_pixel=4; break; case VFF_TYP_FLOAT: bytes_per_pixel=4; break; case VFF_TYP_DOUBLE: bytes_per_pixel=8; break; default: bytes_per_pixel=1; break; } if (viff_info.data_storage_type == VFF_TYP_BIT) max_packets=((image->columns+7UL) >> 3UL)*image->rows; else max_packets=(size_t) (number_pixels*viff_info.number_data_bands); pixels=(unsigned char *) AcquireQuantumMemory(MagickMax(number_pixels, max_packets),bytes_per_pixel*sizeof(*pixels)); if (pixels == (unsigned char *) NULL) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); count=ReadBlob(image,bytes_per_pixel*max_packets,pixels); lsb_first=1; if (*(char *) &lsb_first && ((viff_info.machine_dependency != VFF_DEP_DECORDER) && (viff_info.machine_dependency != VFF_DEP_NSORDER))) switch ((int) viff_info.data_storage_type) { case VFF_TYP_2_BYTE: { MSBOrderShort(pixels,bytes_per_pixel*max_packets); break; } case VFF_TYP_4_BYTE: case VFF_TYP_FLOAT: { MSBOrderLong(pixels,bytes_per_pixel*max_packets); break; } default: break; } min_value=0.0; scale_factor=1.0; if ((viff_info.data_storage_type != VFF_TYP_1_BYTE) && (viff_info.map_scheme == VFF_MS_NONE)) { double max_value; /* Determine scale factor. 
*/ switch ((int) viff_info.data_storage_type) { case VFF_TYP_2_BYTE: value=1.0*((short *) pixels)[0]; break; case VFF_TYP_4_BYTE: value=1.0*((int *) pixels)[0]; break; case VFF_TYP_FLOAT: value=((float *) pixels)[0]; break; case VFF_TYP_DOUBLE: value=((double *) pixels)[0]; break; default: value=1.0*pixels[0]; break; } max_value=value; min_value=value; for (i=0; i < (ssize_t) max_packets; i++) { switch ((int) viff_info.data_storage_type) { case VFF_TYP_2_BYTE: value=1.0*((short *) pixels)[i]; break; case VFF_TYP_4_BYTE: value=1.0*((int *) pixels)[i]; break; case VFF_TYP_FLOAT: value=((float *) pixels)[i]; break; case VFF_TYP_DOUBLE: value=((double *) pixels)[i]; break; default: value=1.0*pixels[i]; break; } if (value > max_value) max_value=value; else if (value < min_value) min_value=value; } if ((min_value == 0) && (max_value == 0)) scale_factor=0; else if (min_value == max_value) { scale_factor=(double) QuantumRange/min_value; min_value=0; } else scale_factor=(double) QuantumRange/(max_value-min_value); } /* Convert pixels to Quantum size. */ p=(unsigned char *) pixels; for (i=0; i < (ssize_t) max_packets; i++) { switch ((int) viff_info.data_storage_type) { case VFF_TYP_2_BYTE: value=1.0*((short *) pixels)[i]; break; case VFF_TYP_4_BYTE: value=1.0*((int *) pixels)[i]; break; case VFF_TYP_FLOAT: value=((float *) pixels)[i]; break; case VFF_TYP_DOUBLE: value=((double *) pixels)[i]; break; default: value=1.0*pixels[i]; break; } if (viff_info.map_scheme == VFF_MS_NONE) { value=(value-min_value)*scale_factor; if (value > QuantumRange) value=QuantumRange; else if (value < 0) value=0; } *p=(unsigned char) ((Quantum) value); p++; } /* Convert VIFF raster image to pixel packets. */ p=(unsigned char *) pixels; if (viff_info.data_storage_type == VFF_TYP_BIT) { /* Convert bitmap scanline. 
*/ for (y=0; y < (ssize_t) image->rows; y++) { q=QueueAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) break; for (x=0; x < (ssize_t) (image->columns-7); x+=8) { for (bit=0; bit < 8; bit++) { quantum=(size_t) ((*p) & (0x01 << bit) ? 0 : 1); SetPixelRed(image,quantum == 0 ? 0 : QuantumRange,q); SetPixelGreen(image,quantum == 0 ? 0 : QuantumRange,q); SetPixelBlue(image,quantum == 0 ? 0 : QuantumRange,q); if (image->storage_class == PseudoClass) SetPixelIndex(image,(Quantum) quantum,q); q+=GetPixelChannels(image); } p++; } if ((image->columns % 8) != 0) { for (bit=0; bit < (int) (image->columns % 8); bit++) { quantum=(size_t) ((*p) & (0x01 << bit) ? 0 : 1); SetPixelRed(image,quantum == 0 ? 0 : QuantumRange,q); SetPixelGreen(image,quantum == 0 ? 0 : QuantumRange,q); SetPixelBlue(image,quantum == 0 ? 0 : QuantumRange,q); if (image->storage_class == PseudoClass) SetPixelIndex(image,(Quantum) quantum,q); q+=GetPixelChannels(image); } p++; } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; if (image->previous == (Image *) NULL) { status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y, image->rows); if (status == MagickFalse) break; } } } else if (image->storage_class == PseudoClass) for (y=0; y < (ssize_t) image->rows; y++) { q=QueueAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { SetPixelIndex(image,*p++,q); q+=GetPixelChannels(image); } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; if (image->previous == (Image *) NULL) { status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y, image->rows); if (status == MagickFalse) break; } } else { /* Convert DirectColor scanline. 
*/ number_pixels=(MagickSizeType) image->columns*image->rows; for (y=0; y < (ssize_t) image->rows; y++) { q=QueueAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { SetPixelRed(image,ScaleCharToQuantum(*p),q); SetPixelGreen(image,ScaleCharToQuantum(*(p+number_pixels)),q); SetPixelBlue(image,ScaleCharToQuantum(*(p+2*number_pixels)),q); if (image->colors != 0) { ssize_t index; index=(ssize_t) GetPixelRed(image,q); SetPixelRed(image,image->colormap[ ConstrainColormapIndex(image,index,exception)].red,q); index=(ssize_t) GetPixelGreen(image,q); SetPixelGreen(image,image->colormap[ ConstrainColormapIndex(image,index,exception)].green,q); index=(ssize_t) GetPixelBlue(image,q); SetPixelBlue(image,image->colormap[ ConstrainColormapIndex(image,index,exception)].blue,q); } SetPixelAlpha(image,image->alpha_trait != UndefinedPixelTrait ? ScaleCharToQuantum(*(p+number_pixels*3)) : OpaqueAlpha,q); p++; q+=GetPixelChannels(image); } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; if (image->previous == (Image *) NULL) { status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y, image->rows); if (status == MagickFalse) break; } } } pixels=(unsigned char *) RelinquishMagickMemory(pixels); if (image->storage_class == PseudoClass) (void) SyncImage(image,exception); if (EOFBlob(image) != MagickFalse) { ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile", image->filename); break; } /* Proceed to next image. */ if (image_info->number_scenes != 0) if (image->scene >= (image_info->scene+image_info->number_scenes-1)) break; count=ReadBlob(image,1,&viff_info.identifier); if ((count != 0) && (viff_info.identifier == 0xab)) { /* Allocate next image structure. 
*/ AcquireNextImage(image_info,image,exception); if (GetNextImageInList(image) == (Image *) NULL) { image=DestroyImageList(image); return((Image *) NULL); } image=SyncNextImageInList(image); status=SetImageProgress(image,LoadImagesTag,TellBlob(image), GetBlobSize(image)); if (status == MagickFalse) break; } } while ((count != 0) && (viff_info.identifier == 0xab)); (void) CloseBlob(image); return(GetFirstImageInList(image)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e g i s t e r V I F F I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RegisterVIFFImage() adds properties for the VIFF image format to % the list of supported formats. The properties include the image format % tag, a method to read and/or write the format, whether the format % supports the saving of more than one frame to the same file or blob, % whether the format supports native in-memory I/O, and a brief % description of the format. 
% % The format of the RegisterVIFFImage method is: % % size_t RegisterVIFFImage(void) % */ ModuleExport size_t RegisterVIFFImage(void) { MagickInfo *entry; entry=AcquireMagickInfo("VIFF","VIFF","Khoros Visualization image"); entry->decoder=(DecodeImageHandler *) ReadVIFFImage; entry->encoder=(EncodeImageHandler *) WriteVIFFImage; entry->magick=(IsImageFormatHandler *) IsVIFF; (void) RegisterMagickInfo(entry); entry=AcquireMagickInfo("VIFF","XV","Khoros Visualization image"); entry->decoder=(DecodeImageHandler *) ReadVIFFImage; entry->encoder=(EncodeImageHandler *) WriteVIFFImage; (void) RegisterMagickInfo(entry); return(MagickImageCoderSignature); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % U n r e g i s t e r V I F F I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % UnregisterVIFFImage() removes format registrations made by the % VIFF module from the list of supported formats. % % The format of the UnregisterVIFFImage method is: % % UnregisterVIFFImage(void) % */ ModuleExport void UnregisterVIFFImage(void) { (void) UnregisterMagickInfo("VIFF"); (void) UnregisterMagickInfo("XV"); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % W r i t e V I F F I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % WriteVIFFImage() writes an image to a file in the VIFF image format. % % The format of the WriteVIFFImage method is: % % MagickBooleanType WriteVIFFImage(const ImageInfo *image_info, % Image *image,ExceptionInfo *exception) % % A description of each parameter follows. % % o image_info: the image info. % % o image: The image. % % o exception: return any errors or warnings in this structure. 
% */ static MagickBooleanType WriteVIFFImage(const ImageInfo *image_info, Image *image,ExceptionInfo *exception) { #define VFF_CM_genericRGB 15 #define VFF_CM_NONE 0 #define VFF_DEP_IEEEORDER 0x2 #define VFF_DES_RAW 0 #define VFF_LOC_IMPLICIT 1 #define VFF_MAPTYP_NONE 0 #define VFF_MAPTYP_1_BYTE 1 #define VFF_MS_NONE 0 #define VFF_MS_ONEPERBAND 1 #define VFF_TYP_BIT 0 #define VFF_TYP_1_BYTE 1 typedef struct _ViffInfo { char identifier, file_type, release, version, machine_dependency, reserve[3], comment[512]; size_t rows, columns, subrows; int x_offset, y_offset; unsigned int x_bits_per_pixel, y_bits_per_pixel, location_type, location_dimension, number_of_images, number_data_bands, data_storage_type, data_encode_scheme, map_scheme, map_storage_type, map_rows, map_columns, map_subrows, map_enable, maps_per_cycle, color_space_model; } ViffInfo; const char *value; MagickBooleanType status; MagickOffsetType scene; MagickSizeType number_pixels, packets; MemoryInfo *pixel_info; register const Quantum *p; register ssize_t x; register ssize_t i; register unsigned char *q; ssize_t y; unsigned char *pixels; ViffInfo viff_info; /* Open output image file. */ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); status=OpenBlob(image_info,image,WriteBinaryBlobMode,exception); if (status == MagickFalse) return(status); (void) ResetMagickMemory(&viff_info,0,sizeof(ViffInfo)); scene=0; do { /* Initialize VIFF image structure. 
*/ (void) TransformImageColorspace(image,sRGBColorspace,exception); DisableMSCWarning(4310) viff_info.identifier=(char) 0xab; RestoreMSCWarning viff_info.file_type=1; viff_info.release=1; viff_info.version=3; viff_info.machine_dependency=VFF_DEP_IEEEORDER; /* IEEE byte ordering */ *viff_info.comment='\0'; value=GetImageProperty(image,"comment",exception); if (value != (const char *) NULL) (void) CopyMagickString(viff_info.comment,value,MagickMin(strlen(value), 511)+1); viff_info.rows=image->columns; viff_info.columns=image->rows; viff_info.subrows=0; viff_info.x_offset=(~0); viff_info.y_offset=(~0); viff_info.x_bits_per_pixel=0; viff_info.y_bits_per_pixel=0; viff_info.location_type=VFF_LOC_IMPLICIT; viff_info.location_dimension=0; viff_info.number_of_images=1; viff_info.data_encode_scheme=VFF_DES_RAW; viff_info.map_scheme=VFF_MS_NONE; viff_info.map_storage_type=VFF_MAPTYP_NONE; viff_info.map_rows=0; viff_info.map_columns=0; viff_info.map_subrows=0; viff_info.map_enable=1; /* no colormap */ viff_info.maps_per_cycle=0; number_pixels=(MagickSizeType) image->columns*image->rows; if (image->storage_class == DirectClass) { /* Full color VIFF raster. */ viff_info.number_data_bands=image->alpha_trait ? 4U : 3U; viff_info.color_space_model=VFF_CM_genericRGB; viff_info.data_storage_type=VFF_TYP_1_BYTE; packets=viff_info.number_data_bands*number_pixels; } else { viff_info.number_data_bands=1; viff_info.color_space_model=VFF_CM_NONE; viff_info.data_storage_type=VFF_TYP_1_BYTE; packets=number_pixels; if (SetImageGray(image,exception) == MagickFalse) { /* Colormapped VIFF raster. */ viff_info.map_scheme=VFF_MS_ONEPERBAND; viff_info.map_storage_type=VFF_MAPTYP_1_BYTE; viff_info.map_rows=3; viff_info.map_columns=(unsigned int) image->colors; } else if (image->colors <= 2) { /* Monochrome VIFF raster. */ viff_info.data_storage_type=VFF_TYP_BIT; packets=((image->columns+7) >> 3)*image->rows; } } /* Write VIFF image header (pad to 1024 bytes). 
*/ (void) WriteBlob(image,sizeof(viff_info.identifier),(unsigned char *) &viff_info.identifier); (void) WriteBlob(image,sizeof(viff_info.file_type),(unsigned char *) &viff_info.file_type); (void) WriteBlob(image,sizeof(viff_info.release),(unsigned char *) &viff_info.release); (void) WriteBlob(image,sizeof(viff_info.version),(unsigned char *) &viff_info.version); (void) WriteBlob(image,sizeof(viff_info.machine_dependency), (unsigned char *) &viff_info.machine_dependency); (void) WriteBlob(image,sizeof(viff_info.reserve),(unsigned char *) viff_info.reserve); (void) WriteBlob(image,512,(unsigned char *) viff_info.comment); (void) WriteBlobMSBLong(image,(unsigned int) viff_info.rows); (void) WriteBlobMSBLong(image,(unsigned int) viff_info.columns); (void) WriteBlobMSBLong(image,(unsigned int) viff_info.subrows); (void) WriteBlobMSBLong(image,(unsigned int) viff_info.x_offset); (void) WriteBlobMSBLong(image,(unsigned int) viff_info.y_offset); viff_info.x_bits_per_pixel=(unsigned int) ((63 << 24) | (128 << 16)); (void) WriteBlobMSBLong(image,(unsigned int) viff_info.x_bits_per_pixel); viff_info.y_bits_per_pixel=(unsigned int) ((63 << 24) | (128 << 16)); (void) WriteBlobMSBLong(image,(unsigned int) viff_info.y_bits_per_pixel); (void) WriteBlobMSBLong(image,viff_info.location_type); (void) WriteBlobMSBLong(image,viff_info.location_dimension); (void) WriteBlobMSBLong(image,(unsigned int) viff_info.number_of_images); (void) WriteBlobMSBLong(image,(unsigned int) viff_info.number_data_bands); (void) WriteBlobMSBLong(image,(unsigned int) viff_info.data_storage_type); (void) WriteBlobMSBLong(image,(unsigned int) viff_info.data_encode_scheme); (void) WriteBlobMSBLong(image,(unsigned int) viff_info.map_scheme); (void) WriteBlobMSBLong(image,(unsigned int) viff_info.map_storage_type); (void) WriteBlobMSBLong(image,(unsigned int) viff_info.map_rows); (void) WriteBlobMSBLong(image,(unsigned int) viff_info.map_columns); (void) WriteBlobMSBLong(image,(unsigned int) 
viff_info.map_subrows); (void) WriteBlobMSBLong(image,(unsigned int) viff_info.map_enable); (void) WriteBlobMSBLong(image,(unsigned int) viff_info.maps_per_cycle); (void) WriteBlobMSBLong(image,(unsigned int) viff_info.color_space_model); for (i=0; i < 420; i++) (void) WriteBlobByte(image,'\0'); /* Convert MIFF to VIFF raster pixels. */ pixel_info=AcquireVirtualMemory((size_t) packets,sizeof(*pixels)); if (pixel_info == (MemoryInfo *) NULL) ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed"); pixels=(unsigned char *) GetVirtualMemoryBlob(pixel_info); q=pixels; if (image->storage_class == DirectClass) { /* Convert DirectClass packet to VIFF RGB pixel. */ number_pixels=(MagickSizeType) image->columns*image->rows; for (y=0; y < (ssize_t) image->rows; y++) { p=GetVirtualPixels(image,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { *q=ScaleQuantumToChar(GetPixelRed(image,p)); *(q+number_pixels)=ScaleQuantumToChar(GetPixelGreen(image,p)); *(q+number_pixels*2)=ScaleQuantumToChar(GetPixelBlue(image,p)); if (image->alpha_trait != UndefinedPixelTrait) *(q+number_pixels*3)=ScaleQuantumToChar((Quantum) (GetPixelAlpha(image,p))); p+=GetPixelChannels(image); q++; } if (image->previous == (Image *) NULL) { status=SetImageProgress(image,SaveImageTag,(MagickOffsetType) y, image->rows); if (status == MagickFalse) break; } } } else if (SetImageGray(image,exception) == MagickFalse) { unsigned char *viff_colormap; /* Dump colormap to file. 
*/ viff_colormap=(unsigned char *) AcquireQuantumMemory(image->colors, 3*sizeof(*viff_colormap)); if (viff_colormap == (unsigned char *) NULL) ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed"); q=viff_colormap; for (i=0; i < (ssize_t) image->colors; i++) *q++=ScaleQuantumToChar(image->colormap[i].red); for (i=0; i < (ssize_t) image->colors; i++) *q++=ScaleQuantumToChar(image->colormap[i].green); for (i=0; i < (ssize_t) image->colors; i++) *q++=ScaleQuantumToChar(image->colormap[i].blue); (void) WriteBlob(image,3*image->colors,viff_colormap); viff_colormap=(unsigned char *) RelinquishMagickMemory(viff_colormap); /* Convert PseudoClass packet to VIFF colormapped pixels. */ q=pixels; for (y=0; y < (ssize_t) image->rows; y++) { p=GetVirtualPixels(image,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { *q++=(unsigned char) GetPixelIndex(image,p); p+=GetPixelChannels(image); } if (image->previous == (Image *) NULL) { status=SetImageProgress(image,SaveImageTag,(MagickOffsetType) y, image->rows); if (status == MagickFalse) break; } } } else if (image->colors <= 2) { ssize_t x, y; register unsigned char bit, byte; /* Convert PseudoClass image to a VIFF monochrome image. */ (void) SetImageType(image,BilevelType,exception); for (y=0; y < (ssize_t) image->rows; y++) { p=GetVirtualPixels(image,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) break; bit=0; byte=0; for (x=0; x < (ssize_t) image->columns; x++) { byte>>=1; if (GetPixelLuma(image,p) < (QuantumRange/2.0)) byte|=0x80; bit++; if (bit == 8) { *q++=byte; bit=0; byte=0; } p+=GetPixelChannels(image); } if (bit != 0) *q++=byte >> (8-bit); if (image->previous == (Image *) NULL) { status=SetImageProgress(image,SaveImageTag,(MagickOffsetType) y,image->rows); if (status == MagickFalse) break; } } } else { /* Convert PseudoClass packet to VIFF grayscale pixel. 
*/ for (y=0; y < (ssize_t) image->rows; y++) { p=GetVirtualPixels(image,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { *q++=(unsigned char) ClampToQuantum(GetPixelLuma(image,p)); p+=GetPixelChannels(image); } if (image->previous == (Image *) NULL) { status=SetImageProgress(image,SaveImageTag,(MagickOffsetType) y,image->rows); if (status == MagickFalse) break; } } } (void) WriteBlob(image,(size_t) packets,pixels); pixel_info=RelinquishVirtualMemory(pixel_info); if (GetNextImageInList(image) == (Image *) NULL) break; image=SyncNextImageInList(image); status=SetImageProgress(image,SaveImagesTag,scene++, GetImageListLength(image)); if (status == MagickFalse) break; } while (image_info->adjoin != MagickFalse); (void) CloseBlob(image); return(MagickTrue); }
./CrossVul/dataset_final_sorted/CWE-284/c/bad_4786_2
crossvul-cpp_data_good_5198_0
/* * linux/fs/open.c * * Copyright (C) 1991, 1992 Linus Torvalds */ #include <linux/string.h> #include <linux/mm.h> #include <linux/file.h> #include <linux/fdtable.h> #include <linux/fsnotify.h> #include <linux/module.h> #include <linux/tty.h> #include <linux/namei.h> #include <linux/backing-dev.h> #include <linux/capability.h> #include <linux/securebits.h> #include <linux/security.h> #include <linux/mount.h> #include <linux/fcntl.h> #include <linux/slab.h> #include <asm/uaccess.h> #include <linux/fs.h> #include <linux/personality.h> #include <linux/pagemap.h> #include <linux/syscalls.h> #include <linux/rcupdate.h> #include <linux/audit.h> #include <linux/falloc.h> #include <linux/fs_struct.h> #include <linux/ima.h> #include <linux/dnotify.h> #include <linux/compat.h> #include "internal.h" int do_truncate(struct dentry *dentry, loff_t length, unsigned int time_attrs, struct file *filp) { int ret; struct iattr newattrs; /* Not pretty: "inode->i_size" shouldn't really be signed. But it is. 
*/ if (length < 0) return -EINVAL; newattrs.ia_size = length; newattrs.ia_valid = ATTR_SIZE | time_attrs; if (filp) { newattrs.ia_file = filp; newattrs.ia_valid |= ATTR_FILE; } /* Remove suid, sgid, and file capabilities on truncate too */ ret = dentry_needs_remove_privs(dentry); if (ret < 0) return ret; if (ret) newattrs.ia_valid |= ret | ATTR_FORCE; inode_lock(dentry->d_inode); /* Note any delegations or leases have already been broken: */ ret = notify_change(dentry, &newattrs, NULL); inode_unlock(dentry->d_inode); return ret; } long vfs_truncate(struct path *path, loff_t length) { struct inode *inode; long error; inode = path->dentry->d_inode; /* For directories it's -EISDIR, for other non-regulars - -EINVAL */ if (S_ISDIR(inode->i_mode)) return -EISDIR; if (!S_ISREG(inode->i_mode)) return -EINVAL; error = mnt_want_write(path->mnt); if (error) goto out; error = inode_permission(inode, MAY_WRITE); if (error) goto mnt_drop_write_and_out; error = -EPERM; if (IS_APPEND(inode)) goto mnt_drop_write_and_out; error = get_write_access(inode); if (error) goto mnt_drop_write_and_out; /* * Make sure that there are no leases. get_write_access() protects * against the truncate racing with a lease-granting setlease(). */ error = break_lease(inode, O_WRONLY); if (error) goto put_write_and_out; error = locks_verify_truncate(inode, NULL, length); if (!error) error = security_path_truncate(path); if (!error) error = do_truncate(path->dentry, length, 0, NULL); put_write_and_out: put_write_access(inode); mnt_drop_write_and_out: mnt_drop_write(path->mnt); out: return error; } EXPORT_SYMBOL_GPL(vfs_truncate); static long do_sys_truncate(const char __user *pathname, loff_t length) { unsigned int lookup_flags = LOOKUP_FOLLOW; struct path path; int error; if (length < 0) /* sorry, but loff_t says... 
*/ return -EINVAL; retry: error = user_path_at(AT_FDCWD, pathname, lookup_flags, &path); if (!error) { error = vfs_truncate(&path, length); path_put(&path); } if (retry_estale(error, lookup_flags)) { lookup_flags |= LOOKUP_REVAL; goto retry; } return error; } SYSCALL_DEFINE2(truncate, const char __user *, path, long, length) { return do_sys_truncate(path, length); } #ifdef CONFIG_COMPAT COMPAT_SYSCALL_DEFINE2(truncate, const char __user *, path, compat_off_t, length) { return do_sys_truncate(path, length); } #endif static long do_sys_ftruncate(unsigned int fd, loff_t length, int small) { struct inode *inode; struct dentry *dentry; struct fd f; int error; error = -EINVAL; if (length < 0) goto out; error = -EBADF; f = fdget(fd); if (!f.file) goto out; /* explicitly opened as large or we are on 64-bit box */ if (f.file->f_flags & O_LARGEFILE) small = 0; dentry = f.file->f_path.dentry; inode = dentry->d_inode; error = -EINVAL; if (!S_ISREG(inode->i_mode) || !(f.file->f_mode & FMODE_WRITE)) goto out_putf; error = -EINVAL; /* Cannot ftruncate over 2^31 bytes without large file support */ if (small && length > MAX_NON_LFS) goto out_putf; error = -EPERM; if (IS_APPEND(inode)) goto out_putf; sb_start_write(inode->i_sb); error = locks_verify_truncate(inode, f.file, length); if (!error) error = security_path_truncate(&f.file->f_path); if (!error) error = do_truncate(dentry, length, ATTR_MTIME|ATTR_CTIME, f.file); sb_end_write(inode->i_sb); out_putf: fdput(f); out: return error; } SYSCALL_DEFINE2(ftruncate, unsigned int, fd, unsigned long, length) { return do_sys_ftruncate(fd, length, 1); } #ifdef CONFIG_COMPAT COMPAT_SYSCALL_DEFINE2(ftruncate, unsigned int, fd, compat_ulong_t, length) { return do_sys_ftruncate(fd, length, 1); } #endif /* LFS versions of truncate are only needed on 32 bit machines */ #if BITS_PER_LONG == 32 SYSCALL_DEFINE2(truncate64, const char __user *, path, loff_t, length) { return do_sys_truncate(path, length); } SYSCALL_DEFINE2(ftruncate64, unsigned int, 
fd, loff_t, length) { return do_sys_ftruncate(fd, length, 0); } #endif /* BITS_PER_LONG == 32 */ int vfs_fallocate(struct file *file, int mode, loff_t offset, loff_t len) { struct inode *inode = file_inode(file); long ret; if (offset < 0 || len <= 0) return -EINVAL; /* Return error if mode is not supported */ if (mode & ~FALLOC_FL_SUPPORTED_MASK) return -EOPNOTSUPP; /* Punch hole and zero range are mutually exclusive */ if ((mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_ZERO_RANGE)) == (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_ZERO_RANGE)) return -EOPNOTSUPP; /* Punch hole must have keep size set */ if ((mode & FALLOC_FL_PUNCH_HOLE) && !(mode & FALLOC_FL_KEEP_SIZE)) return -EOPNOTSUPP; /* Collapse range should only be used exclusively. */ if ((mode & FALLOC_FL_COLLAPSE_RANGE) && (mode & ~FALLOC_FL_COLLAPSE_RANGE)) return -EINVAL; /* Insert range should only be used exclusively. */ if ((mode & FALLOC_FL_INSERT_RANGE) && (mode & ~FALLOC_FL_INSERT_RANGE)) return -EINVAL; if (!(file->f_mode & FMODE_WRITE)) return -EBADF; /* * We can only allow pure fallocate on append only files */ if ((mode & ~FALLOC_FL_KEEP_SIZE) && IS_APPEND(inode)) return -EPERM; if (IS_IMMUTABLE(inode)) return -EPERM; /* * We cannot allow any fallocate operation on an active swapfile */ if (IS_SWAPFILE(inode)) return -ETXTBSY; /* * Revalidate the write permissions, in case security policy has * changed since the files were opened. */ ret = security_file_permission(file, MAY_WRITE); if (ret) return ret; if (S_ISFIFO(inode->i_mode)) return -ESPIPE; /* * Let individual file system decide if it supports preallocation * for directories or not. */ if (!S_ISREG(inode->i_mode) && !S_ISDIR(inode->i_mode)) return -ENODEV; /* Check for wrap through zero too */ if (((offset + len) > inode->i_sb->s_maxbytes) || ((offset + len) < 0)) return -EFBIG; if (!file->f_op->fallocate) return -EOPNOTSUPP; sb_start_write(inode->i_sb); ret = file->f_op->fallocate(file, mode, offset, len); /* * Create inotify and fanotify events. 
* * To keep the logic simple always create events if fallocate succeeds. * This implies that events are even created if the file size remains * unchanged, e.g. when using flag FALLOC_FL_KEEP_SIZE. */ if (ret == 0) fsnotify_modify(file); sb_end_write(inode->i_sb); return ret; } EXPORT_SYMBOL_GPL(vfs_fallocate); SYSCALL_DEFINE4(fallocate, int, fd, int, mode, loff_t, offset, loff_t, len) { struct fd f = fdget(fd); int error = -EBADF; if (f.file) { error = vfs_fallocate(f.file, mode, offset, len); fdput(f); } return error; } /* * access() needs to use the real uid/gid, not the effective uid/gid. * We do this by temporarily clearing all FS-related capabilities and * switching the fsuid/fsgid around to the real ones. */ SYSCALL_DEFINE3(faccessat, int, dfd, const char __user *, filename, int, mode) { const struct cred *old_cred; struct cred *override_cred; struct path path; struct inode *inode; int res; unsigned int lookup_flags = LOOKUP_FOLLOW; if (mode & ~S_IRWXO) /* where's F_OK, X_OK, W_OK, R_OK? */ return -EINVAL; override_cred = prepare_creds(); if (!override_cred) return -ENOMEM; override_cred->fsuid = override_cred->uid; override_cred->fsgid = override_cred->gid; if (!issecure(SECURE_NO_SETUID_FIXUP)) { /* Clear the capabilities if we switch to a non-root user */ kuid_t root_uid = make_kuid(override_cred->user_ns, 0); if (!uid_eq(override_cred->uid, root_uid)) cap_clear(override_cred->cap_effective); else override_cred->cap_effective = override_cred->cap_permitted; } old_cred = override_creds(override_cred); retry: res = user_path_at(dfd, filename, lookup_flags, &path); if (res) goto out; inode = d_backing_inode(path.dentry); if ((mode & MAY_EXEC) && S_ISREG(inode->i_mode)) { /* * MAY_EXEC on regular files is denied if the fs is mounted * with the "noexec" flag. 
*/ res = -EACCES; if (path_noexec(&path)) goto out_path_release; } res = inode_permission(inode, mode | MAY_ACCESS); /* SuS v2 requires we report a read only fs too */ if (res || !(mode & S_IWOTH) || special_file(inode->i_mode)) goto out_path_release; /* * This is a rare case where using __mnt_is_readonly() * is OK without a mnt_want/drop_write() pair. Since * no actual write to the fs is performed here, we do * not need to telegraph to that to anyone. * * By doing this, we accept that this access is * inherently racy and know that the fs may change * state before we even see this result. */ if (__mnt_is_readonly(path.mnt)) res = -EROFS; out_path_release: path_put(&path); if (retry_estale(res, lookup_flags)) { lookup_flags |= LOOKUP_REVAL; goto retry; } out: revert_creds(old_cred); put_cred(override_cred); return res; } SYSCALL_DEFINE2(access, const char __user *, filename, int, mode) { return sys_faccessat(AT_FDCWD, filename, mode); } SYSCALL_DEFINE1(chdir, const char __user *, filename) { struct path path; int error; unsigned int lookup_flags = LOOKUP_FOLLOW | LOOKUP_DIRECTORY; retry: error = user_path_at(AT_FDCWD, filename, lookup_flags, &path); if (error) goto out; error = inode_permission(path.dentry->d_inode, MAY_EXEC | MAY_CHDIR); if (error) goto dput_and_out; set_fs_pwd(current->fs, &path); dput_and_out: path_put(&path); if (retry_estale(error, lookup_flags)) { lookup_flags |= LOOKUP_REVAL; goto retry; } out: return error; } SYSCALL_DEFINE1(fchdir, unsigned int, fd) { struct fd f = fdget_raw(fd); struct inode *inode; int error = -EBADF; error = -EBADF; if (!f.file) goto out; inode = file_inode(f.file); error = -ENOTDIR; if (!S_ISDIR(inode->i_mode)) goto out_putf; error = inode_permission(inode, MAY_EXEC | MAY_CHDIR); if (!error) set_fs_pwd(current->fs, &f.file->f_path); out_putf: fdput(f); out: return error; } SYSCALL_DEFINE1(chroot, const char __user *, filename) { struct path path; int error; unsigned int lookup_flags = LOOKUP_FOLLOW | LOOKUP_DIRECTORY; 
retry: error = user_path_at(AT_FDCWD, filename, lookup_flags, &path); if (error) goto out; error = inode_permission(path.dentry->d_inode, MAY_EXEC | MAY_CHDIR); if (error) goto dput_and_out; error = -EPERM; if (!ns_capable(current_user_ns(), CAP_SYS_CHROOT)) goto dput_and_out; error = security_path_chroot(&path); if (error) goto dput_and_out; set_fs_root(current->fs, &path); error = 0; dput_and_out: path_put(&path); if (retry_estale(error, lookup_flags)) { lookup_flags |= LOOKUP_REVAL; goto retry; } out: return error; } static int chmod_common(struct path *path, umode_t mode) { struct inode *inode = path->dentry->d_inode; struct inode *delegated_inode = NULL; struct iattr newattrs; int error; error = mnt_want_write(path->mnt); if (error) return error; retry_deleg: inode_lock(inode); error = security_path_chmod(path, mode); if (error) goto out_unlock; newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO); newattrs.ia_valid = ATTR_MODE | ATTR_CTIME; error = notify_change(path->dentry, &newattrs, &delegated_inode); out_unlock: inode_unlock(inode); if (delegated_inode) { error = break_deleg_wait(&delegated_inode); if (!error) goto retry_deleg; } mnt_drop_write(path->mnt); return error; } SYSCALL_DEFINE2(fchmod, unsigned int, fd, umode_t, mode) { struct fd f = fdget(fd); int err = -EBADF; if (f.file) { audit_file(f.file); err = chmod_common(&f.file->f_path, mode); fdput(f); } return err; } SYSCALL_DEFINE3(fchmodat, int, dfd, const char __user *, filename, umode_t, mode) { struct path path; int error; unsigned int lookup_flags = LOOKUP_FOLLOW; retry: error = user_path_at(dfd, filename, lookup_flags, &path); if (!error) { error = chmod_common(&path, mode); path_put(&path); if (retry_estale(error, lookup_flags)) { lookup_flags |= LOOKUP_REVAL; goto retry; } } return error; } SYSCALL_DEFINE2(chmod, const char __user *, filename, umode_t, mode) { return sys_fchmodat(AT_FDCWD, filename, mode); } static int chown_common(struct path *path, uid_t user, gid_t 
group) { struct inode *inode = path->dentry->d_inode; struct inode *delegated_inode = NULL; int error; struct iattr newattrs; kuid_t uid; kgid_t gid; uid = make_kuid(current_user_ns(), user); gid = make_kgid(current_user_ns(), group); retry_deleg: newattrs.ia_valid = ATTR_CTIME; if (user != (uid_t) -1) { if (!uid_valid(uid)) return -EINVAL; newattrs.ia_valid |= ATTR_UID; newattrs.ia_uid = uid; } if (group != (gid_t) -1) { if (!gid_valid(gid)) return -EINVAL; newattrs.ia_valid |= ATTR_GID; newattrs.ia_gid = gid; } if (!S_ISDIR(inode->i_mode)) newattrs.ia_valid |= ATTR_KILL_SUID | ATTR_KILL_SGID | ATTR_KILL_PRIV; inode_lock(inode); error = security_path_chown(path, uid, gid); if (!error) error = notify_change(path->dentry, &newattrs, &delegated_inode); inode_unlock(inode); if (delegated_inode) { error = break_deleg_wait(&delegated_inode); if (!error) goto retry_deleg; } return error; } SYSCALL_DEFINE5(fchownat, int, dfd, const char __user *, filename, uid_t, user, gid_t, group, int, flag) { struct path path; int error = -EINVAL; int lookup_flags; if ((flag & ~(AT_SYMLINK_NOFOLLOW | AT_EMPTY_PATH)) != 0) goto out; lookup_flags = (flag & AT_SYMLINK_NOFOLLOW) ? 
0 : LOOKUP_FOLLOW; if (flag & AT_EMPTY_PATH) lookup_flags |= LOOKUP_EMPTY; retry: error = user_path_at(dfd, filename, lookup_flags, &path); if (error) goto out; error = mnt_want_write(path.mnt); if (error) goto out_release; error = chown_common(&path, user, group); mnt_drop_write(path.mnt); out_release: path_put(&path); if (retry_estale(error, lookup_flags)) { lookup_flags |= LOOKUP_REVAL; goto retry; } out: return error; } SYSCALL_DEFINE3(chown, const char __user *, filename, uid_t, user, gid_t, group) { return sys_fchownat(AT_FDCWD, filename, user, group, 0); } SYSCALL_DEFINE3(lchown, const char __user *, filename, uid_t, user, gid_t, group) { return sys_fchownat(AT_FDCWD, filename, user, group, AT_SYMLINK_NOFOLLOW); } SYSCALL_DEFINE3(fchown, unsigned int, fd, uid_t, user, gid_t, group) { struct fd f = fdget(fd); int error = -EBADF; if (!f.file) goto out; error = mnt_want_write_file(f.file); if (error) goto out_fput; audit_file(f.file); error = chown_common(&f.file->f_path, user, group); mnt_drop_write_file(f.file); out_fput: fdput(f); out: return error; } int open_check_o_direct(struct file *f) { /* NB: we're sure to have correct a_ops only after f_op->open */ if (f->f_flags & O_DIRECT) { if (!f->f_mapping->a_ops || !f->f_mapping->a_ops->direct_IO) return -EINVAL; } return 0; } static int do_dentry_open(struct file *f, struct inode *inode, int (*open)(struct inode *, struct file *), const struct cred *cred) { static const struct file_operations empty_fops = {}; int error; f->f_mode = OPEN_FMODE(f->f_flags) | FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE; path_get(&f->f_path); f->f_inode = inode; f->f_mapping = inode->i_mapping; if (unlikely(f->f_flags & O_PATH)) { f->f_mode = FMODE_PATH; f->f_op = &empty_fops; return 0; } if (f->f_mode & FMODE_WRITE && !special_file(inode->i_mode)) { error = get_write_access(inode); if (unlikely(error)) goto cleanup_file; error = __mnt_want_write(f->f_path.mnt); if (unlikely(error)) { put_write_access(inode); goto cleanup_file; } 
f->f_mode |= FMODE_WRITER; } /* POSIX.1-2008/SUSv4 Section XSI 2.9.7 */ if (S_ISREG(inode->i_mode)) f->f_mode |= FMODE_ATOMIC_POS; f->f_op = fops_get(inode->i_fop); if (unlikely(WARN_ON(!f->f_op))) { error = -ENODEV; goto cleanup_all; } error = security_file_open(f, cred); if (error) goto cleanup_all; error = break_lease(inode, f->f_flags); if (error) goto cleanup_all; if (!open) open = f->f_op->open; if (open) { error = open(inode, f); if (error) goto cleanup_all; } if ((f->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ) i_readcount_inc(inode); if ((f->f_mode & FMODE_READ) && likely(f->f_op->read || f->f_op->read_iter)) f->f_mode |= FMODE_CAN_READ; if ((f->f_mode & FMODE_WRITE) && likely(f->f_op->write || f->f_op->write_iter)) f->f_mode |= FMODE_CAN_WRITE; f->f_flags &= ~(O_CREAT | O_EXCL | O_NOCTTY | O_TRUNC); file_ra_state_init(&f->f_ra, f->f_mapping->host->i_mapping); return 0; cleanup_all: fops_put(f->f_op); if (f->f_mode & FMODE_WRITER) { put_write_access(inode); __mnt_drop_write(f->f_path.mnt); } cleanup_file: path_put(&f->f_path); f->f_path.mnt = NULL; f->f_path.dentry = NULL; f->f_inode = NULL; return error; } /** * finish_open - finish opening a file * @file: file pointer * @dentry: pointer to dentry * @open: open callback * @opened: state of open * * This can be used to finish opening a file passed to i_op->atomic_open(). * * If the open callback is set to NULL, then the standard f_op->open() * filesystem callback is substituted. * * NB: the dentry reference is _not_ consumed. If, for example, the dentry is * the return value of d_splice_alias(), then the caller needs to perform dput() * on it after finish_open(). * * On successful return @file is a fully instantiated open file. After this, if * an error occurs in ->atomic_open(), it needs to clean up with fput(). * * Returns zero on success or -errno if the open failed. 
*/ int finish_open(struct file *file, struct dentry *dentry, int (*open)(struct inode *, struct file *), int *opened) { int error; BUG_ON(*opened & FILE_OPENED); /* once it's opened, it's opened */ file->f_path.dentry = dentry; error = do_dentry_open(file, d_backing_inode(dentry), open, current_cred()); if (!error) *opened |= FILE_OPENED; return error; } EXPORT_SYMBOL(finish_open); /** * finish_no_open - finish ->atomic_open() without opening the file * * @file: file pointer * @dentry: dentry or NULL (as returned from ->lookup()) * * This can be used to set the result of a successful lookup in ->atomic_open(). * * NB: unlike finish_open() this function does consume the dentry reference and * the caller need not dput() it. * * Returns "1" which must be the return value of ->atomic_open() after having * called this function. */ int finish_no_open(struct file *file, struct dentry *dentry) { file->f_path.dentry = dentry; return 1; } EXPORT_SYMBOL(finish_no_open); char *file_path(struct file *filp, char *buf, int buflen) { return d_path(&filp->f_path, buf, buflen); } EXPORT_SYMBOL(file_path); /** * vfs_open - open the file at the given path * @path: path to open * @file: newly allocated file with f_flag initialized * @cred: credentials to use */ int vfs_open(const struct path *path, struct file *file, const struct cred *cred) { struct inode *inode = vfs_select_inode(path->dentry, file->f_flags); if (IS_ERR(inode)) return PTR_ERR(inode); file->f_path = *path; return do_dentry_open(file, inode, NULL, cred); } struct file *dentry_open(const struct path *path, int flags, const struct cred *cred) { int error; struct file *f; validate_creds(cred); /* We must always pass in a valid mount pointer. 
*/ BUG_ON(!path->mnt); f = get_empty_filp(); if (!IS_ERR(f)) { f->f_flags = flags; error = vfs_open(path, f, cred); if (!error) { /* from now on we need fput() to dispose of f */ error = open_check_o_direct(f); if (error) { fput(f); f = ERR_PTR(error); } } else { put_filp(f); f = ERR_PTR(error); } } return f; } EXPORT_SYMBOL(dentry_open); static inline int build_open_flags(int flags, umode_t mode, struct open_flags *op) { int lookup_flags = 0; int acc_mode = ACC_MODE(flags); if (flags & (O_CREAT | __O_TMPFILE)) op->mode = (mode & S_IALLUGO) | S_IFREG; else op->mode = 0; /* Must never be set by userspace */ flags &= ~FMODE_NONOTIFY & ~O_CLOEXEC; /* * O_SYNC is implemented as __O_SYNC|O_DSYNC. As many places only * check for O_DSYNC if the need any syncing at all we enforce it's * always set instead of having to deal with possibly weird behaviour * for malicious applications setting only __O_SYNC. */ if (flags & __O_SYNC) flags |= O_DSYNC; if (flags & __O_TMPFILE) { if ((flags & O_TMPFILE_MASK) != O_TMPFILE) return -EINVAL; if (!(acc_mode & MAY_WRITE)) return -EINVAL; } else if (flags & O_PATH) { /* * If we have O_PATH in the open flag. Then we * cannot have anything other than the below set of flags */ flags &= O_DIRECTORY | O_NOFOLLOW | O_PATH; acc_mode = 0; } op->open_flag = flags; /* O_TRUNC implies we need access checks for write permissions */ if (flags & O_TRUNC) acc_mode |= MAY_WRITE; /* Allow the LSM permission hook to distinguish append access from general write access. */ if (flags & O_APPEND) acc_mode |= MAY_APPEND; op->acc_mode = acc_mode; op->intent = flags & O_PATH ? 
0 : LOOKUP_OPEN; if (flags & O_CREAT) { op->intent |= LOOKUP_CREATE; if (flags & O_EXCL) op->intent |= LOOKUP_EXCL; } if (flags & O_DIRECTORY) lookup_flags |= LOOKUP_DIRECTORY; if (!(flags & O_NOFOLLOW)) lookup_flags |= LOOKUP_FOLLOW; op->lookup_flags = lookup_flags; return 0; } /** * file_open_name - open file and return file pointer * * @name: struct filename containing path to open * @flags: open flags as per the open(2) second argument * @mode: mode for the new file if O_CREAT is set, else ignored * * This is the helper to open a file from kernelspace if you really * have to. But in generally you should not do this, so please move * along, nothing to see here.. */ struct file *file_open_name(struct filename *name, int flags, umode_t mode) { struct open_flags op; int err = build_open_flags(flags, mode, &op); return err ? ERR_PTR(err) : do_filp_open(AT_FDCWD, name, &op); } /** * filp_open - open file and return file pointer * * @filename: path to open * @flags: open flags as per the open(2) second argument * @mode: mode for the new file if O_CREAT is set, else ignored * * This is the helper to open a file from kernelspace if you really * have to. But in generally you should not do this, so please move * along, nothing to see here.. 
*/ struct file *filp_open(const char *filename, int flags, umode_t mode) { struct filename *name = getname_kernel(filename); struct file *file = ERR_CAST(name); if (!IS_ERR(name)) { file = file_open_name(name, flags, mode); putname(name); } return file; } EXPORT_SYMBOL(filp_open); struct file *file_open_root(struct dentry *dentry, struct vfsmount *mnt, const char *filename, int flags, umode_t mode) { struct open_flags op; int err = build_open_flags(flags, mode, &op); if (err) return ERR_PTR(err); return do_file_open_root(dentry, mnt, filename, &op); } EXPORT_SYMBOL(file_open_root); long do_sys_open(int dfd, const char __user *filename, int flags, umode_t mode) { struct open_flags op; int fd = build_open_flags(flags, mode, &op); struct filename *tmp; if (fd) return fd; tmp = getname(filename); if (IS_ERR(tmp)) return PTR_ERR(tmp); fd = get_unused_fd_flags(flags); if (fd >= 0) { struct file *f = do_filp_open(dfd, tmp, &op); if (IS_ERR(f)) { put_unused_fd(fd); fd = PTR_ERR(f); } else { fsnotify_open(f); fd_install(fd, f); } } putname(tmp); return fd; } SYSCALL_DEFINE3(open, const char __user *, filename, int, flags, umode_t, mode) { if (force_o_largefile()) flags |= O_LARGEFILE; return do_sys_open(AT_FDCWD, filename, flags, mode); } SYSCALL_DEFINE4(openat, int, dfd, const char __user *, filename, int, flags, umode_t, mode) { if (force_o_largefile()) flags |= O_LARGEFILE; return do_sys_open(dfd, filename, flags, mode); } #ifndef __alpha__ /* * For backward compatibility? Maybe this should be moved * into arch/i386 instead? */ SYSCALL_DEFINE2(creat, const char __user *, pathname, umode_t, mode) { return sys_open(pathname, O_CREAT | O_WRONLY | O_TRUNC, mode); } #endif /* * "id" is the POSIX thread ID. We use the * files pointer for this.. 
*/ int filp_close(struct file *filp, fl_owner_t id) { int retval = 0; if (!file_count(filp)) { printk(KERN_ERR "VFS: Close: file count is 0\n"); return 0; } if (filp->f_op->flush) retval = filp->f_op->flush(filp, id); if (likely(!(filp->f_mode & FMODE_PATH))) { dnotify_flush(filp, id); locks_remove_posix(filp, id); } fput(filp); return retval; } EXPORT_SYMBOL(filp_close); /* * Careful here! We test whether the file pointer is NULL before * releasing the fd. This ensures that one clone task can't release * an fd while another clone is opening it. */ SYSCALL_DEFINE1(close, unsigned int, fd) { int retval = __close_fd(current->files, fd); /* can't restart close syscall because file table entry was cleared */ if (unlikely(retval == -ERESTARTSYS || retval == -ERESTARTNOINTR || retval == -ERESTARTNOHAND || retval == -ERESTART_RESTARTBLOCK)) retval = -EINTR; return retval; } EXPORT_SYMBOL(sys_close); /* * This routine simulates a hangup on the tty, to arrange that users * are given clean terminals at login time. */ SYSCALL_DEFINE0(vhangup) { if (capable(CAP_SYS_TTY_CONFIG)) { tty_vhangup_self(); return 0; } return -EPERM; } /* * Called when an inode is about to be open. * We use this to disallow opening large files on 32bit systems if * the caller didn't specify O_LARGEFILE. On 64bit systems we force * on this flag in sys_open. */ int generic_file_open(struct inode * inode, struct file * filp) { if (!(filp->f_flags & O_LARGEFILE) && i_size_read(inode) > MAX_NON_LFS) return -EOVERFLOW; return 0; } EXPORT_SYMBOL(generic_file_open); /* * This is used by subsystems that don't want seekable * file descriptors. The function is not supposed to ever fail, the only * reason it returns an 'int' and not 'void' is so that it can be plugged * directly into file_operations structure. */ int nonseekable_open(struct inode *inode, struct file *filp) { filp->f_mode &= ~(FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE); return 0; } EXPORT_SYMBOL(nonseekable_open);
./CrossVul/dataset_final_sorted/CWE-284/c/good_5198_0
crossvul-cpp_data_bad_5092_0
/* * socket.c * * Copyright (c) 2012 Martin Szulecki All Rights Reserved. * Copyright (c) 2012 Nikias Bassen All Rights Reserved. * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include <stdio.h> #include <stddef.h> #include <stdlib.h> #include <string.h> #include <unistd.h> #include <errno.h> #include <sys/time.h> #include <sys/stat.h> #ifdef WIN32 #include <winsock2.h> #include <windows.h> static int wsa_init = 0; #else #include <sys/socket.h> #include <sys/un.h> #include <netinet/in.h> #include <netdb.h> #include <arpa/inet.h> #endif #include "socket.h" #define RECV_TIMEOUT 20000 static int verbose = 0; void socket_set_verbose(int level) { verbose = level; } #ifndef WIN32 int socket_create_unix(const char *filename) { struct sockaddr_un name; int sock; size_t size; // remove if still present unlink(filename); /* Create the socket. */ sock = socket(PF_LOCAL, SOCK_STREAM, 0); if (sock < 0) { perror("socket"); return -1; } /* Bind a name to the socket. */ name.sun_family = AF_LOCAL; strncpy(name.sun_path, filename, sizeof(name.sun_path)); name.sun_path[sizeof(name.sun_path) - 1] = '\0'; /* The size of the address is the offset of the start of the filename, plus its length, plus one for the terminating null byte. 
Alternatively you can just do: size = SUN_LEN (&name); */ size = (offsetof(struct sockaddr_un, sun_path) + strlen(name.sun_path) + 1); if (bind(sock, (struct sockaddr *) &name, size) < 0) { perror("bind"); socket_close(sock); return -1; } if (listen(sock, 10) < 0) { perror("listen"); socket_close(sock); return -1; } return sock; } int socket_connect_unix(const char *filename) { struct sockaddr_un name; int sfd = -1; size_t size; struct stat fst; // check if socket file exists... if (stat(filename, &fst) != 0) { if (verbose >= 2) fprintf(stderr, "%s: stat '%s': %s\n", __func__, filename, strerror(errno)); return -1; } // ... and if it is a unix domain socket if (!S_ISSOCK(fst.st_mode)) { if (verbose >= 2) fprintf(stderr, "%s: File '%s' is not a socket!\n", __func__, filename); return -1; } // make a new socket if ((sfd = socket(PF_LOCAL, SOCK_STREAM, 0)) < 0) { if (verbose >= 2) fprintf(stderr, "%s: socket: %s\n", __func__, strerror(errno)); return -1; } // and connect to 'filename' name.sun_family = AF_LOCAL; strncpy(name.sun_path, filename, sizeof(name.sun_path)); name.sun_path[sizeof(name.sun_path) - 1] = 0; size = (offsetof(struct sockaddr_un, sun_path) + strlen(name.sun_path) + 1); if (connect(sfd, (struct sockaddr *) &name, size) < 0) { socket_close(sfd); if (verbose >= 2) fprintf(stderr, "%s: connect: %s\n", __func__, strerror(errno)); return -1; } return sfd; } #endif int socket_create(uint16_t port) { int sfd = -1; int yes = 1; #ifdef WIN32 WSADATA wsa_data; if (!wsa_init) { if (WSAStartup(MAKEWORD(2,2), &wsa_data) != ERROR_SUCCESS) { fprintf(stderr, "WSAStartup failed!\n"); ExitProcess(-1); } wsa_init = 1; } #endif struct sockaddr_in saddr; if (0 > (sfd = socket(PF_INET, SOCK_STREAM, IPPROTO_TCP))) { perror("socket()"); return -1; } if (setsockopt(sfd, SOL_SOCKET, SO_REUSEADDR, (void*)&yes, sizeof(int)) == -1) { perror("setsockopt()"); socket_close(sfd); return -1; } memset((void *) &saddr, 0, sizeof(saddr)); saddr.sin_family = AF_INET; 
/* --- tail of a socket-creation function (its head is above this chunk):
 * bind the already-created socket `sfd` to INADDR_ANY:port and listen. --- */
saddr.sin_addr.s_addr = htonl(INADDR_ANY);
saddr.sin_port = htons(port);
if (0 > bind(sfd, (struct sockaddr *) &saddr, sizeof(saddr))) {
	perror("bind()");
	socket_close(sfd);
	return -1;
}
/* backlog of 1: only a single pending connection is expected */
if (listen(sfd, 1) == -1) {
	perror("listen()");
	socket_close(sfd);
	return -1;
}
return sfd;
}

/* Open a TCP connection to host `addr` on `port`.
 * Returns the connected fd, -1 on lookup/setup failure, -2 if connect()
 * itself failed (callers can distinguish "unreachable" from "bad input"). */
int socket_connect(const char *addr, uint16_t port)
{
	int sfd = -1;
	int yes = 1;
	struct hostent *hp;
	struct sockaddr_in saddr;
#ifdef WIN32
	WSADATA wsa_data;
	/* one-time Winsock initialization, guarded by the module flag wsa_init */
	if (!wsa_init) {
		if (WSAStartup(MAKEWORD(2,2), &wsa_data) != ERROR_SUCCESS) {
			fprintf(stderr, "WSAStartup failed!\n");
			ExitProcess(-1);
		}
		wsa_init = 1;
	}
#endif
	if (!addr) {
		errno = EINVAL;
		return -1;
	}
	/* NOTE(review): gethostbyname() is IPv4-only and not thread-safe;
	 * presumably acceptable for this code base -- confirm before reuse. */
	if ((hp = gethostbyname(addr)) == NULL) {
		if (verbose >= 2)
			fprintf(stderr, "%s: unknown host '%s'\n", __func__, addr);
		return -1;
	}
	if (!hp->h_addr) {
		if (verbose >= 2)
			fprintf(stderr, "%s: gethostbyname returned NULL address!\n", __func__);
		return -1;
	}
	if (0 > (sfd = socket(PF_INET, SOCK_STREAM, IPPROTO_TCP))) {
		perror("socket()");
		return -1;
	}
	if (setsockopt(sfd, SOL_SOCKET, SO_REUSEADDR, (void*)&yes, sizeof(int)) == -1) {
		perror("setsockopt()");
		socket_close(sfd);
		return -1;
	}
	memset((void *) &saddr, 0, sizeof(saddr));
	saddr.sin_family = AF_INET;
	/* first resolved IPv4 address, already in network byte order */
	saddr.sin_addr.s_addr = *(uint32_t *) hp->h_addr;
	saddr.sin_port = htons(port);
	if (connect(sfd, (struct sockaddr *) &saddr, sizeof(saddr)) < 0) {
		perror("connect");
		socket_close(sfd);
		return -2;
	}
	return sfd;
}

/* select()-based readiness check on `fd` for the given mode (read/write/
 * except). `timeout` is in milliseconds; 0 means block indefinitely.
 * Retries transparently on EINTR. Returns select()'s result: >0 ready,
 * 0 timed out, -1 error. */
int socket_check_fd(int fd, fd_mode fdm, unsigned int timeout)
{
	fd_set fds;
	int sret;
	int eagain;
	struct timeval to;
	struct timeval *pto;
	if (fd <= 0) {
		if (verbose >= 2)
			fprintf(stderr, "ERROR: invalid fd in check_fd %d\n", fd);
		return -1;
	}
	FD_ZERO(&fds);
	FD_SET(fd, &fds);
	if (timeout > 0) {
		/* split milliseconds into seconds + microseconds for timeval */
		to.tv_sec = (time_t) (timeout / 1000);
		to.tv_usec = (time_t) ((timeout - (to.tv_sec * 1000)) * 1000);
		pto = &to;
	} else {
		pto = NULL;
	}
	sret = -1;
	do {
		eagain = 0;
		switch (fdm) {
		case FDM_READ:
			sret = select(fd + 1, &fds, NULL, NULL, pto);
			break;
		case FDM_WRITE:
			sret = select(fd + 1, NULL, &fds, NULL, pto);
			break;
		case FDM_EXCEPT:
			sret = select(fd + 1, NULL, NULL, &fds, pto);
			break;
		default:
			return -1;
		}
		if (sret < 0) {
			switch (errno) {
			case EINTR:
				// interrupt signal in select
				if (verbose >= 2)
					fprintf(stderr, "%s: EINTR\n", __func__);
				eagain = 1;
				break;
			case EAGAIN:
				if (verbose >= 2)
					fprintf(stderr, "%s: EAGAIN\n", __func__);
				break;
			default:
				if (verbose >= 2)
					fprintf(stderr, "%s: select failed: %s\n", __func__,
							strerror(errno));
				return -1;
			}
		}
	} while (eagain);
	return sret;
}

/* Accept one incoming connection on listening socket `fd`.
 * The `addr` struct passed to accept() is an out-parameter; the values
 * written into it before the call (including `port`) are overwritten
 * by accept() and never read back here. Returns the new fd or -1. */
int socket_accept(int fd, uint16_t port)
{
#ifdef WIN32
	int addr_len;
#else
	socklen_t addr_len;
#endif
	int result;
	struct sockaddr_in addr;
	memset(&addr, 0, sizeof(addr));
	addr.sin_family = AF_INET;
	addr.sin_addr.s_addr = htonl(INADDR_ANY);
	addr.sin_port = htons(port);
	addr_len = sizeof(addr);
	result = accept(fd, (struct sockaddr*)&addr, &addr_len);
	return result;
}

/* Thin wrapper around shutdown(2). */
int socket_shutdown(int fd, int how)
{
	return shutdown(fd, how);
}

/* Close a socket fd, using the platform-correct call. */
int socket_close(int fd) {
#ifdef WIN32
	return closesocket(fd);
#else
	return close(fd);
#endif
}

/* Receive up to `length` bytes with the default RECV_TIMEOUT. */
int socket_receive(int fd, void *data, size_t length)
{
	return socket_receive_timeout(fd, data, length, 0, RECV_TIMEOUT);
}

/* Like socket_receive() but leaves the data in the kernel buffer (MSG_PEEK). */
int socket_peek(int fd, void *data, size_t length)
{
	return socket_receive_timeout(fd, data, length, MSG_PEEK, RECV_TIMEOUT);
}

/* Receive up to `length` bytes from `fd`, waiting at most `timeout` ms for
 * data to arrive. Returns the byte count, 0 on timeout, -EAGAIN when the
 * peer closed the connection (recv returned 0 after select said readable),
 * or -errno on a recv error. */
int socket_receive_timeout(int fd, void *data, size_t length, int flags,
					 unsigned int timeout)
{
	int res;
	int result;

	// check if data is available
	res = socket_check_fd(fd, FDM_READ, timeout);
	if (res <= 0) {
		return res;
	}
	// if we get here, there _is_ data available
	result = recv(fd, data, length, flags);
	if (res > 0 && result == 0) {
		// but this is an error condition
		if (verbose >= 3)
			fprintf(stderr, "%s: fd=%d recv returned 0\n", __func__, fd);
		return -EAGAIN;
	}
	if (result < 0) {
		return -errno;
	}
	return result;
}

/* Blocking send of `length` bytes; returns send(2)'s result. */
int socket_send(int fd, void *data, size_t length)
{
	return send(fd, data, length, 0);
}
./CrossVul/dataset_final_sorted/CWE-284/c/bad_5092_0
crossvul-cpp_data_good_5204_2
/************************************************************************** * * Copyright (c) 2000-2003 Intel Corporation * All rights reserved. * Copyright (c) 2012 France Telecom All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * - Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * - Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * - Neither name of Intel Corporation nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL INTEL OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************/ /*! * \file * * \brief Defines the Web Server and has functions to carry out * operations of the Web Server. 
 */
#include "config.h"

#if EXCLUDE_WEB_SERVER == 0

#include "webserver.h"
#include "FileInfo.h"
#include "httpparser.h"
#include "httpreadwrite.h"
#include "ithread.h"
#include "membuffer.h"
#include "ssdplib.h"
#include "statcodes.h"
#include "strintmap.h"
#include "unixutil.h"
#include "upnp.h"
#include "upnpapi.h"
#include "UpnpIntTypes.h"
#include "UpnpStdInt.h"
#include "upnputil.h"
#include "VirtualDir.h"

#include <assert.h>
#include <fcntl.h>
#include <sys/stat.h>

#ifdef WIN32
/* MSVC historically lacked C99 snprintf */
#define snprintf _snprintf
#endif

/*!
 * Response Types.
 */
enum resp_type {
	RESP_FILEDOC,
	RESP_XMLDOC,
	RESP_HEADERS,
	RESP_WEBDOC,
	RESP_POST
};

/* mapping of file extension to content-type of document */
struct document_type_t {
	/*! File extension (without the leading dot), e.g. "mp3". */
	const char *file_ext;
	/*! Top-level media type, e.g. "audio"; points into gMediaTypes. */
	const char *content_type;
	/*! Media subtype, e.g. "mpeg". */
	const char *content_subtype;
};

struct xml_alias_t {
	/*! name of DOC from root; e.g.: /foo/bar/mydesc.xml */
	membuffer name;
	/*! the XML document contents */
	membuffer doc;
	/*! Timestamp used for the Last-Modified response header. */
	time_t last_modified;
	/*! Heap-allocated reference count shared by all copies of this alias. */
	int *ct;
};

static const char *gMediaTypes[] = {
	/*! 0. unused slot so indices match the encoded \1..\5 bytes below */
	NULL,
	/*! 1. */
	"audio",
	/*! 2. */
	"video",
	/*! 3. */
	"image",
	/*! 4. */
	"application",
	/*! 5. */
	"text"
};

/*
 * Defines.
 */

/* index into 'gMediaTypes' */
#define AUDIO_STR "\1"
#define VIDEO_STR "\2"
#define IMAGE_STR "\3"
#define APPLICATION_STR "\4"
#define TEXT_STR "\5"

/* int index */
#define APPLICATION_INDEX 4
#define TEXT_INDEX 5

/* general */
#define NUM_MEDIA_TYPES 70
#define NUM_HTTP_HEADER_NAMES 33

#define ASCTIME_R_BUFFER_SIZE 26

#ifdef WIN32
/* asctime_r replacement for Windows, built on asctime_s.
 * `buf` must be at least ASCTIME_R_BUFFER_SIZE bytes. */
static char *web_server_asctime_r(const struct tm *tm, char *buf)
{
	if (tm == NULL || buf == NULL)
		return NULL;

	asctime_s(buf, ASCTIME_R_BUFFER_SIZE, tm);
	return buf;
}
#else
#define web_server_asctime_r asctime_r
#endif

/* sorted by file extension; must have 'NUM_MEDIA_TYPES' extensions
 * Layout per entry: "<ext>\0" <type-index-byte> "<subtype>\0";
 * decoded at startup by media_list_init(). */
static const char *gEncodedMediaTypes =
	"aif\0" AUDIO_STR "aiff\0"
	"aifc\0" AUDIO_STR "aiff\0"
	"aiff\0" AUDIO_STR "aiff\0"
	"asf\0" VIDEO_STR "x-ms-asf\0"
	"asx\0" VIDEO_STR "x-ms-asf\0"
	"au\0" AUDIO_STR "basic\0"
	"avi\0" VIDEO_STR "msvideo\0"
	"bmp\0" IMAGE_STR "bmp\0"
	"css\0" TEXT_STR "css\0"
	"dcr\0" APPLICATION_STR "x-director\0"
	"dib\0" IMAGE_STR "bmp\0"
	"dir\0" APPLICATION_STR "x-director\0"
	"dxr\0" APPLICATION_STR "x-director\0"
	"gif\0" IMAGE_STR "gif\0"
	"hta\0" TEXT_STR "hta\0"
	"htm\0" TEXT_STR "html\0"
	"html\0" TEXT_STR "html\0"
	"jar\0" APPLICATION_STR "java-archive\0"
	"jfif\0" IMAGE_STR "pjpeg\0"
	"jpe\0" IMAGE_STR "jpeg\0"
	"jpeg\0" IMAGE_STR "jpeg\0"
	"jpg\0" IMAGE_STR "jpeg\0"
	"js\0" APPLICATION_STR "x-javascript\0"
	"kar\0" AUDIO_STR "midi\0"
	"m3u\0" AUDIO_STR "mpegurl\0"
	"mid\0" AUDIO_STR "midi\0"
	"midi\0" AUDIO_STR "midi\0"
	"mov\0" VIDEO_STR "quicktime\0"
	"mp2v\0" VIDEO_STR "x-mpeg2\0"
	"mp3\0" AUDIO_STR "mpeg\0"
	"mpe\0" VIDEO_STR "mpeg\0"
	"mpeg\0" VIDEO_STR "mpeg\0"
	"mpg\0" VIDEO_STR "mpeg\0"
	"mpv\0" VIDEO_STR "mpeg\0"
	"mpv2\0" VIDEO_STR "x-mpeg2\0"
	"pdf\0" APPLICATION_STR "pdf\0"
	"pjp\0" IMAGE_STR "jpeg\0"
	"pjpeg\0" IMAGE_STR "jpeg\0"
	"plg\0" TEXT_STR "html\0"
	"pls\0" AUDIO_STR "scpls\0"
	"png\0" IMAGE_STR "png\0"
	"qt\0" VIDEO_STR "quicktime\0"
	"ram\0" AUDIO_STR "x-pn-realaudio\0"
	"rmi\0" AUDIO_STR "mid\0"
	"rmm\0" AUDIO_STR
	"x-pn-realaudio\0"
	"rtf\0" APPLICATION_STR "rtf\0"
	"shtml\0" TEXT_STR "html\0"
	"smf\0" AUDIO_STR "midi\0"
	"snd\0" AUDIO_STR "basic\0"
	"spl\0" APPLICATION_STR "futuresplash\0"
	"ssm\0" APPLICATION_STR "streamingmedia\0"
	"swf\0" APPLICATION_STR "x-shockwave-flash\0"
	"tar\0" APPLICATION_STR "tar\0"
	"tcl\0" APPLICATION_STR "x-tcl\0"
	"text\0" TEXT_STR "plain\0"
	"tif\0" IMAGE_STR "tiff\0"
	"tiff\0" IMAGE_STR "tiff\0"
	"txt\0" TEXT_STR "plain\0"
	"ulw\0" AUDIO_STR "basic\0"
	"wav\0" AUDIO_STR "wav\0"
	"wax\0" AUDIO_STR "x-ms-wax\0"
	"wm\0" VIDEO_STR "x-ms-wm\0"
	"wma\0" AUDIO_STR "x-ms-wma\0"
	"wmv\0" VIDEO_STR "x-ms-wmv\0"
	"wvx\0" VIDEO_STR "x-ms-wvx\0"
	"xbm\0" IMAGE_STR "x-xbitmap\0"
	"xml\0" TEXT_STR "xml\0"
	"xsl\0" TEXT_STR "xml\0"
	"z\0" APPLICATION_STR "x-compress\0"
	"zip\0" APPLICATION_STR "zip\0"
	"\0";
/* *** end *** */

/*!
 * module variables - Globals, static and externs.
 */
static struct document_type_t gMediaTypeList[NUM_MEDIA_TYPES];

/*! Global variable. A local dir which serves as webserver root. */
membuffer gDocumentRootDir;

/*! XML document. */
static struct xml_alias_t gAliasDoc;
/* Protects gAliasDoc and its shared refcount. */
static ithread_mutex_t gWebMutex;
extern str_int_entry Http_Header_Names[NUM_HTTP_HEADER_NAMES];

/*!
 * \brief Decodes list and stores it in gMediaTypeList.
 *
 * Walks gEncodedMediaTypes: each entry is "<ext>\0<type-byte><subtype>\0",
 * where <type-byte> indexes gMediaTypes. Must decode exactly
 * NUM_MEDIA_TYPES entries (asserted).
 */
static UPNP_INLINE void media_list_init(void)
{
	int i;
	const char *s = gEncodedMediaTypes;
	struct document_type_t *doc_type;

	for (i = 0; *s != '\0'; i++) {
		doc_type = &gMediaTypeList[i];
		doc_type->file_ext = s;
		/* point to type. */
		s += strlen(s) + 1;
		doc_type->content_type = gMediaTypes[(int)*s];
		/* point to subtype. */
		s++;
		doc_type->content_subtype = s;
		/* next entry. */
		s += strlen(s) + 1;
	}
	assert(i == NUM_MEDIA_TYPES);
}

/*!
 * \brief Based on the extension, returns the content type and content
 * subtype.
 *
 * Binary search over gMediaTypeList (case-insensitive; the table is
 * sorted by extension).
 *
 * \return
 * \li \c 0 on success
 * \li \c -1 on error
 */
static UPNP_INLINE int search_extension(
	/*! [in] Extension to look up (without the leading dot). */
	const char *extension,
	/*! [out] Receives the top-level content type string. */
	const char **con_type,
	/*! [out] Receives the content subtype string. */
	const char **con_subtype)
{
	int top, mid, bot;
	int cmp;

	top = 0;
	bot = NUM_MEDIA_TYPES - 1;
	while (top <= bot) {
		mid = (top + bot) / 2;
		cmp = strcasecmp(extension, gMediaTypeList[mid].file_ext);
		if (cmp > 0) {
			/* look below mid. */
			top = mid + 1;
		} else if (cmp < 0) {
			/* look above mid. */
			bot = mid - 1;
		} else {
			/* cmp == 0 */
			*con_type = gMediaTypeList[mid].content_type;
			*con_subtype = gMediaTypeList[mid].content_subtype;
			return 0;
		}
	}
	return -1;
}

/*!
 * \brief Based on the extension, clones an XML string based on type and
 * content subtype. If content type and sub type are not found, unknown
 * types are used.
 *
 * \return
 * \li \c 0 on success.
 * \li \c UPNP_E_OUTOF_MEMORY - on memory allocation failures.
 */
static UPNP_INLINE int get_content_type(
	/*! [in] File name whose extension selects the content type. */
	const char *filename,
	/*! [out] File info object whose ContentType field is set. */
	OUT UpnpFileInfo *fileInfo)
{
	const char *extension;
	const char *type;
	const char *subtype;
	int ctype_found = FALSE;
	char *temp = NULL;
	size_t length = 0;
	int rc = 0;

	UpnpFileInfo_set_ContentType(fileInfo, NULL);
	/* get ext */
	extension = strrchr(filename, '.');
	if (extension != NULL)
		if (search_extension(extension + 1, &type, &subtype) == 0)
			ctype_found = TRUE;
	if (!ctype_found) {
		/* unknown content type */
		type = gMediaTypes[APPLICATION_INDEX];
		subtype = "octet-stream";
	}
	length = strlen(type) + strlen("/") + strlen(subtype) + 1;
	temp = malloc(length);
	if (!temp)
		return UPNP_E_OUTOF_MEMORY;
	rc = snprintf(temp, length, "%s/%s", type, subtype);
	if (rc < 0 || (unsigned int) rc >= length) {
		free(temp);
		return UPNP_E_OUTOF_MEMORY;
	}
	/* setter copies the string, so the local buffer can be freed */
	UpnpFileInfo_set_ContentType(fileInfo, temp);
	free(temp);
	if (!UpnpFileInfo_get_ContentType(fileInfo))
		return UPNP_E_OUTOF_MEMORY;

	return 0;
}

/*!
 * \brief Initialize the global XML document. Allocate buffers for the XML
 * document.
 */
static UPNP_INLINE void glob_alias_init(void)
{
	struct xml_alias_t *alias = &gAliasDoc;

	membuffer_init(&alias->doc);
	membuffer_init(&alias->name);
	alias->ct = NULL;
	alias->last_modified = 0;
}

/*!
 * \brief Check for the validity of the XML object buffer.
 *
 * \return BOOLEAN.
 */
static UPNP_INLINE int is_valid_alias(
	/*! [in] XML alias object. */
	const struct xml_alias_t *alias)
{
	return alias->doc.buf != NULL;
}

/*!
 * \brief Copy the contents of the global XML document into the local output
 * parameter.
 *
 * Takes a shared reference: the copy and gAliasDoc share the same doc/name
 * buffers and refcount; the count is incremented under gWebMutex.
 */
static void alias_grab(
	/*! [out] XML alias object. */
	struct xml_alias_t *alias)
{
	ithread_mutex_lock(&gWebMutex);
	assert(is_valid_alias(&gAliasDoc));
	memcpy(alias, &gAliasDoc, sizeof(struct xml_alias_t));
	*alias->ct = *alias->ct + 1;
	ithread_mutex_unlock(&gWebMutex);
}

/*!
 * \brief Release the XML document referred to by the input parameter. Free
 * the allocated buffers associated with this object.
 */
static void alias_release(
	/*! [in] XML alias object. */
	struct xml_alias_t *alias)
{
	ithread_mutex_lock(&gWebMutex);
	/* ignore invalid alias */
	if (!is_valid_alias(alias)) {
		ithread_mutex_unlock(&gWebMutex);
		return;
	}
	assert(*alias->ct > 0);
	*alias->ct -= 1;
	/* last reference gone: free the shared buffers and the refcount */
	if (*alias->ct <= 0) {
		membuffer_destroy(&alias->doc);
		membuffer_destroy(&alias->name);
		free(alias->ct);
	}
	ithread_mutex_unlock(&gWebMutex);
}

/* Install (or clear, when alias_name is NULL) the aliased XML document
 * served by the web server. Takes ownership of alias_content via
 * membuffer_attach. Returns 0 on success, UPNP_E_OUTOF_MEMORY otherwise. */
int web_server_set_alias(const char *alias_name,
	const char *alias_content, size_t alias_content_length,
	time_t last_modified)
{
	int ret_code;
	struct xml_alias_t alias;

	/* drop our reference to any previously installed alias */
	alias_release(&gAliasDoc);
	if (alias_name == NULL) {
		/* don't serve aliased doc anymore */
		return 0;
	}
	assert(alias_content != NULL);
	membuffer_init(&alias.doc);
	membuffer_init(&alias.name);
	alias.ct = NULL;
	do {
		/* insert leading /, if missing */
		if (*alias_name != '/')
			if (membuffer_assign_str(&alias.name, "/") != 0)
				break;	/* error; out of mem */
		ret_code = membuffer_append_str(&alias.name, alias_name);
		if (ret_code != 0)
			break;	/* error */
		if ((alias.ct = (int *)malloc(sizeof(int))) == NULL)
			break;	/* error */
		*alias.ct = 1;
		/* attach: membuffer takes ownership of alias_content, no copy */
		membuffer_attach(&alias.doc, (char *)alias_content,
				 alias_content_length);
		alias.last_modified = last_modified;
		/* save in module var */
		ithread_mutex_lock(&gWebMutex);
		gAliasDoc = alias;
		ithread_mutex_unlock(&gWebMutex);

		return 0;
	} while (FALSE);
	/* error handler */
	/* free temp alias */
	membuffer_destroy(&alias.name);
	membuffer_destroy(&alias.doc);
	free(alias.ct);
	return UPNP_E_OUTOF_MEMORY;
}

/* One-time web server initialization: decodes the media-type table,
 * resets the root dir, alias and virtual-dir callbacks, and creates
 * gWebMutex. Idempotent while the server is already enabled. */
int web_server_init()
{
	int ret = 0;

	if (bWebServerState == WEB_SERVER_DISABLED) {
		/* decode media list */
		media_list_init();
		membuffer_init(&gDocumentRootDir);
		glob_alias_init();
		pVirtualDirList = NULL;

		/* Initialize callbacks */
		virtualDirCallback.get_info = NULL;
		virtualDirCallback.open = NULL;
		virtualDirCallback.read = NULL;
		virtualDirCallback.write = NULL;
		virtualDirCallback.seek = NULL;
		virtualDirCallback.close = NULL;

		if (ithread_mutex_init(&gWebMutex, NULL) == -1)
			ret = UPNP_E_OUTOF_MEMORY;
		else
			bWebServerState = WEB_SERVER_ENABLED;
	}

	return ret;
}

/* Tear down the web server: frees the root dir and the alias document,
 * destroys gWebMutex and marks the server disabled. */
void web_server_destroy(void)
{
	if (bWebServerState == WEB_SERVER_ENABLED) {
		membuffer_destroy(&gDocumentRootDir);
		alias_release(&gAliasDoc);

		ithread_mutex_lock(&gWebMutex);
		memset(&gAliasDoc, 0, sizeof(struct xml_alias_t));
		ithread_mutex_unlock(&gWebMutex);

		ithread_mutex_destroy(&gWebMutex);
		bWebServerState = WEB_SERVER_DISABLED;
	}
}

/*!
 * \brief Query the filesystem for a file's attributes (directory flag,
 * readability, length, last-modified time) and fill in the info object,
 * including the content type derived from the file extension.
 *
 * \return 0 on success, -1 if the file cannot be stat'ed or is neither a
 * regular file nor a directory (otherwise get_content_type's result).
 */
static int get_file_info(
	/*! [in] Filename having the description document. */
	const char *filename,
	/*! [out] File information object having file attributes such as filelength,
	 * when was the file last modified, whether a file or a directory and
	 * whether the file or directory is readable.
	 */
	OUT UpnpFileInfo *info)
{
	int code;
	struct stat s;
	FILE *fp;
	int rc = 0;
	time_t aux_LastModified;
	struct tm date;
	char buffer[ASCTIME_R_BUFFER_SIZE];

	UpnpFileInfo_set_ContentType(info, NULL);
	code = stat(filename, &s);
	if (code == -1)
		return -1;
	if (S_ISDIR(s.st_mode))
		UpnpFileInfo_set_IsDirectory(info, TRUE);
	else if (S_ISREG(s.st_mode))
		UpnpFileInfo_set_IsDirectory(info, FALSE);
	else
		return -1;
	/* check readable */
	fp = fopen(filename, "r");
	UpnpFileInfo_set_IsReadable(info, fp != NULL);
	if (fp)
		fclose(fp);
	UpnpFileInfo_set_FileLength(info, s.st_size);
	UpnpFileInfo_set_LastModified(info, s.st_mtime);
	rc = get_content_type(filename, info);
	aux_LastModified = UpnpFileInfo_get_LastModified(info);
	UpnpPrintf(UPNP_INFO, HTTP, __FILE__, __LINE__,
		   "file info: %s, length: %lld, last_mod=%s readable=%d\n",
		   filename,
		   (long long)UpnpFileInfo_get_FileLength(info),
		   web_server_asctime_r(http_gmtime_r(&aux_LastModified, &date),
					buffer),
		   UpnpFileInfo_get_IsReadable(info));

	return rc;
}

/* Set the local directory served as the webserver root. A trailing '/'
 * is stripped so paths can be appended verbatim later. */
int web_server_set_root_dir(const char *root_dir)
{
	size_t index;
	int ret;

	ret = membuffer_assign_str(&gDocumentRootDir, root_dir);
	if (ret != 0)
		return ret;
	/* remove trailing '/', if any */
	if (gDocumentRootDir.length > 0) {
		index = gDocumentRootDir.length - 1;	/* last char */
		if (gDocumentRootDir.buf[index] == '/')
			membuffer_delete(&gDocumentRootDir, index, 1);
	}

	return 0;
}

/*!
 * \brief Compare the files names between the one on the XML alias the one
 * passed in as the input parameter. If equal extract file information.
 *
 * \return
 * \li \c TRUE - On Success
 * \li \c FALSE if request is not an alias
 */
static UPNP_INLINE int get_alias(
	/*! [in] request file passed in to be compared with. */
	const char *request_file,
	/*! [out] xml alias object which has a file name stored. */
	struct xml_alias_t *alias,
	/*! [out] File information object which will be filled up if the file
	 * comparison succeeds. */
	UpnpFileInfo *info)
{
	int cmp = strcmp(alias->name.buf, request_file);

	if (cmp == 0) {
		UpnpFileInfo_set_FileLength(info, (off_t)alias->doc.length);
		UpnpFileInfo_set_IsDirectory(info, FALSE);
		UpnpFileInfo_set_IsReadable(info, TRUE);
		UpnpFileInfo_set_LastModified(info, alias->last_modified);
	}

	return cmp == 0;
}

/*!
 * \brief Compares filePath with paths from the list of virtual directory
 * lists.
 *
 * A match requires either the registered dir name ending in '/' to be a
 * prefix of filePath, or the prefix to be followed by '/', '\0' or '?'
 * (so "/dirx" does not match a registered "/dir").
 *
 * \return BOOLEAN.
 */
static int isFileInVirtualDir(
	/*! [in] Directory path to be tested for virtual directory. */
	char *filePath)
{
	virtualDirList *pCurVirtualDir;
	size_t webDirLen;

	pCurVirtualDir = pVirtualDirList;
	while (pCurVirtualDir != NULL) {
		webDirLen = strlen(pCurVirtualDir->dirName);
		if (webDirLen) {
			if (pCurVirtualDir->dirName[webDirLen - 1] == '/') {
				if (strncmp(pCurVirtualDir->dirName, filePath,
					    webDirLen) == 0)
					return !0;
			} else {
				if (strncmp(pCurVirtualDir->dirName, filePath,
					    webDirLen) == 0 &&
				    (filePath[webDirLen] == '/' ||
				     filePath[webDirLen] == '\0' ||
				     filePath[webDirLen] == '?'))
					return !0;
			}
		}
		pCurVirtualDir = pCurVirtualDir->next;
	}

	return 0;
}

/*!
 * \brief Converts input string to upper case.
 */
static void ToUpperCase(
	/*! Input string to be converted. */
	char *s)
{
	while (*s) {
		*s = (char)toupper(*s);
		++s;
	}
}

/*!
 * \brief Finds a substring from a string in a case insensitive way.
 *
 * Implemented by upper-casing heap copies of both strings, so it returns
 * NULL on allocation failure as well as on "not found".
 *
 * \return A pointer to the first occurence of s2 in s1.
 */
static char *StrStr(
	/*! Input string. */
	char *s1,
	/*! Input sub-string. */
	const char *s2)
{
	char *Str1;
	char *Str2;
	const char *Ptr;
	char *ret = NULL;

	Str1 = strdup(s1);
	if (!Str1)
		goto error1;
	Str2 = strdup(s2);
	if (!Str2)
		goto error2;
	ToUpperCase(Str1);
	ToUpperCase(Str2);
	Ptr = strstr(Str1, Str2);
	if (!Ptr)
		ret = NULL;
	else
		/* translate the match offset back into the original string */
		ret = s1 + (Ptr - Str1);

	free(Str2);
error2:
	free(Str1);
error1:
	return ret;
}

/*!
 * \brief Finds next token in a string.
 *
 * Destructive tokenizer: terminates the token in place and advances *Src
 * past the delimiter (or sets it to NULL at the end of input).
 *
 * \return Pointer to the next token.
 */
static char *StrTok(
	/*! String containing the token. */
	char **Src,
	/*! Set of delimiter characters.
	 */
	const char *Del)
{
	char *TmpPtr;
	char *RetPtr;

	if (*Src != NULL) {
		RetPtr = *Src;
		TmpPtr = strstr(*Src, Del);
		if (TmpPtr != NULL) {
			*TmpPtr = '\0';
			*Src = TmpPtr + strlen(Del);
		} else
			*Src = NULL;

		return RetPtr;
	}

	return NULL;
}

/*!
 * \brief Returns a range of integers from a string.
 *
 * Parses the next comma-separated "first-last" pair; a suffix range
 * ("-N") is reported with the parsed value in *LastByte and -1 in
 * *FirstByte's counterpart per the swap below.
 *
 * \return 1 when a range was parsed, -1 when the input is exhausted or
 * contains no '-' (the original doc claimed "always 1").
 */
static int GetNextRange(
	/*! string containing the token / range. */
	char **SrcRangeStr,
	/*! gets the first byte of the token. */
	off_t *FirstByte,
	/*! gets the last byte of the token. */
	off_t *LastByte)
{
	char *Ptr;
	char *Tok;
	int i;
	int64_t F = -1;
	int64_t L = -1;
	int Is_Suffix_byte_Range = 1;

	if (*SrcRangeStr == NULL)
		return -1;
	Tok = StrTok(SrcRangeStr, ",");
	if ((Ptr = strstr(Tok, "-")) == NULL)
		return -1;
	/* temporarily split "F-L" so sscanf sees two numbers */
	*Ptr = ' ';
	sscanf(Tok, "%" SCNd64 "%" SCNd64, &F, &L);
	if (F == -1 || L == -1) {
		*Ptr = '-';
		for (i = 0; i < (int)strlen(Tok); i++) {
			if (Tok[i] == '-') {
				break;
			} else if (isdigit(Tok[i])) {
				Is_Suffix_byte_Range = 0;
				break;
			}
		}
		if (Is_Suffix_byte_Range) {
			/* "-N": sscanf put N into L because of the sign */
			*FirstByte = (off_t) L;
			*LastByte = (off_t) F;
			return 1;
		}
	}
	*FirstByte = (off_t) F;
	*LastByte = (off_t) L;

	return 1;
}

/*!
 * \brief Fills in the Offset, read size and contents to send out as an HTTP
 * Range Response.
 *
 * \return
 * \li \c HTTP_BAD_REQUEST
 * \li \c HTTP_INTERNAL_SERVER_ERROR
 * \li \c HTTP_REQUEST_RANGE_NOT_SATISFIABLE
 * \li \c HTTP_OK
 */
static int CreateHTTPRangeResponseHeader(
	/*! String containing the range. */
	char *ByteRangeSpecifier,
	/*! Length of the file. */
	off_t FileLength,
	/*! [out] SendInstruction object where the range operations will be stored.
	 */
	struct SendInstruction *Instr)
{
	off_t FirstByte, LastByte;
	char *RangeInput;
	char *Ptr;
	int rc = 0;

	Instr->IsRangeActive = 1;
	Instr->ReadSendSize = FileLength;
	if (!ByteRangeSpecifier)
		return HTTP_BAD_REQUEST;
	RangeInput = strdup(ByteRangeSpecifier);
	if (!RangeInput)
		return HTTP_INTERNAL_SERVER_ERROR;
	/* CONTENT-RANGE: bytes 222-3333/4000  HTTP_PARTIAL_CONTENT */
	if (StrStr(RangeInput, "bytes") == NULL ||
	    (Ptr = StrStr(RangeInput, "=")) == NULL) {
		free(RangeInput);
		Instr->IsRangeActive = 0;
		return HTTP_BAD_REQUEST;
	}
	/* Jump = */
	Ptr = Ptr + 1;
	if (FileLength < 0) {
		free(RangeInput);
		return HTTP_REQUEST_RANGE_NOT_SATISFIABLE;
	}
	if (GetNextRange(&Ptr, &FirstByte, &LastByte) != -1) {
		if (FileLength < FirstByte) {
			free(RangeInput);
			return HTTP_REQUEST_RANGE_NOT_SATISFIABLE;
		}
		if (FirstByte >= 0 && LastByte >= 0 && LastByte >= FirstByte) {
			/* explicit "first-last" range, clamped to file size */
			if (LastByte >= FileLength)
				LastByte = FileLength - 1;
			Instr->RangeOffset = FirstByte;
			Instr->ReadSendSize = LastByte - FirstByte + 1;
			/* Data between two range. */
			rc = snprintf(Instr->RangeHeader,
				sizeof(Instr->RangeHeader),
				"CONTENT-RANGE: bytes %" PRId64
				"-%" PRId64 "/%" PRId64 "\r\n",
				(int64_t)FirstByte,
				(int64_t)LastByte,
				(int64_t)FileLength);
			if (rc < 0 || (unsigned int) rc >= sizeof(Instr->RangeHeader)) {
				free(RangeInput);
				return HTTP_INTERNAL_SERVER_ERROR;
			}
		} else if (FirstByte >= 0 && LastByte == -1
			   && FirstByte < FileLength) {
			/* open-ended "first-" range: send to end of file */
			Instr->RangeOffset = FirstByte;
			Instr->ReadSendSize = FileLength - FirstByte;
			rc = snprintf(Instr->RangeHeader,
				sizeof(Instr->RangeHeader),
				"CONTENT-RANGE: bytes %" PRId64
				"-%" PRId64 "/%" PRId64 "\r\n",
				(int64_t)FirstByte,
				(int64_t)(FileLength - 1),
				(int64_t)FileLength);
			if (rc < 0 || (unsigned int) rc >= sizeof(Instr->RangeHeader)) {
				free(RangeInput);
				return HTTP_INTERNAL_SERVER_ERROR;
			}
		} else if (FirstByte == -1 && LastByte > 0) {
			/* suffix "-N" range: the final N bytes of the file */
			if (LastByte >= FileLength) {
				Instr->RangeOffset = 0;
				Instr->ReadSendSize = FileLength;
				rc = snprintf(Instr->RangeHeader,
					sizeof(Instr->RangeHeader),
					"CONTENT-RANGE: bytes 0-%" PRId64
					"/%" PRId64 "\r\n",
					(int64_t)(FileLength - 1),
					(int64_t)FileLength);
			} else {
				Instr->RangeOffset = FileLength - LastByte;
				Instr->ReadSendSize = LastByte;
				rc = snprintf(Instr->RangeHeader,
					sizeof(Instr->RangeHeader),
					"CONTENT-RANGE: bytes %" PRId64
					"-%" PRId64 "/%" PRId64 "\r\n",
					(int64_t)(FileLength - LastByte),
					(int64_t)FileLength - 1,
					(int64_t)FileLength);
			}
			if (rc < 0 || (unsigned int) rc >= sizeof(Instr->RangeHeader)) {
				free(RangeInput);
				return HTTP_INTERNAL_SERVER_ERROR;
			}
		} else {
			free(RangeInput);
			return HTTP_REQUEST_RANGE_NOT_SATISFIABLE;
		}
	} else {
		free(RangeInput);
		return HTTP_REQUEST_RANGE_NOT_SATISFIABLE;
	}

	free(RangeInput);

	return HTTP_OK;
}

/*!
 * \brief Get header id from the request parameter and take appropriate
 * action based on the ids as an HTTP Range Response.
 *
 * \return
 * \li \c HTTP_BAD_REQUEST
 * \li \c HTTP_INTERNAL_SERVER_ERROR
 * \li \c HTTP_REQUEST_RANGE_NOT_SATISFIABLE
 * \li \c HTTP_OK
 */
static int CheckOtherHTTPHeaders(
	/*! [in] HTTP Request message.
	 */
	http_message_t *Req,
	/*! [out] Send Instruction object to data for the response. */
	struct SendInstruction *RespInstr,
	/*! Size of the file containing the request document. */
	off_t FileSize)
{
	http_header_t *header;
	ListNode *node;
	/*NNS: dlist_node* node; */
	int index, RetCode = HTTP_OK;
	char *TmpBuf;
	size_t TmpBufSize = LINE_SIZE;

	TmpBuf = (char *)malloc(TmpBufSize);
	if (!TmpBuf)
		return HTTP_INTERNAL_SERVER_ERROR;
	node = ListHead(&Req->headers);
	while (node != NULL) {
		header = (http_header_t *) node->item;
		/* find header type. */
		index = map_str_to_int((const char *)header->name.buf,
				       header->name.length, Http_Header_Names,
				       NUM_HTTP_HEADER_NAMES, FALSE);
		/* grow the scratch buffer so the header value always fits
		 * (header values are not NUL-terminated in the message) */
		if (header->value.length >= TmpBufSize) {
			free(TmpBuf);
			TmpBufSize = header->value.length + 1;
			TmpBuf = (char *)malloc(TmpBufSize);
			if (!TmpBuf)
				return HTTP_INTERNAL_SERVER_ERROR;
		}
		memcpy(TmpBuf, header->value.buf, header->value.length);
		TmpBuf[header->value.length] = '\0';
		if (index >= 0) {
			switch (Http_Header_Names[index].id) {
			case HDR_TE: {
				/* Request */
				RespInstr->IsChunkActive = 1;
				if (strlen(TmpBuf) > strlen("gzip")) {
					/* means client will accept trailer. */
					if (StrStr(TmpBuf, "trailers") != NULL) {
						RespInstr->IsTrailers = 1;
					}
				}
				break;
			}
			case HDR_CONTENT_LENGTH:
				RespInstr->RecvWriteSize = atoi(TmpBuf);
				break;
			case HDR_RANGE:
				RetCode = CreateHTTPRangeResponseHeader(TmpBuf,
					FileSize, RespInstr);
				if (RetCode != HTTP_OK) {
					free(TmpBuf);
					return RetCode;
				}
				break;
			case HDR_ACCEPT_LANGUAGE:
				/* copy, truncating to the fixed-size field and
				 * always NUL-terminating */
				if (header->value.length + 1 >
				    sizeof(RespInstr->AcceptLanguageHeader)) {
					size_t length =
					    sizeof(RespInstr->AcceptLanguageHeader) - 1;
					memcpy(RespInstr->AcceptLanguageHeader,
					       TmpBuf, length);
					RespInstr->AcceptLanguageHeader[length] = '\0';
				} else {
					memcpy(RespInstr->AcceptLanguageHeader,
					       TmpBuf,
					       header->value.length + 1);
				}
				break;
			default:
				/* TODO */
				/* header.value is the value. */
				/*
				   case HDR_CONTENT_TYPE: return 1;
				   case HDR_CONTENT_LANGUAGE:return 1;
				   case HDR_LOCATION: return 1;
				   case HDR_CONTENT_LOCATION:return 1;
				   case HDR_ACCEPT: return 1;
				   case HDR_ACCEPT_CHARSET: return 1;
				   case HDR_USER_AGENT: return 1;
				 */
				/*Header check for encoding */
				/*
				   case HDR_ACCEPT_RANGE:
				   case HDR_CONTENT_RANGE:
				   case HDR_IF_RANGE:
				 */
				/*Header check for encoding */
				/*
				   case HDR_ACCEPT_ENCODING:
				   if(StrStr(TmpBuf, "identity")) {
				   break;
				   } else return -1;
				   case HDR_CONTENT_ENCODING:
				   case HDR_TRANSFER_ENCODING:
				 */
				break;
			}
		}
		node = ListNext(&Req->headers, node);
	}
	free(TmpBuf);

	return RetCode;
}

/*!
 * \brief Processes the request and returns the result in the output parameters.
 *
 * \return
 * \li \c HTTP_BAD_REQUEST
 * \li \c HTTP_INTERNAL_SERVER_ERROR
 * \li \c HTTP_REQUEST_RANGE_NOT_SATISFIABLE
 * \li \c HTTP_FORBIDDEN
 * \li \c HTTP_NOT_FOUND
 * \li \c HTTP_NOT_ACCEPTABLE
 * \li \c HTTP_OK
 */
static int process_request(
	/*! [in] HTTP Request message. */
	http_message_t *req,
	/*! [out] Tpye of response. */
	enum resp_type *rtype,
	/*! [out] Headers. */
	membuffer *headers,
	/*! [out] Get filename from request document. */
	membuffer *filename,
	/*! [out] Xml alias document from the request document. */
	struct xml_alias_t *alias,
	/*! [out] Send Instruction object where the response is set up.
	 */
	struct SendInstruction *RespInstr)
{
	int code;
	int err_code;
	char *request_doc;
	UpnpFileInfo *finfo;
	time_t aux_LastModified;
	int using_alias;
	int using_virtual_dir;
	uri_type *url;
	const char *temp_str;
	int resp_major;
	int resp_minor;
	int alias_grabbed;
	size_t dummy;
	const char *extra_headers = NULL;

	print_http_headers(req);
	url = &req->uri;
	assert(req->method == HTTPMETHOD_GET ||
	       req->method == HTTPMETHOD_HEAD ||
	       req->method == HTTPMETHOD_POST ||
	       req->method == HTTPMETHOD_SIMPLEGET);
	/* init */
	/* NOTE(review): this memset zeroes the pointer variable `finfo`
	 * itself (sizeof a pointer), which is immediately overwritten by
	 * UpnpFileInfo_new() below -- harmless but presumably a leftover
	 * from when finfo was a struct; worth cleaning up upstream. */
	memset(&finfo, 0, sizeof(finfo));
	request_doc = NULL;
	finfo = UpnpFileInfo_new();
	alias_grabbed = FALSE;
	err_code = HTTP_INTERNAL_SERVER_ERROR;	/* default error */
	using_virtual_dir = FALSE;
	using_alias = FALSE;

	http_CalcResponseVersion(req->major_version, req->minor_version,
				 &resp_major, &resp_minor);
	/* */
	/* remove dots */
	/* */
	request_doc = malloc(url->pathquery.size + 1);
	if (request_doc == NULL) {
		goto error_handler;	/* out of mem */
	}
	memcpy(request_doc, url->pathquery.buff, url->pathquery.size);
	request_doc[url->pathquery.size] = '\0';
	dummy = url->pathquery.size;
	remove_escaped_chars(request_doc, &dummy);
	/* reject paths that escape the root via ".." segments */
	code = remove_dots(request_doc, url->pathquery.size);
	if (code != 0) {
		err_code = HTTP_FORBIDDEN;
		goto error_handler;
	}
	if (*request_doc != '/') {
		/* no slash */
		err_code = HTTP_BAD_REQUEST;
		goto error_handler;
	}
	if (isFileInVirtualDir(request_doc)) {
		using_virtual_dir = TRUE;
		RespInstr->IsVirtualFile = 1;
		if (membuffer_assign_str(filename, request_doc) != 0) {
			goto error_handler;
		}
	} else {
		/* try using alias */
		if (is_valid_alias(&gAliasDoc)) {
			alias_grab(alias);
			alias_grabbed = TRUE;
			using_alias = get_alias(request_doc, alias, finfo);
			if (using_alias == TRUE) {
				UpnpFileInfo_set_ContentType(finfo,
					"text/xml; charset=\"utf-8\"");
				if (UpnpFileInfo_get_ContentType(finfo) == NULL) {
					goto error_handler;
				}
			}
		}
	}
	if (using_virtual_dir) {
		if (req->method != HTTPMETHOD_POST) {
			/* get file info */
			if (virtualDirCallback.
			    get_info(filename->buf, finfo) != 0) {
				err_code = HTTP_NOT_FOUND;
				goto error_handler;
			}
			/* try index.html if req is a dir */
			if (UpnpFileInfo_get_IsDirectory(finfo)) {
				if (filename->buf[filename->length - 1] == '/') {
					temp_str = "index.html";
				} else {
					temp_str = "/index.html";
				}
				if (membuffer_append_str(filename, temp_str) != 0) {
					goto error_handler;
				}
				/* get info */
				if (virtualDirCallback.get_info(filename->buf,
					finfo) != UPNP_E_SUCCESS ||
				    UpnpFileInfo_get_IsDirectory(finfo)) {
					err_code = HTTP_NOT_FOUND;
					goto error_handler;
				}
			}
			/* not readable */
			if (!UpnpFileInfo_get_IsReadable(finfo)) {
				err_code = HTTP_FORBIDDEN;
				goto error_handler;
			}
			/* finally, get content type */
			/* if ( get_content_type(filename->buf, &content_type) != 0 ) */
			/*{ */
			/*      goto error_handler; */
			/* } */
		}
	} else if (!using_alias) {
		if (gDocumentRootDir.length == 0) {
			goto error_handler;
		}
		/* */
		/* get file name */
		/* */
		/* filename str */
		if (membuffer_assign_str(filename, gDocumentRootDir.buf) != 0 ||
		    membuffer_append_str(filename, request_doc) != 0) {
			goto error_handler;	/* out of mem */
		}
		/* remove trailing slashes */
		while (filename->length > 0 &&
		       filename->buf[filename->length - 1] == '/') {
			membuffer_delete(filename, filename->length - 1, 1);
		}
		if (req->method != HTTPMETHOD_POST) {
			/* get info on file */
			if (get_file_info(filename->buf, finfo) != 0) {
				err_code = HTTP_NOT_FOUND;
				goto error_handler;
			}
			/* try index.html if req is a dir */
			if (UpnpFileInfo_get_IsDirectory(finfo)) {
				if (filename->buf[filename->length - 1] == '/') {
					temp_str = "index.html";
				} else {
					temp_str = "/index.html";
				}
				if (membuffer_append_str(filename, temp_str) != 0) {
					goto error_handler;
				}
				/* get info */
				if (get_file_info(filename->buf, finfo) != 0 ||
				    UpnpFileInfo_get_IsDirectory(finfo)) {
					err_code = HTTP_NOT_FOUND;
					goto error_handler;
				}
			}
			/* not readable */
			if (!UpnpFileInfo_get_IsReadable(finfo)) {
				err_code = HTTP_FORBIDDEN;
				goto error_handler;
			}
		}
		/* finally, get content type */
		/* if ( get_content_type(filename->buf, &content_type) != 0 ) */
		/* { */
		/*      goto error_handler; */
		/* } */
	}
	RespInstr->ReadSendSize = UpnpFileInfo_get_FileLength(finfo);
	/* Check other header field. */
	code = CheckOtherHTTPHeaders(req, RespInstr,
		UpnpFileInfo_get_FileLength(finfo));
	if (code != HTTP_OK) {
		err_code = code;
		goto error_handler;
	}
	if (req->method == HTTPMETHOD_POST) {
		*rtype = RESP_POST;
		err_code = HTTP_OK;
		goto error_handler;
	}
	extra_headers = UpnpFileInfo_get_ExtraHeaders(finfo);
	if (!extra_headers) {
		extra_headers = "";
	}

	/* Check if chunked encoding should be used. */
	if (using_virtual_dir &&
	    UpnpFileInfo_get_FileLength(finfo) == UPNP_USING_CHUNKED) {
		/* Chunked encoding is only supported by HTTP 1.1 clients */
		if (resp_major == 1 && resp_minor == 1) {
			RespInstr->IsChunkActive = 1;
		} else {
			/* The virtual callback indicates that we should use
			 * chunked encoding however the client doesn't support
			 * it. Return with an internal server error. */
			err_code = HTTP_NOT_ACCEPTABLE;
			goto error_handler;
		}
	}
	aux_LastModified = UpnpFileInfo_get_LastModified(finfo);
	if (RespInstr->IsRangeActive && RespInstr->IsChunkActive) {
		/* Content-Range: bytes 222-3333/4000  HTTP_PARTIAL_CONTENT */
		/* Transfer-Encoding: chunked */
		if (http_MakeMessage(headers, resp_major, resp_minor,
		    "R" "T" "GKLD" "s" "tcS" "Xc" "sCc",
		    HTTP_PARTIAL_CONTENT,	/* status code */
		    UpnpFileInfo_get_ContentType(finfo),	/* content type */
		    RespInstr,	/* range info */
		    RespInstr,	/* language info */
		    "LAST-MODIFIED: ",
		    &aux_LastModified,
		    X_USER_AGENT,
		    extra_headers) != 0) {
			goto error_handler;
		}
	} else if (RespInstr->IsRangeActive && !RespInstr->IsChunkActive) {
		/* Content-Range: bytes 222-3333/4000  HTTP_PARTIAL_CONTENT */
		if (http_MakeMessage(headers, resp_major, resp_minor,
		    "R" "N" "T" "GLD" "s" "tcS" "Xc" "sCc",
		    HTTP_PARTIAL_CONTENT,	/* status code */
		    RespInstr->ReadSendSize,	/* content length */
		    UpnpFileInfo_get_ContentType(finfo),	/* content type */
		    RespInstr,	/* range info */
		    RespInstr,	/* language info */
		    "LAST-MODIFIED: ",
		    &aux_LastModified,
		    X_USER_AGENT,
		    extra_headers) != 0) {
			goto error_handler;
		}
	} else if (!RespInstr->IsRangeActive && RespInstr->IsChunkActive) {
		/* Transfer-Encoding: chunked */
		if (http_MakeMessage(headers, resp_major, resp_minor,
		    "RK" "TLD" "s" "tcS" "Xc" "sCc",
		    HTTP_OK,	/* status code */
		    UpnpFileInfo_get_ContentType(finfo),	/* content type */
		    RespInstr,	/* language info */
		    "LAST-MODIFIED: ",
		    &aux_LastModified,
		    X_USER_AGENT,
		    extra_headers) != 0) {
			goto error_handler;
		}
	} else {
		/* !RespInstr->IsRangeActive && !RespInstr->IsChunkActive */
		if (RespInstr->ReadSendSize >= 0) {
			if (http_MakeMessage(headers, resp_major, resp_minor,
			    "R" "N" "TLD" "s" "tcS" "Xc" "sCc",
			    HTTP_OK,	/* status code */
			    RespInstr->ReadSendSize,	/* content length */
			    UpnpFileInfo_get_ContentType(finfo),	/* content type */
			    RespInstr,	/* language info */
			    "LAST-MODIFIED: ",
			    &aux_LastModified,
			    X_USER_AGENT,
			    extra_headers) != 0) {
				goto error_handler;
			}
		} else {
			if (http_MakeMessage(headers, resp_major, resp_minor,
			    "R" "TLD" "s" "tcS" "Xc" "sCc",
			    HTTP_OK,	/* status code */
			    UpnpFileInfo_get_ContentType(finfo),	/* content type */
			    RespInstr,	/* language info */
			    "LAST-MODIFIED: ",
			    &aux_LastModified,
			    X_USER_AGENT,
			    extra_headers) != 0) {
				goto error_handler;
			}
		}
	}
	if (req->method == HTTPMETHOD_HEAD) {
		*rtype = RESP_HEADERS;
	} else if (using_alias) {
		/* GET xml */
		*rtype = RESP_XMLDOC;
	} else if (using_virtual_dir) {
		*rtype = RESP_WEBDOC;
	} else {
		/* GET filename */
		*rtype = RESP_FILEDOC;
	}
	/* simple get http 0.9 as specified in http 1.0 */
	/* don't send headers */
	if (req->method == HTTPMETHOD_SIMPLEGET) {
		membuffer_destroy(headers);
	}
	err_code = HTTP_OK;

error_handler:
	free(request_doc);
	UpnpFileInfo_delete(finfo);
	if (err_code != HTTP_OK && alias_grabbed) {
		alias_release(alias);
	}

	return err_code;
}

/*!
 * \brief Receives the HTTP post message.
 *
 * Receives and stores the entity body of an HTTP POST request, either via
 * the registered virtual-directory callbacks or (when compiled in) directly
 * to the local filesystem.
 *
 * \return
 * \li \c HTTP_INTERNAL_SERVER_ERROR
 * \li \c HTTP_UNAUTHORIZED
 * \li \c HTTP_BAD_REQUEST
 * \li \c HTTP_SERVICE_UNAVAILABLE
 * \li \c HTTP_OK
 */
static int http_RecvPostMessage(
	/*! HTTP Parser object. */
	http_parser_t *parser,
	/*! [in] Socket Information object. */
	SOCKINFO *info,
	/*! File where received data is copied to. */
	char *filename,
	/*! Send Instruction object which gives information whether the file
	 * is a virtual file or not. */
	struct SendInstruction *Instr)
{
	/* Chunk size copied from the parser's message buffer to the output
	 * sink per loop iteration; shrunk near the end of the entity. */
	size_t Data_Buf_Size = 1024;
	char Buf[1024];
	int Timeout = -1;
	/* Output sink: either a virtualDirCallback handle or a real FILE*. */
	FILE *Fp;
	parse_status_t status = PARSE_OK;
	/* Set when the entity has no declared length and is terminated by
	 * connection close (PARSE_INCOMPLETE_ENTITY). */
	int ok_on_close = FALSE;
	/* Bytes of the entity already flushed to the sink. */
	size_t entity_offset = 0;
	int num_read = 0;
	int ret_code = HTTP_OK;

	/* Open the destination. NOTE(review): in the non-virtual branch the
	 * body is written to `filename`, which is derived from the request;
	 * this is the behavior flagged in CVE-2016-6255 — confirm the caller
	 * constrains `filename` to the document root. */
	if (Instr && Instr->IsVirtualFile) {
		Fp = (virtualDirCallback.open) (filename, UPNP_WRITE);
		if (Fp == NULL)
			return HTTP_INTERNAL_SERVER_ERROR;
	} else {
#ifdef UPNP_ENABLE_POST_WRITE
		Fp = fopen(filename, "wb");
		if (Fp == NULL)
			return HTTP_UNAUTHORIZED;
#else
		/* POST-to-disk disabled at build time. */
		return HTTP_NOT_FOUND;
#endif
	}
	parser->position = POS_ENTITY;
	do {
		/* first parse what has already been gotten */
		if (parser->position != POS_COMPLETE)
			status = parser_parse_entity(parser);
		if (status == PARSE_INCOMPLETE_ENTITY) {
			/* read until close */
			ok_on_close = TRUE;
		} else if ((status != PARSE_SUCCESS)
			   && (status != PARSE_CONTINUE_1)
			   && (status != PARSE_INCOMPLETE)) {
			/* error */
			ret_code = HTTP_BAD_REQUEST;
			goto ExitFunction;
		}
		/* Read more from the socket until at least Data_Buf_Size
		 * bytes of entity (or the whole entity) are buffered. */
		while (entity_offset + Data_Buf_Size > parser->msg.entity.length &&
		       parser->position != POS_COMPLETE) {
			num_read = sock_read(info, Buf, sizeof(Buf), &Timeout);
			if (num_read > 0) {
				/* append data to buffer */
				if (membuffer_append(&parser->msg.msg, Buf,
						     (size_t)num_read) != 0) {
					/* set failure status */
					parser->http_error_code =
						HTTP_INTERNAL_SERVER_ERROR;
					ret_code = HTTP_INTERNAL_SERVER_ERROR;
					goto ExitFunction;
				}
				status = parser_parse_entity(parser);
				if (status == PARSE_INCOMPLETE_ENTITY) {
					/* read until close */
					ok_on_close = TRUE;
				} else if ((status != PARSE_SUCCESS)
					   && (status != PARSE_CONTINUE_1)
					   && (status != PARSE_INCOMPLETE)) {
					ret_code = HTTP_BAD_REQUEST;
					goto ExitFunction;
				}
			} else if (num_read == 0) {
				/* Peer closed the connection. */
				if (ok_on_close) {
					UpnpPrintf(UPNP_INFO, HTTP,
						__FILE__, __LINE__,
						"<<< (RECVD) <<<\n%s\n-----------------\n",
						parser->msg.msg.buf);
					print_http_headers(&parser->msg);
					parser->position = POS_COMPLETE;
				} else {
					/* partial msg or response */
					parser->http_error_code =
						HTTP_BAD_REQUEST;
					ret_code = HTTP_BAD_REQUEST;
					goto ExitFunction;
				}
			} else {
				/* sock_read error. */
				ret_code = HTTP_SERVICE_UNAVAILABLE;
				goto ExitFunction;
			}
		}
		/* Clamp the last chunk to the remaining entity bytes. */
		if ((entity_offset + Data_Buf_Size) > parser->msg.entity.length) {
			Data_Buf_Size =
				parser->msg.entity.length - entity_offset;
		}
		/* Copy the next chunk of the buffered entity to Buf.
		 * NOTE(review): this relies on msg.msg.buf actually holding
		 * entity_start_position + entity.length bytes; verify the
		 * parser maintains that invariant, otherwise this memcpy
		 * reads out of bounds. */
		memcpy(Buf,
		       &parser->msg.msg.buf[parser->entity_start_position +
					    entity_offset],
		       Data_Buf_Size);
		entity_offset += Data_Buf_Size;
		/* Flush the chunk to the sink. */
		if (Instr && Instr->IsVirtualFile) {
			int n = virtualDirCallback.write(Fp, Buf,
				Data_Buf_Size);
			if (n < 0) {
				ret_code = HTTP_INTERNAL_SERVER_ERROR;
				goto ExitFunction;
			}
		} else {
			size_t n = fwrite(Buf, 1, Data_Buf_Size, Fp);
			if (n != Data_Buf_Size) {
				ret_code = HTTP_INTERNAL_SERVER_ERROR;
				goto ExitFunction;
			}
		}
	} while (parser->position != POS_COMPLETE ||
		 entity_offset != parser->msg.entity.length);
ExitFunction:
	/* Close the sink through the same interface that opened it. */
	if (Instr && Instr->IsVirtualFile) {
		virtualDirCallback.close(Fp);
	} else {
		fclose(Fp);
	}
	return ret_code;
}

/*!
 * \brief Main HTTP-request entry point of the web server: builds the
 * response headers via process_request() and dispatches the response body
 * according to the resolved response type.
 */
void web_server_callback(http_parser_t *parser, INOUT http_message_t *req,
	SOCKINFO *info)
{
	int ret;
	int timeout = -1;
	enum resp_type rtype = 0;
	membuffer headers;
	membuffer filename;
	struct xml_alias_t xmldoc;
	struct SendInstruction RespInstr;

	/*Initialize instruction header. */
	RespInstr.IsVirtualFile = 0;
	RespInstr.IsChunkActive = 0;
	RespInstr.IsRangeActive = 0;
	RespInstr.IsTrailers = 0;
	memset(RespInstr.AcceptLanguageHeader, 0,
	       sizeof(RespInstr.AcceptLanguageHeader));
	/* init */
	membuffer_init(&headers);
	membuffer_init(&filename);

	/* process_request() creates the appropriate response headers for
	 * the type of request and resolves rtype/filename/xmldoc. */
	ret = process_request(req, &rtype, &headers, &filename, &xmldoc,
		&RespInstr);
	if (ret != HTTP_OK) {
		/* send error code */
		http_SendStatusResponse(info, ret, req->major_version,
			req->minor_version);
	} else {
		/* send response */
		switch (rtype) {
		case RESP_FILEDOC:
			/* Headers + file body from disk. */
			http_SendMessage(info, &timeout, "Ibf", &RespInstr,
				headers.buf, headers.length, filename.buf);
			break;
		case RESP_XMLDOC:
			/* Headers + in-memory XML alias document; release
			 * the alias refcount taken by process_request(). */
			http_SendMessage(info, &timeout, "Ibb", &RespInstr,
				headers.buf, headers.length,
				xmldoc.doc.buf, xmldoc.doc.length);
			alias_release(&xmldoc);
			break;
		case RESP_WEBDOC:
			/*http_SendVirtualDirDoc(info, &timeout, "Ibf", &RespInstr,
				headers.buf, headers.length, filename.buf);*/
			http_SendMessage(info, &timeout, "Ibf", &RespInstr,
				headers.buf, headers.length, filename.buf);
			break;
		case RESP_HEADERS:
			/* headers only */
			http_SendMessage(info, &timeout, "b",
				headers.buf, headers.length);
			break;
		case RESP_POST:
			/* Receive the POST body, then reply with a fresh
			 * status line + headers reflecting the result. */
			ret = http_RecvPostMessage(parser, info, filename.buf,
				&RespInstr);
			/* Send response. */
			http_MakeMessage(&headers, 1, 1, "RTLSXcCc",
				ret, "text/html", &RespInstr, X_USER_AGENT);
			http_SendMessage(info, &timeout, "b",
				headers.buf, headers.length);
			break;
		default:
			UpnpPrintf(UPNP_INFO, HTTP, __FILE__, __LINE__,
				"webserver: Invalid response type received.\n");
			assert(0);
		}
	}
	UpnpPrintf(UPNP_INFO, HTTP, __FILE__, __LINE__,
		"webserver: request processed...\n");
	/* Free the header and filename buffers built by process_request(). */
	membuffer_destroy(&headers);
	membuffer_destroy(&filename);
}
#endif /* EXCLUDE_WEB_SERVER */
./CrossVul/dataset_final_sorted/CWE-284/c/good_5204_2
crossvul-cpp_data_good_4770_1
/*****************************************************************************\ * src/slurmd/slurmd/req.c - slurmd request handling ***************************************************************************** * Copyright (C) 2002-2007 The Regents of the University of California. * Copyright (C) 2008-2010 Lawrence Livermore National Security. * Portions Copyright (C) 2010-2016 SchedMD LLC. * Portions copyright (C) 2015 Mellanox Technologies Inc. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Mark Grondona <mgrondona@llnl.gov>. * CODE-OCEC-09-009. All rights reserved. * * This file is part of SLURM, a resource management program. * For details, see <http://slurm.schedmd.com/>. * Please also read the included file: DISCLAIMER. * * SLURM is free software; you can redistribute it and/or modify it under * the terms of the GNU General Public License as published by the Free * Software Foundation; either version 2 of the License, or (at your option) * any later version. * * In addition, as a special exception, the copyright holders give permission * to link the code of portions of this program with the OpenSSL library under * certain conditions as described in each individual source file, and * distribute linked combinations including the two. You must obey the GNU * General Public License in all respects for all of the code used other than * OpenSSL. If you modify file(s) with this exception, you may extend this * exception to your version of the file(s), but you are not obligated to do * so. If you do not wish to do so, delete this exception statement from your * version. If you delete this exception statement from all source files in * the program, then also delete it here. * * SLURM is distributed in the hope that it will be useful, but WITHOUT ANY * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more * details. 
* * You should have received a copy of the GNU General Public License along * with SLURM; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. \*****************************************************************************/ #if HAVE_CONFIG_H # include "config.h" #endif #include <fcntl.h> #include <grp.h> #include <pthread.h> #include <sched.h> #include <signal.h> #include <stdlib.h> #include <string.h> #include <time.h> #include <sys/param.h> #include <poll.h> #include <sys/stat.h> #include <sys/types.h> #include <sys/un.h> #include <sys/wait.h> #include <unistd.h> #include <utime.h> #include "src/common/callerid.h" #include "src/common/cpu_frequency.h" #include "src/common/env.h" #include "src/common/fd.h" #include "src/common/forward.h" #include "src/common/gres.h" #include "src/common/hostlist.h" #include "src/common/list.h" #include "src/common/log.h" #include "src/common/macros.h" #include "src/common/msg_aggr.h" #include "src/common/node_features.h" #include "src/common/node_select.h" #include "src/common/plugstack.h" #include "src/common/read_config.h" #include "src/common/siphash.h" #include "src/common/slurm_auth.h" #include "src/common/slurm_cred.h" #include "src/common/slurm_acct_gather_energy.h" #include "src/common/slurm_jobacct_gather.h" #include "src/common/slurm_protocol_defs.h" #include "src/common/slurm_protocol_api.h" #include "src/common/slurm_protocol_interface.h" #include "src/common/slurm_strcasestr.h" #include "src/common/stepd_api.h" #include "src/common/uid.h" #include "src/common/util-net.h" #include "src/common/xstring.h" #include "src/common/xmalloc.h" #include "src/bcast/file_bcast.h" #include "src/slurmd/slurmd/get_mach_stat.h" #include "src/slurmd/slurmd/slurmd.h" #include "src/slurmd/common/job_container_plugin.h" #include "src/slurmd/common/proctrack.h" #include "src/slurmd/common/run_script.h" #include "src/slurmd/common/reverse_tree_math.h" #include 
"src/slurmd/common/slurmstepd_init.h" #include "src/slurmd/common/task_plugin.h" #define _LIMIT_INFO 0 #define RETRY_DELAY 15 /* retry every 15 seconds */ #define MAX_RETRY 240 /* retry 240 times (one hour max) */ #define EPIL_RETRY_MAX 2 /* max retries of epilog complete message */ #ifndef MAXHOSTNAMELEN #define MAXHOSTNAMELEN 64 #endif typedef struct { int ngids; gid_t *gids; } gids_t; typedef struct { uint32_t job_id; uint32_t step_id; uint32_t job_mem; uint32_t step_mem; } job_mem_limits_t; typedef struct { uint32_t job_id; uint32_t step_id; } starting_step_t; typedef struct { uint32_t job_id; uint16_t msg_timeout; bool *prolog_fini; pthread_cond_t *timer_cond; pthread_mutex_t *timer_mutex; } timer_struct_t; typedef struct { uint32_t jobid; uint32_t step_id; char *node_list; char *partition; char *resv_id; char **spank_job_env; uint32_t spank_job_env_size; uid_t uid; char *user_name; } job_env_t; static int _abort_step(uint32_t job_id, uint32_t step_id); static char **_build_env(job_env_t *job_env); static void _delay_rpc(int host_inx, int host_cnt, int usec_per_rpc); static void _destroy_env(char **env); static bool _is_batch_job_finished(uint32_t job_id); static void _job_limits_free(void *x); static int _job_limits_match(void *x, void *key); static bool _job_still_running(uint32_t job_id); static int _kill_all_active_steps(uint32_t jobid, int sig, bool batch); static void _launch_complete_add(uint32_t job_id); static void _launch_complete_log(char *type, uint32_t job_id); static void _launch_complete_rm(uint32_t job_id); static void _launch_complete_wait(uint32_t job_id); static int _launch_job_fail(uint32_t job_id, uint32_t slurm_rc); static bool _launch_job_test(uint32_t job_id); static void _note_batch_job_finished(uint32_t job_id); static int _prolog_is_running (uint32_t jobid); static int _step_limits_match(void *x, void *key); static int _terminate_all_steps(uint32_t jobid, bool batch); static int _receive_fd(int socket); static void 
_rpc_launch_tasks(slurm_msg_t *); static void _rpc_abort_job(slurm_msg_t *); static void _rpc_batch_job(slurm_msg_t *msg, bool new_msg); static void _rpc_prolog(slurm_msg_t *msg); static void _rpc_job_notify(slurm_msg_t *); static void _rpc_signal_tasks(slurm_msg_t *); static void _rpc_checkpoint_tasks(slurm_msg_t *); static void _rpc_complete_batch(slurm_msg_t *); static void _rpc_terminate_tasks(slurm_msg_t *); static void _rpc_timelimit(slurm_msg_t *); static void _rpc_reattach_tasks(slurm_msg_t *); static void _rpc_signal_job(slurm_msg_t *); static void _rpc_suspend_job(slurm_msg_t *msg); static void _rpc_terminate_job(slurm_msg_t *); static void _rpc_update_time(slurm_msg_t *); static void _rpc_shutdown(slurm_msg_t *msg); static void _rpc_reconfig(slurm_msg_t *msg); static void _rpc_reboot(slurm_msg_t *msg); static void _rpc_pid2jid(slurm_msg_t *msg); static int _rpc_file_bcast(slurm_msg_t *msg); static void _file_bcast_cleanup(void); static int _file_bcast_register_file(slurm_msg_t *msg, file_bcast_info_t *key); static int _rpc_ping(slurm_msg_t *); static int _rpc_health_check(slurm_msg_t *); static int _rpc_acct_gather_update(slurm_msg_t *); static int _rpc_acct_gather_energy(slurm_msg_t *); static int _rpc_step_complete(slurm_msg_t *msg); static int _rpc_step_complete_aggr(slurm_msg_t *msg); static int _rpc_stat_jobacct(slurm_msg_t *msg); static int _rpc_list_pids(slurm_msg_t *msg); static int _rpc_daemon_status(slurm_msg_t *msg); static int _run_epilog(job_env_t *job_env); static int _run_prolog(job_env_t *job_env, slurm_cred_t *cred); static void _rpc_forward_data(slurm_msg_t *msg); static int _rpc_network_callerid(slurm_msg_t *msg); static void _dealloc_gids(gids_t *p); static bool _pause_for_job_completion(uint32_t jobid, char *nodes, int maxtime); static bool _slurm_authorized_user(uid_t uid); static void _sync_messages_kill(kill_job_msg_t *req); static int _waiter_init (uint32_t jobid); static int _waiter_complete (uint32_t jobid); static void 
_send_back_fd(int socket, int fd); static bool _steps_completed_now(uint32_t jobid); static int _valid_sbcast_cred(file_bcast_msg_t *req, uid_t req_uid, uint16_t block_no, uint32_t *job_id); static void _wait_state_completed(uint32_t jobid, int max_delay); static uid_t _get_job_uid(uint32_t jobid); static gids_t *_gids_cache_lookup(char *user, gid_t gid); static int _add_starting_step(uint16_t type, void *req); static int _remove_starting_step(uint16_t type, void *req); static int _compare_starting_steps(void *s0, void *s1); static int _wait_for_starting_step(uint32_t job_id, uint32_t step_id); static bool _step_is_starting(uint32_t job_id, uint32_t step_id); static void _add_job_running_prolog(uint32_t job_id); static void _remove_job_running_prolog(uint32_t job_id); static int _match_jobid(void *s0, void *s1); static void _wait_for_job_running_prolog(uint32_t job_id); static bool _requeue_setup_env_fail(void); /* * List of threads waiting for jobs to complete */ static List waiters; static pthread_mutex_t launch_mutex = PTHREAD_MUTEX_INITIALIZER; static time_t startup = 0; /* daemon startup time */ static time_t last_slurmctld_msg = 0; static pthread_mutex_t job_limits_mutex = PTHREAD_MUTEX_INITIALIZER; static List job_limits_list = NULL; static bool job_limits_loaded = false; #define FINI_JOB_CNT 32 static pthread_mutex_t fini_mutex = PTHREAD_MUTEX_INITIALIZER; static uint32_t fini_job_id[FINI_JOB_CNT]; static int next_fini_job_inx = 0; /* NUM_PARALLEL_SUSP_JOBS controls the number of jobs that can be suspended or * resumed at one time. */ #define NUM_PARALLEL_SUSP_JOBS 64 /* NUM_PARALLEL_SUSP_STEPS controls the number of steps per job that can be * suspended at one time. 
*/ #define NUM_PARALLEL_SUSP_STEPS 8 static pthread_mutex_t suspend_mutex = PTHREAD_MUTEX_INITIALIZER; static uint32_t job_suspend_array[NUM_PARALLEL_SUSP_JOBS]; static int job_suspend_size = 0; #define JOB_STATE_CNT 64 static pthread_mutex_t job_state_mutex = PTHREAD_MUTEX_INITIALIZER; static pthread_cond_t job_state_cond = PTHREAD_COND_INITIALIZER; static uint32_t active_job_id[JOB_STATE_CNT]; static pthread_mutex_t prolog_mutex = PTHREAD_MUTEX_INITIALIZER; #define FILE_BCAST_TIMEOUT 300 static pthread_mutex_t file_bcast_mutex = PTHREAD_MUTEX_INITIALIZER; static pthread_cond_t file_bcast_cond = PTHREAD_COND_INITIALIZER; static int fb_read_lock = 0, fb_write_wait_lock = 0, fb_write_lock = 0; static List file_bcast_list = NULL; void slurmd_req(slurm_msg_t *msg) { int rc; if (msg == NULL) { if (startup == 0) startup = time(NULL); FREE_NULL_LIST(waiters); slurm_mutex_lock(&job_limits_mutex); if (job_limits_list) { FREE_NULL_LIST(job_limits_list); job_limits_loaded = false; } slurm_mutex_unlock(&job_limits_mutex); return; } switch (msg->msg_type) { case REQUEST_LAUNCH_PROLOG: debug2("Processing RPC: REQUEST_LAUNCH_PROLOG"); _rpc_prolog(msg); last_slurmctld_msg = time(NULL); break; case REQUEST_BATCH_JOB_LAUNCH: debug2("Processing RPC: REQUEST_BATCH_JOB_LAUNCH"); /* Mutex locking moved into _rpc_batch_job() due to * very slow prolog on Blue Gene system. Only batch * jobs are supported on Blue Gene (no job steps). 
*/ _rpc_batch_job(msg, true); last_slurmctld_msg = time(NULL); break; case REQUEST_LAUNCH_TASKS: debug2("Processing RPC: REQUEST_LAUNCH_TASKS"); slurm_mutex_lock(&launch_mutex); _rpc_launch_tasks(msg); slurm_mutex_unlock(&launch_mutex); break; case REQUEST_SIGNAL_TASKS: debug2("Processing RPC: REQUEST_SIGNAL_TASKS"); _rpc_signal_tasks(msg); break; case REQUEST_CHECKPOINT_TASKS: debug2("Processing RPC: REQUEST_CHECKPOINT_TASKS"); _rpc_checkpoint_tasks(msg); break; case REQUEST_TERMINATE_TASKS: debug2("Processing RPC: REQUEST_TERMINATE_TASKS"); _rpc_terminate_tasks(msg); break; case REQUEST_KILL_PREEMPTED: debug2("Processing RPC: REQUEST_KILL_PREEMPTED"); last_slurmctld_msg = time(NULL); _rpc_timelimit(msg); break; case REQUEST_KILL_TIMELIMIT: debug2("Processing RPC: REQUEST_KILL_TIMELIMIT"); last_slurmctld_msg = time(NULL); _rpc_timelimit(msg); break; case REQUEST_REATTACH_TASKS: debug2("Processing RPC: REQUEST_REATTACH_TASKS"); _rpc_reattach_tasks(msg); break; case REQUEST_SIGNAL_JOB: debug2("Processing RPC: REQUEST_SIGNAL_JOB"); _rpc_signal_job(msg); break; case REQUEST_SUSPEND_INT: debug2("Processing RPC: REQUEST_SUSPEND_INT"); _rpc_suspend_job(msg); last_slurmctld_msg = time(NULL); break; case REQUEST_ABORT_JOB: debug2("Processing RPC: REQUEST_ABORT_JOB"); last_slurmctld_msg = time(NULL); _rpc_abort_job(msg); break; case REQUEST_TERMINATE_JOB: debug2("Processing RPC: REQUEST_TERMINATE_JOB"); last_slurmctld_msg = time(NULL); _rpc_terminate_job(msg); break; case REQUEST_COMPLETE_BATCH_SCRIPT: debug2("Processing RPC: REQUEST_COMPLETE_BATCH_SCRIPT"); _rpc_complete_batch(msg); break; case REQUEST_UPDATE_JOB_TIME: debug2("Processing RPC: REQUEST_UPDATE_JOB_TIME"); _rpc_update_time(msg); last_slurmctld_msg = time(NULL); break; case REQUEST_SHUTDOWN: debug2("Processing RPC: REQUEST_SHUTDOWN"); _rpc_shutdown(msg); break; case REQUEST_RECONFIGURE: debug2("Processing RPC: REQUEST_RECONFIGURE"); _rpc_reconfig(msg); last_slurmctld_msg = time(NULL); break; case 
REQUEST_REBOOT_NODES: debug2("Processing RPC: REQUEST_REBOOT_NODES"); _rpc_reboot(msg); break; case REQUEST_NODE_REGISTRATION_STATUS: debug2("Processing RPC: REQUEST_NODE_REGISTRATION_STATUS"); /* Treat as ping (for slurmctld agent, just return SUCCESS) */ rc = _rpc_ping(msg); last_slurmctld_msg = time(NULL); /* Then initiate a separate node registration */ if (rc == SLURM_SUCCESS) send_registration_msg(SLURM_SUCCESS, true); break; case REQUEST_PING: _rpc_ping(msg); last_slurmctld_msg = time(NULL); break; case REQUEST_HEALTH_CHECK: debug2("Processing RPC: REQUEST_HEALTH_CHECK"); _rpc_health_check(msg); last_slurmctld_msg = time(NULL); break; case REQUEST_ACCT_GATHER_UPDATE: debug2("Processing RPC: REQUEST_ACCT_GATHER_UPDATE"); _rpc_acct_gather_update(msg); last_slurmctld_msg = time(NULL); break; case REQUEST_ACCT_GATHER_ENERGY: debug2("Processing RPC: REQUEST_ACCT_GATHER_ENERGY"); _rpc_acct_gather_energy(msg); break; case REQUEST_JOB_ID: _rpc_pid2jid(msg); break; case REQUEST_FILE_BCAST: rc = _rpc_file_bcast(msg); slurm_send_rc_msg(msg, rc); break; case REQUEST_STEP_COMPLETE: (void) _rpc_step_complete(msg); break; case REQUEST_STEP_COMPLETE_AGGR: (void) _rpc_step_complete_aggr(msg); break; case REQUEST_JOB_STEP_STAT: (void) _rpc_stat_jobacct(msg); break; case REQUEST_JOB_STEP_PIDS: (void) _rpc_list_pids(msg); break; case REQUEST_DAEMON_STATUS: _rpc_daemon_status(msg); break; case REQUEST_JOB_NOTIFY: _rpc_job_notify(msg); break; case REQUEST_FORWARD_DATA: _rpc_forward_data(msg); break; case REQUEST_NETWORK_CALLERID: debug2("Processing RPC: REQUEST_NETWORK_CALLERID"); _rpc_network_callerid(msg); break; case MESSAGE_COMPOSITE: error("Processing RPC: MESSAGE_COMPOSITE: " "This should never happen"); msg_aggr_add_msg(msg, 0, NULL); break; case RESPONSE_MESSAGE_COMPOSITE: debug2("Processing RPC: RESPONSE_MESSAGE_COMPOSITE"); msg_aggr_resp(msg); break; default: error("slurmd_req: invalid request msg type %d", msg->msg_type); slurm_send_rc_msg(msg, EINVAL); break; } 
	return;
	/* End of the slurmd_req() RPC dispatch. */
}

/*
 * Serialize the slurmd configuration and write it to fd (the pipe to a
 * forked slurmstepd). Returns 0 on success, -1 on write failure.
 * NOTE: the rwfail label is reached via the safe_write() macro on a short
 * or failed write — presumably it jumps there; confirm against the macro
 * definition.
 */
static int
_send_slurmd_conf_lite (int fd, slurmd_conf_t *cf)
{
	int len;
	Buf buffer = init_buf(0);
	/* Pack under config_mutex so a concurrent reconfigure cannot
	 * change the config mid-serialization. */
	slurm_mutex_lock(&cf->config_mutex);
	pack_slurmd_conf_lite(cf, buffer);
	slurm_mutex_unlock(&cf->config_mutex);
	len = get_buf_offset(buffer);
	/* Length-prefixed payload: int length, then the packed bytes. */
	safe_write(fd, &len, sizeof(int));
	safe_write(fd, get_buf_data(buffer), len);
	free_buf(buffer);
	return (0);

rwfail:
	/* NOTE(review): `buffer` is not freed on this path — possible
	 * leak; confirm whether safe_write can jump here after init_buf. */
	return (-1);
}

/*
 * Send all initialization data (type, reverse-tree layout, config, peer
 * addresses, GRES/cpu-freq info, the launch request itself, and the user's
 * group list) to a newly forked slurmstepd over fd.
 */
static int
_send_slurmstepd_init(int fd, int type, void *req,
		      slurm_addr_t *cli, slurm_addr_t *self,
		      hostset_t step_hset, uint16_t protocol_version)
{
	int len = 0;
	Buf buffer = NULL;
	slurm_msg_t msg;
	uid_t uid = (uid_t)-1;
	/* NOTE(review): gid initialized with a (uid_t) cast — harmless if
	 * the types match, but (gid_t)-1 was probably intended. */
	gid_t gid = (uid_t)-1;
	gids_t *gids = NULL;
	int rank, proto;
	int parent_rank, children, depth, max_depth;
	char *parent_alias = NULL;
	char *user_name = NULL;
	slurm_addr_t parent_addr = {0};
	char pwd_buffer[PW_BUF_SIZE];
	struct passwd pwd, *pwd_result;

	slurm_msg_t_init(&msg);

	/* send type over to slurmstepd */
	safe_write(fd, &type, sizeof(int));

	/* step_hset can be NULL for batch scripts OR if the job was submitted
	 * by SlurmUser or root using the --no-allocate/-Z option and the job
	 * job credential validation by _check_job_credential() failed. If the
	 * job credential did not validate, then it did not come from slurmctld
	 * and there is no reason to send step completion messages to slurmctld.
*/ if (step_hset == NULL) { bool send_error = false; if (type == LAUNCH_TASKS) { launch_tasks_request_msg_t *launch_req; launch_req = (launch_tasks_request_msg_t *) req; if (launch_req->job_step_id != SLURM_EXTERN_CONT) send_error = true; } if (send_error) { info("task rank unavailable due to invalid job " "credential, step completion RPC impossible"); } rank = -1; parent_rank = -1; children = 0; depth = 0; max_depth = 0; } else if ((type == LAUNCH_TASKS) && (((launch_tasks_request_msg_t *)req)->alias_list)) { /* In the cloud, each task talks directly to the slurmctld * since node addressing is abnormal */ rank = 0; parent_rank = -1; children = 0; depth = 0; max_depth = 0; } else { #ifndef HAVE_FRONT_END int count; count = hostset_count(step_hset); rank = hostset_find(step_hset, conf->node_name); reverse_tree_info(rank, count, REVERSE_TREE_WIDTH, &parent_rank, &children, &depth, &max_depth); if (rank > 0) { /* rank 0 talks directly to the slurmctld */ int rc; /* Find the slurm_addr_t of this node's parent slurmd * in the step host list */ parent_alias = hostset_nth(step_hset, parent_rank); rc = slurm_conf_get_addr(parent_alias, &parent_addr); if (rc != SLURM_SUCCESS) { error("Failed looking up address for " "NodeName %s", parent_alias); /* parent_rank = -1; */ } } #else /* In FRONT_END mode, one slurmd pretends to be all * NodeNames, so we can't compare conf->node_name * to the NodeNames in step_hset. Just send step complete * RPC directly to the controller. */ rank = 0; parent_rank = -1; children = 0; depth = 0; max_depth = 0; #endif } debug3("slurmstepd rank %d (%s), parent rank %d (%s), " "children %d, depth %d, max_depth %d", rank, conf->node_name, parent_rank, parent_alias ? 
parent_alias : "NONE", children, depth, max_depth); if (parent_alias) free(parent_alias); /* send reverse-tree info to the slurmstepd */ safe_write(fd, &rank, sizeof(int)); safe_write(fd, &parent_rank, sizeof(int)); safe_write(fd, &children, sizeof(int)); safe_write(fd, &depth, sizeof(int)); safe_write(fd, &max_depth, sizeof(int)); safe_write(fd, &parent_addr, sizeof(slurm_addr_t)); /* send conf over to slurmstepd */ if (_send_slurmd_conf_lite(fd, conf) < 0) goto rwfail; /* send cli address over to slurmstepd */ buffer = init_buf(0); slurm_pack_slurm_addr(cli, buffer); len = get_buf_offset(buffer); safe_write(fd, &len, sizeof(int)); safe_write(fd, get_buf_data(buffer), len); free_buf(buffer); buffer = NULL; /* send self address over to slurmstepd */ if (self) { buffer = init_buf(0); slurm_pack_slurm_addr(self, buffer); len = get_buf_offset(buffer); safe_write(fd, &len, sizeof(int)); safe_write(fd, get_buf_data(buffer), len); free_buf(buffer); buffer = NULL; } else { len = 0; safe_write(fd, &len, sizeof(int)); } /* Send GRES information to slurmstepd */ gres_plugin_send_stepd(fd); /* send cpu_frequency info to slurmstepd */ cpu_freq_send_info(fd); /* send req over to slurmstepd */ switch(type) { case LAUNCH_BATCH_JOB: gid = (uid_t)((batch_job_launch_msg_t *)req)->gid; uid = (uid_t)((batch_job_launch_msg_t *)req)->uid; user_name = ((batch_job_launch_msg_t *)req)->user_name; msg.msg_type = REQUEST_BATCH_JOB_LAUNCH; break; case LAUNCH_TASKS: /* * The validity of req->uid was verified against the * auth credential in _rpc_launch_tasks(). req->gid * has NOT yet been checked! 
*/ gid = (uid_t)((launch_tasks_request_msg_t *)req)->gid; uid = (uid_t)((launch_tasks_request_msg_t *)req)->uid; user_name = ((launch_tasks_request_msg_t *)req)->user_name; msg.msg_type = REQUEST_LAUNCH_TASKS; break; default: error("Was sent a task I didn't understand"); break; } buffer = init_buf(0); msg.data = req; if (protocol_version == (uint16_t)NO_VAL) proto = SLURM_PROTOCOL_VERSION; else proto = protocol_version; msg.protocol_version = (uint16_t)proto; pack_msg(&msg, buffer); len = get_buf_offset(buffer); safe_write(fd, &proto, sizeof(int)); safe_write(fd, &len, sizeof(int)); safe_write(fd, get_buf_data(buffer), len); free_buf(buffer); buffer = NULL; #ifdef HAVE_NATIVE_CRAY /* Try to avoid calling this on a system which is a native * cray. getpwuid_r is slow on the compute nodes and this has * in theory been verified earlier. */ if (!user_name) { #endif /* send cached group ids array for the relevant uid */ debug3("_send_slurmstepd_init: call to getpwuid_r"); if (slurm_getpwuid_r(uid, &pwd, pwd_buffer, PW_BUF_SIZE, &pwd_result) || (pwd_result == NULL)) { error("%s: getpwuid_r: %m", __func__); len = 0; safe_write(fd, &len, sizeof(int)); errno = ESLURMD_UID_NOT_FOUND; return errno; } debug3("%s: return from getpwuid_r", __func__); if (gid != pwd_result->pw_gid) { debug("%s: Changing gid from %d to %d", __func__, gid, pwd_result->pw_gid); } gid = pwd_result->pw_gid; if (!user_name) user_name = pwd_result->pw_name; #ifdef HAVE_NATIVE_CRAY } #endif if (!user_name) { /* Sanity check since gids_cache_lookup will fail * with a NULL. 
*/ error("%s: No user name for %d: %m", __func__, uid); len = 0; safe_write(fd, &len, sizeof(int)); errno = ESLURMD_UID_NOT_FOUND; return errno; } if ((gids = _gids_cache_lookup(user_name, gid))) { int i; uint32_t tmp32; safe_write(fd, &gids->ngids, sizeof(int)); for (i = 0; i < gids->ngids; i++) { tmp32 = (uint32_t)gids->gids[i]; safe_write(fd, &tmp32, sizeof(uint32_t)); } _dealloc_gids(gids); } else { len = 0; safe_write(fd, &len, sizeof(int)); } return 0; rwfail: if (buffer) free_buf(buffer); error("_send_slurmstepd_init failed"); return errno; } /* * Fork and exec the slurmstepd, then send the slurmstepd its * initialization data. Then wait for slurmstepd to send an "ok" * message before returning. When the "ok" message is received, * the slurmstepd has created and begun listening on its unix * domain socket. * * Note that this code forks twice and it is the grandchild that * becomes the slurmstepd process, so the slurmstepd's parent process * will be init, not slurmd. */ static int _forkexec_slurmstepd(uint16_t type, void *req, slurm_addr_t *cli, slurm_addr_t *self, const hostset_t step_hset, uint16_t protocol_version) { pid_t pid; int to_stepd[2] = {-1, -1}; int to_slurmd[2] = {-1, -1}; if (pipe(to_stepd) < 0 || pipe(to_slurmd) < 0) { error("_forkexec_slurmstepd pipe failed: %m"); return SLURM_FAILURE; } if (_add_starting_step(type, req)) { error("_forkexec_slurmstepd failed in _add_starting_step: %m"); return SLURM_FAILURE; } if ((pid = fork()) < 0) { error("_forkexec_slurmstepd: fork: %m"); close(to_stepd[0]); close(to_stepd[1]); close(to_slurmd[0]); close(to_slurmd[1]); _remove_starting_step(type, req); return SLURM_FAILURE; } else if (pid > 0) { int rc = SLURM_SUCCESS; #if (SLURMSTEPD_MEMCHECK == 0) int i; time_t start_time = time(NULL); #endif /* * Parent sends initialization data to the slurmstepd * over the to_stepd pipe, and waits for the return code * reply on the to_slurmd pipe. 
*/ if (close(to_stepd[0]) < 0) error("Unable to close read to_stepd in parent: %m"); if (close(to_slurmd[1]) < 0) error("Unable to close write to_slurmd in parent: %m"); if ((rc = _send_slurmstepd_init(to_stepd[1], type, req, cli, self, step_hset, protocol_version)) != 0) { error("Unable to init slurmstepd"); goto done; } /* If running under valgrind/memcheck, this pipe doesn't work * correctly so just skip it. */ #if (SLURMSTEPD_MEMCHECK == 0) i = read(to_slurmd[0], &rc, sizeof(int)); if (i < 0) { error("%s: Can not read return code from slurmstepd " "got %d: %m", __func__, i); rc = SLURM_FAILURE; } else if (i != sizeof(int)) { error("%s: slurmstepd failed to send return code " "got %d: %m", __func__, i); rc = SLURM_FAILURE; } else { int delta_time = time(NULL) - start_time; int cc; if (delta_time > 5) { info("Warning: slurmstepd startup took %d sec, " "possible file system problem or full " "memory", delta_time); } if (rc != SLURM_SUCCESS) error("slurmstepd return code %d", rc); cc = SLURM_SUCCESS; cc = write(to_stepd[1], &cc, sizeof(int)); if (cc != sizeof(int)) { error("%s: failed to send ack to stepd %d: %m", __func__, cc); } } #endif done: if (_remove_starting_step(type, req)) error("Error cleaning up starting_step list"); /* Reap child */ if (waitpid(pid, NULL, 0) < 0) error("Unable to reap slurmd child process"); if (close(to_stepd[1]) < 0) error("close write to_stepd in parent: %m"); if (close(to_slurmd[0]) < 0) error("close read to_slurmd in parent: %m"); return rc; } else { #if (SLURMSTEPD_MEMCHECK == 1) /* memcheck test of slurmstepd, option #1 */ char *const argv[3] = {"memcheck", (char *)conf->stepd_loc, NULL}; #elif (SLURMSTEPD_MEMCHECK == 2) /* valgrind test of slurmstepd, option #2 */ uint32_t job_id = 0, step_id = 0; char log_file[256]; char *const argv[13] = {"valgrind", "--tool=memcheck", "--error-limit=no", "--leak-check=summary", "--show-reachable=yes", "--max-stackframe=16777216", "--num-callers=20", "--child-silent-after-fork=yes", 
"--track-origins=yes", log_file, (char *)conf->stepd_loc, NULL}; if (type == LAUNCH_BATCH_JOB) { job_id = ((batch_job_launch_msg_t *)req)->job_id; step_id = ((batch_job_launch_msg_t *)req)->step_id; } else if (type == LAUNCH_TASKS) { job_id = ((launch_tasks_request_msg_t *)req)->job_id; step_id = ((launch_tasks_request_msg_t *)req)->job_step_id; } snprintf(log_file, sizeof(log_file), "--log-file=/tmp/slurmstepd_valgrind_%u.%u", job_id, step_id); #elif (SLURMSTEPD_MEMCHECK == 3) /* valgrind/drd test of slurmstepd, option #3 */ uint32_t job_id = 0, step_id = 0; char log_file[256]; char *const argv[10] = {"valgrind", "--tool=drd", "--error-limit=no", "--max-stackframe=16777216", "--num-callers=20", "--child-silent-after-fork=yes", log_file, (char *)conf->stepd_loc, NULL}; if (type == LAUNCH_BATCH_JOB) { job_id = ((batch_job_launch_msg_t *)req)->job_id; step_id = ((batch_job_launch_msg_t *)req)->step_id; } else if (type == LAUNCH_TASKS) { job_id = ((launch_tasks_request_msg_t *)req)->job_id; step_id = ((launch_tasks_request_msg_t *)req)->job_step_id; } snprintf(log_file, sizeof(log_file), "--log-file=/tmp/slurmstepd_valgrind_%u.%u", job_id, step_id); #elif (SLURMSTEPD_MEMCHECK == 4) /* valgrind/helgrind test of slurmstepd, option #4 */ uint32_t job_id = 0, step_id = 0; char log_file[256]; char *const argv[10] = {"valgrind", "--tool=helgrind", "--error-limit=no", "--max-stackframe=16777216", "--num-callers=20", "--child-silent-after-fork=yes", log_file, (char *)conf->stepd_loc, NULL}; if (type == LAUNCH_BATCH_JOB) { job_id = ((batch_job_launch_msg_t *)req)->job_id; step_id = ((batch_job_launch_msg_t *)req)->step_id; } else if (type == LAUNCH_TASKS) { job_id = ((launch_tasks_request_msg_t *)req)->job_id; step_id = ((launch_tasks_request_msg_t *)req)->job_step_id; } snprintf(log_file, sizeof(log_file), "--log-file=/tmp/slurmstepd_valgrind_%u.%u", job_id, step_id); #else /* no memory checking, default */ char *const argv[2] = { (char *)conf->stepd_loc, NULL}; #endif int i; 
int failed = 0; /* inform slurmstepd about our config */ setenv("SLURM_CONF", conf->conffile, 1); /* * Child forks and exits */ if (setsid() < 0) { error("_forkexec_slurmstepd: setsid: %m"); failed = 1; } if ((pid = fork()) < 0) { error("_forkexec_slurmstepd: " "Unable to fork grandchild: %m"); failed = 2; } else if (pid > 0) { /* child */ exit(0); } /* * Just incase we (or someone we are linking to) * opened a file and didn't do a close on exec. This * is needed mostly to protect us against libs we link * to that don't set the flag as we should already be * setting it for those that we open. The number 256 * is an arbitrary number based off test7.9. */ for (i=3; i<256; i++) { (void) fcntl(i, F_SETFD, FD_CLOEXEC); } /* * Grandchild exec's the slurmstepd * * If the slurmd is being shutdown/restarted before * the pipe happens the old conf->lfd could be reused * and if we close it the dup2 below will fail. */ if ((to_stepd[0] != conf->lfd) && (to_slurmd[1] != conf->lfd)) slurm_shutdown_msg_engine(conf->lfd); if (close(to_stepd[1]) < 0) error("close write to_stepd in grandchild: %m"); if (close(to_slurmd[0]) < 0) error("close read to_slurmd in parent: %m"); (void) close(STDIN_FILENO); /* ignore return */ if (dup2(to_stepd[0], STDIN_FILENO) == -1) { error("dup2 over STDIN_FILENO: %m"); exit(1); } fd_set_close_on_exec(to_stepd[0]); (void) close(STDOUT_FILENO); /* ignore return */ if (dup2(to_slurmd[1], STDOUT_FILENO) == -1) { error("dup2 over STDOUT_FILENO: %m"); exit(1); } fd_set_close_on_exec(to_slurmd[1]); (void) close(STDERR_FILENO); /* ignore return */ if (dup2(devnull, STDERR_FILENO) == -1) { error("dup2 /dev/null to STDERR_FILENO: %m"); exit(1); } fd_set_noclose_on_exec(STDERR_FILENO); log_fini(); if (!failed) { if (conf->chos_loc && !access(conf->chos_loc, X_OK)) execvp(conf->chos_loc, argv); else execvp(argv[0], argv); error("exec of slurmstepd failed: %m"); } exit(2); } } /* * The job(step) credential is the only place to get a definitive * list of the nodes 
 * allocated to a job step. We need to return
 * a hostset_t of the nodes. Validate the incoming RPC, updating
 * job_mem needed.
 *
 * IN  req        - task launch request; step_mem_lim/job_mem_lim/node_cpus/
 *                  job_core_spec are overwritten from the credential contents
 * IN  uid        - uid of the requesting user
 * IN  node_id    - this node's index into req->tasks_to_launch[]
 * OUT step_hset  - hostset of the step's nodes (caller must hostset_destroy)
 * IN  protocol_version - RPC protocol version of the sender
 * RET SLURM_SUCCESS, or SLURM_ERROR / ESLURMD_INVALID_JOB_CREDENTIAL (errno)
 */
static int _check_job_credential(launch_tasks_request_msg_t *req, uid_t uid,
				 int node_id, hostset_t *step_hset,
				 uint16_t protocol_version)
{
	slurm_cred_arg_t arg;
	hostset_t	s_hset = NULL;
	bool		user_ok = _slurm_authorized_user(uid);
	bool		verified = true;
	int		host_index = -1;
	int		rc;
	slurm_cred_t	*cred = req->cred;
	uint32_t	jobid = req->job_id;
	uint32_t	stepid = req->job_step_id;
	int		tasks_to_launch = req->tasks_to_launch[node_id];
	uint32_t	job_cpus = 0, step_cpus = 0;

	/*
	 * First call slurm_cred_verify() so that all valid
	 * credentials are checked
	 */
	rc = slurm_cred_verify(conf->vctx, cred, &arg, protocol_version);
	if (rc < 0) {
		verified = false;
		/* A bad credential is fatal unless the requester is the
		 * SlurmUser/root AND the failure was "invalid credential" */
		if ((!user_ok) || (errno != ESLURMD_INVALID_JOB_CREDENTIAL))
			return SLURM_ERROR;
		else {
			debug("_check_job_credential slurm_cred_verify failed:"
			      " %m, but continuing anyway.");
		}
	}

	/* If uid is the SlurmUser or root and the credential is bad,
	 * then do not attempt validating the credential */
	if (!verified) {
		*step_hset = NULL;
		if (rc >= 0) {
			if ((s_hset = hostset_create(arg.step_hostlist)))
				*step_hset = s_hset;
			slurm_cred_free_args(&arg);
		}
		return SLURM_SUCCESS;
	}

	/* Credential must be for this specific job step */
	if ((arg.jobid != jobid) || (arg.stepid != stepid)) {
		error("job credential for %u.%u, expected %u.%u",
		      arg.jobid, arg.stepid, jobid, stepid);
		goto fail;
	}

	/* Credential must have been issued to the requesting user */
	if (arg.uid != uid) {
		error("job credential created for uid %ld, expected %ld",
		      (long) arg.uid, (long) uid);
		goto fail;
	}

	/*
	 * Check that credential is valid for this host
	 */
	if (!(s_hset = hostset_create(arg.step_hostlist))) {
		error("Unable to parse credential hostlist: `%s'",
		      arg.step_hostlist);
		goto fail;
	}

	if (!hostset_within(s_hset, conf->node_name)) {
		error("Invalid job %u.%u credential for user %u: "
		      "host %s not in hostset %s",
		      arg.jobid, arg.stepid, arg.uid, conf->node_name,
		      arg.step_hostlist);
		goto fail;
	}

	if ((arg.job_nhosts > 0) && (tasks_to_launch > 0)) {
		uint32_t hi, i, i_first_bit=0, i_last_bit=0, j;
		bool cpu_log = slurm_get_debug_flags() & DEBUG_FLAG_CPU_BIND;

#ifdef HAVE_FRONT_END
		host_index = 0;	/* It is always 0 for front end systems */
#else
		hostset_t j_hset;
		/* Determine the CPU count based upon this node's index into
		 * the _job's_ allocation (job's hostlist and core_bitmap) */
		if (!(j_hset = hostset_create(arg.job_hostlist))) {
			error("Unable to parse credential hostlist: `%s'",
			      arg.job_hostlist);
			goto fail;
		}
		host_index = hostset_find(j_hset, conf->node_name);
		hostset_destroy(j_hset);

		if ((host_index < 0) || (host_index >= arg.job_nhosts)) {
			error("job cr credential invalid host_index %d for "
			      "job %u", host_index, arg.jobid);
			goto fail;
		}
#endif

		if (cpu_log) {
			char *per_job = "", *per_step = "";
			uint32_t job_mem  = arg.job_mem_limit;
			uint32_t step_mem = arg.step_mem_limit;
			if (job_mem & MEM_PER_CPU) {
				job_mem &= (~MEM_PER_CPU);
				per_job = "_per_CPU";
			}
			if (step_mem & MEM_PER_CPU) {
				step_mem &= (~MEM_PER_CPU);
				per_step = "_per_CPU";
			}
			info("====================");
			info("step_id:%u.%u job_mem:%uMB%s step_mem:%uMB%s",
			     arg.jobid, arg.stepid, job_mem, per_job,
			     step_mem, per_step);
		}

		/* Locate this node's span of bits in the job core bitmap,
		 * walking the compressed sockets/cores/rep_count arrays */
		hi = host_index + 1;	/* change from 0-origin to 1-origin */
		for (i=0; hi; i++) {
			if (hi > arg.sock_core_rep_count[i]) {
				i_first_bit += arg.sockets_per_node[i] *
					       arg.cores_per_socket[i] *
					       arg.sock_core_rep_count[i];
				hi -= arg.sock_core_rep_count[i];
			} else {
				i_first_bit += arg.sockets_per_node[i] *
					       arg.cores_per_socket[i] *
					       (hi - 1);
				i_last_bit = i_first_bit +
					     arg.sockets_per_node[i] *
					     arg.cores_per_socket[i];
				break;
			}
		}
		/* Now count the allocated processors */
		for (i=i_first_bit, j=0; i<i_last_bit; i++, j++) {
			char *who_has = NULL;
			if (bit_test(arg.job_core_bitmap, i)) {
				job_cpus++;
				who_has = "Job";
			}
			if (bit_test(arg.step_core_bitmap, i)) {
				step_cpus++;
				who_has = "Step";
			}
			if (cpu_log && who_has) {
				info("JobNode[%u] CPU[%u] %s alloc",
				     host_index, j, who_has);
			}
		}
		if (cpu_log)
			info("====================");
		if (step_cpus == 0) {
			error("cons_res: zero processors allocated to step");
			step_cpus = 1;
		}
		/* NOTE: step_cpus is the count of allocated resources
		 * (typically cores). Convert to CPU count as needed */
		if (i_last_bit <= i_first_bit)
			error("step credential has no CPUs selected");
		else {
			i = conf->cpus / (i_last_bit - i_first_bit);
			if (i > 1) {
				if (cpu_log)
					info("Scaling CPU count by factor of "
					     "%d (%u/(%u-%u))",
					     i, conf->cpus,
					     i_last_bit, i_first_bit);
				step_cpus *= i;
				job_cpus *= i;
			}
		}
		if (tasks_to_launch > step_cpus) {
			/* This is expected with the --overcommit option
			 * or hyperthreads */
			debug("cons_res: More than one tasks per logical "
			      "processor (%d > %u) on host [%u.%u %ld %s] ",
			      tasks_to_launch, step_cpus, arg.jobid,
			      arg.stepid, (long) arg.uid, arg.step_hostlist);
		}
	} else {
		step_cpus = 1;
		job_cpus  = 1;
	}

	/* Overwrite any memory limits in the RPC with contents of the
	 * memory limit within the credential.
	 * Reset the CPU count on this node to correct value. */
	if (arg.step_mem_limit) {
		if (arg.step_mem_limit & MEM_PER_CPU) {
			req->step_mem_lim  = arg.step_mem_limit &
					     (~MEM_PER_CPU);
			req->step_mem_lim *= step_cpus;
		} else
			req->step_mem_lim  = arg.step_mem_limit;
	} else {
		/* No step limit: fall back to the job limit */
		if (arg.job_mem_limit & MEM_PER_CPU) {
			req->step_mem_lim  = arg.job_mem_limit &
					     (~MEM_PER_CPU);
			req->step_mem_lim *= job_cpus;
		} else
			req->step_mem_lim  = arg.job_mem_limit;
	}
	if (arg.job_mem_limit & MEM_PER_CPU) {
		req->job_mem_lim  = arg.job_mem_limit & (~MEM_PER_CPU);
		req->job_mem_lim *= job_cpus;
	} else
		req->job_mem_lim  = arg.job_mem_limit;
	req->job_core_spec = arg.job_core_spec;
	req->node_cpus = step_cpus;
#if 0
	info("%u.%u node_id:%d mem orig:%u cpus:%u limit:%u",
	     jobid, stepid, node_id, arg.job_mem_limit,
	     step_cpus, req->job_mem_lim);
#endif

	*step_hset = s_hset;
	slurm_cred_free_args(&arg);
	return SLURM_SUCCESS;

fail:
	if (s_hset)
		hostset_destroy(s_hset);
	*step_hset = NULL;
	slurm_cred_free_args(&arg);
	slurm_seterrno_ret(ESLURMD_INVALID_JOB_CREDENTIAL);
}

/* Handle a task launch RPC: authenticate the requester, validate the
 * job credential, run the prolog if needed, and fork a slurmstepd */
static void _rpc_launch_tasks(slurm_msg_t *msg)
{
	int      errnum = SLURM_SUCCESS;
	uint16_t port;
	char     host[MAXHOSTNAMELEN];
	uid_t    req_uid;
	launch_tasks_request_msg_t *req = msg->data;
	bool     super_user = false;
#ifndef HAVE_FRONT_END
	bool     first_job_run;
#endif
	slurm_addr_t self;
	slurm_addr_t *cli = &msg->orig_addr;
	hostset_t step_hset = NULL;
	job_mem_limits_t *job_limits_ptr;
	int nodeid = 0;

#ifndef HAVE_FRONT_END
	/* It is always 0 for front end systems */
	nodeid = nodelist_find(req->complete_nodelist, conf->node_name);
#endif
	req_uid = g_slurm_auth_get_uid(msg->auth_cred, conf->auth_info);
	memcpy(&req->orig_addr, &msg->orig_addr, sizeof(slurm_addr_t));

	super_user = _slurm_authorized_user(req_uid);

	/* Only the job's own user or the SlurmUser/root may launch tasks */
	if ((super_user == false) && (req_uid != req->uid)) {
		error("launch task request from uid %u",
		      (unsigned int) req_uid);
		errnum = ESLURM_USER_ID_MISSING;	/* or invalid user */
		goto done;
	}

	slurm_get_ip_str(cli, &port, host, sizeof(host));
	info("launch task %u.%u request from %u.%u@%s (port %hu)",
	     req->job_id, req->job_step_id, req->uid, req->gid, host, port);

	/* this could be set previously and needs to be overwritten by
	 * this call for messages to work correctly for the new call */
	env_array_overwrite(&req->env, "SLURM_SRUN_COMM_HOST", host);
	req->envc = envcount(req->env);

#ifndef HAVE_FRONT_END
	/* prolog_mutex is held across the credential check so the
	 * "first run of this job" decision and prolog bookkeeping are
	 * atomic; every path below must release it exactly once */
	slurm_mutex_lock(&prolog_mutex);
	first_job_run = !slurm_cred_jobid_cached(conf->vctx, req->job_id);
#endif
	if (_check_job_credential(req, req_uid, nodeid, &step_hset,
				  msg->protocol_version) < 0) {
		errnum = errno;
		error("Invalid job credential from %ld@%s: %m",
		      (long) req_uid, host);
#ifndef HAVE_FRONT_END
		slurm_mutex_unlock(&prolog_mutex);
#endif
		goto done;
	}

	/* Must follow _check_job_credential(), which sets some req fields */
	task_g_slurmd_launch_request(req->job_id, req, nodeid);

#ifndef HAVE_FRONT_END
	if (first_job_run) {
		int rc;
		job_env_t job_env;
		slurm_cred_insert_jobid(conf->vctx, req->job_id);
		_add_job_running_prolog(req->job_id);
		slurm_mutex_unlock(&prolog_mutex);

		if (container_g_create(req->job_id))
			error("container_g_create(%u): %m", req->job_id);
		memset(&job_env, 0, sizeof(job_env_t));

		job_env.jobid = req->job_id;
		job_env.step_id = req->job_step_id;
		job_env.node_list = req->complete_nodelist;
		job_env.partition = req->partition;
		job_env.spank_job_env = req->spank_job_env;
		job_env.spank_job_env_size = req->spank_job_env_size;
		job_env.uid = req->uid;
		job_env.user_name = req->user_name;
		rc = _run_prolog(&job_env, req->cred);
		if (rc) {
			/* Decode wait(2)-style status for the log message */
			int term_sig, exit_status;
			if (WIFSIGNALED(rc)) {
				exit_status = 0;
				term_sig    = WTERMSIG(rc);
			} else {
				exit_status = WEXITSTATUS(rc);
				term_sig    = 0;
			}
			error("[job %u] prolog failed status=%d:%d",
			      req->job_id, exit_status, term_sig);
			errnum = ESLURMD_PROLOG_FAILED;
			goto done;
		}
		/* Since the job could have been killed while the prolog was
		 * running, test if the credential has since been revoked
		 * and exit as needed. */
		if (slurm_cred_revoked(conf->vctx, req->cred)) {
			info("Job %u already killed, do not launch step %u.%u",
			     req->job_id, req->job_id, req->job_step_id);
			errnum = ESLURMD_CREDENTIAL_REVOKED;
			goto done;
		}
	} else {
		slurm_mutex_unlock(&prolog_mutex);
		_wait_for_job_running_prolog(req->job_id);
	}
#endif

	/* Record this step's memory limits for later enforcement */
	if (req->job_mem_lim || req->step_mem_lim) {
		step_loc_t step_info;
		slurm_mutex_lock(&job_limits_mutex);
		if (!job_limits_list)
			job_limits_list = list_create(_job_limits_free);
		step_info.jobid  = req->job_id;
		step_info.stepid = req->job_step_id;
		job_limits_ptr = list_find_first (job_limits_list,
						  _step_limits_match,
						  &step_info);
		if (!job_limits_ptr) {
			job_limits_ptr = xmalloc(sizeof(job_mem_limits_t));
			job_limits_ptr->job_id   = req->job_id;
			job_limits_ptr->job_mem  = req->job_mem_lim;
			job_limits_ptr->step_id  = req->job_step_id;
			job_limits_ptr->step_mem = req->step_mem_lim;
#if _LIMIT_INFO
			info("AddLim step:%u.%u job_mem:%u step_mem:%u",
			     job_limits_ptr->job_id, job_limits_ptr->step_id,
			     job_limits_ptr->job_mem,
			     job_limits_ptr->step_mem);
#endif
			list_append(job_limits_list, job_limits_ptr);
		}
		slurm_mutex_unlock(&job_limits_mutex);
	}

	slurm_get_stream_addr(msg->conn_fd, &self);
	debug3("_rpc_launch_tasks: call to _forkexec_slurmstepd");
	errnum = _forkexec_slurmstepd(LAUNCH_TASKS, (void *)req, cli, &self,
				      step_hset, msg->protocol_version);
	debug3("_rpc_launch_tasks: return from _forkexec_slurmstepd");

	_launch_complete_add(req->job_id);

done:
	if (step_hset)
		hostset_destroy(step_hset);

	if (slurm_send_rc_msg(msg, errnum) < 0) {
		char addr_str[32];
		slurm_print_slurm_addr(&msg->address, addr_str,
				       sizeof(addr_str));
		error("_rpc_launch_tasks: unable to send return code to "
		      "address:port=%s msg_type=%u: %m",
		      addr_str, msg->msg_type);
		/*
		 * Rewind credential so that srun may perform retry
		 */
		slurm_cred_rewind(conf->vctx, req->cred); /* ignore errors */
	} else if (errnum == SLURM_SUCCESS) {
		save_cred_state(conf->vctx);
		task_g_slurmd_reserve_resources(req->job_id, req, nodeid);
	}

	/*
	 * If job prolog failed, indicate failure to slurmctld
	 */
	if (errnum == ESLURMD_PROLOG_FAILED)
		send_registration_msg(errnum, false);
}

/*
 * Open file based upon permissions of a different user
 * IN path_name - name of file to open
 * IN uid - User ID to use for file access check
 * IN gid - Group ID to use for file access check
 * RET -1 on error, file descriptor otherwise
 */
static int _open_as_other(char *path_name, batch_job_launch_msg_t *req)
{
	pid_t child;
	gids_t *gids;
	int pipe[2];
	int fd = -1, rc = 0;

	if (!(gids = _gids_cache_lookup(req->user_name, req->gid))) {
		error("%s: gids_cache_lookup for %s failed",
		      __func__, req->user_name);
		return -1;
	}

	if ((rc = container_g_create(req->job_id))) {
		error("%s: container_g_create(%u): %m",
		      __func__, req->job_id);
		_dealloc_gids(gids);
		return -1;
	}

	/* child process will setuid to the user, register the process
	 * with the container, and open the file for us.
*/ if (socketpair(AF_UNIX, SOCK_DGRAM, 0, pipe) != 0) { error("%s: Failed to open pipe: %m", __func__); _dealloc_gids(gids); return -1; } child = fork(); if (child == -1) { error("%s: fork failure", __func__); _dealloc_gids(gids); close(pipe[0]); close(pipe[1]); return -1; } else if (child > 0) { close(pipe[0]); (void) waitpid(child, &rc, 0); _dealloc_gids(gids); if (WIFEXITED(rc) && (WEXITSTATUS(rc) == 0)) fd = _receive_fd(pipe[1]); close(pipe[1]); return fd; } /* child process below here */ close(pipe[1]); /* container_g_add_pid needs to be called in the * forked process part of the fork to avoid a race * condition where if this process makes a file or * detacts itself from a child before we add the pid * to the container in the parent of the fork. */ if (container_g_add_pid(req->job_id, getpid(), req->uid)) { error("%s container_g_add_pid(%u): %m", __func__, req->job_id); exit(SLURM_ERROR); } /* The child actually performs the I/O and exits with * a return code, do not return! */ /*********************************************************************\ * NOTE: It would be best to do an exec() immediately after the fork() * in order to help prevent a possible deadlock in the child process * due to locks being set at the time of the fork and being freed by * the parent process, but not freed by the child process. Performing * the work inline is done for simplicity. Note that the logging * performed by error() should be safe due to the use of * atfork_install_handlers() as defined in src/common/log.c. * Change the code below with caution. 
\*********************************************************************/ if (setgroups(gids->ngids, gids->gids) < 0) { error("%s: uid: %u setgroups failed: %m", __func__, req->uid); exit(errno); } _dealloc_gids(gids); if (setgid(req->gid) < 0) { error("%s: uid:%u setgid(%u): %m", __func__, req->uid,req->gid); exit(errno); } if (setuid(req->uid) < 0) { error("%s: getuid(%u): %m", __func__, req->uid); exit(errno); } fd = open(path_name, (O_CREAT|O_APPEND|O_WRONLY), 0644); if (fd == -1) { error("%s: uid:%u can't open `%s`: %m", __func__, req->uid, path_name); exit(errno); } _send_back_fd(pipe[0], fd); close(fd); exit(SLURM_SUCCESS); } static void _prolog_error(batch_job_launch_msg_t *req, int rc) { char *err_name_ptr, err_name[256], path_name[MAXPATHLEN]; char *fmt_char; int fd; if (req->std_err || req->std_out) { if (req->std_err) strncpy(err_name, req->std_err, sizeof(err_name)); else strncpy(err_name, req->std_out, sizeof(err_name)); if ((fmt_char = strchr(err_name, (int) '%')) && (fmt_char[1] == 'j') && !strchr(fmt_char+1, (int) '%')) { char tmp_name[256]; fmt_char[1] = 'u'; snprintf(tmp_name, sizeof(tmp_name), err_name, req->job_id); strncpy(err_name, tmp_name, sizeof(err_name)); } } else { snprintf(err_name, sizeof(err_name), "slurm-%u.out", req->job_id); } err_name_ptr = err_name; if (err_name_ptr[0] == '/') snprintf(path_name, MAXPATHLEN, "%s", err_name_ptr); else if (req->work_dir) snprintf(path_name, MAXPATHLEN, "%s/%s", req->work_dir, err_name_ptr); else snprintf(path_name, MAXPATHLEN, "/%s", err_name_ptr); if ((fd = _open_as_other(path_name, req)) == -1) { error("Unable to open %s: Permission denied", path_name); return; } snprintf(err_name, sizeof(err_name), "Error running slurm prolog: %d\n", WEXITSTATUS(rc)); safe_write(fd, err_name, strlen(err_name)); if (fchown(fd, (uid_t) req->uid, (gid_t) req->gid) == -1) { snprintf(err_name, sizeof(err_name), "Couldn't change fd owner to %u:%u: %m\n", req->uid, req->gid); } rwfail: close(fd); } /* load the user's 
 * environment on this machine if requested
 * SLURM_GET_USER_ENV environment variable is set
 * IN/OUT req - batch launch request; on success req->environment/envc are
 *              replaced by the merged (user default + passed) environment
 * RET 0 on success or if no load was requested, -1 on failure */
static int _get_user_env(batch_job_launch_msg_t *req)
{
	struct passwd pwd, *pwd_ptr = NULL;
	char pwd_buf[PW_BUF_SIZE];
	char **new_env;
	int i;
	static time_t config_update = 0;
	static bool no_env_cache = false;

	/* Re-read SchedulerParameters only when the config changed */
	if (config_update != conf->last_update) {
		char *sched_params = slurm_get_sched_params();
		no_env_cache = (sched_params &&
				strstr(sched_params, "no_env_cache"));
		xfree(sched_params);
		config_update = conf->last_update;
	}

	/* Only act when the request explicitly asks for it */
	for (i=0; i<req->envc; i++) {
		if (xstrcmp(req->environment[i], "SLURM_GET_USER_ENV=1") == 0)
			break;
	}
	if (i >= req->envc)
		return 0;		/* don't need to load env */

	if (slurm_getpwuid_r(req->uid, &pwd, pwd_buf, PW_BUF_SIZE,
			     &pwd_ptr) || (pwd_ptr == NULL)) {
		error("%s: getpwuid_r(%u):%m", __func__, req->uid);
		return -1;
	}
	verbose("%s: get env for user %s here", __func__, pwd.pw_name);

	/* Permit up to 120 second delay before using cache file */
	new_env = env_array_user_default(pwd.pw_name, 120, 0, no_env_cache);
	if (! new_env) {
		error("%s: Unable to get user's local environment%s",
		      __func__, no_env_cache ?
		      "" : ", running only with passed environment");
		return -1;
	}

	/* Values passed in the RPC override the user's defaults */
	env_array_merge(&new_env, (const char **) req->environment);
	env_array_free(req->environment);
	req->environment = new_env;
	req->envc = envcount(new_env);

	return 0;
}

/* The RPC currently contains a memory size limit, but we load the
 * value from the job credential to be certain it has not been
 * altered by the user */
static void _set_batch_job_limits(slurm_msg_t *msg)
{
	int i;
	uint32_t alloc_lps = 0, last_bit = 0;
	bool cpu_log = slurm_get_debug_flags() & DEBUG_FLAG_CPU_BIND;
	slurm_cred_arg_t arg;
	batch_job_launch_msg_t *req = (batch_job_launch_msg_t *)msg->data;

	if (slurm_cred_get_args(req->cred, &arg) != SLURM_SUCCESS)
		return;
	req->job_core_spec = arg.job_core_spec;	/* Prevent user reset */

	if (cpu_log) {
		char *per_job = "";
		uint32_t job_mem  = arg.job_mem_limit;
		if (job_mem & MEM_PER_CPU) {
			job_mem &= (~MEM_PER_CPU);
			per_job = "_per_CPU";
		}
		info("====================");
		info("batch_job:%u job_mem:%uMB%s", req->job_id,
		     job_mem, per_job);
	}
	if (cpu_log || (arg.job_mem_limit & MEM_PER_CPU)) {
		/* Count this node's allocated cores (node 0 of the batch
		 * job's allocation) from the credential's core bitmap */
		if (arg.job_nhosts > 0) {
			last_bit = arg.sockets_per_node[0] *
				   arg.cores_per_socket[0];
			for (i=0; i<last_bit; i++) {
				if (!bit_test(arg.job_core_bitmap, i))
					continue;
				if (cpu_log)
					info("JobNode[0] CPU[%u] Job alloc",i);
				alloc_lps++;
			}
		}
		if (cpu_log)
			info("====================");
		if (alloc_lps == 0) {
			error("_set_batch_job_limit: alloc_lps is zero");
			alloc_lps = 1;
		}

		/* NOTE: alloc_lps is the count of allocated resources
		 * (typically cores). Convert to CPU count as needed */
		if (last_bit < 1)
			error("Batch job credential allocates no CPUs");
		else {
			i = conf->cpus / last_bit;
			if (i > 1)
				alloc_lps *= i;
		}
	}

	/* A per-CPU limit is converted to a per-node limit here */
	if (arg.job_mem_limit & MEM_PER_CPU) {
		req->job_mem = arg.job_mem_limit & (~MEM_PER_CPU);
		req->job_mem *= alloc_lps;
	} else
		req->job_mem = arg.job_mem_limit;

	slurm_cred_free_args(&arg);
}

/* These functions prevent a possible race condition if the batch script's
 * complete RPC is processed before its launch_successful response. */
static bool _is_batch_job_finished(uint32_t job_id)
{
	bool found_job = false;
	int i;

	slurm_mutex_lock(&fini_mutex);
	for (i = 0; i < FINI_JOB_CNT; i++) {
		if (fini_job_id[i] == job_id) {
			found_job = true;
			break;
		}
	}
	slurm_mutex_unlock(&fini_mutex);

	return found_job;
}

/* Record job_id in the fixed-size ring buffer of recently finished
 * batch jobs (oldest entry is overwritten) */
static void _note_batch_job_finished(uint32_t job_id)
{
	slurm_mutex_lock(&fini_mutex);
	fini_job_id[next_fini_job_inx] = job_id;
	if (++next_fini_job_inx >= FINI_JOB_CNT)
		next_fini_job_inx = 0;
	slurm_mutex_unlock(&fini_mutex);
}

/* Send notification to slurmctld we are finished running the prolog.
 * This is needed on system that don't use srun to launch their tasks.
 */
static void _notify_slurmctld_prolog_fini(
	uint32_t job_id, uint32_t prolog_return_code)
{
	int rc;
	slurm_msg_t req_msg;
	complete_prolog_msg_t req;

	slurm_msg_t_init(&req_msg);
	req.job_id	= job_id;
	req.prolog_rc	= prolog_return_code;

	req_msg.msg_type = REQUEST_COMPLETE_PROLOG;
	req_msg.data	= &req;

	if ((slurm_send_recv_controller_rc_msg(&req_msg, &rc) < 0) ||
	    (rc != SLURM_SUCCESS))
		error("Error sending prolog completion notification: %m");
}

/* Convert memory limits from per-CPU to per-node */
static void _convert_job_mem(slurm_msg_t *msg)
{
	prolog_launch_msg_t *req = (prolog_launch_msg_t *)msg->data;
	slurm_cred_arg_t arg;
	hostset_t j_hset = NULL;
	int rc, hi, host_index, job_cpus;
	int i, i_first_bit = 0, i_last_bit = 0;

	rc = slurm_cred_verify(conf->vctx, req->cred, &arg,
			       msg->protocol_version);
	if (rc < 0) {
		error("%s: slurm_cred_verify failed: %m", __func__);
		req->nnodes = 1;	/* best guess */
		return;
	}

	req->nnodes = arg.job_nhosts;

	if (arg.job_mem_limit == 0)
		goto fini;
	if ((arg.job_mem_limit & MEM_PER_CPU) == 0) {
		/* Already a per-node limit; use it as-is */
		req->job_mem_limit = arg.job_mem_limit;
		goto fini;
	}

	/* Assume 1 CPU on error */
	req->job_mem_limit = arg.job_mem_limit & (~MEM_PER_CPU);

	if (!(j_hset = hostset_create(arg.job_hostlist))) {
		/* NOTE(review): message prints arg.step_hostlist although
		 * the parse above used arg.job_hostlist — looks like a
		 * copy/paste slip; verify before changing the string */
		error("%s: Unable to parse credential hostlist: `%s'",
		      __func__, arg.step_hostlist);
		goto fini;
	}
	host_index = hostset_find(j_hset, conf->node_name);
	hostset_destroy(j_hset);

	/* Locate this node's span in the job core bitmap using the
	 * compressed sockets/cores/rep_count arrays */
	hi = host_index + 1;	/* change from 0-origin to 1-origin */
	for (i = 0; hi; i++) {
		if (hi > arg.sock_core_rep_count[i]) {
			i_first_bit += arg.sockets_per_node[i] *
				       arg.cores_per_socket[i] *
				       arg.sock_core_rep_count[i];
			i_last_bit = i_first_bit +
				     arg.sockets_per_node[i] *
				     arg.cores_per_socket[i] *
				     arg.sock_core_rep_count[i];
			hi -= arg.sock_core_rep_count[i];
		} else {
			i_first_bit += arg.sockets_per_node[i] *
				       arg.cores_per_socket[i] * (hi - 1);
			i_last_bit = i_first_bit +
				     arg.sockets_per_node[i] *
				     arg.cores_per_socket[i];
			break;
		}
	}

	/* Now count the allocated processors on this node */
	job_cpus = 0;
	for (i = i_first_bit; i < i_last_bit; i++) {
		if (bit_test(arg.job_core_bitmap, i))
			job_cpus++;
	}

	/* NOTE: alloc_lps is the count of allocated resources
	 * (typically cores). Convert to CPU count as needed */
	if (i_last_bit > i_first_bit) {
		i = conf->cpus / (i_last_bit - i_first_bit);
		if (i > 1)
			job_cpus *= i;
	}

	req->job_mem_limit *= job_cpus;

fini:	slurm_cred_free_args(&arg);
}

/* Create a memory-limit record for the job's extern container step,
 * converting any per-CPU limit to per-node first */
static void _make_prolog_mem_container(slurm_msg_t *msg)
{
	prolog_launch_msg_t *req = (prolog_launch_msg_t *)msg->data;
	job_mem_limits_t *job_limits_ptr;
	step_loc_t step_info;

	_convert_job_mem(msg);	/* Convert per-CPU mem limit */
	if (req->job_mem_limit) {
		slurm_mutex_lock(&job_limits_mutex);
		if (!job_limits_list)
			job_limits_list = list_create(_job_limits_free);
		step_info.jobid  = req->job_id;
		step_info.stepid = SLURM_EXTERN_CONT;
		job_limits_ptr = list_find_first(job_limits_list,
						 _step_limits_match,
						 &step_info);
		if (!job_limits_ptr) {
			job_limits_ptr = xmalloc(sizeof(job_mem_limits_t));
			job_limits_ptr->job_id   = req->job_id;
			job_limits_ptr->job_mem  = req->job_mem_limit;
			job_limits_ptr->step_id  = SLURM_EXTERN_CONT;
			job_limits_ptr->step_mem = req->job_mem_limit;
#if _LIMIT_INFO
			info("AddLim step:%u.%u job_mem:%u step_mem:%u",
			     job_limits_ptr->job_id, job_limits_ptr->step_id,
			     job_limits_ptr->job_mem,
			     job_limits_ptr->step_mem);
#endif
			list_append(job_limits_list, job_limits_ptr);
		}
		slurm_mutex_unlock(&job_limits_mutex);
	}
}

/* Build a synthetic launch_tasks request (one task per node) and fork a
 * slurmstepd to host the job's extern container step */
static void _spawn_prolog_stepd(slurm_msg_t *msg)
{
	prolog_launch_msg_t *req = (prolog_launch_msg_t *)msg->data;
	launch_tasks_request_msg_t *launch_req;
	slurm_addr_t self;
	slurm_addr_t *cli = &msg->orig_addr;
	int i;

	launch_req = xmalloc(sizeof(launch_tasks_request_msg_t));
	launch_req->alias_list		= req->alias_list;
	launch_req->complete_nodelist	= req->nodes;
	launch_req->cpus_per_task	= 1;
	launch_req->cred		= req->cred;
	launch_req->cwd			= req->work_dir;
	launch_req->efname		= "/dev/null";
	launch_req->gid			= req->gid;
	launch_req->global_task_ids	= xmalloc(sizeof(uint32_t *) *
					  req->nnodes);
	launch_req->ifname		= "/dev/null";
	launch_req->job_id		= req->job_id;
	launch_req->job_mem_lim		= req->job_mem_limit;
	launch_req->job_step_id		= SLURM_EXTERN_CONT;
	launch_req->nnodes		= req->nnodes;
	launch_req->ntasks		= req->nnodes;
	launch_req->ofname		= "/dev/null";
	launch_req->partition		= req->partition;
	launch_req->spank_job_env_size	= req->spank_job_env_size;
	launch_req->spank_job_env	= req->spank_job_env;
	launch_req->step_mem_lim	= req->job_mem_limit;
	launch_req->tasks_to_launch	= xmalloc(sizeof(uint16_t) *
						  req->nnodes);
	launch_req->uid			= req->uid;

	/* One task per node; global task id == node index */
	for (i = 0; i < req->nnodes; i++) {
		uint32_t *tmp32 = xmalloc(sizeof(uint32_t));
		*tmp32 = i;
		launch_req->global_task_ids[i] = tmp32;
		launch_req->tasks_to_launch[i] = 1;
	}

	slurm_get_stream_addr(msg->conn_fd, &self);

	/* Since job could have been killed while the prolog was
	 * running (especially on BlueGene, which can take minutes
	 * for partition booting). Test if the credential has since
	 * been revoked and exit as needed. */
	if (slurm_cred_revoked(conf->vctx, req->cred)) {
		info("Job %u already killed, do not launch extern step",
		     req->job_id);
	} else {
		hostset_t step_hset = hostset_create(req->nodes);

		debug3("%s: call to _forkexec_slurmstepd", __func__);
		(void) _forkexec_slurmstepd(
			LAUNCH_TASKS, (void *)launch_req, cli, &self,
			step_hset, msg->protocol_version);
		debug3("%s: return from _forkexec_slurmstepd", __func__);
		if (step_hset)
			hostset_destroy(step_hset);
	}

	/* Only the containers allocated here are freed; pointers copied
	 * from req still belong to the original message */
	for (i = 0; i < req->nnodes; i++)
		xfree(launch_req->global_task_ids[i]);
	xfree(launch_req->global_task_ids);
	xfree(launch_req->tasks_to_launch);
	xfree(launch_req);
}

/* Handle a REQUEST_LAUNCH_PROLOG RPC: reply immediately, then run the
 * prolog (once per job) and report completion to slurmctld */
static void _rpc_prolog(slurm_msg_t *msg)
{
	int rc = SLURM_SUCCESS;
	prolog_launch_msg_t *req = (prolog_launch_msg_t *)msg->data;
	job_env_t job_env;
	bool first_job_run;
	uid_t req_uid;

	if (req == NULL)
		return;

	/* Only the SlurmUser/root may initiate a prolog */
	req_uid = g_slurm_auth_get_uid(msg->auth_cred, conf->auth_info);
	if (!_slurm_authorized_user(req_uid)) {
		error("REQUEST_LAUNCH_PROLOG request from uid %u",
		      (unsigned int) req_uid);
		return;
	}

	/* Acknowledge receipt before doing the (possibly slow) work */
	if (slurm_send_rc_msg(msg, rc) < 0) {
		error("Error starting prolog: %m");
	}
	if (rc) {
		int term_sig, exit_status;
		if (WIFSIGNALED(rc)) {
			exit_status = 0;
			term_sig    = WTERMSIG(rc);
		} else {
			exit_status = WEXITSTATUS(rc);
			term_sig    = 0;
		}
		error("[job %u] prolog start failed status=%d:%d",
		      req->job_id, exit_status, term_sig);
		rc = ESLURMD_PROLOG_FAILED;
	}

	slurm_mutex_lock(&prolog_mutex);
	first_job_run = !slurm_cred_jobid_cached(conf->vctx, req->job_id);
	if (first_job_run) {
		if (slurmctld_conf.prolog_flags & PROLOG_FLAG_CONTAIN)
			_make_prolog_mem_container(msg);

		if (container_g_create(req->job_id))
			error("container_g_create(%u): %m", req->job_id);
		slurm_cred_insert_jobid(conf->vctx, req->job_id);
		_add_job_running_prolog(req->job_id);
		slurm_mutex_unlock(&prolog_mutex);

		memset(&job_env, 0, sizeof(job_env_t));

		job_env.jobid = req->job_id;
		job_env.step_id = 0;	/* not available */
		job_env.node_list = req->nodes;
		job_env.partition = req->partition;
		job_env.spank_job_env = req->spank_job_env;
		job_env.spank_job_env_size = req->spank_job_env_size;
		job_env.uid = req->uid;
		job_env.user_name = req->user_name;
#if defined(HAVE_BG)
		select_g_select_jobinfo_get(req->select_jobinfo,
					    SELECT_JOBDATA_BLOCK_ID,
					    &job_env.resv_id);
#elif defined(HAVE_ALPS_CRAY)
		job_env.resv_id = select_g_select_jobinfo_xstrdup(
			req->select_jobinfo, SELECT_PRINT_RESV_ID);
#endif
		rc = _run_prolog(&job_env, req->cred);
		if (rc) {
			int term_sig, exit_status;
			if (WIFSIGNALED(rc)) {
				exit_status = 0;
				term_sig    = WTERMSIG(rc);
			} else {
				exit_status = WEXITSTATUS(rc);
				term_sig    = 0;
			}
			error("[job %u] prolog failed status=%d:%d",
			      req->job_id, exit_status, term_sig);
			rc = ESLURMD_PROLOG_FAILED;
		}
	} else
		slurm_mutex_unlock(&prolog_mutex);

	if (!(slurmctld_conf.prolog_flags & PROLOG_FLAG_NOHOLD))
		_notify_slurmctld_prolog_fini(req->job_id, rc);

	if (rc == SLURM_SUCCESS) {
		if (slurmctld_conf.prolog_flags & PROLOG_FLAG_CONTAIN)
			_spawn_prolog_stepd(msg);
	} else {
		_launch_job_fail(req->job_id, rc);
		/*
		 * If job prolog failed or
		 * we could not reply,
		 * initiate message to slurmctld with current state
		 */
		if ((rc == ESLURMD_PROLOG_FAILED) ||
		    (rc == SLURM_COMMUNICATIONS_SEND_ERROR) ||
		    (rc == ESLURMD_SETUP_ENVIRONMENT_ERROR))
			send_registration_msg(rc, false);
	}
}

/* Handle a batch job launch RPC: authenticate, run the prolog (once per
 * job), load the user environment if requested, and fork a slurmstepd.
 * IN new_msg - true if this arrived over the wire (reply required) */
static void _rpc_batch_job(slurm_msg_t *msg, bool new_msg)
{
	batch_job_launch_msg_t *req = (batch_job_launch_msg_t *)msg->data;
	bool     first_job_run;
	int      rc = SLURM_SUCCESS;
	bool	 replied = false, revoked;
	slurm_addr_t *cli = &msg->orig_addr;

	if (new_msg) {
		/* Only the SlurmUser/root may launch batch jobs */
		uid_t req_uid = g_slurm_auth_get_uid(msg->auth_cred,
						     conf->auth_info);
		if (!_slurm_authorized_user(req_uid)) {
			error("Security violation, batch launch RPC from uid %d",
			      req_uid);
			rc = ESLURM_USER_ID_MISSING;  /* or bad in this case */
			goto done;
		}
	}
	if (_launch_job_test(req->job_id)) {
		error("Job %u already running, do not launch second copy",
		      req->job_id);
		rc = ESLURM_DUPLICATE_JOB_ID;	/* job already running */
		_launch_job_fail(req->job_id, rc);
		goto done;
	}

	slurm_cred_handle_reissue(conf->vctx, req->cred);
	if (slurm_cred_revoked(conf->vctx, req->cred)) {
		error("Job %u already killed, do not launch batch job",
		      req->job_id);
		rc = ESLURMD_CREDENTIAL_REVOKED;	/* job already ran */
		goto done;
	}

	task_g_slurmd_batch_request(req->job_id, req);	/* determine task affinity */

	slurm_mutex_lock(&prolog_mutex);
	first_job_run = !slurm_cred_jobid_cached(conf->vctx, req->job_id);

	/* BlueGene prolog waits for partition boot and is very slow.
	 * On any system we might need to load environment variables
	 * for Moab (see --get-user-env), which could also be slow.
	 * Just reply now and send a separate kill job request if the
	 * prolog or launch fail. */
	replied = true;
	if (new_msg && (slurm_send_rc_msg(msg, rc) < 1)) {
		/* The slurmctld is no longer waiting for a reply.
		 * This typically indicates that the slurmd was
		 * blocked from memory and/or CPUs and the slurmctld
		 * has requeued the batch job request. */
		error("Could not confirm batch launch for job %u, "
		      "aborting request", req->job_id);
		rc = SLURM_COMMUNICATIONS_SEND_ERROR;
		slurm_mutex_unlock(&prolog_mutex);
		goto done;
	}

	/*
	 * Insert jobid into credential context to denote that
	 * we've now "seen" an instance of the job
	 */
	if (first_job_run) {
		job_env_t job_env;
		slurm_cred_insert_jobid(conf->vctx, req->job_id);
		_add_job_running_prolog(req->job_id);
		slurm_mutex_unlock(&prolog_mutex);

		memset(&job_env, 0, sizeof(job_env_t));

		job_env.jobid = req->job_id;
		job_env.step_id = req->step_id;
		job_env.node_list = req->nodes;
		job_env.partition = req->partition;
		job_env.spank_job_env = req->spank_job_env;
		job_env.spank_job_env_size = req->spank_job_env_size;
		job_env.uid = req->uid;
		job_env.user_name = req->user_name;
		/*
		 * Run job prolog on this node
		 */
#if defined(HAVE_BG)
		select_g_select_jobinfo_get(req->select_jobinfo,
					    SELECT_JOBDATA_BLOCK_ID,
					    &job_env.resv_id);
#elif defined(HAVE_ALPS_CRAY)
		job_env.resv_id = select_g_select_jobinfo_xstrdup(
			req->select_jobinfo, SELECT_PRINT_RESV_ID);
#endif
		if (container_g_create(req->job_id))
			error("container_g_create(%u): %m", req->job_id);
		rc = _run_prolog(&job_env, req->cred);
		xfree(job_env.resv_id);
		if (rc) {
			int term_sig, exit_status;
			if (WIFSIGNALED(rc)) {
				exit_status = 0;
				term_sig    = WTERMSIG(rc);
			} else {
				exit_status = WEXITSTATUS(rc);
				term_sig    = 0;
			}
			error("[job %u] prolog failed status=%d:%d",
			      req->job_id, exit_status, term_sig);
			_prolog_error(req, rc);
			rc = ESLURMD_PROLOG_FAILED;
			goto done;
		}
	} else {
		slurm_mutex_unlock(&prolog_mutex);
		_wait_for_job_running_prolog(req->job_id);
	}

	if (_get_user_env(req) < 0) {
		bool requeue = _requeue_setup_env_fail();
		if (requeue) {
			rc = ESLURMD_SETUP_ENVIRONMENT_ERROR;
			goto done;
		}
	}
	_set_batch_job_limits(msg);

	/* Since job could have been killed while the prolog was
	 * running (especially on BlueGene, which can take minutes
	 * for partition booting). Test if the credential has since
	 * been revoked and exit as needed. */
	if (slurm_cred_revoked(conf->vctx, req->cred)) {
		info("Job %u already killed, do not launch batch job",
		     req->job_id);
		rc = ESLURMD_CREDENTIAL_REVOKED;	/* job already ran */
		goto done;
	}

	slurm_mutex_lock(&launch_mutex);
	if (req->step_id == SLURM_BATCH_SCRIPT)
		info("Launching batch job %u for UID %d",
		     req->job_id, req->uid);
	else
		info("Launching batch job %u.%u for UID %d",
		     req->job_id, req->step_id, req->uid);

	debug3("_rpc_batch_job: call to _forkexec_slurmstepd");
	rc = _forkexec_slurmstepd(LAUNCH_BATCH_JOB, (void *)req, cli, NULL,
				  (hostset_t)NULL, SLURM_PROTOCOL_VERSION);
	debug3("_rpc_batch_job: return from _forkexec_slurmstepd: %d", rc);

	slurm_mutex_unlock(&launch_mutex);
	_launch_complete_add(req->job_id);

	/* On a busy system, slurmstepd may take a while to respond,
	 * if the job was cancelled in the interim, run through the
	 * abort logic below. */
	revoked = slurm_cred_revoked(conf->vctx, req->cred);
	if (revoked)
		_launch_complete_rm(req->job_id);
	if (revoked && _is_batch_job_finished(req->job_id)) {
		/* If configured with select/serial and the batch job already
		 * completed, consider the job sucessfully launched and do
		 * not repeat termination logic below, which in the worst case
		 * just slows things down with another message. */
		revoked = false;
	}
	if (revoked) {
		info("Job %u killed while launch was in progress",
		     req->job_id);
		sleep(1);	/* give slurmstepd time to create
				 * the communication socket */
		_terminate_all_steps(req->job_id, true);
		rc = ESLURMD_CREDENTIAL_REVOKED;
		goto done;
	}

done:
	if (!replied) {
		if (new_msg && (slurm_send_rc_msg(msg, rc) < 1)) {
			/* The slurmctld is no longer waiting for a reply.
			 * This typically indicates that the slurmd was
			 * blocked from memory and/or CPUs and the slurmctld
			 * has requeued the batch job request. */
			error("Could not confirm batch launch for job %u, "
			      "aborting request", req->job_id);
			rc = SLURM_COMMUNICATIONS_SEND_ERROR;
		} else {
			/* No need to initiate separate reply below */
			rc = SLURM_SUCCESS;
		}
	}
	if (rc != SLURM_SUCCESS) {
		/* prolog or job launch failure,
		 * tell slurmctld that the job failed */
		if (req->step_id == SLURM_BATCH_SCRIPT)
			_launch_job_fail(req->job_id, rc);
		else
			_abort_step(req->job_id, req->step_id);
	}

	/*
	 *  If job prolog failed or we could not reply,
	 *  initiate message to slurmctld with current state
	 */
	if ((rc == ESLURMD_PROLOG_FAILED)
	    || (rc == SLURM_COMMUNICATIONS_SEND_ERROR)
	    || (rc == ESLURMD_SETUP_ENVIRONMENT_ERROR)) {
		send_registration_msg(rc, false);
	}
}

/*
 * Send notification message to batch job
 */
static void _rpc_job_notify(slurm_msg_t *msg)
{
	job_notify_msg_t *req = msg->data;
	uid_t req_uid = g_slurm_auth_get_uid(msg->auth_cred,
					     conf->auth_info);
	uid_t job_uid;
	List steps;
	ListIterator i;
	step_loc_t *stepd = NULL;
	int step_cnt  = 0;
	int fd;

	debug("_rpc_job_notify, uid = %d, jobid = %u", req_uid,
	      req->job_id);
	job_uid = _get_job_uid(req->job_id);
	if ((int)job_uid < 0)
		goto no_job;

	/*
	 * check that requesting user ID is the SLURM UID or root
	 */
	if ((req_uid != job_uid) && (!_slurm_authorized_user(req_uid))) {
		error("Security violation: job_notify(%u) from uid %d",
		      req->job_id, req_uid);
		return;
	}

	/* Forward the message to the batch step's slurmstepd */
	steps = stepd_available(conf->spooldir, conf->node_name);
	i = list_iterator_create(steps);
	while ((stepd = list_next(i))) {
		if ((stepd->jobid  != req->job_id) ||
		    (stepd->stepid != SLURM_BATCH_SCRIPT)) {
			continue;
		}

		step_cnt++;

		fd = stepd_connect(stepd->directory, stepd->nodename,
				   stepd->jobid, stepd->stepid,
				   &stepd->protocol_version);
		if (fd == -1) {
			debug3("Unable to connect to step %u.%u",
			       stepd->jobid, stepd->stepid);
			continue;
		}

		info("send notification to job %u.%u", stepd->jobid,
		     stepd->stepid);
		if (stepd_notify_job(fd, stepd->protocol_version,
				     req->message) < 0)
			debug("notify jobid=%u failed: %m", stepd->jobid);
		close(fd);
	}
	list_iterator_destroy(i);
	FREE_NULL_LIST(steps);

no_job:
	if (step_cnt == 0) {
		debug2("Can't find jobid %u to send notification message",
		       req->job_id);
	}
}

/*
 * Tell slurmctld that a batch job launch failed on this node: either
 * complete the batch script (credential already revoked) or ask for the
 * job to be requeued (held, unless "nohold_on_prolog_fail" is set in
 * SchedulerParameters). Falls back to completing the script when the
 * requeue is refused (ESLURM_DISABLED / ESLURM_BATCH_ONLY).
 */
static int
_launch_job_fail(uint32_t job_id, uint32_t slurm_rc)
{
	complete_batch_script_msg_t comp_msg;
	struct requeue_msg req_msg;
	slurm_msg_t resp_msg;
	int rc = 0, rpc_rc;
	/* cache the SchedulerParameters lookup across calls */
	static time_t config_update = 0;
	static bool requeue_no_hold = false;

	if (config_update != conf->last_update) {
		char *sched_params = slurm_get_sched_params();
		requeue_no_hold = (sched_params && strstr(
					   sched_params,
					   "nohold_on_prolog_fail"));
		xfree(sched_params);
		config_update = conf->last_update;
	}

	slurm_msg_t_init(&resp_msg);

	if (slurm_rc == ESLURMD_CREDENTIAL_REVOKED) {
		comp_msg.job_id = job_id;
		comp_msg.job_rc = INFINITE;
		comp_msg.slurm_rc = slurm_rc;
		comp_msg.node_name = conf->node_name;
		comp_msg.jobacct = NULL;	/* unused */
		resp_msg.msg_type = REQUEST_COMPLETE_BATCH_SCRIPT;
		resp_msg.data = &comp_msg;
	} else {
		req_msg.job_id = job_id;
		req_msg.job_id_str = NULL;
		if (requeue_no_hold) {
			req_msg.state = JOB_PENDING;
		} else {
			req_msg.state = (JOB_REQUEUE_HOLD|JOB_LAUNCH_FAILED);
		}
		resp_msg.msg_type = REQUEST_JOB_REQUEUE;
		resp_msg.data = &req_msg;
	}

	rpc_rc = slurm_send_recv_controller_rc_msg(&resp_msg, &rc);
	if ((resp_msg.msg_type == REQUEST_JOB_REQUEUE) &&
	    ((rc == ESLURM_DISABLED) || (rc == ESLURM_BATCH_ONLY))) {
		info("Could not launch job %u and not able to requeue it, "
		     "cancelling job", job_id);

		if ((slurm_rc == ESLURMD_PROLOG_FAILED) &&
		    (rc == ESLURM_BATCH_ONLY)) {
			char *buf = NULL;
			xstrfmtcat(buf, "Prolog failure on node %s",
				   conf->node_name);
			slurm_notify_job(job_id, buf);
			xfree(buf);
		}

		comp_msg.job_id = job_id;
		comp_msg.job_rc = INFINITE;
		comp_msg.slurm_rc = slurm_rc;
		comp_msg.node_name = conf->node_name;
		comp_msg.jobacct = NULL;	/* unused */
		resp_msg.msg_type = REQUEST_COMPLETE_BATCH_SCRIPT;
		resp_msg.data = &comp_msg;
		rpc_rc = slurm_send_recv_controller_rc_msg(&resp_msg, &rc);
	}

	return rpc_rc;
}

/*
 * Report a (non-batch-script) step as complete with a non-zero return
 * code so slurmctld aborts it after a failed launch on this node.
 */
static int
_abort_step(uint32_t job_id, uint32_t step_id)
{
	step_complete_msg_t resp;
	slurm_msg_t resp_msg;
	slurm_msg_t_init(&resp_msg);
	int rc, rc2;

	resp.job_id       = job_id;
	resp.job_step_id  = step_id;
	resp.range_first  = 0;
	resp.range_last   = 0;
	resp.step_rc      = 1;	/* non-zero: mark the step as failed */
	resp.jobacct      = jobacctinfo_create(NULL);
	resp_msg.msg_type = REQUEST_STEP_COMPLETE;
	resp_msg.data     = &resp;
	rc2 = slurm_send_recv_controller_rc_msg(&resp_msg, &rc);
	/* Note: we are ignoring the RPC return code */
	jobacctinfo_destroy(resp.jobacct);
	return rc2;
}

/* Reconfigure request: signal ourselves with SIGHUP (SlurmUser/root only) */
static void
_rpc_reconfig(slurm_msg_t *msg)
{
	uid_t req_uid = g_slurm_auth_get_uid(msg->auth_cred,
					     conf->auth_info);

	if (!_slurm_authorized_user(req_uid))
		error("Security violation, reconfig RPC from uid %d",
		      req_uid);
	else
		kill(conf->pid, SIGHUP);
	forward_wait(msg);
	/* Never return a message, slurmctld does not expect one */
}

/* Shutdown request: signal ourselves with SIGTERM (SlurmUser/root only) */
static void
_rpc_shutdown(slurm_msg_t *msg)
{
	uid_t req_uid = g_slurm_auth_get_uid(msg->auth_cred,
					     conf->auth_info);

	forward_wait(msg);
	if (!_slurm_authorized_user(req_uid))
		error("Security violation, shutdown RPC from uid %d",
		      req_uid);
	else {
		if (kill(conf->pid, SIGTERM) != 0)
			error("kill(%u,SIGTERM): %m", conf->pid);
	}

	/* Never return a message, slurmctld does not expect one */
}

/*
 * Reboot request: run the configured RebootProgram, optionally passing
 * requested node features as its argument (SlurmUser/root only).
 */
static void
_rpc_reboot(slurm_msg_t *msg)
{
	char *reboot_program, *cmd = NULL, *sp;
	reboot_msg_t *reboot_msg;
	slurm_ctl_conf_t *cfg;
	uid_t req_uid = g_slurm_auth_get_uid(msg->auth_cred,
					     conf->auth_info);
	int exit_code;

	if (!_slurm_authorized_user(req_uid))
		error("Security violation, reboot RPC from uid %d",
		      req_uid);
	else {
		cfg = slurm_conf_lock();
		reboot_program = cfg->reboot_program;
		if (reboot_program) {
			/* sp = program path with any arguments stripped */
			sp = strchr(reboot_program, ' ');
			if (sp)
				sp = xstrndup(reboot_program,
					      (sp - reboot_program));
			else
				sp = xstrdup(reboot_program);
			reboot_msg = (reboot_msg_t *) msg->data;
			if (reboot_msg && reboot_msg->features) {
				info("Node reboot request with features %s being processed",
				     reboot_msg->features);
				(void) node_features_g_node_set(
					reboot_msg->features);
				if (reboot_msg->features[0]) {
					xstrfmtcat(cmd, "%s %s",
						   sp, reboot_msg->features);
				} else {
					cmd = xstrdup(sp);
				}
			} else {
				cmd = xstrdup(sp);
				info("Node reboot request being processed");
			}
			if (access(sp, R_OK | X_OK) < 0)
				error("Cannot run RebootProgram [%s]: %m", sp);
			else if ((exit_code = system(cmd)))
				error("system(%s) returned %d", reboot_program,
				      exit_code);
			xfree(sp);
			xfree(cmd);
		} else
			error("RebootProgram isn't defined in config");
		slurm_conf_unlock();
	}

	/* Never return a message, slurmctld does not expect one */
	/* slurm_send_rc_msg(msg, rc); */
}

/* list_create() destructor for job_limits_list entries */
static void _job_limits_free(void *x)
{
	xfree(x);
}

/* list_find/delete predicate: match a limits record by job id */
static int _job_limits_match(void *x, void *key)
{
	job_mem_limits_t *job_limits_ptr = (job_mem_limits_t *) x;
	uint32_t *job_id = (uint32_t *) key;
	if (job_limits_ptr->job_id == *job_id)
		return 1;
	return 0;
}

/* list_find predicate: match a limits record by job AND step id */
static int _step_limits_match(void *x, void *key)
{
	job_mem_limits_t *job_limits_ptr = (job_mem_limits_t *) x;
	step_loc_t *step_ptr = (step_loc_t *) key;

	if ((job_limits_ptr->job_id  == step_ptr->jobid) &&
	    (job_limits_ptr->step_id == step_ptr->stepid))
		return 1;
	return 0;
}

/* Call only with job_limits_mutex locked */
static void
_load_job_limits(void)
{
	List steps;
	ListIterator step_iter;
	step_loc_t *stepd;
	int fd;
	job_mem_limits_t *job_limits_ptr;
	slurmstepd_mem_info_t stepd_mem_info;

	if (!job_limits_list)
		job_limits_list = list_create(_job_limits_free);
	job_limits_loaded = true;

	steps = stepd_available(conf->spooldir, conf->node_name);
	step_iter = list_iterator_create(steps);
	while ((stepd = list_next(step_iter))) {
		job_limits_ptr = list_find_first(job_limits_list,
						 _step_limits_match, stepd);
		if (job_limits_ptr)	/* already processed */
			continue;

		fd = stepd_connect(stepd->directory, stepd->nodename,
				   stepd->jobid, stepd->stepid,
				   &stepd->protocol_version);
		if (fd == -1)
			continue;	/* step completed */

		if (stepd_get_mem_limits(fd, stepd->protocol_version,
					 &stepd_mem_info) != SLURM_SUCCESS) {
			error("Error reading step %u.%u memory limits from "
			      "slurmstepd",
			      stepd->jobid, stepd->stepid);
			close(fd);
			continue;
		}

		if ((stepd_mem_info.job_mem_limit
		     || stepd_mem_info.step_mem_limit)) {
			/* create entry for this job */
			job_limits_ptr = xmalloc(sizeof(job_mem_limits_t));
			job_limits_ptr->job_id   = stepd->jobid;
			job_limits_ptr->step_id  = stepd->stepid;
			job_limits_ptr->job_mem  = stepd_mem_info.job_mem_limit;
			job_limits_ptr->step_mem = stepd_mem_info.step_mem_limit;
#if _LIMIT_INFO
			info("RecLim step:%u.%u job_mem:%u step_mem:%u",
			     job_limits_ptr->job_id, job_limits_ptr->step_id,
			     job_limits_ptr->job_mem,
			     job_limits_ptr->step_mem);
#endif
			list_append(job_limits_list, job_limits_ptr);
		}
		close(fd);
	}
	list_iterator_destroy(step_iter);
	FREE_NULL_LIST(steps);
}

/*
 * Ask slurmctld to cancel a step that exceeded its memory limit, and
 * send a notify message so any attached srun reports the reason.
 */
static void
_cancel_step_mem_limit(uint32_t job_id, uint32_t step_id)
{
	slurm_msg_t msg;
	job_notify_msg_t notify_req;
	job_step_kill_msg_t kill_req;

	/* NOTE: Batch jobs may have no srun to get this message */
	slurm_msg_t_init(&msg);
	/* NOTE(review): notify_req is not memset while kill_req below is;
	 * all three fields used are assigned — confirm the struct has no
	 * other fields read by the pack code. */
	notify_req.job_id      = job_id;
	notify_req.job_step_id = step_id;
	notify_req.message     = "Exceeded job memory limit";
	msg.msg_type    = REQUEST_JOB_NOTIFY;
	msg.data        = &notify_req;
	slurm_send_only_controller_msg(&msg);

	memset(&kill_req, 0, sizeof(job_step_kill_msg_t));
	kill_req.job_id      = job_id;
	kill_req.job_step_id = step_id;
	kill_req.signal      = SIGKILL;
	kill_req.flags       = (uint16_t) 0;
	msg.msg_type    = REQUEST_CANCEL_JOB_STEP;
	msg.data        = &kill_req;
	slurm_send_only_controller_msg(&msg);
}

/* Enforce job memory limits here in slurmd. Step memory limits are
 * enforced within slurmstepd (using jobacct_gather plugin).
 */
static void
_enforce_job_mem_limit(void)
{
	List steps;
	ListIterator step_iter, job_limits_iter;
	job_mem_limits_t *job_limits_ptr;
	step_loc_t *stepd;
	int fd, i, job_inx, job_cnt;
	uint16_t vsize_factor;
	uint64_t step_rss, step_vsize;
	job_step_id_msg_t acct_req;
	job_step_stat_t *resp = NULL;
	/* per-job aggregate of limits and usage, all in MB */
	struct job_mem_info {
		uint32_t job_id;
		uint32_t mem_limit;	/* MB */
		uint32_t mem_used;	/* MB */
		uint32_t vsize_limit;	/* MB */
		uint32_t vsize_used;	/* MB */
	};
	struct job_mem_info *job_mem_info_ptr = NULL;

	/* If users have configured MemLimitEnforce=no
	 * in their slurm.conf keep going.
	 */
	if (conf->mem_limit_enforce == false)
		return;

	slurm_mutex_lock(&job_limits_mutex);
	if (!job_limits_loaded)
		_load_job_limits();
	if (list_count(job_limits_list) == 0) {
		slurm_mutex_unlock(&job_limits_mutex);
		return;
	}

	/* Build table of job limits, use highest mem limit recorded */
	job_mem_info_ptr = xmalloc((list_count(job_limits_list) + 1) *
				   sizeof(struct job_mem_info));
	job_cnt = 0;
	job_limits_iter = list_iterator_create(job_limits_list);
	while ((job_limits_ptr = list_next(job_limits_iter))) {
		if (job_limits_ptr->job_mem == 0)	/* no job limit */
			continue;
		for (i=0; i<job_cnt; i++) {
			if (job_mem_info_ptr[i].job_id !=
			    job_limits_ptr->job_id)
				continue;
			job_mem_info_ptr[i].mem_limit = MAX(
				job_mem_info_ptr[i].mem_limit,
				job_limits_ptr->job_mem);
			break;
		}
		if (i < job_cnt)	/* job already found & recorded */
			continue;
		job_mem_info_ptr[job_cnt].job_id    = job_limits_ptr->job_id;
		job_mem_info_ptr[job_cnt].mem_limit = job_limits_ptr->job_mem;
		job_cnt++;
	}
	list_iterator_destroy(job_limits_iter);
	slurm_mutex_unlock(&job_limits_mutex);

	/* vsize limit = RSS limit scaled by VSizeFactor percent */
	vsize_factor = slurm_get_vsize_factor();
	for (i=0; i<job_cnt; i++) {
		job_mem_info_ptr[i].vsize_limit = job_mem_info_ptr[i].
						  mem_limit;
		job_mem_info_ptr[i].vsize_limit *= (vsize_factor / 100.0);
	}

	/* Sum current RSS/vsize of every tracked step, per job */
	steps = stepd_available(conf->spooldir, conf->node_name);
	step_iter = list_iterator_create(steps);
	while ((stepd = list_next(step_iter))) {
		for (job_inx=0; job_inx<job_cnt; job_inx++) {
			if (job_mem_info_ptr[job_inx].job_id == stepd->jobid)
				break;
		}
		if (job_inx >= job_cnt)
			continue;	/* job/step not being tracked */

		fd = stepd_connect(stepd->directory, stepd->nodename,
				   stepd->jobid, stepd->stepid,
				   &stepd->protocol_version);
		if (fd == -1)
			continue;	/* step completed */
		acct_req.job_id  = stepd->jobid;
		acct_req.step_id = stepd->stepid;
		resp = xmalloc(sizeof(job_step_stat_t));

		if ((!stepd_stat_jobacct(
			     fd, stepd->protocol_version,
			     &acct_req, resp)) &&
		    (resp->jobacct)) {
			/* resp->jobacct is NULL if account is disabled */
			jobacctinfo_getinfo((struct jobacctinfo *)
					    resp->jobacct,
					    JOBACCT_DATA_TOT_RSS,
					    &step_rss,
					    stepd->protocol_version);
			jobacctinfo_getinfo((struct jobacctinfo *)
					    resp->jobacct,
					    JOBACCT_DATA_TOT_VSIZE,
					    &step_vsize,
					    stepd->protocol_version);
#if _LIMIT_INFO
			info("Step:%u.%u RSS:%"PRIu64" KB VSIZE:%"PRIu64" KB",
			     stepd->jobid, stepd->stepid,
			     step_rss, step_vsize);
#endif
			step_rss /= 1024;	/* KB to MB */
			step_rss = MAX(step_rss, 1);
			job_mem_info_ptr[job_inx].mem_used += step_rss;
			step_vsize /= 1024;	/* KB to MB */
			step_vsize = MAX(step_vsize, 1);
			job_mem_info_ptr[job_inx].vsize_used += step_vsize;
		}
		slurm_free_job_step_stat(resp);
		close(fd);
	}
	list_iterator_destroy(step_iter);
	FREE_NULL_LIST(steps);

	for (i=0; i<job_cnt; i++) {
		if (job_mem_info_ptr[i].mem_used == 0) {
			/* no steps found,
			 * purge records for all steps of this job */
			slurm_mutex_lock(&job_limits_mutex);
			list_delete_all(job_limits_list, _job_limits_match,
					&job_mem_info_ptr[i].job_id);
			slurm_mutex_unlock(&job_limits_mutex);
			break;
		}

		if ((job_mem_info_ptr[i].mem_limit != 0) &&
		    (job_mem_info_ptr[i].mem_used >
		     job_mem_info_ptr[i].mem_limit)) {
			info("Job %u exceeded memory limit (%u>%u), "
			     "cancelling it", job_mem_info_ptr[i].job_id,
			     job_mem_info_ptr[i].mem_used,
			     job_mem_info_ptr[i].mem_limit);
			_cancel_step_mem_limit(job_mem_info_ptr[i].job_id,
					       NO_VAL);
		} else if ((job_mem_info_ptr[i].vsize_limit != 0) &&
			   (job_mem_info_ptr[i].vsize_used >
			    job_mem_info_ptr[i].vsize_limit)) {
			info("Job %u exceeded virtual memory limit (%u>%u), "
			     "cancelling it", job_mem_info_ptr[i].job_id,
			     job_mem_info_ptr[i].vsize_used,
			     job_mem_info_ptr[i].vsize_limit);
			_cancel_step_mem_limit(job_mem_info_ptr[i].job_id,
					       NO_VAL);
		}
	}
	xfree(job_mem_info_ptr);
}

/*
 * Ping from slurmctld: verify the sender, reply with current CPU load
 * and free memory, and piggy-back housekeeping (memory-limit
 * enforcement and stalled file-bcast cleanup).
 */
static int
_rpc_ping(slurm_msg_t *msg)
{
	int        rc = SLURM_SUCCESS;
	uid_t req_uid = g_slurm_auth_get_uid(msg->auth_cred,
					     conf->auth_info);
	static bool first_msg = true;

	if (!_slurm_authorized_user(req_uid)) {
		error("Security violation, ping RPC from uid %d",
		      req_uid);
		if (first_msg) {
			error("Do you have SlurmUser configured as uid %d?",
			      req_uid);
		}
		rc = ESLURM_USER_ID_MISSING;	/* or bad in this case */
	}
	first_msg = false;

	if (rc != SLURM_SUCCESS) {
		/* Return result. If the reply can't be sent this indicates
		 * 1. The network is broken OR
		 * 2. slurmctld has died OR
		 * 3. slurmd was paged out due to full memory
		 * If the reply request fails, we send an registration message
		 * to slurmctld in hopes of avoiding having the node set DOWN
		 * due to slurmd paging and not being able to respond in a
		 * timely fashion.
		 */
		if (slurm_send_rc_msg(msg, rc) < 0) {
			error("Error responding to ping: %m");
			send_registration_msg(SLURM_SUCCESS, false);
		}
	} else {
		slurm_msg_t resp_msg;
		ping_slurmd_resp_msg_t ping_resp;
		get_cpu_load(&ping_resp.cpu_load);
		get_free_mem(&ping_resp.free_mem);
		slurm_msg_t_copy(&resp_msg, msg);
		resp_msg.msg_type = RESPONSE_PING_SLURMD;
		resp_msg.data     = &ping_resp;
		slurm_send_node_msg(msg->conn_fd, &resp_msg);
	}

	/* Take this opportunity to enforce any job memory limits */
	_enforce_job_mem_limit();
	/* Clear up any stalled file transfers as well */
	_file_bcast_cleanup();
	return rc;
}

/*
 * Run the configured HealthCheckProgram on behalf of slurmctld
 * (SlurmUser/root only); also piggy-backs the same housekeeping as
 * _rpc_ping().
 */
static int
_rpc_health_check(slurm_msg_t *msg)
{
	int        rc = SLURM_SUCCESS;
	uid_t req_uid = g_slurm_auth_get_uid(msg->auth_cred, conf->auth_info);

	if (!_slurm_authorized_user(req_uid)) {
		error("Security violation, health check RPC from uid %d",
		      req_uid);
		rc = ESLURM_USER_ID_MISSING;	/* or bad in this case */
	}

	/* Return result. If the reply can't be sent this indicates that
	 * 1. The network is broken OR
	 * 2. slurmctld has died OR
	 * 3. slurmd was paged out due to full memory
	 * If the reply request fails, we send an registration message to
	 * slurmctld in hopes of avoiding having the node set DOWN due to
	 * slurmd paging and not being able to respond in a timely fashion.
	 */
	if (slurm_send_rc_msg(msg, rc) < 0) {
		error("Error responding to health check: %m");
		send_registration_msg(SLURM_SUCCESS, false);
	}

	if (rc == SLURM_SUCCESS)
		rc = run_script_health_check();

	/* Take this opportunity to enforce any job memory limits */
	_enforce_job_mem_limit();
	/* Clear up any stalled file transfers as well */
	_file_bcast_cleanup();
	return rc;
}

/*
 * Poll the node's energy accounting plugin and return the current
 * node energy reading to slurmctld (SlurmUser/root only).
 */
static int
_rpc_acct_gather_update(slurm_msg_t *msg)
{
	int        rc = SLURM_SUCCESS;
	uid_t req_uid = g_slurm_auth_get_uid(msg->auth_cred, conf->auth_info);
	static bool first_msg = true;

	if (!_slurm_authorized_user(req_uid)) {
		error("Security violation, acct_gather_update RPC from uid %d",
		      req_uid);
		if (first_msg) {
			error("Do you have SlurmUser configured as uid %d?",
			      req_uid);
		}
		rc = ESLURM_USER_ID_MISSING;	/* or bad in this case */
	}
	first_msg = false;

	if (rc != SLURM_SUCCESS) {
		/* Return result. If the reply can't be sent this indicates
		 * 1. The network is broken OR
		 * 2. slurmctld has died OR
		 * 3. slurmd was paged out due to full memory
		 * If the reply request fails, we send an registration message
		 * to slurmctld in hopes of avoiding having the node set DOWN
		 * due to slurmd paging and not being able to respond in a
		 * timely fashion.
		 */
		if (slurm_send_rc_msg(msg, rc) < 0) {
			error("Error responding to account gather: %m");
			send_registration_msg(SLURM_SUCCESS, false);
		}
	} else {
		slurm_msg_t resp_msg;
		acct_gather_node_resp_msg_t acct_msg;

		/* Update node energy usage data */
		acct_gather_energy_g_update_node_energy();

		memset(&acct_msg, 0, sizeof(acct_gather_node_resp_msg_t));
		acct_msg.node_name = conf->node_name;
		acct_msg.sensor_cnt = 1;
		acct_msg.energy = acct_gather_energy_alloc(acct_msg.sensor_cnt);
		acct_gather_energy_g_get_data(
			ENERGY_DATA_NODE_ENERGY, acct_msg.energy);

		slurm_msg_t_copy(&resp_msg, msg);
		resp_msg.msg_type = RESPONSE_ACCT_GATHER_UPDATE;
		resp_msg.data     = &acct_msg;
		slurm_send_node_msg(msg->conn_fd, &resp_msg);

		acct_gather_energy_destroy(acct_msg.energy);
	}
	return rc;
}

/*
 * Return per-sensor energy data; forces a fresh poll when the cached
 * reading is older than the requester's delta (SlurmUser/root only).
 */
static int
_rpc_acct_gather_energy(slurm_msg_t *msg)
{
	int        rc = SLURM_SUCCESS;
	uid_t req_uid = g_slurm_auth_get_uid(msg->auth_cred, conf->auth_info);
	static bool first_msg = true;

	if (!_slurm_authorized_user(req_uid)) {
		error("Security violation, acct_gather_update RPC from uid %d",
		      req_uid);
		if (first_msg) {
			error("Do you have SlurmUser configured as uid %d?",
			      req_uid);
		}
		rc = ESLURM_USER_ID_MISSING;	/* or bad in this case */
	}
	first_msg = false;

	if (rc != SLURM_SUCCESS) {
		if (slurm_send_rc_msg(msg, rc) < 0)
			error("Error responding to energy request: %m");
	} else {
		slurm_msg_t resp_msg;
		acct_gather_node_resp_msg_t acct_msg;
		time_t now = time(NULL), last_poll = 0;
		int data_type = ENERGY_DATA_STRUCT;
		uint16_t sensor_cnt;
		acct_gather_energy_req_msg_t *req = msg->data;

		acct_gather_energy_g_get_data(ENERGY_DATA_LAST_POLL,
					      &last_poll);
		acct_gather_energy_g_get_data(ENERGY_DATA_SENSOR_CNT,
					      &sensor_cnt);

		/* If we polled later than delta seconds then force a
		   new poll.
		 */
		if ((now - last_poll) > req->delta)
			data_type = ENERGY_DATA_JOULES_TASK;

		memset(&acct_msg, 0, sizeof(acct_gather_node_resp_msg_t));
		acct_msg.sensor_cnt = sensor_cnt;
		acct_msg.energy = acct_gather_energy_alloc(acct_msg.sensor_cnt);

		acct_gather_energy_g_get_data(data_type, acct_msg.energy);

		slurm_msg_t_copy(&resp_msg, msg);
		resp_msg.msg_type = RESPONSE_ACCT_GATHER_ENERGY;
		resp_msg.data     = &acct_msg;
		slurm_send_node_msg(msg->conn_fd, &resp_msg);

		acct_gather_energy_destroy(acct_msg.energy);
	}
	return rc;
}

/*
 * Deliver a signal to one job step's container via its slurmstepd.
 * The caller must either own the step or be SlurmUser/root. Returns
 * SLURM_FAILURE while the prolog is still running (no stepd yet) so
 * the caller can retry.
 */
static int
_signal_jobstep(uint32_t jobid, uint32_t stepid, uid_t req_uid,
		uint32_t signal)
{
	int fd, rc = SLURM_SUCCESS;
	uid_t uid;
	uint16_t protocol_version;

	/* There will be no stepd if the prolog is still running
	 * Return failure so caller can retry. */
	if (_prolog_is_running (jobid)) {
		info ("signal %d req for %u.%u while prolog is running."
		      " Returning failure.", signal, jobid, stepid);
		return SLURM_FAILURE;
	}

	fd = stepd_connect(conf->spooldir, conf->node_name, jobid, stepid,
			   &protocol_version);
	if (fd == -1) {
		debug("signal for nonexistent %u.%u stepd_connect failed: %m",
		      jobid, stepid);
		return ESLURM_INVALID_JOB_ID;
	}

	if ((int)(uid = stepd_get_uid(fd, protocol_version)) < 0) {
		debug("_signal_jobstep: couldn't read from the step %u.%u: %m",
		      jobid, stepid);
		rc = ESLURM_INVALID_JOB_ID;
		goto done2;
	}

	if ((req_uid != uid) && (!_slurm_authorized_user(req_uid))) {
		debug("kill req from uid %ld for job %u.%u owned by uid %ld",
		      (long) req_uid, jobid, stepid, (long) uid);
		rc = ESLURM_USER_ID_MISSING;	/* or bad in this case */
		goto done2;
	}

#ifdef HAVE_AIX
#  ifdef SIGMIGRATE
#    ifdef SIGSOUND
	/* SIGMIGRATE and SIGSOUND are used to initiate job checkpoint on AIX.
	 * These signals are not sent to the entire process group, but just a
	 * single process, namely the PMD.
	 */
	if (signal == SIGMIGRATE || signal == SIGSOUND) {
		rc = stepd_signal_task_local(fd, protocol_version,
					     signal, 0);
		goto done2;
	}
#    endif
#  endif
#endif

	rc = stepd_signal_container(fd, protocol_version, signal);
	if (rc == -1)
		rc = ESLURMD_JOB_NOTRUNNING;

done2:
	close(fd);
	return rc;
}

/*
 * Signal job step(s). The upper byte of req->signal carries flags
 * (KILL_FULL_JOB / KILL_STEPS_ONLY); the low bits carry the signal
 * number. Per-step authorization is performed in _signal_jobstep().
 */
static void
_rpc_signal_tasks(slurm_msg_t *msg)
{
	int               rc = SLURM_SUCCESS;
	uid_t req_uid = g_slurm_auth_get_uid(msg->auth_cred, conf->auth_info);
	kill_tasks_msg_t *req = (kill_tasks_msg_t *) msg->data;
	uint32_t flag;
	uint32_t sig;

	flag = req->signal >> 24;
	sig  = req->signal & 0xfff;

	if (flag & KILL_FULL_JOB) {
		debug("%s: sending signal %u to entire job %u flag %u",
		      __func__, sig, req->job_id, flag);
		_kill_all_active_steps(req->job_id, sig, true);
	} else if (flag & KILL_STEPS_ONLY) {
		debug("%s: sending signal %u to all steps job %u flag %u",
		      __func__, sig, req->job_id, flag);
		_kill_all_active_steps(req->job_id, sig, false);
	} else {
		debug("%s: sending signal %u to step %u.%u flag %u", __func__,
		      sig, req->job_id, req->job_step_id, flag);
		rc = _signal_jobstep(req->job_id, req->job_step_id, req_uid,
				     req->signal);
	}
	slurm_send_rc_msg(msg, rc);
}

/*
 * Forward a checkpoint request to the step's slurmstepd; requester
 * must own the step or be SlurmUser/root.
 */
static void
_rpc_checkpoint_tasks(slurm_msg_t *msg)
{
	int               fd;
	int               rc = SLURM_SUCCESS;
	uid_t req_uid = g_slurm_auth_get_uid(msg->auth_cred, conf->auth_info);
	checkpoint_tasks_msg_t *req = (checkpoint_tasks_msg_t *) msg->data;
	uint16_t protocol_version;
	uid_t uid;

	fd = stepd_connect(conf->spooldir, conf->node_name,
			   req->job_id, req->job_step_id, &protocol_version);
	if (fd == -1) {
		debug("checkpoint for nonexistent %u.%u stepd_connect "
		      "failed: %m", req->job_id, req->job_step_id);
		rc = ESLURM_INVALID_JOB_ID;
		goto done;
	}

	if ((int)(uid = stepd_get_uid(fd, protocol_version)) < 0) {
		debug("_rpc_checkpoint_tasks: couldn't read from the "
		      "step %u.%u: %m",
		      req->job_id, req->job_step_id);
		rc = ESLURM_INVALID_JOB_ID;
		goto done2;
	}

	if ((req_uid != uid) && (!_slurm_authorized_user(req_uid))) {
		debug("checkpoint req from uid %ld for job %u.%u owned by "
		      "uid %ld",
		      (long) req_uid, req->job_id, req->job_step_id,
		      (long) uid);
		rc = ESLURM_USER_ID_MISSING;	/* or bad in this case */
		goto done2;
	}

	rc = stepd_checkpoint(fd, protocol_version,
			      req->timestamp, req->image_dir);
	if (rc == -1)
		rc = ESLURMD_JOB_NOTRUNNING;

done2:
	close(fd);
done:
	slurm_send_rc_msg(msg, rc);
}

/*
 * Terminate all tasks of a step via its slurmstepd; requester must
 * own the step or be SlurmUser/root.
 */
static void
_rpc_terminate_tasks(slurm_msg_t *msg)
{
	kill_tasks_msg_t *req = (kill_tasks_msg_t *) msg->data;
	int               rc = SLURM_SUCCESS;
	int               fd;
	uid_t req_uid, uid;
	uint16_t protocol_version;

	debug3("Entering _rpc_terminate_tasks");
	fd = stepd_connect(conf->spooldir, conf->node_name,
			   req->job_id, req->job_step_id, &protocol_version);
	if (fd == -1) {
		debug("kill for nonexistent job %u.%u stepd_connect "
		      "failed: %m", req->job_id, req->job_step_id);
		rc = ESLURM_INVALID_JOB_ID;
		goto done;
	}

	if ((int)(uid = stepd_get_uid(fd, protocol_version)) < 0) {
		debug("terminate_tasks couldn't read from the step %u.%u: %m",
		      req->job_id, req->job_step_id);
		rc = ESLURM_INVALID_JOB_ID;
		goto done2;
	}

	req_uid = g_slurm_auth_get_uid(msg->auth_cred, conf->auth_info);
	if ((req_uid != uid) && (!_slurm_authorized_user(req_uid))) {
		debug("kill req from uid %ld for job %u.%u owned by uid %ld",
		      (long) req_uid, req->job_id, req->job_step_id,
		      (long) uid);
		rc = ESLURM_USER_ID_MISSING;	/* or bad in this case */
		goto done2;
	}

	rc = stepd_terminate(fd, protocol_version);
	if (rc == -1)
		rc = ESLURMD_JOB_NOTRUNNING;

done2:
	close(fd);
done:
	slurm_send_rc_msg(msg, rc);
}

/*
 * Relay a step-complete message (from another slurmstepd) to the local
 * stepd; only SlurmUser/root may send these.
 */
static int
_rpc_step_complete(slurm_msg_t *msg)
{
	step_complete_msg_t *req = (step_complete_msg_t *)msg->data;
	int               rc = SLURM_SUCCESS;
	int               fd;
	uid_t req_uid;
	uint16_t protocol_version;

	debug3("Entering _rpc_step_complete");
	fd = stepd_connect(conf->spooldir, conf->node_name,
			   req->job_id, req->job_step_id, &protocol_version);
	if (fd == -1) {
		error("stepd_connect to %u.%u failed: %m",
		      req->job_id, req->job_step_id);
		rc = ESLURM_INVALID_JOB_ID;
		goto done;
	}

	/* step completion messages are only allowed from other slurmstepd,
	   so only root or SlurmUser is allowed here */
	req_uid = g_slurm_auth_get_uid(msg->auth_cred, conf->auth_info);
	if (!_slurm_authorized_user(req_uid)) {
		debug("step completion from uid %ld for job %u.%u",
		      (long) req_uid, req->job_id, req->job_step_id);
		rc = ESLURM_USER_ID_MISSING;	/* or bad in this case */
		goto done2;
	}

	rc = stepd_completion(fd, protocol_version, req);
	if (rc == -1)
		rc = ESLURMD_JOB_NOTRUNNING;

done2:
	close(fd);
done:
	slurm_send_rc_msg(msg, rc);
	return rc;
}

/* Initialize a REQUEST_STEP_COMPLETE message wrapping "data" */
static void _setup_step_complete_msg(slurm_msg_t *msg, void *data)
{
	slurm_msg_t_init(msg);
	msg->msg_type = REQUEST_STEP_COMPLETE;
	msg->data = data;
}

/* This step_complete RPC came from slurmstepd because we are using
 * message aggregation configured and we are at the head of the tree.
 * This just adds the message to the list and goes on it's merry way. */
static int
_rpc_step_complete_aggr(slurm_msg_t *msg)
{
	int rc;
	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, conf->auth_info);

	if (!_slurm_authorized_user(uid)) {
		error("Security violation: step_complete_aggr from uid %d",
		      uid);
		if (msg->conn_fd >= 0)
			slurm_send_rc_msg(msg, ESLURM_USER_ID_MISSING);
		return SLURM_ERROR;
	}

	if (conf->msg_aggr_window_msgs > 1) {
		/* queue the message for batched delivery to slurmctld */
		slurm_msg_t *req = xmalloc_nz(sizeof(slurm_msg_t));
		_setup_step_complete_msg(req, msg->data);
		msg->data = NULL;	/* ownership moved to the new msg */
		msg_aggr_add_msg(req, 1, NULL);
	} else {
		slurm_msg_t req;
		_setup_step_complete_msg(&req, msg->data);
		while (slurm_send_recv_controller_rc_msg(&req, &rc) < 0) {
			error("Unable to send step complete, "
			      "trying again in a minute: %m");
		}
	}

	/* Finish communication with the stepd, we have to wait for
	 * the message back from the slurmctld or we will cause a race
	 * condition with srun.
	 */
	slurm_send_rc_msg(msg, SLURM_SUCCESS);

	return SLURM_SUCCESS;
}

/* Get list of active jobs and steps, xfree returned value */
static char *
_get_step_list(void)
{
	char tmp[64];
	char *step_list = NULL;
	List steps;
	ListIterator i;
	step_loc_t *stepd;

	steps = stepd_available(conf->spooldir, conf->node_name);
	i = list_iterator_create(steps);
	while ((stepd = list_next(i))) {
		int fd;
		fd = stepd_connect(stepd->directory, stepd->nodename,
				   stepd->jobid, stepd->stepid,
				   &stepd->protocol_version);
		if (fd == -1)
			continue;

		if (stepd_state(fd, stepd->protocol_version)
		    == SLURMSTEPD_NOT_RUNNING) {
			debug("stale domain socket for stepd %u.%u ",
			      stepd->jobid, stepd->stepid);
			close(fd);
			continue;
		}
		close(fd);

		if (step_list)
			xstrcat(step_list, ", ");
		if (stepd->stepid == NO_VAL) {
			snprintf(tmp, sizeof(tmp), "%u",
				 stepd->jobid);
			xstrcat(step_list, tmp);
		} else {
			snprintf(tmp, sizeof(tmp), "%u.%u",
				 stepd->jobid, stepd->stepid);
			xstrcat(step_list, tmp);
		}
	}
	list_iterator_destroy(i);
	FREE_NULL_LIST(steps);

	if (step_list == NULL)
		xstrcat(step_list, "NONE");
	return step_list;
}

/* Report this slurmd's configuration and active step list */
static int
_rpc_daemon_status(slurm_msg_t *msg)
{
	slurm_msg_t      resp_msg;
	slurmd_status_t *resp = NULL;

	resp = xmalloc(sizeof(slurmd_status_t));
	resp->actual_cpus        = conf->actual_cpus;
	resp->actual_boards      = conf->actual_boards;
	resp->actual_sockets     = conf->actual_sockets;
	resp->actual_cores       = conf->actual_cores;
	resp->actual_threads     = conf->actual_threads;
	resp->actual_real_mem    = conf->real_memory_size;
	resp->actual_tmp_disk    = conf->tmp_disk_space;
	resp->booted             = startup;
	resp->hostname           = xstrdup(conf->node_name);
	resp->step_list          = _get_step_list();
	resp->last_slurmctld_msg = last_slurmctld_msg;
	resp->pid                = conf->pid;
	resp->slurmd_debug       = conf->debug_level;
	resp->slurmd_logfile     = xstrdup(conf->logfile);
	resp->version            = xstrdup(SLURM_VERSION_STRING);

	slurm_msg_t_copy(&resp_msg, msg);
	resp_msg.msg_type = RESPONSE_SLURMD_STATUS;
	resp_msg.data     = resp;
	slurm_send_node_msg(msg->conn_fd, &resp_msg);
slurm_free_slurmd_status(resp); return SLURM_SUCCESS; } static int _rpc_stat_jobacct(slurm_msg_t *msg) { job_step_id_msg_t *req = (job_step_id_msg_t *)msg->data; slurm_msg_t resp_msg; job_step_stat_t *resp = NULL; int fd; uid_t req_uid, uid; uint16_t protocol_version; debug3("Entering _rpc_stat_jobacct"); /* step completion messages are only allowed from other slurmstepd, so only root or SlurmUser is allowed here */ req_uid = g_slurm_auth_get_uid(msg->auth_cred, conf->auth_info); fd = stepd_connect(conf->spooldir, conf->node_name, req->job_id, req->step_id, &protocol_version); if (fd == -1) { error("stepd_connect to %u.%u failed: %m", req->job_id, req->step_id); slurm_send_rc_msg(msg, ESLURM_INVALID_JOB_ID); return ESLURM_INVALID_JOB_ID; } if ((int)(uid = stepd_get_uid(fd, protocol_version)) < 0) { debug("stat_jobacct couldn't read from the step %u.%u: %m", req->job_id, req->step_id); close(fd); if (msg->conn_fd >= 0) slurm_send_rc_msg(msg, ESLURM_INVALID_JOB_ID); return ESLURM_INVALID_JOB_ID; } /* * check that requesting user ID is the SLURM UID or root */ if ((req_uid != uid) && (!_slurm_authorized_user(req_uid))) { error("stat_jobacct from uid %ld for job %u " "owned by uid %ld", (long) req_uid, req->job_id, (long) uid); if (msg->conn_fd >= 0) { slurm_send_rc_msg(msg, ESLURM_USER_ID_MISSING); close(fd); return ESLURM_USER_ID_MISSING;/* or bad in this case */ } } resp = xmalloc(sizeof(job_step_stat_t)); resp->step_pids = xmalloc(sizeof(job_step_pids_t)); resp->step_pids->node_name = xstrdup(conf->node_name); slurm_msg_t_copy(&resp_msg, msg); resp->return_code = SLURM_SUCCESS; if (stepd_stat_jobacct(fd, protocol_version, req, resp) == SLURM_ERROR) { debug("accounting for nonexistent job %u.%u requested", req->job_id, req->step_id); } /* FIX ME: This should probably happen in the stepd_stat_jobacct to get more information about the pids. 
*/ if (stepd_list_pids(fd, protocol_version, &resp->step_pids->pid, &resp->step_pids->pid_cnt) == SLURM_ERROR) { debug("No pids for nonexistent job %u.%u requested", req->job_id, req->step_id); } close(fd); resp_msg.msg_type = RESPONSE_JOB_STEP_STAT; resp_msg.data = resp; slurm_send_node_msg(msg->conn_fd, &resp_msg); slurm_free_job_step_stat(resp); return SLURM_SUCCESS; } static int _callerid_find_job(callerid_conn_t conn, uint32_t *job_id) { ino_t inode; pid_t pid; int rc; rc = callerid_find_inode_by_conn(conn, &inode); if (rc != SLURM_SUCCESS) { debug3("network_callerid inode not found"); return ESLURM_INVALID_JOB_ID; } debug3("network_callerid found inode %lu", (long unsigned int)inode); rc = find_pid_by_inode(&pid, inode); if (rc != SLURM_SUCCESS) { debug3("network_callerid process not found"); return ESLURM_INVALID_JOB_ID; } debug3("network_callerid found process %d", (pid_t)pid); rc = slurm_pid2jobid(pid, job_id); if (rc != SLURM_SUCCESS) { debug3("network_callerid job not found"); return ESLURM_INVALID_JOB_ID; } debug3("network_callerid found job %u", *job_id); return SLURM_SUCCESS; } static int _rpc_network_callerid(slurm_msg_t *msg) { network_callerid_msg_t *req = (network_callerid_msg_t *)msg->data; slurm_msg_t resp_msg; network_callerid_resp_t *resp = NULL; uid_t req_uid = -1; uid_t job_uid = -1; uint32_t job_id = (uint32_t)NO_VAL; callerid_conn_t conn; int rc = ESLURM_INVALID_JOB_ID; char ip_src_str[INET6_ADDRSTRLEN]; char ip_dst_str[INET6_ADDRSTRLEN]; debug3("Entering _rpc_network_callerid"); resp = xmalloc(sizeof(network_callerid_resp_t)); slurm_msg_t_copy(&resp_msg, msg); /* Ideally this would be in an if block only when debug3 is enabled */ inet_ntop(req->af, req->ip_src, ip_src_str, INET6_ADDRSTRLEN); inet_ntop(req->af, req->ip_dst, ip_dst_str, INET6_ADDRSTRLEN); debug3("network_callerid checking %s:%u => %s:%u", ip_src_str, req->port_src, ip_dst_str, req->port_dst); /* My remote is the other's source */ memcpy((void*)&conn.ip_dst, 
(void*)&req->ip_src, 16); memcpy((void*)&conn.ip_src, (void*)&req->ip_dst, 16); conn.port_src = req->port_dst; conn.port_dst = req->port_src; conn.af = req->af; /* Find the job id */ rc = _callerid_find_job(conn, &job_id); if (rc == SLURM_SUCCESS) { /* We found the job */ req_uid = g_slurm_auth_get_uid(msg->auth_cred, conf->auth_info); if (!_slurm_authorized_user(req_uid)) { /* Requestor is not root or SlurmUser */ job_uid = _get_job_uid(job_id); if (job_uid != req_uid) { /* RPC call sent by non-root user who does not * own this job. Do not send them the job ID. */ error("Security violation, REQUEST_NETWORK_CALLERID from uid=%d", req_uid); job_id = NO_VAL; rc = ESLURM_INVALID_JOB_ID; } } } resp->job_id = job_id; resp->node_name = xstrdup(conf->node_name); resp_msg.msg_type = RESPONSE_NETWORK_CALLERID; resp_msg.data = resp; slurm_send_node_msg(msg->conn_fd, &resp_msg); slurm_free_network_callerid_resp(resp); return rc; } static int _rpc_list_pids(slurm_msg_t *msg) { job_step_id_msg_t *req = (job_step_id_msg_t *)msg->data; slurm_msg_t resp_msg; job_step_pids_t *resp = NULL; int fd; uid_t req_uid; uid_t job_uid; uint16_t protocol_version = 0; debug3("Entering _rpc_list_pids"); /* step completion messages are only allowed from other slurmstepd, * so only root or SlurmUser is allowed here */ req_uid = g_slurm_auth_get_uid(msg->auth_cred, conf->auth_info); job_uid = _get_job_uid(req->job_id); if ((int)job_uid < 0) { error("stat_pid for invalid job_id: %u", req->job_id); if (msg->conn_fd >= 0) slurm_send_rc_msg(msg, ESLURM_INVALID_JOB_ID); return ESLURM_INVALID_JOB_ID; } /* * check that requesting user ID is the SLURM UID or root */ if ((req_uid != job_uid) && (!_slurm_authorized_user(req_uid))) { error("stat_pid from uid %ld for job %u " "owned by uid %ld", (long) req_uid, req->job_id, (long) job_uid); if (msg->conn_fd >= 0) { slurm_send_rc_msg(msg, ESLURM_USER_ID_MISSING); return ESLURM_USER_ID_MISSING;/* or bad in this case */ } } resp = 
xmalloc(sizeof(job_step_pids_t)); slurm_msg_t_copy(&resp_msg, msg); resp->node_name = xstrdup(conf->node_name); resp->pid_cnt = 0; resp->pid = NULL; fd = stepd_connect(conf->spooldir, conf->node_name, req->job_id, req->step_id, &protocol_version); if (fd == -1) { error("stepd_connect to %u.%u failed: %m", req->job_id, req->step_id); slurm_send_rc_msg(msg, ESLURM_INVALID_JOB_ID); slurm_free_job_step_pids(resp); return ESLURM_INVALID_JOB_ID; } if (stepd_list_pids(fd, protocol_version, &resp->pid, &resp->pid_cnt) == SLURM_ERROR) { debug("No pids for nonexistent job %u.%u requested", req->job_id, req->step_id); } close(fd); resp_msg.msg_type = RESPONSE_JOB_STEP_PIDS; resp_msg.data = resp; slurm_send_node_msg(msg->conn_fd, &resp_msg); slurm_free_job_step_pids(resp); return SLURM_SUCCESS; } /* * For the specified job_id: reply to slurmctld, * sleep(configured kill_wait), then send SIGKILL */ static void _rpc_timelimit(slurm_msg_t *msg) { uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, conf->auth_info); kill_job_msg_t *req = msg->data; int nsteps, rc; if (!_slurm_authorized_user(uid)) { error ("Security violation: rpc_timelimit req from uid %d", uid); slurm_send_rc_msg(msg, ESLURM_USER_ID_MISSING); return; } /* * Indicate to slurmctld that we've received the message */ slurm_send_rc_msg(msg, SLURM_SUCCESS); slurm_close(msg->conn_fd); msg->conn_fd = -1; if (req->step_id != NO_VAL) { slurm_ctl_conf_t *cf; int delay; /* A jobstep has timed out: * - send the container a SIG_TIME_LIMIT or SIG_PREEMPTED * to log the event * - send a SIGCONT to resume any suspended tasks * - send a SIGTERM to begin termination * - sleep KILL_WAIT * - send a SIGKILL to clean up */ if (msg->msg_type == REQUEST_KILL_TIMELIMIT) { rc = _signal_jobstep(req->job_id, req->step_id, uid, SIG_TIME_LIMIT); } else { rc = _signal_jobstep(req->job_id, req->step_id, uid, SIG_PREEMPTED); } if (rc != SLURM_SUCCESS) return; rc = _signal_jobstep(req->job_id, req->step_id, uid, SIGCONT); if (rc != SLURM_SUCCESS) 
return; rc = _signal_jobstep(req->job_id, req->step_id, uid, SIGTERM); if (rc != SLURM_SUCCESS) return; cf = slurm_conf_lock(); delay = MAX(cf->kill_wait, 5); slurm_conf_unlock(); sleep(delay); _signal_jobstep(req->job_id, req->step_id, uid, SIGKILL); return; } if (msg->msg_type == REQUEST_KILL_TIMELIMIT) _kill_all_active_steps(req->job_id, SIG_TIME_LIMIT, true); else /* (msg->type == REQUEST_KILL_PREEMPTED) */ _kill_all_active_steps(req->job_id, SIG_PREEMPTED, true); nsteps = _kill_all_active_steps(req->job_id, SIGTERM, false); verbose( "Job %u: timeout: sent SIGTERM to %d active steps", req->job_id, nsteps ); /* Revoke credential, send SIGKILL, run epilog, etc. */ _rpc_terminate_job(msg); } static void _rpc_pid2jid(slurm_msg_t *msg) { job_id_request_msg_t *req = (job_id_request_msg_t *) msg->data; slurm_msg_t resp_msg; job_id_response_msg_t resp; bool found = false; List steps; ListIterator i; step_loc_t *stepd; steps = stepd_available(conf->spooldir, conf->node_name); i = list_iterator_create(steps); while ((stepd = list_next(i))) { int fd; fd = stepd_connect(stepd->directory, stepd->nodename, stepd->jobid, stepd->stepid, &stepd->protocol_version); if (fd == -1) continue; if (stepd_pid_in_container( fd, stepd->protocol_version, req->job_pid) || req->job_pid == stepd_daemon_pid( fd, stepd->protocol_version)) { slurm_msg_t_copy(&resp_msg, msg); resp.job_id = stepd->jobid; resp.return_code = SLURM_SUCCESS; found = true; close(fd); break; } close(fd); } list_iterator_destroy(i); FREE_NULL_LIST(steps); if (found) { debug3("_rpc_pid2jid: pid(%u) found in %u", req->job_pid, resp.job_id); resp_msg.address = msg->address; resp_msg.msg_type = RESPONSE_JOB_ID; resp_msg.data = &resp; slurm_send_node_msg(msg->conn_fd, &resp_msg); } else { debug3("_rpc_pid2jid: pid(%u) not found", req->job_pid); slurm_send_rc_msg(msg, ESLURM_INVALID_JOB_ID); } } /* Validate sbcast credential. 
 * NOTE: We can only perform the full credential validation once with
 * Munge without generating a credential replay error
 * RET SLURM_SUCCESS or an error code */
static int _valid_sbcast_cred(file_bcast_msg_t *req, uid_t req_uid,
			      uint16_t block_no, uint32_t *job_id)
{
	int rc = SLURM_SUCCESS;
	char *nodes = NULL;
	hostset_t hset = NULL;

	*job_id = NO_VAL;
	/* Decode and verify the sbcast credential; on success "nodes" is
	 * set to the hostlist the credential covers and *job_id is filled
	 * in from the credential. */
	rc = extract_sbcast_cred(conf->vctx, req->cred, block_no,
				 job_id, &nodes);
	if (rc != 0) {
		error("Security violation: Invalid sbcast_cred from uid %d",
		      req_uid);
		return ESLURMD_INVALID_JOB_CREDENTIAL;
	}

	/* The credential must list this node as a valid destination */
	if (!(hset = hostset_create(nodes))) {
		error("Unable to parse sbcast_cred hostlist %s", nodes);
		rc = ESLURMD_INVALID_JOB_CREDENTIAL;
	} else if (!hostset_within(hset, conf->node_name)) {
		error("Security violation: sbcast_cred from %d has "
		      "bad hostset %s", req_uid, nodes);
		rc = ESLURMD_INVALID_JOB_CREDENTIAL;
	}
	if (hset)
		hostset_destroy(hset);
	xfree(nodes);

	/* print_sbcast_cred(req->cred); */

	return rc;
}

/* Readers/writer lock protecting file_bcast_list.
 * Acquire a shared (read) lock.  Waiting writers (fb_write_wait_lock)
 * take priority over new readers to avoid writer starvation. */
static void _fb_rdlock(void)
{
	slurm_mutex_lock(&file_bcast_mutex);
	while (1) {
		if ((fb_write_wait_lock == 0) && (fb_write_lock == 0)) {
			fb_read_lock++;
			break;
		} else {
			/* wait for state change and retry */
			pthread_cond_wait(&file_bcast_cond,
					  &file_bcast_mutex);
		}
	}
	slurm_mutex_unlock(&file_bcast_mutex);
}

/* Release a shared (read) lock and wake any waiting threads. */
static void _fb_rdunlock(void)
{
	slurm_mutex_lock(&file_bcast_mutex);
	fb_read_lock--;
	pthread_cond_broadcast(&file_bcast_cond);
	slurm_mutex_unlock(&file_bcast_mutex);
}

/* Acquire the exclusive (write) lock.  Registers intent first
 * (fb_write_wait_lock) so new readers are held off, then waits until
 * all readers and any current writer have drained. */
static void _fb_wrlock(void)
{
	slurm_mutex_lock(&file_bcast_mutex);
	fb_write_wait_lock++;
	while (1) {
		if ((fb_read_lock == 0) && (fb_write_lock == 0)) {
			fb_write_lock++;
			fb_write_wait_lock--;
			break;
		} else {
			/* wait for state change and retry */
			pthread_cond_wait(&file_bcast_cond,
					  &file_bcast_mutex);
		}
	}
	slurm_mutex_unlock(&file_bcast_mutex);
}

/* Release the exclusive (write) lock and wake any waiting threads. */
static void _fb_wrunlock(void)
{
	slurm_mutex_lock(&file_bcast_mutex);
	fb_write_lock--;
	pthread_cond_broadcast(&file_bcast_cond);
	slurm_mutex_unlock(&file_bcast_mutex);
}

static int _bcast_find_in_list(void *x,
void *y) { file_bcast_info_t *info = (file_bcast_info_t *)x; file_bcast_info_t *key = (file_bcast_info_t *)y; /* uid, job_id, and fname must match */ return ((info->uid == key->uid) && (info->job_id == key->job_id) && (!xstrcmp(info->fname, key->fname))); } /* must have read lock */ static file_bcast_info_t *_bcast_lookup_file(file_bcast_info_t *key) { return list_find_first(file_bcast_list, _bcast_find_in_list, key); } /* must not have read lock, will get write lock */ static void _file_bcast_close_file(file_bcast_info_t *key) { _fb_wrlock(); list_delete_all(file_bcast_list, _bcast_find_in_list, key); _fb_wrunlock(); } static void _free_file_bcast_info_t(file_bcast_info_t *f) { xfree(f->fname); if (f->fd) close(f->fd); xfree(f); } static int _bcast_find_in_list_to_remove(void *x, void *y) { file_bcast_info_t *f = (file_bcast_info_t *)x; time_t *now = (time_t *) y; if (f->last_update + FILE_BCAST_TIMEOUT < *now) { error("Removing stalled file_bcast transfer from uid " "%u to file `%s`", f->uid, f->fname); return true; } return false; } /* remove transfers that have stalled */ static void _file_bcast_cleanup(void) { time_t now = time(NULL); _fb_wrlock(); list_delete_all(file_bcast_list, _bcast_find_in_list_to_remove, &now); _fb_wrunlock(); } void file_bcast_init(void) { /* skip locks during slurmd init */ file_bcast_list = list_create((ListDelF) _free_file_bcast_info_t); } void file_bcast_purge(void) { _fb_wrlock(); list_destroy(file_bcast_list); /* destroying list before exit, no need to unlock */ } static int _rpc_file_bcast(slurm_msg_t *msg) { int rc, offset, inx; file_bcast_info_t *file_info; file_bcast_msg_t *req = msg->data; file_bcast_info_t key; key.uid = g_slurm_auth_get_uid(msg->auth_cred, conf->auth_info); key.gid = g_slurm_auth_get_gid(msg->auth_cred, conf->auth_info); key.fname = req->fname; rc = _valid_sbcast_cred(req, key.uid, req->block_no, &key.job_id); if ((rc != SLURM_SUCCESS) && !_slurm_authorized_user(key.uid)) return rc; #if 0 
info("last_block=%u force=%u modes=%o", req->last_block, req->force, req->modes); info("uid=%u gid=%u atime=%lu mtime=%lu block_len[0]=%u", req->uid, req->gid, req->atime, req->mtime, req->block_len); #if 0 /* when the file being transferred is binary, the following line * can break the terminal output for slurmd */ info("req->block[0]=%s, @ %lu", \ req->block[0], (unsigned long) &req->block); #endif #endif if (req->block_no == 1) { info("sbcast req_uid=%u job_id=%u fname=%s block_no=%u", key.uid, key.job_id, key.fname, req->block_no); } else { debug("sbcast req_uid=%u job_id=%u fname=%s block_no=%u", key.uid, key.job_id, key.fname, req->block_no); } /* first block must register the file and open fd/mmap */ if (req->block_no == 1) { if ((rc = _file_bcast_register_file(msg, &key))) return rc; } _fb_rdlock(); if (!(file_info = _bcast_lookup_file(&key))) { error("No registered file transfer for uid %u file `%s`.", key.uid, key.fname); _fb_rdunlock(); return SLURM_ERROR; } /* now decompress file */ if (bcast_decompress_data(req) < 0) { error("sbcast: data decompression error for UID %u, file %s", key.uid, key.fname); _fb_rdunlock(); return SLURM_FAILURE; } offset = 0; while (req->block_len - offset) { inx = write(file_info->fd, &req->block[offset], (req->block_len - offset)); if (inx == -1) { if ((errno == EINTR) || (errno == EAGAIN)) continue; error("sbcast: uid:%u can't write `%s`: %m", key.uid, key.fname); _fb_rdunlock(); return SLURM_FAILURE; } offset += inx; } file_info->last_update = time(NULL); if (req->last_block && fchmod(file_info->fd, (req->modes & 0777))) { error("sbcast: uid:%u can't chmod `%s`: %m", key.uid, key.fname); } if (req->last_block && fchown(file_info->fd, key.uid, key.gid)) { error("sbcast: uid:%u gid:%u can't chown `%s`: %m", key.uid, key.gid, key.fname); } if (req->last_block && req->atime) { struct utimbuf time_buf; time_buf.actime = req->atime; time_buf.modtime = req->mtime; if (utime(key.fname, &time_buf)) { error("sbcast: uid:%u can't 
utime `%s`: %m", key.uid, key.fname); } } _fb_rdunlock(); if (req->last_block) { _file_bcast_close_file(&key); } return SLURM_SUCCESS; } /* pass an open file descriptor back to the parent process */ static void _send_back_fd(int socket, int fd) { struct msghdr msg = { 0 }; struct cmsghdr *cmsg; char buf[CMSG_SPACE(sizeof(fd))]; memset(buf, '\0', sizeof(buf)); msg.msg_iov = NULL; msg.msg_iovlen = 0; msg.msg_control = buf; msg.msg_controllen = sizeof(buf); cmsg = CMSG_FIRSTHDR(&msg); cmsg->cmsg_level = SOL_SOCKET; cmsg->cmsg_type = SCM_RIGHTS; cmsg->cmsg_len = CMSG_LEN(sizeof(fd)); memmove(CMSG_DATA(cmsg), &fd, sizeof(fd)); msg.msg_controllen = cmsg->cmsg_len; if (sendmsg(socket, &msg, 0) < 0) error("%s: failed to send fd: %m", __func__); } /* receive an open file descriptor from fork()'d child over unix socket */ static int _receive_fd(int socket) { struct msghdr msg = {0}; struct cmsghdr *cmsg; int fd; msg.msg_iov = NULL; msg.msg_iovlen = 0; char c_buffer[256]; msg.msg_control = c_buffer; msg.msg_controllen = sizeof(c_buffer); if (recvmsg(socket, &msg, 0) < 0) { error("%s: failed to receive fd: %m", __func__); return -1; } cmsg = CMSG_FIRSTHDR(&msg); memmove(&fd, CMSG_DATA(cmsg), sizeof(fd)); return fd; } static int _file_bcast_register_file(slurm_msg_t *msg, file_bcast_info_t *key) { file_bcast_msg_t *req = msg->data; int fd, flags, rc; int pipe[2]; gids_t *gids; pid_t child; file_bcast_info_t *file_info; if (!(gids = _gids_cache_lookup(req->user_name, key->gid))) { error("sbcast: gids_cache_lookup for %s failed", req->user_name); return SLURM_ERROR; } if ((rc = container_g_create(key->job_id))) { error("sbcast: container_g_create(%u): %m", key->job_id); _dealloc_gids(gids); return rc; } /* child process will setuid to the user, register the process * with the container, and open the file for us. 
*/ if (socketpair(AF_UNIX, SOCK_DGRAM, 0, pipe) != 0) { error("%s: Failed to open pipe: %m", __func__); _dealloc_gids(gids); return SLURM_ERROR; } child = fork(); if (child == -1) { error("sbcast: fork failure"); _dealloc_gids(gids); close(pipe[0]); close(pipe[1]); return errno; } else if (child > 0) { /* get fd back from pipe */ close(pipe[0]); waitpid(child, &rc, 0); _dealloc_gids(gids); if (rc) { close(pipe[1]); return WEXITSTATUS(rc); } fd = _receive_fd(pipe[1]); close(pipe[1]); file_info = xmalloc(sizeof(file_bcast_info_t)); file_info->fd = fd; file_info->fname = xstrdup(req->fname); file_info->uid = key->uid; file_info->gid = key->gid; file_info->job_id = key->job_id; file_info->start_time = time(NULL); //TODO: mmap the file here _fb_wrlock(); list_append(file_bcast_list, file_info); _fb_wrunlock(); return SLURM_SUCCESS; } /* child process below here */ close(pipe[1]); /* container_g_add_pid needs to be called in the forked process part of the fork to avoid a race condition where if this process makes a file or detacts itself from a child before we add the pid to the container in the parent of the fork. */ if (container_g_add_pid(key->job_id, getpid(), key->uid)) { error("container_g_add_pid(%u): %m", key->job_id); exit(SLURM_ERROR); } /* The child actually performs the I/O and exits with * a return code, do not return! */ /*********************************************************************\ * NOTE: It would be best to do an exec() immediately after the fork() * in order to help prevent a possible deadlock in the child process * due to locks being set at the time of the fork and being freed by * the parent process, but not freed by the child process. Performing * the work inline is done for simplicity. Note that the logging * performed by error() should be safe due to the use of * atfork_install_handlers() as defined in src/common/log.c. * Change the code below with caution. 
\*********************************************************************/ if (setgroups(gids->ngids, gids->gids) < 0) { error("sbcast: uid: %u setgroups failed: %m", key->uid); exit(errno); } _dealloc_gids(gids); if (setgid(key->gid) < 0) { error("sbcast: uid:%u setgid(%u): %m", key->uid, key->gid); exit(errno); } if (setuid(key->uid) < 0) { error("sbcast: getuid(%u): %m", key->uid); exit(errno); } flags = O_WRONLY | O_CREAT; if (req->force) flags |= O_TRUNC; else flags |= O_EXCL; fd = open(key->fname, flags, 0700); if (fd == -1) { error("sbcast: uid:%u can't open `%s`: %m", key->uid, key->fname); exit(errno); } _send_back_fd(pipe[0], fd); close(fd); exit(SLURM_SUCCESS); } static void _rpc_reattach_tasks(slurm_msg_t *msg) { reattach_tasks_request_msg_t *req = msg->data; reattach_tasks_response_msg_t *resp = xmalloc(sizeof(reattach_tasks_response_msg_t)); slurm_msg_t resp_msg; int rc = SLURM_SUCCESS; uint16_t port = 0; char host[MAXHOSTNAMELEN]; slurm_addr_t ioaddr; void *job_cred_sig; uint32_t len; int fd; uid_t req_uid; slurm_addr_t *cli = &msg->orig_addr; uint32_t nodeid = (uint32_t)NO_VAL; uid_t uid = -1; uint16_t protocol_version; slurm_msg_t_copy(&resp_msg, msg); fd = stepd_connect(conf->spooldir, conf->node_name, req->job_id, req->job_step_id, &protocol_version); if (fd == -1) { debug("reattach for nonexistent job %u.%u stepd_connect" " failed: %m", req->job_id, req->job_step_id); rc = ESLURM_INVALID_JOB_ID; goto done; } if ((int)(uid = stepd_get_uid(fd, protocol_version)) < 0) { debug("_rpc_reattach_tasks couldn't read from the " "step %u.%u: %m", req->job_id, req->job_step_id); rc = ESLURM_INVALID_JOB_ID; goto done2; } nodeid = stepd_get_nodeid(fd, protocol_version); debug2("_rpc_reattach_tasks: nodeid %d in the job step", nodeid); req_uid = g_slurm_auth_get_uid(msg->auth_cred, conf->auth_info); if ((req_uid != uid) && (!_slurm_authorized_user(req_uid))) { error("uid %ld attempt to attach to job %u.%u owned by %ld", (long) req_uid, req->job_id, 
req->job_step_id, (long) uid); rc = EPERM; goto done2; } memset(resp, 0, sizeof(reattach_tasks_response_msg_t)); slurm_get_ip_str(cli, &port, host, sizeof(host)); /* * Set response address by resp_port and client address */ memcpy(&resp_msg.address, cli, sizeof(slurm_addr_t)); if (req->num_resp_port > 0) { port = req->resp_port[nodeid % req->num_resp_port]; slurm_set_addr(&resp_msg.address, port, NULL); } /* * Set IO address by io_port and client address */ memcpy(&ioaddr, cli, sizeof(slurm_addr_t)); if (req->num_io_port > 0) { port = req->io_port[nodeid % req->num_io_port]; slurm_set_addr(&ioaddr, port, NULL); } /* * Get the signature of the job credential. slurmstepd will need * this to prove its identity when it connects back to srun. */ slurm_cred_get_signature(req->cred, (char **)(&job_cred_sig), &len); if (len != SLURM_IO_KEY_SIZE) { error("Incorrect slurm cred signature length"); goto done2; } resp->gtids = NULL; resp->local_pids = NULL; /* NOTE: We need to use the protocol_version from * sattach here since responses will be sent back to it. */ if (msg->protocol_version < protocol_version) protocol_version = msg->protocol_version; /* Following call fills in gtids and local_pids when successful. 
*/ rc = stepd_attach(fd, protocol_version, &ioaddr, &resp_msg.address, job_cred_sig, resp); if (rc != SLURM_SUCCESS) { debug2("stepd_attach call failed"); goto done2; } done2: close(fd); done: debug2("update step addrs rc = %d", rc); resp_msg.data = resp; resp_msg.msg_type = RESPONSE_REATTACH_TASKS; resp->node_name = xstrdup(conf->node_name); resp->return_code = rc; debug2("node %s sending rc = %d", conf->node_name, rc); slurm_send_node_msg(msg->conn_fd, &resp_msg); slurm_free_reattach_tasks_response_msg(resp); } static uid_t _get_job_uid(uint32_t jobid) { List steps; ListIterator i; step_loc_t *stepd; uid_t uid = -1; int fd; steps = stepd_available(conf->spooldir, conf->node_name); i = list_iterator_create(steps); while ((stepd = list_next(i))) { if (stepd->jobid != jobid) { /* multiple jobs expected on shared nodes */ continue; } fd = stepd_connect(stepd->directory, stepd->nodename, stepd->jobid, stepd->stepid, &stepd->protocol_version); if (fd == -1) { debug3("Unable to connect to step %u.%u", stepd->jobid, stepd->stepid); continue; } uid = stepd_get_uid(fd, stepd->protocol_version); close(fd); if ((int)uid < 0) { debug("stepd_get_uid failed %u.%u: %m", stepd->jobid, stepd->stepid); continue; } break; } list_iterator_destroy(i); FREE_NULL_LIST(steps); return uid; } /* * _kill_all_active_steps - signals the container of all steps of a job * jobid IN - id of job to signal * sig IN - signal to send * batch IN - if true signal batch script, otherwise skip it * RET count of signaled job steps (plus batch script, if applicable) */ static int _kill_all_active_steps(uint32_t jobid, int sig, bool batch) { List steps; ListIterator i; step_loc_t *stepd; int step_cnt = 0; int fd; steps = stepd_available(conf->spooldir, conf->node_name); i = list_iterator_create(steps); while ((stepd = list_next(i))) { if (stepd->jobid != jobid) { /* multiple jobs expected on shared nodes */ debug3("Step from other job: jobid=%u (this jobid=%u)", stepd->jobid, jobid); continue; } if 
((stepd->stepid == SLURM_BATCH_SCRIPT) && (!batch)) continue; step_cnt++; fd = stepd_connect(stepd->directory, stepd->nodename, stepd->jobid, stepd->stepid, &stepd->protocol_version); if (fd == -1) { debug3("Unable to connect to step %u.%u", stepd->jobid, stepd->stepid); continue; } debug2("container signal %d to job %u.%u", sig, jobid, stepd->stepid); if (stepd_signal_container( fd, stepd->protocol_version, sig) < 0) debug("kill jobid=%u failed: %m", jobid); close(fd); } list_iterator_destroy(i); FREE_NULL_LIST(steps); if (step_cnt == 0) debug2("No steps in jobid %u to send signal %d", jobid, sig); return step_cnt; } /* * _terminate_all_steps - signals the container of all steps of a job * jobid IN - id of job to signal * batch IN - if true signal batch script, otherwise skip it * RET count of signaled job steps (plus batch script, if applicable) */ static int _terminate_all_steps(uint32_t jobid, bool batch) { List steps; ListIterator i; step_loc_t *stepd; int step_cnt = 0; int fd; steps = stepd_available(conf->spooldir, conf->node_name); i = list_iterator_create(steps); while ((stepd = list_next(i))) { if (stepd->jobid != jobid) { /* multiple jobs expected on shared nodes */ debug3("Step from other job: jobid=%u (this jobid=%u)", stepd->jobid, jobid); continue; } if ((stepd->stepid == SLURM_BATCH_SCRIPT) && (!batch)) continue; step_cnt++; fd = stepd_connect(stepd->directory, stepd->nodename, stepd->jobid, stepd->stepid, &stepd->protocol_version); if (fd == -1) { debug3("Unable to connect to step %u.%u", stepd->jobid, stepd->stepid); continue; } debug2("terminate job step %u.%u", jobid, stepd->stepid); if (stepd_terminate(fd, stepd->protocol_version) < 0) debug("kill jobid=%u.%u failed: %m", jobid, stepd->stepid); close(fd); } list_iterator_destroy(i); FREE_NULL_LIST(steps); if (step_cnt == 0) debug2("No steps in job %u to terminate", jobid); return step_cnt; } static bool _job_still_running(uint32_t job_id) { bool retval = false; List steps; ListIterator i; 
step_loc_t *s = NULL; steps = stepd_available(conf->spooldir, conf->node_name); i = list_iterator_create(steps); while ((s = list_next(i))) { if (s->jobid == job_id) { int fd; fd = stepd_connect(s->directory, s->nodename, s->jobid, s->stepid, &s->protocol_version); if (fd == -1) continue; if (stepd_state(fd, s->protocol_version) != SLURMSTEPD_NOT_RUNNING) { retval = true; close(fd); break; } close(fd); } } list_iterator_destroy(i); FREE_NULL_LIST(steps); return retval; } /* * Wait until all job steps are in SLURMSTEPD_NOT_RUNNING state. * This indicates that switch_g_job_postfini has completed and * freed the switch windows (as needed only for Federation switch). */ static void _wait_state_completed(uint32_t jobid, int max_delay) { int i; for (i=0; i<max_delay; i++) { if (_steps_completed_now(jobid)) break; sleep(1); } if (i >= max_delay) error("timed out waiting for job %u to complete", jobid); } static bool _steps_completed_now(uint32_t jobid) { List steps; ListIterator i; step_loc_t *stepd; bool rc = true; steps = stepd_available(conf->spooldir, conf->node_name); i = list_iterator_create(steps); while ((stepd = list_next(i))) { if (stepd->jobid == jobid) { int fd; fd = stepd_connect(stepd->directory, stepd->nodename, stepd->jobid, stepd->stepid, &stepd->protocol_version); if (fd == -1) continue; if (stepd_state(fd, stepd->protocol_version) != SLURMSTEPD_NOT_RUNNING) { rc = false; close(fd); break; } close(fd); } } list_iterator_destroy(i); FREE_NULL_LIST(steps); return rc; } static void _epilog_complete_msg_setup( slurm_msg_t *msg, epilog_complete_msg_t *req, uint32_t jobid, int rc) { slurm_msg_t_init(msg); memset(req, 0, sizeof(epilog_complete_msg_t)); req->job_id = jobid; req->return_code = rc; req->node_name = conf->node_name; msg->msg_type = MESSAGE_EPILOG_COMPLETE; msg->data = req; } /* * Send epilog complete message to currently active controller. * If enabled, use message aggregation. 
* Returns SLURM_SUCCESS if message sent successfully, * SLURM_FAILURE if epilog complete message fails to be sent. */ static int _epilog_complete(uint32_t jobid, int rc) { int ret = SLURM_SUCCESS; if (conf->msg_aggr_window_msgs > 1) { /* message aggregation is enabled */ slurm_msg_t *msg = xmalloc(sizeof(slurm_msg_t)); epilog_complete_msg_t *req = xmalloc(sizeof(epilog_complete_msg_t)); _epilog_complete_msg_setup(msg, req, jobid, rc); /* we need to copy this symbol */ req->node_name = xstrdup(conf->node_name); msg_aggr_add_msg(msg, 0, NULL); } else { slurm_msg_t msg; epilog_complete_msg_t req; _epilog_complete_msg_setup(&msg, &req, jobid, rc); /* Note: No return code to message, slurmctld will resend * TERMINATE_JOB request if message send fails */ if (slurm_send_only_controller_msg(&msg) < 0) { error("Unable to send epilog complete message: %m"); ret = SLURM_ERROR; } else { debug("Job %u: sent epilog complete msg: rc = %d", jobid, rc); } } return ret; } /* * Send a signal through the appropriate slurmstepds for each job step * belonging to a given job allocation. */ static void _rpc_signal_job(slurm_msg_t *msg) { signal_job_msg_t *req = msg->data; uid_t req_uid = g_slurm_auth_get_uid(msg->auth_cred, conf->auth_info); uid_t job_uid; List steps; ListIterator i; step_loc_t *stepd = NULL; int step_cnt = 0; int fd; debug("_rpc_signal_job, uid = %d, signal = %d", req_uid, req->signal); job_uid = _get_job_uid(req->job_id); if ((int)job_uid < 0) goto no_job; /* * check that requesting user ID is the SLURM UID or root */ if ((req_uid != job_uid) && (!_slurm_authorized_user(req_uid))) { error("Security violation: kill_job(%u) from uid %d", req->job_id, req_uid); if (msg->conn_fd >= 0) { slurm_send_rc_msg(msg, ESLURM_USER_ID_MISSING); if (slurm_close(msg->conn_fd) < 0) error ("_rpc_signal_job: close(%d): %m", msg->conn_fd); msg->conn_fd = -1; } return; } /* * Loop through all job steps for this job and signal the * step's process group through the slurmstepd. 
*/ steps = stepd_available(conf->spooldir, conf->node_name); i = list_iterator_create(steps); while ((stepd = list_next(i))) { if (stepd->jobid != req->job_id) { /* multiple jobs expected on shared nodes */ debug3("Step from other job: jobid=%u (this jobid=%u)", stepd->jobid, req->job_id); continue; } if (stepd->stepid == SLURM_BATCH_SCRIPT) { debug2("batch script itself not signalled"); continue; } step_cnt++; fd = stepd_connect(stepd->directory, stepd->nodename, stepd->jobid, stepd->stepid, &stepd->protocol_version); if (fd == -1) { debug3("Unable to connect to step %u.%u", stepd->jobid, stepd->stepid); continue; } debug2(" signal %d to job %u.%u", req->signal, stepd->jobid, stepd->stepid); if (stepd_signal_container( fd, stepd->protocol_version, req->signal) < 0) debug("signal jobid=%u failed: %m", stepd->jobid); close(fd); } list_iterator_destroy(i); FREE_NULL_LIST(steps); no_job: if (step_cnt == 0) { debug2("No steps in jobid %u to send signal %d", req->job_id, req->signal); } /* * At this point, if connection still open, we send controller * a "success" reply to indicate that we've recvd the msg. 
*/ if (msg->conn_fd >= 0) { slurm_send_rc_msg(msg, SLURM_SUCCESS); if (slurm_close(msg->conn_fd) < 0) error ("_rpc_signal_job: close(%d): %m", msg->conn_fd); msg->conn_fd = -1; } } /* if a lock is granted to the job then return 1; else return 0 if * the lock for the job is already taken or there's no more locks */ static int _get_suspend_job_lock(uint32_t job_id) { static bool logged = false; int i, empty_loc = -1, rc = 0; slurm_mutex_lock(&suspend_mutex); for (i = 0; i < job_suspend_size; i++) { if (job_suspend_array[i] == 0) { empty_loc = i; continue; } if (job_suspend_array[i] == job_id) { /* another thread already a lock for this job ID */ slurm_mutex_unlock(&suspend_mutex); return rc; } } if (empty_loc != -1) { /* nobody has the lock and here's an available used lock */ job_suspend_array[empty_loc] = job_id; rc = 1; } else if (job_suspend_size < NUM_PARALLEL_SUSP_JOBS) { /* a new lock is available */ job_suspend_array[job_suspend_size++] = job_id; rc = 1; } else if (!logged) { error("Simultaneous job suspend/resume limit reached (%d). " "Configure SchedulerTimeSlice higher.", NUM_PARALLEL_SUSP_JOBS); logged = true; } slurm_mutex_unlock(&suspend_mutex); return rc; } static void _unlock_suspend_job(uint32_t job_id) { int i; slurm_mutex_lock(&suspend_mutex); for (i = 0; i < job_suspend_size; i++) { if (job_suspend_array[i] == job_id) job_suspend_array[i] = 0; } slurm_mutex_unlock(&suspend_mutex); } /* Add record for every launched job so we know they are ready for suspend */ extern void record_launched_jobs(void) { List steps; ListIterator i; step_loc_t *stepd; steps = stepd_available(conf->spooldir, conf->node_name); i = list_iterator_create(steps); while ((stepd = list_next(i))) { _launch_complete_add(stepd->jobid); } list_iterator_destroy(i); FREE_NULL_LIST(steps); } /* * Send a job suspend/resume request through the appropriate slurmstepds for * each job step belonging to a given job allocation. 
*/ static void _rpc_suspend_job(slurm_msg_t *msg) { int time_slice = -1; suspend_int_msg_t *req = msg->data; uid_t req_uid = g_slurm_auth_get_uid(msg->auth_cred, conf->auth_info); List steps; ListIterator i; step_loc_t *stepd; int step_cnt = 0; int rc = SLURM_SUCCESS; DEF_TIMERS; if (time_slice == -1) time_slice = slurm_get_time_slice(); if ((req->op != SUSPEND_JOB) && (req->op != RESUME_JOB)) { error("REQUEST_SUSPEND_INT: bad op code %u", req->op); rc = ESLURM_NOT_SUPPORTED; } /* * check that requesting user ID is the SLURM UID or root */ if (!_slurm_authorized_user(req_uid)) { error("Security violation: suspend_job(%u) from uid %d", req->job_id, req_uid); rc = ESLURM_USER_ID_MISSING; } /* send a response now, which will include any errors * detected with the request */ if (msg->conn_fd >= 0) { slurm_send_rc_msg(msg, rc); if (slurm_close(msg->conn_fd) < 0) error("_rpc_suspend_job: close(%d): %m", msg->conn_fd); msg->conn_fd = -1; } if (rc != SLURM_SUCCESS) return; /* now we can focus on performing the requested action, * which could take a few seconds to complete */ debug("_rpc_suspend_job jobid=%u uid=%d action=%s", req->job_id, req_uid, req->op == SUSPEND_JOB ? "suspend" : "resume"); /* Try to get a thread lock for this job. If the lock * is not available then sleep and try again */ while (!_get_suspend_job_lock(req->job_id)) { debug3("suspend lock sleep for %u", req->job_id); usleep(10000); } START_TIMER; /* Defer suspend until job prolog and launch complete */ if (req->op == SUSPEND_JOB) _launch_complete_wait(req->job_id); if ((req->op == SUSPEND_JOB) && (req->indf_susp)) switch_g_job_suspend(req->switch_info, 5); /* Release or reclaim resources bound to these tasks (task affinity) */ if (req->op == SUSPEND_JOB) { (void) task_g_slurmd_suspend_job(req->job_id); } else { (void) task_g_slurmd_resume_job(req->job_id); } /* * Loop through all job steps and call stepd_suspend or stepd_resume * as appropriate. 
 * Since the "suspend" action may contain a sleep (if the step launch is
 * still in progress), suspend multiple job steps in parallel, up to
 * NUM_PARALLEL_SUSP_STEPS connections at a time.
 */
	steps = stepd_available(conf->spooldir, conf->node_name);
	i = list_iterator_create(steps);
	while (1) {
		int x, fdi, fd[NUM_PARALLEL_SUSP_STEPS];
		uint16_t protocol_version[NUM_PARALLEL_SUSP_STEPS];

		/* Gather a batch of stepd connections for this job */
		fdi = 0;
		while ((stepd = list_next(i))) {
			if (stepd->jobid != req->job_id) {
				/* multiple jobs expected on shared nodes */
				debug3("Step from other job: jobid=%u "
				       "(this jobid=%u)",
				       stepd->jobid, req->job_id);
				continue;
			}
			step_cnt++;

			fd[fdi] = stepd_connect(stepd->directory,
						stepd->nodename,
						stepd->jobid, stepd->stepid,
						&protocol_version[fdi]);
			if (fd[fdi] == -1) {
				/* Step may have exited between listing and
				 * connect; just skip it. */
				debug3("Unable to connect to step %u.%u",
				       stepd->jobid, stepd->stepid);
				continue;
			}

			fdi++;
			if (fdi >= NUM_PARALLEL_SUSP_STEPS)
				break;
		}
		/* check for open connections */
		if (fdi == 0)
			break;

		if (req->op == SUSPEND_JOB) {
			int susp_fail_count = 0;
			/* The suspend RPCs are processed in parallel for
			 * every step in the job: phase 0 issues the request,
			 * phase 1 collects the result. */
			for (x = 0; x < fdi; x++) {
				(void) stepd_suspend(fd[x],
						     protocol_version[x],
						     req, 0);
			}
			for (x = 0; x < fdi; x++) {
				if (stepd_suspend(fd[x],
						  protocol_version[x],
						  req, 1) < 0) {
					susp_fail_count++;
				} else {
					close(fd[x]);
					fd[x] = -1;
				}
			}
			/* Suspend RPCs can fail at step startup, so retry
			 * once after a short delay for any step that did not
			 * acknowledge the first attempt. */
			if (susp_fail_count) {
				sleep(1);
				for (x = 0; x < fdi; x++) {
					if (fd[x] == -1)
						continue;
					(void) stepd_suspend(
						fd[x],
						protocol_version[x],
						req, 0);
					if (stepd_suspend(
						    fd[x],
						    protocol_version[x],
						    req, 1) >= 0)
						continue;
					debug("Suspend of job %u failed: %m",
					      req->job_id);
				}
			}
		} else {
			/* The resume RPCs are processed in parallel for
			 * every step in the job */
			for (x = 0; x < fdi; x++) {
				(void) stepd_resume(fd[x],
						    protocol_version[x],
						    req, 0);
			}
			for (x = 0; x < fdi; x++) {
				if (stepd_resume(fd[x],
						 protocol_version[x],
						 req, 1) < 0) {
					debug("Resume of job %u failed: %m",
					      req->job_id);
				}
			}
		}
		for (x = 0; x < fdi; x++) {
			/* fd may have been closed by stepd_suspend */
			if (fd[x] != -1)
				close(fd[x]);
		}
		/* check for no more jobs: a partial batch means the step
		 * iterator was exhausted */
		if (fdi < NUM_PARALLEL_SUSP_STEPS)
			break;
	}
	list_iterator_destroy(i);
	FREE_NULL_LIST(steps);

	if ((req->op == RESUME_JOB) && (req->indf_susp))
		switch_g_job_resume(req->switch_info, 5);

	_unlock_suspend_job(req->job_id);

	END_TIMER;
	/* Warn the admin if suspend/resume took longer than the gang
	 * scheduling time slice. */
	if (DELTA_TIMER >= (time_slice * 1000000)) {
		if (req->op == SUSPEND_JOB) {
			info("Suspend time for job_id %u was %s. "
			     "Configure SchedulerTimeSlice higher.",
			     req->job_id, TIME_STR);
		} else {
			info("Resume time for job_id %u was %s. "
			     "Configure SchedulerTimeSlice higher.",
			     req->job_id, TIME_STR);
		}
	}

	if (step_cnt == 0) {
		debug2("No steps in jobid %u to suspend/resume",
		       req->job_id);
	}
}

/* Job shouldn't even be running here, abort it immediately:
 * revoke credentials, signal all steps with SIG_ABORT, wait for user
 * processes to exit, then run the epilog. */
static void
_rpc_abort_job(slurm_msg_t *msg)
{
	kill_job_msg_t *req    = msg->data;
	uid_t           uid    = g_slurm_auth_get_uid(msg->auth_cred,
						      conf->auth_info);
	job_env_t       job_env;

	debug("_rpc_abort_job, uid = %d", uid);
	/*
	 * check that requesting user ID is the SLURM UID
	 */
	if (!_slurm_authorized_user(uid)) {
		error("Security violation: abort_job(%u) from uid %d",
		      req->job_id, uid);
		if (msg->conn_fd >= 0)
			slurm_send_rc_msg(msg, ESLURM_USER_ID_MISSING);
		return;
	}

	task_g_slurmd_release_resources(req->job_id);

	/*
	 * "revoke" all future credentials for this jobid
	 */
	if (slurm_cred_revoke(conf->vctx, req->job_id, req->time,
			      req->start_time) < 0) {
		debug("revoking cred for job %u: %m", req->job_id);
	} else {
		save_cred_state(conf->vctx);
		debug("credential for job %u revoked", req->job_id);
	}

	/*
	 * At this point, if connection still open, we send controller
	 * a "success" reply to indicate that we've recvd the msg.
	 */
	if (msg->conn_fd >= 0) {
		slurm_send_rc_msg(msg, SLURM_SUCCESS);
		if (slurm_close(msg->conn_fd) < 0)
			error ("rpc_abort_job: close(%d): %m", msg->conn_fd);
		msg->conn_fd = -1;
	}

	if (_kill_all_active_steps(req->job_id, SIG_ABORT, true)) {
		/*
		 * Block until all user processes are complete.
		 */
		_pause_for_job_completion (req->job_id, req->nodes, 0);
	}

	/*
	 * Begin expiration period for cached information about job.
	 * If expiration period has already begun, then do not run
	 * the epilog again, as that script has already been executed
	 * for this job.
	 */
	if (slurm_cred_begin_expiration(conf->vctx, req->job_id) < 0) {
		debug("Not running epilog for jobid %d: %m", req->job_id);
		return;
	}

	save_cred_state(conf->vctx);

	memset(&job_env, 0, sizeof(job_env_t));

	job_env.jobid = req->job_id;
	job_env.node_list = req->nodes;
	job_env.spank_job_env = req->spank_job_env;
	job_env.spank_job_env_size = req->spank_job_env_size;
	job_env.uid = req->job_uid;

#if defined(HAVE_BG)
	select_g_select_jobinfo_get(req->select_jobinfo,
				    SELECT_JOBDATA_BLOCK_ID,
				    &job_env.resv_id);
#elif defined(HAVE_ALPS_CRAY)
	job_env.resv_id = select_g_select_jobinfo_xstrdup(req->select_jobinfo,
							  SELECT_PRINT_RESV_ID);
#endif
	_run_epilog(&job_env);

	if (container_g_delete(req->job_id))
		error("container_g_delete(%u): %m", req->job_id);
	_launch_complete_rm(req->job_id);
	xfree(job_env.resv_id);
}

/* This is a variant of _rpc_terminate_job for use with select/serial */
static void
_rpc_terminate_batch_job(uint32_t job_id, uint32_t user_id, char *node_name)
{
	int             rc     = SLURM_SUCCESS;
	int             nsteps = 0;
	int		delay;
	time_t		now = time(NULL);
	slurm_ctl_conf_t *cf;
	job_env_t job_env;

	task_g_slurmd_release_resources(job_id);

	/* Only one thread may wait on a given job's termination */
	if (_waiter_init(job_id) == SLURM_ERROR)
		return;

	/*
	 * "revoke" all future credentials for this jobid
	 */
	_note_batch_job_finished(job_id);
	if (slurm_cred_revoke(conf->vctx, job_id, now, now) < 0) {
		debug("revoking cred for job %u: %m", job_id);
	} else {
		save_cred_state(conf->vctx);
		debug("credential for job %u revoked", job_id);
	}

	/*
	 * Tasks might be stopped (possibly by a debugger)
	 * so send SIGCONT first.
	 */
	_kill_all_active_steps(job_id, SIGCONT, true);
	if (errno == ESLURMD_STEP_SUSPENDED) {
		/*
		 * If the job step is currently suspended, we don't
		 * bother with a "nice" termination.
		 */
		debug2("Job is currently suspended, terminating");
		nsteps = _terminate_all_steps(job_id, true);
	} else {
		nsteps = _kill_all_active_steps(job_id, SIGTERM, true);
	}

#ifndef HAVE_AIX
	/* Fast path: no active steps and no epilog configured, so the job
	 * is already complete; just expire credentials and clean up. */
	if ((nsteps == 0) && !conf->epilog) {
		slurm_cred_begin_expiration(conf->vctx, job_id);
		save_cred_state(conf->vctx);
		_waiter_complete(job_id);
		if (container_g_delete(job_id))
			error("container_g_delete(%u): %m", job_id);
		_launch_complete_rm(job_id);
		return;
	}
#endif
	/*
	 * Check for corpses
	 */
	cf = slurm_conf_lock();
	delay = MAX(cf->kill_wait, 5);
	slurm_conf_unlock();
	if (!_pause_for_job_completion(job_id, NULL, delay) &&
	    _terminate_all_steps(job_id, true) ) {
		/*
		 * Block until all user processes are complete.
		 */
		_pause_for_job_completion(job_id, NULL, 0);
	}

	/*
	 * Begin expiration period for cached information about job.
	 * If expiration period has already begun, then do not run
	 * the epilog again, as that script has already been executed
	 * for this job.
	 */
	if (slurm_cred_begin_expiration(conf->vctx, job_id) < 0) {
		debug("Not running epilog for jobid %d: %m", job_id);
		goto done;
	}

	save_cred_state(conf->vctx);

	memset(&job_env, 0, sizeof(job_env_t));
	job_env.jobid = job_id;
	job_env.node_list = node_name;
	job_env.uid = (uid_t)user_id;
	/* NOTE: We lack the job's SPANK environment variables */
	rc = _run_epilog(&job_env);
	if (rc) {
		/* Decode the wait(2)-style status returned by the epilog */
		int term_sig, exit_status;
		if (WIFSIGNALED(rc)) {
			exit_status = 0;
			term_sig    = WTERMSIG(rc);
		} else {
			exit_status = WEXITSTATUS(rc);
			term_sig    = 0;
		}
		error("[job %u] epilog failed status=%d:%d",
		      job_id, exit_status, term_sig);
	} else
		debug("completed epilog for jobid %u", job_id);
	if (container_g_delete(job_id))
		error("container_g_delete(%u): %m", job_id);
	_launch_complete_rm(job_id);

done:
	_wait_state_completed(job_id, 5);
	_waiter_complete(job_id);
}

/* Process a batch job launch message received in response to a batch
 * completion RPC (select/serial mode). */
static void _handle_old_batch_job_launch(slurm_msg_t *msg)
{
	if (msg->msg_type != REQUEST_BATCH_JOB_LAUNCH) {
		error("_handle_batch_job_launch: "
		      "Invalid response msg_type (%u)", msg->msg_type);
		return;
	}

	/* (resp_msg.msg_type == REQUEST_BATCH_JOB_LAUNCH) */
	debug2("Processing RPC: REQUEST_BATCH_JOB_LAUNCH");
	last_slurmctld_msg = time(NULL);
	_rpc_batch_job(msg, false);
	slurm_free_job_launch_msg(msg->data);
	msg->data = NULL;
}

/* This complete batch RPC came from slurmstepd because we have select/serial
 * configured. Terminate the job here. Forward the batch completion RPC to
 * slurmctld and possibly get a new batch launch RPC in response. */
static void
_rpc_complete_batch(slurm_msg_t *msg)
{
	int		i, rc, msg_rc;
	slurm_msg_t	resp_msg;
	uid_t		uid    = g_slurm_auth_get_uid(msg->auth_cred,
						      conf->auth_info);
	complete_batch_script_msg_t *req = msg->data;
	static int	running_serial = -1;
	uint16_t msg_type;

	/* Determine once whether select/serial is configured */
	if (running_serial == -1) {
		char *select_type = slurm_get_select_type();
		if (!xstrcmp(select_type, "select/serial"))
			running_serial = 1;
		else
			running_serial = 0;
		xfree(select_type);
	}

	if (!_slurm_authorized_user(uid)) {
		error("Security violation: complete_batch(%u) from uid %d",
		      req->job_id, uid);
		if (msg->conn_fd >= 0)
			slurm_send_rc_msg(msg, ESLURM_USER_ID_MISSING);
		return;
	}

	slurm_send_rc_msg(msg, SLURM_SUCCESS);

	if (running_serial) {
		_rpc_terminate_batch_job(
			req->job_id, req->user_id, req->node_name);
		msg_type = REQUEST_COMPLETE_BATCH_JOB;
	} else
		msg_type = msg->msg_type;

	for (i = 0; i <= MAX_RETRY; i++) {
		if (conf->msg_aggr_window_msgs > 1) {
			/* Message aggregation: hand the message off and let
			 * the aggregator deliver the eventual response. */
			slurm_msg_t *req_msg =
				xmalloc_nz(sizeof(slurm_msg_t));
			slurm_msg_t_init(req_msg);
			req_msg->msg_type = msg_type;
			req_msg->data = msg->data;
			msg->data = NULL;

			msg_aggr_add_msg(req_msg, 1,
					 _handle_old_batch_job_launch);
			return;
		} else {
			slurm_msg_t req_msg;
			slurm_msg_t_init(&req_msg);
			req_msg.msg_type = msg_type;
			req_msg.data	 = msg->data;
			msg_rc = slurm_send_recv_controller_msg(
				&req_msg, &resp_msg);

			if (msg_rc == SLURM_SUCCESS)
				break;
		}
		info("Retrying job complete RPC for job %u", req->job_id);
		sleep(RETRY_DELAY);
	}
	if (i > MAX_RETRY) {
		error("Unable to send job complete message: %m");
		return;
	}

	if (resp_msg.msg_type ==
	    RESPONSE_SLURM_RC) {
		last_slurmctld_msg = time(NULL);
		rc = ((return_code_msg_t *) resp_msg.data)->return_code;
		slurm_free_return_code_msg(resp_msg.data);
		if (rc) {
			error("complete_batch for job %u: %s",
			      req->job_id, slurm_strerror(rc));
		}
		return;
	}

	/* Controller may respond with a new batch launch request instead */
	_handle_old_batch_job_launch(&resp_msg);
}

/* Terminate a job and all of its steps: revoke credentials, signal the
 * steps, wait for user processes to exit, then run the epilog and notify
 * slurmctld via MESSAGE_EPILOG_COMPLETE. */
static void
_rpc_terminate_job(slurm_msg_t *msg)
{
#ifndef HAVE_AIX
	bool		have_spank = false;
#endif
	int             rc     = SLURM_SUCCESS;
	kill_job_msg_t *req    = msg->data;
	uid_t           uid    = g_slurm_auth_get_uid(msg->auth_cred,
						      conf->auth_info);
	int             nsteps = 0;
	int		delay;
//	slurm_ctl_conf_t *cf;
//	struct stat	stat_buf;
	job_env_t       job_env;

	debug("_rpc_terminate_job, uid = %d", uid);
	/*
	 * check that requesting user ID is the SLURM UID
	 */
	if (!_slurm_authorized_user(uid)) {
		error("Security violation: kill_job(%u) from uid %d",
		      req->job_id, uid);
		if (msg->conn_fd >= 0)
			slurm_send_rc_msg(msg, ESLURM_USER_ID_MISSING);
		return;
	}

	task_g_slurmd_release_resources(req->job_id);

	/*
	 * Initialize a "waiter" thread for this jobid. If another
	 * thread is already waiting on termination of this job,
	 * _waiter_init() will return SLURM_ERROR. In this case, just
	 * notify slurmctld that we recvd the message successfully,
	 * then exit this thread.
	 */
	if (_waiter_init(req->job_id) == SLURM_ERROR) {
		if (msg->conn_fd >= 0) {
			/* No matter if the step hasn't started yet or
			 * not just send a success to let the
			 * controller know we got this request.
			 */
			slurm_send_rc_msg (msg, SLURM_SUCCESS);
		}
		return;
	}

	/*
	 * "revoke" all future credentials for this jobid
	 */
	if (slurm_cred_revoke(conf->vctx, req->job_id, req->time,
			      req->start_time) < 0) {
		debug("revoking cred for job %u: %m", req->job_id);
	} else {
		save_cred_state(conf->vctx);
		debug("credential for job %u revoked", req->job_id);
	}

	/*
	 * Before signalling steps, if the job has any steps that are still
	 * in the process of fork/exec/check in with slurmd, wait on a
	 * condition var for the start. Otherwise a slow-starting step can
	 * miss the job termination message and run indefinitely.
	 */
	if (_step_is_starting(req->job_id, NO_VAL)) {
		if (msg->conn_fd >= 0) {
			/* If the step hasn't started yet just send a
			 * success to let the controller know we got
			 * this request.
			 */
			debug("sent SUCCESS, waiting for step to start");
			slurm_send_rc_msg (msg, SLURM_SUCCESS);
			if (slurm_close(msg->conn_fd) < 0)
				error ( "rpc_kill_job: close(%d): %m",
					msg->conn_fd);
			msg->conn_fd = -1;
		}
		if (_wait_for_starting_step(req->job_id, NO_VAL)) {
			/*
			 * There's currently no case in which we enter this
			 * error condition. If there was, it's hard to say
			 * whether to to proceed with the job termination.
			 */
			error("Error in _wait_for_starting_step");
		}
	}

	/* Translate job state into the matching notification signal */
	if (IS_JOB_NODE_FAILED(req))
		_kill_all_active_steps(req->job_id, SIG_NODE_FAIL, true);
	if (IS_JOB_PENDING(req))	/* requeued */
		_kill_all_active_steps(req->job_id, SIG_REQUEUED, true);
	else if (IS_JOB_FAILED(req))
		_kill_all_active_steps(req->job_id, SIG_FAILURE, true);

	/*
	 * Tasks might be stopped (possibly by a debugger)
	 * so send SIGCONT first.
	 */
	_kill_all_active_steps(req->job_id, SIGCONT, true);
	if (errno == ESLURMD_STEP_SUSPENDED) {
		/*
		 * If the job step is currently suspended, we don't
		 * bother with a "nice" termination.
		 */
		debug2("Job is currently suspended, terminating");
		nsteps = _terminate_all_steps(req->job_id, true);
	} else {
		nsteps = _kill_all_active_steps(req->job_id, SIGTERM, true);
	}

#ifndef HAVE_AIX
	if ((nsteps == 0) && !conf->epilog) {
		/* A SPANK plugin stack may still need an epilog pass */
		struct stat stat_buf;
		if (conf->plugstack && (stat(conf->plugstack, &stat_buf) == 0))
			have_spank = true;
	}
	/*
	 * If there are currently no active job steps and no
	 * configured epilog to run, bypass asynchronous reply and
	 * notify slurmctld that we have already completed this
	 * request. We need to send current switch state on AIX
	 * systems, so this bypass can not be used.
	 */
	if ((nsteps == 0) && !conf->epilog && !have_spank) {
		debug4("sent ALREADY_COMPLETE");
		if (msg->conn_fd >= 0) {
			slurm_send_rc_msg(msg,
					  ESLURMD_KILL_JOB_ALREADY_COMPLETE);
		}
		slurm_cred_begin_expiration(conf->vctx, req->job_id);
		save_cred_state(conf->vctx);
		_waiter_complete(req->job_id);

		/*
		 * The controller needs to get MESSAGE_EPILOG_COMPLETE to bring
		 * the job out of "completing" state. Otherwise, the job
		 * could remain "completing" unnecessarily, until the request
		 * to terminate is resent.
		 */
		_sync_messages_kill(req);
		if (msg->conn_fd < 0) {
			/* The epilog complete message processing on
			 * slurmctld is equivalent to that of a
			 * ESLURMD_KILL_JOB_ALREADY_COMPLETE reply above */
			_epilog_complete(req->job_id, rc);
		}
		if (container_g_delete(req->job_id))
			error("container_g_delete(%u): %m", req->job_id);
		_launch_complete_rm(req->job_id);
		return;
	}
#endif

	/*
	 * At this point, if connection still open, we send controller
	 * a "success" reply to indicate that we've recvd the msg.
	 */
	if (msg->conn_fd >= 0) {
		debug4("sent SUCCESS");
		slurm_send_rc_msg(msg, SLURM_SUCCESS);
		if (slurm_close(msg->conn_fd) < 0)
			error ("rpc_kill_job: close(%d): %m", msg->conn_fd);
		msg->conn_fd = -1;
	}

	/*
	 * Check for corpses
	 */
	delay = MAX(conf->kill_wait, 5);
	if ( !_pause_for_job_completion (req->job_id, req->nodes, delay) &&
	     _terminate_all_steps(req->job_id, true) ) {
		/*
		 * Block until all user processes are complete.
		 */
		_pause_for_job_completion (req->job_id, req->nodes, 0);
	}

	/*
	 * Begin expiration period for cached information about job.
	 * If expiration period has already begun, then do not run
	 * the epilog again, as that script has already been executed
	 * for this job.
	 */
	if (slurm_cred_begin_expiration(conf->vctx, req->job_id) < 0) {
		debug("Not running epilog for jobid %d: %m", req->job_id);
		goto done;
	}

	save_cred_state(conf->vctx);

	memset(&job_env, 0, sizeof(job_env_t));

	job_env.jobid = req->job_id;
	job_env.node_list = req->nodes;
	job_env.spank_job_env = req->spank_job_env;
	job_env.spank_job_env_size = req->spank_job_env_size;
	job_env.uid = req->job_uid;

#if defined(HAVE_BG)
	select_g_select_jobinfo_get(req->select_jobinfo,
				    SELECT_JOBDATA_BLOCK_ID,
				    &job_env.resv_id);
#elif defined(HAVE_ALPS_CRAY)
	job_env.resv_id = select_g_select_jobinfo_xstrdup(
		req->select_jobinfo, SELECT_PRINT_RESV_ID);
#endif
	rc = _run_epilog(&job_env);
	xfree(job_env.resv_id);

	if (rc) {
		/* Decode the wait(2)-style status returned by the epilog */
		int term_sig, exit_status;
		if (WIFSIGNALED(rc)) {
			exit_status = 0;
			term_sig    = WTERMSIG(rc);
		} else {
			exit_status = WEXITSTATUS(rc);
			term_sig    = 0;
		}
		error("[job %u] epilog failed status=%d:%d",
		      req->job_id, exit_status, term_sig);
		rc = ESLURMD_EPILOG_FAILED;
	} else
		debug("completed epilog for jobid %u", req->job_id);
	if (container_g_delete(req->job_id))
		error("container_g_delete(%u): %m", req->job_id);
	_launch_complete_rm(req->job_id);

done:
	_wait_state_completed(req->job_id, 5);
	_waiter_complete(req->job_id);
	_sync_messages_kill(req);
	_epilog_complete(req->job_id, rc);
}

/* On a parallel job, every slurmd may send the EPILOG_COMPLETE
 * message to the slurmctld at the same time, resulting in lost
 * messages. We add a delay here to spread out the message traffic
 * assuming synchronized clocks across the cluster.
 * Allow 10 msec processing time in slurmctld for each RPC.
 */
static void _sync_messages_kill(kill_job_msg_t *req)
{
	int host_cnt, host_inx;
	char *host;
	hostset_t hosts;
	int epilog_msg_time;

	hosts = hostset_create(req->nodes);
	host_cnt = hostset_count(hosts);
	/* Small jobs produce little enough traffic that no delay is needed */
	if (host_cnt <= 64)
		goto fini;
	if (conf->hostname == NULL)
		goto fini;	/* should never happen */

	/* Find this node's ordinal position within the job's host set;
	 * that index determines our slot in the staggered send schedule. */
	for (host_inx=0; host_inx<host_cnt; host_inx++) {
		host = hostset_shift(hosts);
		if (host == NULL)
			break;
		if (xstrcmp(host, conf->node_name) == 0) {
			free(host);
			break;
		}
		free(host);
	}
	epilog_msg_time = slurm_get_epilog_msg_time();
	_delay_rpc(host_inx, host_cnt, epilog_msg_time);

fini:	hostset_destroy(hosts);
}

/* Delay a message based upon the host index, total host count and RPC_TIME.
 * This logic depends upon synchronized clocks across the cluster. */
static void _delay_rpc(int host_inx, int host_cnt, int usec_per_rpc)
{
	struct timeval tv1;
	uint32_t cur_time;	/* current time in usec (just 9 digits) */
	uint32_t tot_time;	/* total time expected for all RPCs */
	uint32_t offset_time;	/* relative time within tot_time */
	uint32_t target_time;	/* desired time to issue the RPC */
	uint32_t delta_time;

again:	if (gettimeofday(&tv1, NULL)) {
		/* Clock unavailable; fall back to a simple fixed stagger */
		usleep(host_inx * usec_per_rpc);
		return;
	}

	cur_time = ((tv1.tv_sec % 1000) * 1000000) + tv1.tv_usec;
	tot_time = host_cnt * usec_per_rpc;
	offset_time = cur_time % tot_time;
	target_time = host_inx * usec_per_rpc;
	if (target_time < offset_time)
		/* Our slot in this cycle has passed; wait for the next one
		 * (unsigned arithmetic wraps to the correct delta) */
		delta_time = target_time - offset_time + tot_time;
	else
		delta_time = target_time - offset_time;
	if (usleep(delta_time)) {
		if (errno == EINVAL) /* usleep for more than 1 sec */
			usleep(900000);
		/* errno == EINTR */
		goto again;
	}
}

/*
 * Returns true if "uid" is a "slurm authorized user" - i.e. uid == 0
 * or uid == slurm user id at this time.
 */
static bool
_slurm_authorized_user(uid_t uid)
{
	return ((uid == (uid_t) 0) || (uid == conf->slurm_user_id));
}

/* Record of a thread waiting on the termination of one job */
struct waiter {
	uint32_t jobid;
	pthread_t thd;
};

static struct waiter *
_waiter_create(uint32_t jobid)
{
	struct waiter *wp = xmalloc(sizeof(struct waiter));

	wp->jobid = jobid;
	wp->thd = pthread_self();

	return wp;
}

/* ListFindF: match a waiter record by job id */
static int _find_waiter(struct waiter *w, uint32_t *jp)
{
	return (w->jobid == *jp);
}

static void _waiter_destroy(struct waiter *wp)
{
	xfree(wp);
}

/* Register the current thread as the (single) waiter for a job.
 * Returns SLURM_ERROR if another thread is already waiting. */
static int _waiter_init (uint32_t jobid)
{
	if (!waiters)
		waiters = list_create((ListDelF) _waiter_destroy);

	/*
	 * Exit this thread if another thread is waiting on job
	 */
	if (list_find_first (waiters, (ListFindF) _find_waiter, &jobid))
		return SLURM_ERROR;
	else
		list_append(waiters, _waiter_create(jobid));

	return (SLURM_SUCCESS);
}

/* Remove the waiter record(s) for a job */
static int _waiter_complete (uint32_t jobid)
{
	return (list_delete_all (waiters, (ListFindF) _find_waiter, &jobid));
}

/*
 * Like _wait_for_procs(), but only wait for up to max_time seconds
 * if max_time == 0, send SIGKILL to tasks repeatedly
 *
 * Returns true if all job processes are gone
 */
static bool
_pause_for_job_completion (uint32_t job_id, char *nodes, int max_time)
{
	int sec = 0;
	int pause = 1;
	bool rc = false;

	while ((sec < max_time) || (max_time == 0)) {
		rc = _job_still_running (job_id);
		if (!rc)
			break;
		if ((max_time == 0) && (sec > 1)) {
			_terminate_all_steps(job_id, true);
		}
		if (sec > 10) {
			/* Reduce logging frequency about unkillable tasks */
			if (max_time)
				pause = MIN((max_time - sec), 10);
			else
				pause = 10;
		}
		sleep(pause);
		sec += pause;
	}

	/*
	 * Return true if job is NOT running
	 */
	return (!rc);
}

/*
 * Does nothing and returns SLURM_SUCCESS (if uid authenticates).
 *
 * Timelimit is not currently used in the slurmd or slurmstepd.
*/ static void _rpc_update_time(slurm_msg_t *msg) { int rc = SLURM_SUCCESS; uid_t req_uid = g_slurm_auth_get_uid(msg->auth_cred, conf->auth_info); if ((req_uid != conf->slurm_user_id) && (req_uid != 0)) { rc = ESLURM_USER_ID_MISSING; error("Security violation, uid %d can't update time limit", req_uid); goto done; } /* if (shm_update_job_timelimit(req->job_id, req->expiration_time) < 0) { */ /* error("updating lifetime for job %u: %m", req->job_id); */ /* rc = ESLURM_INVALID_JOB_ID; */ /* } else */ /* debug("reset job %u lifetime", req->job_id); */ done: slurm_send_rc_msg(msg, rc); } /* NOTE: call _destroy_env() to free returned value */ static char ** _build_env(job_env_t *job_env) { char **env = xmalloc(sizeof(char *)); bool user_name_set = 0; env[0] = NULL; if (!valid_spank_job_env(job_env->spank_job_env, job_env->spank_job_env_size, job_env->uid)) { /* If SPANK job environment is bad, log it and do not use */ job_env->spank_job_env_size = 0; job_env->spank_job_env = (char **) NULL; } if (job_env->spank_job_env_size) { env_array_merge_spank(&env, (const char **) job_env->spank_job_env); } slurm_mutex_lock(&conf->config_mutex); setenvf(&env, "SLURMD_NODENAME", "%s", conf->node_name); setenvf(&env, "SLURM_CONF", conf->conffile); slurm_mutex_unlock(&conf->config_mutex); setenvf(&env, "SLURM_CLUSTER_NAME", "%s", conf->cluster_name); setenvf(&env, "SLURM_JOB_ID", "%u", job_env->jobid); setenvf(&env, "SLURM_JOB_UID", "%u", job_env->uid); #ifndef HAVE_NATIVE_CRAY /* uid_to_string on a cray is a heavy call, so try to avoid it */ if (!job_env->user_name) { job_env->user_name = uid_to_string(job_env->uid); user_name_set = 1; } #endif setenvf(&env, "SLURM_JOB_USER", "%s", job_env->user_name); if (user_name_set) xfree(job_env->user_name); setenvf(&env, "SLURM_JOBID", "%u", job_env->jobid); setenvf(&env, "SLURM_UID", "%u", job_env->uid); if (job_env->node_list) setenvf(&env, "SLURM_NODELIST", "%s", job_env->node_list); if (job_env->partition) setenvf(&env, 
"SLURM_JOB_PARTITION", "%s", job_env->partition); if (job_env->resv_id) { #if defined(HAVE_BG) setenvf(&env, "MPIRUN_PARTITION", "%s", job_env->resv_id); # ifdef HAVE_BGP /* Needed for HTC jobs */ setenvf(&env, "SUBMIT_POOL", "%s", job_env->resv_id); # endif #elif defined(HAVE_ALPS_CRAY) setenvf(&env, "BASIL_RESERVATION_ID", "%s", job_env->resv_id); #endif } return env; } static void _destroy_env(char **env) { int i=0; if (env) { for(i=0; env[i]; i++) { xfree(env[i]); } xfree(env); } return; } /* Trigger srun of spank prolog or epilog in slurmstepd */ static int _run_spank_job_script (const char *mode, char **env, uint32_t job_id, uid_t uid) { pid_t cpid; int status = 0, timeout; int pfds[2]; if (pipe (pfds) < 0) { error ("_run_spank_job_script: pipe: %m"); return (-1); } fd_set_close_on_exec (pfds[1]); debug ("Calling %s spank %s", conf->stepd_loc, mode); if ((cpid = fork ()) < 0) { error ("executing spank %s: %m", mode); return (-1); } if (cpid == 0) { /* Run slurmstepd spank [prolog|epilog] */ char *argv[4] = { (char *) conf->stepd_loc, "spank", (char *) mode, NULL }; /* container_g_add_pid needs to be called in the forked process part of the fork to avoid a race condition where if this process makes a file or detacts itself from a child before we add the pid to the container in the parent of the fork. 
*/ if (container_g_add_pid(job_id, getpid(), getuid()) != SLURM_SUCCESS) error("container_g_add_pid(%u): %m", job_id); if (dup2 (pfds[0], STDIN_FILENO) < 0) fatal ("dup2: %m"); #ifdef SETPGRP_TWO_ARGS setpgrp(0, 0); #else setpgrp(); #endif if (conf->chos_loc && !access(conf->chos_loc, X_OK)) execve(conf->chos_loc, argv, env); else execve(argv[0], argv, env); error ("execve(%s): %m", argv[0]); exit (127); } close (pfds[0]); if (_send_slurmd_conf_lite (pfds[1], conf) < 0) error ("Failed to send slurmd conf to slurmstepd\n"); close (pfds[1]); timeout = MAX(slurm_get_prolog_timeout(), 120); /* 120 secs in v15.08 */ if (waitpid_timeout (mode, cpid, &status, timeout) < 0) { error ("spank/%s timed out after %u secs", mode, timeout); return (-1); } if (status) error ("spank/%s returned status 0x%04x", mode, status); /* * No longer need SPANK option env vars in environment */ spank_clear_remote_options_env (env); return (status); } static int _run_job_script(const char *name, const char *path, uint32_t jobid, int timeout, char **env, uid_t uid) { struct stat stat_buf; int status = 0, rc; /* * Always run both spank prolog/epilog and real prolog/epilog script, * even if spank plugins fail. (May want to alter this in the future) * If both "script" mechanisms fail, prefer to return the "real" * prolog/epilog status. 
	 */
	if (conf->plugstack && (stat(conf->plugstack, &stat_buf) == 0))
		status = _run_spank_job_script(name, env, jobid, uid);
	if ((rc = run_script(name, path, jobid, timeout, env, uid)))
		status = rc;

	return (status);
}

#ifdef HAVE_BG
/* a slow prolog is expected on bluegene systems */
static int
_run_prolog(job_env_t *job_env, slurm_cred_t *cred)
{
	int rc;
	char *my_prolog;
	char **my_env;

	my_env = _build_env(job_env);
	setenvf(&my_env, "SLURM_STEP_ID", "%u", job_env->step_id);

	/* Copy the prolog path under the config lock so a concurrent
	 * reconfigure cannot free it out from under us */
	slurm_mutex_lock(&conf->config_mutex);
	my_prolog = xstrdup(conf->prolog);
	slurm_mutex_unlock(&conf->config_mutex);

	rc = _run_job_script("prolog", my_prolog, job_env->jobid,
			     -1, my_env, job_env->uid);
	_remove_job_running_prolog(job_env->jobid);
	xfree(my_prolog);
	_destroy_env(my_env);

	return rc;
}
#else
/* Timer thread: if the prolog has not finished within the message
 * timeout, notify the user (via slurmctld) that the prolog is hung. */
static void *_prolog_timer(void *x)
{
	int delay_time, rc = SLURM_SUCCESS;
	struct timespec ts;
	struct timeval now;
	slurm_msg_t msg;
	job_notify_msg_t notify_req;
	char srun_msg[128];
	timer_struct_t *timer_struct = (timer_struct_t *) x;

	delay_time = MAX(2, (timer_struct->msg_timeout - 2));
	gettimeofday(&now, NULL);
	ts.tv_sec = now.tv_sec + delay_time;
	ts.tv_nsec = now.tv_usec * 1000;
	slurm_mutex_lock(timer_struct->timer_mutex);
	if (!timer_struct->prolog_fini) {
		rc = pthread_cond_timedwait(timer_struct->timer_cond,
					    timer_struct->timer_mutex, &ts);
	}
	slurm_mutex_unlock(timer_struct->timer_mutex);

	/* Any result other than a timeout means the prolog completed */
	if (rc != ETIMEDOUT)
		return NULL;

	slurm_msg_t_init(&msg);
	snprintf(srun_msg, sizeof(srun_msg), "Prolog hung on node %s",
		 conf->node_name);
	notify_req.job_id	= timer_struct->job_id;
	notify_req.job_step_id	= NO_VAL;
	notify_req.message	= srun_msg;
	msg.msg_type	= REQUEST_JOB_NOTIFY;
	msg.data	= &notify_req;
	slurm_send_only_controller_msg(&msg);
	return NULL;
}

/* Run the configured prolog script for a job, with a watchdog timer
 * that reports a hung prolog to the user. */
static int
_run_prolog(job_env_t *job_env, slurm_cred_t *cred)
{
	DEF_TIMERS;
	int rc, diff_time;
	char *my_prolog;
	time_t start_time = time(NULL);
	static uint16_t msg_timeout = 0;
	static uint16_t timeout;
	pthread_t       timer_id;
	pthread_attr_t  timer_attr;
	pthread_cond_t  timer_cond  = PTHREAD_COND_INITIALIZER;
	pthread_mutex_t timer_mutex = PTHREAD_MUTEX_INITIALIZER;
	timer_struct_t  timer_struct;
	bool prolog_fini = false;
	char **my_env;

	my_env = _build_env(job_env);
	setenvf(&my_env, "SLURM_STEP_ID", "%u", job_env->step_id);
	if (cred) {
		/* Export job constraints and GRES info from the credential */
		slurm_cred_arg_t cred_arg;
		slurm_cred_get_args(cred, &cred_arg);
		setenvf(&my_env, "SLURM_JOB_CONSTRAINTS", "%s",
			cred_arg.job_constraints);
		gres_plugin_job_set_env(&my_env, cred_arg.job_gres_list);
		slurm_cred_free_args(&cred_arg);
	}

	if (msg_timeout == 0)
		msg_timeout = slurm_get_msg_timeout();
	if (timeout == 0)
		timeout = slurm_get_prolog_timeout();

	slurm_mutex_lock(&conf->config_mutex);
	my_prolog = xstrdup(conf->prolog);
	slurm_mutex_unlock(&conf->config_mutex);

	slurm_attr_init(&timer_attr);
	timer_struct.job_id	 = job_env->jobid;
	timer_struct.msg_timeout = msg_timeout;
	timer_struct.prolog_fini = &prolog_fini;
	timer_struct.timer_cond  = &timer_cond;
	timer_struct.timer_mutex = &timer_mutex;
	pthread_create(&timer_id, &timer_attr, &_prolog_timer, &timer_struct);
	START_TIMER;

	if (timeout == (uint16_t)NO_VAL)
		rc = _run_job_script("prolog", my_prolog, job_env->jobid,
				     -1, my_env, job_env->uid);
	else
		rc = _run_job_script("prolog", my_prolog, job_env->jobid,
				     timeout, my_env, job_env->uid);
	END_TIMER;
	info("%s: run job script took %s", __func__, TIME_STR);
	/* Wake the watchdog thread so it exits instead of timing out */
	slurm_mutex_lock(&timer_mutex);
	prolog_fini = true;
	pthread_cond_broadcast(&timer_cond);
	slurm_mutex_unlock(&timer_mutex);

	diff_time = difftime(time(NULL), start_time);
	info("%s: prolog with lock for job %u ran for %d seconds",
	     __func__, job_env->jobid, diff_time);
	if (diff_time >= (msg_timeout / 2)) {
		info("prolog for job %u ran for %d seconds",
		     job_env->jobid, diff_time);
	}

	_remove_job_running_prolog(job_env->jobid);
	xfree(my_prolog);
	_destroy_env(my_env);
	pthread_join(timer_id, NULL);
	return rc;
}
#endif

/* Run the configured epilog script for a job.
 * Returns the script's wait(2)-style exit status. */
static int
_run_epilog(job_env_t *job_env)
{
	time_t start_time = time(NULL);
	static uint16_t msg_timeout = 0;
	static uint16_t timeout;
	int error_code, diff_time;
char *my_epilog; char **my_env = _build_env(job_env); if (msg_timeout == 0) msg_timeout = slurm_get_msg_timeout(); if (timeout == 0) timeout = slurm_get_prolog_timeout(); slurm_mutex_lock(&conf->config_mutex); my_epilog = xstrdup(conf->epilog); slurm_mutex_unlock(&conf->config_mutex); _wait_for_job_running_prolog(job_env->jobid); if (timeout == (uint16_t)NO_VAL) error_code = _run_job_script("epilog", my_epilog, job_env->jobid, -1, my_env, job_env->uid); else error_code = _run_job_script("epilog", my_epilog, job_env->jobid, timeout, my_env, job_env->uid); xfree(my_epilog); _destroy_env(my_env); diff_time = difftime(time(NULL), start_time); if (diff_time >= (msg_timeout / 2)) { info("epilog for job %u ran for %d seconds", job_env->jobid, diff_time); } return error_code; } /**********************************************************************/ /* Because calling initgroups(2)/getgrouplist(3) can be expensive and */ /* is not cached by sssd or nscd, we cache the group access list. */ /**********************************************************************/ typedef struct gid_cache_s { char *user; time_t timestamp; gid_t gid; gids_t *gids; struct gid_cache_s *next; } gids_cache_t; #define GIDS_HASH_LEN 64 static gids_cache_t *gids_hashtbl[GIDS_HASH_LEN] = {NULL}; static pthread_mutex_t gids_mutex = PTHREAD_MUTEX_INITIALIZER; static gids_t * _alloc_gids(int n, gid_t *gids) { gids_t *new; new = (gids_t *)xmalloc(sizeof(gids_t)); new->ngids = n; new->gids = gids; return new; } static void _dealloc_gids(gids_t *p) { xfree(p->gids); xfree(p); } /* Duplicate a gids_t struct. 
*/ static gids_t * _gids_dup(gids_t *g) { int buf_size; gids_t *n = xmalloc(sizeof(gids_t)); n->ngids = g->ngids; buf_size = g->ngids * sizeof(gid_t); n->gids = xmalloc(buf_size); memcpy(n->gids, g->gids, buf_size); return n; } static gids_cache_t * _alloc_gids_cache(char *user, gid_t gid, gids_t *gids, gids_cache_t *next) { gids_cache_t *p; p = (gids_cache_t *)xmalloc(sizeof(gids_cache_t)); p->user = xstrdup(user); p->timestamp = time(NULL); p->gid = gid; p->gids = gids; p->next = next; return p; } static void _dealloc_gids_cache(gids_cache_t *p) { xfree(p->user); _dealloc_gids(p->gids); xfree(p); } static size_t _gids_hashtbl_idx(const char *user) { uint64_t x = siphash_str(user); return x % GIDS_HASH_LEN; } void gids_cache_purge(void) { int i; gids_cache_t *p, *q; slurm_mutex_lock(&gids_mutex); for (i=0; i<GIDS_HASH_LEN; i++) { p = gids_hashtbl[i]; while (p) { q = p->next; _dealloc_gids_cache(p); p = q; } gids_hashtbl[i] = NULL; } slurm_mutex_unlock(&gids_mutex); } static void _gids_cache_register(char *user, gid_t gid, gids_t *gids) { size_t idx; gids_cache_t *p, *q; idx = _gids_hashtbl_idx(user); q = gids_hashtbl[idx]; p = _alloc_gids_cache(user, gid, gids, q); gids_hashtbl[idx] = p; debug2("Cached group access list for %s/%d", user, gid); } /* how many groups to use by default to avoid repeated calls to getgrouplist */ #define NGROUPS_START 64 static gids_t *_gids_cache_lookup(char *user, gid_t gid) { size_t idx; gids_cache_t *p; bool found_but_old = false; time_t now = 0; int ngroups = NGROUPS_START; gid_t *groups; gids_t *ret_gids = NULL; idx = _gids_hashtbl_idx(user); slurm_mutex_lock(&gids_mutex); p = gids_hashtbl[idx]; while (p) { if (xstrcmp(p->user, user) == 0 && p->gid == gid) { slurm_ctl_conf_t *cf = slurm_conf_lock(); int group_ttl = cf->group_info & GROUP_TIME_MASK; slurm_conf_unlock(); if (!group_ttl) { ret_gids = _gids_dup(p->gids); goto done; } now = time(NULL); if (difftime(now, p->timestamp) < group_ttl) { ret_gids = _gids_dup(p->gids); goto 
done; } else { found_but_old = true; break; } } p = p->next; } /* Cache lookup failed or cached value was too old, fetch new * value and insert it into cache. */ groups = xmalloc(ngroups * sizeof(gid_t)); while (getgrouplist(user, gid, groups, &ngroups) == -1) { /* group list larger than array, resize array to fit */ groups = xrealloc(groups, ngroups * sizeof(gid_t)); } if (found_but_old) { xfree(p->gids->gids); p->gids->gids = groups; p->gids->ngids = ngroups; p->timestamp = now; ret_gids = _gids_dup(p->gids); } else { gids_t *gids = _alloc_gids(ngroups, groups); _gids_cache_register(user, gid, gids); ret_gids = _gids_dup(gids); } done: slurm_mutex_unlock(&gids_mutex); return ret_gids; } extern void destroy_starting_step(void *x) { xfree(x); } static int _add_starting_step(uint16_t type, void *req) { starting_step_t *starting_step; int rc = SLURM_SUCCESS; /* Add the step info to a list of starting processes that cannot reliably be contacted. */ slurm_mutex_lock(&conf->starting_steps_lock); starting_step = xmalloc(sizeof(starting_step_t)); if (!starting_step) { error("%s failed to allocate memory", __func__); rc = SLURM_FAILURE; goto fail; } switch (type) { case LAUNCH_BATCH_JOB: starting_step->job_id = ((batch_job_launch_msg_t *)req)->job_id; starting_step->step_id = ((batch_job_launch_msg_t *)req)->step_id; break; case LAUNCH_TASKS: starting_step->job_id = ((launch_tasks_request_msg_t *)req)->job_id; starting_step->step_id = ((launch_tasks_request_msg_t *)req)->job_step_id; break; case REQUEST_LAUNCH_PROLOG: starting_step->job_id = ((prolog_launch_msg_t *)req)->job_id; starting_step->step_id = SLURM_EXTERN_CONT; break; default: error("%s called with an invalid type: %u", __func__, type); rc = SLURM_FAILURE; xfree(starting_step); goto fail; } if (!list_append(conf->starting_steps, starting_step)) { error("%s failed to allocate memory for list", __func__); rc = SLURM_FAILURE; xfree(starting_step); goto fail; } fail: slurm_mutex_unlock(&conf->starting_steps_lock); 
return rc; } static int _remove_starting_step(uint16_t type, void *req) { uint32_t job_id, step_id; ListIterator iter; starting_step_t *starting_step; int rc = SLURM_SUCCESS; bool found = false; slurm_mutex_lock(&conf->starting_steps_lock); switch(type) { case LAUNCH_BATCH_JOB: job_id = ((batch_job_launch_msg_t *)req)->job_id; step_id = ((batch_job_launch_msg_t *)req)->step_id; break; case LAUNCH_TASKS: job_id = ((launch_tasks_request_msg_t *)req)->job_id; step_id = ((launch_tasks_request_msg_t *)req)->job_step_id; break; default: error("%s called with an invalid type: %u", __func__, type); rc = SLURM_FAILURE; goto fail; } iter = list_iterator_create(conf->starting_steps); while ((starting_step = list_next(iter))) { if (starting_step->job_id == job_id && starting_step->step_id == step_id) { starting_step = list_remove(iter); xfree(starting_step); found = true; pthread_cond_broadcast(&conf->starting_steps_cond); break; } } if (!found) { error("%s: step %u.%u not found", __func__, job_id, step_id); rc = SLURM_FAILURE; } fail: slurm_mutex_unlock(&conf->starting_steps_lock); return rc; } static int _compare_starting_steps(void *listentry, void *key) { starting_step_t *step0 = (starting_step_t *)listentry; starting_step_t *step1 = (starting_step_t *)key; if (step1->step_id != NO_VAL) return (step0->job_id == step1->job_id && step0->step_id == step1->step_id); else return (step0->job_id == step1->job_id); } /* Wait for a step to get far enough in the launch process to have a socket open, ready to handle RPC calls. Pass step_id = NO_VAL to wait on any step for the given job. 
*/ static int _wait_for_starting_step(uint32_t job_id, uint32_t step_id) { starting_step_t starting_step; starting_step.job_id = job_id; starting_step.step_id = step_id; int num_passes = 0; slurm_mutex_lock(&conf->starting_steps_lock); while (list_find_first( conf->starting_steps, &_compare_starting_steps, &starting_step )) { if (num_passes == 0) { if (step_id != NO_VAL) debug( "Blocked waiting for step %d.%d", job_id, step_id); else debug( "Blocked waiting for job %d, all steps", job_id); } num_passes++; pthread_cond_wait(&conf->starting_steps_cond, &conf->starting_steps_lock); } if (num_passes > 0) { if (step_id != NO_VAL) debug( "Finished wait for step %d.%d", job_id, step_id); else debug( "Finished wait for job %d, all steps", job_id); } slurm_mutex_unlock(&conf->starting_steps_lock); return SLURM_SUCCESS; } /* Return true if the step has not yet confirmed that its socket to handle RPC calls has been created. Pass step_id = NO_VAL to return true if any of the job's steps are still starting. 
*/ static bool _step_is_starting(uint32_t job_id, uint32_t step_id) { starting_step_t starting_step; starting_step.job_id = job_id; starting_step.step_id = step_id; bool ret = false; slurm_mutex_lock(&conf->starting_steps_lock); if (list_find_first( conf->starting_steps, &_compare_starting_steps, &starting_step )) { ret = true; } slurm_mutex_unlock(&conf->starting_steps_lock); return ret; } /* Add this job to the list of jobs currently running their prolog */ static void _add_job_running_prolog(uint32_t job_id) { uint32_t *job_running_prolog; /* Add the job to a list of jobs whose prologs are running */ slurm_mutex_lock(&conf->prolog_running_lock); job_running_prolog = xmalloc(sizeof(uint32_t)); if (!job_running_prolog) { error("_add_job_running_prolog failed to allocate memory"); goto fail; } *job_running_prolog = job_id; if (!list_append(conf->prolog_running_jobs, job_running_prolog)) { error("_add_job_running_prolog failed to append job to list"); xfree(job_running_prolog); } fail: slurm_mutex_unlock(&conf->prolog_running_lock); } /* Remove this job from the list of jobs currently running their prolog */ static void _remove_job_running_prolog(uint32_t job_id) { ListIterator iter; uint32_t *job_running_prolog; bool found = false; slurm_mutex_lock(&conf->prolog_running_lock); iter = list_iterator_create(conf->prolog_running_jobs); while ((job_running_prolog = list_next(iter))) { if (*job_running_prolog == job_id) { job_running_prolog = list_remove(iter); xfree(job_running_prolog); found = true; pthread_cond_broadcast(&conf->prolog_running_cond); break; } } if (!found) error("_remove_job_running_prolog: job not found"); slurm_mutex_unlock(&conf->prolog_running_lock); } static int _match_jobid(void *listentry, void *key) { uint32_t *job0 = (uint32_t *)listentry; uint32_t *job1 = (uint32_t *)key; return (*job0 == *job1); } static int _prolog_is_running (uint32_t jobid) { int rc = 0; if (list_find_first (conf->prolog_running_jobs, (ListFindF) _match_jobid, &jobid)) rc 
= 1; return (rc); } /* Wait for the job's prolog to complete */ static void _wait_for_job_running_prolog(uint32_t job_id) { debug( "Waiting for job %d's prolog to complete", job_id); slurm_mutex_lock(&conf->prolog_running_lock); while (_prolog_is_running (job_id)) { pthread_cond_wait(&conf->prolog_running_cond, &conf->prolog_running_lock); } slurm_mutex_unlock(&conf->prolog_running_lock); debug( "Finished wait for job %d's prolog to complete", job_id); } static void _rpc_forward_data(slurm_msg_t *msg) { forward_data_msg_t *req = (forward_data_msg_t *)msg->data; uint32_t req_uid; struct sockaddr_un sa; int fd = -1, rc = 0; debug3("Entering _rpc_forward_data, address: %s, len: %u", req->address, req->len); /* sanity check */ if (strlen(req->address) > sizeof(sa.sun_path) - 1) { slurm_seterrno(EINVAL); rc = errno; goto done; } /* connect to specified address */ fd = socket(AF_UNIX, SOCK_STREAM, 0); if (fd < 0) { rc = errno; error("failed creating UNIX domain socket: %m"); goto done; } memset(&sa, 0, sizeof(sa)); sa.sun_family = AF_UNIX; strcpy(sa.sun_path, req->address); while (((rc = connect(fd, (struct sockaddr *)&sa, SUN_LEN(&sa))) < 0) && (errno == EINTR)); if (rc < 0) { rc = errno; debug2("failed connecting to specified socket '%s': %m", req->address); goto done; } req_uid = (uint32_t)g_slurm_auth_get_uid(msg->auth_cred, conf->auth_info); /* * although always in localhost, we still convert it to network * byte order, to make it consistent with pack/unpack. 
*/ req_uid = htonl(req_uid); safe_write(fd, &req_uid, sizeof(uint32_t)); req_uid = htonl(req->len); safe_write(fd, &req_uid, sizeof(uint32_t)); safe_write(fd, req->data, req->len); rwfail: done: if (fd >= 0){ close(fd); } slurm_send_rc_msg(msg, rc); } static void _launch_complete_add(uint32_t job_id) { int j, empty; slurm_mutex_lock(&job_state_mutex); empty = -1; for (j = 0; j < JOB_STATE_CNT; j++) { if (job_id == active_job_id[j]) break; if ((active_job_id[j] == 0) && (empty == -1)) empty = j; } if (j >= JOB_STATE_CNT || job_id != active_job_id[j]) { if (empty == -1) /* Discard oldest job */ empty = 0; for (j = empty + 1; j < JOB_STATE_CNT; j++) { active_job_id[j - 1] = active_job_id[j]; } active_job_id[JOB_STATE_CNT - 1] = 0; for (j = 0; j < JOB_STATE_CNT; j++) { if (active_job_id[j] == 0) { active_job_id[j] = job_id; break; } } } pthread_cond_signal(&job_state_cond); slurm_mutex_unlock(&job_state_mutex); _launch_complete_log("job add", job_id); } static void _launch_complete_log(char *type, uint32_t job_id) { #if 0 int j; info("active %s %u", type, job_id); slurm_mutex_lock(&job_state_mutex); for (j = 0; j < JOB_STATE_CNT; j++) { if (active_job_id[j] != 0) { info("active_job_id[%d]=%u", j, active_job_id[j]); } } slurm_mutex_unlock(&job_state_mutex); #endif } /* Test if we have a specific job ID still running */ static bool _launch_job_test(uint32_t job_id) { bool found = false; int j; slurm_mutex_lock(&job_state_mutex); for (j = 0; j < JOB_STATE_CNT; j++) { if (job_id == active_job_id[j]) { found = true; break; } } slurm_mutex_unlock(&job_state_mutex); return found; } static void _launch_complete_rm(uint32_t job_id) { int j; slurm_mutex_lock(&job_state_mutex); for (j = 0; j < JOB_STATE_CNT; j++) { if (job_id == active_job_id[j]) break; } if (j < JOB_STATE_CNT && job_id == active_job_id[j]) { for (j = j + 1; j < JOB_STATE_CNT; j++) { active_job_id[j - 1] = active_job_id[j]; } active_job_id[JOB_STATE_CNT - 1] = 0; } slurm_mutex_unlock(&job_state_mutex); 
_launch_complete_log("job remove", job_id); } static void _launch_complete_wait(uint32_t job_id) { int i, j, empty; time_t start = time(NULL); struct timeval now; struct timespec timeout; slurm_mutex_lock(&job_state_mutex); for (i = 0; ; i++) { empty = -1; for (j = 0; j < JOB_STATE_CNT; j++) { if (job_id == active_job_id[j]) break; if ((active_job_id[j] == 0) && (empty == -1)) empty = j; } if (j < JOB_STATE_CNT) /* Found job, ready to return */ break; if (difftime(time(NULL), start) <= 9) { /* Retry for 9 secs */ debug2("wait for launch of job %u before suspending it", job_id); gettimeofday(&now, NULL); timeout.tv_sec = now.tv_sec + 1; timeout.tv_nsec = now.tv_usec * 1000; pthread_cond_timedwait(&job_state_cond,&job_state_mutex, &timeout); continue; } if (empty == -1) /* Discard oldest job */ empty = 0; for (j = empty + 1; j < JOB_STATE_CNT; j++) { active_job_id[j - 1] = active_job_id[j]; } active_job_id[JOB_STATE_CNT - 1] = 0; for (j = 0; j < JOB_STATE_CNT; j++) { if (active_job_id[j] == 0) { active_job_id[j] = job_id; break; } } break; } slurm_mutex_unlock(&job_state_mutex); _launch_complete_log("job wait", job_id); } static bool _requeue_setup_env_fail(void) { static time_t config_update = 0; static bool requeue = false; if (config_update != conf->last_update) { char *sched_params = slurm_get_sched_params(); requeue = (sched_params && (strstr(sched_params, "no_env_cache") || strstr(sched_params, "requeue_setup_env_fail"))); xfree(sched_params); config_update = conf->last_update; } return requeue; }
./CrossVul/dataset_final_sorted/CWE-284/c/good_4770_1
crossvul-cpp_data_bad_5199_0
/* * linux/fs/namei.c * * Copyright (C) 1991, 1992 Linus Torvalds */ /* * Some corrections by tytso. */ /* [Feb 1997 T. Schoebel-Theuer] Complete rewrite of the pathname * lookup logic. */ /* [Feb-Apr 2000, AV] Rewrite to the new namespace architecture. */ #include <linux/init.h> #include <linux/export.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/fs.h> #include <linux/namei.h> #include <linux/pagemap.h> #include <linux/fsnotify.h> #include <linux/personality.h> #include <linux/security.h> #include <linux/ima.h> #include <linux/syscalls.h> #include <linux/mount.h> #include <linux/audit.h> #include <linux/capability.h> #include <linux/file.h> #include <linux/fcntl.h> #include <linux/device_cgroup.h> #include <linux/fs_struct.h> #include <linux/posix_acl.h> #include <linux/hash.h> #include <asm/uaccess.h> #include "internal.h" #include "mount.h" /* [Feb-1997 T. Schoebel-Theuer] * Fundamental changes in the pathname lookup mechanisms (namei) * were necessary because of omirr. The reason is that omirr needs * to know the _real_ pathname, not the user-supplied one, in case * of symlinks (and also when transname replacements occur). * * The new code replaces the old recursive symlink resolution with * an iterative one (in case of non-nested symlink chains). It does * this with calls to <fs>_follow_link(). * As a side effect, dir_namei(), _namei() and follow_link() are now * replaced with a single function lookup_dentry() that can handle all * the special cases of the former code. * * With the new dcache, the pathname is stored at each inode, at least as * long as the refcount of the inode is positive. As a side effect, the * size of the dcache depends on the inode cache and thus is dynamic. * * [29-Apr-1998 C. Scott Ananian] Updated above description of symlink * resolution to correspond with current state of the code. * * Note that the symlink resolution is not *completely* iterative. 
* There is still a significant amount of tail- and mid- recursion in * the algorithm. Also, note that <fs>_readlink() is not used in * lookup_dentry(): lookup_dentry() on the result of <fs>_readlink() * may return different results than <fs>_follow_link(). Many virtual * filesystems (including /proc) exhibit this behavior. */ /* [24-Feb-97 T. Schoebel-Theuer] Side effects caused by new implementation: * New symlink semantics: when open() is called with flags O_CREAT | O_EXCL * and the name already exists in form of a symlink, try to create the new * name indicated by the symlink. The old code always complained that the * name already exists, due to not following the symlink even if its target * is nonexistent. The new semantics affects also mknod() and link() when * the name is a symlink pointing to a non-existent name. * * I don't know which semantics is the right one, since I have no access * to standards. But I found by trial that HP-UX 9.0 has the full "new" * semantics implemented, while SunOS 4.1.1 and Solaris (SunOS 5.4) have the * "old" one. Personally, I think the new semantics is much more logical. * Note that "ln old new" where "new" is a symlink pointing to a non-existing * file does succeed in both HP-UX and SunOs, but not in Solaris * and in the old Linux semantics. */ /* [16-Dec-97 Kevin Buhr] For security reasons, we change some symlink * semantics. See the comments in "open_namei" and "do_link" below. * * [10-Sep-98 Alan Modra] Another symlink change. */ /* [Feb-Apr 2000 AV] Complete rewrite. Rules for symlinks: * inside the path - always follow. * in the last component in creation/removal/renaming - never follow. * if LOOKUP_FOLLOW passed - follow. * if the pathname has trailing slashes - follow. * otherwise - don't follow. * (applied in that order). * * [Jun 2000 AV] Inconsistent behaviour of open() in case if flags==O_CREAT * restored for 2.4. This is the last surviving part of old 4.2BSD bug. 
* During the 2.4 we need to fix the userland stuff depending on it -
 * hopefully we will be able to get rid of that wart in 2.5. So far only
 * XEmacs seems to be relying on it...
 */
/*
 * [Sep 2001 AV] Single-semaphore locking scheme (kudos to David Holland)
 * implemented.  Let's see if raised priority of ->s_vfs_rename_mutex gives
 * any extra contention...
 */

/* In order to reduce some races, while at the same time doing additional
 * checking and hopefully speeding things up, we copy filenames to the
 * kernel data space before using them..
 *
 * POSIX.1 2.4: an empty pathname is invalid (ENOENT).
 * PATH_MAX includes the nul terminator --RR.
 */

/* Bytes available for an embedded name inside struct filename. */
#define EMBEDDED_NAME_MAX	(PATH_MAX - offsetof(struct filename, iname))

/*
 * Copy a user-space pathname into a kernel struct filename.
 * Short names are embedded in the names_cache allocation; names near
 * PATH_MAX get a separate struct filename.  Returns ERR_PTR on error
 * (-ENOMEM, -ENAMETOOLONG, or -ENOENT for an empty path without
 * LOOKUP_EMPTY).
 */
struct filename *
getname_flags(const char __user *filename, int flags, int *empty)
{
	struct filename *result;
	char *kname;
	int len;

	result = audit_reusename(filename);
	if (result)
		return result;

	result = __getname();
	if (unlikely(!result))
		return ERR_PTR(-ENOMEM);

	/*
	 * First, try to embed the struct filename inside the names_cache
	 * allocation
	 */
	kname = (char *)result->iname;
	result->name = kname;

	len = strncpy_from_user(kname, filename, EMBEDDED_NAME_MAX);
	if (unlikely(len < 0)) {
		__putname(result);
		return ERR_PTR(len);
	}

	/*
	 * Uh-oh. We have a name that's approaching PATH_MAX. Allocate a
	 * separate struct filename so we can dedicate the entire
	 * names_cache allocation for the pathname, and re-do the copy from
	 * userland.
	 */
	if (unlikely(len == EMBEDDED_NAME_MAX)) {
		const size_t size = offsetof(struct filename, iname[1]);
		kname = (char *)result;

		/*
		 * size is chosen that way we to guarantee that
		 * result->iname[0] is within the same object and that
		 * kname can't be equal to result->iname, no matter what.
		 */
		result = kzalloc(size, GFP_KERNEL);
		if (unlikely(!result)) {
			__putname(kname);
			return ERR_PTR(-ENOMEM);
		}
		result->name = kname;
		len = strncpy_from_user(kname, filename, PATH_MAX);
		if (unlikely(len < 0)) {
			__putname(kname);
			kfree(result);
			return ERR_PTR(len);
		}
		if (unlikely(len == PATH_MAX)) {
			__putname(kname);
			kfree(result);
			return ERR_PTR(-ENAMETOOLONG);
		}
	}

	result->refcnt = 1;
	/* The empty path is special. */
	if (unlikely(!len)) {
		if (empty)
			*empty = 1;
		if (!(flags & LOOKUP_EMPTY)) {
			putname(result);
			return ERR_PTR(-ENOENT);
		}
	}

	result->uptr = filename;
	result->aname = NULL;
	audit_getname(result);
	return result;
}

/* getname_flags() with no flags and no "empty" out-parameter. */
struct filename *
getname(const char __user * filename)
{
	return getname_flags(filename, 0, NULL);
}

/* Kernel-space counterpart of getname(): wrap an in-kernel string. */
struct filename *
getname_kernel(const char * filename)
{
	struct filename *result;
	int len = strlen(filename) + 1;

	result = __getname();
	if (unlikely(!result))
		return ERR_PTR(-ENOMEM);

	if (len <= EMBEDDED_NAME_MAX) {
		result->name = (char *)result->iname;
	} else if (len <= PATH_MAX) {
		struct filename *tmp;

		tmp = kmalloc(sizeof(*tmp), GFP_KERNEL);
		if (unlikely(!tmp)) {
			__putname(result);
			return ERR_PTR(-ENOMEM);
		}
		tmp->name = (char *)result;
		result = tmp;
	} else {
		__putname(result);
		return ERR_PTR(-ENAMETOOLONG);
	}
	memcpy((char *)result->name, filename, len);
	result->uptr = NULL;
	result->aname = NULL;
	result->refcnt = 1;
	audit_getname(result);

	return result;
}

/* Drop one reference to a struct filename; frees it on the last put. */
void putname(struct filename *name)
{
	BUG_ON(name->refcnt <= 0);

	if (--name->refcnt > 0)
		return;

	if (name->name != name->iname) {
		__putname(name->name);
		kfree(name);
	} else
		__putname(name);
}

/* POSIX-ACL permission check; -EAGAIN means "no ACL, fall back",
 * -ECHILD means "retry in ref-walk mode". */
static int check_acl(struct inode *inode, int mask)
{
#ifdef CONFIG_FS_POSIX_ACL
	struct posix_acl *acl;

	if (mask & MAY_NOT_BLOCK) {
		acl = get_cached_acl_rcu(inode, ACL_TYPE_ACCESS);
		if (!acl)
			return -EAGAIN;
		/* no ->get_acl() calls in RCU mode... */
		if (acl == ACL_NOT_CACHED)
			return -ECHILD;
		return posix_acl_permission(inode, acl, mask & ~MAY_NOT_BLOCK);
	}

	acl = get_acl(inode, ACL_TYPE_ACCESS);
	if (IS_ERR(acl))
		return PTR_ERR(acl);
	if (acl) {
		int error = posix_acl_permission(inode, acl, mask);
		posix_acl_release(acl);
		return error;
	}
#endif

	return -EAGAIN;
}

/*
 * This does the basic permission checking
 */
static int acl_permission_check(struct inode *inode, int mask)
{
	unsigned int mode = inode->i_mode;

	if (likely(uid_eq(current_fsuid(), inode->i_uid)))
		mode >>= 6;
	else {
		if (IS_POSIXACL(inode) && (mode & S_IRWXG)) {
			int error = check_acl(inode, mask);
			if (error != -EAGAIN)
				return error;
		}

		if (in_group_p(inode->i_gid))
			mode >>= 3;
	}

	/*
	 * If the DACs are ok we don't need any capability check.
	 */
	if ((mask & ~mode & (MAY_READ | MAY_WRITE | MAY_EXEC)) == 0)
		return 0;
	return -EACCES;
}

/**
 * generic_permission -  check for access rights on a Posix-like filesystem
 * @inode:	inode to check access rights for
 * @mask:	right to check for (%MAY_READ, %MAY_WRITE, %MAY_EXEC, ...)
 *
 * Used to check for read/write/execute permissions on a file.
 * We use "fsuid" for this, letting us set arbitrary permissions
 * for filesystem access without changing the "normal" uids which
 * are used for other things.
 *
 * generic_permission is rcu-walk aware. It returns -ECHILD in case an rcu-walk
 * request cannot be satisfied (eg. requires blocking or too much complexity).
 * It would then be called again in ref-walk mode.
 */
int generic_permission(struct inode *inode, int mask)
{
	int ret;

	/*
	 * Do the basic permission checks.
	 */
	ret = acl_permission_check(inode, mask);
	if (ret != -EACCES)
		return ret;

	if (S_ISDIR(inode->i_mode)) {
		/* DACs are overridable for directories */
		if (capable_wrt_inode_uidgid(inode, CAP_DAC_OVERRIDE))
			return 0;
		if (!(mask & MAY_WRITE))
			if (capable_wrt_inode_uidgid(inode,
						     CAP_DAC_READ_SEARCH))
				return 0;
		return -EACCES;
	}
	/*
	 * Read/write DACs are always overridable.
	 * Executable DACs are overridable when there is
	 * at least one exec bit set.
	 */
	if (!(mask & MAY_EXEC) || (inode->i_mode & S_IXUGO))
		if (capable_wrt_inode_uidgid(inode, CAP_DAC_OVERRIDE))
			return 0;

	/*
	 * Searching includes executable on directories, else just read.
	 */
	mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
	if (mask == MAY_READ)
		if (capable_wrt_inode_uidgid(inode, CAP_DAC_READ_SEARCH))
			return 0;

	return -EACCES;
}
EXPORT_SYMBOL(generic_permission);

/*
 * We _really_ want to just do "generic_permission()" without
 * even looking at the inode->i_op values. So we keep a cache
 * flag in inode->i_opflags, that says "this has not special
 * permission function, use the fast case".
 */
static inline int do_inode_permission(struct inode *inode, int mask)
{
	if (unlikely(!(inode->i_opflags & IOP_FASTPERM))) {
		if (likely(inode->i_op->permission))
			return inode->i_op->permission(inode, mask);

		/* This gets set once for the inode lifetime */
		spin_lock(&inode->i_lock);
		inode->i_opflags |= IOP_FASTPERM;
		spin_unlock(&inode->i_lock);
	}
	return generic_permission(inode, mask);
}

/**
 * __inode_permission - Check for access rights to a given inode
 * @inode: Inode to check permission on
 * @mask: Right to check for (%MAY_READ, %MAY_WRITE, %MAY_EXEC)
 *
 * Check for read/write/execute permissions on an inode.
 *
 * When checking for MAY_APPEND, MAY_WRITE must also be set in @mask.
 *
 * This does not check for a read-only file system.  You probably want
 * inode_permission().
 */
int __inode_permission(struct inode *inode, int mask)
{
	int retval;

	if (unlikely(mask & MAY_WRITE)) {
		/*
		 * Nobody gets write access to an immutable file.
		 */
		if (IS_IMMUTABLE(inode))
			return -EACCES;
	}

	retval = do_inode_permission(inode, mask);
	if (retval)
		return retval;

	retval = devcgroup_inode_permission(inode, mask);
	if (retval)
		return retval;

	return security_inode_permission(inode, mask);
}
EXPORT_SYMBOL(__inode_permission);

/**
 * sb_permission - Check superblock-level permissions
 * @sb: Superblock of inode to check permission on
 * @inode: Inode to check permission on
 * @mask: Right to check for (%MAY_READ, %MAY_WRITE, %MAY_EXEC)
 *
 * Separate out file-system wide checks from inode-specific permission checks.
 */
static int sb_permission(struct super_block *sb, struct inode *inode, int mask)
{
	if (unlikely(mask & MAY_WRITE)) {
		umode_t mode = inode->i_mode;

		/* Nobody gets write access to a read-only fs. */
		if ((sb->s_flags & MS_RDONLY) &&
		    (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)))
			return -EROFS;
	}
	return 0;
}

/**
 * inode_permission - Check for access rights to a given inode
 * @inode: Inode to check permission on
 * @mask: Right to check for (%MAY_READ, %MAY_WRITE, %MAY_EXEC)
 *
 * Check for read/write/execute permissions on an inode.  We use fs[ug]id for
 * this, letting us set arbitrary permissions for filesystem access without
 * changing the "normal" UIDs which are used for other things.
 *
 * When checking for MAY_APPEND, MAY_WRITE must also be set in @mask.
 */
int inode_permission(struct inode *inode, int mask)
{
	int retval;

	retval = sb_permission(inode->i_sb, inode, mask);
	if (retval)
		return retval;
	return __inode_permission(inode, mask);
}
EXPORT_SYMBOL(inode_permission);

/**
 * path_get - get a reference to a path
 * @path: path to get the reference to
 *
 * Given a path increment the reference count to the dentry and the vfsmount.
 */
void path_get(const struct path *path)
{
	mntget(path->mnt);
	dget(path->dentry);
}
EXPORT_SYMBOL(path_get);

/**
 * path_put - put a reference to a path
 * @path: path to put the reference to
 *
 * Given a path decrement the reference count to the dentry and the vfsmount.
*/ void path_put(const struct path *path) { dput(path->dentry); mntput(path->mnt); } EXPORT_SYMBOL(path_put); #define EMBEDDED_LEVELS 2 struct nameidata { struct path path; struct qstr last; struct path root; struct inode *inode; /* path.dentry.d_inode */ unsigned int flags; unsigned seq, m_seq; int last_type; unsigned depth; int total_link_count; struct saved { struct path link; struct delayed_call done; const char *name; unsigned seq; } *stack, internal[EMBEDDED_LEVELS]; struct filename *name; struct nameidata *saved; struct inode *link_inode; unsigned root_seq; int dfd; }; static void set_nameidata(struct nameidata *p, int dfd, struct filename *name) { struct nameidata *old = current->nameidata; p->stack = p->internal; p->dfd = dfd; p->name = name; p->total_link_count = old ? old->total_link_count : 0; p->saved = old; current->nameidata = p; } static void restore_nameidata(void) { struct nameidata *now = current->nameidata, *old = now->saved; current->nameidata = old; if (old) old->total_link_count = now->total_link_count; if (now->stack != now->internal) kfree(now->stack); } static int __nd_alloc_stack(struct nameidata *nd) { struct saved *p; if (nd->flags & LOOKUP_RCU) { p= kmalloc(MAXSYMLINKS * sizeof(struct saved), GFP_ATOMIC); if (unlikely(!p)) return -ECHILD; } else { p= kmalloc(MAXSYMLINKS * sizeof(struct saved), GFP_KERNEL); if (unlikely(!p)) return -ENOMEM; } memcpy(p, nd->internal, sizeof(nd->internal)); nd->stack = p; return 0; } /** * path_connected - Verify that a path->dentry is below path->mnt.mnt_root * @path: nameidate to verify * * Rename can sometimes move a file or directory outside of a bind * mount, path_connected allows those cases to be detected. 
*/ static bool path_connected(const struct path *path) { struct vfsmount *mnt = path->mnt; /* Only bind mounts can have disconnected paths */ if (mnt->mnt_root == mnt->mnt_sb->s_root) return true; return is_subdir(path->dentry, mnt->mnt_root); } static inline int nd_alloc_stack(struct nameidata *nd) { if (likely(nd->depth != EMBEDDED_LEVELS)) return 0; if (likely(nd->stack != nd->internal)) return 0; return __nd_alloc_stack(nd); } static void drop_links(struct nameidata *nd) { int i = nd->depth; while (i--) { struct saved *last = nd->stack + i; do_delayed_call(&last->done); clear_delayed_call(&last->done); } } static void terminate_walk(struct nameidata *nd) { drop_links(nd); if (!(nd->flags & LOOKUP_RCU)) { int i; path_put(&nd->path); for (i = 0; i < nd->depth; i++) path_put(&nd->stack[i].link); if (nd->root.mnt && !(nd->flags & LOOKUP_ROOT)) { path_put(&nd->root); nd->root.mnt = NULL; } } else { nd->flags &= ~LOOKUP_RCU; if (!(nd->flags & LOOKUP_ROOT)) nd->root.mnt = NULL; rcu_read_unlock(); } nd->depth = 0; } /* path_put is needed afterwards regardless of success or failure */ static bool legitimize_path(struct nameidata *nd, struct path *path, unsigned seq) { int res = __legitimize_mnt(path->mnt, nd->m_seq); if (unlikely(res)) { if (res > 0) path->mnt = NULL; path->dentry = NULL; return false; } if (unlikely(!lockref_get_not_dead(&path->dentry->d_lockref))) { path->dentry = NULL; return false; } return !read_seqcount_retry(&path->dentry->d_seq, seq); } static bool legitimize_links(struct nameidata *nd) { int i; for (i = 0; i < nd->depth; i++) { struct saved *last = nd->stack + i; if (unlikely(!legitimize_path(nd, &last->link, last->seq))) { drop_links(nd); nd->depth = i + 1; return false; } } return true; } /* * Path walking has 2 modes, rcu-walk and ref-walk (see * Documentation/filesystems/path-lookup.txt). 
In situations when we can't
 * continue in RCU mode, we attempt to drop out of rcu-walk mode and grab
 * normal reference counts on dentries and vfsmounts to transition to ref-walk
 * mode.  Refcounts are grabbed at the last known good point before rcu-walk
 * got stuck, so ref-walk may continue from there. If this is not successful
 * (eg. a seqcount has changed), then failure is returned and it's up to caller
 * to restart the path walk from the beginning in ref-walk mode.
 */

/**
 * unlazy_walk - try to switch to ref-walk mode.
 * @nd: nameidata pathwalk data
 * @dentry: child of nd->path.dentry or NULL
 * @seq: seq number to check dentry against
 * Returns: 0 on success, -ECHILD on failure
 *
 * unlazy_walk attempts to legitimize the current nd->path, nd->root and dentry
 * for ref-walk mode.  @dentry must be a path found by a do_lookup call on
 * @nd or NULL.  Must be called from rcu-walk context.
 * Nothing should touch nameidata between unlazy_walk() failure and
 * terminate_walk().
 */
static int unlazy_walk(struct nameidata *nd, struct dentry *dentry, unsigned seq)
{
	struct dentry *parent = nd->path.dentry;

	BUG_ON(!(nd->flags & LOOKUP_RCU));

	nd->flags &= ~LOOKUP_RCU;
	if (unlikely(!legitimize_links(nd)))
		goto out2;
	if (unlikely(!legitimize_mnt(nd->path.mnt, nd->m_seq)))
		goto out2;
	if (unlikely(!lockref_get_not_dead(&parent->d_lockref)))
		goto out1;

	/*
	 * For a negative lookup, the lookup sequence point is the parents
	 * sequence point, and it only needs to revalidate the parent dentry.
	 *
	 * For a positive lookup, we need to move both the parent and the
	 * dentry from the RCU domain to be properly refcounted. And the
	 * sequence number in the dentry validates *both* dentry counters,
	 * since we checked the sequence number of the parent after we got
	 * the child sequence number. So we know the parent must still
	 * be valid if the child sequence number is still valid.
	 */
	if (!dentry) {
		if (read_seqcount_retry(&parent->d_seq, nd->seq))
			goto out;
		BUG_ON(nd->inode != parent->d_inode);
	} else {
		if (!lockref_get_not_dead(&dentry->d_lockref))
			goto out;
		if (read_seqcount_retry(&dentry->d_seq, seq))
			goto drop_dentry;
	}

	/*
	 * Sequence counts matched. Now make sure that the root is
	 * still valid and get it if required.
	 */
	if (nd->root.mnt && !(nd->flags & LOOKUP_ROOT)) {
		if (unlikely(!legitimize_path(nd, &nd->root, nd->root_seq))) {
			rcu_read_unlock();
			dput(dentry);
			return -ECHILD;
		}
	}

	rcu_read_unlock();
	return 0;

drop_dentry:
	rcu_read_unlock();
	dput(dentry);
	goto drop_root_mnt;
out2:
	nd->path.mnt = NULL;
out1:
	nd->path.dentry = NULL;
out:
	rcu_read_unlock();
drop_root_mnt:
	if (!(nd->flags & LOOKUP_ROOT))
		nd->root.mnt = NULL;
	return -ECHILD;
}

/* Legitimize a saved symlink path and drop out of RCU mode; on any
 * failure every rcu-walk reference is abandoned and -ECHILD returned. */
static int unlazy_link(struct nameidata *nd, struct path *link, unsigned seq)
{
	if (unlikely(!legitimize_path(nd, link, seq))) {
		drop_links(nd);
		nd->depth = 0;
		nd->flags &= ~LOOKUP_RCU;
		nd->path.mnt = NULL;
		nd->path.dentry = NULL;
		if (!(nd->flags & LOOKUP_ROOT))
			nd->root.mnt = NULL;
		rcu_read_unlock();
	} else if (likely(unlazy_walk(nd, NULL, 0)) == 0) {
		/* NOTE(review): the likely() paren placement wraps only the
		 * call, not the comparison — behavior is unchanged, only the
		 * branch-prediction hint is misplaced; upstream writes
		 * likely(expr == 0).  TODO confirm before "fixing". */
		return 0;
	}
	path_put(link);
	return -ECHILD;
}

static inline int d_revalidate(struct dentry *dentry, unsigned int flags)
{
	return dentry->d_op->d_revalidate(dentry, flags);
}

/**
 * complete_walk - successful completion of path walk
 * @nd:  pointer nameidata
 *
 * If we had been in RCU mode, drop out of it and legitimize nd->path.
 * Revalidate the final result, unless we'd already done that during
 * the path walk or the filesystem doesn't ask for it.  Return 0 on
 * success, -error on failure.  In case of failure caller does not
 * need to drop nd->path.
 */
static int complete_walk(struct nameidata *nd)
{
	struct dentry *dentry = nd->path.dentry;
	int status;

	if (nd->flags & LOOKUP_RCU) {
		if (!(nd->flags & LOOKUP_ROOT))
			nd->root.mnt = NULL;
		if (unlikely(unlazy_walk(nd, NULL, 0)))
			return -ECHILD;
	}

	/* only a walk that jumped (mount traversal, absolute symlink, "..")
	 * needs the weak revalidation below */
	if (likely(!(nd->flags & LOOKUP_JUMPED)))
		return 0;

	if (likely(!(dentry->d_flags & DCACHE_OP_WEAK_REVALIDATE)))
		return 0;

	status = dentry->d_op->d_weak_revalidate(dentry, nd->flags);
	if (status > 0)
		return 0;

	if (!status)
		status = -ESTALE;

	return status;
}

/*
 * Sample the task's fs root into nd->root.  In rcu-walk mode nothing is
 * pinned; the fs->seq retry loop plus the recorded d_seq make the pair
 * verifiable later.  In ref-walk mode real references are taken.
 */
static void set_root(struct nameidata *nd)
{
	struct fs_struct *fs = current->fs;

	if (nd->flags & LOOKUP_RCU) {
		unsigned seq;

		do {
			seq = read_seqcount_begin(&fs->seq);
			nd->root = fs->root;
			nd->root_seq = __read_seqcount_begin(&nd->root.dentry->d_seq);
		} while (read_seqcount_retry(&fs->seq, seq));
	} else {
		get_fs_root(fs, &nd->root);
	}
}

/* Drop refs on @path, but only drop the mount if it differs from nd's. */
static void path_put_conditional(struct path *path, struct nameidata *nd)
{
	dput(path->dentry);
	if (path->mnt != nd->path.mnt)
		mntput(path->mnt);
}

/*
 * Move nd->path to @path, releasing the old references in ref-walk mode
 * (nothing to release under RCU).  The mount ref is reused when both
 * paths share a mount.
 */
static inline void path_to_nameidata(const struct path *path,
					struct nameidata *nd)
{
	if (!(nd->flags & LOOKUP_RCU)) {
		dput(nd->path.dentry);
		if (nd->path.mnt != path->mnt)
			mntput(nd->path.mnt);
	}
	nd->path.mnt = path->mnt;
	nd->path.dentry = path->dentry;
}

/*
 * Restart the walk from nd->root (absolute symlink or leading '/').
 * Under RCU the root's seq is re-checked; -ECHILD forces a ref-walk
 * retry.
 */
static int nd_jump_root(struct nameidata *nd)
{
	if (nd->flags & LOOKUP_RCU) {
		struct dentry *d;
		nd->path = nd->root;
		d = nd->path.dentry;
		nd->inode = d->d_inode;
		nd->seq = nd->root_seq;
		if (unlikely(read_seqcount_retry(&d->d_seq, nd->seq)))
			return -ECHILD;
	} else {
		path_put(&nd->path);
		nd->path = nd->root;
		path_get(&nd->path);
		nd->inode = nd->path.dentry->d_inode;
	}
	nd->flags |= LOOKUP_JUMPED;
	return 0;
}

/*
 * Helper to directly jump to a known parsed path from ->get_link,
 * caller must have taken a reference to path beforehand.
 */
void nd_jump_link(struct path *path)
{
	struct nameidata *nd = current->nameidata;
	path_put(&nd->path);

	nd->path = *path;
	nd->inode = nd->path.dentry->d_inode;
	nd->flags |= LOOKUP_JUMPED;
}

/* Pop the most recent symlink off nd->stack and release it. */
static inline void put_link(struct nameidata *nd)
{
	struct saved *last = nd->stack + --nd->depth;
	do_delayed_call(&last->done);
	if (!(nd->flags & LOOKUP_RCU))
		path_put(&last->link);
}

int sysctl_protected_symlinks __read_mostly = 0;
int sysctl_protected_hardlinks __read_mostly = 0;

/**
 * may_follow_link - Check symlink following for unsafe situations
 * @nd: nameidata pathwalk data
 *
 * In the case of the sysctl_protected_symlinks sysctl being enabled,
 * CAP_DAC_OVERRIDE needs to be specifically ignored if the symlink is
 * in a sticky world-writable directory. This is to protect privileged
 * processes from failing races against path names that may change out
 * from under them by way of other users creating malicious symlinks.
 * It will permit symlinks to be followed only when outside a sticky
 * world-writable directory, or when the uid of the symlink and follower
 * match, or when the directory owner matches the symlink's owner.
 *
 * Returns 0 if following the symlink is allowed, -ve on error.
 */
static inline int may_follow_link(struct nameidata *nd)
{
	const struct inode *inode;
	const struct inode *parent;

	if (!sysctl_protected_symlinks)
		return 0;

	/* Allowed if owner and follower match. */
	inode = nd->link_inode;
	if (uid_eq(current_cred()->fsuid, inode->i_uid))
		return 0;

	/* Allowed if parent directory not sticky and world-writable. */
	parent = nd->inode;
	if ((parent->i_mode & (S_ISVTX|S_IWOTH)) != (S_ISVTX|S_IWOTH))
		return 0;

	/* Allowed if parent directory and link owner match. */
	if (uid_eq(parent->i_uid, inode->i_uid))
		return 0;

	/* audit may sleep; can't do it under RCU — retry in ref-walk */
	if (nd->flags & LOOKUP_RCU)
		return -ECHILD;

	audit_log_link_denied("follow_link", &nd->stack[0].link);
	return -EACCES;
}

/**
 * safe_hardlink_source - Check for safe hardlink conditions
 * @inode: the source inode to hardlink from
 *
 * Return false if at least one of the following conditions:
 *    - inode is not a regular file
 *    - inode is setuid
 *    - inode is setgid and group-exec
 *    - access failure for read and write
 *
 * Otherwise returns true.
 */
static bool safe_hardlink_source(struct inode *inode)
{
	umode_t mode = inode->i_mode;

	/* Special files should not get pinned to the filesystem. */
	if (!S_ISREG(mode))
		return false;

	/* Setuid files should not get pinned to the filesystem. */
	if (mode & S_ISUID)
		return false;

	/* Executable setgid files should not get pinned to the filesystem. */
	if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))
		return false;

	/* Hardlinking to unreadable or unwritable sources is dangerous. */
	if (inode_permission(inode, MAY_READ | MAY_WRITE))
		return false;

	return true;
}

/**
 * may_linkat - Check permissions for creating a hardlink
 * @link: the source to hardlink from
 *
 * Block hardlink when all of:
 *  - sysctl_protected_hardlinks enabled
 *  - fsuid does not match inode
 *  - hardlink source is unsafe (see safe_hardlink_source() above)
 *  - not CAP_FOWNER in a namespace with the inode owner uid mapped
 *
 * Returns 0 if successful, -ve on error.
 */
static int may_linkat(struct path *link)
{
	struct inode *inode;

	if (!sysctl_protected_hardlinks)
		return 0;

	inode = link->dentry->d_inode;

	/* Source inode owner (or CAP_FOWNER) can hardlink all they like,
	 * otherwise, it must be a safe source.
	 */
	if (inode_owner_or_capable(inode) || safe_hardlink_source(inode))
		return 0;

	audit_log_link_denied("linkat", link);
	return -EPERM;
}

/*
 * Fetch the body of the topmost symlink on nd->stack.  Handles atime,
 * LSM checks, inline link bodies (i_link) and the ->get_link() method,
 * including the RCU -> ref-walk fallback when the fs can't supply the
 * body without blocking.  An absolute link restarts the walk from root.
 * Returns the remaining component string, NULL if the link consumed
 * itself (pure "/" body), or ERR_PTR on failure.
 */
static __always_inline
const char *get_link(struct nameidata *nd)
{
	struct saved *last = nd->stack + nd->depth - 1;
	struct dentry *dentry = last->link.dentry;
	struct inode *inode = nd->link_inode;
	int error;
	const char *res;

	if (!(nd->flags & LOOKUP_RCU)) {
		touch_atime(&last->link);
		cond_resched();
	} else if (atime_needs_update(&last->link, inode)) {
		/* atime update may block — drop out of rcu-walk first */
		if (unlikely(unlazy_walk(nd, NULL, 0)))
			return ERR_PTR(-ECHILD);
		touch_atime(&last->link);
	}

	error = security_inode_follow_link(dentry, inode,
					   nd->flags & LOOKUP_RCU);
	if (unlikely(error))
		return ERR_PTR(error);

	nd->last_type = LAST_BIND;
	res = inode->i_link;
	if (!res) {
		const char * (*get)(struct dentry *, struct inode *,
				struct delayed_call *);
		get = inode->i_op->get_link;
		if (nd->flags & LOOKUP_RCU) {
			res = get(NULL, inode, &last->done);
			if (res == ERR_PTR(-ECHILD)) {
				/* fs wants to block: legitimize and retry */
				if (unlikely(unlazy_walk(nd, NULL, 0)))
					return ERR_PTR(-ECHILD);
				res = get(dentry, inode, &last->done);
			}
		} else {
			res = get(dentry, inode, &last->done);
		}
		if (IS_ERR_OR_NULL(res))
			return res;
	}
	if (*res == '/') {
		if (!nd->root.mnt)
			set_root(nd);
		if (unlikely(nd_jump_root(nd)))
			return ERR_PTR(-ECHILD);
		while (unlikely(*++res == '/'))
			;
	}
	if (!*res)
		res = NULL;
	return res;
}

/*
 * follow_up - Find the mountpoint of path's vfsmount
 *
 * Given a path, find the mountpoint of its source file system.
 * Replace @path with the path of the mountpoint in the parent mount.
 * Up is towards /.
 *
 * Return 1 if we went up a level and 0 if we were already at the
 * root.
 */
int follow_up(struct path *path)
{
	struct mount *mnt = real_mount(path->mnt);
	struct mount *parent;
	struct dentry *mountpoint;

	/* mount_lock read-side keeps the mount tree stable while we peek */
	read_seqlock_excl(&mount_lock);
	parent = mnt->mnt_parent;
	if (parent == mnt) {
		read_sequnlock_excl(&mount_lock);
		return 0;
	}
	mntget(&parent->mnt);
	mountpoint = dget(mnt->mnt_mountpoint);
	read_sequnlock_excl(&mount_lock);
	dput(path->dentry);
	path->dentry = mountpoint;
	mntput(path->mnt);
	path->mnt = &parent->mnt;
	return 1;
}
EXPORT_SYMBOL(follow_up);

/*
 * Perform an automount
 * - return -EISDIR to tell follow_managed() to stop and return the path we
 *   were called with.
 */
static int follow_automount(struct path *path, struct nameidata *nd,
			    bool *need_mntput)
{
	struct vfsmount *mnt;
	int err;

	if (!path->dentry->d_op || !path->dentry->d_op->d_automount)
		return -EREMOTE;

	/* We don't want to mount if someone's just doing a stat -
	 * unless they're stat'ing a directory and appended a '/' to
	 * the name.
	 *
	 * We do, however, want to mount if someone wants to open or
	 * create a file of any type under the mountpoint, wants to
	 * traverse through the mountpoint or wants to open the
	 * mounted directory.  Also, autofs may mark negative dentries
	 * as being automount points.  These will need the attentions
	 * of the daemon to instantiate them before they can be used.
	 */
	if (!(nd->flags & (LOOKUP_PARENT | LOOKUP_DIRECTORY |
			   LOOKUP_OPEN | LOOKUP_CREATE | LOOKUP_AUTOMOUNT)) &&
	    path->dentry->d_inode)
		return -EISDIR;

	/* automounts count against the symlink limit to bound loops */
	nd->total_link_count++;
	if (nd->total_link_count >= 40)
		return -ELOOP;

	mnt = path->dentry->d_op->d_automount(path);
	if (IS_ERR(mnt)) {
		/*
		 * The filesystem is allowed to return -EISDIR here to indicate
		 * it doesn't want to automount.  For instance, autofs would do
		 * this so that its userspace daemon can mount on this dentry.
		 *
		 * However, we can only permit this if it's a terminal point in
		 * the path being looked up; if it wasn't then the remainder of
		 * the path is inaccessible and we should say so.
		 */
		if (PTR_ERR(mnt) == -EISDIR && (nd->flags & LOOKUP_PARENT))
			return -EREMOTE;
		return PTR_ERR(mnt);
	}

	if (!mnt) /* mount collision */
		return 0;

	if (!*need_mntput) {
		/* lock_mount() may release path->mnt on error */
		mntget(path->mnt);
		*need_mntput = true;
	}
	err = finish_automount(mnt, path);

	switch (err) {
	case -EBUSY:
		/* Someone else made a mount here whilst we were busy */
		return 0;
	case 0:
		path_put(path);
		path->mnt = mnt;
		path->dentry = dget(mnt->mnt_root);
		return 0;
	default:
		return err;
	}

}

/*
 * Handle a dentry that is managed in some way.
 * - Flagged for transit management (autofs)
 * - Flagged as mountpoint
 * - Flagged as automount point
 *
 * This may only be called in refwalk mode.
 *
 * Serialization is taken care of in namespace.c
 */
static int follow_managed(struct path *path, struct nameidata *nd)
{
	struct vfsmount *mnt = path->mnt; /* held by caller, must be left alone */
	unsigned managed;
	bool need_mntput = false;
	int ret = 0;

	/* Given that we're not holding a lock here, we retain the value in a
	 * local variable for each dentry as we look at it so that we don't see
	 * the components of that value change under us */
	while (managed = ACCESS_ONCE(path->dentry->d_flags),
	       managed &= DCACHE_MANAGED_DENTRY,
	       unlikely(managed != 0)) {
		/* Allow the filesystem to manage the transit without i_mutex
		 * being held. */
		if (managed & DCACHE_MANAGE_TRANSIT) {
			BUG_ON(!path->dentry->d_op);
			BUG_ON(!path->dentry->d_op->d_manage);
			ret = path->dentry->d_op->d_manage(path->dentry, false);
			if (ret < 0)
				break;
		}

		/* Transit to a mounted filesystem.
		 */
		if (managed & DCACHE_MOUNTED) {
			struct vfsmount *mounted = lookup_mnt(path);
			if (mounted) {
				dput(path->dentry);
				if (need_mntput)
					mntput(path->mnt);
				path->mnt = mounted;
				path->dentry = dget(mounted->mnt_root);
				need_mntput = true;
				continue;
			}

			/* Something is mounted on this dentry in another
			 * namespace and/or whatever was mounted there in this
			 * namespace got unmounted before lookup_mnt() could
			 * get it */
		}

		/* Handle an automount point */
		if (managed & DCACHE_NEED_AUTOMOUNT) {
			ret = follow_automount(path, nd, &need_mntput);
			if (ret < 0)
				break;
			continue;
		}

		/* We didn't change the current path point */
		break;
	}

	if (need_mntput && path->mnt == mnt)
		mntput(path->mnt);
	/* -EISDIR from d_manage/d_automount means "stop here": success */
	if (ret == -EISDIR || !ret)
		ret = 1;
	if (need_mntput)
		nd->flags |= LOOKUP_JUMPED;
	if (unlikely(ret < 0))
		path_put_conditional(path, nd);
	return ret;
}

/* Cross a single mount on @path, if any; returns 1 if it crossed. */
int follow_down_one(struct path *path)
{
	struct vfsmount *mounted;

	mounted = lookup_mnt(path);
	if (mounted) {
		dput(path->dentry);
		mntput(path->mnt);
		path->mnt = mounted;
		path->dentry = dget(mounted->mnt_root);
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL(follow_down_one);

/* d_manage() in rcu-walk mode; 0 when there is nothing to manage. */
static inline int managed_dentry_rcu(struct dentry *dentry)
{
	return (dentry->d_flags & DCACHE_MANAGE_TRANSIT) ?
		dentry->d_op->d_manage(dentry, true) : 0;
}

/*
 * Try to skip to top of mountpoint pile in rcuwalk mode.  Fail if
 * we meet a managed dentry that would need blocking.
 */
static bool __follow_mount_rcu(struct nameidata *nd, struct path *path,
			       struct inode **inode, unsigned *seqp)
{
	for (;;) {
		struct mount *mounted;
		/*
		 * Don't forget we might have a non-mountpoint managed dentry
		 * that wants to block transit.
		 */
		switch (managed_dentry_rcu(path->dentry)) {
		case -ECHILD:
		default:
			return false;
		case -EISDIR:
			return true;
		case 0:
			break;
		}

		if (!d_mountpoint(path->dentry))
			return !(path->dentry->d_flags & DCACHE_NEED_AUTOMOUNT);

		mounted = __lookup_mnt(path->mnt, path->dentry);
		if (!mounted)
			break;
		path->mnt = &mounted->mnt;
		path->dentry = mounted->mnt.mnt_root;
		nd->flags |= LOOKUP_JUMPED;
		*seqp = read_seqcount_begin(&path->dentry->d_seq);
		/*
		 * Update the inode too. We don't need to re-check the
		 * dentry sequence number here after this d_inode read,
		 * because a mount-point is always pinned.
		 */
		*inode = path->dentry->d_inode;
	}
	return !read_seqretry(&mount_lock, nd->m_seq) &&
		!(path->dentry->d_flags & DCACHE_NEED_AUTOMOUNT);
}

/*
 * Handle ".." in rcu-walk mode: climb dentry parents within a mount and
 * hop to the mountpoint's parent mount when at a mount root, verifying
 * seqcounts at every step; then descend through any mounts stacked on
 * the result.  -ECHILD forces a ref-walk retry; -ENOENT means the path
 * became disconnected (escaped a bind mount's subtree).
 */
static int follow_dotdot_rcu(struct nameidata *nd)
{
	struct inode *inode = nd->inode;

	while (1) {
		if (path_equal(&nd->path, &nd->root))
			break;
		if (nd->path.dentry != nd->path.mnt->mnt_root) {
			struct dentry *old = nd->path.dentry;
			struct dentry *parent = old->d_parent;
			unsigned seq;

			inode = parent->d_inode;
			seq = read_seqcount_begin(&parent->d_seq);
			if (unlikely(read_seqcount_retry(&old->d_seq, nd->seq)))
				return -ECHILD;
			nd->path.dentry = parent;
			nd->seq = seq;
			if (unlikely(!path_connected(&nd->path)))
				return -ENOENT;
			break;
		} else {
			struct mount *mnt = real_mount(nd->path.mnt);
			struct mount *mparent = mnt->mnt_parent;
			struct dentry *mountpoint = mnt->mnt_mountpoint;
			struct inode *inode2 = mountpoint->d_inode;
			unsigned seq = read_seqcount_begin(&mountpoint->d_seq);
			if (unlikely(read_seqretry(&mount_lock, nd->m_seq)))
				return -ECHILD;
			if (&mparent->mnt == nd->path.mnt)
				break;
			/* we know that mountpoint was pinned */
			nd->path.dentry = mountpoint;
			nd->path.mnt = &mparent->mnt;
			inode = inode2;
			nd->seq = seq;
		}
	}
	while (unlikely(d_mountpoint(nd->path.dentry))) {
		struct mount *mounted;
		mounted = __lookup_mnt(nd->path.mnt, nd->path.dentry);
		if (unlikely(read_seqretry(&mount_lock, nd->m_seq)))
			return -ECHILD;
		if (!mounted)
			break;
		nd->path.mnt = &mounted->mnt;
		nd->path.dentry = mounted->mnt.mnt_root;
		inode = nd->path.dentry->d_inode;
		nd->seq = read_seqcount_begin(&nd->path.dentry->d_seq);
	}
	nd->inode = inode;
	return 0;
}

/*
 * Follow down to the covering mount currently visible to userspace.  At each
 * point, the filesystem owning that dentry may be queried as to whether the
 * caller is permitted to proceed or not.
 */
int follow_down(struct path *path)
{
	unsigned managed;
	int ret;

	while (managed = ACCESS_ONCE(path->dentry->d_flags),
	       unlikely(managed & DCACHE_MANAGED_DENTRY)) {
		/* Allow the filesystem to manage the transit without i_mutex
		 * being held.
		 *
		 * We indicate to the filesystem if someone is trying to mount
		 * something here.  This gives autofs the chance to deny anyone
		 * other than its daemon the right to mount on its
		 * superstructure.
		 *
		 * The filesystem may sleep at this point.
		 */
		if (managed & DCACHE_MANAGE_TRANSIT) {
			BUG_ON(!path->dentry->d_op);
			BUG_ON(!path->dentry->d_op->d_manage);
			ret = path->dentry->d_op->d_manage(
				path->dentry, false);
			if (ret < 0)
				return ret == -EISDIR ? 0 : ret;
		}

		/* Transit to a mounted filesystem. */
		if (managed & DCACHE_MOUNTED) {
			struct vfsmount *mounted = lookup_mnt(path);
			if (!mounted)
				break;
			dput(path->dentry);
			mntput(path->mnt);
			path->mnt = mounted;
			path->dentry = dget(mounted->mnt_root);
			continue;
		}

		/* Don't handle automount points here */
		break;
	}
	return 0;
}
EXPORT_SYMBOL(follow_down);

/*
 * Skip to top of mountpoint pile in refwalk mode for follow_dotdot()
 */
static void follow_mount(struct path *path)
{
	while (d_mountpoint(path->dentry)) {
		struct vfsmount *mounted = lookup_mnt(path);
		if (!mounted)
			break;
		dput(path->dentry);
		mntput(path->mnt);
		path->mnt = mounted;
		path->dentry = dget(mounted->mnt_root);
	}
}

/* Ref-walk counterpart of follow_dotdot_rcu(). */
static int follow_dotdot(struct nameidata *nd)
{
	while(1) {
		struct dentry *old = nd->path.dentry;

		if (nd->path.dentry == nd->root.dentry &&
		    nd->path.mnt == nd->root.mnt) {
			break;
		}
		if (nd->path.dentry != nd->path.mnt->mnt_root) {
			/* rare case of legitimate dget_parent()...
			 */
			nd->path.dentry = dget_parent(nd->path.dentry);
			dput(old);
			if (unlikely(!path_connected(&nd->path)))
				return -ENOENT;
			break;
		}
		if (!follow_up(&nd->path))
			break;
	}
	follow_mount(&nd->path);
	nd->inode = nd->path.dentry->d_inode;
	return 0;
}

/*
 * This looks up the name in dcache, possibly revalidates the old dentry and
 * allocates a new one if not found or not valid.  In the need_lookup argument
 * returns whether i_op->lookup is necessary.
 */
static struct dentry *lookup_dcache(const struct qstr *name,
				    struct dentry *dir,
				    unsigned int flags)
{
	struct dentry *dentry;
	int error;

	dentry = d_lookup(dir, name);
	if (dentry) {
		if (dentry->d_flags & DCACHE_OP_REVALIDATE) {
			error = d_revalidate(dentry, flags);
			if (unlikely(error <= 0)) {
				/* 0 means "stale": drop it from the dcache */
				if (!error)
					d_invalidate(dentry);
				dput(dentry);
				return ERR_PTR(error);
			}
		}
	}
	return dentry;
}

/*
 * Call i_op->lookup on the dentry.  The dentry must be negative and
 * unhashed.
 *
 * dir->d_inode->i_mutex must be held
 */
static struct dentry *lookup_real(struct inode *dir, struct dentry *dentry,
				  unsigned int flags)
{
	struct dentry *old;

	/* Don't create child dentry for a dead directory. */
	if (unlikely(IS_DEADDIR(dir))) {
		dput(dentry);
		return ERR_PTR(-ENOENT);
	}

	old = dir->i_op->lookup(dir, dentry, flags);
	if (unlikely(old)) {
		/* fs returned a different (e.g. aliased) dentry: use it */
		dput(dentry);
		dentry = old;
	}
	return dentry;
}

/* dcache lookup, falling back to allocating and calling ->lookup(). */
static struct dentry *__lookup_hash(const struct qstr *name,
		struct dentry *base, unsigned int flags)
{
	struct dentry *dentry = lookup_dcache(name, base, flags);

	if (dentry)
		return dentry;

	dentry = d_alloc(base, name);
	if (unlikely(!dentry))
		return ERR_PTR(-ENOMEM);

	return lookup_real(base->d_inode, dentry, flags);
}

/*
 * Fast dcache-only child lookup for walk_component().  Returns > 0 with
 * *path/*inode/*seqp filled on a hit, 0 to fall back to lookup_slow(),
 * or a -ve error.
 */
static int lookup_fast(struct nameidata *nd,
		       struct path *path, struct inode **inode,
		       unsigned *seqp)
{
	struct vfsmount *mnt = nd->path.mnt;
	struct dentry *dentry, *parent = nd->path.dentry;
	int status = 1;
	int err;

	/*
	 * Rename seqlock is not required here because in the off chance
	 * of a false negative due to a concurrent rename, the caller is
	 * going to fall back to non-racy lookup.
	 */
	if (nd->flags & LOOKUP_RCU) {
		unsigned seq;
		bool negative;
		dentry = __d_lookup_rcu(parent, &nd->last, &seq);
		if (unlikely(!dentry)) {
			if (unlazy_walk(nd, NULL, 0))
				return -ECHILD;
			return 0;
		}

		/*
		 * This sequence count validates that the inode matches
		 * the dentry name information from lookup.
		 */
		*inode = d_backing_inode(dentry);
		negative = d_is_negative(dentry);
		if (unlikely(read_seqcount_retry(&dentry->d_seq, seq)))
			return -ECHILD;

		/*
		 * This sequence count validates that the parent had no
		 * changes while we did the lookup of the dentry above.
		 *
		 * The memory barrier in read_seqcount_begin of child is
		 * enough, we can use __read_seqcount_retry here.
		 */
		if (unlikely(__read_seqcount_retry(&parent->d_seq, nd->seq)))
			return -ECHILD;

		*seqp = seq;
		if (unlikely(dentry->d_flags & DCACHE_OP_REVALIDATE))
			status = d_revalidate(dentry, nd->flags);
		if (unlikely(status <= 0)) {
			if (unlazy_walk(nd, dentry, seq))
				return -ECHILD;
			if (status == -ECHILD)
				status = d_revalidate(dentry, nd->flags);
		} else {
			/*
			 * Note: do negative dentry check after revalidation in
			 * case that drops it.
			 */
			if (unlikely(negative))
				return -ENOENT;
			path->mnt = mnt;
			path->dentry = dentry;
			if (likely(__follow_mount_rcu(nd, path, inode, seqp)))
				return 1;
			if (unlazy_walk(nd, dentry, seq))
				return -ECHILD;
		}
	} else {
		dentry = __d_lookup(parent, &nd->last);
		if (unlikely(!dentry))
			return 0;
		if (unlikely(dentry->d_flags & DCACHE_OP_REVALIDATE))
			status = d_revalidate(dentry, nd->flags);
	}
	if (unlikely(status <= 0)) {
		if (!status)
			d_invalidate(dentry);
		dput(dentry);
		return status;
	}
	if (unlikely(d_is_negative(dentry))) {
		dput(dentry);
		return -ENOENT;
	}

	path->mnt = mnt;
	path->dentry = dentry;
	err = follow_managed(path, nd);
	if (likely(err > 0))
		*inode = d_backing_inode(path->dentry);
	return err;
}

/* Fast lookup failed, do it the slow way */
static struct dentry *lookup_slow(const struct qstr *name,
				  struct dentry *dir,
				  unsigned int flags)
{
	struct dentry *dentry;
	/* serialize against concurrent create/rename in this directory */
	inode_lock(dir->d_inode);
	dentry = d_lookup(dir, name);
	if (unlikely(dentry)) {
		if ((dentry->d_flags & DCACHE_OP_REVALIDATE) &&
		    !(flags & LOOKUP_NO_REVAL)) {
			int error = d_revalidate(dentry, flags);
			if (unlikely(error <= 0)) {
				if (!error)
					d_invalidate(dentry);
				dput(dentry);
				dentry = ERR_PTR(error);
			}
		}
		if (dentry) {
			inode_unlock(dir->d_inode);
			return dentry;
		}
	}
	dentry = d_alloc(dir, name);
	if (unlikely(!dentry)) {
		inode_unlock(dir->d_inode);
		return ERR_PTR(-ENOMEM);
	}
	dentry = lookup_real(dir->d_inode, dentry, flags);
	inode_unlock(dir->d_inode);
	return dentry;
}

/* MAY_EXEC permission on the current directory, with RCU fallback. */
static inline int may_lookup(struct nameidata *nd)
{
	if (nd->flags & LOOKUP_RCU) {
		int err = inode_permission(nd->inode, MAY_EXEC|MAY_NOT_BLOCK);
		if (err != -ECHILD)
			return err;
		if (unlazy_walk(nd, NULL, 0))
			return -ECHILD;
	}
	return inode_permission(nd->inode, MAY_EXEC);
}

/* "." is a no-op; ".." dispatches to the mode-specific climber. */
static inline int handle_dots(struct nameidata *nd, int type)
{
	if (type == LAST_DOTDOT) {
		if (!nd->root.mnt)
			set_root(nd);
		if (nd->flags & LOOKUP_RCU) {
			return follow_dotdot_rcu(nd);
		} else
			return follow_dotdot(nd);
	}
	return 0;
}

/*
 * Push a symlink onto nd->stack for later traversal.  Enforces
 * MAXSYMLINKS, takes the needed refs in ref-walk mode, and grows the
 * stack (dropping to ref-walk if the allocation can't run under RCU).
 * Returns 1 on success so walk_component() reports "follow me".
 */
static int pick_link(struct nameidata *nd, struct path *link,
		     struct inode *inode, unsigned seq)
{
	int error;
	struct saved *last;
	if (unlikely(nd->total_link_count++ >= MAXSYMLINKS)) {
		path_to_nameidata(link, nd);
		return -ELOOP;
	}
	if (!(nd->flags & LOOKUP_RCU)) {
		if (link->mnt == nd->path.mnt)
			mntget(link->mnt);
	}
	error = nd_alloc_stack(nd);
	if (unlikely(error)) {
		if (error == -ECHILD) {
			if (unlikely(unlazy_link(nd, link, seq)))
				return -ECHILD;
			error = nd_alloc_stack(nd);
		}
		if (error) {
			path_put(link);
			return error;
		}
	}

	last = nd->stack + nd->depth++;
	last->link = *link;
	clear_delayed_call(&last->done);
	nd->link_inode = inode;
	last->seq = seq;
	return 1;
}

/*
 * Do we need to follow links? We _really_ want to be able
 * to do this check without having to look at inode->i_op,
 * so we keep a cache of "no, this doesn't need follow_link"
 * for the common case.
 */
static inline int should_follow_link(struct nameidata *nd, struct path *link,
				     int follow,
				     struct inode *inode, unsigned seq)
{
	if (likely(!d_is_symlink(link->dentry)))
		return 0;
	if (!follow)
		return 0;
	/* make sure that d_is_symlink above matches inode */
	if (nd->flags & LOOKUP_RCU) {
		if (read_seqcount_retry(&link->dentry->d_seq, seq))
			return -ECHILD;
	}
	return pick_link(nd, link, inode, seq);
}

enum {WALK_GET = 1, WALK_PUT = 2};

/*
 * Resolve one component of the path: dots, fast lookup, slow-path
 * fallback with mount traversal, then symlink detection.  Returns 1
 * when a symlink was pushed, 0 when nd->path/nd->inode were advanced,
 * -ve on error.
 */
static int walk_component(struct nameidata *nd, int flags)
{
	struct path path;
	struct inode *inode;
	unsigned seq;
	int err;
	/*
	 * "." and ".." are special - ".." especially so because it has
	 * to be able to know about the current root directory and
	 * parent relationships.
	 */
	if (unlikely(nd->last_type != LAST_NORM)) {
		err = handle_dots(nd, nd->last_type);
		if (flags & WALK_PUT)
			put_link(nd);
		return err;
	}
	err = lookup_fast(nd, &path, &inode, &seq);
	if (unlikely(err <= 0)) {
		if (err < 0)
			return err;
		path.dentry = lookup_slow(&nd->last, nd->path.dentry,
					  nd->flags);
		if (IS_ERR(path.dentry))
			return PTR_ERR(path.dentry);

		path.mnt = nd->path.mnt;
		err = follow_managed(&path, nd);
		if (unlikely(err < 0))
			return err;

		if (unlikely(d_is_negative(path.dentry))) {
			path_to_nameidata(&path, nd);
			return -ENOENT;
		}

		seq = 0;	/* we are already out of RCU mode */
		inode = d_backing_inode(path.dentry);
	}

	if (flags & WALK_PUT)
		put_link(nd);
	err = should_follow_link(nd, &path, flags & WALK_GET, inode, seq);
	if (unlikely(err))
		return err;
	path_to_nameidata(&path, nd);
	nd->inode = inode;
	nd->seq = seq;
	return 0;
}

/*
 * We can do the critical dentry name comparison and hashing
 * operations one word at a time, but we are limited to:
 *
 * - Architectures with fast unaligned word accesses. We could
 *   do a "get_unaligned()" if this helps and is sufficiently
 *   fast.
 *
 * - non-CONFIG_DEBUG_PAGEALLOC configurations (so that we
 *   do not trap on the (extremely unlikely) case of a page
 *   crossing operation.
 *
 * - Furthermore, we need an efficient 64-bit compile for the
 *   64-bit case in order to generate the "number of bytes in
 *   the final mask". Again, that could be replaced with a
 *   efficient population count instruction or similar.
 */
#ifdef CONFIG_DCACHE_WORD_ACCESS

#include <asm/word-at-a-time.h>

#ifdef CONFIG_64BIT

/* Fold a 64-bit running hash down to the 32 bits the dcache uses. */
static inline unsigned int fold_hash(unsigned long hash)
{
	return hash_64(hash, 32);
}

#else	/* 32-bit case */

#define fold_hash(x) (x)

#endif

/* Word-at-a-time hash over a known-length byte string. */
unsigned int full_name_hash(const unsigned char *name, unsigned int len)
{
	unsigned long a, mask;
	unsigned long hash = 0;

	for (;;) {
		/* zeropad load is safe even if it crosses past the string */
		a = load_unaligned_zeropad(name);
		if (len < sizeof(unsigned long))
			break;
		hash += a;
		hash *= 9;
		name += sizeof(unsigned long);
		len -= sizeof(unsigned long);
		if (!len)
			goto done;
	}
	mask = bytemask_from_count(len);
	hash += mask & a;
done:
	return fold_hash(hash);
}
EXPORT_SYMBOL(full_name_hash);

/*
 * Calculate the length and hash of the path component, and
 * return the "hash_len" as the result.
 */
static inline u64 hash_name(const char *name)
{
	unsigned long a, b, adata, bdata, mask, hash, len;
	const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;

	hash = a = 0;
	len = -sizeof(unsigned long);
	do {
		hash = (hash + a) * 9;
		len += sizeof(unsigned long);
		a = load_unaligned_zeropad(name+len);
		/* b has a zero byte wherever name had a '/' */
		b = a ^ REPEAT_BYTE('/');
	} while (!(has_zero(a, &adata, &constants) | has_zero(b, &bdata, &constants)));

	adata = prep_zero_mask(a, adata, &constants);
	bdata = prep_zero_mask(b, bdata, &constants);

	mask = create_zero_mask(adata | bdata);

	hash += a & zero_bytemask(mask);
	len += find_zero(mask);
	return hashlen_create(fold_hash(hash), len);
}

#else	/* !CONFIG_DCACHE_WORD_ACCESS: portable byte-at-a-time versions */

unsigned int full_name_hash(const unsigned char *name, unsigned int len)
{
	unsigned long hash = init_name_hash();
	while (len--)
		hash = partial_name_hash(*name++, hash);
	return end_name_hash(hash);
}
EXPORT_SYMBOL(full_name_hash);

/*
 * We know there's a real path component here of at least
 * one character.
 */
static inline u64 hash_name(const char *name)
{
	unsigned long hash = init_name_hash();
	unsigned long len = 0, c;

	c = (unsigned char)*name;
	do {
		len++;
		hash = partial_name_hash(c, hash);
		c = (unsigned char)name[len];
	} while (c && c != '/');
	return hashlen_create(end_name_hash(hash), len);
}

#endif

/*
 * Name resolution.
 * This is the basic name resolution function, turning a pathname into
 * the final dentry. We expect 'base' to be positive and a directory.
 *
 * Returns 0 and nd will have valid dentry and mnt on success.
 * Returns error and drops reference to input namei data on failure.
 */
static int link_path_walk(const char *name, struct nameidata *nd)
{
	int err;

	while (*name=='/')
		name++;
	if (!*name)
		return 0;

	/* At this point we know we have a real path component. */
	for(;;) {
		u64 hash_len;
		int type;

		err = may_lookup(nd);
 		if (err)
			return err;

		hash_len = hash_name(name);

		type = LAST_NORM;
		if (name[0] == '.') switch (hashlen_len(hash_len)) {
			case 2:
				if (name[1] == '.') {
					type = LAST_DOTDOT;
					nd->flags |= LOOKUP_JUMPED;
				}
				break;
			case 1:
				type = LAST_DOT;
		}
		if (likely(type == LAST_NORM)) {
			struct dentry *parent = nd->path.dentry;
			nd->flags &= ~LOOKUP_JUMPED;
			/* let the fs rewrite the name/hash (case folding etc.) */
			if (unlikely(parent->d_flags & DCACHE_OP_HASH)) {
				struct qstr this = { { .hash_len = hash_len }, .name = name };
				err = parent->d_op->d_hash(parent, &this);
				if (err < 0)
					return err;
				hash_len = this.hash_len;
				name = this.name;
			}
		}

		nd->last.hash_len = hash_len;
		nd->last.name = name;
		nd->last_type = type;

		name += hashlen_len(hash_len);
		if (!*name)
			goto OK;
		/*
		 * If it wasn't NUL, we know it was '/'. Skip that
		 * slash, and continue until no more slashes.
		 */
		do {
			name++;
		} while (unlikely(*name == '/'));
		if (unlikely(!*name)) {
OK:
			/* pathname body, done */
			if (!nd->depth)
				return 0;
			name = nd->stack[nd->depth - 1].name;
			/* trailing symlink, done */
			if (!name)
				return 0;
			/* last component of nested symlink */
			err = walk_component(nd, WALK_GET | WALK_PUT);
		} else {
			err = walk_component(nd, WALK_GET);
		}
		if (err < 0)
			return err;

		if (err) {
			/* err > 0: a symlink was picked up — fetch its body */
			const char *s = get_link(nd);

			if (IS_ERR(s))
				return PTR_ERR(s);
			err = 0;
			if (unlikely(!s)) {
				/* jumped */
				put_link(nd);
			} else {
				/* park the remainder; resume inside the link */
				nd->stack[nd->depth - 1].name = name;
				name = s;
				continue;
			}
		}
		if (unlikely(!d_can_lookup(nd->path.dentry))) {
			if (nd->flags & LOOKUP_RCU) {
				if (unlazy_walk(nd, NULL, 0))
					return -ECHILD;
			}
			return -ENOTDIR;
		}
	}
}

/*
 * Set up nd for a walk: pick the starting point (caller-supplied root,
 * fs root for absolute paths, cwd for AT_FDCWD, or an open directory fd)
 * and take references or sample seqcounts depending on LOOKUP_RCU.
 * Returns the pathname body to walk, or ERR_PTR.
 */
static const char *path_init(struct nameidata *nd, unsigned flags)
{
	int retval = 0;
	const char *s = nd->name->name;

	nd->last_type = LAST_ROOT; /* if there are only slashes... */
	nd->flags = flags | LOOKUP_JUMPED | LOOKUP_PARENT;
	nd->depth = 0;
	if (flags & LOOKUP_ROOT) {
		struct dentry *root = nd->root.dentry;
		struct inode *inode = root->d_inode;
		if (*s) {
			if (!d_can_lookup(root))
				return ERR_PTR(-ENOTDIR);
			retval = inode_permission(inode, MAY_EXEC);
			if (retval)
				return ERR_PTR(retval);
		}
		nd->path = nd->root;
		nd->inode = inode;
		if (flags & LOOKUP_RCU) {
			rcu_read_lock();
			nd->seq = __read_seqcount_begin(&nd->path.dentry->d_seq);
			nd->root_seq = nd->seq;
			nd->m_seq = read_seqbegin(&mount_lock);
		} else {
			path_get(&nd->path);
		}
		return s;
	}

	nd->root.mnt = NULL;
	nd->path.mnt = NULL;
	nd->path.dentry = NULL;

	nd->m_seq = read_seqbegin(&mount_lock);
	if (*s == '/') {
		if (flags & LOOKUP_RCU)
			rcu_read_lock();
		set_root(nd);
		if (likely(!nd_jump_root(nd)))
			return s;
		nd->root.mnt = NULL;
		rcu_read_unlock();
		return ERR_PTR(-ECHILD);
	} else if (nd->dfd == AT_FDCWD) {
		if (flags & LOOKUP_RCU) {
			struct fs_struct *fs = current->fs;
			unsigned seq;

			rcu_read_lock();

			do {
				seq = read_seqcount_begin(&fs->seq);
				nd->path = fs->pwd;
				nd->inode = nd->path.dentry->d_inode;
				nd->seq = __read_seqcount_begin(&nd->path.dentry->d_seq);
			} while (read_seqcount_retry(&fs->seq, seq));
		} else {
			get_fs_pwd(current->fs, &nd->path);
			nd->inode = nd->path.dentry->d_inode;
		}
		return s;
	} else {
		/* Caller must check execute permissions on the starting path component */
		struct fd f = fdget_raw(nd->dfd);
		struct dentry *dentry;

		if (!f.file)
			return ERR_PTR(-EBADF);

		dentry = f.file->f_path.dentry;

		if (*s) {
			if (!d_can_lookup(dentry)) {
				fdput(f);
				return ERR_PTR(-ENOTDIR);
			}
		}

		nd->path = f.file->f_path;
		if (flags & LOOKUP_RCU) {
			rcu_read_lock();
			nd->inode = nd->path.dentry->d_inode;
			nd->seq = read_seqcount_begin(&nd->path.dentry->d_seq);
		} else {
			path_get(&nd->path);
			nd->inode = nd->path.dentry->d_inode;
		}
		fdput(f);
		return s;
	}
}

/*
 * The final component resolved to a symlink that must be followed:
 * apply the protected-symlink policy, then return its body ("" for an
 * empty remainder) or ERR_PTR.
 */
static const char *trailing_symlink(struct nameidata *nd)
{
	const char *s;
	int error = may_follow_link(nd);
	if (unlikely(error))
		return ERR_PTR(error);
	nd->flags |= LOOKUP_PARENT;
	nd->stack[0].name = NULL;
	s = get_link(nd);
	return s ? s : "";
}

/* Walk the final component; a trailing '/' forces dir + follow. */
static inline int lookup_last(struct nameidata *nd)
{
	if (nd->last_type == LAST_NORM && nd->last.name[nd->last.len])
		nd->flags |= LOOKUP_FOLLOW | LOOKUP_DIRECTORY;

	nd->flags &= ~LOOKUP_PARENT;
	return walk_component(nd,
			nd->flags & LOOKUP_FOLLOW
				? nd->depth
					? WALK_PUT | WALK_GET
					: WALK_GET
				: 0);
}

/* Returns 0 and nd will be valid on success; Returns error, otherwise.
*/ static int path_lookupat(struct nameidata *nd, unsigned flags, struct path *path) { const char *s = path_init(nd, flags); int err; if (IS_ERR(s)) return PTR_ERR(s); while (!(err = link_path_walk(s, nd)) && ((err = lookup_last(nd)) > 0)) { s = trailing_symlink(nd); if (IS_ERR(s)) { err = PTR_ERR(s); break; } } if (!err) err = complete_walk(nd); if (!err && nd->flags & LOOKUP_DIRECTORY) if (!d_can_lookup(nd->path.dentry)) err = -ENOTDIR; if (!err) { *path = nd->path; nd->path.mnt = NULL; nd->path.dentry = NULL; } terminate_walk(nd); return err; } static int filename_lookup(int dfd, struct filename *name, unsigned flags, struct path *path, struct path *root) { int retval; struct nameidata nd; if (IS_ERR(name)) return PTR_ERR(name); if (unlikely(root)) { nd.root = *root; flags |= LOOKUP_ROOT; } set_nameidata(&nd, dfd, name); retval = path_lookupat(&nd, flags | LOOKUP_RCU, path); if (unlikely(retval == -ECHILD)) retval = path_lookupat(&nd, flags, path); if (unlikely(retval == -ESTALE)) retval = path_lookupat(&nd, flags | LOOKUP_REVAL, path); if (likely(!retval)) audit_inode(name, path->dentry, flags & LOOKUP_PARENT); restore_nameidata(); putname(name); return retval; } /* Returns 0 and nd will be valid on success; Retuns error, otherwise. 
*/ static int path_parentat(struct nameidata *nd, unsigned flags, struct path *parent) { const char *s = path_init(nd, flags); int err; if (IS_ERR(s)) return PTR_ERR(s); err = link_path_walk(s, nd); if (!err) err = complete_walk(nd); if (!err) { *parent = nd->path; nd->path.mnt = NULL; nd->path.dentry = NULL; } terminate_walk(nd); return err; } static struct filename *filename_parentat(int dfd, struct filename *name, unsigned int flags, struct path *parent, struct qstr *last, int *type) { int retval; struct nameidata nd; if (IS_ERR(name)) return name; set_nameidata(&nd, dfd, name); retval = path_parentat(&nd, flags | LOOKUP_RCU, parent); if (unlikely(retval == -ECHILD)) retval = path_parentat(&nd, flags, parent); if (unlikely(retval == -ESTALE)) retval = path_parentat(&nd, flags | LOOKUP_REVAL, parent); if (likely(!retval)) { *last = nd.last; *type = nd.last_type; audit_inode(name, parent->dentry, LOOKUP_PARENT); } else { putname(name); name = ERR_PTR(retval); } restore_nameidata(); return name; } /* does lookup, returns the object with parent locked */ struct dentry *kern_path_locked(const char *name, struct path *path) { struct filename *filename; struct dentry *d; struct qstr last; int type; filename = filename_parentat(AT_FDCWD, getname_kernel(name), 0, path, &last, &type); if (IS_ERR(filename)) return ERR_CAST(filename); if (unlikely(type != LAST_NORM)) { path_put(path); putname(filename); return ERR_PTR(-EINVAL); } inode_lock_nested(path->dentry->d_inode, I_MUTEX_PARENT); d = __lookup_hash(&last, path->dentry, 0); if (IS_ERR(d)) { inode_unlock(path->dentry->d_inode); path_put(path); } putname(filename); return d; } int kern_path(const char *name, unsigned int flags, struct path *path) { return filename_lookup(AT_FDCWD, getname_kernel(name), flags, path, NULL); } EXPORT_SYMBOL(kern_path); /** * vfs_path_lookup - lookup a file path relative to a dentry-vfsmount pair * @dentry: pointer to dentry of the base directory * @mnt: pointer to vfs mount of the base 
directory * @name: pointer to file name * @flags: lookup flags * @path: pointer to struct path to fill */ int vfs_path_lookup(struct dentry *dentry, struct vfsmount *mnt, const char *name, unsigned int flags, struct path *path) { struct path root = {.mnt = mnt, .dentry = dentry}; /* the first argument of filename_lookup() is ignored with root */ return filename_lookup(AT_FDCWD, getname_kernel(name), flags , path, &root); } EXPORT_SYMBOL(vfs_path_lookup); /** * lookup_one_len - filesystem helper to lookup single pathname component * @name: pathname component to lookup * @base: base directory to lookup from * @len: maximum length @len should be interpreted to * * Note that this routine is purely a helper for filesystem usage and should * not be called by generic code. * * The caller must hold base->i_mutex. */ struct dentry *lookup_one_len(const char *name, struct dentry *base, int len) { struct qstr this; unsigned int c; int err; WARN_ON_ONCE(!inode_is_locked(base->d_inode)); this.name = name; this.len = len; this.hash = full_name_hash(name, len); if (!len) return ERR_PTR(-EACCES); if (unlikely(name[0] == '.')) { if (len < 2 || (len == 2 && name[1] == '.')) return ERR_PTR(-EACCES); } while (len--) { c = *(const unsigned char *)name++; if (c == '/' || c == '\0') return ERR_PTR(-EACCES); } /* * See if the low-level filesystem might want * to use its own hash.. */ if (base->d_flags & DCACHE_OP_HASH) { int err = base->d_op->d_hash(base, &this); if (err < 0) return ERR_PTR(err); } err = inode_permission(base->d_inode, MAY_EXEC); if (err) return ERR_PTR(err); return __lookup_hash(&this, base, 0); } EXPORT_SYMBOL(lookup_one_len); /** * lookup_one_len_unlocked - filesystem helper to lookup single pathname component * @name: pathname component to lookup * @base: base directory to lookup from * @len: maximum length @len should be interpreted to * * Note that this routine is purely a helper for filesystem usage and should * not be called by generic code. 
* * Unlike lookup_one_len, it should be called without the parent * i_mutex held, and will take the i_mutex itself if necessary. */ struct dentry *lookup_one_len_unlocked(const char *name, struct dentry *base, int len) { struct qstr this; unsigned int c; int err; struct dentry *ret; this.name = name; this.len = len; this.hash = full_name_hash(name, len); if (!len) return ERR_PTR(-EACCES); if (unlikely(name[0] == '.')) { if (len < 2 || (len == 2 && name[1] == '.')) return ERR_PTR(-EACCES); } while (len--) { c = *(const unsigned char *)name++; if (c == '/' || c == '\0') return ERR_PTR(-EACCES); } /* * See if the low-level filesystem might want * to use its own hash.. */ if (base->d_flags & DCACHE_OP_HASH) { int err = base->d_op->d_hash(base, &this); if (err < 0) return ERR_PTR(err); } err = inode_permission(base->d_inode, MAY_EXEC); if (err) return ERR_PTR(err); ret = lookup_dcache(&this, base, 0); if (!ret) ret = lookup_slow(&this, base, 0); return ret; } EXPORT_SYMBOL(lookup_one_len_unlocked); int user_path_at_empty(int dfd, const char __user *name, unsigned flags, struct path *path, int *empty) { return filename_lookup(dfd, getname_flags(name, flags, empty), flags, path, NULL); } EXPORT_SYMBOL(user_path_at_empty); /* * NB: most callers don't do anything directly with the reference to the * to struct filename, but the nd->last pointer points into the name string * allocated by getname. So we must hold the reference to it until all * path-walking is complete. 
*/ static inline struct filename * user_path_parent(int dfd, const char __user *path, struct path *parent, struct qstr *last, int *type, unsigned int flags) { /* only LOOKUP_REVAL is allowed in extra flags */ return filename_parentat(dfd, getname(path), flags & LOOKUP_REVAL, parent, last, type); } /** * mountpoint_last - look up last component for umount * @nd: pathwalk nameidata - currently pointing at parent directory of "last" * @path: pointer to container for result * * This is a special lookup_last function just for umount. In this case, we * need to resolve the path without doing any revalidation. * * The nameidata should be the result of doing a LOOKUP_PARENT pathwalk. Since * mountpoints are always pinned in the dcache, their ancestors are too. Thus, * in almost all cases, this lookup will be served out of the dcache. The only * cases where it won't are if nd->last refers to a symlink or the path is * bogus and it doesn't exist. * * Returns: * -error: if there was an error during lookup. This includes -ENOENT if the * lookup found a negative dentry. The nd->path reference will also be * put in this case. * * 0: if we successfully resolved nd->path and found it to not to be a * symlink that needs to be followed. "path" will also be populated. * The nd->path reference will also be put. * * 1: if we successfully resolved nd->last and found it to be a symlink * that needs to be followed. "path" will be populated with the path * to the link, and nd->path will *not* be put. 
*/ static int mountpoint_last(struct nameidata *nd, struct path *path) { int error = 0; struct dentry *dentry; struct dentry *dir = nd->path.dentry; /* If we're in rcuwalk, drop out of it to handle last component */ if (nd->flags & LOOKUP_RCU) { if (unlazy_walk(nd, NULL, 0)) return -ECHILD; } nd->flags &= ~LOOKUP_PARENT; if (unlikely(nd->last_type != LAST_NORM)) { error = handle_dots(nd, nd->last_type); if (error) return error; dentry = dget(nd->path.dentry); } else { dentry = d_lookup(dir, &nd->last); if (!dentry) { /* * No cached dentry. Mounted dentries are pinned in the * cache, so that means that this dentry is probably * a symlink or the path doesn't actually point * to a mounted dentry. */ dentry = lookup_slow(&nd->last, dir, nd->flags | LOOKUP_NO_REVAL); if (IS_ERR(dentry)) return PTR_ERR(dentry); } } if (d_is_negative(dentry)) { dput(dentry); return -ENOENT; } if (nd->depth) put_link(nd); path->dentry = dentry; path->mnt = nd->path.mnt; error = should_follow_link(nd, path, nd->flags & LOOKUP_FOLLOW, d_backing_inode(dentry), 0); if (unlikely(error)) return error; mntget(path->mnt); follow_mount(path); return 0; } /** * path_mountpoint - look up a path to be umounted * @nd: lookup context * @flags: lookup flags * @path: pointer to container for result * * Look up the given name, but don't attempt to revalidate the last component. * Returns 0 and "path" will be valid on success; Returns error otherwise. 
*/ static int path_mountpoint(struct nameidata *nd, unsigned flags, struct path *path) { const char *s = path_init(nd, flags); int err; if (IS_ERR(s)) return PTR_ERR(s); while (!(err = link_path_walk(s, nd)) && (err = mountpoint_last(nd, path)) > 0) { s = trailing_symlink(nd); if (IS_ERR(s)) { err = PTR_ERR(s); break; } } terminate_walk(nd); return err; } static int filename_mountpoint(int dfd, struct filename *name, struct path *path, unsigned int flags) { struct nameidata nd; int error; if (IS_ERR(name)) return PTR_ERR(name); set_nameidata(&nd, dfd, name); error = path_mountpoint(&nd, flags | LOOKUP_RCU, path); if (unlikely(error == -ECHILD)) error = path_mountpoint(&nd, flags, path); if (unlikely(error == -ESTALE)) error = path_mountpoint(&nd, flags | LOOKUP_REVAL, path); if (likely(!error)) audit_inode(name, path->dentry, 0); restore_nameidata(); putname(name); return error; } /** * user_path_mountpoint_at - lookup a path from userland in order to umount it * @dfd: directory file descriptor * @name: pathname from userland * @flags: lookup flags * @path: pointer to container to hold result * * A umount is a special case for path walking. We're not actually interested * in the inode in this situation, and ESTALE errors can be a problem. We * simply want track down the dentry and vfsmount attached at the mountpoint * and avoid revalidating the last component. * * Returns 0 and populates "path" on success. 
*/ int user_path_mountpoint_at(int dfd, const char __user *name, unsigned int flags, struct path *path) { return filename_mountpoint(dfd, getname(name), path, flags); } int kern_path_mountpoint(int dfd, const char *name, struct path *path, unsigned int flags) { return filename_mountpoint(dfd, getname_kernel(name), path, flags); } EXPORT_SYMBOL(kern_path_mountpoint); int __check_sticky(struct inode *dir, struct inode *inode) { kuid_t fsuid = current_fsuid(); if (uid_eq(inode->i_uid, fsuid)) return 0; if (uid_eq(dir->i_uid, fsuid)) return 0; return !capable_wrt_inode_uidgid(inode, CAP_FOWNER); } EXPORT_SYMBOL(__check_sticky); /* * Check whether we can remove a link victim from directory dir, check * whether the type of victim is right. * 1. We can't do it if dir is read-only (done in permission()) * 2. We should have write and exec permissions on dir * 3. We can't remove anything from append-only dir * 4. We can't do anything with immutable dir (done in permission()) * 5. If the sticky bit on dir is set we should either * a. be owner of dir, or * b. be owner of victim, or * c. have CAP_FOWNER capability * 6. If the victim is append-only or immutable we can't do antyhing with * links pointing to it. * 7. If we were asked to remove a directory and victim isn't one - ENOTDIR. * 8. If we were asked to remove a non-directory and victim isn't one - EISDIR. * 9. We can't remove a root or mountpoint. * 10. We don't allow removal of NFS sillyrenamed files; it's handled by * nfs_async_unlink(). 
*/ static int may_delete(struct inode *dir, struct dentry *victim, bool isdir) { struct inode *inode = d_backing_inode(victim); int error; if (d_is_negative(victim)) return -ENOENT; BUG_ON(!inode); BUG_ON(victim->d_parent->d_inode != dir); audit_inode_child(dir, victim, AUDIT_TYPE_CHILD_DELETE); error = inode_permission(dir, MAY_WRITE | MAY_EXEC); if (error) return error; if (IS_APPEND(dir)) return -EPERM; if (check_sticky(dir, inode) || IS_APPEND(inode) || IS_IMMUTABLE(inode) || IS_SWAPFILE(inode)) return -EPERM; if (isdir) { if (!d_is_dir(victim)) return -ENOTDIR; if (IS_ROOT(victim)) return -EBUSY; } else if (d_is_dir(victim)) return -EISDIR; if (IS_DEADDIR(dir)) return -ENOENT; if (victim->d_flags & DCACHE_NFSFS_RENAMED) return -EBUSY; return 0; } /* Check whether we can create an object with dentry child in directory * dir. * 1. We can't do it if child already exists (open has special treatment for * this case, but since we are inlined it's OK) * 2. We can't do it if dir is read-only (done in permission()) * 3. We should have write and exec permissions on dir * 4. We can't do it if dir is immutable (done in permission()) */ static inline int may_create(struct inode *dir, struct dentry *child) { audit_inode_child(dir, child, AUDIT_TYPE_CHILD_CREATE); if (child->d_inode) return -EEXIST; if (IS_DEADDIR(dir)) return -ENOENT; return inode_permission(dir, MAY_WRITE | MAY_EXEC); } /* * p1 and p2 should be directories on the same fs. 
*/ struct dentry *lock_rename(struct dentry *p1, struct dentry *p2) { struct dentry *p; if (p1 == p2) { inode_lock_nested(p1->d_inode, I_MUTEX_PARENT); return NULL; } mutex_lock(&p1->d_inode->i_sb->s_vfs_rename_mutex); p = d_ancestor(p2, p1); if (p) { inode_lock_nested(p2->d_inode, I_MUTEX_PARENT); inode_lock_nested(p1->d_inode, I_MUTEX_CHILD); return p; } p = d_ancestor(p1, p2); if (p) { inode_lock_nested(p1->d_inode, I_MUTEX_PARENT); inode_lock_nested(p2->d_inode, I_MUTEX_CHILD); return p; } inode_lock_nested(p1->d_inode, I_MUTEX_PARENT); inode_lock_nested(p2->d_inode, I_MUTEX_PARENT2); return NULL; } EXPORT_SYMBOL(lock_rename); void unlock_rename(struct dentry *p1, struct dentry *p2) { inode_unlock(p1->d_inode); if (p1 != p2) { inode_unlock(p2->d_inode); mutex_unlock(&p1->d_inode->i_sb->s_vfs_rename_mutex); } } EXPORT_SYMBOL(unlock_rename); int vfs_create(struct inode *dir, struct dentry *dentry, umode_t mode, bool want_excl) { int error = may_create(dir, dentry); if (error) return error; if (!dir->i_op->create) return -EACCES; /* shouldn't it be ENOSYS? */ mode &= S_IALLUGO; mode |= S_IFREG; error = security_inode_create(dir, dentry, mode); if (error) return error; error = dir->i_op->create(dir, dentry, mode, want_excl); if (!error) fsnotify_create(dir, dentry); return error; } EXPORT_SYMBOL(vfs_create); static int may_open(struct path *path, int acc_mode, int flag) { struct dentry *dentry = path->dentry; struct inode *inode = dentry->d_inode; int error; if (!inode) return -ENOENT; switch (inode->i_mode & S_IFMT) { case S_IFLNK: return -ELOOP; case S_IFDIR: if (acc_mode & MAY_WRITE) return -EISDIR; break; case S_IFBLK: case S_IFCHR: if (path->mnt->mnt_flags & MNT_NODEV) return -EACCES; /*FALLTHRU*/ case S_IFIFO: case S_IFSOCK: flag &= ~O_TRUNC; break; } error = inode_permission(inode, MAY_OPEN | acc_mode); if (error) return error; /* * An append-only file must be opened in append mode for writing. 
*/ if (IS_APPEND(inode)) { if ((flag & O_ACCMODE) != O_RDONLY && !(flag & O_APPEND)) return -EPERM; if (flag & O_TRUNC) return -EPERM; } /* O_NOATIME can only be set by the owner or superuser */ if (flag & O_NOATIME && !inode_owner_or_capable(inode)) return -EPERM; return 0; } static int handle_truncate(struct file *filp) { struct path *path = &filp->f_path; struct inode *inode = path->dentry->d_inode; int error = get_write_access(inode); if (error) return error; /* * Refuse to truncate files with mandatory locks held on them. */ error = locks_verify_locked(filp); if (!error) error = security_path_truncate(path); if (!error) { error = do_truncate(path->dentry, 0, ATTR_MTIME|ATTR_CTIME|ATTR_OPEN, filp); } put_write_access(inode); return error; } static inline int open_to_namei_flags(int flag) { if ((flag & O_ACCMODE) == 3) flag--; return flag; } static int may_o_create(struct path *dir, struct dentry *dentry, umode_t mode) { int error = security_path_mknod(dir, dentry, mode, 0); if (error) return error; error = inode_permission(dir->dentry->d_inode, MAY_WRITE | MAY_EXEC); if (error) return error; return security_inode_create(dir->dentry->d_inode, dentry, mode); } /* * Attempt to atomically look up, create and open a file from a negative * dentry. * * Returns 0 if successful. The file will have been created and attached to * @file by the filesystem calling finish_open(). * * Returns 1 if the file was looked up only or didn't need creating. The * caller will need to perform the open themselves. @path will have been * updated to point to the new dentry. This may be negative. * * Returns an error code otherwise. 
*/ static int atomic_open(struct nameidata *nd, struct dentry *dentry, struct path *path, struct file *file, const struct open_flags *op, bool got_write, bool need_lookup, int *opened) { struct inode *dir = nd->path.dentry->d_inode; unsigned open_flag = open_to_namei_flags(op->open_flag); umode_t mode; int error; int acc_mode; int create_error = 0; struct dentry *const DENTRY_NOT_SET = (void *) -1UL; bool excl; BUG_ON(dentry->d_inode); /* Don't create child dentry for a dead directory. */ if (unlikely(IS_DEADDIR(dir))) { error = -ENOENT; goto out; } mode = op->mode; if ((open_flag & O_CREAT) && !IS_POSIXACL(dir)) mode &= ~current_umask(); excl = (open_flag & (O_EXCL | O_CREAT)) == (O_EXCL | O_CREAT); if (excl) open_flag &= ~O_TRUNC; /* * Checking write permission is tricky, bacuse we don't know if we are * going to actually need it: O_CREAT opens should work as long as the * file exists. But checking existence breaks atomicity. The trick is * to check access and if not granted clear O_CREAT from the flags. * * Another problem is returing the "right" error value (e.g. for an * O_EXCL open we want to return EEXIST not EROFS). 
*/ if (((open_flag & (O_CREAT | O_TRUNC)) || (open_flag & O_ACCMODE) != O_RDONLY) && unlikely(!got_write)) { if (!(open_flag & O_CREAT)) { /* * No O_CREATE -> atomicity not a requirement -> fall * back to lookup + open */ goto no_open; } else if (open_flag & (O_EXCL | O_TRUNC)) { /* Fall back and fail with the right error */ create_error = -EROFS; goto no_open; } else { /* No side effects, safe to clear O_CREAT */ create_error = -EROFS; open_flag &= ~O_CREAT; } } if (open_flag & O_CREAT) { error = may_o_create(&nd->path, dentry, mode); if (error) { create_error = error; if (open_flag & O_EXCL) goto no_open; open_flag &= ~O_CREAT; } } if (nd->flags & LOOKUP_DIRECTORY) open_flag |= O_DIRECTORY; file->f_path.dentry = DENTRY_NOT_SET; file->f_path.mnt = nd->path.mnt; error = dir->i_op->atomic_open(dir, dentry, file, open_flag, mode, opened); if (error < 0) { if (create_error && error == -ENOENT) error = create_error; goto out; } if (error) { /* returned 1, that is */ if (WARN_ON(file->f_path.dentry == DENTRY_NOT_SET)) { error = -EIO; goto out; } if (file->f_path.dentry) { dput(dentry); dentry = file->f_path.dentry; } if (*opened & FILE_CREATED) fsnotify_create(dir, dentry); if (!dentry->d_inode) { WARN_ON(*opened & FILE_CREATED); if (create_error) { error = create_error; goto out; } } else { if (excl && !(*opened & FILE_CREATED)) { error = -EEXIST; goto out; } } goto looked_up; } /* * We didn't have the inode before the open, so check open permission * here. 
*/ acc_mode = op->acc_mode; if (*opened & FILE_CREATED) { WARN_ON(!(open_flag & O_CREAT)); fsnotify_create(dir, dentry); acc_mode = 0; } error = may_open(&file->f_path, acc_mode, open_flag); if (error) fput(file); out: dput(dentry); return error; no_open: if (need_lookup) { dentry = lookup_real(dir, dentry, nd->flags); if (IS_ERR(dentry)) return PTR_ERR(dentry); if (create_error) { int open_flag = op->open_flag; error = create_error; if ((open_flag & O_EXCL)) { if (!dentry->d_inode) goto out; } else if (!dentry->d_inode) { goto out; } else if ((open_flag & O_TRUNC) && d_is_reg(dentry)) { goto out; } /* will fail later, go on to get the right error */ } } looked_up: path->dentry = dentry; path->mnt = nd->path.mnt; return 1; } /* * Look up and maybe create and open the last component. * * Must be called with i_mutex held on parent. * * Returns 0 if the file was successfully atomically created (if necessary) and * opened. In this case the file will be returned attached to @file. * * Returns 1 if the file was not completely opened at this time, though lookups * and creations will have been performed and the dentry returned in @path will * be positive upon return if O_CREAT was specified. If O_CREAT wasn't * specified then a negative dentry may be returned. * * An error code is returned otherwise. * * FILE_CREATE will be set in @*opened if the dentry was created and will be * cleared otherwise prior to returning. 
*/ static int lookup_open(struct nameidata *nd, struct path *path, struct file *file, const struct open_flags *op, bool got_write, int *opened) { struct dentry *dir = nd->path.dentry; struct inode *dir_inode = dir->d_inode; struct dentry *dentry; int error; bool need_lookup = false; *opened &= ~FILE_CREATED; dentry = lookup_dcache(&nd->last, dir, nd->flags); if (IS_ERR(dentry)) return PTR_ERR(dentry); if (!dentry) { dentry = d_alloc(dir, &nd->last); if (unlikely(!dentry)) return -ENOMEM; need_lookup = true; } else if (dentry->d_inode) { /* Cached positive dentry: will open in f_op->open */ goto out_no_open; } if ((nd->flags & LOOKUP_OPEN) && dir_inode->i_op->atomic_open) { return atomic_open(nd, dentry, path, file, op, got_write, need_lookup, opened); } if (need_lookup) { BUG_ON(dentry->d_inode); dentry = lookup_real(dir_inode, dentry, nd->flags); if (IS_ERR(dentry)) return PTR_ERR(dentry); } /* Negative dentry, just create the file */ if (!dentry->d_inode && (op->open_flag & O_CREAT)) { umode_t mode = op->mode; if (!IS_POSIXACL(dir->d_inode)) mode &= ~current_umask(); /* * This write is needed to ensure that a * rw->ro transition does not occur between * the time when the file is created and when * a permanent write count is taken through * the 'struct file' in finish_open(). 
*/ if (!got_write) { error = -EROFS; goto out_dput; } *opened |= FILE_CREATED; error = security_path_mknod(&nd->path, dentry, mode, 0); if (error) goto out_dput; error = vfs_create(dir->d_inode, dentry, mode, nd->flags & LOOKUP_EXCL); if (error) goto out_dput; } out_no_open: path->dentry = dentry; path->mnt = nd->path.mnt; return 1; out_dput: dput(dentry); return error; } /* * Handle the last step of open() */ static int do_last(struct nameidata *nd, struct file *file, const struct open_flags *op, int *opened) { struct dentry *dir = nd->path.dentry; int open_flag = op->open_flag; bool will_truncate = (open_flag & O_TRUNC) != 0; bool got_write = false; int acc_mode = op->acc_mode; unsigned seq; struct inode *inode; struct path save_parent = { .dentry = NULL, .mnt = NULL }; struct path path; bool retried = false; int error; nd->flags &= ~LOOKUP_PARENT; nd->flags |= op->intent; if (nd->last_type != LAST_NORM) { error = handle_dots(nd, nd->last_type); if (unlikely(error)) return error; goto finish_open; } if (!(open_flag & O_CREAT)) { if (nd->last.name[nd->last.len]) nd->flags |= LOOKUP_FOLLOW | LOOKUP_DIRECTORY; /* we _can_ be in RCU mode here */ error = lookup_fast(nd, &path, &inode, &seq); if (likely(error > 0)) goto finish_lookup; if (error < 0) return error; BUG_ON(nd->inode != dir->d_inode); BUG_ON(nd->flags & LOOKUP_RCU); } else { /* create side of things */ /* * This will *only* deal with leaving RCU mode - LOOKUP_JUMPED * has been cleared when we got to the last component we are * about to look up */ error = complete_walk(nd); if (error) return error; audit_inode(nd->name, dir, LOOKUP_PARENT); /* trailing slashes? 
*/ if (unlikely(nd->last.name[nd->last.len])) return -EISDIR; } retry_lookup: if (op->open_flag & (O_CREAT | O_TRUNC | O_WRONLY | O_RDWR)) { error = mnt_want_write(nd->path.mnt); if (!error) got_write = true; /* * do _not_ fail yet - we might not need that or fail with * a different error; let lookup_open() decide; we'll be * dropping this one anyway. */ } inode_lock(dir->d_inode); error = lookup_open(nd, &path, file, op, got_write, opened); inode_unlock(dir->d_inode); if (error <= 0) { if (error) goto out; if ((*opened & FILE_CREATED) || !S_ISREG(file_inode(file)->i_mode)) will_truncate = false; audit_inode(nd->name, file->f_path.dentry, 0); goto opened; } if (*opened & FILE_CREATED) { /* Don't check for write permission, don't truncate */ open_flag &= ~O_TRUNC; will_truncate = false; acc_mode = 0; path_to_nameidata(&path, nd); goto finish_open_created; } /* * If atomic_open() acquired write access it is dropped now due to * possible mount and symlink following (this might be optimized away if * necessary...) */ if (got_write) { mnt_drop_write(nd->path.mnt); got_write = false; } if (unlikely(d_is_negative(path.dentry))) { path_to_nameidata(&path, nd); return -ENOENT; } /* * create/update audit record if it already exists. */ audit_inode(nd->name, path.dentry, 0); if (unlikely((open_flag & (O_EXCL | O_CREAT)) == (O_EXCL | O_CREAT))) { path_to_nameidata(&path, nd); return -EEXIST; } error = follow_managed(&path, nd); if (unlikely(error < 0)) return error; seq = 0; /* out of RCU mode, so the value doesn't matter */ inode = d_backing_inode(path.dentry); finish_lookup: if (nd->depth) put_link(nd); error = should_follow_link(nd, &path, nd->flags & LOOKUP_FOLLOW, inode, seq); if (unlikely(error)) return error; if ((nd->flags & LOOKUP_RCU) || nd->path.mnt != path.mnt) { path_to_nameidata(&path, nd); } else { save_parent.dentry = nd->path.dentry; save_parent.mnt = mntget(path.mnt); nd->path.dentry = path.dentry; } nd->inode = inode; nd->seq = seq; /* Why this, you ask? 
_Now_ we might have grown LOOKUP_JUMPED... */ finish_open: error = complete_walk(nd); if (error) { path_put(&save_parent); return error; } audit_inode(nd->name, nd->path.dentry, 0); if (unlikely(d_is_symlink(nd->path.dentry)) && !(open_flag & O_PATH)) { error = -ELOOP; goto out; } error = -EISDIR; if ((open_flag & O_CREAT) && d_is_dir(nd->path.dentry)) goto out; error = -ENOTDIR; if ((nd->flags & LOOKUP_DIRECTORY) && !d_can_lookup(nd->path.dentry)) goto out; if (!d_is_reg(nd->path.dentry)) will_truncate = false; if (will_truncate) { error = mnt_want_write(nd->path.mnt); if (error) goto out; got_write = true; } finish_open_created: if (likely(!(open_flag & O_PATH))) { error = may_open(&nd->path, acc_mode, open_flag); if (error) goto out; } BUG_ON(*opened & FILE_OPENED); /* once it's opened, it's opened */ error = vfs_open(&nd->path, file, current_cred()); if (!error) { *opened |= FILE_OPENED; } else { if (error == -EOPENSTALE) goto stale_open; goto out; } opened: error = open_check_o_direct(file); if (error) goto exit_fput; error = ima_file_check(file, op->acc_mode, *opened); if (error) goto exit_fput; if (will_truncate) { error = handle_truncate(file); if (error) goto exit_fput; } out: if (unlikely(error > 0)) { WARN_ON(1); error = -EINVAL; } if (got_write) mnt_drop_write(nd->path.mnt); path_put(&save_parent); return error; exit_fput: fput(file); goto out; stale_open: /* If no saved parent or already retried then can't retry */ if (!save_parent.dentry || retried) goto out; BUG_ON(save_parent.dentry != dir); path_put(&nd->path); nd->path = save_parent; nd->inode = dir->d_inode; save_parent.mnt = NULL; save_parent.dentry = NULL; if (got_write) { mnt_drop_write(nd->path.mnt); got_write = false; } retried = true; goto retry_lookup; } static int do_tmpfile(struct nameidata *nd, unsigned flags, const struct open_flags *op, struct file *file, int *opened) { static const struct qstr name = QSTR_INIT("/", 1); struct dentry *child; struct inode *dir; struct path path; int 
error = path_lookupat(nd, flags | LOOKUP_DIRECTORY, &path); if (unlikely(error)) return error; error = mnt_want_write(path.mnt); if (unlikely(error)) goto out; dir = path.dentry->d_inode; /* we want directory to be writable */ error = inode_permission(dir, MAY_WRITE | MAY_EXEC); if (error) goto out2; if (!dir->i_op->tmpfile) { error = -EOPNOTSUPP; goto out2; } child = d_alloc(path.dentry, &name); if (unlikely(!child)) { error = -ENOMEM; goto out2; } dput(path.dentry); path.dentry = child; error = dir->i_op->tmpfile(dir, child, op->mode); if (error) goto out2; audit_inode(nd->name, child, 0); /* Don't check for other permissions, the inode was just created */ error = may_open(&path, 0, op->open_flag); if (error) goto out2; file->f_path.mnt = path.mnt; error = finish_open(file, child, NULL, opened); if (error) goto out2; error = open_check_o_direct(file); if (error) { fput(file); } else if (!(op->open_flag & O_EXCL)) { struct inode *inode = file_inode(file); spin_lock(&inode->i_lock); inode->i_state |= I_LINKABLE; spin_unlock(&inode->i_lock); } out2: mnt_drop_write(path.mnt); out: path_put(&path); return error; } static struct file *path_openat(struct nameidata *nd, const struct open_flags *op, unsigned flags) { const char *s; struct file *file; int opened = 0; int error; file = get_empty_filp(); if (IS_ERR(file)) return file; file->f_flags = op->open_flag; if (unlikely(file->f_flags & __O_TMPFILE)) { error = do_tmpfile(nd, flags, op, file, &opened); goto out2; } s = path_init(nd, flags); if (IS_ERR(s)) { put_filp(file); return ERR_CAST(s); } while (!(error = link_path_walk(s, nd)) && (error = do_last(nd, file, op, &opened)) > 0) { nd->flags &= ~(LOOKUP_OPEN|LOOKUP_CREATE|LOOKUP_EXCL); s = trailing_symlink(nd); if (IS_ERR(s)) { error = PTR_ERR(s); break; } } terminate_walk(nd); out2: if (!(opened & FILE_OPENED)) { BUG_ON(!error); put_filp(file); } if (unlikely(error)) { if (error == -EOPENSTALE) { if (flags & LOOKUP_RCU) error = -ECHILD; else error = -ESTALE; } file 
= ERR_PTR(error); } return file; } struct file *do_filp_open(int dfd, struct filename *pathname, const struct open_flags *op) { struct nameidata nd; int flags = op->lookup_flags; struct file *filp; set_nameidata(&nd, dfd, pathname); filp = path_openat(&nd, op, flags | LOOKUP_RCU); if (unlikely(filp == ERR_PTR(-ECHILD))) filp = path_openat(&nd, op, flags); if (unlikely(filp == ERR_PTR(-ESTALE))) filp = path_openat(&nd, op, flags | LOOKUP_REVAL); restore_nameidata(); return filp; } struct file *do_file_open_root(struct dentry *dentry, struct vfsmount *mnt, const char *name, const struct open_flags *op) { struct nameidata nd; struct file *file; struct filename *filename; int flags = op->lookup_flags | LOOKUP_ROOT; nd.root.mnt = mnt; nd.root.dentry = dentry; if (d_is_symlink(dentry) && op->intent & LOOKUP_OPEN) return ERR_PTR(-ELOOP); filename = getname_kernel(name); if (IS_ERR(filename)) return ERR_CAST(filename); set_nameidata(&nd, -1, filename); file = path_openat(&nd, op, flags | LOOKUP_RCU); if (unlikely(file == ERR_PTR(-ECHILD))) file = path_openat(&nd, op, flags); if (unlikely(file == ERR_PTR(-ESTALE))) file = path_openat(&nd, op, flags | LOOKUP_REVAL); restore_nameidata(); putname(filename); return file; } static struct dentry *filename_create(int dfd, struct filename *name, struct path *path, unsigned int lookup_flags) { struct dentry *dentry = ERR_PTR(-EEXIST); struct qstr last; int type; int err2; int error; bool is_dir = (lookup_flags & LOOKUP_DIRECTORY); /* * Note that only LOOKUP_REVAL and LOOKUP_DIRECTORY matter here. Any * other flags passed in are ignored! */ lookup_flags &= LOOKUP_REVAL; name = filename_parentat(dfd, name, lookup_flags, path, &last, &type); if (IS_ERR(name)) return ERR_CAST(name); /* * Yucky last component or no last component at all? 
* (foo/., foo/.., /////) */ if (unlikely(type != LAST_NORM)) goto out; /* don't fail immediately if it's r/o, at least try to report other errors */ err2 = mnt_want_write(path->mnt); /* * Do the final lookup. */ lookup_flags |= LOOKUP_CREATE | LOOKUP_EXCL; inode_lock_nested(path->dentry->d_inode, I_MUTEX_PARENT); dentry = __lookup_hash(&last, path->dentry, lookup_flags); if (IS_ERR(dentry)) goto unlock; error = -EEXIST; if (d_is_positive(dentry)) goto fail; /* * Special case - lookup gave negative, but... we had foo/bar/ * From the vfs_mknod() POV we just have a negative dentry - * all is fine. Let's be bastards - you had / on the end, you've * been asking for (non-existent) directory. -ENOENT for you. */ if (unlikely(!is_dir && last.name[last.len])) { error = -ENOENT; goto fail; } if (unlikely(err2)) { error = err2; goto fail; } putname(name); return dentry; fail: dput(dentry); dentry = ERR_PTR(error); unlock: inode_unlock(path->dentry->d_inode); if (!err2) mnt_drop_write(path->mnt); out: path_put(path); putname(name); return dentry; } struct dentry *kern_path_create(int dfd, const char *pathname, struct path *path, unsigned int lookup_flags) { return filename_create(dfd, getname_kernel(pathname), path, lookup_flags); } EXPORT_SYMBOL(kern_path_create); void done_path_create(struct path *path, struct dentry *dentry) { dput(dentry); inode_unlock(path->dentry->d_inode); mnt_drop_write(path->mnt); path_put(path); } EXPORT_SYMBOL(done_path_create); inline struct dentry *user_path_create(int dfd, const char __user *pathname, struct path *path, unsigned int lookup_flags) { return filename_create(dfd, getname(pathname), path, lookup_flags); } EXPORT_SYMBOL(user_path_create); int vfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev) { int error = may_create(dir, dentry); if (error) return error; if ((S_ISCHR(mode) || S_ISBLK(mode)) && !capable(CAP_MKNOD)) return -EPERM; if (!dir->i_op->mknod) return -EPERM; error = devcgroup_inode_mknod(mode, dev); 
	/* continuation of vfs_mknod(): result of devcgroup_inode_mknod() */
	if (error)
		return error;

	error = security_inode_mknod(dir, dentry, mode, dev);
	if (error)
		return error;

	/* Hand off to the filesystem; notify fs watchers only on success. */
	error = dir->i_op->mknod(dir, dentry, mode, dev);
	if (!error)
		fsnotify_create(dir, dentry);
	return error;
}
EXPORT_SYMBOL(vfs_mknod);

/*
 * Restrict the file types mknod(2) may create: regular files, character
 * and block devices, FIFOs and sockets are allowed; directories get
 * -EPERM (mkdir(2) is the tool for that), anything else -EINVAL.
 */
static int may_mknod(umode_t mode)
{
	switch (mode & S_IFMT) {
	case S_IFREG:
	case S_IFCHR:
	case S_IFBLK:
	case S_IFIFO:
	case S_IFSOCK:
	case 0: /* zero mode translates to S_IFREG */
		return 0;
	case S_IFDIR:
		return -EPERM;
	default:
		return -EINVAL;
	}
}

SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, umode_t, mode,
		unsigned, dev)
{
	struct dentry *dentry;
	struct path path;
	int error;
	unsigned int lookup_flags = 0;

	error = may_mknod(mode);
	if (error)
		return error;
retry:
	/* look up the parent and get a negative dentry for the new name */
	dentry = user_path_create(dfd, filename, &path, lookup_flags);
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	/* POSIX-ACL filesystems apply the umask themselves (default ACLs) */
	if (!IS_POSIXACL(path.dentry->d_inode))
		mode &= ~current_umask();
	error = security_path_mknod(&path, dentry, mode, dev);
	if (error)
		goto out;
	switch (mode & S_IFMT) {
		case 0: case S_IFREG:
			error = vfs_create(path.dentry->d_inode, dentry, mode, true);
			break;
		case S_IFCHR: case S_IFBLK:
			error = vfs_mknod(path.dentry->d_inode, dentry, mode,
					new_decode_dev(dev));
			break;
		case S_IFIFO: case S_IFSOCK:
			error = vfs_mknod(path.dentry->d_inode, dentry, mode, 0);
			break;
	}
out:
	done_path_create(&path, dentry);
	/* on -ESTALE, retry the whole lookup once with LOOKUP_REVAL */
	if (retry_estale(error, lookup_flags)) {
		lookup_flags |= LOOKUP_REVAL;
		goto retry;
	}
	return error;
}

SYSCALL_DEFINE3(mknod, const char __user *, filename, umode_t, mode, unsigned, dev)
{
	return sys_mknodat(AT_FDCWD, filename, mode, dev);
}

/*
 * vfs_mkdir - create a directory
 * @dir:    parent inode (callers such as mkdirat lock it via filename_create)
 * @dentry: negative dentry for the new directory
 * @mode:   permission bits; only S_IRWXUGO|S_ISVTX are kept
 *
 * Returns 0 or a negative errno; -EMLINK if the superblock declares a
 * link-count ceiling and the parent has reached it.
 */
int vfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
	int error = may_create(dir, dentry);
	unsigned max_links = dir->i_sb->s_max_links;

	if (error)
		return error;

	if (!dir->i_op->mkdir)
		return -EPERM;

	mode &= (S_IRWXUGO|S_ISVTX);
	error = security_inode_mkdir(dir, dentry, mode);
	if (error)
		return error;

	/* honour the superblock's link-count ceiling, if it declares one */
	if (max_links && dir->i_nlink >= max_links)
		return -EMLINK;

	error = dir->i_op->mkdir(dir, dentry, mode);
	if
(!error) fsnotify_mkdir(dir, dentry); return error; } EXPORT_SYMBOL(vfs_mkdir); SYSCALL_DEFINE3(mkdirat, int, dfd, const char __user *, pathname, umode_t, mode) { struct dentry *dentry; struct path path; int error; unsigned int lookup_flags = LOOKUP_DIRECTORY; retry: dentry = user_path_create(dfd, pathname, &path, lookup_flags); if (IS_ERR(dentry)) return PTR_ERR(dentry); if (!IS_POSIXACL(path.dentry->d_inode)) mode &= ~current_umask(); error = security_path_mkdir(&path, dentry, mode); if (!error) error = vfs_mkdir(path.dentry->d_inode, dentry, mode); done_path_create(&path, dentry); if (retry_estale(error, lookup_flags)) { lookup_flags |= LOOKUP_REVAL; goto retry; } return error; } SYSCALL_DEFINE2(mkdir, const char __user *, pathname, umode_t, mode) { return sys_mkdirat(AT_FDCWD, pathname, mode); } int vfs_rmdir(struct inode *dir, struct dentry *dentry) { int error = may_delete(dir, dentry, 1); if (error) return error; if (!dir->i_op->rmdir) return -EPERM; dget(dentry); inode_lock(dentry->d_inode); error = -EBUSY; if (is_local_mountpoint(dentry)) goto out; error = security_inode_rmdir(dir, dentry); if (error) goto out; shrink_dcache_parent(dentry); error = dir->i_op->rmdir(dir, dentry); if (error) goto out; dentry->d_inode->i_flags |= S_DEAD; dont_mount(dentry); detach_mounts(dentry); out: inode_unlock(dentry->d_inode); dput(dentry); if (!error) d_delete(dentry); return error; } EXPORT_SYMBOL(vfs_rmdir); static long do_rmdir(int dfd, const char __user *pathname) { int error = 0; struct filename *name; struct dentry *dentry; struct path path; struct qstr last; int type; unsigned int lookup_flags = 0; retry: name = user_path_parent(dfd, pathname, &path, &last, &type, lookup_flags); if (IS_ERR(name)) return PTR_ERR(name); switch (type) { case LAST_DOTDOT: error = -ENOTEMPTY; goto exit1; case LAST_DOT: error = -EINVAL; goto exit1; case LAST_ROOT: error = -EBUSY; goto exit1; } error = mnt_want_write(path.mnt); if (error) goto exit1; 
inode_lock_nested(path.dentry->d_inode, I_MUTEX_PARENT); dentry = __lookup_hash(&last, path.dentry, lookup_flags); error = PTR_ERR(dentry); if (IS_ERR(dentry)) goto exit2; if (!dentry->d_inode) { error = -ENOENT; goto exit3; } error = security_path_rmdir(&path, dentry); if (error) goto exit3; error = vfs_rmdir(path.dentry->d_inode, dentry); exit3: dput(dentry); exit2: inode_unlock(path.dentry->d_inode); mnt_drop_write(path.mnt); exit1: path_put(&path); putname(name); if (retry_estale(error, lookup_flags)) { lookup_flags |= LOOKUP_REVAL; goto retry; } return error; } SYSCALL_DEFINE1(rmdir, const char __user *, pathname) { return do_rmdir(AT_FDCWD, pathname); } /** * vfs_unlink - unlink a filesystem object * @dir: parent directory * @dentry: victim * @delegated_inode: returns victim inode, if the inode is delegated. * * The caller must hold dir->i_mutex. * * If vfs_unlink discovers a delegation, it will return -EWOULDBLOCK and * return a reference to the inode in delegated_inode. The caller * should then break the delegation on that inode and retry. Because * breaking a delegation may take a long time, the caller should drop * dir->i_mutex before doing so. * * Alternatively, a caller may pass NULL for delegated_inode. This may * be appropriate for callers that expect the underlying filesystem not * to be NFS exported. */ int vfs_unlink(struct inode *dir, struct dentry *dentry, struct inode **delegated_inode) { struct inode *target = dentry->d_inode; int error = may_delete(dir, dentry, 0); if (error) return error; if (!dir->i_op->unlink) return -EPERM; inode_lock(target); if (is_local_mountpoint(dentry)) error = -EBUSY; else { error = security_inode_unlink(dir, dentry); if (!error) { error = try_break_deleg(target, delegated_inode); if (error) goto out; error = dir->i_op->unlink(dir, dentry); if (!error) { dont_mount(dentry); detach_mounts(dentry); } } } out: inode_unlock(target); /* We don't d_delete() NFS sillyrenamed files--they still exist. 
*/ if (!error && !(dentry->d_flags & DCACHE_NFSFS_RENAMED)) { fsnotify_link_count(target); d_delete(dentry); } return error; } EXPORT_SYMBOL(vfs_unlink); /* * Make sure that the actual truncation of the file will occur outside its * directory's i_mutex. Truncate can take a long time if there is a lot of * writeout happening, and we don't want to prevent access to the directory * while waiting on the I/O. */ static long do_unlinkat(int dfd, const char __user *pathname) { int error; struct filename *name; struct dentry *dentry; struct path path; struct qstr last; int type; struct inode *inode = NULL; struct inode *delegated_inode = NULL; unsigned int lookup_flags = 0; retry: name = user_path_parent(dfd, pathname, &path, &last, &type, lookup_flags); if (IS_ERR(name)) return PTR_ERR(name); error = -EISDIR; if (type != LAST_NORM) goto exit1; error = mnt_want_write(path.mnt); if (error) goto exit1; retry_deleg: inode_lock_nested(path.dentry->d_inode, I_MUTEX_PARENT); dentry = __lookup_hash(&last, path.dentry, lookup_flags); error = PTR_ERR(dentry); if (!IS_ERR(dentry)) { /* Why not before? 
Because we want correct error value */ if (last.name[last.len]) goto slashes; inode = dentry->d_inode; if (d_is_negative(dentry)) goto slashes; ihold(inode); error = security_path_unlink(&path, dentry); if (error) goto exit2; error = vfs_unlink(path.dentry->d_inode, dentry, &delegated_inode); exit2: dput(dentry); } inode_unlock(path.dentry->d_inode); if (inode) iput(inode); /* truncate the inode here */ inode = NULL; if (delegated_inode) { error = break_deleg_wait(&delegated_inode); if (!error) goto retry_deleg; } mnt_drop_write(path.mnt); exit1: path_put(&path); putname(name); if (retry_estale(error, lookup_flags)) { lookup_flags |= LOOKUP_REVAL; inode = NULL; goto retry; } return error; slashes: if (d_is_negative(dentry)) error = -ENOENT; else if (d_is_dir(dentry)) error = -EISDIR; else error = -ENOTDIR; goto exit2; } SYSCALL_DEFINE3(unlinkat, int, dfd, const char __user *, pathname, int, flag) { if ((flag & ~AT_REMOVEDIR) != 0) return -EINVAL; if (flag & AT_REMOVEDIR) return do_rmdir(dfd, pathname); return do_unlinkat(dfd, pathname); } SYSCALL_DEFINE1(unlink, const char __user *, pathname) { return do_unlinkat(AT_FDCWD, pathname); } int vfs_symlink(struct inode *dir, struct dentry *dentry, const char *oldname) { int error = may_create(dir, dentry); if (error) return error; if (!dir->i_op->symlink) return -EPERM; error = security_inode_symlink(dir, dentry, oldname); if (error) return error; error = dir->i_op->symlink(dir, dentry, oldname); if (!error) fsnotify_create(dir, dentry); return error; } EXPORT_SYMBOL(vfs_symlink); SYSCALL_DEFINE3(symlinkat, const char __user *, oldname, int, newdfd, const char __user *, newname) { int error; struct filename *from; struct dentry *dentry; struct path path; unsigned int lookup_flags = 0; from = getname(oldname); if (IS_ERR(from)) return PTR_ERR(from); retry: dentry = user_path_create(newdfd, newname, &path, lookup_flags); error = PTR_ERR(dentry); if (IS_ERR(dentry)) goto out_putname; error = security_path_symlink(&path, 
dentry, from->name); if (!error) error = vfs_symlink(path.dentry->d_inode, dentry, from->name); done_path_create(&path, dentry); if (retry_estale(error, lookup_flags)) { lookup_flags |= LOOKUP_REVAL; goto retry; } out_putname: putname(from); return error; } SYSCALL_DEFINE2(symlink, const char __user *, oldname, const char __user *, newname) { return sys_symlinkat(oldname, AT_FDCWD, newname); } /** * vfs_link - create a new link * @old_dentry: object to be linked * @dir: new parent * @new_dentry: where to create the new link * @delegated_inode: returns inode needing a delegation break * * The caller must hold dir->i_mutex * * If vfs_link discovers a delegation on the to-be-linked file in need * of breaking, it will return -EWOULDBLOCK and return a reference to the * inode in delegated_inode. The caller should then break the delegation * and retry. Because breaking a delegation may take a long time, the * caller should drop the i_mutex before doing so. * * Alternatively, a caller may pass NULL for delegated_inode. This may * be appropriate for callers that expect the underlying filesystem not * to be NFS exported. */ int vfs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *new_dentry, struct inode **delegated_inode) { struct inode *inode = old_dentry->d_inode; unsigned max_links = dir->i_sb->s_max_links; int error; if (!inode) return -ENOENT; error = may_create(dir, new_dentry); if (error) return error; if (dir->i_sb != inode->i_sb) return -EXDEV; /* * A link to an append-only or immutable file cannot be created. 
*/ if (IS_APPEND(inode) || IS_IMMUTABLE(inode)) return -EPERM; if (!dir->i_op->link) return -EPERM; if (S_ISDIR(inode->i_mode)) return -EPERM; error = security_inode_link(old_dentry, dir, new_dentry); if (error) return error; inode_lock(inode); /* Make sure we don't allow creating hardlink to an unlinked file */ if (inode->i_nlink == 0 && !(inode->i_state & I_LINKABLE)) error = -ENOENT; else if (max_links && inode->i_nlink >= max_links) error = -EMLINK; else { error = try_break_deleg(inode, delegated_inode); if (!error) error = dir->i_op->link(old_dentry, dir, new_dentry); } if (!error && (inode->i_state & I_LINKABLE)) { spin_lock(&inode->i_lock); inode->i_state &= ~I_LINKABLE; spin_unlock(&inode->i_lock); } inode_unlock(inode); if (!error) fsnotify_link(dir, inode, new_dentry); return error; } EXPORT_SYMBOL(vfs_link); /* * Hardlinks are often used in delicate situations. We avoid * security-related surprises by not following symlinks on the * newname. --KAB * * We don't follow them on the oldname either to be compatible * with linux 2.0, and to avoid hard-linking to directories * and other special files. --ADM */ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname, int, newdfd, const char __user *, newname, int, flags) { struct dentry *new_dentry; struct path old_path, new_path; struct inode *delegated_inode = NULL; int how = 0; int error; if ((flags & ~(AT_SYMLINK_FOLLOW | AT_EMPTY_PATH)) != 0) return -EINVAL; /* * To use null names we require CAP_DAC_READ_SEARCH * This ensures that not everyone will be able to create * handlink using the passed filedescriptor. 
*/ if (flags & AT_EMPTY_PATH) { if (!capable(CAP_DAC_READ_SEARCH)) return -ENOENT; how = LOOKUP_EMPTY; } if (flags & AT_SYMLINK_FOLLOW) how |= LOOKUP_FOLLOW; retry: error = user_path_at(olddfd, oldname, how, &old_path); if (error) return error; new_dentry = user_path_create(newdfd, newname, &new_path, (how & LOOKUP_REVAL)); error = PTR_ERR(new_dentry); if (IS_ERR(new_dentry)) goto out; error = -EXDEV; if (old_path.mnt != new_path.mnt) goto out_dput; error = may_linkat(&old_path); if (unlikely(error)) goto out_dput; error = security_path_link(old_path.dentry, &new_path, new_dentry); if (error) goto out_dput; error = vfs_link(old_path.dentry, new_path.dentry->d_inode, new_dentry, &delegated_inode); out_dput: done_path_create(&new_path, new_dentry); if (delegated_inode) { error = break_deleg_wait(&delegated_inode); if (!error) { path_put(&old_path); goto retry; } } if (retry_estale(error, how)) { path_put(&old_path); how |= LOOKUP_REVAL; goto retry; } out: path_put(&old_path); return error; } SYSCALL_DEFINE2(link, const char __user *, oldname, const char __user *, newname) { return sys_linkat(AT_FDCWD, oldname, AT_FDCWD, newname, 0); } /** * vfs_rename - rename a filesystem object * @old_dir: parent of source * @old_dentry: source * @new_dir: parent of destination * @new_dentry: destination * @delegated_inode: returns an inode needing a delegation break * @flags: rename flags * * The caller must hold multiple mutexes--see lock_rename()). * * If vfs_rename discovers a delegation in need of breaking at either * the source or destination, it will return -EWOULDBLOCK and return a * reference to the inode in delegated_inode. The caller should then * break the delegation and retry. Because breaking a delegation may * take a long time, the caller should drop all locks before doing * so. * * Alternatively, a caller may pass NULL for delegated_inode. This may * be appropriate for callers that expect the underlying filesystem not * to be NFS exported. 
* * The worst of all namespace operations - renaming directory. "Perverted" * doesn't even start to describe it. Somebody in UCB had a heck of a trip... * Problems: * a) we can get into loop creation. * b) race potential - two innocent renames can create a loop together. * That's where 4.4 screws up. Current fix: serialization on * sb->s_vfs_rename_mutex. We might be more accurate, but that's another * story. * c) we have to lock _four_ objects - parents and victim (if it exists), * and source (if it is not a directory). * And that - after we got ->i_mutex on parents (until then we don't know * whether the target exists). Solution: try to be smart with locking * order for inodes. We rely on the fact that tree topology may change * only under ->s_vfs_rename_mutex _and_ that parent of the object we * move will be locked. Thus we can rank directories by the tree * (ancestors first) and rank all non-directories after them. * That works since everybody except rename does "lock parent, lookup, * lock child" and rename is under ->s_vfs_rename_mutex. * HOWEVER, it relies on the assumption that any object with ->lookup() * has no more than 1 dentry. If "hybrid" objects will ever appear, * we'd better make sure that there's no link(2) for them. * d) conversion from fhandle to dentry may come in the wrong moment - when * we are removing the target. Solution: we will have to grab ->i_mutex * in the fhandle_to_dentry code. [FIXME - current nfsfh.c relies on * ->i_mutex on parents, which works but leads to some truly excessive * locking]. 
*/ int vfs_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry, struct inode **delegated_inode, unsigned int flags) { int error; bool is_dir = d_is_dir(old_dentry); const unsigned char *old_name; struct inode *source = old_dentry->d_inode; struct inode *target = new_dentry->d_inode; bool new_is_dir = false; unsigned max_links = new_dir->i_sb->s_max_links; if (source == target) return 0; error = may_delete(old_dir, old_dentry, is_dir); if (error) return error; if (!target) { error = may_create(new_dir, new_dentry); } else { new_is_dir = d_is_dir(new_dentry); if (!(flags & RENAME_EXCHANGE)) error = may_delete(new_dir, new_dentry, is_dir); else error = may_delete(new_dir, new_dentry, new_is_dir); } if (error) return error; if (!old_dir->i_op->rename && !old_dir->i_op->rename2) return -EPERM; if (flags && !old_dir->i_op->rename2) return -EINVAL; /* * If we are going to change the parent - check write permissions, * we'll need to flip '..'. 
*/ if (new_dir != old_dir) { if (is_dir) { error = inode_permission(source, MAY_WRITE); if (error) return error; } if ((flags & RENAME_EXCHANGE) && new_is_dir) { error = inode_permission(target, MAY_WRITE); if (error) return error; } } error = security_inode_rename(old_dir, old_dentry, new_dir, new_dentry, flags); if (error) return error; old_name = fsnotify_oldname_init(old_dentry->d_name.name); dget(new_dentry); if (!is_dir || (flags & RENAME_EXCHANGE)) lock_two_nondirectories(source, target); else if (target) inode_lock(target); error = -EBUSY; if (is_local_mountpoint(old_dentry) || is_local_mountpoint(new_dentry)) goto out; if (max_links && new_dir != old_dir) { error = -EMLINK; if (is_dir && !new_is_dir && new_dir->i_nlink >= max_links) goto out; if ((flags & RENAME_EXCHANGE) && !is_dir && new_is_dir && old_dir->i_nlink >= max_links) goto out; } if (is_dir && !(flags & RENAME_EXCHANGE) && target) shrink_dcache_parent(new_dentry); if (!is_dir) { error = try_break_deleg(source, delegated_inode); if (error) goto out; } if (target && !new_is_dir) { error = try_break_deleg(target, delegated_inode); if (error) goto out; } if (!old_dir->i_op->rename2) { error = old_dir->i_op->rename(old_dir, old_dentry, new_dir, new_dentry); } else { WARN_ON(old_dir->i_op->rename != NULL); error = old_dir->i_op->rename2(old_dir, old_dentry, new_dir, new_dentry, flags); } if (error) goto out; if (!(flags & RENAME_EXCHANGE) && target) { if (is_dir) target->i_flags |= S_DEAD; dont_mount(new_dentry); detach_mounts(new_dentry); } if (!(old_dir->i_sb->s_type->fs_flags & FS_RENAME_DOES_D_MOVE)) { if (!(flags & RENAME_EXCHANGE)) d_move(old_dentry, new_dentry); else d_exchange(old_dentry, new_dentry); } out: if (!is_dir || (flags & RENAME_EXCHANGE)) unlock_two_nondirectories(source, target); else if (target) inode_unlock(target); dput(new_dentry); if (!error) { fsnotify_move(old_dir, new_dir, old_name, is_dir, !(flags & RENAME_EXCHANGE) ? 
target : NULL, old_dentry); if (flags & RENAME_EXCHANGE) { fsnotify_move(new_dir, old_dir, old_dentry->d_name.name, new_is_dir, NULL, new_dentry); } } fsnotify_oldname_free(old_name); return error; } EXPORT_SYMBOL(vfs_rename); SYSCALL_DEFINE5(renameat2, int, olddfd, const char __user *, oldname, int, newdfd, const char __user *, newname, unsigned int, flags) { struct dentry *old_dentry, *new_dentry; struct dentry *trap; struct path old_path, new_path; struct qstr old_last, new_last; int old_type, new_type; struct inode *delegated_inode = NULL; struct filename *from; struct filename *to; unsigned int lookup_flags = 0, target_flags = LOOKUP_RENAME_TARGET; bool should_retry = false; int error; if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT)) return -EINVAL; if ((flags & (RENAME_NOREPLACE | RENAME_WHITEOUT)) && (flags & RENAME_EXCHANGE)) return -EINVAL; if ((flags & RENAME_WHITEOUT) && !capable(CAP_MKNOD)) return -EPERM; if (flags & RENAME_EXCHANGE) target_flags = 0; retry: from = user_path_parent(olddfd, oldname, &old_path, &old_last, &old_type, lookup_flags); if (IS_ERR(from)) { error = PTR_ERR(from); goto exit; } to = user_path_parent(newdfd, newname, &new_path, &new_last, &new_type, lookup_flags); if (IS_ERR(to)) { error = PTR_ERR(to); goto exit1; } error = -EXDEV; if (old_path.mnt != new_path.mnt) goto exit2; error = -EBUSY; if (old_type != LAST_NORM) goto exit2; if (flags & RENAME_NOREPLACE) error = -EEXIST; if (new_type != LAST_NORM) goto exit2; error = mnt_want_write(old_path.mnt); if (error) goto exit2; retry_deleg: trap = lock_rename(new_path.dentry, old_path.dentry); old_dentry = __lookup_hash(&old_last, old_path.dentry, lookup_flags); error = PTR_ERR(old_dentry); if (IS_ERR(old_dentry)) goto exit3; /* source must exist */ error = -ENOENT; if (d_is_negative(old_dentry)) goto exit4; new_dentry = __lookup_hash(&new_last, new_path.dentry, lookup_flags | target_flags); error = PTR_ERR(new_dentry); if (IS_ERR(new_dentry)) goto exit4; error = 
-EEXIST; if ((flags & RENAME_NOREPLACE) && d_is_positive(new_dentry)) goto exit5; if (flags & RENAME_EXCHANGE) { error = -ENOENT; if (d_is_negative(new_dentry)) goto exit5; if (!d_is_dir(new_dentry)) { error = -ENOTDIR; if (new_last.name[new_last.len]) goto exit5; } } /* unless the source is a directory trailing slashes give -ENOTDIR */ if (!d_is_dir(old_dentry)) { error = -ENOTDIR; if (old_last.name[old_last.len]) goto exit5; if (!(flags & RENAME_EXCHANGE) && new_last.name[new_last.len]) goto exit5; } /* source should not be ancestor of target */ error = -EINVAL; if (old_dentry == trap) goto exit5; /* target should not be an ancestor of source */ if (!(flags & RENAME_EXCHANGE)) error = -ENOTEMPTY; if (new_dentry == trap) goto exit5; error = security_path_rename(&old_path, old_dentry, &new_path, new_dentry, flags); if (error) goto exit5; error = vfs_rename(old_path.dentry->d_inode, old_dentry, new_path.dentry->d_inode, new_dentry, &delegated_inode, flags); exit5: dput(new_dentry); exit4: dput(old_dentry); exit3: unlock_rename(new_path.dentry, old_path.dentry); if (delegated_inode) { error = break_deleg_wait(&delegated_inode); if (!error) goto retry_deleg; } mnt_drop_write(old_path.mnt); exit2: if (retry_estale(error, lookup_flags)) should_retry = true; path_put(&new_path); putname(to); exit1: path_put(&old_path); putname(from); if (should_retry) { should_retry = false; lookup_flags |= LOOKUP_REVAL; goto retry; } exit: return error; } SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname, int, newdfd, const char __user *, newname) { return sys_renameat2(olddfd, oldname, newdfd, newname, 0); } SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newname) { return sys_renameat2(AT_FDCWD, oldname, AT_FDCWD, newname, 0); } int vfs_whiteout(struct inode *dir, struct dentry *dentry) { int error = may_create(dir, dentry); if (error) return error; if (!dir->i_op->mknod) return -EPERM; return dir->i_op->mknod(dir, dentry, S_IFCHR | 
WHITEOUT_MODE, WHITEOUT_DEV); } EXPORT_SYMBOL(vfs_whiteout); int readlink_copy(char __user *buffer, int buflen, const char *link) { int len = PTR_ERR(link); if (IS_ERR(link)) goto out; len = strlen(link); if (len > (unsigned) buflen) len = buflen; if (copy_to_user(buffer, link, len)) len = -EFAULT; out: return len; } EXPORT_SYMBOL(readlink_copy); /* * A helper for ->readlink(). This should be used *ONLY* for symlinks that * have ->get_link() not calling nd_jump_link(). Using (or not using) it * for any given inode is up to filesystem. */ int generic_readlink(struct dentry *dentry, char __user *buffer, int buflen) { DEFINE_DELAYED_CALL(done); struct inode *inode = d_inode(dentry); const char *link = inode->i_link; int res; if (!link) { link = inode->i_op->get_link(dentry, inode, &done); if (IS_ERR(link)) return PTR_ERR(link); } res = readlink_copy(buffer, buflen, link); do_delayed_call(&done); return res; } EXPORT_SYMBOL(generic_readlink); /* get the link contents into pagecache */ const char *page_get_link(struct dentry *dentry, struct inode *inode, struct delayed_call *callback) { char *kaddr; struct page *page; struct address_space *mapping = inode->i_mapping; if (!dentry) { page = find_get_page(mapping, 0); if (!page) return ERR_PTR(-ECHILD); if (!PageUptodate(page)) { put_page(page); return ERR_PTR(-ECHILD); } } else { page = read_mapping_page(mapping, 0, NULL); if (IS_ERR(page)) return (char*)page; } set_delayed_call(callback, page_put_link, page); BUG_ON(mapping_gfp_mask(mapping) & __GFP_HIGHMEM); kaddr = page_address(page); nd_terminate_link(kaddr, inode->i_size, PAGE_SIZE - 1); return kaddr; } EXPORT_SYMBOL(page_get_link); void page_put_link(void *arg) { put_page(arg); } EXPORT_SYMBOL(page_put_link); int page_readlink(struct dentry *dentry, char __user *buffer, int buflen) { DEFINE_DELAYED_CALL(done); int res = readlink_copy(buffer, buflen, page_get_link(dentry, d_inode(dentry), &done)); do_delayed_call(&done); return res; } EXPORT_SYMBOL(page_readlink); /* 
 * The nofs argument instructs pagecache_write_begin to pass AOP_FLAG_NOFS
 */
int __page_symlink(struct inode *inode, const char *symname, int len, int nofs)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
	void *fsdata;
	int err;
	unsigned int flags = AOP_FLAG_UNINTERRUPTIBLE;
	if (nofs)
		flags |= AOP_FLAG_NOFS;

retry:
	/* only len-1 bytes of @symname are written — len presumably
	 * includes the trailing NUL; TODO confirm against callers */
	err = pagecache_write_begin(NULL, mapping, 0, len-1,
				flags, &page, &fsdata);
	if (err)
		goto fail;

	memcpy(page_address(page), symname, len-1);

	err = pagecache_write_end(NULL, mapping, 0, len-1, len-1,
							page, fsdata);
	if (err < 0)
		goto fail;
	/* short write: start over until the whole body is committed */
	if (err < len-1)
		goto retry;

	mark_inode_dirty(inode);
	return 0;
fail:
	return err;
}
EXPORT_SYMBOL(__page_symlink);

/*
 * Convenience wrapper: derive nofs from the mapping's GFP mask — a
 * mapping without __GFP_FS must not recurse into the filesystem.
 */
int page_symlink(struct inode *inode, const char *symname, int len)
{
	return __page_symlink(inode, symname, len,
			!mapping_gfp_constraint(inode->i_mapping, __GFP_FS));
}
EXPORT_SYMBOL(page_symlink);

/* default inode_operations for pagecache-backed symlinks */
const struct inode_operations page_symlink_inode_operations = {
	.readlink	= generic_readlink,
	.get_link	= page_get_link,
};
EXPORT_SYMBOL(page_symlink_inode_operations);
/*
 * Dataset artifact — file-boundary markers, not source code.  The text
 * above is fs/namei.c; the text below is fs/nfsd/nfs4acl.c.
 *   ./CrossVul/dataset_final_sorted/CWE-284/c/bad_5199_0
 *   crossvul-cpp_data_good_4896_2
 */
/* * Common NFSv4 ACL handling code. * * Copyright (c) 2002, 2003 The Regents of the University of Michigan. * All rights reserved. * * Marius Aamodt Eriksen <marius@umich.edu> * Jeff Sedlak <jsedlak@umich.edu> * J. Bruce Fields <bfields@umich.edu> * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/posix_acl.h>

#include "nfsfh.h"
#include "nfsd.h"
#include "acl.h"
#include "vfs.h"

/* internal flags steering the POSIX <-> NFSv4 translation below */
#define NFS4_ACL_TYPE_DEFAULT	0x01	/* translating a default (inheritable) ACL */
#define NFS4_ACL_DIR		0x02	/* the object is a directory */
#define NFS4_ACL_OWNER		0x04	/* the entry describes the file owner */

/* mode bit translations: */
#define NFS4_READ_MODE (NFS4_ACE_READ_DATA)
#define NFS4_WRITE_MODE (NFS4_ACE_WRITE_DATA | NFS4_ACE_APPEND_DATA)
#define NFS4_EXECUTE_MODE NFS4_ACE_EXECUTE
#define NFS4_ANYONE_MODE (NFS4_ACE_READ_ATTRIBUTES | NFS4_ACE_READ_ACL | NFS4_ACE_SYNCHRONIZE)
#define NFS4_OWNER_MODE (NFS4_ACE_WRITE_ATTRIBUTES | NFS4_ACE_WRITE_ACL)

/* flags used to simulate posix default ACLs */
#define NFS4_INHERITANCE_FLAGS (NFS4_ACE_FILE_INHERIT_ACE \
		| NFS4_ACE_DIRECTORY_INHERIT_ACE)

#define NFS4_SUPPORTED_FLAGS (NFS4_INHERITANCE_FLAGS \
		| NFS4_ACE_INHERIT_ONLY_ACE \
		| NFS4_ACE_IDENTIFIER_GROUP)

/*
 * Map POSIX rwx permission bits to an NFSv4 *allow* ACE access mask.
 * Everyone always gets NFS4_ANYONE_MODE; the owner additionally gets
 * attribute/ACL write rights; write on a directory implies DELETE_CHILD.
 */
static u32
mask_from_posix(unsigned short perm, unsigned int flags)
{
	/* NOTE(review): 'int' here vs 'u32' in deny_mask_from_posix below;
	 * harmless, since the composed masks fit comfortably in 31 bits. */
	int mask = NFS4_ANYONE_MODE;

	if (flags & NFS4_ACL_OWNER)
		mask |= NFS4_OWNER_MODE;
	if (perm & ACL_READ)
		mask |= NFS4_READ_MODE;
	if (perm & ACL_WRITE)
		mask |= NFS4_WRITE_MODE;
	if ((perm & ACL_WRITE) && (flags & NFS4_ACL_DIR))
		mask |= NFS4_ACE_DELETE_CHILD;
	if (perm & ACL_EXECUTE)
		mask |= NFS4_EXECUTE_MODE;
	return mask;
}

/*
 * Same translation for a *deny* ACE: no baseline bits, since the
 * "anyone" rights are never denied.
 */
static u32 deny_mask_from_posix(unsigned short perm, u32 flags)
{
	u32 mask = 0;

	if (perm & ACL_READ)
		mask |= NFS4_READ_MODE;
	if (perm & ACL_WRITE)
		mask |= NFS4_WRITE_MODE;
	if ((perm & ACL_WRITE) && (flags & NFS4_ACL_DIR))
		mask |= NFS4_ACE_DELETE_CHILD;
	if (perm & ACL_EXECUTE)
		mask |= NFS4_EXECUTE_MODE;
	return mask;
}

/* XXX: modify functions to return NFS errors; they're only
 * ever used by nfs code, after all.... */

/* We only map from NFSv4 to POSIX ACLs when setting ACLs, when we err on the
 * side of being more restrictive, so the mode bit mapping below is
 * pessimistic.  An optimistic version would be needed to handle DENY's,
 * but we expect to coalesce all ALLOWs and DENYs before mapping to mode
 * bits.
 */
/*
 * Map an NFSv4 ACE access mask back to POSIX rwx bits, granting a bit
 * only when *all* of its constituent NFSv4 rights are present
 * (the pessimistic direction described above).
 */
static void
low_mode_from_nfs4(u32 perm, unsigned short *mode, unsigned int flags)
{
	u32 write_mode = NFS4_WRITE_MODE;

	/* for directories, "write" additionally requires DELETE_CHILD */
	if (flags & NFS4_ACL_DIR)
		write_mode |= NFS4_ACE_DELETE_CHILD;
	*mode = 0;
	if ((perm & NFS4_READ_MODE) == NFS4_READ_MODE)
		*mode |= ACL_READ;
	if ((perm & write_mode) == write_mode)
		*mode |= ACL_WRITE;
	if ((perm & NFS4_EXECUTE_MODE) == NFS4_EXECUTE_MODE)
		*mode |= ACL_EXECUTE;
}

static short ace2type(struct nfs4_ace *);
static void _posix_to_nfsv4_one(struct posix_acl *, struct nfs4_acl *,
				unsigned int);

/*
 * Build the NFSv4 ACL for @dentry from its POSIX access ACL (or, when it
 * has none, from its mode bits) plus, for directories, its default ACL.
 * On success *acl points to a kmalloc'ed nfs4_acl — presumably freed by
 * the caller; verify against the nfsd callers.
 */
int
nfsd4_get_nfs4_acl(struct svc_rqst *rqstp, struct dentry *dentry,
		struct nfs4_acl **acl)
{
	struct inode *inode = d_inode(dentry);
	int error = 0;
	struct posix_acl *pacl = NULL, *dpacl = NULL;
	unsigned int flags = 0;
	int size = 0;

	pacl = get_acl(inode, ACL_TYPE_ACCESS);
	if (!pacl)
		pacl = posix_acl_from_mode(inode->i_mode, GFP_KERNEL);

	if (IS_ERR(pacl))
		return PTR_ERR(pacl);

	/* allocate for worst case: one (deny, allow) pair each: */
	size += 2 * pacl->a_count;

	if (S_ISDIR(inode->i_mode)) {
		flags = NFS4_ACL_DIR;
		dpacl = get_acl(inode, ACL_TYPE_DEFAULT);
		if (IS_ERR(dpacl)) {
			error = PTR_ERR(dpacl);
			goto rel_pacl;
		}

		if (dpacl)
			size += 2 * dpacl->a_count;
	}

	*acl = kmalloc(nfs4_acl_bytes(size), GFP_KERNEL);
	if (*acl == NULL) {
		error = -ENOMEM;
		goto out;
	}
	(*acl)->naces = 0;

	/* translate the access ACL, then append the default ACL's ACEs */
	_posix_to_nfsv4_one(pacl, *acl, flags & ~NFS4_ACL_TYPE_DEFAULT);

	if (dpacl)
		_posix_to_nfsv4_one(dpacl, *acl, flags | NFS4_ACL_TYPE_DEFAULT);

out:
	posix_acl_release(dpacl);
rel_pacl:
	posix_acl_release(pacl);
	return error;
}

/* effective rwx bits of a POSIX ACL, collapsed per tag class */
struct posix_acl_summary {
	unsigned short owner;
	unsigned short users;	/* union over all named-user entries */
	unsigned short group;
	unsigned short groups;	/* union over all named-group entries */
	unsigned short other;
	unsigned short mask;
};

static void
summarize_posix_acl(struct posix_acl *acl, struct posix_acl_summary *pas)
{
	struct posix_acl_entry *pa, *pe;

	/*
	 * Only pas.users and pas.groups need initialization; previous
	 * posix_acl_valid() calls ensure that the other fields will be
	 * initialized in the following loop.
But, just to placate gcc: */ memset(pas, 0, sizeof(*pas)); pas->mask = 07; pe = acl->a_entries + acl->a_count; FOREACH_ACL_ENTRY(pa, acl, pe) { switch (pa->e_tag) { case ACL_USER_OBJ: pas->owner = pa->e_perm; break; case ACL_GROUP_OBJ: pas->group = pa->e_perm; break; case ACL_USER: pas->users |= pa->e_perm; break; case ACL_GROUP: pas->groups |= pa->e_perm; break; case ACL_OTHER: pas->other = pa->e_perm; break; case ACL_MASK: pas->mask = pa->e_perm; break; } } /* We'll only care about effective permissions: */ pas->users &= pas->mask; pas->group &= pas->mask; pas->groups &= pas->mask; } /* We assume the acl has been verified with posix_acl_valid. */ static void _posix_to_nfsv4_one(struct posix_acl *pacl, struct nfs4_acl *acl, unsigned int flags) { struct posix_acl_entry *pa, *group_owner_entry; struct nfs4_ace *ace; struct posix_acl_summary pas; unsigned short deny; int eflag = ((flags & NFS4_ACL_TYPE_DEFAULT) ? NFS4_INHERITANCE_FLAGS | NFS4_ACE_INHERIT_ONLY_ACE : 0); BUG_ON(pacl->a_count < 3); summarize_posix_acl(pacl, &pas); pa = pacl->a_entries; ace = acl->aces + acl->naces; /* We could deny everything not granted by the owner: */ deny = ~pas.owner; /* * but it is equivalent (and simpler) to deny only what is not * granted by later entries: */ deny &= pas.users | pas.group | pas.groups | pas.other; if (deny) { ace->type = NFS4_ACE_ACCESS_DENIED_ACE_TYPE; ace->flag = eflag; ace->access_mask = deny_mask_from_posix(deny, flags); ace->whotype = NFS4_ACL_WHO_OWNER; ace++; acl->naces++; } ace->type = NFS4_ACE_ACCESS_ALLOWED_ACE_TYPE; ace->flag = eflag; ace->access_mask = mask_from_posix(pa->e_perm, flags | NFS4_ACL_OWNER); ace->whotype = NFS4_ACL_WHO_OWNER; ace++; acl->naces++; pa++; while (pa->e_tag == ACL_USER) { deny = ~(pa->e_perm & pas.mask); deny &= pas.groups | pas.group | pas.other; if (deny) { ace->type = NFS4_ACE_ACCESS_DENIED_ACE_TYPE; ace->flag = eflag; ace->access_mask = deny_mask_from_posix(deny, flags); ace->whotype = NFS4_ACL_WHO_NAMED; ace->who_uid = 
pa->e_uid; ace++; acl->naces++; } ace->type = NFS4_ACE_ACCESS_ALLOWED_ACE_TYPE; ace->flag = eflag; ace->access_mask = mask_from_posix(pa->e_perm & pas.mask, flags); ace->whotype = NFS4_ACL_WHO_NAMED; ace->who_uid = pa->e_uid; ace++; acl->naces++; pa++; } /* In the case of groups, we apply allow ACEs first, then deny ACEs, * since a user can be in more than one group. */ /* allow ACEs */ group_owner_entry = pa; ace->type = NFS4_ACE_ACCESS_ALLOWED_ACE_TYPE; ace->flag = eflag; ace->access_mask = mask_from_posix(pas.group, flags); ace->whotype = NFS4_ACL_WHO_GROUP; ace++; acl->naces++; pa++; while (pa->e_tag == ACL_GROUP) { ace->type = NFS4_ACE_ACCESS_ALLOWED_ACE_TYPE; ace->flag = eflag | NFS4_ACE_IDENTIFIER_GROUP; ace->access_mask = mask_from_posix(pa->e_perm & pas.mask, flags); ace->whotype = NFS4_ACL_WHO_NAMED; ace->who_gid = pa->e_gid; ace++; acl->naces++; pa++; } /* deny ACEs */ pa = group_owner_entry; deny = ~pas.group & pas.other; if (deny) { ace->type = NFS4_ACE_ACCESS_DENIED_ACE_TYPE; ace->flag = eflag; ace->access_mask = deny_mask_from_posix(deny, flags); ace->whotype = NFS4_ACL_WHO_GROUP; ace++; acl->naces++; } pa++; while (pa->e_tag == ACL_GROUP) { deny = ~(pa->e_perm & pas.mask); deny &= pas.other; if (deny) { ace->type = NFS4_ACE_ACCESS_DENIED_ACE_TYPE; ace->flag = eflag | NFS4_ACE_IDENTIFIER_GROUP; ace->access_mask = deny_mask_from_posix(deny, flags); ace->whotype = NFS4_ACL_WHO_NAMED; ace->who_gid = pa->e_gid; ace++; acl->naces++; } pa++; } if (pa->e_tag == ACL_MASK) pa++; ace->type = NFS4_ACE_ACCESS_ALLOWED_ACE_TYPE; ace->flag = eflag; ace->access_mask = mask_from_posix(pa->e_perm, flags); ace->whotype = NFS4_ACL_WHO_EVERYONE; acl->naces++; } static bool pace_gt(struct posix_acl_entry *pace1, struct posix_acl_entry *pace2) { if (pace1->e_tag != pace2->e_tag) return pace1->e_tag > pace2->e_tag; if (pace1->e_tag == ACL_USER) return uid_gt(pace1->e_uid, pace2->e_uid); if (pace1->e_tag == ACL_GROUP) return gid_gt(pace1->e_gid, pace2->e_gid); return false; 
} static void sort_pacl_range(struct posix_acl *pacl, int start, int end) { int sorted = 0, i; /* We just do a bubble sort; easy to do in place, and we're not * expecting acl's to be long enough to justify anything more. */ while (!sorted) { sorted = 1; for (i = start; i < end; i++) { if (pace_gt(&pacl->a_entries[i], &pacl->a_entries[i+1])) { sorted = 0; swap(pacl->a_entries[i], pacl->a_entries[i + 1]); } } } } static void sort_pacl(struct posix_acl *pacl) { /* posix_acl_valid requires that users and groups be in order * by uid/gid. */ int i, j; /* no users or groups */ if (!pacl || pacl->a_count <= 4) return; i = 1; while (pacl->a_entries[i].e_tag == ACL_USER) i++; sort_pacl_range(pacl, 1, i-1); BUG_ON(pacl->a_entries[i].e_tag != ACL_GROUP_OBJ); j = ++i; while (pacl->a_entries[j].e_tag == ACL_GROUP) j++; sort_pacl_range(pacl, i, j-1); return; } /* * While processing the NFSv4 ACE, this maintains bitmasks representing * which permission bits have been allowed and which denied to a given * entity: */ struct posix_ace_state { u32 allow; u32 deny; }; struct posix_user_ace_state { union { kuid_t uid; kgid_t gid; }; struct posix_ace_state perms; }; struct posix_ace_state_array { int n; struct posix_user_ace_state aces[]; }; /* * While processing the NFSv4 ACE, this maintains the partial permissions * calculated so far: */ struct posix_acl_state { int empty; struct posix_ace_state owner; struct posix_ace_state group; struct posix_ace_state other; struct posix_ace_state everyone; struct posix_ace_state mask; /* Deny unused in this case */ struct posix_ace_state_array *users; struct posix_ace_state_array *groups; }; static int init_state(struct posix_acl_state *state, int cnt) { int alloc; memset(state, 0, sizeof(struct posix_acl_state)); state->empty = 1; /* * In the worst case, each individual acl could be for a distinct * named user or group, but we don't know which, so we allocate * enough space for either: */ alloc = sizeof(struct posix_ace_state_array) + 
cnt*sizeof(struct posix_user_ace_state); state->users = kzalloc(alloc, GFP_KERNEL); if (!state->users) return -ENOMEM; state->groups = kzalloc(alloc, GFP_KERNEL); if (!state->groups) { kfree(state->users); return -ENOMEM; } return 0; } static void free_state(struct posix_acl_state *state) { kfree(state->users); kfree(state->groups); } static inline void add_to_mask(struct posix_acl_state *state, struct posix_ace_state *astate) { state->mask.allow |= astate->allow; } static struct posix_acl * posix_state_to_acl(struct posix_acl_state *state, unsigned int flags) { struct posix_acl_entry *pace; struct posix_acl *pacl; int nace; int i; /* * ACLs with no ACEs are treated differently in the inheritable * and effective cases: when there are no inheritable ACEs, * calls ->set_acl with a NULL ACL structure. */ if (state->empty && (flags & NFS4_ACL_TYPE_DEFAULT)) return NULL; /* * When there are no effective ACEs, the following will end * up setting a 3-element effective posix ACL with all * permissions zero. 
*/ if (!state->users->n && !state->groups->n) nace = 3; else /* Note we also include a MASK ACE in this case: */ nace = 4 + state->users->n + state->groups->n; pacl = posix_acl_alloc(nace, GFP_KERNEL); if (!pacl) return ERR_PTR(-ENOMEM); pace = pacl->a_entries; pace->e_tag = ACL_USER_OBJ; low_mode_from_nfs4(state->owner.allow, &pace->e_perm, flags); for (i=0; i < state->users->n; i++) { pace++; pace->e_tag = ACL_USER; low_mode_from_nfs4(state->users->aces[i].perms.allow, &pace->e_perm, flags); pace->e_uid = state->users->aces[i].uid; add_to_mask(state, &state->users->aces[i].perms); } pace++; pace->e_tag = ACL_GROUP_OBJ; low_mode_from_nfs4(state->group.allow, &pace->e_perm, flags); add_to_mask(state, &state->group); for (i=0; i < state->groups->n; i++) { pace++; pace->e_tag = ACL_GROUP; low_mode_from_nfs4(state->groups->aces[i].perms.allow, &pace->e_perm, flags); pace->e_gid = state->groups->aces[i].gid; add_to_mask(state, &state->groups->aces[i].perms); } if (state->users->n || state->groups->n) { pace++; pace->e_tag = ACL_MASK; low_mode_from_nfs4(state->mask.allow, &pace->e_perm, flags); } pace++; pace->e_tag = ACL_OTHER; low_mode_from_nfs4(state->other.allow, &pace->e_perm, flags); return pacl; } static inline void allow_bits(struct posix_ace_state *astate, u32 mask) { /* Allow all bits in the mask not already denied: */ astate->allow |= mask & ~astate->deny; } static inline void deny_bits(struct posix_ace_state *astate, u32 mask) { /* Deny all bits in the mask not already allowed: */ astate->deny |= mask & ~astate->allow; } static int find_uid(struct posix_acl_state *state, kuid_t uid) { struct posix_ace_state_array *a = state->users; int i; for (i = 0; i < a->n; i++) if (uid_eq(a->aces[i].uid, uid)) return i; /* Not found: */ a->n++; a->aces[i].uid = uid; a->aces[i].perms.allow = state->everyone.allow; a->aces[i].perms.deny = state->everyone.deny; return i; } static int find_gid(struct posix_acl_state *state, kgid_t gid) { struct posix_ace_state_array *a = 
state->groups; int i; for (i = 0; i < a->n; i++) if (gid_eq(a->aces[i].gid, gid)) return i; /* Not found: */ a->n++; a->aces[i].gid = gid; a->aces[i].perms.allow = state->everyone.allow; a->aces[i].perms.deny = state->everyone.deny; return i; } static void deny_bits_array(struct posix_ace_state_array *a, u32 mask) { int i; for (i=0; i < a->n; i++) deny_bits(&a->aces[i].perms, mask); } static void allow_bits_array(struct posix_ace_state_array *a, u32 mask) { int i; for (i=0; i < a->n; i++) allow_bits(&a->aces[i].perms, mask); } static void process_one_v4_ace(struct posix_acl_state *state, struct nfs4_ace *ace) { u32 mask = ace->access_mask; int i; state->empty = 0; switch (ace2type(ace)) { case ACL_USER_OBJ: if (ace->type == NFS4_ACE_ACCESS_ALLOWED_ACE_TYPE) { allow_bits(&state->owner, mask); } else { deny_bits(&state->owner, mask); } break; case ACL_USER: i = find_uid(state, ace->who_uid); if (ace->type == NFS4_ACE_ACCESS_ALLOWED_ACE_TYPE) { allow_bits(&state->users->aces[i].perms, mask); } else { deny_bits(&state->users->aces[i].perms, mask); mask = state->users->aces[i].perms.deny; deny_bits(&state->owner, mask); } break; case ACL_GROUP_OBJ: if (ace->type == NFS4_ACE_ACCESS_ALLOWED_ACE_TYPE) { allow_bits(&state->group, mask); } else { deny_bits(&state->group, mask); mask = state->group.deny; deny_bits(&state->owner, mask); deny_bits(&state->everyone, mask); deny_bits_array(state->users, mask); deny_bits_array(state->groups, mask); } break; case ACL_GROUP: i = find_gid(state, ace->who_gid); if (ace->type == NFS4_ACE_ACCESS_ALLOWED_ACE_TYPE) { allow_bits(&state->groups->aces[i].perms, mask); } else { deny_bits(&state->groups->aces[i].perms, mask); mask = state->groups->aces[i].perms.deny; deny_bits(&state->owner, mask); deny_bits(&state->group, mask); deny_bits(&state->everyone, mask); deny_bits_array(state->users, mask); deny_bits_array(state->groups, mask); } break; case ACL_OTHER: if (ace->type == NFS4_ACE_ACCESS_ALLOWED_ACE_TYPE) { allow_bits(&state->owner, 
mask); allow_bits(&state->group, mask); allow_bits(&state->other, mask); allow_bits(&state->everyone, mask); allow_bits_array(state->users, mask); allow_bits_array(state->groups, mask); } else { deny_bits(&state->owner, mask); deny_bits(&state->group, mask); deny_bits(&state->other, mask); deny_bits(&state->everyone, mask); deny_bits_array(state->users, mask); deny_bits_array(state->groups, mask); } } } static int nfs4_acl_nfsv4_to_posix(struct nfs4_acl *acl, struct posix_acl **pacl, struct posix_acl **dpacl, unsigned int flags) { struct posix_acl_state effective_acl_state, default_acl_state; struct nfs4_ace *ace; int ret; ret = init_state(&effective_acl_state, acl->naces); if (ret) return ret; ret = init_state(&default_acl_state, acl->naces); if (ret) goto out_estate; ret = -EINVAL; for (ace = acl->aces; ace < acl->aces + acl->naces; ace++) { if (ace->type != NFS4_ACE_ACCESS_ALLOWED_ACE_TYPE && ace->type != NFS4_ACE_ACCESS_DENIED_ACE_TYPE) goto out_dstate; if (ace->flag & ~NFS4_SUPPORTED_FLAGS) goto out_dstate; if ((ace->flag & NFS4_INHERITANCE_FLAGS) == 0) { process_one_v4_ace(&effective_acl_state, ace); continue; } if (!(flags & NFS4_ACL_DIR)) goto out_dstate; /* * Note that when only one of FILE_INHERIT or DIRECTORY_INHERIT * is set, we're effectively turning on the other. That's OK, * according to rfc 3530. 
*/ process_one_v4_ace(&default_acl_state, ace); if (!(ace->flag & NFS4_ACE_INHERIT_ONLY_ACE)) process_one_v4_ace(&effective_acl_state, ace); } *pacl = posix_state_to_acl(&effective_acl_state, flags); if (IS_ERR(*pacl)) { ret = PTR_ERR(*pacl); *pacl = NULL; goto out_dstate; } *dpacl = posix_state_to_acl(&default_acl_state, flags | NFS4_ACL_TYPE_DEFAULT); if (IS_ERR(*dpacl)) { ret = PTR_ERR(*dpacl); *dpacl = NULL; posix_acl_release(*pacl); *pacl = NULL; goto out_dstate; } sort_pacl(*pacl); sort_pacl(*dpacl); ret = 0; out_dstate: free_state(&default_acl_state); out_estate: free_state(&effective_acl_state); return ret; } __be32 nfsd4_set_nfs4_acl(struct svc_rqst *rqstp, struct svc_fh *fhp, struct nfs4_acl *acl) { __be32 error; int host_error; struct dentry *dentry; struct inode *inode; struct posix_acl *pacl = NULL, *dpacl = NULL; unsigned int flags = 0; /* Get inode */ error = fh_verify(rqstp, fhp, 0, NFSD_MAY_SATTR); if (error) return error; dentry = fhp->fh_dentry; inode = d_inode(dentry); if (S_ISDIR(inode->i_mode)) flags = NFS4_ACL_DIR; host_error = nfs4_acl_nfsv4_to_posix(acl, &pacl, &dpacl, flags); if (host_error == -EINVAL) return nfserr_attrnotsupp; if (host_error < 0) goto out_nfserr; fh_lock(fhp); host_error = set_posix_acl(inode, ACL_TYPE_ACCESS, pacl); if (host_error < 0) goto out_drop_lock; if (S_ISDIR(inode->i_mode)) { host_error = set_posix_acl(inode, ACL_TYPE_DEFAULT, dpacl); } out_drop_lock: fh_unlock(fhp); posix_acl_release(pacl); posix_acl_release(dpacl); out_nfserr: if (host_error == -EOPNOTSUPP) return nfserr_attrnotsupp; else return nfserrno(host_error); } static short ace2type(struct nfs4_ace *ace) { switch (ace->whotype) { case NFS4_ACL_WHO_NAMED: return (ace->flag & NFS4_ACE_IDENTIFIER_GROUP ? 
ACL_GROUP : ACL_USER); case NFS4_ACL_WHO_OWNER: return ACL_USER_OBJ; case NFS4_ACL_WHO_GROUP: return ACL_GROUP_OBJ; case NFS4_ACL_WHO_EVERYONE: return ACL_OTHER; } BUG(); return -1; } /* * return the size of the struct nfs4_acl required to represent an acl * with @entries entries. */ int nfs4_acl_bytes(int entries) { return sizeof(struct nfs4_acl) + entries * sizeof(struct nfs4_ace); } static struct { char *string; int stringlen; int type; } s2t_map[] = { { .string = "OWNER@", .stringlen = sizeof("OWNER@") - 1, .type = NFS4_ACL_WHO_OWNER, }, { .string = "GROUP@", .stringlen = sizeof("GROUP@") - 1, .type = NFS4_ACL_WHO_GROUP, }, { .string = "EVERYONE@", .stringlen = sizeof("EVERYONE@") - 1, .type = NFS4_ACL_WHO_EVERYONE, }, }; int nfs4_acl_get_whotype(char *p, u32 len) { int i; for (i = 0; i < ARRAY_SIZE(s2t_map); i++) { if (s2t_map[i].stringlen == len && 0 == memcmp(s2t_map[i].string, p, len)) return s2t_map[i].type; } return NFS4_ACL_WHO_NAMED; } __be32 nfs4_acl_write_who(struct xdr_stream *xdr, int who) { __be32 *p; int i; for (i = 0; i < ARRAY_SIZE(s2t_map); i++) { if (s2t_map[i].type != who) continue; p = xdr_reserve_space(xdr, s2t_map[i].stringlen + 4); if (!p) return nfserr_resource; p = xdr_encode_opaque(p, s2t_map[i].string, s2t_map[i].stringlen); return 0; } WARN_ON_ONCE(1); return nfserr_serverfault; }
./CrossVul/dataset_final_sorted/CWE-284/c/good_4896_2
crossvul-cpp_data_bad_880_1
/* * Copyright (C) 2014-2019 Firejail Authors * * This file is part of firejail project * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #include "firejail.h" #include "../include/ldd_utils.h" #include <sys/mount.h> #include <sys/stat.h> #include <sys/types.h> #include <unistd.h> #include <dirent.h> #include <glob.h> #define MAXBUF 4096 extern void fslib_install_stdc(void); extern void fslib_install_system(void); static int lib_cnt = 0; static int dir_cnt = 0; static void report_duplication(const char *full_path) { char *fname = strrchr(full_path, '/'); if (fname && *(++fname) != '\0') { // report the file on all bin paths int i = 0; while (default_lib_paths[i]) { char *p; if (asprintf(&p, "%s/%s", default_lib_paths[i], fname) == -1) errExit("asprintf"); fs_logger2("clone", p); free(p); i++; } } } static char *build_dest_dir(const char *full_path) { assert(full_path); if (strstr(full_path, "/x86_64-linux-gnu/")) return RUN_LIB_DIR "/x86_64-linux-gnu"; return RUN_LIB_DIR; } // copy fname in private_run_dir void fslib_duplicate(const char *full_path) { assert(full_path); struct stat s; if (stat(full_path, &s) != 0 || s.st_uid != 0 || access(full_path, R_OK)) return; char *dest_dir = build_dest_dir(full_path); // don't copy it if the file is already there char *ptr = strrchr(full_path, '/'); if (!ptr) return; ptr++; if 
(*ptr == '\0') return; char *name; if (asprintf(&name, "%s/%s", dest_dir, ptr) == -1) errExit("asprintf"); if (stat(name, &s) == 0) { free(name); return; } free(name); if (arg_debug || arg_debug_private_lib) printf(" copying %s to private %s\n", full_path, dest_dir); sbox_run(SBOX_ROOT| SBOX_SECCOMP, 4, PATH_FCOPY, "--follow-link", full_path, dest_dir); report_duplication(full_path); lib_cnt++; } // requires full path for lib // it could be a library or an executable // lib is not copied, only libraries used by it void fslib_copy_libs(const char *full_path) { assert(full_path); if (arg_debug || arg_debug_private_lib) printf(" fslib_copy_libs %s\n", full_path); // if library/executable does not exist or the user does not have read access to it // print a warning and exit the function. if (access(full_path, R_OK)) { if (arg_debug || arg_debug_private_lib) printf("cannot find %s for private-lib, skipping...\n", full_path); return; } // create an empty RUN_LIB_FILE and allow the user to write to it unlink(RUN_LIB_FILE); // in case is there create_empty_file_as_root(RUN_LIB_FILE, 0644); if (chown(RUN_LIB_FILE, getuid(), getgid())) errExit("chown"); // run fldd to extract the list of files if (arg_debug || arg_debug_private_lib) printf(" running fldd %s\n", full_path); sbox_run(SBOX_USER | SBOX_SECCOMP | SBOX_CAPS_NONE, 3, PATH_FLDD, full_path, RUN_LIB_FILE); // open the list of libraries and install them on by one FILE *fp = fopen(RUN_LIB_FILE, "r"); if (!fp) errExit("fopen"); char buf[MAXBUF]; while (fgets(buf, MAXBUF, fp)) { // remove \n char *ptr = strchr(buf, '\n'); if (ptr) *ptr = '\0'; fslib_duplicate(buf); } fclose(fp); } void fslib_copy_dir(const char *full_path) { assert(full_path); if (arg_debug || arg_debug_private_lib) printf(" fslib_copy_dir %s\n", full_path); // do nothing if the directory does not exist or is not owned by root struct stat s; if (stat(full_path, &s) != 0 || s.st_uid != 0 || !S_ISDIR(s.st_mode) || access(full_path, R_OK)) return; char 
*dir_name = strrchr(full_path, '/'); assert(dir_name); dir_name++; assert(*dir_name != '\0'); // do nothing if the directory is already there char *dest; if (asprintf(&dest, "%s/%s", build_dest_dir(full_path), dir_name) == -1) errExit("asprintf"); if (stat(dest, &s) == 0) { free(dest); return; } // create new directory and mount the original on top of it mkdir_attr(dest, 0755, 0, 0); if (mount(full_path, dest, NULL, MS_BIND|MS_REC, NULL) < 0 || mount(NULL, dest, NULL, MS_BIND|MS_REMOUNT|MS_NOSUID|MS_NODEV|MS_REC, NULL) < 0) errExit("mount bind"); fs_logger2("clone", full_path); fs_logger2("mount", full_path); dir_cnt++; free(dest); } // fname should be a vallid full path at this point static void load_library(const char *fname) { assert(fname); assert(*fname == '/'); // existing file owned by root, read access struct stat s; if (stat(fname, &s) == 0 && s.st_uid == 0 && !access(fname, R_OK)) { // load directories, regular 64 bit libraries, and 64 bit executables if (is_dir(fname) || is_lib_64(fname)) { if (is_dir(fname)) fslib_copy_dir(fname); else { if (strstr(fname, ".so") || access(fname, X_OK) != 0) // don't duplicate executables, just install the libraries fslib_duplicate(fname); fslib_copy_libs(fname); } } } } static void install_list_entry(const char *lib) { assert(lib); // filename check int len = strlen(lib); if (strcspn(lib, "\\&!?\"'<>%^(){}[];,") != (size_t)len || strstr(lib, "..")) { fprintf(stderr, "Error: \"%s\" is an invalid library\n", lib); exit(1); } // if this is a full path, use it as is if (*lib == '/') return load_library(lib); // find the library int i; for (i = 0; default_lib_paths[i]; i++) { char *fname = NULL; if (asprintf(&fname, "%s/%s", default_lib_paths[i], lib) == -1) errExit("asprintf"); #define DO_GLOBBING #ifdef DO_GLOBBING // globbing glob_t globbuf; int globerr = glob(fname, GLOB_NOCHECK | GLOB_NOSORT | GLOB_PERIOD, NULL, &globbuf); if (globerr) { fprintf(stderr, "Error: failed to glob private-lib pattern %s\n", fname); exit(1); 
} size_t j; for (j = 0; j < globbuf.gl_pathc; j++) { assert(globbuf.gl_pathv[j]); //printf("glob %s\n", globbuf.gl_pathv[j]); // GLOB_NOCHECK - no pattern matched returns the original pattern; try to load it anyway load_library(globbuf.gl_pathv[j]); } globfree(&globbuf); #else load_library(fname); #endif free(fname); } // fwarning("%s library not found, skipping...\n", lib); return; } void fslib_install_list(const char *lib_list) { assert(lib_list); if (arg_debug || arg_debug_private_lib) printf(" fslib_install_list %s\n", lib_list); char *dlist = strdup(lib_list); if (!dlist) errExit("strdup"); char *ptr = strtok(dlist, ","); if (!ptr) { fprintf(stderr, "Error: invalid private-lib argument\n"); exit(1); } install_list_entry(ptr); while ((ptr = strtok(NULL, ",")) != NULL) install_list_entry(ptr); free(dlist); fs_logger_print(); } static void mount_directories(void) { if (arg_debug || arg_debug_private_lib) printf("Mount-bind %s on top of /lib /lib64 /usr/lib\n", RUN_LIB_DIR); if (is_dir("/lib")) { if (mount(RUN_LIB_DIR, "/lib", NULL, MS_BIND|MS_REC, NULL) < 0 || mount(NULL, "/lib", NULL, MS_BIND|MS_REMOUNT|MS_NOSUID|MS_NODEV|MS_REC, NULL) < 0) errExit("mount bind"); fs_logger2("tmpfs", "/lib"); fs_logger("mount /lib"); } if (is_dir("/lib64")) { if (mount(RUN_LIB_DIR, "/lib64", NULL, MS_BIND|MS_REC, NULL) < 0 || mount(NULL, "/lib64", NULL, MS_BIND|MS_REMOUNT|MS_NOSUID|MS_NODEV|MS_REC, NULL) < 0) errExit("mount bind"); fs_logger2("tmpfs", "/lib64"); fs_logger("mount /lib64"); } if (is_dir("/usr/lib")) { if (mount(RUN_LIB_DIR, "/usr/lib", NULL, MS_BIND|MS_REC, NULL) < 0 || mount(NULL, "/usr/lib", NULL, MS_BIND|MS_REMOUNT|MS_NOSUID|MS_NODEV|MS_REC, NULL) < 0) errExit("mount bind"); fs_logger2("tmpfs", "/usr/lib"); fs_logger("mount /usr/lib"); } // for amd64 only - we'll deal with i386 later if (is_dir("/lib32")) { if (mount(RUN_RO_DIR, "/lib32", "none", MS_BIND, "mode=400,gid=0") < 0) errExit("disable file"); fs_logger("blacklist-nolog /lib32"); } if 
(is_dir("/libx32")) { if (mount(RUN_RO_DIR, "/libx32", "none", MS_BIND, "mode=400,gid=0") < 0) errExit("disable file"); fs_logger("blacklist-nolog /libx32"); } } void fs_private_lib(void) { #ifndef __x86_64__ fwarning("private-lib feature is currently available only on amd64 platforms\n"); return; #endif char *private_list = cfg.lib_private_keep; if (arg_debug || arg_debug_private_lib) printf("Starting private-lib processing: program %s, shell %s\n", (cfg.original_program_index > 0)? cfg.original_argv[cfg.original_program_index]: "none", (arg_shell_none)? "none": cfg.shell); // create /run/firejail/mnt/lib directory mkdir_attr(RUN_LIB_DIR, 0755, 0, 0); // install standard C libraries if (arg_debug || arg_debug_private_lib) printf("Installing standard C library\n"); fslib_install_stdc(); // start timetrace timetrace_start(); // copy the libs in the new lib directory for the main exe if (cfg.original_program_index > 0) { if (arg_debug || arg_debug_private_lib) printf("Installing sandboxed program libraries\n"); fslib_install_list(cfg.original_argv[cfg.original_program_index]); } // for the shell if (!arg_shell_none) { if (arg_debug || arg_debug_private_lib) printf("Installing shell libraries\n"); fslib_install_list(cfg.shell); // a shell is useless without some basic commands fslib_install_list("/bin/ls,/bin/cat,/bin/mv,/bin/rm"); } // for the listed libs and directories if (private_list && *private_list != '\0') { if (arg_debug || arg_debug_private_lib) printf("Processing private-lib files\n"); fslib_install_list(private_list); } // for private-bin files if (arg_private_bin && cfg.bin_private_lib && *cfg.bin_private_lib != '\0') { if (arg_debug || arg_debug_private_lib) printf("Processing private-bin files\n"); fslib_install_list(cfg.bin_private_lib); } fmessage("Program libraries installed in %0.2f ms\n", timetrace_end()); // install the reset of the system libraries if (arg_debug || arg_debug_private_lib) printf("Installing system libraries\n"); 
fslib_install_system(); // bring in firejail directory for --trace and seccomp post exec // bring in firejail executable libraries in case we are redirected here by a firejail symlink from /usr/local/bin/firejail fslib_install_list("/usr/bin/firejail,firejail"); // todo: use the installed path for the executable fmessage("Installed %d %s and %d %s\n", lib_cnt, (lib_cnt == 1)? "library": "libraries", dir_cnt, (dir_cnt == 1)? "directory": "directories"); // mount lib filesystem mount_directories(); }
./CrossVul/dataset_final_sorted/CWE-284/c/bad_880_1
crossvul-cpp_data_good_5016_0
/* * libndp.c - Neighbour discovery library * Copyright (C) 2013-2015 Jiri Pirko <jiri@resnulli.us> * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include <stdio.h> #include <stdlib.h> #include <unistd.h> #include <stdarg.h> #include <string.h> #include <errno.h> #include <ctype.h> #include <sys/socket.h> #include <sys/select.h> #include <netinet/in.h> #include <netinet/icmp6.h> #include <arpa/inet.h> #include <net/ethernet.h> #include <assert.h> #include <ndp.h> #include "ndp_private.h" #include "list.h" /** * SECTION: logging * @short_description: libndp logging facility */ void ndp_log(struct ndp *ndp, int priority, const char *file, int line, const char *fn, const char *format, ...) 
{ va_list args; va_start(args, format); ndp->log_fn(ndp, priority, file, line, fn, format, args); va_end(args); } static void log_stderr(struct ndp *ndp, int priority, const char *file, int line, const char *fn, const char *format, va_list args) { fprintf(stderr, "libndp: %s: ", fn); vfprintf(stderr, format, args); fprintf(stderr, "\n"); } static int log_priority(const char *priority) { char *endptr; int prio; prio = strtol(priority, &endptr, 10); if (endptr[0] == '\0' || isspace(endptr[0])) return prio; if (strncmp(priority, "err", 3) == 0) return LOG_ERR; if (strncmp(priority, "info", 4) == 0) return LOG_INFO; if (strncmp(priority, "debug", 5) == 0) return LOG_DEBUG; return 0; } /** * ndp_set_log_fn: * @ndp: libndp library context * @log_fn: function to be called for logging messages * * The built-in logging writes to stderr. It can be * overridden by a custom function, to plug log messages * into the user's logging functionality. **/ NDP_EXPORT void ndp_set_log_fn(struct ndp *ndp, void (*log_fn)(struct ndp *ndp, int priority, const char *file, int line, const char *fn, const char *format, va_list args)) { ndp->log_fn = log_fn; dbg(ndp, "Custom logging function %p registered.", log_fn); } /** * ndp_get_log_priority: * @ndp: libndp library context * * Returns: the current logging priority. **/ NDP_EXPORT int ndp_get_log_priority(struct ndp *ndp) { return ndp->log_priority; } /** * ndp_set_log_priority: * @ndp: libndp library context * @priority: the new logging priority * * Set the current logging priority. The value controls which messages * are logged. 
**/ NDP_EXPORT void ndp_set_log_priority(struct ndp *ndp, int priority) { ndp->log_priority = priority; } /** * SECTION: helpers * @short_description: various internal helper functions */ #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) #define BUG_ON(expr) { if (expr) assert(0); } static void *myzalloc(size_t size) { return calloc(1, size); } static int myrecvfrom6(int sockfd, void *buf, size_t *buflen, int flags, struct in6_addr *addr, uint32_t *ifindex, int *hoplimit) { struct sockaddr_in6 sin6; unsigned char cbuf[2 * CMSG_SPACE(sizeof(struct in6_pktinfo))]; struct iovec iovec; struct msghdr msghdr; struct cmsghdr *cmsghdr; ssize_t len; iovec.iov_len = *buflen; iovec.iov_base = buf; memset(&msghdr, 0, sizeof(msghdr)); msghdr.msg_name = &sin6; msghdr.msg_namelen = sizeof(sin6); msghdr.msg_iov = &iovec; msghdr.msg_iovlen = 1; msghdr.msg_control = cbuf; msghdr.msg_controllen = sizeof(cbuf); len = recvmsg(sockfd, &msghdr, flags); if (len == -1) return -errno; *buflen = len; /* Set ifindex to scope_id now. But since scope_id gets not * set by kernel for linklocal addresses, use pktinfo to obtain that * value right after. 
*/ *ifindex = sin6.sin6_scope_id; for (cmsghdr = CMSG_FIRSTHDR(&msghdr); cmsghdr; cmsghdr = CMSG_NXTHDR(&msghdr, cmsghdr)) { if (cmsghdr->cmsg_level != IPPROTO_IPV6) continue; switch(cmsghdr->cmsg_type) { case IPV6_PKTINFO: if (cmsghdr->cmsg_len == CMSG_LEN(sizeof(struct in6_pktinfo))) { struct in6_pktinfo *pktinfo; pktinfo = (struct in6_pktinfo *) CMSG_DATA(cmsghdr); *ifindex = pktinfo->ipi6_ifindex; } break; case IPV6_HOPLIMIT: if (cmsghdr->cmsg_len == CMSG_LEN(sizeof(int))) { int *val; val = (int *) CMSG_DATA(cmsghdr); *hoplimit = *val; } break; } } *addr = sin6.sin6_addr; return 0; } static int mysendto6(int sockfd, void *buf, size_t buflen, int flags, struct in6_addr *addr, uint32_t ifindex) { struct sockaddr_in6 sin6; ssize_t ret; memset(&sin6, 0, sizeof(sin6)); memcpy(&sin6.sin6_addr, addr, sizeof(sin6.sin6_addr)); sin6.sin6_scope_id = ifindex; resend: ret = sendto(sockfd, buf, buflen, flags, &sin6, sizeof(sin6)); if (ret == -1) { switch(errno) { case EINTR: goto resend; default: return -errno; } } return 0; } static const char *str_in6_addr(struct in6_addr *addr) { static char buf[INET6_ADDRSTRLEN]; return inet_ntop(AF_INET6, addr, buf, sizeof(buf)); } /** * SECTION: NDP implementation * @short_description: functions that actually implements NDP */ static int ndp_sock_open(struct ndp *ndp) { int sock; //struct icmp6_filter flt; int ret; int err; int val; sock = socket(PF_INET6, SOCK_RAW, IPPROTO_ICMPV6); if (sock == -1) { err(ndp, "Failed to create ICMP6 socket."); return -errno; } val = 1; ret = setsockopt(sock, IPPROTO_IPV6, IPV6_RECVPKTINFO, &val, sizeof(val)); if (ret == -1) { err(ndp, "Failed to setsockopt IPV6_RECVPKTINFO."); err = -errno; goto close_sock; } val = 255; ret = setsockopt(sock, IPPROTO_IPV6, IPV6_MULTICAST_HOPS, &val, sizeof(val)); if (ret == -1) { err(ndp, "Failed to setsockopt IPV6_MULTICAST_HOPS."); err = -errno; goto close_sock; } val = 1; ret = setsockopt(sock, IPPROTO_IPV6, IPV6_RECVHOPLIMIT, &val, sizeof(val)); if (ret == -1) { 
		err(ndp, "Failed to setsockopt IPV6_RECVHOPLIMIT,.");
		err = -errno;
		goto close_sock;
	}

	ndp->sock = sock;
	return 0;
close_sock:
	close(sock);
	return err;
}

/* Close the raw ICMPv6 socket opened by ndp_sock_open(). */
static void ndp_sock_close(struct ndp *ndp)
{
	close(ndp->sock);
}

/* Per-type views of a message payload.  Each struct contains a single
 * pointer as its first member so the union in struct ndp_msg can be
 * initialized once through the "generic" member and then read through
 * any of the typed members.
 */
struct ndp_msggeneric {
	void *dataptr; /* must be first */
};

struct ndp_msgrs {
	struct nd_router_solicit *rs; /* must be first */
};

struct ndp_msgra {
	struct nd_router_advert *ra; /* must be first */
};

struct ndp_msgns {
	struct nd_neighbor_solicit *ns; /* must be first */
};

struct ndp_msgna {
	struct nd_neighbor_advert *na; /* must be first */
};

struct ndp_msgr {
	struct nd_redirect *r; /* must be first */
};

/* One ND message: raw wire bytes plus parsed metadata. */
struct ndp_msg {
#define NDP_MSG_BUFLEN 1500
	unsigned char		buf[NDP_MSG_BUFLEN];	/* raw packet bytes */
	size_t			len;			/* used bytes in buf */
	struct in6_addr		addrto;			/* peer address */
	uint32_t		ifindex;		/* interface index */
	int			hoplimit;		/* received hop limit */
	struct icmp6_hdr	*icmp6_hdr;		/* alias into buf */
	unsigned char		*opts_start; /* pointer to buf at the place where opts start */
	union {
		struct ndp_msggeneric	generic;
		struct ndp_msgrs	rs;
		struct ndp_msgra	ra;
		struct ndp_msgns	ns;
		struct ndp_msgna	na;
		struct ndp_msgr		r;
	} nd_msg;
};

/* Static description of one ND message type. */
struct ndp_msg_type_info {
#define NDP_STRABBR_SIZE 4
	char strabbr[NDP_STRABBR_SIZE];	/* short name for logging */
	uint8_t raw_type;		/* ICMPv6 type byte */
	size_t raw_struct_size;		/* minimum payload size */
	void (*addrto_adjust)(struct in6_addr *addr);	/* fix up dest addr before send */
	bool (*addrto_validate)(struct in6_addr *addr);	/* validate src addr on receive */
};

/* If addr is the unspecified address, replace it with the all-nodes
 * multicast address ff02::1; otherwise leave it alone.
 */
static void ndp_msg_addrto_adjust_all_nodes(struct in6_addr *addr)
{
	struct in6_addr any = IN6ADDR_ANY_INIT;

	if (memcmp(addr, &any, sizeof(any)))
		return;
	addr->s6_addr32[0] = htonl(0xFF020000);
	addr->s6_addr32[1] = 0;
	addr->s6_addr32[2] = 0;
	addr->s6_addr32[3] = htonl(0x1);
}

/* If addr is the unspecified address, replace it with the all-routers
 * multicast address ff02::2; otherwise leave it alone.
 */
static void ndp_msg_addrto_adjust_all_routers(struct in6_addr *addr)
{
	struct in6_addr any = IN6ADDR_ANY_INIT;

	if (memcmp(addr, &any, sizeof(any)))
		return;
	addr->s6_addr32[0] = htonl(0xFF020000);
	addr->s6_addr32[1] = 0;
	addr->s6_addr32[2] = 0;
	addr->s6_addr32[3] = htonl(0x2);
}

/* RA and Redirect messages must come from a link-local source. */
static bool ndp_msg_addrto_validate_link_local(struct in6_addr *addr)
{
	return IN6_IS_ADDR_LINKLOCAL (addr);
}

/* Table of all supported ND message types, indexed by enum ndp_msg_type. */
static struct ndp_msg_type_info ndp_msg_type_info_list[] =
{
	[NDP_MSG_RS] = {
		.strabbr = "RS",
		.raw_type = ND_ROUTER_SOLICIT,
		.raw_struct_size = sizeof(struct nd_router_solicit),
		.addrto_adjust = ndp_msg_addrto_adjust_all_routers,
	},
	[NDP_MSG_RA] = {
		.strabbr = "RA",
		.raw_type = ND_ROUTER_ADVERT,
		.raw_struct_size = sizeof(struct nd_router_advert),
		.addrto_validate = ndp_msg_addrto_validate_link_local,
	},
	[NDP_MSG_NS] = {
		.strabbr = "NS",
		.raw_type = ND_NEIGHBOR_SOLICIT,
		.raw_struct_size = sizeof(struct nd_neighbor_solicit),
		.addrto_adjust = ndp_msg_addrto_adjust_all_nodes,
	},
	[NDP_MSG_NA] = {
		.strabbr = "NA",
		.raw_type = ND_NEIGHBOR_ADVERT,
		.raw_struct_size = sizeof(struct nd_neighbor_advert),
	},
	[NDP_MSG_R] = {
		.strabbr = "R",
		.raw_type = ND_REDIRECT,
		.raw_struct_size = sizeof(struct nd_redirect),
		.addrto_validate = ndp_msg_addrto_validate_link_local,
	},
};

#define NDP_MSG_TYPE_LIST_SIZE ARRAY_SIZE(ndp_msg_type_info_list)

/* Look up the type-info entry for a message type (no bounds checking —
 * callers pass trusted enum values).
 */
struct ndp_msg_type_info *ndp_msg_type_info(enum ndp_msg_type msg_type)
{
	return &ndp_msg_type_info_list[msg_type];
}

/* Map an ICMPv6 type byte back to an enum ndp_msg_type.
 * Returns zero and fills *p_msg_type, or -ENOENT for unknown types.
 */
static int ndp_msg_type_by_raw_type(enum ndp_msg_type *p_msg_type,
				    uint8_t raw_type)
{
	int i;

	for (i = 0; i < NDP_MSG_TYPE_LIST_SIZE; i++) {
		if (ndp_msg_type_info(i)->raw_type == raw_type) {
			*p_msg_type = i;
			return 0;
		}
	}
	return -ENOENT;
}

/* Validate a received message: payload must be at least as large as the
 * fixed header for its type, and the peer address must satisfy the
 * type's validator (if any).
 */
static bool ndp_msg_check_valid(struct ndp_msg *msg)
{
	size_t len = ndp_msg_payload_len(msg);
	enum ndp_msg_type msg_type = ndp_msg_type(msg);

	if (len < ndp_msg_type_info(msg_type)->raw_struct_size)
		return false;
	if (ndp_msg_type_info(msg_type)->addrto_validate)
		return ndp_msg_type_info(msg_type)->addrto_validate(&msg->addrto);
	else
		return true;
}

/* Allocate a zeroed message and point its icmp6_hdr alias at the buffer.
 * Returns NULL on allocation failure.
 */
static struct ndp_msg *ndp_msg_alloc(void)
{
	struct ndp_msg *msg;

	msg = myzalloc(sizeof(*msg));
	if (!msg)
		return NULL;
	msg->icmp6_hdr = (struct icmp6_hdr *) msg->buf;
	return msg;
}

static void ndp_msg_type_set(struct ndp_msg *msg, enum ndp_msg_type msg_type);

/* Initialize a freshly allocated message for the given type: set the
 * ICMPv6 type byte, the initial length and the options pointer.
 */
static void ndp_msg_init(struct ndp_msg *msg, enum ndp_msg_type msg_type)
{
	size_t raw_struct_size = ndp_msg_type_info(msg_type)->raw_struct_size;
	ndp_msg_type_set(msg, msg_type);
	msg->len = raw_struct_size;
	msg->opts_start = msg->buf + raw_struct_size;

	/* Set-up "first pointers" in all ndp_msgrs, ndp_msgra, ndp_msgns,
	 * ndp_msgna, ndp_msgr structures.
	 */
	msg->nd_msg.generic.dataptr = ndp_msg_payload(msg);
}

/**
 * ndp_msg_new:
 * @p_msg: pointer where new message structure address will be stored
 * @msg_type: message type
 *
 * Allocate new message structure of a specified type and initialize it.
 * NDP_MSG_ALL is a wildcard for handler registration only and is
 * rejected here with -EINVAL.
 *
 * Returns: zero on success or negative number in case of an error.
 **/
NDP_EXPORT
int ndp_msg_new(struct ndp_msg **p_msg, enum ndp_msg_type msg_type)
{
	struct ndp_msg *msg;

	if (msg_type == NDP_MSG_ALL)
		return -EINVAL;
	msg = ndp_msg_alloc();
	if (!msg)
		return -ENOMEM;
	ndp_msg_init(msg, msg_type);
	*p_msg = msg;
	return 0;
}

/**
 * ndp_msg_destroy:
 *
 * Destroy message structure.
 **/
NDP_EXPORT
void ndp_msg_destroy(struct ndp_msg *msg)
{
	free(msg);
}

/**
 * ndp_msg_payload:
 * @msg: message structure
 *
 * Get raw Neighbour discovery packet data.
 *
 * Returns: pointer to raw data.
 **/
NDP_EXPORT
void *ndp_msg_payload(struct ndp_msg *msg)
{
	return msg->buf;
}

/**
 * ndp_msg_payload_maxlen:
 * @msg: message structure
 *
 * Get raw Neighbour discovery packet data maximum length.
 *
 * Returns: length in bytes.
 **/
NDP_EXPORT
size_t ndp_msg_payload_maxlen(struct ndp_msg *msg)
{
	return sizeof(msg->buf);
}

/**
 * ndp_msg_payload_len:
 * @msg: message structure
 *
 * Get raw Neighbour discovery packet data length.
 *
 * Returns: length in bytes.
 **/
NDP_EXPORT
size_t ndp_msg_payload_len(struct ndp_msg *msg)
{
	return msg->len;
}

/**
 * ndp_msg_payload_len_set:
 * @msg: message structure
 *
 * Set raw Neighbour discovery packet data length.
 * Lengths larger than the internal buffer are silently clamped.
 **/
NDP_EXPORT
void ndp_msg_payload_len_set(struct ndp_msg *msg, size_t len)
{
	if (len > sizeof(msg->buf))
		len = sizeof(msg->buf);
	msg->len = len;
}

/**
 * ndp_msg_payload_opts:
 * @msg: message structure
 *
 * Get raw Neighbour discovery packet options part data.
 *
 * Returns: pointer to raw data.
 **/
NDP_EXPORT
void *ndp_msg_payload_opts(struct ndp_msg *msg)
{
	return msg->opts_start;
}

/* Return a pointer offset bytes into the options area (no bounds check;
 * callers obtain offsets from ndp_msg_next_opt_offset()).
 */
static void *ndp_msg_payload_opts_offset(struct ndp_msg *msg, int offset)
{
	unsigned char *ptr = ndp_msg_payload_opts(msg);

	return ptr + offset;
}

/**
 * ndp_msg_payload_opts_len:
 * @msg: message structure
 *
 * Get raw Neighbour discovery packet options part data length.
 *
 * Returns: length in bytes.
 **/
NDP_EXPORT
size_t ndp_msg_payload_opts_len(struct ndp_msg *msg)
{
	return msg->len - (msg->opts_start - msg->buf);
}

/**
 * ndp_msgrs:
 * @msg: message structure
 *
 * Get RS message structure by passed @msg.
 *
 * Returns: RS message structure or NULL in case the message is not of type RS.
 **/
NDP_EXPORT
struct ndp_msgrs *ndp_msgrs(struct ndp_msg *msg)
{
	if (ndp_msg_type(msg) != NDP_MSG_RS)
		return NULL;
	return &msg->nd_msg.rs;
}

/**
 * ndp_msgra:
 * @msg: message structure
 *
 * Get RA message structure by passed @msg.
 *
 * Returns: RA message structure or NULL in case the message is not of type RA.
 **/
NDP_EXPORT
struct ndp_msgra *ndp_msgra(struct ndp_msg *msg)
{
	if (ndp_msg_type(msg) != NDP_MSG_RA)
		return NULL;
	return &msg->nd_msg.ra;
}

/**
 * ndp_msgns:
 * @msg: message structure
 *
 * Get NS message structure by passed @msg.
 *
 * Returns: NS message structure or NULL in case the message is not of type NS.
 **/
NDP_EXPORT
struct ndp_msgns *ndp_msgns(struct ndp_msg *msg)
{
	if (ndp_msg_type(msg) != NDP_MSG_NS)
		return NULL;
	return &msg->nd_msg.ns;
}

/**
 * ndp_msgna:
 * @msg: message structure
 *
 * Get NA message structure by passed @msg.
 *
 * Returns: NA message structure or NULL in case the message is not of type NA.
 **/
NDP_EXPORT
struct ndp_msgna *ndp_msgna(struct ndp_msg *msg)
{
	if (ndp_msg_type(msg) != NDP_MSG_NA)
		return NULL;
	return &msg->nd_msg.na;
}

/**
 * ndp_msgr:
 * @msg: message structure
 *
 * Get R message structure by passed @msg.
 *
 * Returns: R message structure or NULL in case the message is not of type R.
 **/
NDP_EXPORT
struct ndp_msgr *ndp_msgr(struct ndp_msg *msg)
{
	if (ndp_msg_type(msg) != NDP_MSG_R)
		return NULL;
	return &msg->nd_msg.r;
}

/**
 * ndp_msg_type:
 * @msg: message structure
 *
 * Get type of message.
 *
 * Returns: Message type
 **/
NDP_EXPORT
enum ndp_msg_type ndp_msg_type(struct ndp_msg *msg)
{
	enum ndp_msg_type msg_type;
	int err;

	err = ndp_msg_type_by_raw_type(&msg_type, msg->icmp6_hdr->icmp6_type);
	/* Type should be always set correctly (ensured by ndp_msg_init) */
	BUG_ON(err);
	return msg_type;
}

/* Write the type's raw ICMPv6 type byte into the message header. */
static void ndp_msg_type_set(struct ndp_msg *msg, enum ndp_msg_type msg_type)
{
	msg->icmp6_hdr->icmp6_type = ndp_msg_type_info(msg_type)->raw_type;
}

/**
 * ndp_msg_addrto:
 * @msg: message structure
 *
 * Get "to address" of message.
 *
 * Returns: pointer to address.
 **/
NDP_EXPORT
struct in6_addr *ndp_msg_addrto(struct ndp_msg *msg)
{
	return &msg->addrto;
}

/**
 * ndp_msg_ifindex:
 * @msg: message structure
 *
 * Get interface index of message.
 *
 * Returns: Interface index
 **/
NDP_EXPORT
uint32_t ndp_msg_ifindex(struct ndp_msg *msg)
{
	return msg->ifindex;
}

/**
 * ndp_msg_ifindex_set:
 * @msg: message structure
 *
 * Set raw interface index of message.
 **/
NDP_EXPORT
void ndp_msg_ifindex_set(struct ndp_msg *msg, uint32_t ifindex)
{
	msg->ifindex = ifindex;
}

/**
 * ndp_msg_send:
 * @ndp: libndp library context
 * @msg: message structure
 *
 * Send message.
 *
 * Returns: zero on success or negative number in case of an error.
 **/
NDP_EXPORT
int ndp_msg_send(struct ndp *ndp, struct ndp_msg *msg)
{
	return ndp_msg_send_with_flags(ndp, msg, ND_OPT_NORMAL);
}

/**
 * ndp_msg_send_with_flags:
 * @ndp: libndp library context
 * @msg: message structure
 * @flags: option flags within message type
 *
 * Send message.
 *
 * Returns: zero on success or negative number in case of an error.
 **/
NDP_EXPORT
int ndp_msg_send_with_flags(struct ndp *ndp, struct ndp_msg *msg, uint8_t flags)
{
	enum ndp_msg_type msg_type = ndp_msg_type(msg);

	/* Fill in the default multicast destination when caller left the
	 * destination unspecified (types with an addrto_adjust hook).
	 */
	if (ndp_msg_type_info(msg_type)->addrto_adjust)
		ndp_msg_type_info(msg_type)->addrto_adjust(&msg->addrto);

	switch (msg_type) {
	case NDP_MSG_NA:
		/* Unsolicited NA: override set, solicited clear, sent to
		 * all-nodes (RFC 4861 section 7.2.6).
		 */
		if (flags & ND_OPT_NA_UNSOL) {
			ndp_msgna_flag_override_set((struct ndp_msgna*)&msg->nd_msg, true);
			ndp_msgna_flag_solicited_set((struct ndp_msgna*)&msg->nd_msg, false);
			ndp_msg_addrto_adjust_all_nodes(&msg->addrto);
		} else {
			ndp_msgna_flag_solicited_set((struct ndp_msgna*)&msg->nd_msg, true);
		}
		break;
	default:
		break;
	}

	return mysendto6(ndp->sock, msg->buf, msg->len, 0, &msg->addrto,
			 msg->ifindex);
}

/**
 * SECTION: msgra getters/setters
 * @short_description: Getters and setters for RA message
 */

/**
 * ndp_msgra_curhoplimit:
 * @msgra: RA message structure
 *
 * Get RA curhoplimit.
 *
 * Returns: curhoplimit.
 **/
NDP_EXPORT
uint8_t ndp_msgra_curhoplimit(struct ndp_msgra *msgra)
{
	return msgra->ra->nd_ra_curhoplimit;
}

/**
 * ndp_msgra_curhoplimit_set:
 * @msgra: RA message structure
 *
 * Set RA curhoplimit.
 **/
NDP_EXPORT
void ndp_msgra_curhoplimit_set(struct ndp_msgra *msgra, uint8_t curhoplimit)
{
	msgra->ra->nd_ra_curhoplimit = curhoplimit;
}

/**
 * ndp_msgra_flag_managed:
 * @msgra: RA message structure
 *
 * Get RA managed flag.
 *
 * Returns: managed flag.
 **/
NDP_EXPORT
bool ndp_msgra_flag_managed(struct ndp_msgra *msgra)
{
	return msgra->ra->nd_ra_flags_reserved & ND_RA_FLAG_MANAGED;
}

/**
 * ndp_msgra_flag_managed_set:
 * @msgra: RA message structure
 *
 * Set RA managed flag.
 **/
NDP_EXPORT
void ndp_msgra_flag_managed_set(struct ndp_msgra *msgra, bool flag_managed)
{
	if (flag_managed)
		msgra->ra->nd_ra_flags_reserved |= ND_RA_FLAG_MANAGED;
	else
		msgra->ra->nd_ra_flags_reserved &= ~ND_RA_FLAG_MANAGED;
}

/**
 * ndp_msgra_flag_other:
 * @msgra: RA message structure
 *
 * Get RA other flag.
 *
 * Returns: other flag.
 **/
NDP_EXPORT
bool ndp_msgra_flag_other(struct ndp_msgra *msgra)
{
	return msgra->ra->nd_ra_flags_reserved & ND_RA_FLAG_OTHER;
}

/**
 * ndp_msgra_flag_other_set:
 * @msgra: RA message structure
 *
 * Set RA other flag.
 **/
NDP_EXPORT
void ndp_msgra_flag_other_set(struct ndp_msgra *msgra, bool flag_other)
{
	if (flag_other)
		msgra->ra->nd_ra_flags_reserved |= ND_RA_FLAG_OTHER;
	else
		msgra->ra->nd_ra_flags_reserved &= ~ND_RA_FLAG_OTHER;
}

/**
 * ndp_msgra_flag_home_agent:
 * @msgra: RA message structure
 *
 * Get RA home_agent flag.
 *
 * Returns: home_agent flag.
 **/
NDP_EXPORT
bool ndp_msgra_flag_home_agent(struct ndp_msgra *msgra)
{
	return msgra->ra->nd_ra_flags_reserved & ND_RA_FLAG_HOME_AGENT;
}

/**
 * ndp_msgra_flag_home_agent_set:
 * @msgra: RA message structure
 *
 * Set RA home_agent flag.
 **/
NDP_EXPORT
void ndp_msgra_flag_home_agent_set(struct ndp_msgra *msgra,
				   bool flag_home_agent)
{
	if (flag_home_agent)
		msgra->ra->nd_ra_flags_reserved |= ND_RA_FLAG_HOME_AGENT;
	else
		msgra->ra->nd_ra_flags_reserved &= ~ND_RA_FLAG_HOME_AGENT;
}

/**
 * ndp_msgra_route_preference:
 * @msgra: RA message structure
 *
 * Get route preference.
 *
 * Returns: route preference.
 **/
NDP_EXPORT
enum ndp_route_preference ndp_msgra_route_preference(struct ndp_msgra *msgra)
{
	/* Preference lives in bits 3-4 of the flags byte (RFC 4191). */
	uint8_t prf = (msgra->ra->nd_ra_flags_reserved >> 3) & 3;

	/* rfc4191 says:
	 * If the Router Lifetime is zero, the preference value MUST be set to
	 * (00) by the sender and MUST be ignored by the receiver.
	 * If the Reserved (10) value is received, the receiver MUST treat the
	 * value as if it were (00).
	 */
	if (prf == 2 || !ndp_msgra_router_lifetime(msgra))
		prf = 0;
	return prf;
}

/**
 * ndp_msgra_route_preference_set:
 * @msgra: RA message structure
 * @pref: preference
 *
 * Set route preference.
 **/
NDP_EXPORT
void ndp_msgra_route_preference_set(struct ndp_msgra *msgra,
				    enum ndp_route_preference pref)
{
	msgra->ra->nd_ra_flags_reserved &= ~(3 << 3);
	msgra->ra->nd_ra_flags_reserved |= (pref << 3);
}

/**
 * ndp_msgra_router_lifetime:
 * @msgra: RA message structure
 *
 * Get RA router lifetime.
 *
 * Returns: router lifetime in seconds.
 **/
NDP_EXPORT
uint16_t ndp_msgra_router_lifetime(struct ndp_msgra *msgra)
{
	return ntohs(msgra->ra->nd_ra_router_lifetime);
}

/**
 * ndp_msgra_router_lifetime_set:
 * @msgra: RA message structure
 *
 * Set RA router lifetime.
 **/
NDP_EXPORT
void ndp_msgra_router_lifetime_set(struct ndp_msgra *msgra,
				   uint16_t router_lifetime)
{
	msgra->ra->nd_ra_router_lifetime = htons(router_lifetime);
}

/**
 * ndp_msgra_reachable_time:
 * @msgra: RA message structure
 *
 * Get RA reachable time.
 *
 * Returns: reachable time in milliseconds.
 **/
NDP_EXPORT
uint32_t ndp_msgra_reachable_time(struct ndp_msgra *msgra)
{
	return ntohl(msgra->ra->nd_ra_reachable);
}

/**
 * ndp_msgra_reachable_time_set:
 * @msgra: RA message structure
 *
 * Set RA reachable time.
 **/
NDP_EXPORT
void ndp_msgra_reachable_time_set(struct ndp_msgra *msgra,
				  uint32_t reachable_time)
{
	msgra->ra->nd_ra_reachable = htonl(reachable_time);
}

/**
 * ndp_msgra_retransmit_time:
 * @msgra: RA message structure
 *
 * Get RA retransmit time.
 *
 * Returns: retransmit time in milliseconds.
 **/
NDP_EXPORT
uint32_t ndp_msgra_retransmit_time(struct ndp_msgra *msgra)
{
	return ntohl(msgra->ra->nd_ra_retransmit);
}

/**
 * ndp_msgra_retransmit_time_set:
 * @msgra: RA message structure
 *
 * Set RA retransmit time.
 **/
NDP_EXPORT
void ndp_msgra_retransmit_time_set(struct ndp_msgra *msgra,
				   uint32_t retransmit_time)
{
	msgra->ra->nd_ra_retransmit = htonl(retransmit_time);
}

/**
 * SECTION: msgna getters/setters
 * @short_description: Getters and setters for NA message
 */

/**
 * ndp_msgna_flag_router:
 * @msgna: NA message structure
 *
 * Get NA router flag.
 *
 * Returns: router flag.
 **/
NDP_EXPORT
bool ndp_msgna_flag_router(struct ndp_msgna *msgna)
{
	return msgna->na->nd_na_flags_reserved & ND_NA_FLAG_ROUTER;
}

/**
 * ndp_msgna_flag_router_set:
 * @msgna: NA message structure
 *
 * Set NA router flag.
 **/
NDP_EXPORT
void ndp_msgna_flag_router_set(struct ndp_msgna *msgna, bool flag_router)
{
	if (flag_router)
		msgna->na->nd_na_flags_reserved |= ND_NA_FLAG_ROUTER;
	else
		msgna->na->nd_na_flags_reserved &= ~ND_NA_FLAG_ROUTER;
}

/**
 * ndp_msgna_flag_solicited:
 * @msgna: NA message structure
 *
 * Get NA solicited flag.
 *
 * Returns: solicited flag.
 **/
NDP_EXPORT
bool ndp_msgna_flag_solicited(struct ndp_msgna *msgna)
{
	return msgna->na->nd_na_flags_reserved & ND_NA_FLAG_SOLICITED;
}

/**
 * ndp_msgna_flag_solicited_set:
 * @msgna: NA message structure
 *
 * Set NA solicited flag.
 **/
NDP_EXPORT
void ndp_msgna_flag_solicited_set(struct ndp_msgna *msgna,
				  bool flag_solicited)
{
	if (flag_solicited)
		msgna->na->nd_na_flags_reserved |= ND_NA_FLAG_SOLICITED;
	else
		msgna->na->nd_na_flags_reserved &= ~ND_NA_FLAG_SOLICITED;
}

/**
 * ndp_msgna_flag_override:
 * @msgna: NA message structure
 *
 * Get NA override flag.
 *
 * Returns: override flag.
 **/
NDP_EXPORT
bool ndp_msgna_flag_override(struct ndp_msgna *msgna)
{
	return msgna->na->nd_na_flags_reserved & ND_NA_FLAG_OVERRIDE;
}

/**
 * ndp_msgna_flag_override_set:
 * @msgna: NA message structure
 *
 * Set NA override flag.
*/ NDP_EXPORT void ndp_msgna_flag_override_set(struct ndp_msgna *msgna, bool flag_override) { if (flag_override) msgna->na->nd_na_flags_reserved |= ND_NA_FLAG_OVERRIDE; else msgna->na->nd_na_flags_reserved &= ~ND_NA_FLAG_OVERRIDE; } /** * SECTION: msg_opt infrastructure * @short_description: Infrastructure for options */ struct ndp_msg_opt_type_info { uint8_t raw_type; size_t raw_struct_size; bool (*check_valid)(void *opt_data); }; static bool ndp_msg_opt_route_check_valid(void *opt_data) { struct __nd_opt_route_info *ri = opt_data; /* rfc4191 says: * If the Reserved (10) value is received, the Route Information Option * MUST be ignored. */ if (((ri->nd_opt_ri_prf_reserved >> 3) & 3) == 2) return false; return true; } static struct ndp_msg_opt_type_info ndp_msg_opt_type_info_list[] = { [NDP_MSG_OPT_SLLADDR] = { .raw_type = ND_OPT_SOURCE_LINKADDR, }, [NDP_MSG_OPT_TLLADDR] = { .raw_type = ND_OPT_TARGET_LINKADDR, }, [NDP_MSG_OPT_PREFIX] = { .raw_type = ND_OPT_PREFIX_INFORMATION, .raw_struct_size = sizeof(struct nd_opt_prefix_info), }, [NDP_MSG_OPT_REDIR] = { .raw_type = ND_OPT_REDIRECTED_HEADER, }, [NDP_MSG_OPT_MTU] = { .raw_type = ND_OPT_MTU, .raw_struct_size = sizeof(struct nd_opt_mtu), }, [NDP_MSG_OPT_ROUTE] = { .raw_type = __ND_OPT_ROUTE_INFO, .raw_struct_size = sizeof(struct __nd_opt_route_info), .check_valid = ndp_msg_opt_route_check_valid, }, [NDP_MSG_OPT_RDNSS] = { .raw_type = __ND_OPT_RDNSS, .raw_struct_size = sizeof(struct __nd_opt_rdnss), }, [NDP_MSG_OPT_DNSSL] = { .raw_type = __ND_OPT_DNSSL, .raw_struct_size = sizeof(struct __nd_opt_dnssl), }, }; #define NDP_MSG_OPT_TYPE_LIST_SIZE ARRAY_SIZE(ndp_msg_opt_type_info_list) struct ndp_msg_opt_type_info *ndp_msg_opt_type_info(enum ndp_msg_opt_type msg_opt_type) { return &ndp_msg_opt_type_info_list[msg_opt_type]; } struct ndp_msg_opt_type_info *ndp_msg_opt_type_info_by_raw_type(uint8_t raw_type) { struct ndp_msg_opt_type_info *info; int i; for (i = 0; i < NDP_MSG_OPT_TYPE_LIST_SIZE; i++) { info = 
&ndp_msg_opt_type_info_list[i]; if (info->raw_type == raw_type) return info; } return NULL; } /** * ndp_msg_next_opt_offset: * @msg: message structure * @offset: option payload offset * @opt_type: option type * * Find next offset of option of given type. If offset is -1, start from * beginning, otherwise start from the given offset. * This funstion is internally used by ndp_msg_opt_for_each_offset() macro. * * Returns: offset in opt payload of found opt of -1 in case it was not found. **/ NDP_EXPORT int ndp_msg_next_opt_offset(struct ndp_msg *msg, int offset, enum ndp_msg_opt_type opt_type) { unsigned char *opts_start = ndp_msg_payload_opts(msg); unsigned char *ptr = opts_start; size_t len = ndp_msg_payload_opts_len(msg); uint8_t opt_raw_type = ndp_msg_opt_type_info(opt_type)->raw_type; bool ignore = true; if (offset == -1) { offset = 0; ignore = false; } ptr += offset; len -= offset; while (len > 0) { uint8_t cur_opt_raw_type = ptr[0]; unsigned int cur_opt_len = ptr[1] << 3; /* convert to bytes */ if (!cur_opt_len || len < cur_opt_len) break; if (cur_opt_raw_type == opt_raw_type && !ignore) return ptr - opts_start; ptr += cur_opt_len; len -= cur_opt_len; ignore = false; } return -1; } #define __INVALID_OPT_TYPE_MAGIC 0xff /* * Check for validity of options and mark by magic opt type in case it is not * so ndp_msg_next_opt_offset() will ignore it. 
 */
static bool ndp_msg_check_opts(struct ndp_msg *msg)
{
	unsigned char *ptr = ndp_msg_payload_opts(msg);
	size_t len = ndp_msg_payload_opts_len(msg);
	struct ndp_msg_opt_type_info *info;

	while (len > 0) {
		uint8_t cur_opt_raw_type = ptr[0];
		unsigned int cur_opt_len = ptr[1] << 3; /* convert to bytes */

		/* Zero-length option makes the whole message invalid. */
		if (!cur_opt_len)
			return false;
		if (len < cur_opt_len)
			break;
		info = ndp_msg_opt_type_info_by_raw_type(cur_opt_raw_type);
		if (info) {
			/* Undersized or semantically invalid options are
			 * overwritten with a magic type byte so later
			 * iteration skips them.
			 */
			if (cur_opt_len < info->raw_struct_size ||
			    (info->check_valid && !info->check_valid(ptr)))
				ptr[0] = __INVALID_OPT_TYPE_MAGIC;
		}
		ptr += cur_opt_len;
		len -= cur_opt_len;
	}

	return true;
}

/**
 * SECTION: msg_opt getters/setters
 * @short_description: Getters and setters for options
 */

/**
 * ndp_msg_opt_slladdr:
 * @msg: message structure
 * @offset: in-message offset
 *
 * Get source linkaddr.
 * User should use this function only inside ndp_msg_opt_for_each_offset()
 * macro loop.
 *
 * Returns: pointer to source linkaddr.
 **/
NDP_EXPORT
unsigned char *ndp_msg_opt_slladdr(struct ndp_msg *msg, int offset)
{
	unsigned char *opt_data = ndp_msg_payload_opts_offset(msg, offset);

	/* Link-layer address starts after the 2-byte type/len header. */
	return &opt_data[2];
}

/**
 * ndp_msg_opt_slladdr_len:
 * @msg: message structure
 * @offset: in-message offset
 *
 * Get source linkaddr length.
 * User should use this function only inside ndp_msg_opt_for_each_offset()
 * macro loop.
 *
 * Returns: source linkaddr length.
 **/
NDP_EXPORT
size_t ndp_msg_opt_slladdr_len(struct ndp_msg *msg, int offset)
{
	/* Only Ethernet link-layer addresses are supported here. */
	return ETH_ALEN;
}

/**
 * ndp_msg_opt_tlladdr:
 * @msg: message structure
 * @offset: in-message offset
 *
 * Get target linkaddr.
 * User should use this function only inside ndp_msg_opt_for_each_offset()
 * macro loop.
 *
 * Returns: pointer to target linkaddr.
 **/
NDP_EXPORT
unsigned char *ndp_msg_opt_tlladdr(struct ndp_msg *msg, int offset)
{
	unsigned char *opt_data = ndp_msg_payload_opts_offset(msg, offset);

	/* Link-layer address starts after the 2-byte type/len header. */
	return &opt_data[2];
}

/**
 * ndp_msg_opt_tlladdr_len:
 * @msg: message structure
 * @offset: in-message offset
 *
 * Get target linkaddr length.
 * User should use this function only inside ndp_msg_opt_for_each_offset()
 * macro loop.
 *
 * Returns: target linkaddr length.
 **/
NDP_EXPORT
size_t ndp_msg_opt_tlladdr_len(struct ndp_msg *msg, int offset)
{
	/* Only Ethernet link-layer addresses are supported here. */
	return ETH_ALEN;
}

/**
 * ndp_msg_opt_prefix:
 * @msg: message structure
 * @offset: in-message offset
 *
 * Get prefix addr.
 * User should use this function only inside ndp_msg_opt_for_each_offset()
 * macro loop.
 *
 * Returns: pointer to address.
 **/
NDP_EXPORT
struct in6_addr *ndp_msg_opt_prefix(struct ndp_msg *msg, int offset)
{
	struct nd_opt_prefix_info *pi =
			ndp_msg_payload_opts_offset(msg, offset);

	return &pi->nd_opt_pi_prefix;
}

/**
 * ndp_msg_opt_prefix_len:
 * @msg: message structure
 * @offset: in-message offset
 *
 * Get prefix length.
 * User should use this function only inside ndp_msg_opt_for_each_offset()
 * macro loop.
 *
 * Returns: length of prefix.
 **/
NDP_EXPORT
uint8_t ndp_msg_opt_prefix_len(struct ndp_msg *msg, int offset)
{
	struct nd_opt_prefix_info *pi =
			ndp_msg_payload_opts_offset(msg, offset);

	return pi->nd_opt_pi_prefix_len;
}

/**
 * ndp_msg_opt_prefix_valid_time:
 * @msg: message structure
 * @offset: in-message offset
 *
 * Get prefix valid time.
 * User should use this function only inside ndp_msg_opt_for_each_offset()
 * macro loop.
 *
 * Returns: valid time in seconds, (uint32_t) -1 means infinity.
 **/
NDP_EXPORT
uint32_t ndp_msg_opt_prefix_valid_time(struct ndp_msg *msg, int offset)
{
	struct nd_opt_prefix_info *pi =
			ndp_msg_payload_opts_offset(msg, offset);

	return ntohl(pi->nd_opt_pi_valid_time);
}

/**
 * ndp_msg_opt_prefix_preferred_time:
 * @msg: message structure
 * @offset: in-message offset
 *
 * Get prefix preferred time.
 * User should use this function only inside ndp_msg_opt_for_each_offset()
 * macro loop.
 *
 * Returns: preferred time in seconds, (uint32_t) -1 means infinity.
 **/
NDP_EXPORT
uint32_t ndp_msg_opt_prefix_preferred_time(struct ndp_msg *msg, int offset)
{
	struct nd_opt_prefix_info *pi =
			ndp_msg_payload_opts_offset(msg, offset);

	return ntohl(pi->nd_opt_pi_preferred_time);
}

/**
 * ndp_msg_opt_prefix_flag_on_link:
 * @msg: message structure
 * @offset: in-message offset
 *
 * Get on-link flag.
 * User should use this function only inside ndp_msg_opt_for_each_offset()
 * macro loop.
 *
 * Returns: on-link flag.
 **/
NDP_EXPORT
bool ndp_msg_opt_prefix_flag_on_link(struct ndp_msg *msg, int offset)
{
	struct nd_opt_prefix_info *pi =
			ndp_msg_payload_opts_offset(msg, offset);

	return pi->nd_opt_pi_flags_reserved & ND_OPT_PI_FLAG_ONLINK;
}

/**
 * ndp_msg_opt_prefix_flag_auto_addr_conf:
 * @msg: message structure
 * @offset: in-message offset
 *
 * Get autonomous address-configuration flag.
 * User should use this function only inside ndp_msg_opt_for_each_offset()
 * macro loop.
 *
 * Returns: autonomous address-configuration flag.
 **/
NDP_EXPORT
bool ndp_msg_opt_prefix_flag_auto_addr_conf(struct ndp_msg *msg, int offset)
{
	struct nd_opt_prefix_info *pi =
			ndp_msg_payload_opts_offset(msg, offset);

	return pi->nd_opt_pi_flags_reserved & ND_OPT_PI_FLAG_AUTO;
}

/**
 * ndp_msg_opt_prefix_flag_router_addr:
 * @msg: message structure
 * @offset: in-message offset
 *
 * Get router address flag.
 * User should use this function only inside ndp_msg_opt_for_each_offset()
 * macro loop.
 *
 * Returns: router address flag.
 **/
NDP_EXPORT
bool ndp_msg_opt_prefix_flag_router_addr(struct ndp_msg *msg, int offset)
{
	struct nd_opt_prefix_info *pi =
			ndp_msg_payload_opts_offset(msg, offset);

	return pi->nd_opt_pi_flags_reserved & ND_OPT_PI_FLAG_RADDR;
}

/**
 * ndp_msg_opt_mtu:
 * @msg: message structure
 * @offset: in-message offset
 *
 * Get MTU. User should check if mtu option is present before calling this.
 *
 * Returns: MTU.
**/ NDP_EXPORT uint32_t ndp_msg_opt_mtu(struct ndp_msg *msg, int offset) { struct nd_opt_mtu *mtu = ndp_msg_payload_opts_offset(msg, offset); return ntohl(mtu->nd_opt_mtu_mtu); } /** * ndp_msg_opt_route_prefix: * @msg: message structure * @offset: in-message offset * * Get route prefix addr. * User should use this function only inside ndp_msg_opt_for_each_offset() * macro loop. * * Returns: address. **/ NDP_EXPORT struct in6_addr *ndp_msg_opt_route_prefix(struct ndp_msg *msg, int offset) { static struct in6_addr prefix; struct __nd_opt_route_info *ri = ndp_msg_payload_opts_offset(msg, offset); memset(&prefix, 0, sizeof(prefix)); memcpy(&prefix, &ri->nd_opt_ri_prefix, (ri->nd_opt_ri_len - 1) << 3); return &prefix; } /** * ndp_msg_opt_route_prefix_len: * @msg: message structure * @offset: in-message offset * * Get route prefix length. * User should use this function only inside ndp_msg_opt_for_each_offset() * macro loop. * * Returns: length of route prefix. **/ NDP_EXPORT uint8_t ndp_msg_opt_route_prefix_len(struct ndp_msg *msg, int offset) { struct __nd_opt_route_info *ri = ndp_msg_payload_opts_offset(msg, offset); return ri->nd_opt_ri_prefix_len; } /** * ndp_msg_opt_route_lifetime: * @msg: message structure * @offset: in-message offset * * Get route lifetime. * User should use this function only inside ndp_msg_opt_for_each_offset() * macro loop. * * Returns: route lifetime in seconds, (uint32_t) -1 means infinity. **/ NDP_EXPORT uint32_t ndp_msg_opt_route_lifetime(struct ndp_msg *msg, int offset) { struct __nd_opt_route_info *ri = ndp_msg_payload_opts_offset(msg, offset); return ntohl(ri->nd_opt_ri_lifetime); } /** * ndp_msg_opt_route_preference: * @msg: message structure * @offset: in-message offset * * Get route preference. * User should use this function only inside ndp_msg_opt_for_each_offset() * macro loop. * * Returns: route preference. 
 **/
NDP_EXPORT
enum ndp_route_preference ndp_msg_opt_route_preference(struct ndp_msg *msg,
						       int offset)
{
	struct __nd_opt_route_info *ri =
			ndp_msg_payload_opts_offset(msg, offset);

	/* Preference lives in bits 3-4 of the prf/reserved byte (RFC 4191). */
	return (ri->nd_opt_ri_prf_reserved >> 3) & 3;
}

/**
 * ndp_msg_opt_rdnss_lifetime:
 * @msg: message structure
 * @offset: in-message offset
 *
 * Get Recursive DNS Server lifetime.
 * User should use this function only inside ndp_msg_opt_for_each_offset()
 * macro loop.
 *
 * Returns: route lifetime in seconds, (uint32_t) -1 means infinity.
 **/
NDP_EXPORT
uint32_t ndp_msg_opt_rdnss_lifetime(struct ndp_msg *msg, int offset)
{
	struct __nd_opt_rdnss *rdnss = ndp_msg_payload_opts_offset(msg, offset);

	return ntohl(rdnss->nd_opt_rdnss_lifetime);
}

/**
 * ndp_msg_opt_rdnss_addr:
 * @msg: message structure
 * @offset: in-message offset
 * @addr_index: address index
 *
 * Get Recursive DNS Server address.
 * User should use this function only inside ndp_msg_opt_for_each_offset()
 * macro loop.
 *
 * Returns: address (pointer to a static buffer, valid until the next
 * call), or NULL when @addr_index is out of range for this option.
 **/
NDP_EXPORT
struct in6_addr *ndp_msg_opt_rdnss_addr(struct ndp_msg *msg, int offset,
					int addr_index)
{
	static struct in6_addr addr;
	struct __nd_opt_rdnss *rdnss = ndp_msg_payload_opts_offset(msg, offset);
	size_t len = rdnss->nd_opt_rdnss_len << 3; /* convert to bytes */

	/* len now covers only the address array, not the option header. */
	len -= in_struct_offset(struct __nd_opt_rdnss, nd_opt_rdnss_addresses);
	if ((addr_index + 1) * sizeof(addr) > len)
		return NULL;
	memcpy(&addr, &rdnss->nd_opt_rdnss_addresses[addr_index * sizeof(addr)],
	       sizeof(addr));
	return &addr;
}

/**
 * ndp_msg_opt_dnssl_lifetime:
 * @msg: message structure
 * @offset: in-message offset
 *
 * Get DNS Search List lifetime.
 * User should use this function only inside ndp_msg_opt_for_each_offset()
 * macro loop.
 *
 * Returns: route lifetime in seconds, (uint32_t) -1 means infinity.
 **/
NDP_EXPORT
uint32_t ndp_msg_opt_dnssl_lifetime(struct ndp_msg *msg, int offset)
{
	struct __nd_opt_dnssl *dnssl = ndp_msg_payload_opts_offset(msg, offset);

	return ntohl(dnssl->nd_opt_dnssl_lifetime);
}

/**
 * ndp_msg_opt_dnssl_domain:
 * @msg: message structure
 * @offset: in-message offset
 * @domain_index: domain index
 *
 * Get DNS Search List domain.
 * User should use this function only inside ndp_msg_opt_for_each_offset()
 * macro loop.
 *
 * Returns: domain name (pointer to a static buffer, valid until the next
 * call), or NULL when @domain_index is out of range or the wire encoding
 * is malformed.
 **/
NDP_EXPORT
char *ndp_msg_opt_dnssl_domain(struct ndp_msg *msg, int offset,
			       int domain_index)
{
	int i;
	static char buf[256];
	struct __nd_opt_dnssl *dnssl = ndp_msg_payload_opts_offset(msg, offset);
	size_t len = dnssl->nd_opt_dnssl_len << 3; /* convert to bytes */
	char *ptr;

	/* len now covers only the encoded domain list, not the header. */
	len -= in_struct_offset(struct __nd_opt_dnssl, nd_opt_dnssl_domains);
	ptr = dnssl->nd_opt_dnssl_domains;
	i = 0;
	while (len > 0) {
		size_t buf_len = 0;

		/* Decode one DNS wire-format name: a sequence of
		 * length-prefixed labels terminated by a zero byte,
		 * joined with '.' into buf.
		 */
		while (len > 0) {
			uint8_t dom_len = *ptr;

			ptr++;
			len--;
			if (!dom_len)
				break;
			/* Bail out on truncated labels or names that
			 * would not fit into buf.
			 */
			if (dom_len > len)
				return NULL;
			if (buf_len + dom_len + 1 > sizeof(buf))
				return NULL;
			memcpy(buf + buf_len, ptr, dom_len);
			buf[buf_len + dom_len] = '.';
			ptr += dom_len;
			len -= dom_len;
			buf_len += dom_len + 1;
		}
		if (!buf_len)
			break;
		buf[buf_len - 1] = '\0'; /* overwrite final '.'
 */
		if (i++ == domain_index)
			return buf;
	}
	return NULL;
}

static int ndp_call_handlers(struct ndp *ndp, struct ndp_msg *msg);

/* Receive, validate and dispatch one message from the raw socket.
 * Invalid or unknown messages are silently dropped (err stays 0).
 * Note: "err" is both a local int and the logging macro of the same
 * name — the macro form err(ndp, ...) still expands correctly because
 * it is function-like.
 */
static int ndp_sock_recv(struct ndp *ndp)
{
	struct ndp_msg *msg;
	enum ndp_msg_type msg_type;
	size_t len;
	int err;

	msg = ndp_msg_alloc();
	if (!msg)
		return -ENOMEM;

	len = ndp_msg_payload_maxlen(msg);
	err = myrecvfrom6(ndp->sock, msg->buf, &len, 0,
			  &msg->addrto, &msg->ifindex, &msg->hoplimit);
	if (err) {
		err(ndp, "Failed to receive message");
		goto free_msg;
	}
	dbg(ndp, "rcvd from: %s, ifindex: %u, hoplimit: %d",
		 str_in6_addr(&msg->addrto), msg->ifindex, msg->hoplimit);

	/* RFC 4861: ND packets must arrive with hop limit 255, proving
	 * they were not forwarded.
	 */
	if (msg->hoplimit != 255) {
		warn(ndp, "ignoring packet with bad hop limit (%d)", msg->hoplimit);
		err = 0;
		goto free_msg;
	}

	if (len < sizeof(*msg->icmp6_hdr)) {
		warn(ndp, "rcvd icmp6 packet too short (%luB)", len);
		err = 0;
		goto free_msg;
	}
	err = ndp_msg_type_by_raw_type(&msg_type, msg->icmp6_hdr->icmp6_type);
	if (err) {
		/* Unknown ICMPv6 type — not an error, just ignore. */
		err = 0;
		goto free_msg;
	}
	ndp_msg_init(msg, msg_type);
	ndp_msg_payload_len_set(msg, len);

	if (!ndp_msg_check_valid(msg)) {
		warn(ndp, "rcvd invalid ND message");
		err = 0;
		goto free_msg;
	}

	dbg(ndp, "rcvd %s, len: %zuB",
		 ndp_msg_type_info(msg_type)->strabbr, len);

	if (!ndp_msg_check_opts(msg)) {
		err = 0;
		goto free_msg;
	}

	err = ndp_call_handlers(ndp, msg);;

free_msg:
	ndp_msg_destroy(msg);
	return err;
}

/**
 * SECTION: msgrcv handler
 * @short_description: msgrcv handler and related stuff
 */

/* One registered receive handler: callback plus its match criteria. */
struct ndp_msgrcv_handler_item {
	struct list_item			list;
	ndp_msgrcv_handler_func_t		func;
	enum ndp_msg_type			msg_type;
	uint32_t				ifindex;
	void					*priv;
};

/* Find an already registered handler with exactly matching callback,
 * type, ifindex and private data, or return NULL.
 */
static struct ndp_msgrcv_handler_item *
ndp_find_msgrcv_handler_item(struct ndp *ndp,
			     ndp_msgrcv_handler_func_t func,
			     enum ndp_msg_type msg_type, uint32_t ifindex,
			     void *priv)
{
	struct ndp_msgrcv_handler_item *handler_item;

	list_for_each_node_entry(handler_item, &ndp->msgrcv_handler_list, list)
		if (handler_item->func == func &&
		    handler_item->msg_type == msg_type &&
		    handler_item->ifindex == ifindex &&
		    handler_item->priv == priv)
			return handler_item;
	return NULL;
}

/* Dispatch a validated message to every matching registered handler.
 * Stops at the first handler returning nonzero and propagates that
 * error.
 */
static int ndp_call_handlers(struct ndp *ndp, struct ndp_msg *msg)
{
	struct ndp_msgrcv_handler_item *handler_item;
	int err;

	list_for_each_node_entry(handler_item,
				 &ndp->msgrcv_handler_list, list) {
		if (handler_item->msg_type != NDP_MSG_ALL &&
		    handler_item->msg_type != ndp_msg_type(msg))
			continue;
		if (handler_item->ifindex &&
		    handler_item->ifindex != msg->ifindex)
			continue;
		err = handler_item->func(ndp, msg, handler_item->priv);
		if (err)
			return err;
	}
	return 0;
}

/**
 * ndp_msgrcv_handler_register:
 * @ndp: libndp library context
 * @func: handler function for received messages
 * @msg_type: message type to match
 * @ifindex: interface index to match
 * @priv: func private data
 *
 * Registers custom @func handler which is going to be called when
 * specified @msg_type is received. If one wants the function to be
 * called for all message types, pass NDP_MSG_ALL,
 * Note that @ifindex can be set to filter only messages received on
 * specified interface. For @func to be called for messages received on
 * all interfaces, just set 0.
 *
 * Returns: zero on success or negative number in case of an error.
 **/
NDP_EXPORT
int ndp_msgrcv_handler_register(struct ndp *ndp, ndp_msgrcv_handler_func_t func,
				enum ndp_msg_type msg_type, uint32_t ifindex,
				void *priv)
{
	struct ndp_msgrcv_handler_item *handler_item;

	if (ndp_find_msgrcv_handler_item(ndp, func, msg_type, ifindex, priv))
		return -EEXIST;
	if (!func)
		return -EINVAL;
	handler_item = malloc(sizeof(*handler_item));
	if (!handler_item)
		return -ENOMEM;
	handler_item->func = func;
	handler_item->msg_type = msg_type;
	handler_item->ifindex = ifindex;
	handler_item->priv = priv;
	list_add_tail(&ndp->msgrcv_handler_list, &handler_item->list);
	return 0;
}

/**
 * ndp_msgrcv_handler_unregister:
 * @ndp: libndp library context
 * @func: handler function for received messages
 * @msg_type: message type to match
 * @ifindex: interface index to match
 * @priv: func private data
 *
 * Unregisters custom @func handler.
 *
 **/
NDP_EXPORT
void ndp_msgrcv_handler_unregister(struct ndp *ndp, ndp_msgrcv_handler_func_t func,
				   enum ndp_msg_type msg_type, uint32_t ifindex,
				   void *priv)
{
	struct ndp_msgrcv_handler_item *handler_item;

	handler_item = ndp_find_msgrcv_handler_item(ndp, func, msg_type,
						    ifindex, priv);
	if (!handler_item)
		return;
	list_del(&handler_item->list);
	free(handler_item);
}

/**
 * SECTION: event fd
 * @short_description: event filedescriptor related stuff
 */

/**
 * ndp_get_eventfd:
 * @ndp: libndp library context
 *
 * Get eventfd filedesctiptor.
 *
 * Returns: fd.
 **/
NDP_EXPORT
int ndp_get_eventfd(struct ndp *ndp)
{
	return ndp->sock;
}

/**
 * ndp_call_eventfd_handler:
 * @ndp: libndp library context
 *
 * Call eventfd handler.
 *
 * Returns: zero on success or negative number in case of an error.
 **/
NDP_EXPORT
int ndp_call_eventfd_handler(struct ndp *ndp)
{
	return ndp_sock_recv(ndp);
}

/**
 * ndp_callall_eventfd_handler:
 * @ndp: libndp library context
 *
 * Call all pending events on eventfd handler.
 *
 * Returns: zero on success or negative number in case of an error.
 **/
NDP_EXPORT
int ndp_callall_eventfd_handler(struct ndp *ndp)
{
	fd_set rfds;
	int fdmax;
	struct timeval tv;
	int fd = ndp_get_eventfd(ndp);
	int ret;
	int err;

	/* Zero timeout: poll without blocking until no data is pending.
	 * NOTE(review): rfds/tv are not re-armed inside the loop — this
	 * works because select() leaves the bit set while data is ready,
	 * but confirm against select(2) semantics on the target platform.
	 */
	memset(&tv, 0, sizeof(tv));
	FD_ZERO(&rfds);
	FD_SET(fd, &rfds);
	fdmax = fd + 1;
	while (true) {
		ret = select(fdmax, &rfds, NULL, NULL, &tv);
		if (ret == -1)
			return -errno;
		if (!FD_ISSET(fd, &rfds))
			return 0;
		err = ndp_call_eventfd_handler(ndp);
		if (err)
			return err;
	}
}

/**
 * SECTION: Exported context functions
 * @short_description: Core context functions exported to user
 */

/**
 * ndp_open:
 * @p_ndp: pointer where new libndp library context address will be stored
 *
 * Allocates and initializes library context, opens raw socket.
 *
 * Returns: zero on success or negative number in case of an error.
**/ NDP_EXPORT int ndp_open(struct ndp **p_ndp) { struct ndp *ndp; const char *env; int err; ndp = myzalloc(sizeof(*ndp)); if (!ndp) return -ENOMEM; ndp->log_fn = log_stderr; ndp->log_priority = LOG_ERR; /* environment overwrites config */ env = getenv("NDP_LOG"); if (env != NULL) ndp_set_log_priority(ndp, log_priority(env)); dbg(ndp, "ndp context %p created.", ndp); dbg(ndp, "log_priority=%d", ndp->log_priority); list_init(&ndp->msgrcv_handler_list); err = ndp_sock_open(ndp); if (err) goto free_ndp; *p_ndp = ndp; return 0; free_ndp: free(ndp); return err; } /** * ndp_close: * @ndp: libndp library context * * Do library context cleanup. **/ NDP_EXPORT void ndp_close(struct ndp *ndp) { ndp_sock_close(ndp); free(ndp); }
./CrossVul/dataset_final_sorted/CWE-284/c/good_5016_0
crossvul-cpp_data_good_1571_9
/* Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; version 2 of the License.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301  USA */

/*
  MySQL Slap

  A simple program designed to work as if multiple clients querying the
  database, then reporting the timing of each stage.

  MySQL slap runs three stages:
  1) Create schema,table, and optionally any SP or data you want to beign
     the test with. (single client)
  2) Load test (many clients)
  3) Cleanup (disconnection, drop table if specified, single client)

  Examples:

  Supply your own create and query SQL statements, with 50 clients
  querying (200 selects for each):

    mysqlslap --delimiter=";" \
              --create="CREATE TABLE A (a int);INSERT INTO A VALUES (23)" \
              --query="SELECT * FROM A" --concurrency=50 --iterations=200

  Let the program build the query SQL statement with a table of two int
  columns, three varchar columns, five clients querying (20 times each),
  don't create the table or insert the data (using the previous test's
  schema and data):

    mysqlslap --concurrency=5 --iterations=20 \
              --number-int-cols=2 --number-char-cols=3 \
              --auto-generate-sql

  Tell the program to load the create, insert and query SQL statements from
  the specified files, where the create.sql file has multiple table creation
  statements delimited by ';' and multiple insert statements delimited by ';'.
  The --query file will have multiple queries delimited by ';', run all the
  load statements, and then run all the queries in the query file
  with five clients (five times each):

    mysqlslap --concurrency=5 \
              --iterations=5 --query=query.sql --create=create.sql \
              --delimiter=";"

  TODO:
  Add language for better tests
  String length for files and those put on the command line are not
    setup to handle binary data.
  More stats
  Break up tests and run them on multiple hosts at once.
  Allow output to be fed into a database directly.

*/

#define SLAP_VERSION "1.0"

#define HUGE_STRING_LENGTH 8196
#define RAND_STRING_SIZE 126

/* Types */
#define SELECT_TYPE 0
#define UPDATE_TYPE 1
#define INSERT_TYPE 2
#define UPDATE_TYPE_REQUIRES_PREFIX 3
#define CREATE_TABLE_TYPE 4
#define SELECT_TYPE_REQUIRES_PREFIX 5
#define DELETE_TYPE_REQUIRES_PREFIX 6

#include "client_priv.h"
#include "my_default.h"
#include <mysqld_error.h>
#include <my_dir.h>
#include <signal.h>
#include <stdarg.h>
#include <sslopt-vars.h>
#include <sys/types.h>
#ifndef _WIN32
#include <sys/wait.h>
#endif
#include <ctype.h>
#include <welcome_copyright_notice.h>   /* ORACLE_WELCOME_COPYRIGHT_NOTICE */

#ifdef _WIN32
#define srandom  srand
#define random   rand
#define snprintf _snprintf
#endif

#if defined (_WIN32) && !defined (EMBEDDED_LIBRARY)
static char *shared_memory_base_name=0;
#endif

/* Global Thread counter */
uint thread_counter;
pthread_mutex_t counter_mutex;
pthread_cond_t count_threshhold;
uint master_wakeup;
pthread_mutex_t sleeper_mutex;
pthread_cond_t sleep_threshhold;

static char **defaults_argv;

char **primary_keys;
unsigned long long primary_keys_number_of;

static char *host= NULL, *opt_password= NULL, *user= NULL,
            *user_supplied_query= NULL,
            *user_supplied_pre_statements= NULL,
            *user_supplied_post_statements= NULL,
            *default_engine= NULL,
            *pre_system= NULL,
            *post_system= NULL,
            *opt_mysql_unix_port= NULL;
static char *opt_plugin_dir= 0, *opt_default_auth= 0;
static uint opt_enable_cleartext_plugin= 0;
static my_bool using_opt_enable_cleartext_plugin= 0;

const char *delimiter= "\n";

const char *create_schema_string= "mysqlslap";

static my_bool opt_preserve= TRUE, opt_no_drop= FALSE;
static my_bool debug_info_flag= 0, debug_check_flag= 0;
static my_bool opt_only_print= FALSE;
static my_bool opt_compress= FALSE, tty_password= FALSE,
               opt_silent= FALSE,
               auto_generate_sql_autoincrement= FALSE,
               auto_generate_sql_guid_primary= FALSE,
               auto_generate_sql= FALSE;
const char *auto_generate_sql_type= "mixed";

static unsigned long connect_flags= CLIENT_MULTI_RESULTS |
                                    CLIENT_MULTI_STATEMENTS |
                                    CLIENT_REMEMBER_OPTIONS;

static int verbose, delimiter_length;
static uint commit_rate;
static uint detach_rate;
const char *num_int_cols_opt;
const char *num_char_cols_opt;

/* Yes, we do set defaults here */
static unsigned int num_int_cols= 1;
static unsigned int num_char_cols= 1;
static unsigned int num_int_cols_index= 0;
static unsigned int num_char_cols_index= 0;
static unsigned int iterations;
static uint my_end_arg= 0;
static char *default_charset= (char*) MYSQL_DEFAULT_CHARSET_NAME;
static ulonglong actual_queries= 0;
static ulonglong auto_actual_queries;
static ulonglong auto_generate_sql_unique_write_number;
static ulonglong auto_generate_sql_unique_query_number;
static unsigned int auto_generate_sql_secondary_indexes;
static ulonglong num_of_query;
static ulonglong auto_generate_sql_number;
const char *concurrency_str= NULL;
static char *create_string;
uint *concurrency;

const char *default_dbug_option="d:t:o,/tmp/mysqlslap.trace";
const char *opt_csv_str;
File csv_file;

static uint opt_protocol= 0;

static int get_options(int *argc,char ***argv);
static uint opt_mysql_port= 0;

static const char *load_default_groups[]= { "mysqlslap","client",0 };

/* A single parsed SQL statement; statements form a singly linked list. */
typedef struct statement statement;

struct statement {
  char *string;
  size_t length;
  unsigned char type;
  char *option;
  size_t option_length;
  statement *next;
};

/* One engine option (e.g. from --engine), also a singly linked list. */
typedef struct option_string option_string;

struct option_string {
  char *string;
  size_t length;
  char *option;
  size_t option_length;
  option_string *next;
};

/* Per-iteration timing results. */
typedef struct stats stats;

struct stats {
  long int timing;
  uint users;
  unsigned long long rows;
};

typedef struct thread_context thread_context;

struct thread_context {
  statement *stmt;
  ulonglong limit;
};

/* Aggregated results printed at the end of a concurrency run. */
typedef struct conclusions conclusions;

struct conclusions {
  char *engine;
  long int avg_timing;
  long int max_timing;
  long int min_timing;
  uint users;
  unsigned long long avg_rows;
  /* The following are not used yet */
  unsigned long long max_rows;
  unsigned long long min_rows;
};

static option_string *engine_options= NULL;
static statement *pre_statements= NULL;
static statement *post_statements= NULL;
static statement *create_statements= NULL,
                 *query_statements= NULL;

/* Prototypes */
void print_conclusions(conclusions *con);
void print_conclusions_csv(conclusions *con);
void generate_stats(conclusions *con, option_string *eng, stats *sptr);
uint parse_comma(const char *string, uint **range);
uint parse_delimiter(const char *script, statement **stmt, char delm);
uint parse_option(const char *origin, option_string **stmt, char delm);
static int drop_schema(MYSQL *mysql, const char *db);
uint get_random_string(char *buf);
static statement *build_table_string(void);
static statement *build_insert_string(void);
static statement *build_update_string(void);
static statement * build_select_string(my_bool key);
static int generate_primary_key_list(MYSQL *mysql, option_string *engine_stmt);
static int drop_primary_key_list(void);
static int create_schema(MYSQL *mysql, const char *db, statement *stmt,
                         option_string *engine_stmt);
static int run_scheduler(stats *sptr, statement *stmts, uint concur,
                         ulonglong limit);
pthread_handler_t run_task(void *p);
void statement_cleanup(statement *stmt);
void option_cleanup(option_string *stmt);
void concurrency_loop(MYSQL *mysql, uint current, option_string *eptr);
static int run_statements(MYSQL *mysql, statement *stmt);
int slap_connect(MYSQL *mysql);
static int run_query(MYSQL *mysql, const char *query, int len);

/* Alphabet used when generating random column data. */
static const char ALPHANUMERICS[]=
  "0123456789ABCDEFGHIJKLMNOPQRSTWXYZabcdefghijklmnopqrstuvwxyz";

#define ALPHANUMERICS_SIZE (sizeof(ALPHANUMERICS)-1)

/* Difference between two timevals, in whole milliseconds (a - b). */
static long int timedif(struct timeval a, struct timeval b)
{
    int us, s;

    us = a.tv_usec - b.tv_usec;
    us /= 1000;
    s = a.tv_sec - b.tv_sec;
    s *= 1000;
    return s + us;
}

#ifdef _WIN32
/* Minimal gettimeofday() substitute built on GetTickCount(). */
static int gettimeofday(struct timeval *tp, void *tzp)
{
  unsigned int ticks;
  ticks= GetTickCount();
  tp->tv_usec= ticks*1000;
  tp->tv_sec= ticks/1000;

  return 0;
}
#endif

/*
  Entry point: parse options, connect (unless --only-print), and run the
  concurrency loop once per requested engine option.
*/
int main(int argc, char **argv)
{
  MYSQL mysql;
  option_string *eptr;

  MY_INIT(argv[0]);

  my_getopt_use_args_separator= TRUE;
  if (load_defaults("my",load_default_groups,&argc,&argv))
  {
    my_end(0);
    exit(1);
  }
  my_getopt_use_args_separator= FALSE;
  defaults_argv=argv;
  if (get_options(&argc,&argv))
  {
    free_defaults(defaults_argv);
    my_end(0);
    exit(1);
  }

  /* Seed the random number generator if we will be using it. */
  if (auto_generate_sql)
    srandom((uint)time(NULL));

  /* globals? Yes, so we only have to run strlen once */
  delimiter_length= strlen(delimiter);

  if (argc > 2)
  {
    fprintf(stderr,"%s: Too many arguments\n",my_progname);
    free_defaults(defaults_argv);
    my_end(0);
    exit(1);
  }
  mysql_init(&mysql);
  if (opt_compress)
    mysql_options(&mysql,MYSQL_OPT_COMPRESS,NullS);
  SSL_SET_OPTIONS(&mysql);
  if (opt_protocol)
    mysql_options(&mysql,MYSQL_OPT_PROTOCOL,(char*)&opt_protocol);
#if defined (_WIN32) && !defined (EMBEDDED_LIBRARY)
  if (shared_memory_base_name)
    mysql_options(&mysql,MYSQL_SHARED_MEMORY_BASE_NAME,shared_memory_base_name);
#endif
  mysql_options(&mysql, MYSQL_SET_CHARSET_NAME, default_charset);

  if (opt_plugin_dir && *opt_plugin_dir)
    mysql_options(&mysql, MYSQL_PLUGIN_DIR, opt_plugin_dir);

  if (opt_default_auth && *opt_default_auth)
    mysql_options(&mysql, MYSQL_DEFAULT_AUTH, opt_default_auth);

  mysql_options(&mysql, MYSQL_OPT_CONNECT_ATTR_RESET, 0);
  mysql_options4(&mysql, MYSQL_OPT_CONNECT_ATTR_ADD,
                 "program_name", "mysqlslap");
  if (using_opt_enable_cleartext_plugin)
    mysql_options(&mysql, MYSQL_ENABLE_CLEARTEXT_PLUGIN,
                  (char*) &opt_enable_cleartext_plugin);
  if (!opt_only_print)
  {
    if (!(mysql_real_connect(&mysql, host, user, opt_password,
                             NULL, opt_mysql_port,
                             opt_mysql_unix_port, connect_flags)))
    {
      fprintf(stderr,"%s: Error when connecting to server: %s\n",
              my_progname,mysql_error(&mysql));
      free_defaults(defaults_argv);
      my_end(0);
      exit(1);
    }
  }

  pthread_mutex_init(&counter_mutex, NULL);
  pthread_cond_init(&count_threshhold, NULL);
  pthread_mutex_init(&sleeper_mutex, NULL);
  pthread_cond_init(&sleep_threshhold, NULL);

  /* Main iterations loop */
  eptr= engine_options;
  do
  {
    /* For the final stage we run whatever queries we were asked to run */
    uint *current;

    if (verbose >= 2)
      printf("Starting Concurrency Test\n");

    if (*concurrency)
    {
      for (current= concurrency; current && *current; current++)
        concurrency_loop(&mysql, *current, eptr);
    }
    else
    {
      /* No explicit concurrency list: keep increasing forever. */
      uint infinite= 1;
      do
      {
        concurrency_loop(&mysql, infinite, eptr);
      }
      while (infinite++);
    }

    if (!opt_preserve)
      drop_schema(&mysql, create_schema_string);

  } while (eptr ? (eptr= eptr->next) : 0);

  pthread_mutex_destroy(&counter_mutex);
  pthread_cond_destroy(&count_threshhold);
  pthread_mutex_destroy(&sleeper_mutex);
  pthread_cond_destroy(&sleep_threshhold);

  if (!opt_only_print)
    mysql_close(&mysql); /* Close & free connection */

  /* now free all the strings we created */
  my_free(opt_password);
  my_free(concurrency);

  statement_cleanup(create_statements);
  statement_cleanup(query_statements);
  statement_cleanup(pre_statements);
  statement_cleanup(post_statements);
  option_cleanup(engine_options);

#if defined (_WIN32) && !defined (EMBEDDED_LIBRARY)
  my_free(shared_memory_base_name);
#endif
  free_defaults(defaults_argv);
  my_end(my_end_arg);

  return 0;
}

/*
  Run one full test cycle (create schema, pre statements, scheduler,
  post statements, cleanup) `iterations` times at concurrency `current`,
  then aggregate and print the statistics.
*/
void concurrency_loop(MYSQL *mysql, uint current, option_string *eptr)
{
  unsigned int x;
  stats *head_sptr;
  stats *sptr;
  conclusions conclusion;
  unsigned long long client_limit;
  int sysret;

  head_sptr= (stats *)my_malloc(PSI_NOT_INSTRUMENTED,
                                sizeof(stats) * iterations,
                                MYF(MY_ZEROFILL|MY_FAE|MY_WME));

  memset(&conclusion, 0, sizeof(conclusions));

  if (auto_actual_queries)
    client_limit= auto_actual_queries;
  else if (num_of_query)
    client_limit= num_of_query / current;
  else
    client_limit= actual_queries;

  for (x= 0, sptr= head_sptr; x < iterations; x++, sptr++)
  {
    /*
      We might not want to load any data, such as when we are calling
      a stored_procedure that doesn't use data, or we know we already have
      data in the table.
    */
    if (!opt_preserve)
      drop_schema(mysql, create_schema_string);

    /* First we create */
    if (create_statements)
      create_schema(mysql, create_schema_string, create_statements, eptr);

    /*
      If we generated GUID we need to build a list of them from creation that
      we can later use.
    */
    if (verbose >= 2)
      printf("Generating primary key list\n");
    if (auto_generate_sql_autoincrement ||
        auto_generate_sql_guid_primary)
      generate_primary_key_list(mysql, eptr);

    if (commit_rate)
      run_query(mysql, "SET AUTOCOMMIT=0", strlen("SET AUTOCOMMIT=0"));

    if (pre_system)
      if ((sysret= system(pre_system)) != 0)
        fprintf(stderr,
                "Warning: Execution of pre_system option returned %d.\n",
                sysret);

    /*
      Pre statements are always run after all other logic so they can
      correct/adjust any item that they want.
    */
    if (pre_statements)
      run_statements(mysql, pre_statements);

    run_scheduler(sptr, query_statements, current, client_limit);

    if (post_statements)
      run_statements(mysql, post_statements);

    if (post_system)
      if ((sysret= system(post_system)) != 0)
        fprintf(stderr,
                "Warning: Execution of post_system option returned %d.\n",
                sysret);

    /* We are finished with this run */
    if (auto_generate_sql_autoincrement ||
        auto_generate_sql_guid_primary)
      drop_primary_key_list();
  }

  if (verbose >= 2)
    printf("Generating stats\n");

  generate_stats(&conclusion, eptr, head_sptr);

  if (!opt_silent)
    print_conclusions(&conclusion);
  if (opt_csv_str)
    print_conclusions_csv(&conclusion);

  my_free(head_sptr);
}

/* Command-line option table; consumed by my_getopt / get_one_option(). */
static struct my_option my_long_options[] =
{
  {"help", '?', "Display this help and exit.", 0, 0, 0, GET_NO_ARG, NO_ARG,
    0, 0, 0, 0, 0, 0},
  {"auto-generate-sql", 'a',
    "Generate SQL where not supplied by file or command line.",
    &auto_generate_sql, &auto_generate_sql,
    0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
  {"auto-generate-sql-add-autoincrement", OPT_SLAP_AUTO_GENERATE_ADD_AUTO,
    "Add an AUTO_INCREMENT column to auto-generated tables.",
    &auto_generate_sql_autoincrement, &auto_generate_sql_autoincrement,
    0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
  {"auto-generate-sql-execute-number", OPT_SLAP_AUTO_GENERATE_EXECUTE_QUERIES,
    "Set this number to generate a set number of queries to run.",
    &auto_actual_queries, &auto_actual_queries,
    0, GET_ULL, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
  {"auto-generate-sql-guid-primary", OPT_SLAP_AUTO_GENERATE_GUID_PRIMARY,
    "Add GUID based primary keys to auto-generated tables.",
    &auto_generate_sql_guid_primary, &auto_generate_sql_guid_primary,
    0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
  {"auto-generate-sql-load-type", OPT_SLAP_AUTO_GENERATE_SQL_LOAD_TYPE,
    "Specify test load type: mixed, update, write, key, or read; default is mixed.",
    &auto_generate_sql_type, &auto_generate_sql_type,
    0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
  {"auto-generate-sql-secondary-indexes",
    OPT_SLAP_AUTO_GENERATE_SECONDARY_INDEXES,
    "Number of secondary indexes to add to auto-generated tables.",
    &auto_generate_sql_secondary_indexes,
    &auto_generate_sql_secondary_indexes, 0,
    GET_UINT, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
  {"auto-generate-sql-unique-query-number",
    OPT_SLAP_AUTO_GENERATE_UNIQUE_QUERY_NUM,
    "Number of unique queries to generate for automatic tests.",
    &auto_generate_sql_unique_query_number,
    &auto_generate_sql_unique_query_number,
    0, GET_ULL, REQUIRED_ARG, 10, 0, 0, 0, 0, 0},
  {"auto-generate-sql-unique-write-number",
    OPT_SLAP_AUTO_GENERATE_UNIQUE_WRITE_NUM,
    "Number of unique queries to generate for auto-generate-sql-write-number.",
    &auto_generate_sql_unique_write_number,
    &auto_generate_sql_unique_write_number,
    0, GET_ULL, REQUIRED_ARG, 10, 0, 0, 0, 0, 0},
  {"auto-generate-sql-write-number", OPT_SLAP_AUTO_GENERATE_WRITE_NUM,
    "Number of row inserts to perform for each thread (default is 100).",
    &auto_generate_sql_number, &auto_generate_sql_number,
    0, GET_ULL, REQUIRED_ARG, 100, 0, 0, 0, 0, 0},
  {"commit", OPT_SLAP_COMMIT, "Commit records every X number of statements.",
    &commit_rate, &commit_rate, 0, GET_UINT, REQUIRED_ARG,
    0, 0, 0, 0, 0, 0},
  {"compress", 'C', "Use compression in server/client protocol.",
    &opt_compress, &opt_compress, 0, GET_BOOL, NO_ARG, 0, 0, 0,
    0, 0, 0},
  {"concurrency", 'c', "Number of clients to simulate for query to run.",
    &concurrency_str, &concurrency_str, 0, GET_STR,
    REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
  {"create", OPT_SLAP_CREATE_STRING, "File or string to use create tables.",
    &create_string, &create_string, 0, GET_STR, REQUIRED_ARG,
    0, 0, 0, 0, 0, 0},
  {"create-schema", OPT_CREATE_SLAP_SCHEMA, "Schema to run tests in.",
    &create_schema_string, &create_schema_string, 0, GET_STR,
    REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
  {"csv", OPT_SLAP_CSV,
    "Generate CSV output to named file or to stdout if no file is named.",
    NULL, NULL, 0, GET_STR, OPT_ARG, 0, 0, 0, 0, 0, 0},
#ifdef DBUG_OFF
  {"debug", '#', "This is a non-debug version. Catch this and exit.",
    0, 0, 0, GET_DISABLED, OPT_ARG, 0, 0, 0, 0, 0, 0},
#else
  {"debug", '#', "Output debug log. Often this is 'd:t:o,filename'.",
    &default_dbug_option, &default_dbug_option, 0, GET_STR, OPT_ARG, 0, 0, 0,
    0, 0, 0},
#endif
  {"debug-check", OPT_DEBUG_CHECK, "Check memory and open file usage at exit.",
    &debug_check_flag, &debug_check_flag, 0,
    GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
  {"debug-info", 'T', "Print some debug info at exit.", &debug_info_flag,
    &debug_info_flag, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
  {"default_auth", OPT_DEFAULT_AUTH,
    "Default authentication client-side plugin to use.",
    &opt_default_auth, &opt_default_auth, 0,
    GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
  {"delimiter", 'F',
    "Delimiter to use in SQL statements supplied in file or command line.",
    &delimiter, &delimiter, 0, GET_STR, REQUIRED_ARG,
    0, 0, 0, 0, 0, 0},
  {"detach", OPT_SLAP_DETACH,
    "Detach (close and reopen) connections after X number of requests.",
    &detach_rate, &detach_rate, 0, GET_UINT, REQUIRED_ARG,
    0, 0, 0, 0, 0, 0},
  {"enable_cleartext_plugin", OPT_ENABLE_CLEARTEXT_PLUGIN,
    "Enable/disable the clear text authentication plugin.",
    &opt_enable_cleartext_plugin, &opt_enable_cleartext_plugin,
    0, GET_BOOL, OPT_ARG, 0, 0, 0, 0, 0, 0},
  {"engine", 'e', "Storage engine to use for creating the table.",
    &default_engine, &default_engine, 0,
    GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
  {"host", 'h', "Connect to host.", &host, &host, 0, GET_STR,
    REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
  {"iterations", 'i', "Number of times to run the tests.", &iterations,
    &iterations, 0, GET_UINT, REQUIRED_ARG, 1, 0, 0, 0, 0, 0},
  {"no-drop", OPT_SLAP_NO_DROP, "Do not drop the schema after the test.",
    &opt_no_drop, &opt_no_drop, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
  {"number-char-cols", 'x',
    "Number of VARCHAR columns to create in table if specifying --auto-generate-sql.",
    &num_char_cols_opt, &num_char_cols_opt, 0, GET_STR, REQUIRED_ARG,
    0, 0, 0, 0, 0, 0},
  {"number-int-cols", 'y',
    "Number of INT columns to create in table if specifying --auto-generate-sql.",
    &num_int_cols_opt, &num_int_cols_opt, 0, GET_STR, REQUIRED_ARG, 0, 0, 0,
    0, 0, 0},
  {"number-of-queries", OPT_MYSQL_NUMBER_OF_QUERY,
    "Limit each client to this number of queries (this is not exact).",
    &num_of_query, &num_of_query, 0,
    GET_ULL, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
  {"only-print", OPT_MYSQL_ONLY_PRINT,
    "Do not connect to the databases, but instead print out what would have "
    "been done.",
    &opt_only_print, &opt_only_print, 0, GET_BOOL, NO_ARG,
    0, 0, 0, 0, 0, 0},
  {"password", 'p',
    "Password to use when connecting to server. If password is not given it's "
    "asked from the tty.", 0, 0, 0, GET_PASSWORD, OPT_ARG, 0, 0, 0, 0, 0, 0},
#ifdef _WIN32
  {"pipe", 'W', "Use named pipes to connect to server.", 0, 0, 0, GET_NO_ARG,
    NO_ARG, 0, 0, 0, 0, 0, 0},
#endif
  {"plugin_dir", OPT_PLUGIN_DIR, "Directory for client-side plugins.",
    &opt_plugin_dir, &opt_plugin_dir, 0,
    GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
  {"port", 'P', "Port number to use for connection.", &opt_mysql_port,
    &opt_mysql_port, 0, GET_UINT, REQUIRED_ARG, MYSQL_PORT, 0, 0, 0, 0, 0},
  {"post-query", OPT_SLAP_POST_QUERY,
    "Query to run or file containing query to execute after tests have completed.",
    &user_supplied_post_statements, &user_supplied_post_statements,
    0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
  {"post-system", OPT_SLAP_POST_SYSTEM,
    "system() string to execute after tests have completed.",
    &post_system, &post_system,
    0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
  {"pre-query", OPT_SLAP_PRE_QUERY,
    "Query to run or file containing query to execute before running tests.",
    &user_supplied_pre_statements, &user_supplied_pre_statements,
    0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
  {"pre-system", OPT_SLAP_PRE_SYSTEM,
    "system() string to execute before running tests.",
    &pre_system, &pre_system,
    0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
  {"protocol", OPT_MYSQL_PROTOCOL,
    "The protocol to use for connection (tcp, socket, pipe, memory).",
    0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
  {"query", 'q', "Query to run or file containing query to run.",
    &user_supplied_query, &user_supplied_query,
    0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
#if defined (_WIN32) && !defined (EMBEDDED_LIBRARY)
  {"shared-memory-base-name", OPT_SHARED_MEMORY_BASE_NAME,
    "Base name of shared memory.", &shared_memory_base_name,
    &shared_memory_base_name, 0, GET_STR_ALLOC, REQUIRED_ARG,
    0, 0, 0, 0, 0, 0},
#endif
  {"silent", 's', "Run program in silent mode - no output.",
    &opt_silent, &opt_silent, 0, GET_BOOL, NO_ARG,
    0, 0, 0, 0, 0, 0},
  {"socket", 'S', "The socket file to use for connection.",
    &opt_mysql_unix_port, &opt_mysql_unix_port, 0, GET_STR,
    REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
#include <sslopt-longopts.h>
  {"user", 'u', "User for login if not current user.", &user,
    &user, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
  {"verbose", 'v',
    "More verbose output; you can use this multiple times to get even more "
    "verbose output.", &verbose, &verbose, 0, GET_NO_ARG, NO_ARG,
    0, 0, 0, 0, 0, 0},
  {"version", 'V', "Output version information and exit.", 0, 0, 0,
    GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0},
  {0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}
};

static void print_version(void)
{
  printf("%s Ver %s Distrib %s, for %s (%s)\n",my_progname, SLAP_VERSION,
         MYSQL_SERVER_VERSION,SYSTEM_TYPE,MACHINE_TYPE);
}

static void usage(void)
{
  print_version();
  puts(ORACLE_WELCOME_COPYRIGHT_NOTICE("2005"));
  puts("Run a query multiple times against the server.\n");
  printf("Usage: %s [OPTIONS]\n",my_progname);
  print_defaults("my",load_default_groups);
  my_print_help(my_long_options);
}

/* my_getopt callback: handles options that need more than value storage. */
static my_bool
get_one_option(int optid, const struct my_option *opt __attribute__((unused)),
               char *argument)
{
  DBUG_ENTER("get_one_option");
  switch(optid) {
  case 'v':
    verbose++;
    break;
  case 'p':
    if (argument == disabled_my_option)
      argument= (char*) "";			/* Don't require password */
    if (argument)
    {
      char *start= argument;
      my_free(opt_password);
      opt_password= my_strdup(PSI_NOT_INSTRUMENTED,
                              argument,MYF(MY_FAE));
      /* Hide the password from `ps` output by overwriting argv. */
      while (*argument) *argument++= 'x';		/* Destroy argument */
      if (*start)
        start[1]= 0;				/* Cut length of argument */
      tty_password= 0;
    }
    else
      tty_password= 1;
    break;
  case 'W':
#ifdef _WIN32
    opt_protocol= MYSQL_PROTOCOL_PIPE;
#endif
    break;
  case OPT_MYSQL_PROTOCOL:
    opt_protocol= find_type_or_exit(argument, &sql_protocol_typelib,
                                    opt->name);
    break;
  case '#':
    DBUG_PUSH(argument ? argument : default_dbug_option);
    debug_check_flag= 1;
    break;
  case OPT_SLAP_CSV:
    if (!argument)
      argument= (char *)"-"; /* use stdout */
    opt_csv_str= argument;
    break;
#include <sslopt-case.h>
  case 'V':
    print_version();
    exit(0);
    break;
  case '?':
  case 'I':					/* Info */
    usage();
    exit(0);
  case OPT_ENABLE_CLEARTEXT_PLUGIN:
    using_opt_enable_cleartext_plugin= TRUE;
    break;
  }
  DBUG_RETURN(0);
}

/* Fill buf with RAND_STRING_SIZE random alphanumerics; returns the length.
   NOTE: does not NUL-terminate; the caller must size buf accordingly. */
uint
get_random_string(char *buf)
{
  char *buf_ptr= buf;
  int x;
  DBUG_ENTER("get_random_string");
  for (x= RAND_STRING_SIZE; x > 0; x--)
    *buf_ptr++= ALPHANUMERICS[random() % ALPHANUMERICS_SIZE];
  DBUG_RETURN(buf_ptr - buf);
}


/*
  build_table_string

  This function builds a create table query if the user opts to not supply
  a file or string containing a create table statement
*/
static statement *
build_table_string(void)
{
  char       buf[HUGE_STRING_LENGTH];
  unsigned int        col_count;
  statement *ptr;
  DYNAMIC_STRING table_string;
  DBUG_ENTER("build_table_string");

  DBUG_PRINT("info", ("num int cols %u num char cols %u",
                      num_int_cols, num_char_cols));

  init_dynamic_string(&table_string, "", 1024, 1024);

  dynstr_append(&table_string, "CREATE TABLE `t1` (");

  if (auto_generate_sql_autoincrement)
  {
    dynstr_append(&table_string, "id serial");

    if (num_int_cols || num_char_cols)
      dynstr_append(&table_string, ",");
  }

  if (auto_generate_sql_guid_primary)
  {
    dynstr_append(&table_string, "id varchar(32) primary key");

    if (num_int_cols || num_char_cols || auto_generate_sql_guid_primary)
      dynstr_append(&table_string, ",");
  }

  if (auto_generate_sql_secondary_indexes)
  {
    unsigned int count;

    for (count= 0; count < auto_generate_sql_secondary_indexes; count++)
    {
      if (count) /* Except for the first pass we add a comma */
        dynstr_append(&table_string, ",");

      if (snprintf(buf, HUGE_STRING_LENGTH, "id%d varchar(32) unique key",
                   count) > HUGE_STRING_LENGTH)
      {
        fprintf(stderr, "Memory Allocation error in create table\n");
        exit(1);
      }
      dynstr_append(&table_string, buf);
    }

    if (num_int_cols || num_char_cols)
      dynstr_append(&table_string, ",");
  }

  if (num_int_cols)
    for (col_count= 1; col_count <= num_int_cols; col_count++)
    {
      if (num_int_cols_index)
      {
        if (snprintf(buf, HUGE_STRING_LENGTH,
                     "intcol%d INT(32), INDEX(intcol%d)",
                     col_count, col_count) > HUGE_STRING_LENGTH)
        {
          fprintf(stderr, "Memory Allocation error in create table\n");
          exit(1);
        }
      }
      else
      {
        if (snprintf(buf, HUGE_STRING_LENGTH, "intcol%d INT(32) ",
                     col_count) > HUGE_STRING_LENGTH)
        {
          fprintf(stderr, "Memory Allocation error in create table\n");
          exit(1);
        }
      }
      dynstr_append(&table_string, buf);

      if (col_count < num_int_cols || num_char_cols > 0)
        dynstr_append(&table_string, ",");
    }

  if (num_char_cols)
    for (col_count= 1; col_count <= num_char_cols; col_count++)
    {
      if (num_char_cols_index)
      {
        if (snprintf(buf, HUGE_STRING_LENGTH,
                     "charcol%d VARCHAR(128), INDEX(charcol%d) ",
                     col_count, col_count) > HUGE_STRING_LENGTH)
        {
          fprintf(stderr, "Memory Allocation error in creating table\n");
          exit(1);
        }
      }
      else
      {
        if (snprintf(buf, HUGE_STRING_LENGTH, "charcol%d VARCHAR(128)",
                     col_count) > HUGE_STRING_LENGTH)
        {
          fprintf(stderr, "Memory Allocation error in creating table\n");
          exit(1);
        }
      }
      dynstr_append(&table_string, buf);

      if (col_count < num_char_cols)
        dynstr_append(&table_string, ",");
    }

  dynstr_append(&table_string, ")");
  ptr= (statement *)my_malloc(PSI_NOT_INSTRUMENTED,
                              sizeof(statement),
                              MYF(MY_ZEROFILL|MY_FAE|MY_WME));
  ptr->string = (char *)my_malloc(PSI_NOT_INSTRUMENTED,
                                  table_string.length+1,
                                  MYF(MY_ZEROFILL|MY_FAE|MY_WME));
  ptr->length= table_string.length+1;
  ptr->type= CREATE_TABLE_TYPE;
  my_stpcpy(ptr->string, table_string.str);
  dynstr_free(&table_string);
  DBUG_RETURN(ptr);
}

/*
  build_update_string()

  This function builds insert statements when the user opts to not supply
  an insert file or string containing insert data
*/
static statement *
build_update_string(void)
{
  char       buf[HUGE_STRING_LENGTH];
  unsigned int        col_count;
  statement *ptr;
  DYNAMIC_STRING update_string;
  DBUG_ENTER("build_update_string");

  init_dynamic_string(&update_string, "", 1024, 1024);

  dynstr_append(&update_string, "UPDATE t1 SET ");

  if (num_int_cols)
    for (col_count= 1; col_count <= num_int_cols; col_count++)
    {
      if (snprintf(buf, HUGE_STRING_LENGTH, "intcol%d = %ld", col_count,
                   random()) > HUGE_STRING_LENGTH)
      {
        fprintf(stderr, "Memory Allocation error in creating update\n");
        exit(1);
      }
      dynstr_append(&update_string, buf);

      if (col_count < num_int_cols || num_char_cols > 0)
        dynstr_append_mem(&update_string, ",", 1);
    }

  if (num_char_cols)
    for (col_count= 1; col_count <= num_char_cols; col_count++)
    {
      char rand_buffer[RAND_STRING_SIZE];
      int buf_len= get_random_string(rand_buffer);

      if (snprintf(buf, HUGE_STRING_LENGTH, "charcol%d = '%.*s'", col_count,
                   buf_len, rand_buffer) > HUGE_STRING_LENGTH)
      {
        fprintf(stderr, "Memory Allocation error in creating update\n");
        exit(1);
      }
      dynstr_append(&update_string, buf);

      if (col_count < num_char_cols)
        dynstr_append_mem(&update_string, ",", 1);
    }

  if (auto_generate_sql_autoincrement || auto_generate_sql_guid_primary)
    dynstr_append(&update_string, " WHERE id = ");

  ptr= (statement *)my_malloc(PSI_NOT_INSTRUMENTED,
                              sizeof(statement),
                              MYF(MY_ZEROFILL|MY_FAE|MY_WME));

  ptr->string= (char *)my_malloc(PSI_NOT_INSTRUMENTED,
                                 update_string.length + 1,
                                 MYF(MY_ZEROFILL|MY_FAE|MY_WME));
  ptr->length= update_string.length+1;
  if (auto_generate_sql_autoincrement || auto_generate_sql_guid_primary)
    ptr->type= UPDATE_TYPE_REQUIRES_PREFIX ;
  else
    ptr->type= UPDATE_TYPE;
  my_stpcpy(ptr->string, update_string.str);
  dynstr_free(&update_string);
  DBUG_RETURN(ptr);
}

/*
  build_insert_string()

  This function builds insert statements when the user opts to not supply
  an insert file or string containing insert data
*/
static statement *
build_insert_string(void)
{
  char       buf[HUGE_STRING_LENGTH];
  unsigned int        col_count;
  statement *ptr;
  DYNAMIC_STRING insert_string;
  DBUG_ENTER("build_insert_string");

  init_dynamic_string(&insert_string, "", 1024, 1024);

  dynstr_append(&insert_string, "INSERT INTO t1 VALUES (");

  if (auto_generate_sql_autoincrement)
  {
    dynstr_append(&insert_string, "NULL");

    if (num_int_cols || num_char_cols)
      dynstr_append(&insert_string, ",");
  }

  if (auto_generate_sql_guid_primary)
  {
    dynstr_append(&insert_string, "uuid()");

    if (num_int_cols || num_char_cols)
      dynstr_append(&insert_string, ",");
  }

  if (auto_generate_sql_secondary_indexes)
  {
    unsigned int count;

    for (count= 0; count < auto_generate_sql_secondary_indexes; count++)
    {
      if (count) /* Except for the first pass we add a comma */
        dynstr_append(&insert_string, ",");

      dynstr_append(&insert_string, "uuid()");
    }

    if (num_int_cols || num_char_cols)
      dynstr_append(&insert_string, ",");
  }

  if (num_int_cols)
    for (col_count= 1; col_count <= num_int_cols; col_count++)
    {
      if (snprintf(buf, HUGE_STRING_LENGTH, "%ld", random())
          > HUGE_STRING_LENGTH)
      {
        fprintf(stderr, "Memory Allocation error in creating insert\n");
        exit(1);
      }
      dynstr_append(&insert_string, buf);

      if (col_count < num_int_cols || num_char_cols > 0)
        dynstr_append_mem(&insert_string, ",", 1);
    }

  if (num_char_cols)
    for (col_count= 1; col_count <= num_char_cols; col_count++)
    {
      int buf_len= get_random_string(buf);
      dynstr_append_mem(&insert_string, "'", 1);
      dynstr_append_mem(&insert_string, buf, buf_len);
      dynstr_append_mem(&insert_string, "'", 1);

      if (col_count < num_char_cols)
        dynstr_append_mem(&insert_string, ",", 1);
    }

  dynstr_append_mem(&insert_string, ")", 1);

  ptr= (statement *)my_malloc(PSI_NOT_INSTRUMENTED,
                              sizeof(statement),
                              MYF(MY_ZEROFILL|MY_FAE|MY_WME));
  ptr->string= (char *)my_malloc(PSI_NOT_INSTRUMENTED,
                                 insert_string.length + 1,
                                 MYF(MY_ZEROFILL|MY_FAE|MY_WME));
  ptr->length= insert_string.length+1;
  ptr->type= INSERT_TYPE;
  my_stpcpy(ptr->string, insert_string.str);
  dynstr_free(&insert_string);
  DBUG_RETURN(ptr);
}

/*
  build_select_string()

  This function builds a query if the user opts to not supply a query
  statement or file containing a query statement
*/
static statement *
build_select_string(my_bool key)
{
  char buf[HUGE_STRING_LENGTH];
  unsigned int col_count;
  statement *ptr;
  static
DYNAMIC_STRING query_string; DBUG_ENTER("build_select_string"); init_dynamic_string(&query_string, "", 1024, 1024); dynstr_append_mem(&query_string, "SELECT ", 7); for (col_count= 1; col_count <= num_int_cols; col_count++) { if (snprintf(buf, HUGE_STRING_LENGTH, "intcol%d", col_count) > HUGE_STRING_LENGTH) { fprintf(stderr, "Memory Allocation error in creating select\n"); exit(1); } dynstr_append(&query_string, buf); if (col_count < num_int_cols || num_char_cols > 0) dynstr_append_mem(&query_string, ",", 1); } for (col_count= 1; col_count <= num_char_cols; col_count++) { if (snprintf(buf, HUGE_STRING_LENGTH, "charcol%d", col_count) > HUGE_STRING_LENGTH) { fprintf(stderr, "Memory Allocation error in creating select\n"); exit(1); } dynstr_append(&query_string, buf); if (col_count < num_char_cols) dynstr_append_mem(&query_string, ",", 1); } dynstr_append(&query_string, " FROM t1"); if ((key) && (auto_generate_sql_autoincrement || auto_generate_sql_guid_primary)) dynstr_append(&query_string, " WHERE id = "); ptr= (statement *)my_malloc(PSI_NOT_INSTRUMENTED, sizeof(statement), MYF(MY_ZEROFILL|MY_FAE|MY_WME)); ptr->string= (char *)my_malloc(PSI_NOT_INSTRUMENTED, query_string.length + 1, MYF(MY_ZEROFILL|MY_FAE|MY_WME)); ptr->length= query_string.length+1; if ((key) && (auto_generate_sql_autoincrement || auto_generate_sql_guid_primary)) ptr->type= SELECT_TYPE_REQUIRES_PREFIX; else ptr->type= SELECT_TYPE; my_stpcpy(ptr->string, query_string.str); dynstr_free(&query_string); DBUG_RETURN(ptr); } static int get_options(int *argc,char ***argv) { int ho_error; char *tmp_string; MY_STAT sbuf; /* Stat information for the data file */ DBUG_ENTER("get_options"); if ((ho_error= handle_options(argc, argv, my_long_options, get_one_option))) exit(ho_error); if (debug_info_flag) my_end_arg= MY_CHECK_ERROR | MY_GIVE_INFO; if (debug_check_flag) my_end_arg= MY_CHECK_ERROR; if (!user) user= (char *)"root"; /* If something is created and --no-drop is not specified, we drop the schema. 
*/ if (!opt_no_drop && (create_string || auto_generate_sql)) opt_preserve= FALSE; if (auto_generate_sql && (create_string || user_supplied_query)) { fprintf(stderr, "%s: Can't use --auto-generate-sql when create and query strings are specified!\n", my_progname); exit(1); } if (auto_generate_sql && auto_generate_sql_guid_primary && auto_generate_sql_autoincrement) { fprintf(stderr, "%s: Either auto-generate-sql-guid-primary or auto-generate-sql-add-autoincrement can be used!\n", my_progname); exit(1); } /* We are testing to make sure that if someone specified a key search that we actually added a key! */ if (auto_generate_sql && auto_generate_sql_type[0] == 'k') if ( auto_generate_sql_autoincrement == FALSE && auto_generate_sql_guid_primary == FALSE) { fprintf(stderr, "%s: Can't perform key test without a primary key!\n", my_progname); exit(1); } if (auto_generate_sql && num_of_query && auto_actual_queries) { fprintf(stderr, "%s: Either auto-generate-sql-execute-number or number-of-queries can be used!\n", my_progname); exit(1); } parse_comma(concurrency_str ? 
concurrency_str : "1", &concurrency); if (opt_csv_str) { opt_silent= TRUE; if (opt_csv_str[0] == '-') { csv_file= my_fileno(stdout); } else { if ((csv_file= my_open(opt_csv_str, O_CREAT|O_WRONLY|O_APPEND, MYF(0))) == -1) { fprintf(stderr,"%s: Could not open csv file: %sn\n", my_progname, opt_csv_str); exit(1); } } } if (opt_only_print) opt_silent= TRUE; if (num_int_cols_opt) { option_string *str; parse_option(num_int_cols_opt, &str, ','); num_int_cols= atoi(str->string); if (str->option) num_int_cols_index= atoi(str->option); option_cleanup(str); } if (num_char_cols_opt) { option_string *str; parse_option(num_char_cols_opt, &str, ','); num_char_cols= atoi(str->string); if (str->option) num_char_cols_index= atoi(str->option); else num_char_cols_index= 0; option_cleanup(str); } if (auto_generate_sql) { unsigned long long x= 0; statement *ptr_statement; if (verbose >= 2) printf("Building Create Statements for Auto\n"); create_statements= build_table_string(); /* Pre-populate table */ for (ptr_statement= create_statements, x= 0; x < auto_generate_sql_unique_write_number; x++, ptr_statement= ptr_statement->next) { ptr_statement->next= build_insert_string(); } if (verbose >= 2) printf("Building Query Statements for Auto\n"); if (auto_generate_sql_type[0] == 'r') { if (verbose >= 2) printf("Generating SELECT Statements for Auto\n"); query_statements= build_select_string(FALSE); for (ptr_statement= query_statements, x= 0; x < auto_generate_sql_unique_query_number; x++, ptr_statement= ptr_statement->next) { ptr_statement->next= build_select_string(FALSE); } } else if (auto_generate_sql_type[0] == 'k') { if (verbose >= 2) printf("Generating SELECT for keys Statements for Auto\n"); query_statements= build_select_string(TRUE); for (ptr_statement= query_statements, x= 0; x < auto_generate_sql_unique_query_number; x++, ptr_statement= ptr_statement->next) { ptr_statement->next= build_select_string(TRUE); } } else if (auto_generate_sql_type[0] == 'w') { /* We generate a number of 
strings in case the engine is Archive (since strings which were identical one after another would be too easily optimized). */ if (verbose >= 2) printf("Generating INSERT Statements for Auto\n"); query_statements= build_insert_string(); for (ptr_statement= query_statements, x= 0; x < auto_generate_sql_unique_query_number; x++, ptr_statement= ptr_statement->next) { ptr_statement->next= build_insert_string(); } } else if (auto_generate_sql_type[0] == 'u') { query_statements= build_update_string(); for (ptr_statement= query_statements, x= 0; x < auto_generate_sql_unique_query_number; x++, ptr_statement= ptr_statement->next) { ptr_statement->next= build_update_string(); } } else /* Mixed mode is default */ { int coin= 0; query_statements= build_insert_string(); /* This logic should be extended to do a more mixed load, at the moment it results in "every other". */ for (ptr_statement= query_statements, x= 0; x < auto_generate_sql_unique_query_number; x++, ptr_statement= ptr_statement->next) { if (coin) { ptr_statement->next= build_insert_string(); coin= 0; } else { ptr_statement->next= build_select_string(TRUE); coin= 1; } } } } else { if (create_string && my_stat(create_string, &sbuf, MYF(0))) { File data_file; if (!MY_S_ISREG(sbuf.st_mode)) { fprintf(stderr,"%s: Create file was not a regular file\n", my_progname); exit(1); } if ((data_file= my_open(create_string, O_RDWR, MYF(0))) == -1) { fprintf(stderr,"%s: Could not open create file\n", my_progname); exit(1); } tmp_string= (char *)my_malloc(PSI_NOT_INSTRUMENTED, sbuf.st_size + 1, MYF(MY_ZEROFILL|MY_FAE|MY_WME)); my_read(data_file, (uchar*) tmp_string, sbuf.st_size, MYF(0)); tmp_string[sbuf.st_size]= '\0'; my_close(data_file,MYF(0)); parse_delimiter(tmp_string, &create_statements, delimiter[0]); my_free(tmp_string); } else if (create_string) { parse_delimiter(create_string, &create_statements, delimiter[0]); } if (user_supplied_query && my_stat(user_supplied_query, &sbuf, MYF(0))) { File data_file; if 
(!MY_S_ISREG(sbuf.st_mode)) { fprintf(stderr,"%s: User query supplied file was not a regular file\n", my_progname); exit(1); } if ((data_file= my_open(user_supplied_query, O_RDWR, MYF(0))) == -1) { fprintf(stderr,"%s: Could not open query supplied file\n", my_progname); exit(1); } tmp_string= (char *)my_malloc(PSI_NOT_INSTRUMENTED, sbuf.st_size + 1, MYF(MY_ZEROFILL|MY_FAE|MY_WME)); my_read(data_file, (uchar*) tmp_string, sbuf.st_size, MYF(0)); tmp_string[sbuf.st_size]= '\0'; my_close(data_file,MYF(0)); if (user_supplied_query) actual_queries= parse_delimiter(tmp_string, &query_statements, delimiter[0]); my_free(tmp_string); } else if (user_supplied_query) { actual_queries= parse_delimiter(user_supplied_query, &query_statements, delimiter[0]); } } if (user_supplied_pre_statements && my_stat(user_supplied_pre_statements, &sbuf, MYF(0))) { File data_file; if (!MY_S_ISREG(sbuf.st_mode)) { fprintf(stderr,"%s: User query supplied file was not a regular file\n", my_progname); exit(1); } if ((data_file= my_open(user_supplied_pre_statements, O_RDWR, MYF(0))) == -1) { fprintf(stderr,"%s: Could not open query supplied file\n", my_progname); exit(1); } tmp_string= (char *)my_malloc(PSI_NOT_INSTRUMENTED, sbuf.st_size + 1, MYF(MY_ZEROFILL|MY_FAE|MY_WME)); my_read(data_file, (uchar*) tmp_string, sbuf.st_size, MYF(0)); tmp_string[sbuf.st_size]= '\0'; my_close(data_file,MYF(0)); if (user_supplied_pre_statements) (void)parse_delimiter(tmp_string, &pre_statements, delimiter[0]); my_free(tmp_string); } else if (user_supplied_pre_statements) { (void)parse_delimiter(user_supplied_pre_statements, &pre_statements, delimiter[0]); } if (user_supplied_post_statements && my_stat(user_supplied_post_statements, &sbuf, MYF(0))) { File data_file; if (!MY_S_ISREG(sbuf.st_mode)) { fprintf(stderr,"%s: User query supplied file was not a regular file\n", my_progname); exit(1); } if ((data_file= my_open(user_supplied_post_statements, O_RDWR, MYF(0))) == -1) { fprintf(stderr,"%s: Could not open query 
supplied file\n", my_progname); exit(1); } tmp_string= (char *)my_malloc(PSI_NOT_INSTRUMENTED, sbuf.st_size + 1, MYF(MY_ZEROFILL|MY_FAE|MY_WME)); my_read(data_file, (uchar*) tmp_string, sbuf.st_size, MYF(0)); tmp_string[sbuf.st_size]= '\0'; my_close(data_file,MYF(0)); if (user_supplied_post_statements) (void)parse_delimiter(tmp_string, &post_statements, delimiter[0]); my_free(tmp_string); } else if (user_supplied_post_statements) { (void)parse_delimiter(user_supplied_post_statements, &post_statements, delimiter[0]); } if (verbose >= 2) printf("Parsing engines to use.\n"); if (default_engine) parse_option(default_engine, &engine_options, ','); if (tty_password) opt_password= get_tty_password(NullS); DBUG_RETURN(0); } static int run_query(MYSQL *mysql, const char *query, int len) { if (opt_only_print) { printf("%.*s;\n", len, query); return 0; } if (verbose >= 3) printf("%.*s;\n", len, query); return mysql_real_query(mysql, query, len); } static int generate_primary_key_list(MYSQL *mysql, option_string *engine_stmt) { MYSQL_RES *result; MYSQL_ROW row; unsigned long long counter; DBUG_ENTER("generate_primary_key_list"); /* Blackhole is a special case, this allows us to test the upper end of the server during load runs. */ if (opt_only_print || (engine_stmt && strstr(engine_stmt->string, "blackhole"))) { primary_keys_number_of= 1; primary_keys= (char **)my_malloc(PSI_NOT_INSTRUMENTED, (uint)(sizeof(char *) * primary_keys_number_of), MYF(MY_ZEROFILL|MY_FAE|MY_WME)); /* Yes, we strdup a const string to simplify the interface */ primary_keys[0]= my_strdup(PSI_NOT_INSTRUMENTED, "796c4422-1d94-102a-9d6d-00e0812d", MYF(0)); } else { if (run_query(mysql, "SELECT id from t1", strlen("SELECT id from t1"))) { fprintf(stderr,"%s: Cannot select GUID primary keys. 
(%s)\n", my_progname, mysql_error(mysql)); exit(1); } if (!(result= mysql_store_result(mysql))) { fprintf(stderr, "%s: Error when storing result: %d %s\n", my_progname, mysql_errno(mysql), mysql_error(mysql)); exit(1); } primary_keys_number_of= mysql_num_rows(result); /* So why check this? Blackhole :) */ if (primary_keys_number_of) { /* We create the structure and loop and create the items. */ primary_keys= (char **)my_malloc(PSI_NOT_INSTRUMENTED, (uint)(sizeof(char *) * primary_keys_number_of), MYF(MY_ZEROFILL|MY_FAE|MY_WME)); row= mysql_fetch_row(result); for (counter= 0; counter < primary_keys_number_of; counter++, row= mysql_fetch_row(result)) primary_keys[counter]= my_strdup(PSI_NOT_INSTRUMENTED, row[0], MYF(0)); } mysql_free_result(result); } DBUG_RETURN(0); } static int drop_primary_key_list(void) { unsigned long long counter; if (primary_keys_number_of) { for (counter= 0; counter < primary_keys_number_of; counter++) my_free(primary_keys[counter]); my_free(primary_keys); } return 0; } static int create_schema(MYSQL *mysql, const char *db, statement *stmt, option_string *engine_stmt) { char query[HUGE_STRING_LENGTH]; statement *ptr; statement *after_create; int len; ulonglong count; DBUG_ENTER("create_schema"); len= snprintf(query, HUGE_STRING_LENGTH, "CREATE SCHEMA `%s`", db); if (verbose >= 2) printf("Loading Pre-data\n"); if (run_query(mysql, query, len)) { fprintf(stderr,"%s: Cannot create schema %s : %s\n", my_progname, db, mysql_error(mysql)); exit(1); } if (opt_only_print) { printf("use %s;\n", db); } else { if (verbose >= 3) printf("%s;\n", query); if (mysql_select_db(mysql, db)) { fprintf(stderr,"%s: Cannot select schema '%s': %s\n",my_progname, db, mysql_error(mysql)); exit(1); } } if (engine_stmt) { len= snprintf(query, HUGE_STRING_LENGTH, "set storage_engine=`%s`", engine_stmt->string); if (run_query(mysql, query, len)) { fprintf(stderr,"%s: Cannot set default engine: %s\n", my_progname, mysql_error(mysql)); exit(1); } } count= 0; after_create= 
stmt; limit_not_met: for (ptr= after_create; ptr && ptr->length; ptr= ptr->next, count++) { if (auto_generate_sql && ( auto_generate_sql_number == count)) break; if (engine_stmt && engine_stmt->option && ptr->type == CREATE_TABLE_TYPE) { char buffer[HUGE_STRING_LENGTH]; snprintf(buffer, HUGE_STRING_LENGTH, "%s %s", ptr->string, engine_stmt->option); if (run_query(mysql, buffer, strlen(buffer))) { fprintf(stderr,"%s: Cannot run query %.*s ERROR : %s\n", my_progname, (uint)ptr->length, ptr->string, mysql_error(mysql)); exit(1); } } else { if (run_query(mysql, ptr->string, ptr->length)) { fprintf(stderr,"%s: Cannot run query %.*s ERROR : %s\n", my_progname, (uint)ptr->length, ptr->string, mysql_error(mysql)); exit(1); } } } if (auto_generate_sql && (auto_generate_sql_number > count )) { /* Special case for auto create, we don't want to create tables twice */ after_create= stmt->next; goto limit_not_met; } DBUG_RETURN(0); } static int drop_schema(MYSQL *mysql, const char *db) { char query[HUGE_STRING_LENGTH]; int len; DBUG_ENTER("drop_schema"); len= snprintf(query, HUGE_STRING_LENGTH, "DROP SCHEMA IF EXISTS `%s`", db); if (run_query(mysql, query, len)) { fprintf(stderr,"%s: Cannot drop database '%s' ERROR : %s\n", my_progname, db, mysql_error(mysql)); exit(1); } DBUG_RETURN(0); } static int run_statements(MYSQL *mysql, statement *stmt) { statement *ptr; MYSQL_RES *result; DBUG_ENTER("run_statements"); for (ptr= stmt; ptr && ptr->length; ptr= ptr->next) { if (run_query(mysql, ptr->string, ptr->length)) { fprintf(stderr,"%s: Cannot run query %.*s ERROR : %s\n", my_progname, (uint)ptr->length, ptr->string, mysql_error(mysql)); exit(1); } if (mysql_field_count(mysql)) { result= mysql_store_result(mysql); mysql_free_result(result); } } DBUG_RETURN(0); } static int run_scheduler(stats *sptr, statement *stmts, uint concur, ulonglong limit) { uint x; struct timeval start_time, end_time; thread_context con; pthread_t mainthread; /* Thread descriptor */ pthread_attr_t attr; /* 
Thread attributes */ DBUG_ENTER("run_scheduler"); con.stmt= stmts; con.limit= limit; pthread_attr_init(&attr); pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED); pthread_mutex_lock(&counter_mutex); thread_counter= 0; pthread_mutex_lock(&sleeper_mutex); master_wakeup= 1; pthread_mutex_unlock(&sleeper_mutex); for (x= 0; x < concur; x++) { /* now you create the thread */ if (pthread_create(&mainthread, &attr, run_task, (void *)&con) != 0) { fprintf(stderr,"%s: Could not create thread\n", my_progname); exit(0); } thread_counter++; } pthread_mutex_unlock(&counter_mutex); pthread_attr_destroy(&attr); pthread_mutex_lock(&sleeper_mutex); master_wakeup= 0; pthread_mutex_unlock(&sleeper_mutex); pthread_cond_broadcast(&sleep_threshhold); gettimeofday(&start_time, NULL); /* We loop until we know that all children have cleaned up. */ pthread_mutex_lock(&counter_mutex); while (thread_counter) { struct timespec abstime; set_timespec(abstime, 3); pthread_cond_timedwait(&count_threshhold, &counter_mutex, &abstime); } pthread_mutex_unlock(&counter_mutex); gettimeofday(&end_time, NULL); sptr->timing= timedif(end_time, start_time); sptr->users= concur; sptr->rows= limit; DBUG_RETURN(0); } pthread_handler_t run_task(void *p) { ulonglong counter= 0, queries; ulonglong detach_counter; unsigned int commit_counter; MYSQL *mysql; MYSQL_RES *result; MYSQL_ROW row; statement *ptr; thread_context *con= (thread_context *)p; DBUG_ENTER("run_task"); DBUG_PRINT("info", ("task script \"%s\"", con->stmt ? 
con->stmt->string : "")); pthread_mutex_lock(&sleeper_mutex); while (master_wakeup) { pthread_cond_wait(&sleep_threshhold, &sleeper_mutex); } pthread_mutex_unlock(&sleeper_mutex); if (!(mysql= mysql_init(NULL))) { fprintf(stderr,"%s: mysql_init() failed ERROR : %s\n", my_progname, mysql_error(mysql)); exit(0); } if (mysql_thread_init()) { fprintf(stderr,"%s: mysql_thread_init() failed ERROR : %s\n", my_progname, mysql_error(mysql)); exit(0); } DBUG_PRINT("info", ("trying to connect to host %s as user %s", host, user)); if (!opt_only_print) { if (slap_connect(mysql)) goto end; } DBUG_PRINT("info", ("connected.")); if (verbose >= 3) printf("connected!\n"); queries= 0; commit_counter= 0; if (commit_rate) run_query(mysql, "SET AUTOCOMMIT=0", strlen("SET AUTOCOMMIT=0")); limit_not_met: for (ptr= con->stmt, detach_counter= 0; ptr && ptr->length; ptr= ptr->next, detach_counter++) { if (!opt_only_print && detach_rate && !(detach_counter % detach_rate)) { mysql_close(mysql); if (!(mysql= mysql_init(NULL))) { fprintf(stderr,"%s: mysql_init() failed ERROR : %s\n", my_progname, mysql_error(mysql)); exit(0); } if (slap_connect(mysql)) goto end; } /* We have to execute differently based on query type. This should become a function. */ if ((ptr->type == UPDATE_TYPE_REQUIRES_PREFIX) || (ptr->type == SELECT_TYPE_REQUIRES_PREFIX)) { int length; unsigned int key_val; char *key; char buffer[HUGE_STRING_LENGTH]; /* This should only happen if some sort of new engine was implemented that didn't properly handle UPDATEs. Just in case someone runs this under an experimental engine we don't want a crash so the if() is placed here. 
*/ DBUG_ASSERT(primary_keys_number_of); if (primary_keys_number_of) { key_val= (unsigned int)(random() % primary_keys_number_of); key= primary_keys[key_val]; DBUG_ASSERT(key); length= snprintf(buffer, HUGE_STRING_LENGTH, "%.*s '%s'", (int)ptr->length, ptr->string, key); if (run_query(mysql, buffer, length)) { fprintf(stderr,"%s: Cannot run query %.*s ERROR : %s\n", my_progname, (uint)length, buffer, mysql_error(mysql)); exit(0); } } } else { if (run_query(mysql, ptr->string, ptr->length)) { fprintf(stderr,"%s: Cannot run query %.*s ERROR : %s\n", my_progname, (uint)ptr->length, ptr->string, mysql_error(mysql)); exit(0); } } do { if (mysql_field_count(mysql)) { if (!(result= mysql_store_result(mysql))) fprintf(stderr, "%s: Error when storing result: %d %s\n", my_progname, mysql_errno(mysql), mysql_error(mysql)); else { while ((row= mysql_fetch_row(result))) counter++; mysql_free_result(result); } } } while(mysql_next_result(mysql) == 0); queries++; if (commit_rate && (++commit_counter == commit_rate)) { commit_counter= 0; run_query(mysql, "COMMIT", strlen("COMMIT")); } if (con->limit && queries == con->limit) goto end; } if (con->limit && queries < con->limit) goto limit_not_met; end: if (commit_rate) run_query(mysql, "COMMIT", strlen("COMMIT")); if (!opt_only_print) mysql_close(mysql); mysql_thread_end(); pthread_mutex_lock(&counter_mutex); thread_counter--; pthread_cond_signal(&count_threshhold); pthread_mutex_unlock(&counter_mutex); DBUG_RETURN(0); } uint parse_option(const char *origin, option_string **stmt, char delm) { char *retstr; char *ptr= (char *)origin; option_string **sptr= stmt; option_string *tmp; size_t length= strlen(origin); uint count= 0; /* We know that there is always one */ for (tmp= *sptr= (option_string *)my_malloc(PSI_NOT_INSTRUMENTED, sizeof(option_string), MYF(MY_ZEROFILL|MY_FAE|MY_WME)); (retstr= strchr(ptr, delm)); tmp->next= (option_string *)my_malloc(PSI_NOT_INSTRUMENTED, sizeof(option_string), MYF(MY_ZEROFILL|MY_FAE|MY_WME)), tmp= 
tmp->next) { char buffer[HUGE_STRING_LENGTH]; char *buffer_ptr; count++; strncpy(buffer, ptr, (size_t)(retstr - ptr)); if ((buffer_ptr= strchr(buffer, ':'))) { char *option_ptr; tmp->length= (size_t)(buffer_ptr - buffer); tmp->string= my_strndup(PSI_NOT_INSTRUMENTED, ptr, (uint)tmp->length, MYF(MY_FAE)); option_ptr= ptr + 1 + tmp->length; /* Move past the : and the first string */ tmp->option_length= (size_t)(retstr - option_ptr); tmp->option= my_strndup(PSI_NOT_INSTRUMENTED, option_ptr, (uint)tmp->option_length, MYF(MY_FAE)); } else { tmp->string= my_strndup(PSI_NOT_INSTRUMENTED, ptr, (size_t)(retstr - ptr), MYF(MY_FAE)); tmp->length= (size_t)(retstr - ptr); } ptr+= retstr - ptr + 1; if (isspace(*ptr)) ptr++; count++; } if (ptr != origin+length) { char *origin_ptr; if ((origin_ptr= strchr(ptr, ':'))) { char *option_ptr; tmp->length= (size_t)(origin_ptr - ptr); tmp->string= my_strndup(PSI_NOT_INSTRUMENTED, origin, tmp->length, MYF(MY_FAE)); option_ptr= (char *)ptr + 1 + tmp->length; /* Move past the : and the first string */ tmp->option_length= (size_t)((ptr + length) - option_ptr); tmp->option= my_strndup(PSI_NOT_INSTRUMENTED, option_ptr, tmp->option_length, MYF(MY_FAE)); } else { tmp->length= (size_t)((ptr + length) - ptr); tmp->string= my_strndup(PSI_NOT_INSTRUMENTED, ptr, tmp->length, MYF(MY_FAE)); } count++; } return count; } uint parse_delimiter(const char *script, statement **stmt, char delm) { char *retstr; char *ptr= (char *)script; statement **sptr= stmt; statement *tmp; uint length= strlen(script); uint count= 0; /* We know that there is always one */ for (tmp= *sptr= (statement *)my_malloc(PSI_NOT_INSTRUMENTED, sizeof(statement), MYF(MY_ZEROFILL|MY_FAE|MY_WME)); (retstr= strchr(ptr, delm)); tmp->next= (statement *)my_malloc(PSI_NOT_INSTRUMENTED, sizeof(statement), MYF(MY_ZEROFILL|MY_FAE|MY_WME)), tmp= tmp->next) { count++; tmp->string= my_strndup(PSI_NOT_INSTRUMENTED, ptr, (uint)(retstr - ptr), MYF(MY_FAE)); tmp->length= (size_t)(retstr - ptr); ptr+= 
retstr - ptr + 1; if (isspace(*ptr)) ptr++; } if (ptr != script+length) { tmp->string= my_strndup(PSI_NOT_INSTRUMENTED, ptr, (uint)((script + length) - ptr), MYF(MY_FAE)); tmp->length= (size_t)((script + length) - ptr); count++; } return count; } uint parse_comma(const char *string, uint **range) { uint count= 1,x; /* We know that there is always one */ char *retstr; char *ptr= (char *)string; uint *nptr; for (;*ptr; ptr++) if (*ptr == ',') count++; /* One extra spot for the NULL */ nptr= *range= (uint *)my_malloc(PSI_NOT_INSTRUMENTED, sizeof(uint) * (count + 1), MYF(MY_ZEROFILL|MY_FAE|MY_WME)); ptr= (char *)string; x= 0; while ((retstr= strchr(ptr,','))) { nptr[x++]= atoi(ptr); ptr+= retstr - ptr + 1; } nptr[x++]= atoi(ptr); return count; } void print_conclusions(conclusions *con) { printf("Benchmark\n"); if (con->engine) printf("\tRunning for engine %s\n", con->engine); printf("\tAverage number of seconds to run all queries: %ld.%03ld seconds\n", con->avg_timing / 1000, con->avg_timing % 1000); printf("\tMinimum number of seconds to run all queries: %ld.%03ld seconds\n", con->min_timing / 1000, con->min_timing % 1000); printf("\tMaximum number of seconds to run all queries: %ld.%03ld seconds\n", con->max_timing / 1000, con->max_timing % 1000); printf("\tNumber of clients running queries: %d\n", con->users); printf("\tAverage number of queries per client: %llu\n", con->avg_rows); printf("\n"); } void print_conclusions_csv(conclusions *con) { char buffer[HUGE_STRING_LENGTH]; const char *ptr= auto_generate_sql_type ? auto_generate_sql_type : "query"; snprintf(buffer, HUGE_STRING_LENGTH, "%s,%s,%ld.%03ld,%ld.%03ld,%ld.%03ld,%d,%llu\n", con->engine ? 
con->engine : "", /* Storage engine we ran against */ ptr, /* Load type */ con->avg_timing / 1000, con->avg_timing % 1000, /* Time to load */ con->min_timing / 1000, con->min_timing % 1000, /* Min time */ con->max_timing / 1000, con->max_timing % 1000, /* Max time */ con->users, /* Children used */ con->avg_rows /* Queries run */ ); my_write(csv_file, (uchar*) buffer, (uint)strlen(buffer), MYF(0)); } void generate_stats(conclusions *con, option_string *eng, stats *sptr) { stats *ptr; unsigned int x; con->min_timing= sptr->timing; con->max_timing= sptr->timing; con->min_rows= sptr->rows; con->max_rows= sptr->rows; /* At the moment we assume uniform */ con->users= sptr->users; con->avg_rows= sptr->rows; /* With no next, we know it is the last element that was malloced */ for (ptr= sptr, x= 0; x < iterations; ptr++, x++) { con->avg_timing+= ptr->timing; if (ptr->timing > con->max_timing) con->max_timing= ptr->timing; if (ptr->timing < con->min_timing) con->min_timing= ptr->timing; } con->avg_timing= con->avg_timing/iterations; if (eng && eng->string) con->engine= eng->string; else con->engine= NULL; } void option_cleanup(option_string *stmt) { option_string *ptr, *nptr; if (!stmt) return; for (ptr= stmt; ptr; ptr= nptr) { nptr= ptr->next; my_free(ptr->string); my_free(ptr->option); my_free(ptr); } } void statement_cleanup(statement *stmt) { statement *ptr, *nptr; if (!stmt) return; for (ptr= stmt; ptr; ptr= nptr) { nptr= ptr->next; my_free(ptr->string); my_free(ptr); } } int slap_connect(MYSQL *mysql) { /* Connect to server */ static ulong connection_retry_sleep= 100000; /* Microseconds */ int x, connect_error= 1; for (x= 0; x < 10; x++) { if (mysql_real_connect(mysql, host, user, opt_password, create_schema_string, opt_mysql_port, opt_mysql_unix_port, connect_flags)) { /* Connect suceeded */ connect_error= 0; break; } my_sleep(connection_retry_sleep); } if (connect_error) { fprintf(stderr,"%s: Error when connecting to server: %d %s\n", my_progname, 
mysql_errno(mysql), mysql_error(mysql)); return 1; } return 0; }
./CrossVul/dataset_final_sorted/CWE-284/c/good_1571_9
crossvul-cpp_data_bad_5019_0
/*
 * vMTRR implementation
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 * Copyright(C) 2015 Intel Corporation.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 *   Marcelo Tosatti <mtosatti@redhat.com>
 *   Paolo Bonzini <pbonzini@redhat.com>
 *   Xiao Guangrong <guangrong.xiao@linux.intel.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */

#include <linux/kvm_host.h>
#include <asm/mtrr.h>

#include "cpuid.h"
#include "mmu.h"

#define IA32_MTRR_DEF_TYPE_E		(1ULL << 11)
#define IA32_MTRR_DEF_TYPE_FE		(1ULL << 10)
#define IA32_MTRR_DEF_TYPE_TYPE_MASK	(0xff)

/*
 * Return true if @msr is one of the architectural MTRR MSRs (variable
 * range pairs, the fixed-range MSRs, MTRRdefType) or IA32_CR_PAT.
 *
 * FIX: the bogus "case 0x2f8: return true;" has been removed.  0x2f8 is
 * not an MTRR MSR; accepting it let a guest reach the variable-MTRR
 * handling with an index outside the vCPU's MTRR state, an out-of-bounds
 * access (CVE-2016-3713).  Such writes now correctly #GP.
 */
static bool msr_mtrr_valid(unsigned msr)
{
	switch (msr) {
	case 0x200 ... 0x200 + 2 * KVM_NR_VAR_MTRR - 1:
	case MSR_MTRRfix64K_00000:
	case MSR_MTRRfix16K_80000:
	case MSR_MTRRfix16K_A0000:
	case MSR_MTRRfix4K_C0000:
	case MSR_MTRRfix4K_C8000:
	case MSR_MTRRfix4K_D0000:
	case MSR_MTRRfix4K_D8000:
	case MSR_MTRRfix4K_E0000:
	case MSR_MTRRfix4K_E8000:
	case MSR_MTRRfix4K_F0000:
	case MSR_MTRRfix4K_F8000:
	case MSR_MTRRdefType:
	case MSR_IA32_CR_PAT:
		return true;
	}
	return false;
}

/* PAT memory types: 0, 1, 4, 5, 6, 7 are defined. */
static bool valid_pat_type(unsigned t)
{
	return t < 8 && (1 << t) & 0xf3; /* 0, 1, 4, 5, 6, 7 */
}

/* MTRR memory types: 0, 1, 4, 5, 6 are defined. */
static bool valid_mtrr_type(unsigned t)
{
	return t < 8 && (1 << t) & 0x73; /* 0, 1, 4, 5, 6 */
}

/*
 * Validate a guest write of @data to MTRR/PAT MSR @msr.  Returns true if
 * the value is architecturally valid; injects #GP and returns false for
 * reserved-bit violations in variable-range MTRRs.
 */
bool kvm_mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	int i;
	u64 mask;

	if (!msr_mtrr_valid(msr))
		return false;

	if (msr == MSR_IA32_CR_PAT) {
		for (i = 0; i < 8; i++)
			if (!valid_pat_type((data >> (i * 8)) & 0xff))
				return false;
		return true;
	} else if (msr == MSR_MTRRdefType) {
		if (data & ~0xcff)
			return false;
		return valid_mtrr_type(data & 0xff);
	} else if (msr >= MSR_MTRRfix64K_00000 && msr <= MSR_MTRRfix4K_F8000) {
		for (i = 0; i < 8 ; i++)
			if (!valid_mtrr_type((data >> (i * 8)) & 0xff))
				return false;
		return true;
	}

	/* variable MTRRs */
	WARN_ON(!(msr >= 0x200 && msr < 0x200 + 2 * KVM_NR_VAR_MTRR));

	mask = (~0ULL) << cpuid_maxphyaddr(vcpu);
	if ((msr & 1) == 0) {
		/* MTRR base */
		if (!valid_mtrr_type(data & 0xff))
			return false;
		mask |= 0xf00;
	} else
		/* MTRR mask */
		mask |= 0x7ff;
	if (data & mask) {
		kvm_inject_gp(vcpu, 0);
		return false;
	}

	return true;
}
EXPORT_SYMBOL_GPL(kvm_mtrr_valid);

/* True when IA32_MTRR_DEF_TYPE.E (MTRRs globally enabled) is set. */
static bool mtrr_is_enabled(struct kvm_mtrr *mtrr_state)
{
	return !!(mtrr_state->deftype & IA32_MTRR_DEF_TYPE_E);
}

/* True when IA32_MTRR_DEF_TYPE.FE (fixed-range MTRRs enabled) is set. */
static bool fixed_mtrr_is_enabled(struct kvm_mtrr *mtrr_state)
{
	return !!(mtrr_state->deftype & IA32_MTRR_DEF_TYPE_FE);
}

/* Default memory type field of IA32_MTRR_DEF_TYPE. */
static u8 mtrr_default_type(struct kvm_mtrr *mtrr_state)
{
	return mtrr_state->deftype & IA32_MTRR_DEF_TYPE_TYPE_MASK;
}

static u8 mtrr_disabled_type(struct kvm_vcpu *vcpu)
{
	/*
	 * Intel SDM 11.11.2.2: all MTRRs are disabled when
	 * IA32_MTRR_DEF_TYPE.E bit is cleared, and the UC
	 * memory type is applied to all of physical memory.
	 *
	 * However, virtual machines can be run with CPUID such that
	 * there are no MTRRs.  In that case, the firmware will never
	 * enable MTRRs and it is obviously undesirable to run the
	 * guest entirely with UC memory and we use WB.
	 */
	if (guest_cpuid_has_mtrr(vcpu))
		return MTRR_TYPE_UNCACHABLE;
	else
		return MTRR_TYPE_WRBACK;
}

/*
* Three terms are used in the following code:
* - segment, it indicates the address segments covered by fixed MTRRs.
* - unit, it corresponds to the MSR entry in the segment.
* - range, a range is covered in one memory cache type.
*/
struct fixed_mtrr_segment {
	u64 start;
	u64 end;

	int range_shift;

	/* the start position in kvm_mtrr.fixed_ranges[]. */
	int range_start;
};

static struct fixed_mtrr_segment fixed_seg_table[] = {
	/* MSR_MTRRfix64K_00000, 1 unit. 64K fixed mtrr. */
	{
		.start = 0x0,
		.end = 0x80000,
		.range_shift = 16, /* 64K */
		.range_start = 0,
	},

	/*
	 * MSR_MTRRfix16K_80000 ... MSR_MTRRfix16K_A0000, 2 units,
	 * 16K fixed mtrr.
	 */
	{
		.start = 0x80000,
		.end = 0xc0000,
		.range_shift = 14, /* 16K */
		.range_start = 8,
	},

	/*
	 * MSR_MTRRfix4K_C0000 ... MSR_MTRRfix4K_F8000, 8 units,
	 * 4K fixed mtrr.
	 */
	{
		.start = 0xc0000,
		.end = 0x100000,
		.range_shift = 12, /* 4K (comment fixed: 1 << 12 is 4K, not 12K) */
		.range_start = 24,
	}
};

/*
 * The size of unit is covered in one MSR, one MSR entry contains
 * 8 ranges so that unit size is always 8 * 2^range_shift.
 */
static u64 fixed_mtrr_seg_unit_size(int seg)
{
	return 8 << fixed_seg_table[seg].range_shift;
}

/* Map a fixed-range MTRR MSR to its (segment, unit) pair. */
static bool fixed_msr_to_seg_unit(u32 msr, int *seg, int *unit)
{
	switch (msr) {
	case MSR_MTRRfix64K_00000:
		*seg = 0;
		*unit = 0;
		break;
	case MSR_MTRRfix16K_80000 ... MSR_MTRRfix16K_A0000:
		*seg = 1;
		*unit = msr - MSR_MTRRfix16K_80000;
		break;
	case MSR_MTRRfix4K_C0000 ... MSR_MTRRfix4K_F8000:
		*seg = 2;
		*unit = msr - MSR_MTRRfix4K_C0000;
		break;
	default:
		return false;
	}

	return true;
}

/* Physical address span [start, end) covered by one unit of a segment. */
static void fixed_mtrr_seg_unit_range(int seg, int unit, u64 *start, u64 *end)
{
	struct fixed_mtrr_segment *mtrr_seg = &fixed_seg_table[seg];
	u64 unit_size = fixed_mtrr_seg_unit_size(seg);

	*start = mtrr_seg->start + unit * unit_size;
	*end = *start + unit_size;
	WARN_ON(*end > mtrr_seg->end);
}

/* First index in kvm_mtrr.fixed_ranges[] covered by (seg, unit). */
static int fixed_mtrr_seg_unit_range_index(int seg, int unit)
{
	struct fixed_mtrr_segment *mtrr_seg = &fixed_seg_table[seg];

	WARN_ON(mtrr_seg->start + unit * fixed_mtrr_seg_unit_size(seg)
		> mtrr_seg->end);

	/* each unit has 8 ranges. */
	return mtrr_seg->range_start + 8 * unit;
}

/* Last index in kvm_mtrr.fixed_ranges[] belonging to segment @seg. */
static int fixed_mtrr_seg_end_range_index(int seg)
{
	struct fixed_mtrr_segment *mtrr_seg = &fixed_seg_table[seg];
	int n;

	n = (mtrr_seg->end - mtrr_seg->start) >> mtrr_seg->range_shift;
	return mtrr_seg->range_start + n - 1;
}

/* Address span covered by a fixed-range MSR; false if @msr is not one. */
static bool fixed_msr_to_range(u32 msr, u64 *start, u64 *end)
{
	int seg, unit;

	if (!fixed_msr_to_seg_unit(msr, &seg, &unit))
		return false;

	fixed_mtrr_seg_unit_range(seg, unit, start, end);
	return true;
}

/* fixed_ranges[] index for a fixed-range MSR, or -1 if not one. */
static int fixed_msr_to_range_index(u32 msr)
{
	int seg, unit;

	if (!fixed_msr_to_seg_unit(msr, &seg, &unit))
		return -1;

	return fixed_mtrr_seg_unit_range_index(seg, unit);
}

/* Segment containing physical address @addr, or -1 if none. */
static int fixed_mtrr_addr_to_seg(u64 addr)
{
	struct fixed_mtrr_segment *mtrr_seg;
	int seg, seg_num = ARRAY_SIZE(fixed_seg_table);

	for (seg = 0; seg < seg_num; seg++) {
		mtrr_seg = &fixed_seg_table[seg];
		if (mtrr_seg->start <= addr && addr < mtrr_seg->end)
			return seg;
	}

	return -1;
}

/* fixed_ranges[] index covering @addr within segment @seg. */
static int fixed_mtrr_addr_seg_to_range_index(u64 addr, int seg)
{
	struct fixed_mtrr_segment *mtrr_seg;
	int index;

	mtrr_seg = &fixed_seg_table[seg];
	index = mtrr_seg->range_start;
	index += (addr - mtrr_seg->start) >> mtrr_seg->range_shift;
	return index;
}

/* End (exclusive) physical address of fixed range @index in segment @seg. */
static u64 fixed_mtrr_range_end_addr(int seg, int index)
{
	struct fixed_mtrr_segment *mtrr_seg = &fixed_seg_table[seg];
	int pos = index - mtrr_seg->range_start;

	return mtrr_seg->start + ((pos + 1) << mtrr_seg->range_shift);
}

static void var_mtrr_range(struct kvm_mtrr_range *range, u64 *start, u64 *end)
{
	u64 mask;

	*start = range->base & PAGE_MASK;

	mask = range->mask & PAGE_MASK;

	/* This cannot overflow because writing to the reserved bits of
	 * variable MTRRs causes a #GP.
*/ *end = (*start | ~mask) + 1; } static void update_mtrr(struct kvm_vcpu *vcpu, u32 msr) { struct kvm_mtrr *mtrr_state = &vcpu->arch.mtrr_state; gfn_t start, end; int index; if (msr == MSR_IA32_CR_PAT || !tdp_enabled || !kvm_arch_has_noncoherent_dma(vcpu->kvm)) return; if (!mtrr_is_enabled(mtrr_state) && msr != MSR_MTRRdefType) return; /* fixed MTRRs. */ if (fixed_msr_to_range(msr, &start, &end)) { if (!fixed_mtrr_is_enabled(mtrr_state)) return; } else if (msr == MSR_MTRRdefType) { start = 0x0; end = ~0ULL; } else { /* variable range MTRRs. */ index = (msr - 0x200) / 2; var_mtrr_range(&mtrr_state->var_ranges[index], &start, &end); } kvm_zap_gfn_range(vcpu->kvm, gpa_to_gfn(start), gpa_to_gfn(end)); } static bool var_mtrr_range_is_valid(struct kvm_mtrr_range *range) { return (range->mask & (1 << 11)) != 0; } static void set_var_mtrr_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data) { struct kvm_mtrr *mtrr_state = &vcpu->arch.mtrr_state; struct kvm_mtrr_range *tmp, *cur; int index, is_mtrr_mask; index = (msr - 0x200) / 2; is_mtrr_mask = msr - 0x200 - 2 * index; cur = &mtrr_state->var_ranges[index]; /* remove the entry if it's in the list. */ if (var_mtrr_range_is_valid(cur)) list_del(&mtrr_state->var_ranges[index].node); /* Extend the mask with all 1 bits to the left, since those * bits must implicitly be 0. The bits are then cleared * when reading them. */ if (!is_mtrr_mask) cur->base = data; else cur->mask = data | (-1LL << cpuid_maxphyaddr(vcpu)); /* add it to the list if it's enabled. 
*/ if (var_mtrr_range_is_valid(cur)) { list_for_each_entry(tmp, &mtrr_state->head, node) if (cur->base >= tmp->base) break; list_add_tail(&cur->node, &tmp->node); } } int kvm_mtrr_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data) { int index; if (!kvm_mtrr_valid(vcpu, msr, data)) return 1; index = fixed_msr_to_range_index(msr); if (index >= 0) *(u64 *)&vcpu->arch.mtrr_state.fixed_ranges[index] = data; else if (msr == MSR_MTRRdefType) vcpu->arch.mtrr_state.deftype = data; else if (msr == MSR_IA32_CR_PAT) vcpu->arch.pat = data; else set_var_mtrr_msr(vcpu, msr, data); update_mtrr(vcpu, msr); return 0; } int kvm_mtrr_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata) { int index; /* MSR_MTRRcap is a readonly MSR. */ if (msr == MSR_MTRRcap) { /* * SMRR = 0 * WC = 1 * FIX = 1 * VCNT = KVM_NR_VAR_MTRR */ *pdata = 0x500 | KVM_NR_VAR_MTRR; return 0; } if (!msr_mtrr_valid(msr)) return 1; index = fixed_msr_to_range_index(msr); if (index >= 0) *pdata = *(u64 *)&vcpu->arch.mtrr_state.fixed_ranges[index]; else if (msr == MSR_MTRRdefType) *pdata = vcpu->arch.mtrr_state.deftype; else if (msr == MSR_IA32_CR_PAT) *pdata = vcpu->arch.pat; else { /* Variable MTRRs */ int is_mtrr_mask; index = (msr - 0x200) / 2; is_mtrr_mask = msr - 0x200 - 2 * index; if (!is_mtrr_mask) *pdata = vcpu->arch.mtrr_state.var_ranges[index].base; else *pdata = vcpu->arch.mtrr_state.var_ranges[index].mask; *pdata &= (1ULL << cpuid_maxphyaddr(vcpu)) - 1; } return 0; } void kvm_vcpu_mtrr_init(struct kvm_vcpu *vcpu) { INIT_LIST_HEAD(&vcpu->arch.mtrr_state.head); } struct mtrr_iter { /* input fields. */ struct kvm_mtrr *mtrr_state; u64 start; u64 end; /* output fields. */ int mem_type; /* mtrr is completely disabled? */ bool mtrr_disabled; /* [start, end) is not fully covered in MTRRs? */ bool partial_map; /* private fields. */ union { /* used for fixed MTRRs. */ struct { int index; int seg; }; /* used for var MTRRs. */ struct { struct kvm_mtrr_range *range; /* max address has been covered in var MTRRs. 
*/ u64 start_max; }; }; bool fixed; }; static bool mtrr_lookup_fixed_start(struct mtrr_iter *iter) { int seg, index; if (!fixed_mtrr_is_enabled(iter->mtrr_state)) return false; seg = fixed_mtrr_addr_to_seg(iter->start); if (seg < 0) return false; iter->fixed = true; index = fixed_mtrr_addr_seg_to_range_index(iter->start, seg); iter->index = index; iter->seg = seg; return true; } static bool match_var_range(struct mtrr_iter *iter, struct kvm_mtrr_range *range) { u64 start, end; var_mtrr_range(range, &start, &end); if (!(start >= iter->end || end <= iter->start)) { iter->range = range; /* * the function is called when we do kvm_mtrr.head walking. * Range has the minimum base address which interleaves * [looker->start_max, looker->end). */ iter->partial_map |= iter->start_max < start; /* update the max address has been covered. */ iter->start_max = max(iter->start_max, end); return true; } return false; } static void __mtrr_lookup_var_next(struct mtrr_iter *iter) { struct kvm_mtrr *mtrr_state = iter->mtrr_state; list_for_each_entry_continue(iter->range, &mtrr_state->head, node) if (match_var_range(iter, iter->range)) return; iter->range = NULL; iter->partial_map |= iter->start_max < iter->end; } static void mtrr_lookup_var_start(struct mtrr_iter *iter) { struct kvm_mtrr *mtrr_state = iter->mtrr_state; iter->fixed = false; iter->start_max = iter->start; iter->range = list_prepare_entry(iter->range, &mtrr_state->head, node); __mtrr_lookup_var_next(iter); } static void mtrr_lookup_fixed_next(struct mtrr_iter *iter) { /* terminate the lookup. */ if (fixed_mtrr_range_end_addr(iter->seg, iter->index) >= iter->end) { iter->fixed = false; iter->range = NULL; return; } iter->index++; /* have looked up for all fixed MTRRs. */ if (iter->index >= ARRAY_SIZE(iter->mtrr_state->fixed_ranges)) return mtrr_lookup_var_start(iter); /* switch to next segment. 
*/ if (iter->index > fixed_mtrr_seg_end_range_index(iter->seg)) iter->seg++; } static void mtrr_lookup_var_next(struct mtrr_iter *iter) { __mtrr_lookup_var_next(iter); } static void mtrr_lookup_start(struct mtrr_iter *iter) { if (!mtrr_is_enabled(iter->mtrr_state)) { iter->mtrr_disabled = true; return; } if (!mtrr_lookup_fixed_start(iter)) mtrr_lookup_var_start(iter); } static void mtrr_lookup_init(struct mtrr_iter *iter, struct kvm_mtrr *mtrr_state, u64 start, u64 end) { iter->mtrr_state = mtrr_state; iter->start = start; iter->end = end; iter->mtrr_disabled = false; iter->partial_map = false; iter->fixed = false; iter->range = NULL; mtrr_lookup_start(iter); } static bool mtrr_lookup_okay(struct mtrr_iter *iter) { if (iter->fixed) { iter->mem_type = iter->mtrr_state->fixed_ranges[iter->index]; return true; } if (iter->range) { iter->mem_type = iter->range->base & 0xff; return true; } return false; } static void mtrr_lookup_next(struct mtrr_iter *iter) { if (iter->fixed) mtrr_lookup_fixed_next(iter); else mtrr_lookup_var_next(iter); } #define mtrr_for_each_mem_type(_iter_, _mtrr_, _gpa_start_, _gpa_end_) \ for (mtrr_lookup_init(_iter_, _mtrr_, _gpa_start_, _gpa_end_); \ mtrr_lookup_okay(_iter_); mtrr_lookup_next(_iter_)) u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn) { struct kvm_mtrr *mtrr_state = &vcpu->arch.mtrr_state; struct mtrr_iter iter; u64 start, end; int type = -1; const int wt_wb_mask = (1 << MTRR_TYPE_WRBACK) | (1 << MTRR_TYPE_WRTHROUGH); start = gfn_to_gpa(gfn); end = start + PAGE_SIZE; mtrr_for_each_mem_type(&iter, mtrr_state, start, end) { int curr_type = iter.mem_type; /* * Please refer to Intel SDM Volume 3: 11.11.4.1 MTRR * Precedences. */ if (type == -1) { type = curr_type; continue; } /* * If two or more variable memory ranges match and the * memory types are identical, then that memory type is * used. 
*/ if (type == curr_type) continue; /* * If two or more variable memory ranges match and one of * the memory types is UC, the UC memory type used. */ if (curr_type == MTRR_TYPE_UNCACHABLE) return MTRR_TYPE_UNCACHABLE; /* * If two or more variable memory ranges match and the * memory types are WT and WB, the WT memory type is used. */ if (((1 << type) & wt_wb_mask) && ((1 << curr_type) & wt_wb_mask)) { type = MTRR_TYPE_WRTHROUGH; continue; } /* * For overlaps not defined by the above rules, processor * behavior is undefined. */ /* We use WB for this undefined behavior. :( */ return MTRR_TYPE_WRBACK; } if (iter.mtrr_disabled) return mtrr_disabled_type(vcpu); /* not contained in any MTRRs. */ if (type == -1) return mtrr_default_type(mtrr_state); /* * We just check one page, partially covered by MTRRs is * impossible. */ WARN_ON(iter.partial_map); return type; } EXPORT_SYMBOL_GPL(kvm_mtrr_get_guest_memory_type); bool kvm_mtrr_check_gfn_range_consistency(struct kvm_vcpu *vcpu, gfn_t gfn, int page_num) { struct kvm_mtrr *mtrr_state = &vcpu->arch.mtrr_state; struct mtrr_iter iter; u64 start, end; int type = -1; start = gfn_to_gpa(gfn); end = gfn_to_gpa(gfn + page_num); mtrr_for_each_mem_type(&iter, mtrr_state, start, end) { if (type == -1) { type = iter.mem_type; continue; } if (type != iter.mem_type) return false; } if (iter.mtrr_disabled) return true; if (!iter.partial_map) return true; if (type == -1) return true; return type == mtrr_default_type(mtrr_state); }
./CrossVul/dataset_final_sorted/CWE-284/c/bad_5019_0
crossvul-cpp_data_bad_5093_0
/* * socket.c * * Copyright (C) 2012 Martin Szulecki <m.szulecki@libimobiledevice.org> * Copyright (C) 2012 Nikias Bassen <nikias@gmx.li> * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include <stdio.h> #include <stddef.h> #include <stdlib.h> #include <string.h> #include <unistd.h> #include <errno.h> #include <sys/time.h> #include <sys/stat.h> #ifdef WIN32 #include <winsock2.h> #include <windows.h> static int wsa_init = 0; #else #include <sys/socket.h> #include <sys/un.h> #include <netinet/in.h> #include <netdb.h> #include <arpa/inet.h> #endif #include "socket.h" #define RECV_TIMEOUT 20000 static int verbose = 0; void socket_set_verbose(int level) { verbose = level; } #ifndef WIN32 int socket_create_unix(const char *filename) { struct sockaddr_un name; int sock; size_t size; #ifdef SO_NOSIGPIPE int yes = 1; #endif // remove if still present unlink(filename); /* Create the socket. */ sock = socket(PF_LOCAL, SOCK_STREAM, 0); if (sock < 0) { perror("socket"); return -1; } #ifdef SO_NOSIGPIPE if (setsockopt(sock, SOL_SOCKET, SO_NOSIGPIPE, (void*)&yes, sizeof(int)) == -1) { perror("setsockopt()"); socket_close(sock); return -1; } #endif /* Bind a name to the socket. 
*/ name.sun_family = AF_LOCAL; strncpy(name.sun_path, filename, sizeof(name.sun_path)); name.sun_path[sizeof(name.sun_path) - 1] = '\0'; /* The size of the address is the offset of the start of the filename, plus its length, plus one for the terminating null byte. Alternatively you can just do: size = SUN_LEN (&name); */ size = (offsetof(struct sockaddr_un, sun_path) + strlen(name.sun_path) + 1); if (bind(sock, (struct sockaddr *) &name, size) < 0) { perror("bind"); socket_close(sock); return -1; } if (listen(sock, 10) < 0) { perror("listen"); socket_close(sock); return -1; } return sock; } int socket_connect_unix(const char *filename) { struct sockaddr_un name; int sfd = -1; size_t size; struct stat fst; #ifdef SO_NOSIGPIPE int yes = 1; #endif // check if socket file exists... if (stat(filename, &fst) != 0) { if (verbose >= 2) fprintf(stderr, "%s: stat '%s': %s\n", __func__, filename, strerror(errno)); return -1; } // ... and if it is a unix domain socket if (!S_ISSOCK(fst.st_mode)) { if (verbose >= 2) fprintf(stderr, "%s: File '%s' is not a socket!\n", __func__, filename); return -1; } // make a new socket if ((sfd = socket(PF_LOCAL, SOCK_STREAM, 0)) < 0) { if (verbose >= 2) fprintf(stderr, "%s: socket: %s\n", __func__, strerror(errno)); return -1; } #ifdef SO_NOSIGPIPE if (setsockopt(sfd, SOL_SOCKET, SO_NOSIGPIPE, (void*)&yes, sizeof(int)) == -1) { perror("setsockopt()"); socket_close(sfd); return -1; } #endif // and connect to 'filename' name.sun_family = AF_LOCAL; strncpy(name.sun_path, filename, sizeof(name.sun_path)); name.sun_path[sizeof(name.sun_path) - 1] = 0; size = (offsetof(struct sockaddr_un, sun_path) + strlen(name.sun_path) + 1); if (connect(sfd, (struct sockaddr *) &name, size) < 0) { socket_close(sfd); if (verbose >= 2) fprintf(stderr, "%s: connect: %s\n", __func__, strerror(errno)); return -1; } return sfd; } #endif int socket_create(uint16_t port) { int sfd = -1; int yes = 1; #ifdef WIN32 WSADATA wsa_data; if (!wsa_init) { if 
(WSAStartup(MAKEWORD(2,2), &wsa_data) != ERROR_SUCCESS) { fprintf(stderr, "WSAStartup failed!\n"); ExitProcess(-1); } wsa_init = 1; } #endif struct sockaddr_in saddr; if (0 > (sfd = socket(PF_INET, SOCK_STREAM, IPPROTO_TCP))) { perror("socket()"); return -1; } if (setsockopt(sfd, SOL_SOCKET, SO_REUSEADDR, (void*)&yes, sizeof(int)) == -1) { perror("setsockopt()"); socket_close(sfd); return -1; } #ifdef SO_NOSIGPIPE if (setsockopt(sfd, SOL_SOCKET, SO_NOSIGPIPE, (void*)&yes, sizeof(int)) == -1) { perror("setsockopt()"); socket_close(sfd); return -1; } #endif memset((void *) &saddr, 0, sizeof(saddr)); saddr.sin_family = AF_INET; saddr.sin_addr.s_addr = htonl(INADDR_ANY); saddr.sin_port = htons(port); if (0 > bind(sfd, (struct sockaddr *) &saddr, sizeof(saddr))) { perror("bind()"); socket_close(sfd); return -1; } if (listen(sfd, 1) == -1) { perror("listen()"); socket_close(sfd); return -1; } return sfd; } int socket_connect(const char *addr, uint16_t port) { int sfd = -1; int yes = 1; struct hostent *hp; struct sockaddr_in saddr; #ifdef WIN32 WSADATA wsa_data; if (!wsa_init) { if (WSAStartup(MAKEWORD(2,2), &wsa_data) != ERROR_SUCCESS) { fprintf(stderr, "WSAStartup failed!\n"); ExitProcess(-1); } wsa_init = 1; } #endif if (!addr) { errno = EINVAL; return -1; } if ((hp = gethostbyname(addr)) == NULL) { if (verbose >= 2) fprintf(stderr, "%s: unknown host '%s'\n", __func__, addr); return -1; } if (!hp->h_addr) { if (verbose >= 2) fprintf(stderr, "%s: gethostbyname returned NULL address!\n", __func__); return -1; } if (0 > (sfd = socket(PF_INET, SOCK_STREAM, IPPROTO_TCP))) { perror("socket()"); return -1; } if (setsockopt(sfd, SOL_SOCKET, SO_REUSEADDR, (void*)&yes, sizeof(int)) == -1) { perror("setsockopt()"); socket_close(sfd); return -1; } #ifdef SO_NOSIGPIPE if (setsockopt(sfd, SOL_SOCKET, SO_NOSIGPIPE, (void*)&yes, sizeof(int)) == -1) { perror("setsockopt()"); socket_close(sfd); return -1; } #endif memset((void *) &saddr, 0, sizeof(saddr)); saddr.sin_family = AF_INET; 
saddr.sin_addr.s_addr = *(uint32_t *) hp->h_addr; saddr.sin_port = htons(port); if (connect(sfd, (struct sockaddr *) &saddr, sizeof(saddr)) < 0) { perror("connect"); socket_close(sfd); return -2; } return sfd; } int socket_check_fd(int fd, fd_mode fdm, unsigned int timeout) { fd_set fds; int sret; int eagain; struct timeval to; struct timeval *pto; if (fd < 0) { if (verbose >= 2) fprintf(stderr, "ERROR: invalid fd in check_fd %d\n", fd); return -1; } FD_ZERO(&fds); FD_SET(fd, &fds); if (timeout > 0) { to.tv_sec = (time_t) (timeout / 1000); to.tv_usec = (time_t) ((timeout - (to.tv_sec * 1000)) * 1000); pto = &to; } else { pto = NULL; } sret = -1; do { eagain = 0; switch (fdm) { case FDM_READ: sret = select(fd + 1, &fds, NULL, NULL, pto); break; case FDM_WRITE: sret = select(fd + 1, NULL, &fds, NULL, pto); break; case FDM_EXCEPT: sret = select(fd + 1, NULL, NULL, &fds, pto); break; default: return -1; } if (sret < 0) { switch (errno) { case EINTR: // interrupt signal in select if (verbose >= 2) fprintf(stderr, "%s: EINTR\n", __func__); eagain = 1; break; case EAGAIN: if (verbose >= 2) fprintf(stderr, "%s: EAGAIN\n", __func__); break; default: if (verbose >= 2) fprintf(stderr, "%s: select failed: %s\n", __func__, strerror(errno)); return -1; } } } while (eagain); return sret; } int socket_accept(int fd, uint16_t port) { #ifdef WIN32 int addr_len; #else socklen_t addr_len; #endif int result; struct sockaddr_in addr; memset(&addr, 0, sizeof(addr)); addr.sin_family = AF_INET; addr.sin_addr.s_addr = htonl(INADDR_ANY); addr.sin_port = htons(port); addr_len = sizeof(addr); result = accept(fd, (struct sockaddr*)&addr, &addr_len); return result; } int socket_shutdown(int fd, int how) { return shutdown(fd, how); } int socket_close(int fd) { #ifdef WIN32 return closesocket(fd); #else return close(fd); #endif } int socket_receive(int fd, void *data, size_t length) { return socket_receive_timeout(fd, data, length, 0, RECV_TIMEOUT); } int socket_peek(int fd, void *data, size_t 
length) { return socket_receive_timeout(fd, data, length, MSG_PEEK, RECV_TIMEOUT); } int socket_receive_timeout(int fd, void *data, size_t length, int flags, unsigned int timeout) { int res; int result; // check if data is available res = socket_check_fd(fd, FDM_READ, timeout); if (res <= 0) { return res; } // if we get here, there _is_ data available result = recv(fd, data, length, flags); if (res > 0 && result == 0) { // but this is an error condition if (verbose >= 3) fprintf(stderr, "%s: fd=%d recv returned 0\n", __func__, fd); return -EAGAIN; } if (result < 0) { return -errno; } return result; } int socket_send(int fd, void *data, size_t length) { int flags = 0; #ifdef MSG_NOSIGNAL flags |= MSG_NOSIGNAL; #endif return send(fd, data, length, flags); }
./CrossVul/dataset_final_sorted/CWE-284/c/bad_5093_0
crossvul-cpp_data_bad_880_3
/* * Copyright (C) 2014-2019 Firejail Authors * * This file is part of firejail project * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #include "firejail.h" #include <sys/mount.h> #include <sys/wait.h> #include <sys/stat.h> #include <sys/prctl.h> #include <sys/time.h> #include <sys/resource.h> #include <sys/types.h> #include <dirent.h> #include <errno.h> #include <fcntl.h> #include <sched.h> #ifndef CLONE_NEWUSER #define CLONE_NEWUSER 0x10000000 #endif #include <sys/prctl.h> #ifndef PR_SET_NO_NEW_PRIVS # define PR_SET_NO_NEW_PRIVS 38 #endif #ifndef PR_GET_NO_NEW_PRIVS # define PR_GET_NO_NEW_PRIVS 39 #endif #ifdef HAVE_APPARMOR #include <sys/apparmor.h> #endif #include <syscall.h> static int force_nonewprivs = 0; static int monitored_pid = 0; static void sandbox_handler(int sig){ usleep(10000); // don't race to print a message fmessage("\nChild received signal %d, shutting down the sandbox...\n", sig); // broadcast sigterm to all processes in the group kill(-1, SIGTERM); sleep(1); if (monitored_pid) { int monsec = 9; char *monfile; if (asprintf(&monfile, "/proc/%d/cmdline", monitored_pid) == -1) errExit("asprintf"); while (monsec) { FILE *fp = fopen(monfile, "r"); if (!fp) break; char c; size_t count = fread(&c, 1, 1, fp); fclose(fp); if (count == 0) break; if (arg_debug) printf("Waiting on PID %d to finish\n", 
monitored_pid); sleep(1); monsec--; } free(monfile); } // broadcast a SIGKILL kill(-1, SIGKILL); flush_stdin(); exit(sig); } static void install_handler(void) { struct sigaction sga; // block SIGTERM while handling SIGINT sigemptyset(&sga.sa_mask); sigaddset(&sga.sa_mask, SIGTERM); sga.sa_handler = sandbox_handler; sga.sa_flags = 0; sigaction(SIGINT, &sga, NULL); // block SIGINT while handling SIGTERM sigemptyset(&sga.sa_mask); sigaddset(&sga.sa_mask, SIGINT); sga.sa_handler = sandbox_handler; sga.sa_flags = 0; sigaction(SIGTERM, &sga, NULL); } static void set_caps(void) { if (arg_caps_drop_all) caps_drop_all(); else if (arg_caps_drop) caps_drop_list(arg_caps_list); else if (arg_caps_keep) caps_keep_list(arg_caps_list); else if (arg_caps_default_filter) caps_default_filter(); // drop discretionary access control capabilities for root sandboxes // if caps.keep, the user has to set it manually in the list if (!arg_caps_keep) caps_drop_dac_override(); } static void save_nogroups(void) { if (arg_nogroups == 0) return; FILE *fp = fopen(RUN_GROUPS_CFG, "w"); if (fp) { fprintf(fp, "\n"); SET_PERMS_STREAM(fp, 0, 0, 0644); // assume mode 0644 fclose(fp); } else { fprintf(stderr, "Error: cannot save nogroups state\n"); exit(1); } } static void save_nonewprivs(void) { if (arg_nonewprivs == 0) return; FILE *fp = fopen(RUN_NONEWPRIVS_CFG, "wxe"); if (fp) { fprintf(fp, "\n"); SET_PERMS_STREAM(fp, 0, 0, 0644); // assume mode 0644 fclose(fp); } else { fprintf(stderr, "Error: cannot save nonewprivs state\n"); exit(1); } } static void save_umask(void) { FILE *fp = fopen(RUN_UMASK_FILE, "wxe"); if (fp) { fprintf(fp, "%o\n", orig_umask); SET_PERMS_STREAM(fp, 0, 0, 0644); // assume mode 0644 fclose(fp); } else { fprintf(stderr, "Error: cannot save umask\n"); exit(1); } } static FILE *create_ready_for_join_file(void) { FILE *fp = fopen(RUN_READY_FOR_JOIN, "wxe"); if (fp) { ASSERT_PERMS_STREAM(fp, 0, 0, 0644); return fp; } else { fprintf(stderr, "Error: cannot create %s\n", 
RUN_READY_FOR_JOIN); exit(1); } } static void sandbox_if_up(Bridge *br) { assert(br); if (!br->configured) return; char *dev = br->devsandbox; net_if_up(dev); if (br->arg_ip_none == 1); // do nothing else if (br->arg_ip_none == 0 && br->macvlan == 0) { if (br->ipsandbox == br->ip) { fprintf(stderr, "Error: %d.%d.%d.%d is interface %s address.\n", PRINT_IP(br->ipsandbox), br->dev); exit(1); } // just assign the address assert(br->ipsandbox); if (arg_debug) printf("Configuring %d.%d.%d.%d address on interface %s\n", PRINT_IP(br->ipsandbox), dev); net_config_interface(dev, br->ipsandbox, br->mask, br->mtu); arp_announce(dev, br); } else if (br->arg_ip_none == 0 && br->macvlan == 1) { // reassign the macvlan address if (br->ipsandbox == 0) // ip address assigned by arp-scan for a macvlan device br->ipsandbox = arp_assign(dev, br); //br->ip, br->mask); else { if (br->ipsandbox == br->ip) { fprintf(stderr, "Error: %d.%d.%d.%d is interface %s address.\n", PRINT_IP(br->ipsandbox), br->dev); exit(1); } uint32_t rv = arp_check(dev, br->ipsandbox); if (rv) { fprintf(stderr, "Error: the address %d.%d.%d.%d is already in use.\n", PRINT_IP(br->ipsandbox)); exit(1); } } if (arg_debug) printf("Configuring %d.%d.%d.%d address on interface %s\n", PRINT_IP(br->ipsandbox), dev); net_config_interface(dev, br->ipsandbox, br->mask, br->mtu); arp_announce(dev, br); } if (br->ip6sandbox) net_if_ip6(dev, br->ip6sandbox); } static void chk_chroot(void) { // if we are starting firejail inside some other container technology, we don't care about this char *mycont = getenv("container"); if (mycont) return; // check if this is a regular chroot struct stat s; if (stat("/", &s) == 0) { if (s.st_ino != 2) return; } fprintf(stderr, "Error: cannot mount filesystem as slave\n"); exit(1); } static int monitor_application(pid_t app_pid) { EUID_ASSERT(); monitored_pid = app_pid; // block signals and install handler sigset_t oldmask, newmask; sigemptyset(&oldmask); sigemptyset(&newmask); 
sigaddset(&newmask, SIGTERM); sigaddset(&newmask, SIGINT); sigprocmask(SIG_BLOCK, &newmask, &oldmask); install_handler(); // handle --timeout int options = 0;; unsigned timeout = 0; if (cfg.timeout) { options = WNOHANG; timeout = cfg.timeout; } int status = 0; while (monitored_pid) { usleep(20000); char *msg; if (asprintf(&msg, "monitoring pid %d\n", monitored_pid) == -1) errExit("asprintf"); logmsg(msg); if (arg_debug) printf("%s\n", msg); free(msg); pid_t rv; do { // handle signals asynchronously sigprocmask(SIG_SETMASK, &oldmask, NULL); rv = waitpid(-1, &status, options); // block signals again sigprocmask(SIG_BLOCK, &newmask, NULL); if (rv == -1) { // we can get here if we have processes joining the sandbox (ECHILD) sleep(1); break; } // handle --timeout if (options) { if (--timeout == 0) { kill(-1, SIGTERM); sleep(1); flush_stdin(); _exit(1); } else sleep(1); } } while(rv != monitored_pid); if (arg_debug) printf("Sandbox monitor: waitpid %d retval %d status %d\n", monitored_pid, rv, status); DIR *dir; if (!(dir = opendir("/proc"))) { // sleep 2 seconds and try again sleep(2); if (!(dir = opendir("/proc"))) { fprintf(stderr, "Error: cannot open /proc directory\n"); exit(1); } } struct dirent *entry; monitored_pid = 0; while ((entry = readdir(dir)) != NULL) { unsigned pid; if (sscanf(entry->d_name, "%u", &pid) != 1) continue; if (pid == 1) continue; // todo: make this generic // Dillo browser leaves a dpid process running, we need to shut it down int found = 0; if (strcmp(cfg.command_name, "dillo") == 0) { char *pidname = pid_proc_comm(pid); if (pidname && strcmp(pidname, "dpid") == 0) found = 1; free(pidname); } if (found) break; monitored_pid = pid; break; } closedir(dir); if (monitored_pid != 0 && arg_debug) printf("Sandbox monitor: monitoring %d\n", monitored_pid); } // return the latest exit status. 
return status; } static void print_time(void) { if (start_timestamp) { unsigned long long end_timestamp = getticks(); // measure 1 ms usleep(1000); unsigned long long onems = getticks() - end_timestamp; if (onems) { fmessage("Child process initialized in %.02f ms\n", (float) (end_timestamp - start_timestamp) / (float) onems); return; } } fmessage("Child process initialized\n"); } // check execute permissions for the program // this is done typically by the shell // we are here because of --shell=none // we duplicate execvp functionality (man execvp): // [...] if the specified // filename does not contain a slash (/) character. The file is sought // in the colon-separated list of directory pathnames specified in the // PATH environment variable. static int ok_to_run(const char *program) { if (strstr(program, "/")) { if (access(program, X_OK) == 0) // it will also dereference symlinks return 1; } else { // search $PATH char *path1 = getenv("PATH"); if (path1) { if (arg_debug) printf("Searching $PATH for %s\n", program); char *path2 = strdup(path1); if (!path2) errExit("strdup"); // use path2 to count the entries char *ptr = strtok(path2, ":"); while (ptr) { char *fname; if (asprintf(&fname, "%s/%s", ptr, program) == -1) errExit("asprintf"); if (arg_debug) printf("trying #%s#\n", fname); struct stat s; int rv = stat(fname, &s); if (rv == 0) { if (access(fname, X_OK) == 0) { free(path2); free(fname); return 1; } else fprintf(stderr, "Error: execute permission denied for %s\n", fname); free(fname); break; } free(fname); ptr = strtok(NULL, ":"); } free(path2); } } return 0; } void start_application(int no_sandbox, FILE *fp) { // set environment if (no_sandbox == 0) { env_defaults(); env_apply(); } // restore original umask umask(orig_umask); if (arg_debug) { printf("starting application\n"); printf("LD_PRELOAD=%s\n", getenv("LD_PRELOAD")); } //**************************************** // audit //**************************************** if (arg_audit) { 
assert(arg_audit_prog); if (fp) { fprintf(fp, "ready\n"); fclose(fp); } #ifdef HAVE_GCOV __gcov_dump(); #endif #ifdef HAVE_SECCOMP seccomp_install_filters(); #endif execl(arg_audit_prog, arg_audit_prog, NULL); perror("execl"); exit(1); } //**************************************** // start the program without using a shell //**************************************** else if (arg_shell_none) { if (arg_debug) { int i; for (i = cfg.original_program_index; i < cfg.original_argc; i++) { if (cfg.original_argv[i] == NULL) break; printf("execvp argument %d: %s\n", i - cfg.original_program_index, cfg.original_argv[i]); } } if (cfg.original_program_index == 0) { fprintf(stderr, "Error: --shell=none configured, but no program specified\n"); exit(1); } if (!arg_command && !arg_quiet) print_time(); int rv = ok_to_run(cfg.original_argv[cfg.original_program_index]); if (fp) { fprintf(fp, "ready\n"); fclose(fp); } #ifdef HAVE_GCOV __gcov_dump(); #endif #ifdef HAVE_SECCOMP seccomp_install_filters(); #endif if (rv) execvp(cfg.original_argv[cfg.original_program_index], &cfg.original_argv[cfg.original_program_index]); else fprintf(stderr, "Error: no suitable %s executable found\n", cfg.original_argv[cfg.original_program_index]); exit(1); } //**************************************** // start the program using a shell //**************************************** else { assert(cfg.shell); assert(cfg.command_line); char *arg[5]; int index = 0; arg[index++] = cfg.shell; if (login_shell) { arg[index++] = "-l"; if (arg_debug) printf("Starting %s login shell\n", cfg.shell); } else { arg[index++] = "-c"; if (arg_debug) printf("Running %s command through %s\n", cfg.command_line, cfg.shell); if (arg_doubledash) arg[index++] = "--"; arg[index++] = cfg.command_line; } arg[index] = NULL; assert(index < 5); if (arg_debug) { char *msg; if (asprintf(&msg, "sandbox %d, execvp into %s", sandbox_pid, cfg.command_line) == -1) errExit("asprintf"); logmsg(msg); free(msg); } if (arg_debug) { int i; for (i = 0; i 
< 5; i++) { if (arg[i] == NULL) break; printf("execvp argument %d: %s\n", i, arg[i]); } } if (!arg_command && !arg_quiet) print_time(); if (fp) { fprintf(fp, "ready\n"); fclose(fp); } #ifdef HAVE_GCOV __gcov_dump(); #endif #ifdef HAVE_SECCOMP seccomp_install_filters(); #endif execvp(arg[0], arg); } perror("execvp"); exit(1); // it should never get here!!! } static void enforce_filters(void) { // enforce NO_NEW_PRIVS arg_nonewprivs = 1; force_nonewprivs = 1; // disable all capabilities fmessage("\n** Warning: dropping all Linux capabilities **\n"); arg_caps_drop_all = 1; // drop all supplementary groups; /etc/group file inside chroot // is controlled by a regular usr arg_nogroups = 1; } int sandbox(void* sandbox_arg) { // Get rid of unused parameter warning (void)sandbox_arg; pid_t child_pid = getpid(); if (arg_debug) printf("Initializing child process\n"); // close each end of the unused pipes close(parent_to_child_fds[1]); close(child_to_parent_fds[0]); // wait for parent to do base setup wait_for_other(parent_to_child_fds[0]); if (arg_debug && child_pid == 1) printf("PID namespace installed\n"); //**************************** // set hostname //**************************** if (cfg.hostname) { if (sethostname(cfg.hostname, strlen(cfg.hostname)) < 0) errExit("sethostname"); } //**************************** // mount namespace //**************************** // mount events are not forwarded between the host the sandbox if (mount(NULL, "/", NULL, MS_SLAVE | MS_REC, NULL) < 0) { chk_chroot(); } // ... 
and mount a tmpfs on top of /run/firejail/mnt directory preproc_mount_mnt_dir(); // bind-mount firejail binaries and helper programs if (mount(LIBDIR "/firejail", RUN_FIREJAIL_LIB_DIR, "none", MS_BIND, NULL) < 0) errExit("mounting " RUN_FIREJAIL_LIB_DIR); //**************************** // log sandbox data //**************************** if (cfg.name) fs_logger2("sandbox name:", cfg.name); fs_logger2int("sandbox pid:", (int) sandbox_pid); if (cfg.chrootdir) fs_logger("sandbox filesystem: chroot"); else if (arg_overlay) fs_logger("sandbox filesystem: overlay"); else fs_logger("sandbox filesystem: local"); fs_logger("install mount namespace"); //**************************** // netfilter //**************************** if (arg_netfilter && any_bridge_configured()) { // assuming by default the client filter netfilter(arg_netfilter_file); } if (arg_netfilter6 && any_bridge_configured()) { // assuming by default the client filter netfilter6(arg_netfilter6_file); } //**************************** // networking //**************************** int gw_cfg_failed = 0; // default gw configuration flag if (arg_nonetwork) { net_if_up("lo"); if (arg_debug) printf("Network namespace enabled, only loopback interface available\n"); } else if (arg_netns) { netns(arg_netns); if (arg_debug) printf("Network namespace '%s' activated\n", arg_netns); } else if (any_bridge_configured() || any_interface_configured()) { // configure lo and eth0...eth3 net_if_up("lo"); if (mac_not_zero(cfg.bridge0.macsandbox)) net_config_mac(cfg.bridge0.devsandbox, cfg.bridge0.macsandbox); sandbox_if_up(&cfg.bridge0); if (mac_not_zero(cfg.bridge1.macsandbox)) net_config_mac(cfg.bridge1.devsandbox, cfg.bridge1.macsandbox); sandbox_if_up(&cfg.bridge1); if (mac_not_zero(cfg.bridge2.macsandbox)) net_config_mac(cfg.bridge2.devsandbox, cfg.bridge2.macsandbox); sandbox_if_up(&cfg.bridge2); if (mac_not_zero(cfg.bridge3.macsandbox)) net_config_mac(cfg.bridge3.devsandbox, cfg.bridge3.macsandbox); sandbox_if_up(&cfg.bridge3); 
// moving an interface in a namespace using --interface will reset the interface configuration; // we need to put the configuration back if (cfg.interface0.configured && cfg.interface0.ip) { if (arg_debug) printf("Configuring %d.%d.%d.%d address on interface %s\n", PRINT_IP(cfg.interface0.ip), cfg.interface0.dev); net_config_interface(cfg.interface0.dev, cfg.interface0.ip, cfg.interface0.mask, cfg.interface0.mtu); } if (cfg.interface1.configured && cfg.interface1.ip) { if (arg_debug) printf("Configuring %d.%d.%d.%d address on interface %s\n", PRINT_IP(cfg.interface1.ip), cfg.interface1.dev); net_config_interface(cfg.interface1.dev, cfg.interface1.ip, cfg.interface1.mask, cfg.interface1.mtu); } if (cfg.interface2.configured && cfg.interface2.ip) { if (arg_debug) printf("Configuring %d.%d.%d.%d address on interface %s\n", PRINT_IP(cfg.interface2.ip), cfg.interface2.dev); net_config_interface(cfg.interface2.dev, cfg.interface2.ip, cfg.interface2.mask, cfg.interface2.mtu); } if (cfg.interface3.configured && cfg.interface3.ip) { if (arg_debug) printf("Configuring %d.%d.%d.%d address on interface %s\n", PRINT_IP(cfg.interface3.ip), cfg.interface3.dev); net_config_interface(cfg.interface3.dev, cfg.interface3.ip, cfg.interface3.mask, cfg.interface3.mtu); } // add a default route if (cfg.defaultgw) { // set the default route if (net_add_route(0, 0, cfg.defaultgw)) { fwarning("cannot configure default route\n"); gw_cfg_failed = 1; } } if (arg_debug) printf("Network namespace enabled\n"); } // print network configuration if (!arg_quiet) { if (any_bridge_configured() || any_interface_configured() || cfg.defaultgw || cfg.dns1) { fmessage("\n"); if (any_bridge_configured() || any_interface_configured()) { if (arg_scan) sbox_run(SBOX_ROOT | SBOX_CAPS_NETWORK | SBOX_SECCOMP, 3, PATH_FNET, "printif", "scan"); else sbox_run(SBOX_ROOT | SBOX_CAPS_NETWORK | SBOX_SECCOMP, 2, PATH_FNET, "printif"); } if (cfg.defaultgw != 0) { if (gw_cfg_failed) fmessage("Default gateway configuration 
failed\n"); else fmessage("Default gateway %d.%d.%d.%d\n", PRINT_IP(cfg.defaultgw)); } if (cfg.dns1 != NULL) fmessage("DNS server %s\n", cfg.dns1); if (cfg.dns2 != NULL) fmessage("DNS server %s\n", cfg.dns2); if (cfg.dns3 != NULL) fmessage("DNS server %s\n", cfg.dns3); if (cfg.dns4 != NULL) fmessage("DNS server %s\n", cfg.dns4); fmessage("\n"); } } // load IBUS env variables if (arg_nonetwork || any_bridge_configured() || any_interface_configured()) { // do nothing - there are problems with ibus version 1.5.11 } else { EUID_USER(); env_ibus_load(); EUID_ROOT(); } //**************************** // fs pre-processing: // - build seccomp filters // - create an empty /etc/ld.so.preload //**************************** #ifdef HAVE_SECCOMP if (cfg.protocol) { if (arg_debug) printf("Build protocol filter: %s\n", cfg.protocol); // build the seccomp filter as a regular user int rv = sbox_run(SBOX_USER | SBOX_CAPS_NONE | SBOX_SECCOMP, 5, PATH_FSECCOMP, "protocol", "build", cfg.protocol, RUN_SECCOMP_PROTOCOL); if (rv) exit(rv); } if (arg_seccomp && (cfg.seccomp_list || cfg.seccomp_list_drop || cfg.seccomp_list_keep)) arg_seccomp_postexec = 1; #endif // need ld.so.preload if tracing or seccomp with any non-default lists bool need_preload = arg_trace || arg_tracelog || arg_seccomp_postexec; // for --appimage, --chroot and --overlay* we force NO_NEW_PRIVS // and drop all capabilities if (getuid() != 0 && (arg_appimage || cfg.chrootdir || arg_overlay)) { enforce_filters(); need_preload = arg_trace || arg_tracelog; } // trace pre-install if (need_preload) fs_trace_preload(); // store hosts file if (cfg.hosts_file) fs_store_hosts_file(); //**************************** // configure filesystem //**************************** #ifdef HAVE_CHROOT if (cfg.chrootdir) { fs_chroot(cfg.chrootdir); //**************************** // trace pre-install, this time inside chroot //**************************** if (need_preload) fs_trace_preload(); } else #endif #ifdef HAVE_OVERLAYFS if (arg_overlay) 
fs_overlayfs(); else #endif fs_basic_fs(); //**************************** // private mode //**************************** if (arg_private) { if (cfg.home_private) { // --private= if (cfg.chrootdir) fwarning("private=directory feature is disabled in chroot\n"); else if (arg_overlay) fwarning("private=directory feature is disabled in overlay\n"); else fs_private_homedir(); } else if (cfg.home_private_keep) { // --private-home= if (cfg.chrootdir) fwarning("private-home= feature is disabled in chroot\n"); else if (arg_overlay) fwarning("private-home= feature is disabled in overlay\n"); else fs_private_home_list(); } else // --private fs_private(); } if (arg_private_dev) fs_private_dev(); if (arg_private_etc) { if (cfg.chrootdir) fwarning("private-etc feature is disabled in chroot\n"); else if (arg_overlay) fwarning("private-etc feature is disabled in overlay\n"); else { fs_private_dir_list("/etc", RUN_ETC_DIR, cfg.etc_private_keep); // create /etc/ld.so.preload file again if (need_preload) fs_trace_preload(); } } if (arg_private_opt) { if (cfg.chrootdir) fwarning("private-opt feature is disabled in chroot\n"); else if (arg_overlay) fwarning("private-opt feature is disabled in overlay\n"); else { fs_private_dir_list("/opt", RUN_OPT_DIR, cfg.opt_private_keep); } } if (arg_private_srv) { if (cfg.chrootdir) fwarning("private-srv feature is disabled in chroot\n"); else if (arg_overlay) fwarning("private-srv feature is disabled in overlay\n"); else { fs_private_dir_list("/srv", RUN_SRV_DIR, cfg.srv_private_keep); } } // private-bin is disabled for appimages if (arg_private_bin && !arg_appimage) { if (cfg.chrootdir) fwarning("private-bin feature is disabled in chroot\n"); else if (arg_overlay) fwarning("private-bin feature is disabled in overlay\n"); else { // for --x11=xorg we need to add xauth command if (arg_x11_xorg) { EUID_USER(); char *tmp; if (asprintf(&tmp, "%s,xauth", cfg.bin_private_keep) == -1) errExit("asprintf"); cfg.bin_private_keep = tmp; EUID_ROOT(); } 
fs_private_bin_list(); } } // private-lib is disabled for appimages if (arg_private_lib && !arg_appimage) { if (cfg.chrootdir) fwarning("private-lib feature is disabled in chroot\n"); else if (arg_overlay) fwarning("private-lib feature is disabled in overlay\n"); else { fs_private_lib(); } } if (arg_private_cache) { if (cfg.chrootdir) fwarning("private-cache feature is disabled in chroot\n"); else if (arg_overlay) fwarning("private-cache feature is disabled in overlay\n"); else fs_private_cache(); } if (arg_private_tmp) { // private-tmp is implemented as a whitelist EUID_USER(); fs_private_tmp(); EUID_ROOT(); } //**************************** // Session D-BUS //**************************** if (arg_nodbus) dbus_session_disable(); //**************************** // hosts and hostname //**************************** if (cfg.hostname) fs_hostname(cfg.hostname); if (cfg.hosts_file) fs_mount_hosts_file(); //**************************** // /etc overrides from the network namespace //**************************** if (arg_netns) netns_mounts(arg_netns); //**************************** // update /proc, /sys, /dev, /boot directory //**************************** fs_proc_sys_dev_boot(); //**************************** // handle /mnt and /media //**************************** if (checkcfg(CFG_DISABLE_MNT)) fs_mnt(1); else if (arg_disable_mnt) fs_mnt(0); //**************************** // apply the profile file //**************************** // apply all whitelist commands ... fs_whitelist(); // ... 
followed by blacklist commands fs_blacklist(); // mkdir and mkfile are processed all over again //**************************** // nosound/no3d/notv/novideo and fix for pulseaudio 7.0 //**************************** if (arg_nosound) { // disable pulseaudio pulseaudio_disable(); // disable /dev/snd fs_dev_disable_sound(); } else if (!arg_noautopulse) pulseaudio_init(); if (arg_no3d) fs_dev_disable_3d(); if (arg_notv) fs_dev_disable_tv(); if (arg_nodvd) fs_dev_disable_dvd(); if (arg_nou2f) fs_dev_disable_u2f(); if (arg_novideo) fs_dev_disable_video(); //**************************** // install trace //**************************** if (need_preload) fs_trace(); //**************************** // set dns //**************************** fs_resolvconf(); //**************************** // fs post-processing //**************************** fs_logger_print(); fs_logger_change_owner(); //**************************** // set application environment //**************************** EUID_USER(); int cwd = 0; if (cfg.cwd) { if (chdir(cfg.cwd) == 0) cwd = 1; } if (!cwd) { if (chdir("/") < 0) errExit("chdir"); if (cfg.homedir) { struct stat s; if (stat(cfg.homedir, &s) == 0) { /* coverity[toctou] */ if (chdir(cfg.homedir) < 0) errExit("chdir"); } } } if (arg_debug) { char *cpath = get_current_dir_name(); if (cpath) { printf("Current directory: %s\n", cpath); free(cpath); } } EUID_ROOT(); // clean /tmp/.X11-unix sockets fs_x11(); if (arg_x11_xorg) x11_xorg(); // save original umask save_umask(); //**************************** // set security filters //**************************** // save state of nonewprivs save_nonewprivs(); // set capabilities set_caps(); // save cpu affinity mask to CPU_CFG file save_cpu(); // save cgroup in CGROUP_CFG file save_cgroup(); // set seccomp #ifdef HAVE_SECCOMP // install protocol filter #ifdef SYS_socket if (cfg.protocol) { if (arg_debug) printf("Install protocol filter: %s\n", cfg.protocol); seccomp_load(RUN_SECCOMP_PROTOCOL); // install filter 
protocol_filter_save(); // save filter in RUN_PROTOCOL_CFG } else { int rv = unlink(RUN_SECCOMP_PROTOCOL); (void) rv; } #endif // if a keep list is available, disregard the drop list if (arg_seccomp == 1) { if (cfg.seccomp_list_keep) seccomp_filter_keep(); else seccomp_filter_drop(); } else { // clean seccomp files under /run/firejail/mnt int rv = unlink(RUN_SECCOMP_CFG); rv |= unlink(RUN_SECCOMP_32); (void) rv; } if (arg_memory_deny_write_execute) { if (arg_debug) printf("Install memory write&execute filter\n"); seccomp_load(RUN_SECCOMP_MDWX); // install filter } else { int rv = unlink(RUN_SECCOMP_MDWX); (void) rv; } #endif //**************************************** // communicate progress of sandbox set up // to --join //**************************************** FILE *rj = create_ready_for_join_file(); //**************************************** // create a new user namespace // - too early to drop privileges //**************************************** save_nogroups(); if (arg_noroot) { int rv = unshare(CLONE_NEWUSER); if (rv == -1) { fwarning("cannot create a new user namespace, going forward without it...\n"); arg_noroot = 0; } } // notify parent that new user namespace has been created so a proper // UID/GID map can be setup notify_other(child_to_parent_fds[1]); close(child_to_parent_fds[1]); // wait for parent to finish setting up a proper UID/GID map wait_for_other(parent_to_child_fds[0]); close(parent_to_child_fds[0]); // somehow, the new user namespace resets capabilities; // we need to do them again if (arg_noroot) { if (arg_debug) printf("noroot user namespace installed\n"); set_caps(); } //**************************************** // Set NO_NEW_PRIVS if desired //**************************************** if (arg_nonewprivs) { prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); if (prctl(PR_GET_NO_NEW_PRIVS, 0, 0, 0, 0) != 1) { fwarning("cannot set NO_NEW_PRIVS, it requires a Linux kernel version 3.5 or newer.\n"); if (force_nonewprivs) { fprintf(stderr, "Error: 
NO_NEW_PRIVS required for this sandbox, exiting ...\n"); exit(1); } } else if (arg_debug) printf("NO_NEW_PRIVS set\n"); } //**************************************** // drop privileges //**************************************** drop_privs(arg_nogroups); // kill the sandbox in case the parent died prctl(PR_SET_PDEATHSIG, SIGKILL, 0, 0, 0); //**************************************** // set cpu affinity //**************************************** if (cfg.cpus) set_cpu_affinity(); //**************************************** // fork the application and monitor it //**************************************** pid_t app_pid = fork(); if (app_pid == -1) errExit("fork"); if (app_pid == 0) { #ifdef HAVE_APPARMOR if (checkcfg(CFG_APPARMOR) && arg_apparmor) { errno = 0; if (aa_change_onexec("firejail-default")) { fwarning("Cannot confine the application using AppArmor.\n" "Maybe firejail-default AppArmor profile is not loaded into the kernel.\n" "As root, run \"aa-enforce firejail-default\" to load it.\n"); } else if (arg_debug) printf("AppArmor enabled\n"); } #endif // set nice and rlimits if (arg_nice) set_nice(cfg.nice); set_rlimits(); start_application(0, rj); } fclose(rj); int status = monitor_application(app_pid); // monitor application flush_stdin(); if (WIFEXITED(status)) { // if we had a proper exit, return that exit status return WEXITSTATUS(status); } else { // something else went wrong! return -1; } }
./CrossVul/dataset_final_sorted/CWE-284/c/bad_880_3
crossvul-cpp_data_bad_1571_8
/* Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ /* Show databases, tables or columns */ #define SHOW_VERSION "9.10" #include "client_priv.h" #include "my_default.h" #include <my_sys.h> #include <m_string.h> #include <mysql.h> #include <mysqld_error.h> #include <signal.h> #include <stdarg.h> #include <sslopt-vars.h> #include <welcome_copyright_notice.h> /* ORACLE_WELCOME_COPYRIGHT_NOTICE */ static char * host=0, *opt_password=0, *user=0; static my_bool opt_show_keys= 0, opt_compress= 0, opt_count=0, opt_status= 0; static my_bool tty_password= 0, opt_table_type= 0; static my_bool debug_info_flag= 0, debug_check_flag= 0; static uint my_end_arg= 0; static uint opt_verbose=0; static char *default_charset= (char*) MYSQL_AUTODETECT_CHARSET_NAME; static char *opt_plugin_dir= 0, *opt_default_auth= 0; #if defined (_WIN32) && !defined (EMBEDDED_LIBRARY) static char *shared_memory_base_name=0; #endif static uint opt_protocol=0; static char *opt_bind_addr = NULL; static void get_options(int *argc,char ***argv); static uint opt_mysql_port=0; static int list_dbs(MYSQL *mysql,const char *wild); static int list_tables(MYSQL *mysql,const char *db,const char *table); static int list_table_status(MYSQL *mysql,const char *db,const char *table); static int list_fields(MYSQL *mysql,const char *db,const char *table, const char *field); 
static void print_header(const char *header,uint head_length,...); static void print_row(const char *header,uint head_length,...); static void print_trailer(uint length,...); static void print_res_header(MYSQL_RES *result); static void print_res_top(MYSQL_RES *result); static void print_res_row(MYSQL_RES *result,MYSQL_ROW cur); static const char *load_default_groups[]= { "mysqlshow","client",0 }; static char * opt_mysql_unix_port=0; int main(int argc, char **argv) { int error; my_bool first_argument_uses_wildcards=0; char *wild; MYSQL mysql; MY_INIT(argv[0]); my_getopt_use_args_separator= TRUE; if (load_defaults("my",load_default_groups,&argc,&argv)) exit(1); my_getopt_use_args_separator= FALSE; get_options(&argc,&argv); wild=0; if (argc) { char *pos= argv[argc-1], *to; for (to= pos ; *pos ; pos++, to++) { switch (*pos) { case '*': *pos= '%'; first_argument_uses_wildcards= 1; break; case '?': *pos= '_'; first_argument_uses_wildcards= 1; break; case '%': case '_': first_argument_uses_wildcards= 1; break; case '\\': pos++; default: break; } *to= *pos; } *to= *pos; /* just to copy a '\0' if '\\' was used */ } if (first_argument_uses_wildcards) wild= argv[--argc]; else if (argc == 3) /* We only want one field */ wild= argv[--argc]; if (argc > 2) { fprintf(stderr,"%s: Too many arguments\n",my_progname); exit(1); } mysql_init(&mysql); if (opt_compress) mysql_options(&mysql,MYSQL_OPT_COMPRESS,NullS); #ifdef HAVE_OPENSSL if (opt_use_ssl) { mysql_ssl_set(&mysql, opt_ssl_key, opt_ssl_cert, opt_ssl_ca, opt_ssl_capath, opt_ssl_cipher); mysql_options(&mysql, MYSQL_OPT_SSL_CRL, opt_ssl_crl); mysql_options(&mysql, MYSQL_OPT_SSL_CRLPATH, opt_ssl_crlpath); } mysql_options(&mysql,MYSQL_OPT_SSL_VERIFY_SERVER_CERT, (char*)&opt_ssl_verify_server_cert); #endif if (opt_protocol) mysql_options(&mysql,MYSQL_OPT_PROTOCOL,(char*)&opt_protocol); if (opt_bind_addr) mysql_options(&mysql,MYSQL_OPT_BIND,opt_bind_addr); #if defined (_WIN32) && !defined (EMBEDDED_LIBRARY) if 
(shared_memory_base_name) mysql_options(&mysql,MYSQL_SHARED_MEMORY_BASE_NAME,shared_memory_base_name); #endif mysql_options(&mysql, MYSQL_SET_CHARSET_NAME, default_charset); if (opt_plugin_dir && *opt_plugin_dir) mysql_options(&mysql, MYSQL_PLUGIN_DIR, opt_plugin_dir); if (opt_default_auth && *opt_default_auth) mysql_options(&mysql, MYSQL_DEFAULT_AUTH, opt_default_auth); mysql_options(&mysql, MYSQL_OPT_CONNECT_ATTR_RESET, 0); mysql_options4(&mysql, MYSQL_OPT_CONNECT_ATTR_ADD, "program_name", "mysqlshow"); if (!(mysql_real_connect(&mysql,host,user,opt_password, (first_argument_uses_wildcards) ? "" : argv[0],opt_mysql_port,opt_mysql_unix_port, 0))) { fprintf(stderr,"%s: %s\n",my_progname,mysql_error(&mysql)); exit(1); } mysql.reconnect= 1; switch (argc) { case 0: error=list_dbs(&mysql,wild); break; case 1: if (opt_status) error=list_table_status(&mysql,argv[0],wild); else error=list_tables(&mysql,argv[0],wild); break; default: if (opt_status && ! wild) error=list_table_status(&mysql,argv[0],argv[1]); else error=list_fields(&mysql,argv[0],argv[1],wild); break; } mysql_close(&mysql); /* Close & free connection */ my_free(opt_password); #if defined (_WIN32) && !defined (EMBEDDED_LIBRARY) my_free(shared_memory_base_name); #endif my_end(my_end_arg); exit(error ? 
1 : 0); return 0; /* No compiler warnings */ } static struct my_option my_long_options[] = { {"bind-address", 0, "IP address to bind to.", (uchar**) &opt_bind_addr, (uchar**) &opt_bind_addr, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"character-sets-dir", 'c', "Directory for character set files.", &charsets_dir, &charsets_dir, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"default-character-set", OPT_DEFAULT_CHARSET, "Set the default character set.", &default_charset, &default_charset, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"count", OPT_COUNT, "Show number of rows per table (may be slow for non-MyISAM tables).", &opt_count, &opt_count, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"compress", 'C', "Use compression in server/client protocol.", &opt_compress, &opt_compress, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"debug", '#', "Output debug log. Often this is 'd:t:o,filename'.", 0, 0, 0, GET_STR, OPT_ARG, 0, 0, 0, 0, 0, 0}, {"debug-check", OPT_DEBUG_CHECK, "Check memory and open file usage at exit.", &debug_check_flag, &debug_check_flag, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"debug-info", OPT_DEBUG_INFO, "Print some debug info at exit.", &debug_info_flag, &debug_info_flag, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"default_auth", OPT_DEFAULT_AUTH, "Default authentication client-side plugin to use.", &opt_default_auth, &opt_default_auth, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"help", '?', "Display this help and exit.", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, {"host", 'h', "Connect to host.", &host, &host, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"status", 'i', "Shows a lot of extra information about each table.", &opt_status, &opt_status, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"keys", 'k', "Show keys for table.", &opt_show_keys, &opt_show_keys, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"password", 'p', "Password to use when connecting to server. 
If password is not given, it's " "solicited on the tty.", 0, 0, 0, GET_PASSWORD, OPT_ARG, 0, 0, 0, 0, 0, 0}, {"plugin_dir", OPT_PLUGIN_DIR, "Directory for client-side plugins.", &opt_plugin_dir, &opt_plugin_dir, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"port", 'P', "Port number to use for connection or 0 for default to, in " "order of preference, my.cnf, $MYSQL_TCP_PORT, " #if MYSQL_PORT_DEFAULT == 0 "/etc/services, " #endif "built-in default (" STRINGIFY_ARG(MYSQL_PORT) ").", &opt_mysql_port, &opt_mysql_port, 0, GET_UINT, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, #ifdef _WIN32 {"pipe", 'W', "Use named pipes to connect to server.", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, #endif {"protocol", OPT_MYSQL_PROTOCOL, "The protocol to use for connection (tcp, socket, pipe, memory).", 0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, #if defined (_WIN32) && !defined (EMBEDDED_LIBRARY) {"shared-memory-base-name", OPT_SHARED_MEMORY_BASE_NAME, "Base name of shared memory.", &shared_memory_base_name, &shared_memory_base_name, 0, GET_STR_ALLOC, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, #endif {"show-table-type", 't', "Show table type column.", &opt_table_type, &opt_table_type, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"socket", 'S', "The socket file to use for connection.", &opt_mysql_unix_port, &opt_mysql_unix_port, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, #include <sslopt-longopts.h> {"user", 'u', "User for login if not current user.", &user, &user, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"verbose", 'v', "More verbose output; you can use this multiple times to get even more " "verbose output.", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, {"version", 'V', "Output version information and exit.", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0} }; static void print_version(void) { printf("%s Ver %s Distrib %s, for %s (%s)\n",my_progname,SHOW_VERSION, MYSQL_SERVER_VERSION,SYSTEM_TYPE,MACHINE_TYPE); } static void 
usage(void) { print_version(); puts(ORACLE_WELCOME_COPYRIGHT_NOTICE("2000")); puts("Shows the structure of a MySQL database (databases, tables, and columns).\n"); printf("Usage: %s [OPTIONS] [database [table [column]]]\n",my_progname); puts("\n\ If last argument contains a shell or SQL wildcard (*,?,% or _) then only\n\ what\'s matched by the wildcard is shown.\n\ If no database is given then all matching databases are shown.\n\ If no table is given, then all matching tables in database are shown.\n\ If no column is given, then all matching columns and column types in table\n\ are shown."); print_defaults("my",load_default_groups); my_print_help(my_long_options); my_print_variables(my_long_options); } static my_bool get_one_option(int optid, const struct my_option *opt __attribute__((unused)), char *argument) { switch(optid) { case 'v': opt_verbose++; break; case 'p': if (argument == disabled_my_option) argument= (char*) ""; /* Don't require password */ if (argument) { char *start=argument; my_free(opt_password); opt_password=my_strdup(PSI_NOT_INSTRUMENTED, argument,MYF(MY_FAE)); while (*argument) *argument++= 'x'; /* Destroy argument */ if (*start) start[1]=0; /* Cut length of argument */ tty_password= 0; } else tty_password=1; break; case 'W': #ifdef _WIN32 opt_protocol = MYSQL_PROTOCOL_PIPE; #endif break; case OPT_MYSQL_PROTOCOL: opt_protocol= find_type_or_exit(argument, &sql_protocol_typelib, opt->name); break; case '#': DBUG_PUSH(argument ? 
argument : "d:t:o"); debug_check_flag= 1; break; #include <sslopt-case.h> case 'V': print_version(); exit(0); break; case '?': case 'I': /* Info */ usage(); exit(0); } return 0; } static void get_options(int *argc,char ***argv) { int ho_error; if ((ho_error=handle_options(argc, argv, my_long_options, get_one_option))) exit(ho_error); if (tty_password) opt_password=get_tty_password(NullS); if (opt_count) { /* We need to set verbose to 2 as we need to change the output to include the number-of-rows column */ opt_verbose= 2; } if (debug_info_flag) my_end_arg= MY_CHECK_ERROR | MY_GIVE_INFO; if (debug_check_flag) my_end_arg= MY_CHECK_ERROR; return; } static int list_dbs(MYSQL *mysql,const char *wild) { const char *header; uint length, counter = 0; ulong rowcount = 0L; char tables[NAME_LEN+1], rows[NAME_LEN+1]; char query[NAME_LEN + 100]; MYSQL_FIELD *field; MYSQL_RES *result; MYSQL_ROW row= NULL, rrow; if (!(result=mysql_list_dbs(mysql,wild))) { fprintf(stderr,"%s: Cannot list databases: %s\n",my_progname, mysql_error(mysql)); return 1; } /* If a wildcard was used, but there was only one row and it's name is an exact match, we'll assume they really wanted to see the contents of that database. This is because it is fairly common for database names to contain the underscore (_), like INFORMATION_SCHEMA. 
*/ if (wild && mysql_num_rows(result) == 1) { row= mysql_fetch_row(result); if (!my_strcasecmp(&my_charset_latin1, row[0], wild)) { mysql_free_result(result); if (opt_status) return list_table_status(mysql, wild, NULL); else return list_tables(mysql, wild, NULL); } } if (wild) printf("Wildcard: %s\n",wild); header="Databases"; length=(uint) strlen(header); field=mysql_fetch_field(result); if (length < field->max_length) length=field->max_length; if (!opt_verbose) print_header(header,length,NullS); else if (opt_verbose == 1) print_header(header,length,"Tables",6,NullS); else print_header(header,length,"Tables",6,"Total Rows",12,NullS); /* The first row may have already been read up above. */ while (row || (row= mysql_fetch_row(result))) { counter++; if (opt_verbose) { if (!(mysql_select_db(mysql,row[0]))) { MYSQL_RES *tresult = mysql_list_tables(mysql,(char*)NULL); if (mysql_affected_rows(mysql) > 0) { sprintf(tables,"%6lu",(ulong) mysql_affected_rows(mysql)); rowcount = 0; if (opt_verbose > 1) { /* Print the count of tables and rows for each database */ MYSQL_ROW trow; while ((trow = mysql_fetch_row(tresult))) { my_snprintf(query, sizeof(query), "SELECT COUNT(*) FROM `%s`", trow[0]); if (!(mysql_query(mysql,query))) { MYSQL_RES *rresult; if ((rresult = mysql_store_result(mysql))) { rrow = mysql_fetch_row(rresult); rowcount += (ulong) strtoull(rrow[0], (char**) 0, 10); mysql_free_result(rresult); } } } sprintf(rows,"%12lu",rowcount); } } else { sprintf(tables,"%6d",0); sprintf(rows,"%12d",0); } mysql_free_result(tresult); } else { my_stpcpy(tables,"N/A"); my_stpcpy(rows,"N/A"); } } if (!opt_verbose) print_row(row[0],length,0); else if (opt_verbose == 1) print_row(row[0],length,tables,6,NullS); else print_row(row[0],length,tables,6,rows,12,NullS); row= NULL; } print_trailer(length, (opt_verbose > 0 ? 6 : 0), (opt_verbose > 1 ? 12 :0), 0); if (counter && opt_verbose) printf("%u row%s in set.\n",counter,(counter > 1) ? 
"s" : ""); mysql_free_result(result); return 0; } static int list_tables(MYSQL *mysql,const char *db,const char *table) { const char *header; uint head_length, counter = 0; char query[NAME_LEN + 100], rows[NAME_LEN], fields[16]; MYSQL_FIELD *field; MYSQL_RES *result; MYSQL_ROW row, rrow; if (mysql_select_db(mysql,db)) { fprintf(stderr,"%s: Cannot connect to db %s: %s\n",my_progname,db, mysql_error(mysql)); return 1; } if (table) { /* We just hijack the 'rows' variable for a bit to store the escaped table name */ mysql_real_escape_string(mysql, rows, table, (unsigned long)strlen(table)); my_snprintf(query, sizeof(query), "show%s tables like '%s'", opt_table_type ? " full" : "", rows); } else my_snprintf(query, sizeof(query), "show%s tables", opt_table_type ? " full" : ""); if (mysql_query(mysql, query) || !(result= mysql_store_result(mysql))) { fprintf(stderr,"%s: Cannot list tables in %s: %s\n",my_progname,db, mysql_error(mysql)); exit(1); } printf("Database: %s",db); if (table) printf(" Wildcard: %s",table); putchar('\n'); header="Tables"; head_length=(uint) strlen(header); field=mysql_fetch_field(result); if (head_length < field->max_length) head_length=field->max_length; if (opt_table_type) { if (!opt_verbose) print_header(header,head_length,"table_type",10,NullS); else if (opt_verbose == 1) print_header(header,head_length,"table_type",10,"Columns",8,NullS); else { print_header(header,head_length,"table_type",10,"Columns",8, "Total Rows",10,NullS); } } else { if (!opt_verbose) print_header(header,head_length,NullS); else if (opt_verbose == 1) print_header(header,head_length,"Columns",8,NullS); else print_header(header,head_length,"Columns",8, "Total Rows",10,NullS); } while ((row = mysql_fetch_row(result))) { counter++; if (opt_verbose > 0) { if (!(mysql_select_db(mysql,db))) { MYSQL_RES *rresult = mysql_list_fields(mysql,row[0],NULL); ulong rowcount=0L; if (!rresult) { my_stpcpy(fields,"N/A"); my_stpcpy(rows,"N/A"); } else { sprintf(fields,"%8u",(uint) 
mysql_num_fields(rresult)); mysql_free_result(rresult); if (opt_verbose > 1) { /* Print the count of rows for each table */ my_snprintf(query, sizeof(query), "SELECT COUNT(*) FROM `%s`", row[0]); if (!(mysql_query(mysql,query))) { if ((rresult = mysql_store_result(mysql))) { rrow = mysql_fetch_row(rresult); rowcount += (unsigned long) strtoull(rrow[0], (char**) 0, 10); mysql_free_result(rresult); } sprintf(rows,"%10lu",rowcount); } else sprintf(rows,"%10d",0); } } } else { my_stpcpy(fields,"N/A"); my_stpcpy(rows,"N/A"); } } if (opt_table_type) { if (!opt_verbose) print_row(row[0],head_length,row[1],10,NullS); else if (opt_verbose == 1) print_row(row[0],head_length,row[1],10,fields,8,NullS); else print_row(row[0],head_length,row[1],10,fields,8,rows,10,NullS); } else { if (!opt_verbose) print_row(row[0],head_length,NullS); else if (opt_verbose == 1) print_row(row[0],head_length, fields,8, NullS); else print_row(row[0],head_length, fields,8, rows,10, NullS); } } print_trailer(head_length, (opt_table_type ? 10 : opt_verbose > 0 ? 8 : 0), (opt_table_type ? (opt_verbose > 0 ? 8 : 0) : (opt_verbose > 1 ? 10 :0)), !opt_table_type ? 0 : opt_verbose > 1 ? 10 :0, 0); if (counter && opt_verbose) printf("%u row%s in set.\n\n",counter,(counter > 1) ? "s" : ""); mysql_free_result(result); return 0; } static int list_table_status(MYSQL *mysql,const char *db,const char *wild) { char query[NAME_LEN + 100]; int len; MYSQL_RES *result; MYSQL_ROW row; len= sizeof(query); len-= my_snprintf(query, len, "show table status from `%s`", db); if (wild && wild[0] && len) strxnmov(query + strlen(query), len - 1, " like '", wild, "'", NullS); if (mysql_query(mysql,query) || !(result=mysql_store_result(mysql))) { fprintf(stderr,"%s: Cannot get status for db: %s, table: %s: %s\n", my_progname,db,wild ? 
wild : "",mysql_error(mysql)); if (mysql_errno(mysql) == ER_PARSE_ERROR) fprintf(stderr,"This error probably means that your MySQL server doesn't support the\n\'show table status' command.\n"); return 1; } printf("Database: %s",db); if (wild) printf(" Wildcard: %s",wild); putchar('\n'); print_res_header(result); while ((row=mysql_fetch_row(result))) print_res_row(result,row); print_res_top(result); mysql_free_result(result); return 0; } /* list fields uses field interface as an example of how to parse a MYSQL FIELD */ static int list_fields(MYSQL *mysql,const char *db,const char *table, const char *wild) { char query[NAME_LEN + 100]; int len; MYSQL_RES *result; MYSQL_ROW row; ulong UNINIT_VAR(rows); if (mysql_select_db(mysql,db)) { fprintf(stderr,"%s: Cannot connect to db: %s: %s\n",my_progname,db, mysql_error(mysql)); return 1; } if (opt_count) { my_snprintf(query, sizeof(query), "select count(*) from `%s`", table); if (mysql_query(mysql,query) || !(result=mysql_store_result(mysql))) { fprintf(stderr,"%s: Cannot get record count for db: %s, table: %s: %s\n", my_progname,db,table,mysql_error(mysql)); return 1; } row= mysql_fetch_row(result); rows= (ulong) strtoull(row[0], (char**) 0, 10); mysql_free_result(result); } len= sizeof(query); len-= my_snprintf(query, len, "show /*!32332 FULL */ columns from `%s`", table); if (wild && wild[0] && len) strxnmov(query + strlen(query), len - 1, " like '", wild, "'", NullS); if (mysql_query(mysql,query) || !(result=mysql_store_result(mysql))) { fprintf(stderr,"%s: Cannot list columns in db: %s, table: %s: %s\n", my_progname,db,table,mysql_error(mysql)); return 1; } printf("Database: %s Table: %s", db, table); if (opt_count) printf(" Rows: %lu", rows); if (wild && wild[0]) printf(" Wildcard: %s",wild); putchar('\n'); print_res_header(result); while ((row=mysql_fetch_row(result))) print_res_row(result,row); print_res_top(result); if (opt_show_keys) { my_snprintf(query, sizeof(query), "show keys from `%s`", table); if 
(mysql_query(mysql,query) || !(result=mysql_store_result(mysql))) { fprintf(stderr,"%s: Cannot list keys in db: %s, table: %s: %s\n", my_progname,db,table,mysql_error(mysql)); return 1; } if (mysql_num_rows(result)) { print_res_header(result); while ((row=mysql_fetch_row(result))) print_res_row(result,row); print_res_top(result); } else puts("Table has no keys"); } mysql_free_result(result); return 0; } /***************************************************************************** General functions to print a nice ascii-table from data *****************************************************************************/ static void print_header(const char *header,uint head_length,...) { va_list args; uint length,i,str_length,pre_space; const char *field; va_start(args,head_length); putchar('+'); field=header; length=head_length; for (;;) { for (i=0 ; i < length+2 ; i++) putchar('-'); putchar('+'); if (!(field=va_arg(args,char *))) break; length=va_arg(args,uint); } va_end(args); putchar('\n'); va_start(args,head_length); field=header; length=head_length; putchar('|'); for (;;) { str_length=(uint) strlen(field); if (str_length > length) str_length=length+1; pre_space=(uint) (((int) length-(int) str_length)/2)+1; for (i=0 ; i < pre_space ; i++) putchar(' '); for (i = 0 ; i < str_length ; i++) putchar(field[i]); length=length+2-str_length-pre_space; for (i=0 ; i < length ; i++) putchar(' '); putchar('|'); if (!(field=va_arg(args,char *))) break; length=va_arg(args,uint); } va_end(args); putchar('\n'); va_start(args,head_length); putchar('+'); field=header; length=head_length; for (;;) { for (i=0 ; i < length+2 ; i++) putchar('-'); putchar('+'); if (!(field=va_arg(args,char *))) break; length=va_arg(args,uint); } va_end(args); putchar('\n'); } static void print_row(const char *header,uint head_length,...) 
{ va_list args; const char *field; uint i,length,field_length; va_start(args,head_length); field=header; length=head_length; for (;;) { putchar('|'); putchar(' '); fputs(field,stdout); field_length=(uint) strlen(field); for (i=field_length ; i <= length ; i++) putchar(' '); if (!(field=va_arg(args,char *))) break; length=va_arg(args,uint); } va_end(args); putchar('|'); putchar('\n'); } static void print_trailer(uint head_length,...) { va_list args; uint length,i; va_start(args,head_length); length=head_length; putchar('+'); for (;;) { for (i=0 ; i < length+2 ; i++) putchar('-'); putchar('+'); if (!(length=va_arg(args,uint))) break; } va_end(args); putchar('\n'); } static void print_res_header(MYSQL_RES *result) { MYSQL_FIELD *field; print_res_top(result); mysql_field_seek(result,0); putchar('|'); while ((field = mysql_fetch_field(result))) { printf(" %-*s|",(int) field->max_length+1,field->name); } putchar('\n'); print_res_top(result); } static void print_res_top(MYSQL_RES *result) { uint i,length; MYSQL_FIELD *field; putchar('+'); mysql_field_seek(result,0); while((field = mysql_fetch_field(result))) { if ((length=(uint) strlen(field->name)) > field->max_length) field->max_length=length; else length=field->max_length; for (i=length+2 ; i--> 0 ; ) putchar('-'); putchar('+'); } putchar('\n'); } static void print_res_row(MYSQL_RES *result,MYSQL_ROW cur) { uint i,length; MYSQL_FIELD *field; putchar('|'); mysql_field_seek(result,0); for (i=0 ; i < mysql_num_fields(result); i++) { field = mysql_fetch_field(result); length=field->max_length; printf(" %-*s|",length+1,cur[i] ? (char*) cur[i] : ""); } putchar('\n'); }
./CrossVul/dataset_final_sorted/CWE-284/c/bad_1571_8
crossvul-cpp_data_bad_2374_0
/* -*- mode: c; c-file-style: "bsd"; indent-tabs-mode: t -*- */ /* * Copyright 1993 OpenVision Technologies, Inc., All Rights Reserved. * */ #include <k5-platform.h> #include <gssrpc/rpc.h> #include <gssapi/gssapi_krb5.h> /* for gss_nt_krb5_name */ #include <syslog.h> #include <kadm5/kadm_rpc.h> #include <krb5.h> #include <kadm5/admin.h> #include <adm_proto.h> #include "misc.h" #include "kadm5/server_internal.h" extern void *global_server_handle; static int check_rpcsec_auth(struct svc_req *); /* * Function: kadm_1 * * Purpose: RPC proccessing procedure. * originally generated from rpcgen * * Arguments: * rqstp (input) rpc request structure * transp (input) rpc transport structure * (input/output) * <return value> * * Requires: * Effects: * Modifies: */ void kadm_1(rqstp, transp) struct svc_req *rqstp; register SVCXPRT *transp; { union { cprinc_arg create_principal_2_arg; dprinc_arg delete_principal_2_arg; mprinc_arg modify_principal_2_arg; rprinc_arg rename_principal_2_arg; gprinc_arg get_principal_2_arg; chpass_arg chpass_principal_2_arg; chrand_arg chrand_principal_2_arg; cpol_arg create_policy_2_arg; dpol_arg delete_policy_2_arg; mpol_arg modify_policy_2_arg; gpol_arg get_policy_2_arg; setkey_arg setkey_principal_2_arg; setv4key_arg setv4key_principal_2_arg; cprinc3_arg create_principal3_2_arg; chpass3_arg chpass_principal3_2_arg; chrand3_arg chrand_principal3_2_arg; setkey3_arg setkey_principal3_2_arg; } argument; char *result; bool_t (*xdr_argument)(), (*xdr_result)(); char *(*local)(); if (rqstp->rq_cred.oa_flavor != AUTH_GSSAPI && !check_rpcsec_auth(rqstp)) { krb5_klog_syslog(LOG_ERR, "Authentication attempt failed: %s, " "RPC authentication flavor %d", client_addr(rqstp->rq_xprt), rqstp->rq_cred.oa_flavor); svcerr_weakauth(transp); return; } switch (rqstp->rq_proc) { case NULLPROC: (void) svc_sendreply(transp, xdr_void, (char *)NULL); return; case CREATE_PRINCIPAL: xdr_argument = xdr_cprinc_arg; xdr_result = xdr_generic_ret; local = (char *(*)()) 
create_principal_2_svc; break; case DELETE_PRINCIPAL: xdr_argument = xdr_dprinc_arg; xdr_result = xdr_generic_ret; local = (char *(*)()) delete_principal_2_svc; break; case MODIFY_PRINCIPAL: xdr_argument = xdr_mprinc_arg; xdr_result = xdr_generic_ret; local = (char *(*)()) modify_principal_2_svc; break; case RENAME_PRINCIPAL: xdr_argument = xdr_rprinc_arg; xdr_result = xdr_generic_ret; local = (char *(*)()) rename_principal_2_svc; break; case GET_PRINCIPAL: xdr_argument = xdr_gprinc_arg; xdr_result = xdr_gprinc_ret; local = (char *(*)()) get_principal_2_svc; break; case GET_PRINCS: xdr_argument = xdr_gprincs_arg; xdr_result = xdr_gprincs_ret; local = (char *(*)()) get_princs_2_svc; break; case CHPASS_PRINCIPAL: xdr_argument = xdr_chpass_arg; xdr_result = xdr_generic_ret; local = (char *(*)()) chpass_principal_2_svc; break; case SETV4KEY_PRINCIPAL: xdr_argument = xdr_setv4key_arg; xdr_result = xdr_generic_ret; local = (char *(*)()) setv4key_principal_2_svc; break; case SETKEY_PRINCIPAL: xdr_argument = xdr_setkey_arg; xdr_result = xdr_generic_ret; local = (char *(*)()) setkey_principal_2_svc; break; case CHRAND_PRINCIPAL: xdr_argument = xdr_chrand_arg; xdr_result = xdr_chrand_ret; local = (char *(*)()) chrand_principal_2_svc; break; case CREATE_POLICY: xdr_argument = xdr_cpol_arg; xdr_result = xdr_generic_ret; local = (char *(*)()) create_policy_2_svc; break; case DELETE_POLICY: xdr_argument = xdr_dpol_arg; xdr_result = xdr_generic_ret; local = (char *(*)()) delete_policy_2_svc; break; case MODIFY_POLICY: xdr_argument = xdr_mpol_arg; xdr_result = xdr_generic_ret; local = (char *(*)()) modify_policy_2_svc; break; case GET_POLICY: xdr_argument = xdr_gpol_arg; xdr_result = xdr_gpol_ret; local = (char *(*)()) get_policy_2_svc; break; case GET_POLS: xdr_argument = xdr_gpols_arg; xdr_result = xdr_gpols_ret; local = (char *(*)()) get_pols_2_svc; break; case GET_PRIVS: xdr_argument = xdr_u_int32; xdr_result = xdr_getprivs_ret; local = (char *(*)()) get_privs_2_svc; break; 
case INIT: xdr_argument = xdr_u_int32; xdr_result = xdr_generic_ret; local = (char *(*)()) init_2_svc; break; case CREATE_PRINCIPAL3: xdr_argument = xdr_cprinc3_arg; xdr_result = xdr_generic_ret; local = (char *(*)()) create_principal3_2_svc; break; case CHPASS_PRINCIPAL3: xdr_argument = xdr_chpass3_arg; xdr_result = xdr_generic_ret; local = (char *(*)()) chpass_principal3_2_svc; break; case CHRAND_PRINCIPAL3: xdr_argument = xdr_chrand3_arg; xdr_result = xdr_chrand_ret; local = (char *(*)()) chrand_principal3_2_svc; break; case SETKEY_PRINCIPAL3: xdr_argument = xdr_setkey3_arg; xdr_result = xdr_generic_ret; local = (char *(*)()) setkey_principal3_2_svc; break; case PURGEKEYS: xdr_argument = xdr_purgekeys_arg; xdr_result = xdr_generic_ret; local = (char *(*)()) purgekeys_2_svc; break; case GET_STRINGS: xdr_argument = xdr_gstrings_arg; xdr_result = xdr_gstrings_ret; local = (char *(*)()) get_strings_2_svc; break; case SET_STRING: xdr_argument = xdr_sstring_arg; xdr_result = xdr_generic_ret; local = (char *(*)()) set_string_2_svc; break; default: krb5_klog_syslog(LOG_ERR, "Invalid KADM5 procedure number: %s, %d", client_addr(rqstp->rq_xprt), rqstp->rq_proc); svcerr_noproc(transp); return; } memset(&argument, 0, sizeof(argument)); if (!svc_getargs(transp, xdr_argument, &argument)) { svcerr_decode(transp); return; } result = (*local)(&argument, rqstp); if (result != NULL && !svc_sendreply(transp, xdr_result, result)) { krb5_klog_syslog(LOG_ERR, "WARNING! Unable to send function results, " "continuing."); svcerr_systemerr(transp); } if (!svc_freeargs(transp, xdr_argument, &argument)) { krb5_klog_syslog(LOG_ERR, "WARNING! 
Unable to free arguments, " "continuing."); } return; } static int check_rpcsec_auth(struct svc_req *rqstp) { gss_ctx_id_t ctx; krb5_context kctx; OM_uint32 maj_stat, min_stat; gss_name_t name; krb5_principal princ; int ret, success; krb5_data *c1, *c2, *realm; gss_buffer_desc gss_str; kadm5_server_handle_t handle; size_t slen; char *sdots; success = 0; handle = (kadm5_server_handle_t)global_server_handle; if (rqstp->rq_cred.oa_flavor != RPCSEC_GSS) return 0; ctx = rqstp->rq_svccred; maj_stat = gss_inquire_context(&min_stat, ctx, NULL, &name, NULL, NULL, NULL, NULL, NULL); if (maj_stat != GSS_S_COMPLETE) { krb5_klog_syslog(LOG_ERR, _("check_rpcsec_auth: failed " "inquire_context, stat=%u"), maj_stat); log_badauth(maj_stat, min_stat, rqstp->rq_xprt, NULL); goto fail_name; } kctx = handle->context; ret = gss_to_krb5_name_1(rqstp, kctx, name, &princ, &gss_str); if (ret == 0) goto fail_name; slen = gss_str.length; trunc_name(&slen, &sdots); /* * Since we accept with GSS_C_NO_NAME, the client can authenticate * against the entire kdb. Therefore, ensure that the service * name is something reasonable. 
*/ if (krb5_princ_size(kctx, princ) != 2) goto fail_princ; c1 = krb5_princ_component(kctx, princ, 0); c2 = krb5_princ_component(kctx, princ, 1); realm = krb5_princ_realm(kctx, princ); if (strncmp(handle->params.realm, realm->data, realm->length) == 0 && strncmp("kadmin", c1->data, c1->length) == 0) { if (strncmp("history", c2->data, c2->length) == 0) goto fail_princ; else success = 1; } fail_princ: if (!success) { krb5_klog_syslog(LOG_ERR, _("bad service principal %.*s%s"), (int) slen, (char *) gss_str.value, sdots); } gss_release_buffer(&min_stat, &gss_str); krb5_free_principal(kctx, princ); fail_name: gss_release_name(&min_stat, &name); return success; } int gss_to_krb5_name_1(struct svc_req *rqstp, krb5_context ctx, gss_name_t gss_name, krb5_principal *princ, gss_buffer_t gss_str) { OM_uint32 status, minor_stat; gss_OID gss_type; char *str; int success; status = gss_display_name(&minor_stat, gss_name, gss_str, &gss_type); if ((status != GSS_S_COMPLETE) || (gss_type != gss_nt_krb5_name)) { krb5_klog_syslog(LOG_ERR, _("gss_to_krb5_name: failed display_name " "status %d"), status); log_badauth(status, minor_stat, rqstp->rq_xprt, NULL); return 0; } str = malloc(gss_str->length +1); if (str == NULL) return 0; *str = '\0'; strncat(str, gss_str->value, gss_str->length); success = (krb5_parse_name(ctx, str, princ) == 0); free(str); return success; }
./CrossVul/dataset_final_sorted/CWE-284/c/bad_2374_0
crossvul-cpp_data_good_4810_0
/* * Copyright (C) the libgit2 contributors. All rights reserved. * * This file is part of libgit2, distributed under the GNU GPL v2 with * a Linking Exception. For full terms see the included COPYING file. */ #ifndef GIT_WINHTTP #include "git2.h" #include "http_parser.h" #include "buffer.h" #include "netops.h" #include "global.h" #include "remote.h" #include "smart.h" #include "auth.h" #include "auth_negotiate.h" #include "tls_stream.h" #include "socket_stream.h" #include "curl_stream.h" git_http_auth_scheme auth_schemes[] = { { GIT_AUTHTYPE_NEGOTIATE, "Negotiate", GIT_CREDTYPE_DEFAULT, git_http_auth_negotiate }, { GIT_AUTHTYPE_BASIC, "Basic", GIT_CREDTYPE_USERPASS_PLAINTEXT, git_http_auth_basic }, }; static const char *upload_pack_service = "upload-pack"; static const char *upload_pack_ls_service_url = "/info/refs?service=git-upload-pack"; static const char *upload_pack_service_url = "/git-upload-pack"; static const char *receive_pack_service = "receive-pack"; static const char *receive_pack_ls_service_url = "/info/refs?service=git-receive-pack"; static const char *receive_pack_service_url = "/git-receive-pack"; static const char *get_verb = "GET"; static const char *post_verb = "POST"; #define OWNING_SUBTRANSPORT(s) ((http_subtransport *)(s)->parent.subtransport) #define PARSE_ERROR_GENERIC -1 #define PARSE_ERROR_REPLAY -2 /** Look at the user field */ #define PARSE_ERROR_EXT -3 #define CHUNK_SIZE 4096 enum last_cb { NONE, FIELD, VALUE }; typedef struct { git_smart_subtransport_stream parent; const char *service; const char *service_url; char *redirect_url; const char *verb; char *chunk_buffer; unsigned chunk_buffer_len; unsigned sent_request : 1, received_response : 1, chunked : 1, redirect_count : 3; } http_stream; typedef struct { git_smart_subtransport parent; transport_smart *owner; git_stream *io; gitno_connection_data connection_data; bool connected; /* Parser structures */ http_parser parser; http_parser_settings settings; gitno_buffer parse_buffer; 
git_buf parse_header_name; git_buf parse_header_value; char parse_buffer_data[NETIO_BUFSIZE]; char *content_type; char *location; git_vector www_authenticate; enum last_cb last_cb; int parse_error; int error; unsigned parse_finished : 1; /* Authentication */ git_cred *cred; git_cred *url_cred; git_vector auth_contexts; } http_subtransport; typedef struct { http_stream *s; http_subtransport *t; /* Target buffer details from read() */ char *buffer; size_t buf_size; size_t *bytes_read; } parser_context; static bool credtype_match(git_http_auth_scheme *scheme, void *data) { unsigned int credtype = *(unsigned int *)data; return !!(scheme->credtypes & credtype); } static bool challenge_match(git_http_auth_scheme *scheme, void *data) { const char *scheme_name = scheme->name; const char *challenge = (const char *)data; size_t scheme_len; scheme_len = strlen(scheme_name); return (strncasecmp(challenge, scheme_name, scheme_len) == 0 && (challenge[scheme_len] == '\0' || challenge[scheme_len] == ' ')); } static int auth_context_match( git_http_auth_context **out, http_subtransport *t, bool (*scheme_match)(git_http_auth_scheme *scheme, void *data), void *data) { git_http_auth_scheme *scheme = NULL; git_http_auth_context *context = NULL, *c; size_t i; *out = NULL; for (i = 0; i < ARRAY_SIZE(auth_schemes); i++) { if (scheme_match(&auth_schemes[i], data)) { scheme = &auth_schemes[i]; break; } } if (!scheme) return 0; /* See if authentication has already started for this scheme */ git_vector_foreach(&t->auth_contexts, i, c) { if (c->type == scheme->type) { context = c; break; } } if (!context) { if (scheme->init_context(&context, &t->connection_data) < 0) return -1; else if (!context) return 0; else if (git_vector_insert(&t->auth_contexts, context) < 0) return -1; } *out = context; return 0; } static int apply_credentials(git_buf *buf, http_subtransport *t) { git_cred *cred = t->cred; git_http_auth_context *context; /* Apply the credentials given to us in the URL */ if (!cred && 
t->connection_data.user && t->connection_data.pass) { if (!t->url_cred && git_cred_userpass_plaintext_new(&t->url_cred, t->connection_data.user, t->connection_data.pass) < 0) return -1; cred = t->url_cred; } if (!cred) return 0; /* Get or create a context for the best scheme for this cred type */ if (auth_context_match(&context, t, credtype_match, &cred->credtype) < 0) return -1; return context->next_token(buf, context, cred); } static const char *user_agent(void) { const char *custom = git_libgit2__user_agent(); if (custom) return custom; return "libgit2 " LIBGIT2_VERSION; } static int gen_request( git_buf *buf, http_stream *s, size_t content_length) { http_subtransport *t = OWNING_SUBTRANSPORT(s); const char *path = t->connection_data.path ? t->connection_data.path : "/"; size_t i; git_buf_printf(buf, "%s %s%s HTTP/1.1\r\n", s->verb, path, s->service_url); git_buf_printf(buf, "User-Agent: git/2.0 (%s)\r\n", user_agent()); git_buf_printf(buf, "Host: %s\r\n", t->connection_data.host); if (s->chunked || content_length > 0) { git_buf_printf(buf, "Accept: application/x-git-%s-result\r\n", s->service); git_buf_printf(buf, "Content-Type: application/x-git-%s-request\r\n", s->service); if (s->chunked) git_buf_puts(buf, "Transfer-Encoding: chunked\r\n"); else git_buf_printf(buf, "Content-Length: %"PRIuZ "\r\n", content_length); } else git_buf_puts(buf, "Accept: */*\r\n"); for (i = 0; i < t->owner->custom_headers.count; i++) { if (t->owner->custom_headers.strings[i]) git_buf_printf(buf, "%s\r\n", t->owner->custom_headers.strings[i]); } /* Apply credentials to the request */ if (apply_credentials(buf, t) < 0) return -1; git_buf_puts(buf, "\r\n"); if (git_buf_oom(buf)) return -1; return 0; } static int parse_authenticate_response( git_vector *www_authenticate, http_subtransport *t, int *allowed_types) { git_http_auth_context *context; char *challenge; size_t i; git_vector_foreach(www_authenticate, i, challenge) { if (auth_context_match(&context, t, challenge_match, 
challenge) < 0) return -1; else if (!context) continue; if (context->set_challenge && context->set_challenge(context, challenge) < 0) return -1; *allowed_types |= context->credtypes; } return 0; } static int on_header_ready(http_subtransport *t) { git_buf *name = &t->parse_header_name; git_buf *value = &t->parse_header_value; if (!strcasecmp("Content-Type", git_buf_cstr(name))) { if (!t->content_type) { t->content_type = git__strdup(git_buf_cstr(value)); GITERR_CHECK_ALLOC(t->content_type); } } else if (!strcasecmp("WWW-Authenticate", git_buf_cstr(name))) { char *dup = git__strdup(git_buf_cstr(value)); GITERR_CHECK_ALLOC(dup); git_vector_insert(&t->www_authenticate, dup); } else if (!strcasecmp("Location", git_buf_cstr(name))) { if (!t->location) { t->location = git__strdup(git_buf_cstr(value)); GITERR_CHECK_ALLOC(t->location); } } return 0; } static int on_header_field(http_parser *parser, const char *str, size_t len) { parser_context *ctx = (parser_context *) parser->data; http_subtransport *t = ctx->t; /* Both parse_header_name and parse_header_value are populated * and ready for consumption */ if (VALUE == t->last_cb) if (on_header_ready(t) < 0) return t->parse_error = PARSE_ERROR_GENERIC; if (NONE == t->last_cb || VALUE == t->last_cb) git_buf_clear(&t->parse_header_name); if (git_buf_put(&t->parse_header_name, str, len) < 0) return t->parse_error = PARSE_ERROR_GENERIC; t->last_cb = FIELD; return 0; } static int on_header_value(http_parser *parser, const char *str, size_t len) { parser_context *ctx = (parser_context *) parser->data; http_subtransport *t = ctx->t; assert(NONE != t->last_cb); if (FIELD == t->last_cb) git_buf_clear(&t->parse_header_value); if (git_buf_put(&t->parse_header_value, str, len) < 0) return t->parse_error = PARSE_ERROR_GENERIC; t->last_cb = VALUE; return 0; } static int on_headers_complete(http_parser *parser) { parser_context *ctx = (parser_context *) parser->data; http_subtransport *t = ctx->t; http_stream *s = ctx->s; git_buf buf = 
GIT_BUF_INIT; int error = 0, no_callback = 0, allowed_auth_types = 0; /* Both parse_header_name and parse_header_value are populated * and ready for consumption. */ if (VALUE == t->last_cb) if (on_header_ready(t) < 0) return t->parse_error = PARSE_ERROR_GENERIC; /* Capture authentication headers which may be a 401 (authentication * is not complete) or a 200 (simply informing us that auth *is* * complete.) */ if (parse_authenticate_response(&t->www_authenticate, t, &allowed_auth_types) < 0) return t->parse_error = PARSE_ERROR_GENERIC; /* Check for an authentication failure. */ if (parser->status_code == 401 && get_verb == s->verb) { if (!t->owner->cred_acquire_cb) { no_callback = 1; } else { if (allowed_auth_types) { if (t->cred) { t->cred->free(t->cred); t->cred = NULL; } error = t->owner->cred_acquire_cb(&t->cred, t->owner->url, t->connection_data.user, allowed_auth_types, t->owner->cred_acquire_payload); if (error == GIT_PASSTHROUGH) { no_callback = 1; } else if (error < 0) { t->error = error; return t->parse_error = PARSE_ERROR_EXT; } else { assert(t->cred); if (!(t->cred->credtype & allowed_auth_types)) { giterr_set(GITERR_NET, "credentials callback returned an invalid cred type"); return t->parse_error = PARSE_ERROR_GENERIC; } /* Successfully acquired a credential. */ t->parse_error = PARSE_ERROR_REPLAY; return 0; } } } if (no_callback) { giterr_set(GITERR_NET, "authentication required but no callback set"); return t->parse_error = PARSE_ERROR_GENERIC; } } /* Check for a redirect. * Right now we only permit a redirect to the same hostname. 
*/ if ((parser->status_code == 301 || parser->status_code == 302 || (parser->status_code == 303 && get_verb == s->verb) || parser->status_code == 307) && t->location) { if (s->redirect_count >= 7) { giterr_set(GITERR_NET, "Too many redirects"); return t->parse_error = PARSE_ERROR_GENERIC; } if (gitno_connection_data_from_url(&t->connection_data, t->location, s->service_url) < 0) return t->parse_error = PARSE_ERROR_GENERIC; /* Set the redirect URL on the stream. This is a transfer of * ownership of the memory. */ if (s->redirect_url) git__free(s->redirect_url); s->redirect_url = t->location; t->location = NULL; t->connected = 0; s->redirect_count++; t->parse_error = PARSE_ERROR_REPLAY; return 0; } /* Check for a 200 HTTP status code. */ if (parser->status_code != 200) { giterr_set(GITERR_NET, "Unexpected HTTP status code: %d", parser->status_code); return t->parse_error = PARSE_ERROR_GENERIC; } /* The response must contain a Content-Type header. */ if (!t->content_type) { giterr_set(GITERR_NET, "No Content-Type header in response"); return t->parse_error = PARSE_ERROR_GENERIC; } /* The Content-Type header must match our expectation. 
*/ if (get_verb == s->verb) git_buf_printf(&buf, "application/x-git-%s-advertisement", ctx->s->service); else git_buf_printf(&buf, "application/x-git-%s-result", ctx->s->service); if (git_buf_oom(&buf)) return t->parse_error = PARSE_ERROR_GENERIC; if (strcmp(t->content_type, git_buf_cstr(&buf))) { git_buf_free(&buf); giterr_set(GITERR_NET, "Invalid Content-Type: %s", t->content_type); return t->parse_error = PARSE_ERROR_GENERIC; } git_buf_free(&buf); return 0; } static int on_message_complete(http_parser *parser) { parser_context *ctx = (parser_context *) parser->data; http_subtransport *t = ctx->t; t->parse_finished = 1; return 0; } static int on_body_fill_buffer(http_parser *parser, const char *str, size_t len) { parser_context *ctx = (parser_context *) parser->data; http_subtransport *t = ctx->t; /* If our goal is to replay the request (either an auth failure or * a redirect) then don't bother buffering since we're ignoring the * content anyway. */ if (t->parse_error == PARSE_ERROR_REPLAY) return 0; if (ctx->buf_size < len) { giterr_set(GITERR_NET, "Can't fit data in the buffer"); return t->parse_error = PARSE_ERROR_GENERIC; } memcpy(ctx->buffer, str, len); *(ctx->bytes_read) += len; ctx->buffer += len; ctx->buf_size -= len; return 0; } static void clear_parser_state(http_subtransport *t) { http_parser_init(&t->parser, HTTP_RESPONSE); gitno_buffer_setup_fromstream(t->io, &t->parse_buffer, t->parse_buffer_data, sizeof(t->parse_buffer_data)); t->last_cb = NONE; t->parse_error = 0; t->parse_finished = 0; git_buf_free(&t->parse_header_name); git_buf_init(&t->parse_header_name, 0); git_buf_free(&t->parse_header_value); git_buf_init(&t->parse_header_value, 0); git__free(t->content_type); t->content_type = NULL; git__free(t->location); t->location = NULL; git_vector_free_deep(&t->www_authenticate); } static int write_chunk(git_stream *io, const char *buffer, size_t len) { git_buf buf = GIT_BUF_INIT; /* Chunk header */ git_buf_printf(&buf, "%" PRIxZ "\r\n", len); if 
(git_buf_oom(&buf)) return -1; if (git_stream_write(io, buf.ptr, buf.size, 0) < 0) { git_buf_free(&buf); return -1; } git_buf_free(&buf); /* Chunk body */ if (len > 0 && git_stream_write(io, buffer, len, 0) < 0) return -1; /* Chunk footer */ if (git_stream_write(io, "\r\n", 2, 0) < 0) return -1; return 0; } static int apply_proxy_config(http_subtransport *t) { int error; git_proxy_t proxy_type; if (!git_stream_supports_proxy(t->io)) return 0; proxy_type = t->owner->proxy.type; if (proxy_type == GIT_PROXY_NONE) return 0; if (proxy_type == GIT_PROXY_AUTO) { char *url; git_proxy_options opts = GIT_PROXY_OPTIONS_INIT; if ((error = git_remote__get_http_proxy(t->owner->owner, !!t->connection_data.use_ssl, &url)) < 0) return error; opts.type = GIT_PROXY_SPECIFIED; opts.url = url; error = git_stream_set_proxy(t->io, &opts); git__free(url); return error; } return git_stream_set_proxy(t->io, &t->owner->proxy); } static int http_connect(http_subtransport *t) { int error; if (t->connected && http_should_keep_alive(&t->parser) && t->parse_finished) return 0; if (t->io) { git_stream_close(t->io); git_stream_free(t->io); t->io = NULL; t->connected = 0; } if (t->connection_data.use_ssl) { error = git_tls_stream_new(&t->io, t->connection_data.host, t->connection_data.port); } else { #ifdef GIT_CURL error = git_curl_stream_new(&t->io, t->connection_data.host, t->connection_data.port); #else error = git_socket_stream_new(&t->io, t->connection_data.host, t->connection_data.port); #endif } if (error < 0) return error; GITERR_CHECK_VERSION(t->io, GIT_STREAM_VERSION, "git_stream"); apply_proxy_config(t); error = git_stream_connect(t->io); if ((!error || error == GIT_ECERTIFICATE) && t->owner->certificate_check_cb != NULL && git_stream_is_encrypted(t->io)) { git_cert *cert; int is_valid = (error == GIT_OK); if ((error = git_stream_certificate(&cert, t->io)) < 0) return error; giterr_clear(); error = t->owner->certificate_check_cb(cert, is_valid, t->connection_data.host, 
t->owner->message_cb_payload); if (error < 0) { if (!giterr_last()) giterr_set(GITERR_NET, "user cancelled certificate check"); return error; } } if (error < 0) return error; t->connected = 1; return 0; } static int http_stream_read( git_smart_subtransport_stream *stream, char *buffer, size_t buf_size, size_t *bytes_read) { http_stream *s = (http_stream *)stream; http_subtransport *t = OWNING_SUBTRANSPORT(s); parser_context ctx; size_t bytes_parsed; replay: *bytes_read = 0; assert(t->connected); if (!s->sent_request) { git_buf request = GIT_BUF_INIT; clear_parser_state(t); if (gen_request(&request, s, 0) < 0) return -1; if (git_stream_write(t->io, request.ptr, request.size, 0) < 0) { git_buf_free(&request); return -1; } git_buf_free(&request); s->sent_request = 1; } if (!s->received_response) { if (s->chunked) { assert(s->verb == post_verb); /* Flush, if necessary */ if (s->chunk_buffer_len > 0 && write_chunk(t->io, s->chunk_buffer, s->chunk_buffer_len) < 0) return -1; s->chunk_buffer_len = 0; /* Write the final chunk. */ if (git_stream_write(t->io, "0\r\n\r\n", 5, 0) < 0) return -1; } s->received_response = 1; } while (!*bytes_read && !t->parse_finished) { size_t data_offset; int error; /* * Make the parse_buffer think it's as full of data as * the buffer, so it won't try to recv more data than * we can put into it. * * data_offset is the actual data offset from which we * should tell the parser to start reading. */ if (buf_size >= t->parse_buffer.len) { t->parse_buffer.offset = 0; } else { t->parse_buffer.offset = t->parse_buffer.len - buf_size; } data_offset = t->parse_buffer.offset; if (gitno_recv(&t->parse_buffer) < 0) return -1; /* This call to http_parser_execute will result in invocations of the * on_* family of callbacks. The most interesting of these is * on_body_fill_buffer, which is called when data is ready to be copied * into the target buffer. We need to marshal the buffer, buf_size, and * bytes_read parameters to this callback. 
*/ ctx.t = t; ctx.s = s; ctx.buffer = buffer; ctx.buf_size = buf_size; ctx.bytes_read = bytes_read; /* Set the context, call the parser, then unset the context. */ t->parser.data = &ctx; bytes_parsed = http_parser_execute(&t->parser, &t->settings, t->parse_buffer.data + data_offset, t->parse_buffer.offset - data_offset); t->parser.data = NULL; /* If there was a handled authentication failure, then parse_error * will have signaled us that we should replay the request. */ if (PARSE_ERROR_REPLAY == t->parse_error) { s->sent_request = 0; if ((error = http_connect(t)) < 0) return error; goto replay; } if (t->parse_error == PARSE_ERROR_EXT) { return t->error; } if (t->parse_error < 0) return -1; if (bytes_parsed != t->parse_buffer.offset - data_offset) { giterr_set(GITERR_NET, "HTTP parser error: %s", http_errno_description((enum http_errno)t->parser.http_errno)); return -1; } } return 0; } static int http_stream_write_chunked( git_smart_subtransport_stream *stream, const char *buffer, size_t len) { http_stream *s = (http_stream *)stream; http_subtransport *t = OWNING_SUBTRANSPORT(s); assert(t->connected); /* Send the request, if necessary */ if (!s->sent_request) { git_buf request = GIT_BUF_INIT; clear_parser_state(t); if (gen_request(&request, s, 0) < 0) return -1; if (git_stream_write(t->io, request.ptr, request.size, 0) < 0) { git_buf_free(&request); return -1; } git_buf_free(&request); s->sent_request = 1; } if (len > CHUNK_SIZE) { /* Flush, if necessary */ if (s->chunk_buffer_len > 0) { if (write_chunk(t->io, s->chunk_buffer, s->chunk_buffer_len) < 0) return -1; s->chunk_buffer_len = 0; } /* Write chunk directly */ if (write_chunk(t->io, buffer, len) < 0) return -1; } else { /* Append as much to the buffer as we can */ int count = min(CHUNK_SIZE - s->chunk_buffer_len, len); if (!s->chunk_buffer) s->chunk_buffer = git__malloc(CHUNK_SIZE); memcpy(s->chunk_buffer + s->chunk_buffer_len, buffer, count); s->chunk_buffer_len += count; buffer += count; len -= count; /* Is 
the buffer full? If so, then flush */
	if (CHUNK_SIZE == s->chunk_buffer_len) {
		if (write_chunk(t->io, s->chunk_buffer, s->chunk_buffer_len) < 0)
			return -1;

		s->chunk_buffer_len = 0;

		/* Whatever did not fit into the flushed chunk stays buffered
		 * for the next write. */
		if (len > 0) {
			memcpy(s->chunk_buffer, buffer, len);
			s->chunk_buffer_len = len;
		}
	}
	}

	return 0;
}

/* Single-shot write path: the full request body length is known up front,
 * so one HTTP request is generated and the payload is sent in one pass.
 * Only a single write per stream is permitted on this path. */
static int http_stream_write_single(
	git_smart_subtransport_stream *stream,
	const char *buffer,
	size_t len)
{
	http_stream *s = (http_stream *)stream;
	http_subtransport *t = OWNING_SUBTRANSPORT(s);
	git_buf request = GIT_BUF_INIT;

	assert(t->connected);

	/* This stream type allows exactly one request/response round trip. */
	if (s->sent_request) {
		giterr_set(GITERR_NET, "Subtransport configured for only one write");
		return -1;
	}

	clear_parser_state(t);

	if (gen_request(&request, s, len) < 0)
		return -1;

	/* Send headers first, then the (optional) body. */
	if (git_stream_write(t->io, request.ptr, request.size, 0) < 0)
		goto on_error;

	if (len && git_stream_write(t->io, buffer, len, 0) < 0)
		goto on_error;

	git_buf_free(&request);
	s->sent_request = 1;

	return 0;

on_error:
	git_buf_free(&request);
	return -1;
}

/* Release a stream and everything it owns (chunk buffer, redirect URL). */
static void http_stream_free(git_smart_subtransport_stream *stream)
{
	http_stream *s = (http_stream *)stream;

	if (s->chunk_buffer)
		git__free(s->chunk_buffer);

	if (s->redirect_url)
		git__free(s->redirect_url);

	git__free(s);
}

/* Allocate a stream with the default (single-write) vtable wired up.
 * Callers customize service/verb (and possibly the write fn) afterwards. */
static int http_stream_alloc(http_subtransport *t,
	git_smart_subtransport_stream **stream)
{
	http_stream *s;

	if (!stream)
		return -1;

	s = git__calloc(sizeof(http_stream), 1);
	GITERR_CHECK_ALLOC(s);

	s->parent.subtransport = &t->parent;
	s->parent.read = http_stream_read;
	s->parent.write = http_stream_write_single;
	s->parent.free = http_stream_free;

	*stream = (git_smart_subtransport_stream *)s;
	return 0;
}

/* GET $url/info/refs?service=git-upload-pack (ref advertisement for fetch). */
static int http_uploadpack_ls(
	http_subtransport *t,
	git_smart_subtransport_stream **stream)
{
	http_stream *s;

	if (http_stream_alloc(t, stream) < 0)
		return -1;

	s = (http_stream *)*stream;

	s->service = upload_pack_service;
	s->service_url = upload_pack_ls_service_url;
	s->verb = get_verb;

	return 0;
}

/* POST $url/git-upload-pack (the negotiation/pack download for fetch). */
static int http_uploadpack(
	http_subtransport *t,
	git_smart_subtransport_stream **stream)
{
	http_stream *s;

	if (http_stream_alloc(t, stream) < 0)
		return -1;

	s = (http_stream *)*stream;

	s->service = upload_pack_service;
	s->service_url = upload_pack_service_url;
	s->verb = post_verb;

	return 0;
}

/* GET $url/info/refs?service=git-receive-pack (ref advertisement for push). */
static int http_receivepack_ls(
	http_subtransport *t,
	git_smart_subtransport_stream **stream)
{
	http_stream *s;

	if (http_stream_alloc(t, stream) < 0)
		return -1;

	s = (http_stream *)*stream;

	s->service = receive_pack_service;
	s->service_url = receive_pack_ls_service_url;
	s->verb = get_verb;

	return 0;
}

/* POST $url/git-receive-pack (pack upload for push); the body size is not
 * known in advance, so the chunked write path is substituted here. */
static int http_receivepack(
	http_subtransport *t,
	git_smart_subtransport_stream **stream)
{
	http_stream *s;

	if (http_stream_alloc(t, stream) < 0)
		return -1;

	s = (http_stream *)*stream;

	/* Use Transfer-Encoding: chunked for this request */
	s->chunked = 1;
	s->parent.write = http_stream_write_chunked;

	s->service = receive_pack_service;
	s->service_url = receive_pack_service_url;
	s->verb = post_verb;

	return 0;
}

/* Entry point for the smart protocol: parse the URL (once), connect, and
 * hand back a stream configured for the requested service. */
static int http_action(
	git_smart_subtransport_stream **stream,
	git_smart_subtransport *subtransport,
	const char *url,
	git_smart_service_t action)
{
	http_subtransport *t = (http_subtransport *)subtransport;
	int ret;

	if (!stream)
		return -1;

	/* Only (re)parse the URL when no connection data is cached yet. */
	if ((!t->connection_data.host || !t->connection_data.port || !t->connection_data.path) &&
		 (ret = gitno_connection_data_from_url(&t->connection_data, url, NULL)) < 0)
		return ret;

	if ((ret = http_connect(t)) < 0)
		return ret;

	switch (action) {
	case GIT_SERVICE_UPLOADPACK_LS:
		return http_uploadpack_ls(t, stream);

	case GIT_SERVICE_UPLOADPACK:
		return http_uploadpack(t, stream);

	case GIT_SERVICE_RECEIVEPACK_LS:
		return http_receivepack_ls(t, stream);

	case GIT_SERVICE_RECEIVEPACK:
		return http_receivepack(t, stream);
	}

	/* Unknown action. */
	*stream = NULL;
	return -1;
}

/* Tear down the connection state: parser, socket stream, credentials,
 * authentication contexts and cached URL data. Safe to call repeatedly. */
static int http_close(git_smart_subtransport *subtransport)
{
	http_subtransport *t = (http_subtransport *) subtransport;
	git_http_auth_context *context;
	size_t i;

	clear_parser_state(t);

	t->connected = 0;

	if (t->io) {
		git_stream_close(t->io);
		git_stream_free(t->io);
		t->io = NULL;
	}

	if (t->cred) {
		t->cred->free(t->cred);
		t->cred = NULL;
	}

	if (t->url_cred) {
		t->url_cred->free(t->url_cred);
		t->url_cred = NULL;
	}

	git_vector_foreach(&t->auth_contexts, i, context) {
		if (context->free)
			context->free(context);
	}

	git_vector_clear(&t->auth_contexts);

	gitno_connection_data_free_ptrs(&t->connection_data);
	memset(&t->connection_data, 0x0, sizeof(gitno_connection_data));

	return 0;
}

/* Final destructor for the subtransport object itself. */
static void http_free(git_smart_subtransport *subtransport)
{
	http_subtransport *t = (http_subtransport *) subtransport;

	http_close(subtransport);

	git_vector_free(&t->auth_contexts);
	git__free(t);
}

/* Public factory: create the HTTP smart subtransport and wire up the
 * action/close/free vtable plus the http_parser callbacks. */
int git_smart_subtransport_http(git_smart_subtransport **out, git_transport *owner, void *param)
{
	http_subtransport *t;

	GIT_UNUSED(param);

	if (!out)
		return -1;

	t = git__calloc(sizeof(http_subtransport), 1);
	GITERR_CHECK_ALLOC(t);

	t->owner = (transport_smart *)owner;
	t->parent.action = http_action;
	t->parent.close = http_close;
	t->parent.free = http_free;

	t->settings.on_header_field = on_header_field;
	t->settings.on_header_value = on_header_value;
	t->settings.on_headers_complete = on_headers_complete;
	t->settings.on_body = on_body_fill_buffer;
	t->settings.on_message_complete = on_message_complete;

	*out = (git_smart_subtransport *) t;
	return 0;
}

#endif /* !GIT_WINHTTP */
./CrossVul/dataset_final_sorted/CWE-284/c/good_4810_0
crossvul-cpp_data_bad_5016_0
/*
 *   libndp.c - Neighbour discovery library
 *   Copyright (C) 2013-2015 Jiri Pirko <jiri@resnulli.us>
 *
 *   This library is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU Lesser General Public
 *   License as published by the Free Software Foundation; either
 *   version 2.1 of the License, or (at your option) any later version.
 *
 *   This library is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *   Lesser General Public License for more details.
 *
 *   You should have received a copy of the GNU Lesser General Public
 *   License along with this library; if not, write to the Free Software
 *   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA
 */

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <ctype.h>
#include <sys/socket.h>
#include <sys/select.h>
#include <netinet/in.h>
#include <netinet/icmp6.h>
#include <arpa/inet.h>
#include <net/ethernet.h>
#include <assert.h>
#include <ndp.h>

#include "ndp_private.h"
#include "list.h"

/**
 * SECTION: logging
 * @short_description: libndp logging facility
 */

/* Forward a log message to whatever log callback is installed on the
 * context (log_stderr by default, or a user-supplied ndp_set_log_fn). */
void ndp_log(struct ndp *ndp, int priority,
	     const char *file, int line, const char *fn,
	     const char *format, ...)
{
	va_list args;

	va_start(args, format);
	ndp->log_fn(ndp, priority, file, line, fn, format, args);
	va_end(args);
}

/* Built-in log callback: prints "libndp: <fn>: <message>" to stderr. */
static void log_stderr(struct ndp *ndp, int priority,
		       const char *file, int line, const char *fn,
		       const char *format, va_list args)
{
	fprintf(stderr, "libndp: %s: ", fn);
	vfprintf(stderr, format, args);
	fprintf(stderr, "\n");
}

/* Parse a priority string: either a bare number, or one of the
 * prefixes "err"/"info"/"debug" mapped to the syslog levels.
 * Unrecognized input yields 0. */
static int log_priority(const char *priority)
{
	char *endptr;
	int prio;

	prio = strtol(priority, &endptr, 10);
	if (endptr[0] == '\0' || isspace(endptr[0]))
		return prio;
	if (strncmp(priority, "err", 3) == 0)
		return LOG_ERR;
	if (strncmp(priority, "info", 4) == 0)
		return LOG_INFO;
	if (strncmp(priority, "debug", 5) == 0)
		return LOG_DEBUG;
	return 0;
}

/**
 * ndp_set_log_fn:
 * @ndp: libndp library context
 * @log_fn: function to be called for logging messages
 *
 * The built-in logging writes to stderr. It can be
 * overridden by a custom function, to plug log messages
 * into the user's logging functionality.
 **/
NDP_EXPORT
void ndp_set_log_fn(struct ndp *ndp,
		    void (*log_fn)(struct ndp *ndp, int priority,
				   const char *file, int line, const char *fn,
				   const char *format, va_list args))
{
	ndp->log_fn = log_fn;
	dbg(ndp, "Custom logging function %p registered.", log_fn);
}

/**
 * ndp_get_log_priority:
 * @ndp: libndp library context
 *
 * Returns: the current logging priority.
 **/
NDP_EXPORT
int ndp_get_log_priority(struct ndp *ndp)
{
	return ndp->log_priority;
}

/**
 * ndp_set_log_priority:
 * @ndp: libndp library context
 * @priority: the new logging priority
 *
 * Set the current logging priority. The value controls which messages
 * are logged.
 **/
NDP_EXPORT
void ndp_set_log_priority(struct ndp *ndp, int priority)
{
	ndp->log_priority = priority;
}

/**
 * SECTION: helpers
 * @short_description: various internal helper functions
 */

#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))

#define BUG_ON(expr) { if (expr) assert(0); }

/* calloc wrapper: zero-initialized allocation of @size bytes. */
static void *myzalloc(size_t size)
{
	return calloc(1, size);
}

/* recvmsg wrapper for the ICMPv6 socket. On success fills in the sender
 * address, the receiving interface index and (when the kernel supplies
 * the ancillary data) the hop limit; *buflen is updated to the number of
 * bytes actually received. Returns 0 or -errno. */
static int myrecvfrom6(int sockfd, void *buf, size_t *buflen, int flags,
		       struct in6_addr *addr, uint32_t *ifindex, int *hoplimit)
{
	struct sockaddr_in6 sin6;
	unsigned char cbuf[2 * CMSG_SPACE(sizeof(struct in6_pktinfo))];
	struct iovec iovec;
	struct msghdr msghdr;
	struct cmsghdr *cmsghdr;
	ssize_t len;

	iovec.iov_len = *buflen;
	iovec.iov_base = buf;
	memset(&msghdr, 0, sizeof(msghdr));
	msghdr.msg_name = &sin6;
	msghdr.msg_namelen = sizeof(sin6);
	msghdr.msg_iov = &iovec;
	msghdr.msg_iovlen = 1;
	msghdr.msg_control = cbuf;
	msghdr.msg_controllen = sizeof(cbuf);

	len = recvmsg(sockfd, &msghdr, flags);
	if (len == -1)
		return -errno;
	*buflen = len;

	/* Set ifindex to scope_id now. But since scope_id gets not
	 * set by kernel for linklocal addresses, use pktinfo to obtain that
	 * value right after.
	 */
	*ifindex = sin6.sin6_scope_id;
	for (cmsghdr = CMSG_FIRSTHDR(&msghdr); cmsghdr;
	     cmsghdr = CMSG_NXTHDR(&msghdr, cmsghdr)) {
		if (cmsghdr->cmsg_level != IPPROTO_IPV6)
			continue;

		switch (cmsghdr->cmsg_type) {
		case IPV6_PKTINFO:
			if (cmsghdr->cmsg_len == CMSG_LEN(sizeof(struct in6_pktinfo))) {
				struct in6_pktinfo *pktinfo;

				pktinfo = (struct in6_pktinfo *) CMSG_DATA(cmsghdr);
				*ifindex = pktinfo->ipi6_ifindex;
			}
			break;
		case IPV6_HOPLIMIT:
			if (cmsghdr->cmsg_len == CMSG_LEN(sizeof(int))) {
				int *val;

				val = (int *) CMSG_DATA(cmsghdr);
				*hoplimit = *val;
			}
			break;
		}
	}
	*addr = sin6.sin6_addr;

	return 0;
}

/* sendto wrapper: sends @buf to @addr on interface @ifindex, retrying
 * on EINTR. Returns 0 or -errno. */
static int mysendto6(int sockfd, void *buf, size_t buflen, int flags,
		     struct in6_addr *addr, uint32_t ifindex)
{
	struct sockaddr_in6 sin6;
	ssize_t ret;

	memset(&sin6, 0, sizeof(sin6));
	memcpy(&sin6.sin6_addr, addr, sizeof(sin6.sin6_addr));
	sin6.sin6_scope_id = ifindex;

resend:
	/* NOTE(review): sin6.sin6_family is left 0 and no (struct sockaddr *)
	 * cast is applied here — presumably accepted on the platforms this
	 * targets; confirm against the build warnings. */
	ret = sendto(sockfd, buf, buflen, flags, &sin6, sizeof(sin6));
	if (ret == -1) {
		switch (errno) {
		case EINTR:
			goto resend;
		default:
			return -errno;
		}
	}
	return 0;
}

/* Format an IPv6 address into a static buffer (not thread-safe; debug use). */
static const char *str_in6_addr(struct in6_addr *addr)
{
	static char buf[INET6_ADDRSTRLEN];

	return inet_ntop(AF_INET6, addr, buf, sizeof(buf));
}

/**
 * SECTION: NDP implementation
 * @short_description: functions that actually implements NDP
 */

/* Open the raw ICMPv6 socket and set the options the receive path relies
 * on (pktinfo for the ifindex, hop limit, multicast hops = 255 as required
 * by NDP). Returns 0 or -errno; the fd is stored in ndp->sock. */
static int ndp_sock_open(struct ndp *ndp)
{
	int sock;
	//struct icmp6_filter flt;
	int ret;
	int err;
	int val;

	sock = socket(PF_INET6, SOCK_RAW, IPPROTO_ICMPV6);
	if (sock == -1) {
		err(ndp, "Failed to create ICMP6 socket.");
		return -errno;
	}

	val = 1;
	ret = setsockopt(sock, IPPROTO_IPV6, IPV6_RECVPKTINFO,
			 &val, sizeof(val));
	if (ret == -1) {
		err(ndp, "Failed to setsockopt IPV6_RECVPKTINFO.");
		err = -errno;
		goto close_sock;
	}

	val = 255;
	ret = setsockopt(sock, IPPROTO_IPV6, IPV6_MULTICAST_HOPS,
			 &val, sizeof(val));
	if (ret == -1) {
		err(ndp, "Failed to setsockopt IPV6_MULTICAST_HOPS.");
		err = -errno;
		goto close_sock;
	}

	val = 1;
	ret = setsockopt(sock, IPPROTO_IPV6, IPV6_RECVHOPLIMIT,
			 &val, sizeof(val));
	if (ret == -1) {
		err(ndp, "Failed to setsockopt IPV6_RECVHOPLIMIT,.");
		err = -errno;
		goto close_sock;
	}

	ndp->sock = sock;
	return 0;

close_sock:
	close(sock);
	return err;
}

static void ndp_sock_close(struct ndp *ndp)
{
	close(ndp->sock);
}

/* Thin views over the raw packet buffer; each struct's first member is a
 * pointer into the buffer so they can share storage in the union below. */
struct ndp_msggeneric {
	void *dataptr; /* must be first */
};

struct ndp_msgrs {
	struct nd_router_solicit *rs; /* must be first */
};

struct ndp_msgra {
	struct nd_router_advert *ra; /* must be first */
};

struct ndp_msgns {
	struct nd_neighbor_solicit *ns; /* must be first */
};

struct ndp_msgna {
	struct nd_neighbor_advert *na; /* must be first */
};

struct ndp_msgr {
	struct nd_redirect *r; /* must be first */
};

/* One parsed/under-construction NDP message: raw bytes plus metadata. */
struct ndp_msg {
#define NDP_MSG_BUFLEN 1500
	unsigned char		buf[NDP_MSG_BUFLEN];
	size_t			len;
	struct in6_addr		addrto;
	uint32_t		ifindex;
	int			hoplimit;
	struct icmp6_hdr *	icmp6_hdr;
	unsigned char *		opts_start; /* pointer to buf at the place where opts start */
	union {
		struct ndp_msggeneric	generic;
		struct ndp_msgrs	rs;
		struct ndp_msgra	ra;
		struct ndp_msgns	ns;
		struct ndp_msgna	na;
		struct ndp_msgr		r;
	} nd_msg;
};

/* Static metadata describing one NDP message type. */
struct ndp_msg_type_info {
#define NDP_STRABBR_SIZE 4
	char strabbr[NDP_STRABBR_SIZE];
	uint8_t raw_type;
	size_t raw_struct_size;
	void (*addrto_adjust)(struct in6_addr *addr);
};

/* If the destination is unspecified (::), substitute the all-nodes
 * multicast address ff02::1. */
static void ndp_msg_addrto_adjust_all_nodes(struct in6_addr *addr)
{
	struct in6_addr any = IN6ADDR_ANY_INIT;

	if (memcmp(addr, &any, sizeof(any)))
		return;
	addr->s6_addr32[0] = htonl(0xFF020000);
	addr->s6_addr32[1] = 0;
	addr->s6_addr32[2] = 0;
	addr->s6_addr32[3] = htonl(0x1);
}

/* If the destination is unspecified (::), substitute the all-routers
 * multicast address ff02::2. */
static void ndp_msg_addrto_adjust_all_routers(struct in6_addr *addr)
{
	struct in6_addr any = IN6ADDR_ANY_INIT;

	if (memcmp(addr, &any, sizeof(any)))
		return;
	addr->s6_addr32[0] = htonl(0xFF020000);
	addr->s6_addr32[1] = 0;
	addr->s6_addr32[2] = 0;
	addr->s6_addr32[3] = htonl(0x2);
}

static struct ndp_msg_type_info ndp_msg_type_info_list[] =
{
	[NDP_MSG_RS] = {
		.strabbr = "RS",
		.raw_type = ND_ROUTER_SOLICIT,
		.raw_struct_size = sizeof(struct nd_router_solicit),
		.addrto_adjust = ndp_msg_addrto_adjust_all_routers,
	},
	[NDP_MSG_RA] = {
		.strabbr = "RA",
		.raw_type = ND_ROUTER_ADVERT,
		.raw_struct_size = sizeof(struct nd_router_advert),
	},
	[NDP_MSG_NS] = {
		.strabbr = "NS",
		.raw_type = ND_NEIGHBOR_SOLICIT,
		.raw_struct_size = sizeof(struct nd_neighbor_solicit),
		.addrto_adjust = ndp_msg_addrto_adjust_all_nodes,
	},
	[NDP_MSG_NA] = {
		.strabbr = "NA",
		.raw_type = ND_NEIGHBOR_ADVERT,
		.raw_struct_size = sizeof(struct nd_neighbor_advert),
	},
	[NDP_MSG_R] = {
		.strabbr = "R",
		.raw_type = ND_REDIRECT,
		.raw_struct_size = sizeof(struct nd_redirect),
	},
};

#define NDP_MSG_TYPE_LIST_SIZE ARRAY_SIZE(ndp_msg_type_info_list)

/* Metadata lookup by enum value (no bounds check; callers pass enums). */
struct ndp_msg_type_info *ndp_msg_type_info(enum ndp_msg_type msg_type)
{
	return &ndp_msg_type_info_list[msg_type];
}

/* Map an on-wire ICMPv6 type back to the enum; -ENOENT if unknown. */
static int ndp_msg_type_by_raw_type(enum ndp_msg_type *p_msg_type,
				    uint8_t raw_type)
{
	int i;

	for (i = 0; i < NDP_MSG_TYPE_LIST_SIZE; i++) {
		if (ndp_msg_type_info(i)->raw_type == raw_type) {
			*p_msg_type = i;
			return 0;
		}
	}
	return -ENOENT;
}

/* A message is valid only if the payload is at least as large as the
 * fixed header struct for its type. */
static bool ndp_msg_check_valid(struct ndp_msg *msg)
{
	size_t len = ndp_msg_payload_len(msg);
	enum ndp_msg_type msg_type = ndp_msg_type(msg);

	if (len < ndp_msg_type_info(msg_type)->raw_struct_size)
		return false;
	return true;
}

/* Zero-allocate a message and point icmp6_hdr at the internal buffer. */
static struct ndp_msg *ndp_msg_alloc(void)
{
	struct ndp_msg *msg;

	msg = myzalloc(sizeof(*msg));
	if (!msg)
		return NULL;
	msg->icmp6_hdr = (struct icmp6_hdr *) msg->buf;
	return msg;
}

static void ndp_msg_type_set(struct ndp_msg *msg, enum ndp_msg_type msg_type);

/* Initialize a freshly allocated message for @msg_type: stamp the raw
 * ICMPv6 type, set the initial length and mark where options begin. */
static void ndp_msg_init(struct ndp_msg *msg, enum ndp_msg_type msg_type)
{
	size_t raw_struct_size = ndp_msg_type_info(msg_type)->raw_struct_size;

	ndp_msg_type_set(msg, msg_type);
	msg->len = raw_struct_size;
	msg->opts_start = msg->buf + raw_struct_size;

	/* Set-up "first pointers" in all ndp_msgrs, ndp_msgra, ndp_msgns,
	 * ndp_msgna, ndp_msgr structures.
	 */
	msg->nd_msg.generic.dataptr = ndp_msg_payload(msg);
}

/**
 * ndp_msg_new:
 * @p_msg: pointer where new message structure address will be stored
 * @msg_type: message type
 *
 * Allocate new message structure of a specified type and initialize it.
 *
 * Returns: zero on success or negative number in case of an error.
 **/
NDP_EXPORT
int ndp_msg_new(struct ndp_msg **p_msg, enum ndp_msg_type msg_type)
{
	struct ndp_msg *msg;

	/* NDP_MSG_ALL is a wildcard for filtering, not a concrete type. */
	if (msg_type == NDP_MSG_ALL)
		return -EINVAL;
	msg = ndp_msg_alloc();
	if (!msg)
		return -ENOMEM;
	ndp_msg_init(msg, msg_type);
	*p_msg = msg;
	return 0;
}

/**
 * ndp_msg_destroy:
 *
 * Destroy message structure.
 **/
NDP_EXPORT
void ndp_msg_destroy(struct ndp_msg *msg)
{
	free(msg);
}

/**
 * ndp_msg_payload:
 * @msg: message structure
 *
 * Get raw Neighbour discovery packet data.
 *
 * Returns: pointer to raw data.
 **/
NDP_EXPORT
void *ndp_msg_payload(struct ndp_msg *msg)
{
	return msg->buf;
}

/**
 * ndp_msg_payload_maxlen:
 * @msg: message structure
 *
 * Get raw Neighbour discovery packet data maximum length.
 *
 * Returns: length in bytes.
 **/
NDP_EXPORT
size_t ndp_msg_payload_maxlen(struct ndp_msg *msg)
{
	return sizeof(msg->buf);
}

/**
 * ndp_msg_payload_len:
 * @msg: message structure
 *
 * Get raw Neighbour discovery packet data length.
 *
 * Returns: length in bytes.
 **/
NDP_EXPORT
size_t ndp_msg_payload_len(struct ndp_msg *msg)
{
	return msg->len;
}

/**
 * ndp_msg_payload_len_set:
 * @msg: message structure
 *
 * Set raw Neighbour discovery packet data length.
 * Values larger than the internal buffer are silently clamped.
 **/
NDP_EXPORT
void ndp_msg_payload_len_set(struct ndp_msg *msg, size_t len)
{
	if (len > sizeof(msg->buf))
		len = sizeof(msg->buf);
	msg->len = len;
}

/**
 * ndp_msg_payload_opts:
 * @msg: message structure
 *
 * Get raw Neighbour discovery packet options part data.
 *
 * Returns: pointer to raw data.
 **/
NDP_EXPORT
void *ndp_msg_payload_opts(struct ndp_msg *msg)
{
	return msg->opts_start;
}

/* Pointer arithmetic helper: address of the option at @offset within
 * the options area. */
static void *ndp_msg_payload_opts_offset(struct ndp_msg *msg, int offset)
{
	unsigned char *ptr = ndp_msg_payload_opts(msg);

	return ptr + offset;
}

/**
 * ndp_msg_payload_opts_len:
 * @msg: message structure
 *
 * Get raw Neighbour discovery packet options part data length.
 *
 * Returns: length in bytes.
 **/
NDP_EXPORT
size_t ndp_msg_payload_opts_len(struct ndp_msg *msg)
{
	return msg->len - (msg->opts_start - msg->buf);
}

/**
 * ndp_msgrs:
 * @msg: message structure
 *
 * Get RS message structure by passed @msg.
 *
 * Returns: RS message structure or NULL in case the message is not of type RS.
 **/
NDP_EXPORT
struct ndp_msgrs *ndp_msgrs(struct ndp_msg *msg)
{
	if (ndp_msg_type(msg) != NDP_MSG_RS)
		return NULL;
	return &msg->nd_msg.rs;
}

/**
 * ndp_msgra:
 * @msg: message structure
 *
 * Get RA message structure by passed @msg.
 *
 * Returns: RA message structure or NULL in case the message is not of type RA.
 **/
NDP_EXPORT
struct ndp_msgra *ndp_msgra(struct ndp_msg *msg)
{
	if (ndp_msg_type(msg) != NDP_MSG_RA)
		return NULL;
	return &msg->nd_msg.ra;
}

/**
 * ndp_msgns:
 * @msg: message structure
 *
 * Get NS message structure by passed @msg.
 *
 * Returns: NS message structure or NULL in case the message is not of type NS.
 **/
NDP_EXPORT
struct ndp_msgns *ndp_msgns(struct ndp_msg *msg)
{
	if (ndp_msg_type(msg) != NDP_MSG_NS)
		return NULL;
	return &msg->nd_msg.ns;
}

/**
 * ndp_msgna:
 * @msg: message structure
 *
 * Get NA message structure by passed @msg.
 *
 * Returns: NA message structure or NULL in case the message is not of type NA.
 **/
NDP_EXPORT
struct ndp_msgna *ndp_msgna(struct ndp_msg *msg)
{
	if (ndp_msg_type(msg) != NDP_MSG_NA)
		return NULL;
	return &msg->nd_msg.na;
}

/**
 * ndp_msgr:
 * @msg: message structure
 *
 * Get R message structure by passed @msg.
 *
 * Returns: R message structure or NULL in case the message is not of type R.
 **/
NDP_EXPORT
struct ndp_msgr *ndp_msgr(struct ndp_msg *msg)
{
	if (ndp_msg_type(msg) != NDP_MSG_R)
		return NULL;
	return &msg->nd_msg.r;
}

/**
 * ndp_msg_type:
 * @msg: message structure
 *
 * Get type of message.
 *
 * Returns: Message type
 **/
NDP_EXPORT
enum ndp_msg_type ndp_msg_type(struct ndp_msg *msg)
{
	enum ndp_msg_type msg_type;
	int err;

	err = ndp_msg_type_by_raw_type(&msg_type, msg->icmp6_hdr->icmp6_type);
	/* Type should be always set correctly (ensured by ndp_msg_init) */
	BUG_ON(err);
	return msg_type;
}

/* Stamp the raw ICMPv6 type byte for @msg_type into the header. */
static void ndp_msg_type_set(struct ndp_msg *msg, enum ndp_msg_type msg_type)
{
	msg->icmp6_hdr->icmp6_type = ndp_msg_type_info(msg_type)->raw_type;
}

/**
 * ndp_msg_addrto:
 * @msg: message structure
 *
 * Get "to address" of message.
 *
 * Returns: pointer to address.
 **/
NDP_EXPORT
struct in6_addr *ndp_msg_addrto(struct ndp_msg *msg)
{
	return &msg->addrto;
}

/**
 * ndp_msg_ifindex:
 * @msg: message structure
 *
 * Get interface index of message.
 *
 * Returns: Interface index
 **/
NDP_EXPORT
uint32_t ndp_msg_ifindex(struct ndp_msg *msg)
{
	return msg->ifindex;
}

/**
 * ndp_msg_ifindex_set:
 * @msg: message structure
 *
 * Set raw interface index of message.
 **/
NDP_EXPORT
void ndp_msg_ifindex_set(struct ndp_msg *msg, uint32_t ifindex)
{
	msg->ifindex = ifindex;
}

/**
 * ndp_msg_send:
 * @ndp: libndp library context
 * @msg: message structure
 *
 * Send message.
 *
 * Returns: zero on success or negative number in case of an error.
 **/
NDP_EXPORT
int ndp_msg_send(struct ndp *ndp, struct ndp_msg *msg)
{
	return ndp_msg_send_with_flags(ndp, msg, ND_OPT_NORMAL);
}

/**
 * ndp_msg_send_with_flags:
 * @ndp: libndp library context
 * @msg: message structure
 * @flags: option flags within message type
 *
 * Send message.
 *
 * Returns: zero on success or negative number in case of an error.
 **/
NDP_EXPORT
int ndp_msg_send_with_flags(struct ndp *ndp, struct ndp_msg *msg, uint8_t flags)
{
	enum ndp_msg_type msg_type = ndp_msg_type(msg);

	/* Replace an unspecified destination with the type's well-known
	 * multicast address (e.g. all-routers for RS). */
	if (ndp_msg_type_info(msg_type)->addrto_adjust)
		ndp_msg_type_info(msg_type)->addrto_adjust(&msg->addrto);

	switch (msg_type) {
	case NDP_MSG_NA:
		if (flags & ND_OPT_NA_UNSOL) {
			/* Unsolicited NA: override set, solicited clear,
			 * sent to all-nodes. */
			ndp_msgna_flag_override_set((struct ndp_msgna*)&msg->nd_msg, true);
			ndp_msgna_flag_solicited_set((struct ndp_msgna*)&msg->nd_msg, false);
			ndp_msg_addrto_adjust_all_nodes(&msg->addrto);
		} else {
			ndp_msgna_flag_solicited_set((struct ndp_msgna*)&msg->nd_msg, true);
		}
		break;
	default:
		break;
	}

	return mysendto6(ndp->sock, msg->buf, msg->len, 0,
			 &msg->addrto, msg->ifindex);
}

/**
 * SECTION: msgra getters/setters
 * @short_description: Getters and setters for RA message
 */

/**
 * ndp_msgra_curhoplimit:
 * @msgra: RA message structure
 *
 * Get RA curhoplimit.
 *
 * Returns: curhoplimit.
 **/
NDP_EXPORT
uint8_t ndp_msgra_curhoplimit(struct ndp_msgra *msgra)
{
	return msgra->ra->nd_ra_curhoplimit;
}

/**
 * ndp_msgra_curhoplimit_set:
 * @msgra: RA message structure
 *
 * Set RA curhoplimit.
 **/
NDP_EXPORT
void ndp_msgra_curhoplimit_set(struct ndp_msgra *msgra, uint8_t curhoplimit)
{
	msgra->ra->nd_ra_curhoplimit = curhoplimit;
}

/**
 * ndp_msgra_flag_managed:
 * @msgra: RA message structure
 *
 * Get RA managed flag.
 *
 * Returns: managed flag.
 **/
NDP_EXPORT
bool ndp_msgra_flag_managed(struct ndp_msgra *msgra)
{
	return msgra->ra->nd_ra_flags_reserved & ND_RA_FLAG_MANAGED;
}

/**
 * ndp_msgra_flag_managed_set:
 * @msgra: RA message structure
 *
 * Set RA managed flag.
 **/
NDP_EXPORT
void ndp_msgra_flag_managed_set(struct ndp_msgra *msgra, bool flag_managed)
{
	if (flag_managed)
		msgra->ra->nd_ra_flags_reserved |= ND_RA_FLAG_MANAGED;
	else
		msgra->ra->nd_ra_flags_reserved &= ~ND_RA_FLAG_MANAGED;
}

/**
 * ndp_msgra_flag_other:
 * @msgra: RA message structure
 *
 * Get RA other flag.
 *
 * Returns: other flag.
 **/
NDP_EXPORT
bool ndp_msgra_flag_other(struct ndp_msgra *msgra)
{
	return msgra->ra->nd_ra_flags_reserved & ND_RA_FLAG_OTHER;
}

/**
 * ndp_msgra_flag_other_set:
 * @msgra: RA message structure
 *
 * Set RA other flag.
 **/
NDP_EXPORT
void ndp_msgra_flag_other_set(struct ndp_msgra *msgra, bool flag_other)
{
	if (flag_other)
		msgra->ra->nd_ra_flags_reserved |= ND_RA_FLAG_OTHER;
	else
		msgra->ra->nd_ra_flags_reserved &= ~ND_RA_FLAG_OTHER;
}

/**
 * ndp_msgra_flag_home_agent:
 * @msgra: RA message structure
 *
 * Get RA home_agent flag.
 *
 * Returns: home_agent flag.
 **/
NDP_EXPORT
bool ndp_msgra_flag_home_agent(struct ndp_msgra *msgra)
{
	return msgra->ra->nd_ra_flags_reserved & ND_RA_FLAG_HOME_AGENT;
}

/**
 * ndp_msgra_flag_home_agent_set:
 * @msgra: RA message structure
 *
 * Set RA home_agent flag.
 **/
NDP_EXPORT
void ndp_msgra_flag_home_agent_set(struct ndp_msgra *msgra,
				   bool flag_home_agent)
{
	if (flag_home_agent)
		msgra->ra->nd_ra_flags_reserved |= ND_RA_FLAG_HOME_AGENT;
	else
		msgra->ra->nd_ra_flags_reserved &= ~ND_RA_FLAG_HOME_AGENT;
}

/**
 * ndp_msgra_route_preference:
 * @msgra: RA message structure
 *
 * Get route preference.
 *
 * Returns: route preference.
 **/
NDP_EXPORT
enum ndp_route_preference ndp_msgra_route_preference(struct ndp_msgra *msgra)
{
	uint8_t prf = (msgra->ra->nd_ra_flags_reserved >> 3) & 3;

	/* rfc4191 says:
	 * If the Router Lifetime is zero, the preference value MUST be set to
	 * (00) by the sender and MUST be ignored by the receiver.
	 * If the Reserved (10) value is received, the receiver MUST treat the
	 * value as if it were (00).
	 */
	if (prf == 2 || !ndp_msgra_router_lifetime(msgra))
		prf = 0;
	return prf;
}

/**
 * ndp_msgra_route_preference_set:
 * @msgra: RA message structure
 * @pref: preference
 *
 * Set route preference.
 **/
NDP_EXPORT
void ndp_msgra_route_preference_set(struct ndp_msgra *msgra,
				    enum ndp_route_preference pref)
{
	/* Clear the two-bit Prf field, then write the new value. */
	msgra->ra->nd_ra_flags_reserved &= ~(3 << 3);
	msgra->ra->nd_ra_flags_reserved |= (pref << 3);
}

/**
 * ndp_msgra_router_lifetime:
 * @msgra: RA message structure
 *
 * Get RA router lifetime.
 *
 * Returns: router lifetime in seconds.
 **/
NDP_EXPORT
uint16_t ndp_msgra_router_lifetime(struct ndp_msgra *msgra)
{
	return ntohs(msgra->ra->nd_ra_router_lifetime);
}

/**
 * ndp_msgra_router_lifetime_set:
 * @msgra: RA message structure
 *
 * Set RA router lifetime.
 **/
NDP_EXPORT
void ndp_msgra_router_lifetime_set(struct ndp_msgra *msgra,
				   uint16_t router_lifetime)
{
	msgra->ra->nd_ra_router_lifetime = htons(router_lifetime);
}

/**
 * ndp_msgra_reachable_time:
 * @msgra: RA message structure
 *
 * Get RA reachable time.
 *
 * Returns: reachable time in milliseconds.
 **/
NDP_EXPORT
uint32_t ndp_msgra_reachable_time(struct ndp_msgra *msgra)
{
	return ntohl(msgra->ra->nd_ra_reachable);
}

/**
 * ndp_msgra_reachable_time_set:
 * @msgra: RA message structure
 *
 * Set RA reachable time.
 **/
NDP_EXPORT
void ndp_msgra_reachable_time_set(struct ndp_msgra *msgra,
				  uint32_t reachable_time)
{
	msgra->ra->nd_ra_reachable = htonl(reachable_time);
}

/**
 * ndp_msgra_retransmit_time:
 * @msgra: RA message structure
 *
 * Get RA retransmit time.
 *
 * Returns: retransmit time in milliseconds.
 **/
NDP_EXPORT
uint32_t ndp_msgra_retransmit_time(struct ndp_msgra *msgra)
{
	return ntohl(msgra->ra->nd_ra_retransmit);
}

/**
 * ndp_msgra_retransmit_time_set:
 * @msgra: RA message structure
 *
 * Set RA retransmit time.
 **/
NDP_EXPORT
void ndp_msgra_retransmit_time_set(struct ndp_msgra *msgra,
				   uint32_t retransmit_time)
{
	msgra->ra->nd_ra_retransmit = htonl(retransmit_time);
}

/**
 * SECTION: msgna getters/setters
 * @short_description: Getters and setters for NA message
 */

/**
 * ndp_msgna_flag_router:
 * @msgna: NA message structure
 *
 * Get NA router flag.
 *
 * Returns: router flag.
 **/
NDP_EXPORT
bool ndp_msgna_flag_router(struct ndp_msgna *msgna)
{
	return msgna->na->nd_na_flags_reserved & ND_NA_FLAG_ROUTER;
}

/**
 * ndp_msgna_flag_router_set:
 * @msgna: NA message structure
 *
 * Set NA router flag.
 **/
NDP_EXPORT
void ndp_msgna_flag_router_set(struct ndp_msgna *msgna, bool flag_router)
{
	if (flag_router)
		msgna->na->nd_na_flags_reserved |= ND_NA_FLAG_ROUTER;
	else
		msgna->na->nd_na_flags_reserved &= ~ND_NA_FLAG_ROUTER;
}

/**
 * ndp_msgna_flag_solicited:
 * @msgna: NA message structure
 *
 * Get NA solicited flag.
 *
 * Returns: solicited flag.
 **/
NDP_EXPORT
bool ndp_msgna_flag_solicited(struct ndp_msgna *msgna)
{
	return msgna->na->nd_na_flags_reserved & ND_NA_FLAG_SOLICITED;
}

/**
 * ndp_msgna_flag_solicited_set:
 * @msgna: NA message structure
 *
 * Set NA managed flag.
 **/
NDP_EXPORT
void ndp_msgna_flag_solicited_set(struct ndp_msgna *msgna, bool flag_solicited)
{
	if (flag_solicited)
		msgna->na->nd_na_flags_reserved |= ND_NA_FLAG_SOLICITED;
	else
		msgna->na->nd_na_flags_reserved &= ~ND_NA_FLAG_SOLICITED;
}

/**
 * ndp_msgna_flag_override:
 * @msgna: NA message structure
 *
 * Get NA override flag.
 *
 * Returns: override flag.
 **/
NDP_EXPORT
bool ndp_msgna_flag_override(struct ndp_msgna *msgna)
{
	return msgna->na->nd_na_flags_reserved & ND_NA_FLAG_OVERRIDE;
}

/**
 * ndp_msgna_flag_override_set:
 * @msgra: NA message structure
 *
 * Set NA override flag.
 */
NDP_EXPORT
void ndp_msgna_flag_override_set(struct ndp_msgna *msgna, bool flag_override)
{
	if (flag_override)
		msgna->na->nd_na_flags_reserved |= ND_NA_FLAG_OVERRIDE;
	else
		msgna->na->nd_na_flags_reserved &= ~ND_NA_FLAG_OVERRIDE;
}

/**
 * SECTION: msg_opt infrastructure
 * @short_description: Infrastructure for options
 */

/* Static metadata describing one NDP option type; check_valid (optional)
 * performs type-specific validation of a received option. */
struct ndp_msg_opt_type_info {
	uint8_t raw_type;
	size_t raw_struct_size;
	bool (*check_valid)(void *opt_data);
};

/* Validate a received Route Information option.
 * NOTE(review): only the Prf bits are checked here; the option's own
 * Length field (nd_opt_ri_len) is not validated against the prefix data
 * it claims to carry — consumers of this option must bound their copies
 * themselves. Confirm against the upstream hardening of this parser. */
static bool ndp_msg_opt_route_check_valid(void *opt_data)
{
	struct __nd_opt_route_info *ri = opt_data;

	/* rfc4191 says:
	 * If the Reserved (10) value is received, the Route Information Option
	 * MUST be ignored.
	 */
	if (((ri->nd_opt_ri_prf_reserved >> 3) & 3) == 2)
		return false;

	return true;
}

static struct ndp_msg_opt_type_info ndp_msg_opt_type_info_list[] =
{
	[NDP_MSG_OPT_SLLADDR] = {
		.raw_type = ND_OPT_SOURCE_LINKADDR,
	},
	[NDP_MSG_OPT_TLLADDR] = {
		.raw_type = ND_OPT_TARGET_LINKADDR,
	},
	[NDP_MSG_OPT_PREFIX] = {
		.raw_type = ND_OPT_PREFIX_INFORMATION,
		.raw_struct_size = sizeof(struct nd_opt_prefix_info),
	},
	[NDP_MSG_OPT_REDIR] = {
		.raw_type = ND_OPT_REDIRECTED_HEADER,
	},
	[NDP_MSG_OPT_MTU] = {
		.raw_type = ND_OPT_MTU,
		.raw_struct_size = sizeof(struct nd_opt_mtu),
	},
	[NDP_MSG_OPT_ROUTE] = {
		.raw_type = __ND_OPT_ROUTE_INFO,
		.raw_struct_size = sizeof(struct __nd_opt_route_info),
		.check_valid = ndp_msg_opt_route_check_valid,
	},
	[NDP_MSG_OPT_RDNSS] = {
		.raw_type = __ND_OPT_RDNSS,
		.raw_struct_size = sizeof(struct __nd_opt_rdnss),
	},
	[NDP_MSG_OPT_DNSSL] = {
		.raw_type = __ND_OPT_DNSSL,
		.raw_struct_size = sizeof(struct __nd_opt_dnssl),
	},
};

#define NDP_MSG_OPT_TYPE_LIST_SIZE ARRAY_SIZE(ndp_msg_opt_type_info_list)

/* Option metadata lookup by enum value (no bounds check). */
struct ndp_msg_opt_type_info *ndp_msg_opt_type_info(enum ndp_msg_opt_type msg_opt_type)
{
	return &ndp_msg_opt_type_info_list[msg_opt_type];
}

/* Option metadata lookup by on-wire type byte; NULL if unknown. */
struct ndp_msg_opt_type_info *ndp_msg_opt_type_info_by_raw_type(uint8_t raw_type)
{
	struct ndp_msg_opt_type_info *info;
	int i;

	for (i = 0; i < NDP_MSG_OPT_TYPE_LIST_SIZE; i++) {
		info = &ndp_msg_opt_type_info_list[i];
		if (info->raw_type == raw_type)
			return info;
	}
	return NULL;
}

/**
 * ndp_msg_next_opt_offset:
 * @msg: message structure
 * @offset: option payload offset
 * @opt_type: option type
 *
 * Find next offset of option of given type. If offset is -1, start from
 * beginning, otherwise start from the given offset.
 * This funstion is internally used by ndp_msg_opt_for_each_offset() macro.
 *
 * Returns: offset in opt payload of found opt of -1 in case it was not found.
 **/
NDP_EXPORT
int ndp_msg_next_opt_offset(struct ndp_msg *msg, int offset,
			    enum ndp_msg_opt_type opt_type)
{
	unsigned char *opts_start = ndp_msg_payload_opts(msg);
	unsigned char *ptr = opts_start;
	size_t len = ndp_msg_payload_opts_len(msg);
	uint8_t opt_raw_type = ndp_msg_opt_type_info(opt_type)->raw_type;
	bool ignore = true;

	/* When resuming from a previous match, skip that match itself. */
	if (offset == -1) {
		offset = 0;
		ignore = false;
	}

	ptr += offset;
	len -= offset;
	while (len > 0) {
		uint8_t cur_opt_raw_type = ptr[0];
		unsigned int cur_opt_len = ptr[1] << 3; /* convert to bytes */

		/* Zero-length or truncated option terminates the walk. */
		if (!cur_opt_len || len < cur_opt_len)
			break;
		if (cur_opt_raw_type == opt_raw_type && !ignore)
			return ptr - opts_start;
		ptr += cur_opt_len;
		len -= cur_opt_len;
		ignore = false;
	}
	return -1;
}

#define __INVALID_OPT_TYPE_MAGIC 0xff

/*
 * Check for validity of options and mark by magic opt type in case it is not
 * so ndp_msg_next_opt_offset() will ignore it.
 */
static bool ndp_msg_check_opts(struct ndp_msg *msg)
{
	unsigned char *ptr = ndp_msg_payload_opts(msg);
	size_t len = ndp_msg_payload_opts_len(msg);
	struct ndp_msg_opt_type_info *info;

	while (len > 0) {
		uint8_t cur_opt_raw_type = ptr[0];
		unsigned int cur_opt_len = ptr[1] << 3; /* convert to bytes */

		/* A zero-length option would loop forever: reject the
		 * whole message. */
		if (!cur_opt_len)
			return false;
		if (len < cur_opt_len)
			break;
		info = ndp_msg_opt_type_info_by_raw_type(cur_opt_raw_type);
		if (info) {
			/* Neutralize options that are too short for their
			 * fixed struct or fail type-specific validation. */
			if (cur_opt_len < info->raw_struct_size ||
			    (info->check_valid && !info->check_valid(ptr)))
				ptr[0] = __INVALID_OPT_TYPE_MAGIC;
		}
		ptr += cur_opt_len;
		len -= cur_opt_len;
	}
	return true;
}

/**
 * SECTION: msg_opt getters/setters
 * @short_description: Getters and setters for options
 */

/**
 * ndp_msg_opt_slladdr:
 * @msg: message structure
 * @offset: in-message offset
 *
 * Get source linkaddr.
 * User should use this function only inside ndp_msg_opt_for_each_offset()
 * macro loop.
 *
 * Returns: pointer to source linkaddr.
 **/
NDP_EXPORT
unsigned char *ndp_msg_opt_slladdr(struct ndp_msg *msg, int offset)
{
	unsigned char *opt_data = ndp_msg_payload_opts_offset(msg, offset);

	/* Link-layer address starts right after the 2-byte type/len header. */
	return &opt_data[2];
}

/**
 * ndp_msg_opt_slladdr_len:
 * @msg: message structure
 * @offset: in-message offset
 *
 * Get source linkaddr length.
 * User should use this function only inside ndp_msg_opt_for_each_offset()
 * macro loop.
 *
 * Returns: source linkaddr length.
 **/
NDP_EXPORT
size_t ndp_msg_opt_slladdr_len(struct ndp_msg *msg, int offset)
{
	/* Fixed Ethernet address size; non-Ethernet links are not handled. */
	return ETH_ALEN;
}

/**
 * ndp_msg_opt_tlladdr:
 * @msg: message structure
 * @offset: in-message offset
 *
 * Get target linkaddr.
 * User should use this function only inside ndp_msg_opt_for_each_offset()
 * macro loop.
 *
 * Returns: pointer to target linkaddr.
**/ NDP_EXPORT unsigned char *ndp_msg_opt_tlladdr(struct ndp_msg *msg, int offset) { unsigned char *opt_data = ndp_msg_payload_opts_offset(msg, offset); return &opt_data[2]; } /** * ndp_msg_opt_tlladdr_len: * @msg: message structure * @offset: in-message offset * * Get target linkaddr length. * User should use this function only inside ndp_msg_opt_for_each_offset() * macro loop. * * Returns: target linkaddr length. **/ NDP_EXPORT size_t ndp_msg_opt_tlladdr_len(struct ndp_msg *msg, int offset) { return ETH_ALEN; } /** * ndp_msg_opt_prefix: * @msg: message structure * @offset: in-message offset * * Get prefix addr. * User should use this function only inside ndp_msg_opt_for_each_offset() * macro loop. * * Returns: pointer to address. **/ NDP_EXPORT struct in6_addr *ndp_msg_opt_prefix(struct ndp_msg *msg, int offset) { struct nd_opt_prefix_info *pi = ndp_msg_payload_opts_offset(msg, offset); return &pi->nd_opt_pi_prefix; } /** * ndp_msg_opt_prefix_len: * @msg: message structure * @offset: in-message offset * * Get prefix length. * User should use this function only inside ndp_msg_opt_for_each_offset() * macro loop. * * Returns: length of prefix. **/ NDP_EXPORT uint8_t ndp_msg_opt_prefix_len(struct ndp_msg *msg, int offset) { struct nd_opt_prefix_info *pi = ndp_msg_payload_opts_offset(msg, offset); return pi->nd_opt_pi_prefix_len; } /** * ndp_msg_opt_prefix_valid_time: * @msg: message structure * @offset: in-message offset * * Get prefix valid time. * User should use this function only inside ndp_msg_opt_for_each_offset() * macro loop. * * Returns: valid time in seconds, (uint32_t) -1 means infinity. **/ NDP_EXPORT uint32_t ndp_msg_opt_prefix_valid_time(struct ndp_msg *msg, int offset) { struct nd_opt_prefix_info *pi = ndp_msg_payload_opts_offset(msg, offset); return ntohl(pi->nd_opt_pi_valid_time); } /** * ndp_msg_opt_prefix_preferred_time: * @msg: message structure * @offset: in-message offset * * Get prefix preferred time. 
* User should use this function only inside ndp_msg_opt_for_each_offset()
 * macro loop.
 *
 * Returns: preferred time in seconds, (uint32_t) -1 means infinity.
 **/
NDP_EXPORT
uint32_t ndp_msg_opt_prefix_preferred_time(struct ndp_msg *msg, int offset)
{
	struct nd_opt_prefix_info *pi = ndp_msg_payload_opts_offset(msg, offset);

	/* stored in network byte order on the wire */
	return ntohl(pi->nd_opt_pi_preferred_time);
}

/**
 * ndp_msg_opt_prefix_flag_on_link:
 * @msg: message structure
 * @offset: in-message offset
 *
 * Get on-link flag.
 * User should use this function only inside ndp_msg_opt_for_each_offset()
 * macro loop.
 *
 * Returns: on-link flag.
 **/
NDP_EXPORT
bool ndp_msg_opt_prefix_flag_on_link(struct ndp_msg *msg, int offset)
{
	struct nd_opt_prefix_info *pi = ndp_msg_payload_opts_offset(msg, offset);

	/* test the L bit in the flags/reserved byte */
	return pi->nd_opt_pi_flags_reserved & ND_OPT_PI_FLAG_ONLINK;
}

/**
 * ndp_msg_opt_prefix_flag_auto_addr_conf:
 * @msg: message structure
 * @offset: in-message offset
 *
 * Get autonomous address-configuration flag.
 * User should use this function only inside ndp_msg_opt_for_each_offset()
 * macro loop.
 *
 * Returns: autonomous address-configuration flag.
 **/
NDP_EXPORT
bool ndp_msg_opt_prefix_flag_auto_addr_conf(struct ndp_msg *msg, int offset)
{
	struct nd_opt_prefix_info *pi = ndp_msg_payload_opts_offset(msg, offset);

	/* test the A bit in the flags/reserved byte */
	return pi->nd_opt_pi_flags_reserved & ND_OPT_PI_FLAG_AUTO;
}

/**
 * ndp_msg_opt_prefix_flag_router_addr:
 * @msg: message structure
 * @offset: in-message offset
 *
 * Get router address flag.
 * User should use this function only inside ndp_msg_opt_for_each_offset()
 * macro loop.
 *
 * Returns: router address flag.
 **/
NDP_EXPORT
bool ndp_msg_opt_prefix_flag_router_addr(struct ndp_msg *msg, int offset)
{
	struct nd_opt_prefix_info *pi = ndp_msg_payload_opts_offset(msg, offset);

	/* test the R bit in the flags/reserved byte */
	return pi->nd_opt_pi_flags_reserved & ND_OPT_PI_FLAG_RADDR;
}

/**
 * ndp_msg_opt_mtu:
 * @msg: message structure
 * @offset: in-message offset
 *
 * Get MTU. User should check if mtu option is present before calling this.
 *
 * Returns: MTU.
**/ NDP_EXPORT uint32_t ndp_msg_opt_mtu(struct ndp_msg *msg, int offset) { struct nd_opt_mtu *mtu = ndp_msg_payload_opts_offset(msg, offset); return ntohl(mtu->nd_opt_mtu_mtu); } /** * ndp_msg_opt_route_prefix: * @msg: message structure * @offset: in-message offset * * Get route prefix addr. * User should use this function only inside ndp_msg_opt_for_each_offset() * macro loop. * * Returns: address. **/ NDP_EXPORT struct in6_addr *ndp_msg_opt_route_prefix(struct ndp_msg *msg, int offset) { static struct in6_addr prefix; struct __nd_opt_route_info *ri = ndp_msg_payload_opts_offset(msg, offset); memset(&prefix, 0, sizeof(prefix)); memcpy(&prefix, &ri->nd_opt_ri_prefix, (ri->nd_opt_ri_len - 1) << 3); return &prefix; } /** * ndp_msg_opt_route_prefix_len: * @msg: message structure * @offset: in-message offset * * Get route prefix length. * User should use this function only inside ndp_msg_opt_for_each_offset() * macro loop. * * Returns: length of route prefix. **/ NDP_EXPORT uint8_t ndp_msg_opt_route_prefix_len(struct ndp_msg *msg, int offset) { struct __nd_opt_route_info *ri = ndp_msg_payload_opts_offset(msg, offset); return ri->nd_opt_ri_prefix_len; } /** * ndp_msg_opt_route_lifetime: * @msg: message structure * @offset: in-message offset * * Get route lifetime. * User should use this function only inside ndp_msg_opt_for_each_offset() * macro loop. * * Returns: route lifetime in seconds, (uint32_t) -1 means infinity. **/ NDP_EXPORT uint32_t ndp_msg_opt_route_lifetime(struct ndp_msg *msg, int offset) { struct __nd_opt_route_info *ri = ndp_msg_payload_opts_offset(msg, offset); return ntohl(ri->nd_opt_ri_lifetime); } /** * ndp_msg_opt_route_preference: * @msg: message structure * @offset: in-message offset * * Get route preference. * User should use this function only inside ndp_msg_opt_for_each_offset() * macro loop. * * Returns: route preference. 
**/
NDP_EXPORT
enum ndp_route_preference ndp_msg_opt_route_preference(struct ndp_msg *msg,
						       int offset)
{
	struct __nd_opt_route_info *ri = ndp_msg_payload_opts_offset(msg, offset);

	/* preference lives in bits 3-4 of the prf/reserved byte */
	return (ri->nd_opt_ri_prf_reserved >> 3) & 3;
}

/**
 * ndp_msg_opt_rdnss_lifetime:
 * @msg: message structure
 * @offset: in-message offset
 *
 * Get Recursive DNS Server lifetime.
 * User should use this function only inside ndp_msg_opt_for_each_offset()
 * macro loop.
 *
 * Returns: route lifetime in seconds, (uint32_t) -1 means infinity.
 **/
NDP_EXPORT
uint32_t ndp_msg_opt_rdnss_lifetime(struct ndp_msg *msg, int offset)
{
	struct __nd_opt_rdnss *rdnss = ndp_msg_payload_opts_offset(msg, offset);

	/* stored in network byte order on the wire */
	return ntohl(rdnss->nd_opt_rdnss_lifetime);
}

/**
 * ndp_msg_opt_rdnss_addr:
 * @msg: message structure
 * @offset: in-message offset
 * @addr_index: address index
 *
 * Get Recursive DNS Server address.
 * User should use this function only inside ndp_msg_opt_for_each_offset()
 * macro loop.
 *
 * Returns: address.
 **/
NDP_EXPORT
struct in6_addr *ndp_msg_opt_rdnss_addr(struct ndp_msg *msg, int offset,
					int addr_index)
{
	/* NOTE(review): result is a static buffer, overwritten by the next
	 * call — not thread-safe; callers must copy if they keep it.
	 */
	static struct in6_addr addr;
	struct __nd_opt_rdnss *rdnss = ndp_msg_payload_opts_offset(msg, offset);
	size_t len = rdnss->nd_opt_rdnss_len << 3; /* convert to bytes */

	/* bytes available for addresses = option size minus fixed header */
	len -= in_struct_offset(struct __nd_opt_rdnss, nd_opt_rdnss_addresses);
	/* reject indices past the last complete 16-byte address */
	if ((addr_index + 1) * sizeof(addr) > len)
		return NULL;
	memcpy(&addr, &rdnss->nd_opt_rdnss_addresses[addr_index * sizeof(addr)],
	       sizeof(addr));
	return &addr;
}

/**
 * ndp_msg_opt_dnssl_lifetime:
 * @msg: message structure
 * @offset: in-message offset
 *
 * Get DNS Search List lifetime.
 * User should use this function only inside ndp_msg_opt_for_each_offset()
 * macro loop.
 *
 * Returns: route lifetime in seconds, (uint32_t) -1 means infinity.
**/
NDP_EXPORT
uint32_t ndp_msg_opt_dnssl_lifetime(struct ndp_msg *msg, int offset)
{
	struct __nd_opt_dnssl *dnssl = ndp_msg_payload_opts_offset(msg, offset);

	/* stored in network byte order on the wire */
	return ntohl(dnssl->nd_opt_dnssl_lifetime);
}

/**
 * ndp_msg_opt_dnssl_domain:
 * @msg: message structure
 * @offset: in-message offset
 * @domain_index: domain index
 *
 * Get DNS Search List domain.
 * User should use this function only inside ndp_msg_opt_for_each_offset()
 * macro loop.
 *
 * Returns: address.
 **/
NDP_EXPORT
char *ndp_msg_opt_dnssl_domain(struct ndp_msg *msg, int offset,
			       int domain_index)
{
	int i;
	/* NOTE(review): static buffer, overwritten by the next call —
	 * not thread-safe; callers must copy if they keep the string.
	 */
	static char buf[256];
	struct __nd_opt_dnssl *dnssl = ndp_msg_payload_opts_offset(msg, offset);
	size_t len = dnssl->nd_opt_dnssl_len << 3; /* convert to bytes */
	char *ptr;

	/* bytes available for domain data = option size minus fixed header */
	len -= in_struct_offset(struct __nd_opt_dnssl, nd_opt_dnssl_domains);
	ptr = dnssl->nd_opt_dnssl_domains;
	i = 0;
	/* Outer loop: one iteration per encoded domain name.
	 * Inner loop: decode one domain, which is a sequence of
	 * length-prefixed labels terminated by a zero length byte;
	 * labels are joined with '.' into buf.
	 */
	while (len > 0) {
		size_t buf_len = 0;

		while (len > 0) {
			uint8_t dom_len = *ptr;

			ptr++;
			len--;
			if (!dom_len)
				break;		/* end of this domain name */
			/* label must fit in the remaining option bytes */
			if (dom_len > len)
				return NULL;
			/* label plus joining '.' must fit in buf */
			if (buf_len + dom_len + 1 > sizeof(buf))
				return NULL;
			memcpy(buf + buf_len, ptr, dom_len);
			buf[buf_len + dom_len] = '.';
			ptr += dom_len;
			len -= dom_len;
			buf_len += dom_len + 1;
		}
		if (!buf_len)
			break;		/* empty entry: no more domains */
		buf[buf_len - 1] = '\0'; /* overwrite final '.' */
		if (i++ == domain_index)
			return buf;
	}
	return NULL;
}

static int ndp_call_handlers(struct ndp *ndp, struct ndp_msg *msg);

/*
 * Receive one message from the raw ICMPv6 socket, validate it and pass
 * it to the registered handlers. Packets with a bad hop limit, a short
 * ICMPv6 header, an unknown type, an invalid body or unusable options
 * are silently dropped (err stays 0 so the event loop keeps running).
 */
static int ndp_sock_recv(struct ndp *ndp)
{
	struct ndp_msg *msg;
	enum ndp_msg_type msg_type;
	size_t len;
	int err;

	msg = ndp_msg_alloc();
	if (!msg)
		return -ENOMEM;

	len = ndp_msg_payload_maxlen(msg);
	err = myrecvfrom6(ndp->sock, msg->buf, &len, 0,
			  &msg->addrto, &msg->ifindex, &msg->hoplimit);
	if (err) {
		err(ndp, "Failed to receive message");
		goto free_msg;
	}
	dbg(ndp, "rcvd from: %s, ifindex: %u, hoplimit: %d",
	    str_in6_addr(&msg->addrto), msg->ifindex, msg->hoplimit);

	/* RFC 4861: valid ND packets must arrive with hop limit 255 */
	if (msg->hoplimit != 255) {
		warn(ndp, "ignoring packet with bad hop limit (%d)", msg->hoplimit);
		err = 0;
		goto free_msg;
	}

	if (len < sizeof(*msg->icmp6_hdr)) {
		warn(ndp, "rcvd icmp6 packet too short (%luB)", len);
		err = 0;
		goto free_msg;
	}
	err = ndp_msg_type_by_raw_type(&msg_type, msg->icmp6_hdr->icmp6_type);
	if (err) {
		/* unknown ICMPv6 type: not an error, just ignore it */
		err = 0;
		goto free_msg;
	}
	ndp_msg_init(msg, msg_type);
	ndp_msg_payload_len_set(msg, len);

	if (!ndp_msg_check_valid(msg)) {
		warn(ndp, "rcvd invalid ND message");
		err = 0;
		goto free_msg;
	}

	dbg(ndp, "rcvd %s, len: %zuB",
	    ndp_msg_type_info(msg_type)->strabbr, len);

	if (!ndp_msg_check_opts(msg)) {
		err = 0;
		goto free_msg;
	}

	err = ndp_call_handlers(ndp, msg);;

free_msg:
	ndp_msg_destroy(msg);
	return err;
}

/**
 * SECTION: msgrcv handler
 * @short_description: msgrcv handler and related stuff
 */

/* One registered receive handler: callback plus its match criteria. */
struct ndp_msgrcv_handler_item {
	struct list_item		list;
	ndp_msgrcv_handler_func_t	func;
	enum ndp_msg_type		msg_type;
	uint32_t			ifindex;
	void				*priv;
};

/* Find an already-registered handler with exactly the same callback,
 * message type, interface index and private data, or NULL.
 */
static struct ndp_msgrcv_handler_item *
ndp_find_msgrcv_handler_item(struct ndp *ndp,
			     ndp_msgrcv_handler_func_t func,
			     enum ndp_msg_type msg_type, uint32_t ifindex,
			     void *priv)
{
	struct ndp_msgrcv_handler_item *handler_item;

	list_for_each_node_entry(handler_item, &ndp->msgrcv_handler_list, list)
		if (handler_item->func == func &&
		    handler_item->msg_type == msg_type &&
		    handler_item->ifindex == ifindex &&
		    handler_item->priv == priv)
			return handler_item;
	return NULL;
}

/* Dispatch a validated message to every matching handler; stop and
 * propagate the first non-zero handler return value.
 */
static int ndp_call_handlers(struct ndp *ndp, struct ndp_msg *msg)
{
	struct ndp_msgrcv_handler_item *handler_item;
	int err;

	list_for_each_node_entry(handler_item,
				 &ndp->msgrcv_handler_list, list) {
		if (handler_item->msg_type != NDP_MSG_ALL &&
		    handler_item->msg_type != ndp_msg_type(msg))
			continue;
		/* ifindex 0 means "any interface" */
		if (handler_item->ifindex &&
		    handler_item->ifindex != msg->ifindex)
			continue;
		err = handler_item->func(ndp, msg, handler_item->priv);
		if (err)
			return err;
	}
	return 0;
}

/**
 * ndp_msgrcv_handler_register:
 * @ndp: libndp library context
 * @func: handler function for received messages
 * @msg_type: message type to match
 * @ifindex: interface index to match
 * @priv: func private data
 *
 * Registers custom @func handler which is going to be called when
 * specified @msg_type is received. If one wants the function to be
 * called for all message types, pass NDP_MSG_ALL.
 * Note that @ifindex can be set to filter only messages received on
 * specified interface. For @func to be called for messages received on
 * all interfaces, just set 0.
 *
 * Returns: zero on success or negative number in case of an error.
 **/
NDP_EXPORT
int ndp_msgrcv_handler_register(struct ndp *ndp, ndp_msgrcv_handler_func_t func,
				enum ndp_msg_type msg_type, uint32_t ifindex,
				void *priv)
{
	struct ndp_msgrcv_handler_item *handler_item;

	/* refuse duplicate registrations of the identical tuple */
	if (ndp_find_msgrcv_handler_item(ndp, func, msg_type, ifindex, priv))
		return -EEXIST;
	if (!func)
		return -EINVAL;
	handler_item = malloc(sizeof(*handler_item));
	if (!handler_item)
		return -ENOMEM;
	handler_item->func = func;
	handler_item->msg_type = msg_type;
	handler_item->ifindex = ifindex;
	handler_item->priv = priv;
	list_add_tail(&ndp->msgrcv_handler_list, &handler_item->list);
	return 0;
}

/**
 * ndp_msgrcv_handler_unregister:
 * @ndp: libndp library context
 * @func: handler function for received messages
 * @msg_type: message type to match
 * @ifindex: interface index to match
 * @priv: func private data
 *
 * Unregisters custom @func handler.
*
 **/
NDP_EXPORT
void ndp_msgrcv_handler_unregister(struct ndp *ndp, ndp_msgrcv_handler_func_t func,
				   enum ndp_msg_type msg_type, uint32_t ifindex,
				   void *priv)
{
	struct ndp_msgrcv_handler_item *handler_item;

	handler_item = ndp_find_msgrcv_handler_item(ndp, func, msg_type,
						    ifindex, priv);
	/* silently ignore attempts to unregister an unknown handler */
	if (!handler_item)
		return;
	list_del(&handler_item->list);
	free(handler_item);
}

/**
 * SECTION: event fd
 * @short_description: event filedescriptor related stuff
 */

/**
 * ndp_get_eventfd:
 * @ndp: libndp library context
 *
 * Get eventfd file descriptor.
 *
 * Returns: fd.
 **/
NDP_EXPORT
int ndp_get_eventfd(struct ndp *ndp)
{
	/* the raw ICMPv6 socket doubles as the pollable event fd */
	return ndp->sock;
}

/**
 * ndp_call_eventfd_handler:
 * @ndp: libndp library context
 *
 * Call eventfd handler.
 *
 * Returns: zero on success or negative number in case of an error.
 **/
NDP_EXPORT
int ndp_call_eventfd_handler(struct ndp *ndp)
{
	return ndp_sock_recv(ndp);
}

/**
 * ndp_callall_eventfd_handler:
 * @ndp: libndp library context
 *
 * Call all pending events on eventfd handler.
 *
 * Returns: zero on success or negative number in case of an error.
 **/
NDP_EXPORT
int ndp_callall_eventfd_handler(struct ndp *ndp)
{
	fd_set rfds;
	int fdmax;
	struct timeval tv;
	int fd = ndp_get_eventfd(ndp);
	int ret;
	int err;

	/* zero timeout: select() polls without blocking */
	memset(&tv, 0, sizeof(tv));
	FD_ZERO(&rfds);
	FD_SET(fd, &rfds);
	fdmax = fd + 1;
	/* drain the socket: process messages until none are pending */
	while (true) {
		ret = select(fdmax, &rfds, NULL, NULL, &tv);
		if (ret == -1)
			return -errno;
		if (!FD_ISSET(fd, &rfds))
			return 0;
		err = ndp_call_eventfd_handler(ndp);
		if (err)
			return err;
	}
}

/**
 * SECTION: Exported context functions
 * @short_description: Core context functions exported to user
 */

/**
 * ndp_open:
 * @p_ndp: pointer where new libndp library context address will be stored
 *
 * Allocates and initializes library context, opens raw socket.
 *
 * Returns: zero on success or negative number in case of an error.
**/
NDP_EXPORT
int ndp_open(struct ndp **p_ndp)
{
	struct ndp *ndp;
	const char *env;
	int err;

	ndp = myzalloc(sizeof(*ndp));
	if (!ndp)
		return -ENOMEM;
	/* default logging: errors only, to stderr */
	ndp->log_fn = log_stderr;
	ndp->log_priority = LOG_ERR;
	/* environment overwrites config */
	env = getenv("NDP_LOG");
	if (env != NULL)
		ndp_set_log_priority(ndp, log_priority(env));

	dbg(ndp, "ndp context %p created.", ndp);
	dbg(ndp, "log_priority=%d", ndp->log_priority);

	list_init(&ndp->msgrcv_handler_list);

	err = ndp_sock_open(ndp);
	if (err)
		goto free_ndp;

	*p_ndp = ndp;
	return 0;
free_ndp:
	free(ndp);
	return err;
}

/**
 * ndp_close:
 * @ndp: libndp library context
 *
 * Do library context cleanup.
 **/
NDP_EXPORT
void ndp_close(struct ndp *ndp)
{
	/* NOTE(review): registered handler items are not freed here;
	 * presumably callers unregister them first — confirm.
	 */
	ndp_sock_close(ndp);
	free(ndp);
}
./CrossVul/dataset_final_sorted/CWE-284/c/bad_5016_0
crossvul-cpp_data_bad_5349_2
/* * TCP over IPv6 * Linux INET6 implementation * * Authors: * Pedro Roque <roque@di.fc.ul.pt> * * Based on: * linux/net/ipv4/tcp.c * linux/net/ipv4/tcp_input.c * linux/net/ipv4/tcp_output.c * * Fixes: * Hideaki YOSHIFUJI : sin6_scope_id support * YOSHIFUJI Hideaki @USAGI and: Support IPV6_V6ONLY socket option, which * Alexey Kuznetsov allow both IPv4 and IPv6 sockets to bind * a single port at the same time. * YOSHIFUJI Hideaki @USAGI: convert /proc/net/tcp6 to seq_file. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/bottom_half.h> #include <linux/module.h> #include <linux/errno.h> #include <linux/types.h> #include <linux/socket.h> #include <linux/sockios.h> #include <linux/net.h> #include <linux/jiffies.h> #include <linux/in.h> #include <linux/in6.h> #include <linux/netdevice.h> #include <linux/init.h> #include <linux/jhash.h> #include <linux/ipsec.h> #include <linux/times.h> #include <linux/slab.h> #include <linux/uaccess.h> #include <linux/ipv6.h> #include <linux/icmpv6.h> #include <linux/random.h> #include <net/tcp.h> #include <net/ndisc.h> #include <net/inet6_hashtables.h> #include <net/inet6_connection_sock.h> #include <net/ipv6.h> #include <net/transp_v6.h> #include <net/addrconf.h> #include <net/ip6_route.h> #include <net/ip6_checksum.h> #include <net/inet_ecn.h> #include <net/protocol.h> #include <net/xfrm.h> #include <net/snmp.h> #include <net/dsfield.h> #include <net/timewait_sock.h> #include <net/inet_common.h> #include <net/secure_seq.h> #include <net/busy_poll.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <crypto/hash.h> #include <linux/scatterlist.h> static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb); static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb, struct 
request_sock *req);
static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);

static const struct inet_connection_sock_af_ops ipv6_mapped;
static const struct inet_connection_sock_af_ops ipv6_specific;
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
#else
/* MD5 disabled: stub lookup that never finds a key */
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
						   const struct in6_addr *addr)
{
	return NULL;
}
#endif

/* Cache the skb's route on the socket for later input-path reuse,
 * together with the incoming ifindex and the route's cookie.
 */
static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);

	if (dst && dst_hold_safe(dst)) {
		const struct rt6_info *rt = (const struct rt6_info *)dst;

		sk->sk_rx_dst = dst;
		inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
		inet6_sk(sk)->rx_dst_cookie = rt6_get_cookie(rt);
	}
}

/* Derive the initial sequence number from the address/port 4-tuple. */
static __u32 tcp_v6_init_sequence(const struct sk_buff *skb)
{
	return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
					    ipv6_hdr(skb)->saddr.s6_addr32,
					    tcp_hdr(skb)->dest,
					    tcp_hdr(skb)->source);
}

/* Active open (connect()) for an AF_INET6 TCP socket. Handles flow
 * labels, link-local scope ids, the v4-mapped fallback to tcp_v4_connect(),
 * route lookup, source address selection and the SYN transmission.
 */
static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
			  int addr_len)
{
	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct in6_addr *saddr = NULL, *final_p, final;
	struct ipv6_txoptions *opt;
	struct flowi6 fl6;
	struct dst_entry *dst;
	int addr_type;
	int err;

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	if (usin->sin6_family != AF_INET6)
		return -EAFNOSUPPORT;

	memset(&fl6, 0, sizeof(fl6));

	if (np->sndflow) {
		fl6.flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
		IP6_ECN_flow_init(fl6.flowlabel);
		if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
			struct ip6_flowlabel *flowlabel;
			/* the requested flow label must already exist */
			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
			if (!flowlabel)
				return -EINVAL;
			fl6_sock_release(flowlabel);
		}
	}

	/*
	 *	connect() to INADDR_ANY means loopback (BSD'ism).
*/
	if (ipv6_addr_any(&usin->sin6_addr))
		usin->sin6_addr.s6_addr[15] = 0x1;

	addr_type = ipv6_addr_type(&usin->sin6_addr);

	if (addr_type & IPV6_ADDR_MULTICAST)
		return -ENETUNREACH;

	if (addr_type&IPV6_ADDR_LINKLOCAL) {
		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    usin->sin6_scope_id) {
			/* If interface is set while binding, indices
			 * must coincide.
			 */
			if (sk->sk_bound_dev_if &&
			    sk->sk_bound_dev_if != usin->sin6_scope_id)
				return -EINVAL;

			sk->sk_bound_dev_if = usin->sin6_scope_id;
		}

		/* Connect to link-local address requires an interface */
		if (!sk->sk_bound_dev_if)
			return -EINVAL;
	}

	/* destination changed: stale timestamp state must be reset */
	if (tp->rx_opt.ts_recent_stamp &&
	    !ipv6_addr_equal(&sk->sk_v6_daddr, &usin->sin6_addr)) {
		tp->rx_opt.ts_recent = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		tp->write_seq = 0;
	}

	sk->sk_v6_daddr = usin->sin6_addr;
	np->flow_label = fl6.flowlabel;

	/*
	 *	TCP over IPv4
	 */

	if (addr_type == IPV6_ADDR_MAPPED) {
		u32 exthdrlen = icsk->icsk_ext_hdr_len;
		struct sockaddr_in sin;

		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");

		if (__ipv6_only_sock(sk))
			return -ENETUNREACH;

		sin.sin_family = AF_INET;
		sin.sin_port = usin->sin6_port;
		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

		/* switch the socket over to the IPv4 operations */
		icsk->icsk_af_ops = &ipv6_mapped;
		sk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		tp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));

		if (err) {
			/* v4 connect failed: restore the IPv6 ops */
			icsk->icsk_ext_hdr_len = exthdrlen;
			icsk->icsk_af_ops = &ipv6_specific;
			sk->sk_backlog_rcv = tcp_v6_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
			tp->af_specific = &tcp_sock_ipv6_specific;
#endif
			goto failure;
		}
		np->saddr = sk->sk_v6_rcv_saddr;

		return err;
	}

	if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr))
		saddr = &sk->sk_v6_rcv_saddr;

	fl6.flowi6_proto = IPPROTO_TCP;
	fl6.daddr = sk->sk_v6_daddr;
	fl6.saddr = saddr ?
*saddr : np->saddr;
	fl6.flowi6_oif = sk->sk_bound_dev_if;
	fl6.flowi6_mark = sk->sk_mark;
	fl6.fl6_dport = usin->sin6_port;
	fl6.fl6_sport = inet->inet_sport;

	opt = rcu_dereference_protected(np->opt, lockdep_sock_is_held(sk));
	final_p = fl6_update_dst(&fl6, opt, &final);

	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));

	dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		goto failure;
	}

	/* no bound source address: take the one the route chose */
	if (!saddr) {
		saddr = &fl6.saddr;
		sk->sk_v6_rcv_saddr = *saddr;
	}

	/* set the source address */
	np->saddr = *saddr;
	inet->inet_rcv_saddr = LOOPBACK4_IPV6;

	sk->sk_gso_type = SKB_GSO_TCPV6;
	ip6_dst_store(sk, dst, NULL, NULL);

	if (tcp_death_row.sysctl_tw_recycle &&
	    !tp->rx_opt.ts_recent_stamp &&
	    ipv6_addr_equal(&fl6.daddr, &sk->sk_v6_daddr))
		tcp_fetch_timewait_stamp(sk, dst);

	icsk->icsk_ext_hdr_len = 0;
	if (opt)
		icsk->icsk_ext_hdr_len = opt->opt_flen +
					 opt->opt_nflen;

	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);

	inet->inet_dport = usin->sin6_port;

	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet6_hash_connect(&tcp_death_row, sk);
	if (err)
		goto late_failure;

	sk_set_txhash(sk);

	if (!tp->write_seq && likely(!tp->repair))
		tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
							     sk->sk_v6_daddr.s6_addr32,
							     inet->inet_sport,
							     inet->inet_dport);

	err = tcp_connect(sk);
	if (err)
		goto late_failure;

	return 0;

late_failure:
	tcp_set_state(sk, TCP_CLOSE);
	__sk_dst_reset(sk);
failure:
	inet->inet_dport = 0;
	sk->sk_route_caps = 0;
	return err;
}

/* Deferred PMTU handler: re-check the route and shrink the MSS,
 * retransmitting if our cached PMTU was larger than the new one.
 */
static void tcp_v6_mtu_reduced(struct sock *sk)
{
	struct dst_entry *dst;

	if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
		return;

	dst = inet6_csk_update_pmtu(sk, tcp_sk(sk)->mtu_info);
	if (!dst)
		return;

	if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
		tcp_sync_mss(sk, dst_mtu(dst));
		tcp_simple_retransmit(sk);
	}
}

/* ICMPv6 error handler for TCP: look up the affected socket and react
 * to redirects, packet-too-big and fatal errors.
 */
static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		       u8 type, u8 code, int offset, __be32 info)
{
	const struct ipv6hdr *hdr = (const struct ipv6hdr
if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	ahash_request_set_crypt(req, NULL, md5_hash, 0);
	if (crypto_ahash_final(req))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	/* on any failure leave a zeroed digest and report error (1) */
	memset(md5_hash, 0, 16);
	return 1;
}
#endif

/* Verify the TCP MD5 signature option of an inbound segment against the
 * key configured for the peer address. Returns true when the segment
 * must be dropped (missing, unexpected or mismatching hash).
 */
static bool tcp_v6_inbound_md5_hash(const struct sock *sk,
				    const struct sk_buff *skb)
{
#ifdef CONFIG_TCP_MD5SIG
	const __u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
	const struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	u8 newhash[16];

	hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return false;

	if (hash_expected && !hash_location) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return true;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return true;
	}

	/* check the signature */
	genhash = tcp_v6_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5FAILURE);
		net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
				     genhash ? "failed" : "mismatch",
				     &ip6h->saddr, ntohs(th->source),
				     &ip6h->daddr, ntohs(th->dest));
		return true;
	}
#endif
	return false;
}

/* Fill the IPv6-specific fields of a freshly minted request sock from
 * the incoming SYN, keeping the skb when IPv6 packet options must be
 * replayed on the SYN-ACK.
 */
static void tcp_v6_init_req(struct request_sock *req,
			    const struct sock *sk_listener,
			    struct sk_buff *skb)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	const struct ipv6_pinfo *np = inet6_sk(sk_listener);

	ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
	ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;

	/* So that link locals have meaning */
	if (!sk_listener->sk_bound_dev_if &&
	    ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
		ireq->ir_iif = tcp_v6_iif(skb);

	if (!TCP_SKB_CB(skb)->tcp_tw_isn &&
	    (ipv6_opt_accepted(sk_listener, skb, &TCP_SKB_CB(skb)->header.h6) ||
	     np->rxopt.bits.rxinfo ||
	     np->rxopt.bits.rxoinfo || np->rxopt.bits.rxhlim ||
	     np->rxopt.bits.rxohlim || np->repflow)) {
		/* keep a reference on the skb for later option replay */
		atomic_inc(&skb->users);
		ireq->pktopts = skb;
	}
}

/* Route lookup for an incoming connection request. */
static struct dst_entry *tcp_v6_route_req(const struct sock *sk,
					  struct flowi *fl,
					  const struct request_sock *req,
					  bool *strict)
{
	if (strict)
		*strict = true;
	return inet6_csk_route_req(sk, &fl->u.ip6, req, IPPROTO_TCP);
}

struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
	.family		=	AF_INET6,
	.obj_size	=	sizeof(struct tcp6_request_sock),
	.rtx_syn_ack	=	tcp_rtx_synack,
	.send_ack	=	tcp_v6_reqsk_send_ack,
	.destructor	=	tcp_v6_reqsk_destructor,
	.send_reset	=	tcp_v6_send_reset,
	.syn_ack_timeout =	tcp_syn_ack_timeout,
};

static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
	.mss_clamp	=	IPV6_MIN_MTU - sizeof(struct tcphdr) -
				sizeof(struct ipv6hdr),
#ifdef CONFIG_TCP_MD5SIG
	.req_md5_lookup	=	tcp_v6_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
#endif
	.init_req	=	tcp_v6_init_req,
#ifdef CONFIG_SYN_COOKIES
	.cookie_init_seq =	cookie_v6_init_sequence,
#endif
	.route_req	=	tcp_v6_route_req,
	.init_seq	=	tcp_v6_init_sequence,
	.send_synack	=	tcp_v6_send_synack,
};

static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32 seq,
				 u32 ack, u32 win, u32 tsval, u32 tsecr,
				 int oif,
struct tcp_md5sig_key *key, int rst, u8 tclass, __be32 label) { const struct tcphdr *th = tcp_hdr(skb); struct tcphdr *t1; struct sk_buff *buff; struct flowi6 fl6; struct net *net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev); struct sock *ctl_sk = net->ipv6.tcp_sk; unsigned int tot_len = sizeof(struct tcphdr); struct dst_entry *dst; __be32 *topt; if (tsecr) tot_len += TCPOLEN_TSTAMP_ALIGNED; #ifdef CONFIG_TCP_MD5SIG if (key) tot_len += TCPOLEN_MD5SIG_ALIGNED; #endif buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len, GFP_ATOMIC); if (!buff) return; skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len); t1 = (struct tcphdr *) skb_push(buff, tot_len); skb_reset_transport_header(buff); /* Swap the send and the receive. */ memset(t1, 0, sizeof(*t1)); t1->dest = th->source; t1->source = th->dest; t1->doff = tot_len / 4; t1->seq = htonl(seq); t1->ack_seq = htonl(ack); t1->ack = !rst || !th->ack; t1->rst = rst; t1->window = htons(win); topt = (__be32 *)(t1 + 1); if (tsecr) { *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) | (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP); *topt++ = htonl(tsval); *topt++ = htonl(tsecr); } #ifdef CONFIG_TCP_MD5SIG if (key) { *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) | (TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG); tcp_v6_md5_hash_hdr((__u8 *)topt, key, &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr, t1); } #endif memset(&fl6, 0, sizeof(fl6)); fl6.daddr = ipv6_hdr(skb)->saddr; fl6.saddr = ipv6_hdr(skb)->daddr; fl6.flowlabel = label; buff->ip_summed = CHECKSUM_PARTIAL; buff->csum = 0; __tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr); fl6.flowi6_proto = IPPROTO_TCP; if (rt6_need_strict(&fl6.daddr) && !oif) fl6.flowi6_oif = tcp_v6_iif(skb); else { if (!oif && netif_index_is_l3_master(net, skb->skb_iif)) oif = skb->skb_iif; fl6.flowi6_oif = oif; } fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark); fl6.fl6_dport = t1->dest; fl6.fl6_sport = t1->source; security_skb_classify_flow(skb, flowi6_to_flowi(&fl6)); /* 
Pass a socket to ip6_dst_lookup either it is for RST * Underlying function will use this to retrieve the network * namespace */ dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL); if (!IS_ERR(dst)) { skb_dst_set(buff, dst); ip6_xmit(ctl_sk, buff, &fl6, NULL, tclass); TCP_INC_STATS(net, TCP_MIB_OUTSEGS); if (rst) TCP_INC_STATS(net, TCP_MIB_OUTRSTS); return; } kfree_skb(buff); } static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb) { const struct tcphdr *th = tcp_hdr(skb); u32 seq = 0, ack_seq = 0; struct tcp_md5sig_key *key = NULL; #ifdef CONFIG_TCP_MD5SIG const __u8 *hash_location = NULL; struct ipv6hdr *ipv6h = ipv6_hdr(skb); unsigned char newhash[16]; int genhash; struct sock *sk1 = NULL; #endif int oif; if (th->rst) return; /* If sk not NULL, it means we did a successful lookup and incoming * route had to be correct. prequeue might have dropped our dst. */ if (!sk && !ipv6_unicast_destination(skb)) return; #ifdef CONFIG_TCP_MD5SIG rcu_read_lock(); hash_location = tcp_parse_md5sig_option(th); if (sk && sk_fullsock(sk)) { key = tcp_v6_md5_do_lookup(sk, &ipv6h->saddr); } else if (hash_location) { /* * active side is lost. Try to find listening socket through * source port, and then find md5 key through listening socket. * we are not loose security here: * Incoming packet is checked with md5 hash with finding key, * no RST generated if md5 hash doesn't match. */ sk1 = inet6_lookup_listener(dev_net(skb_dst(skb)->dev), &tcp_hashinfo, NULL, 0, &ipv6h->saddr, th->source, &ipv6h->daddr, ntohs(th->source), tcp_v6_iif(skb)); if (!sk1) goto out; key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr); if (!key) goto out; genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, skb); if (genhash || memcmp(hash_location, newhash, 16) != 0) goto out; } #endif if (th->ack) seq = ntohl(th->ack_seq); else ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len - (th->doff << 2); oif = sk ? 
sk->sk_bound_dev_if : 0; tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, oif, key, 1, 0, 0); #ifdef CONFIG_TCP_MD5SIG out: rcu_read_unlock(); #endif } static void tcp_v6_send_ack(const struct sock *sk, struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 tsval, u32 tsecr, int oif, struct tcp_md5sig_key *key, u8 tclass, __be32 label) { tcp_v6_send_response(sk, skb, seq, ack, win, tsval, tsecr, oif, key, 0, tclass, label); } static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb) { struct inet_timewait_sock *tw = inet_twsk(sk); struct tcp_timewait_sock *tcptw = tcp_twsk(sk); tcp_v6_send_ack(sk, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt, tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale, tcp_time_stamp + tcptw->tw_ts_offset, tcptw->tw_ts_recent, tw->tw_bound_dev_if, tcp_twsk_md5_key(tcptw), tw->tw_tclass, cpu_to_be32(tw->tw_flowlabel)); inet_twsk_put(tw); } static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb, struct request_sock *req) { /* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV * sk->sk_state == TCP_SYN_RECV -> for Fast Open. */ /* RFC 7323 2.3 * The window field (SEG.WND) of every outgoing segment, with the * exception of <SYN> segments, MUST be right-shifted by * Rcv.Wind.Shift bits: */ tcp_v6_send_ack(sk, skb, (sk->sk_state == TCP_LISTEN) ? 
tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt, tcp_rsk(req)->rcv_nxt, req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale, tcp_time_stamp, req->ts_recent, sk->sk_bound_dev_if, tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr), 0, 0); } static struct sock *tcp_v6_cookie_check(struct sock *sk, struct sk_buff *skb) { #ifdef CONFIG_SYN_COOKIES const struct tcphdr *th = tcp_hdr(skb); if (!th->syn) sk = cookie_v6_check(sk, skb); #endif return sk; } static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb) { if (skb->protocol == htons(ETH_P_IP)) return tcp_v4_conn_request(sk, skb); if (!ipv6_unicast_destination(skb)) goto drop; return tcp_conn_request(&tcp6_request_sock_ops, &tcp_request_sock_ipv6_ops, sk, skb); drop: tcp_listendrop(sk); return 0; /* don't send reset */ } static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *skb, struct request_sock *req, struct dst_entry *dst, struct request_sock *req_unhash, bool *own_req) { struct inet_request_sock *ireq; struct ipv6_pinfo *newnp; const struct ipv6_pinfo *np = inet6_sk(sk); struct ipv6_txoptions *opt; struct tcp6_sock *newtcp6sk; struct inet_sock *newinet; struct tcp_sock *newtp; struct sock *newsk; #ifdef CONFIG_TCP_MD5SIG struct tcp_md5sig_key *key; #endif struct flowi6 fl6; if (skb->protocol == htons(ETH_P_IP)) { /* * v6 mapped */ newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst, req_unhash, own_req); if (!newsk) return NULL; newtcp6sk = (struct tcp6_sock *)newsk; inet_sk(newsk)->pinet6 = &newtcp6sk->inet6; newinet = inet_sk(newsk); newnp = inet6_sk(newsk); newtp = tcp_sk(newsk); memcpy(newnp, np, sizeof(struct ipv6_pinfo)); newnp->saddr = newsk->sk_v6_rcv_saddr; inet_csk(newsk)->icsk_af_ops = &ipv6_mapped; newsk->sk_backlog_rcv = tcp_v4_do_rcv; #ifdef CONFIG_TCP_MD5SIG newtp->af_specific = &tcp_sock_ipv6_mapped_specific; #endif newnp->ipv6_ac_list = NULL; newnp->ipv6_fl_list = NULL; newnp->pktoptions = NULL; newnp->opt = NULL; newnp->mcast_oif = tcp_v6_iif(skb); newnp->mcast_hops = 
ipv6_hdr(skb)->hop_limit; newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb)); if (np->repflow) newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb)); /* * No need to charge this sock to the relevant IPv6 refcnt debug socks count * here, tcp_create_openreq_child now does this for us, see the comment in * that function for the gory details. -acme */ /* It is tricky place. Until this moment IPv4 tcp worked with IPv6 icsk.icsk_af_ops. Sync it now. */ tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie); return newsk; } ireq = inet_rsk(req); if (sk_acceptq_is_full(sk)) goto out_overflow; if (!dst) { dst = inet6_csk_route_req(sk, &fl6, req, IPPROTO_TCP); if (!dst) goto out; } newsk = tcp_create_openreq_child(sk, req, skb); if (!newsk) goto out_nonewsk; /* * No need to charge this sock to the relevant IPv6 refcnt debug socks * count here, tcp_create_openreq_child now does this for us, see the * comment in that function for the gory details. -acme */ newsk->sk_gso_type = SKB_GSO_TCPV6; ip6_dst_store(newsk, dst, NULL, NULL); inet6_sk_rx_dst_set(newsk, skb); newtcp6sk = (struct tcp6_sock *)newsk; inet_sk(newsk)->pinet6 = &newtcp6sk->inet6; newtp = tcp_sk(newsk); newinet = inet_sk(newsk); newnp = inet6_sk(newsk); memcpy(newnp, np, sizeof(struct ipv6_pinfo)); newsk->sk_v6_daddr = ireq->ir_v6_rmt_addr; newnp->saddr = ireq->ir_v6_loc_addr; newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr; newsk->sk_bound_dev_if = ireq->ir_iif; /* Now IPv6 options... First: no IPv4 options. 
*/ newinet->inet_opt = NULL; newnp->ipv6_ac_list = NULL; newnp->ipv6_fl_list = NULL; /* Clone RX bits */ newnp->rxopt.all = np->rxopt.all; newnp->pktoptions = NULL; newnp->opt = NULL; newnp->mcast_oif = tcp_v6_iif(skb); newnp->mcast_hops = ipv6_hdr(skb)->hop_limit; newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb)); if (np->repflow) newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb)); /* Clone native IPv6 options from listening socket (if any) Yes, keeping reference count would be much more clever, but we make one more one thing there: reattach optmem to newsk. */ opt = ireq->ipv6_opt; if (!opt) opt = rcu_dereference(np->opt); if (opt) { opt = ipv6_dup_options(newsk, opt); RCU_INIT_POINTER(newnp->opt, opt); } inet_csk(newsk)->icsk_ext_hdr_len = 0; if (opt) inet_csk(newsk)->icsk_ext_hdr_len = opt->opt_nflen + opt->opt_flen; tcp_ca_openreq_child(newsk, dst); tcp_sync_mss(newsk, dst_mtu(dst)); newtp->advmss = dst_metric_advmss(dst); if (tcp_sk(sk)->rx_opt.user_mss && tcp_sk(sk)->rx_opt.user_mss < newtp->advmss) newtp->advmss = tcp_sk(sk)->rx_opt.user_mss; tcp_initialize_rcv_mss(newsk); newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6; newinet->inet_rcv_saddr = LOOPBACK4_IPV6; #ifdef CONFIG_TCP_MD5SIG /* Copy over the MD5 key from the original socket */ key = tcp_v6_md5_do_lookup(sk, &newsk->sk_v6_daddr); if (key) { /* We're using one, so create a matching key * on the newsk structure. If we fail to get * memory, then we end up not copying the key * across. Shucks. 
*/ tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newsk->sk_v6_daddr, AF_INET6, key->key, key->keylen, sk_gfp_mask(sk, GFP_ATOMIC)); } #endif if (__inet_inherit_port(sk, newsk) < 0) { inet_csk_prepare_forced_close(newsk); tcp_done(newsk); goto out; } *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash)); if (*own_req) { tcp_move_syn(newtp, req); /* Clone pktoptions received with SYN, if we own the req */ if (ireq->pktopts) { newnp->pktoptions = skb_clone(ireq->pktopts, sk_gfp_mask(sk, GFP_ATOMIC)); consume_skb(ireq->pktopts); ireq->pktopts = NULL; if (newnp->pktoptions) skb_set_owner_r(newnp->pktoptions, newsk); } } return newsk; out_overflow: __NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS); out_nonewsk: dst_release(dst); out: tcp_listendrop(sk); return NULL; } static void tcp_v6_restore_cb(struct sk_buff *skb) { /* We need to move header back to the beginning if xfrm6_policy_check() * and tcp_v6_fill_cb() are going to be called again. * ip6_datagram_recv_specific_ctl() also expects IP6CB to be there. */ memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6, sizeof(struct inet6_skb_parm)); } /* The socket must have it's spinlock held when we get * here, unless it is a TCP_LISTEN socket. * * We have a potential double-lock case here, so even when * doing backlog processing we use the BH locking scheme. * This is because we cannot sleep with the original spinlock * held. */ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb) { struct ipv6_pinfo *np = inet6_sk(sk); struct tcp_sock *tp; struct sk_buff *opt_skb = NULL; /* Imagine: socket is IPv6. IPv4 packet arrives, goes to IPv4 receive handler and backlogged. From backlog it always goes here. Kerboom... Fortunately, tcp_rcv_established and rcv_established handle them correctly, but it is not case with tcp_v6_hnd_req and tcp_v6_send_reset(). 
--ANK */ if (skb->protocol == htons(ETH_P_IP)) return tcp_v4_do_rcv(sk, skb); if (sk_filter(sk, skb)) goto discard; /* * socket locking is here for SMP purposes as backlog rcv * is currently called with bh processing disabled. */ /* Do Stevens' IPV6_PKTOPTIONS. Yes, guys, it is the only place in our code, where we may make it not affecting IPv4. The rest of code is protocol independent, and I do not like idea to uglify IPv4. Actually, all the idea behind IPV6_PKTOPTIONS looks not very well thought. For now we latch options, received in the last packet, enqueued by tcp. Feel free to propose better solution. --ANK (980728) */ if (np->rxopt.all) opt_skb = skb_clone(skb, sk_gfp_mask(sk, GFP_ATOMIC)); if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */ struct dst_entry *dst = sk->sk_rx_dst; sock_rps_save_rxhash(sk, skb); sk_mark_napi_id(sk, skb); if (dst) { if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif || dst->ops->check(dst, np->rx_dst_cookie) == NULL) { dst_release(dst); sk->sk_rx_dst = NULL; } } tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len); if (opt_skb) goto ipv6_pktoptions; return 0; } if (tcp_checksum_complete(skb)) goto csum_err; if (sk->sk_state == TCP_LISTEN) { struct sock *nsk = tcp_v6_cookie_check(sk, skb); if (!nsk) goto discard; if (nsk != sk) { sock_rps_save_rxhash(nsk, skb); sk_mark_napi_id(nsk, skb); if (tcp_child_process(sk, nsk, skb)) goto reset; if (opt_skb) __kfree_skb(opt_skb); return 0; } } else sock_rps_save_rxhash(sk, skb); if (tcp_rcv_state_process(sk, skb)) goto reset; if (opt_skb) goto ipv6_pktoptions; return 0; reset: tcp_v6_send_reset(sk, skb); discard: if (opt_skb) __kfree_skb(opt_skb); kfree_skb(skb); return 0; csum_err: TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS); TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS); goto discard; ipv6_pktoptions: /* Do you ask, what is it? 1. skb was enqueued by tcp. 2. skb is added to tail of read queue, rather than out of order. 3. socket is not in passive state. 4. 
Finally, it really contains options, which user wants to receive. */ tp = tcp_sk(sk); if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt && !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) { if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo) np->mcast_oif = tcp_v6_iif(opt_skb); if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit; if (np->rxopt.bits.rxflow || np->rxopt.bits.rxtclass) np->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(opt_skb)); if (np->repflow) np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb)); if (ipv6_opt_accepted(sk, opt_skb, &TCP_SKB_CB(opt_skb)->header.h6)) { skb_set_owner_r(opt_skb, sk); tcp_v6_restore_cb(opt_skb); opt_skb = xchg(&np->pktoptions, opt_skb); } else { __kfree_skb(opt_skb); opt_skb = xchg(&np->pktoptions, NULL); } } kfree_skb(opt_skb); return 0; } static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr, const struct tcphdr *th) { /* This is tricky: we move IP6CB at its correct location into * TCP_SKB_CB(). It must be done after xfrm6_policy_check(), because * _decode_session6() uses IP6CB(). * barrier() makes sure compiler won't play aliasing games. */ memmove(&TCP_SKB_CB(skb)->header.h6, IP6CB(skb), sizeof(struct inet6_skb_parm)); barrier(); TCP_SKB_CB(skb)->seq = ntohl(th->seq); TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin + skb->len - th->doff*4); TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq); TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th); TCP_SKB_CB(skb)->tcp_tw_isn = 0; TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr); TCP_SKB_CB(skb)->sacked = 0; } static int tcp_v6_rcv(struct sk_buff *skb) { const struct tcphdr *th; const struct ipv6hdr *hdr; bool refcounted; struct sock *sk; int ret; struct net *net = dev_net(skb->dev); if (skb->pkt_type != PACKET_HOST) goto discard_it; /* * Count it even if it's bad. 
*/ __TCP_INC_STATS(net, TCP_MIB_INSEGS); if (!pskb_may_pull(skb, sizeof(struct tcphdr))) goto discard_it; th = (const struct tcphdr *)skb->data; if (unlikely(th->doff < sizeof(struct tcphdr)/4)) goto bad_packet; if (!pskb_may_pull(skb, th->doff*4)) goto discard_it; if (skb_checksum_init(skb, IPPROTO_TCP, ip6_compute_pseudo)) goto csum_error; th = (const struct tcphdr *)skb->data; hdr = ipv6_hdr(skb); lookup: sk = __inet6_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th), th->source, th->dest, inet6_iif(skb), &refcounted); if (!sk) goto no_tcp_socket; process: if (sk->sk_state == TCP_TIME_WAIT) goto do_time_wait; if (sk->sk_state == TCP_NEW_SYN_RECV) { struct request_sock *req = inet_reqsk(sk); struct sock *nsk; sk = req->rsk_listener; tcp_v6_fill_cb(skb, hdr, th); if (tcp_v6_inbound_md5_hash(sk, skb)) { sk_drops_add(sk, skb); reqsk_put(req); goto discard_it; } if (unlikely(sk->sk_state != TCP_LISTEN)) { inet_csk_reqsk_queue_drop_and_put(sk, req); goto lookup; } sock_hold(sk); refcounted = true; nsk = tcp_check_req(sk, skb, req, false); if (!nsk) { reqsk_put(req); goto discard_and_relse; } if (nsk == sk) { reqsk_put(req); tcp_v6_restore_cb(skb); } else if (tcp_child_process(sk, nsk, skb)) { tcp_v6_send_reset(nsk, skb); goto discard_and_relse; } else { sock_put(sk); return 0; } } if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) { __NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP); goto discard_and_relse; } if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) goto discard_and_relse; tcp_v6_fill_cb(skb, hdr, th); if (tcp_v6_inbound_md5_hash(sk, skb)) goto discard_and_relse; if (sk_filter(sk, skb)) goto discard_and_relse; skb->dev = NULL; if (sk->sk_state == TCP_LISTEN) { ret = tcp_v6_do_rcv(sk, skb); goto put_and_return; } sk_incoming_cpu_update(sk); bh_lock_sock_nested(sk); tcp_segs_in(tcp_sk(sk), skb); ret = 0; if (!sock_owned_by_user(sk)) { if (!tcp_prequeue(sk, skb)) ret = tcp_v6_do_rcv(sk, skb); } else if (tcp_add_backlog(sk, skb)) { goto discard_and_relse; } 
bh_unlock_sock(sk); put_and_return: if (refcounted) sock_put(sk); return ret ? -1 : 0; no_tcp_socket: if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) goto discard_it; tcp_v6_fill_cb(skb, hdr, th); if (tcp_checksum_complete(skb)) { csum_error: __TCP_INC_STATS(net, TCP_MIB_CSUMERRORS); bad_packet: __TCP_INC_STATS(net, TCP_MIB_INERRS); } else { tcp_v6_send_reset(NULL, skb); } discard_it: kfree_skb(skb); return 0; discard_and_relse: sk_drops_add(sk, skb); if (refcounted) sock_put(sk); goto discard_it; do_time_wait: if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) { inet_twsk_put(inet_twsk(sk)); goto discard_it; } tcp_v6_fill_cb(skb, hdr, th); if (tcp_checksum_complete(skb)) { inet_twsk_put(inet_twsk(sk)); goto csum_error; } switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) { case TCP_TW_SYN: { struct sock *sk2; sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo, skb, __tcp_hdrlen(th), &ipv6_hdr(skb)->saddr, th->source, &ipv6_hdr(skb)->daddr, ntohs(th->dest), tcp_v6_iif(skb)); if (sk2) { struct inet_timewait_sock *tw = inet_twsk(sk); inet_twsk_deschedule_put(tw); sk = sk2; tcp_v6_restore_cb(skb); refcounted = false; goto process; } /* Fall through to ACK */ } case TCP_TW_ACK: tcp_v6_timewait_ack(sk, skb); break; case TCP_TW_RST: tcp_v6_restore_cb(skb); tcp_v6_send_reset(sk, skb); inet_twsk_deschedule_put(inet_twsk(sk)); goto discard_it; case TCP_TW_SUCCESS: ; } goto discard_it; } static void tcp_v6_early_demux(struct sk_buff *skb) { const struct ipv6hdr *hdr; const struct tcphdr *th; struct sock *sk; if (skb->pkt_type != PACKET_HOST) return; if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr))) return; hdr = ipv6_hdr(skb); th = tcp_hdr(skb); if (th->doff < sizeof(struct tcphdr) / 4) return; /* Note : We use inet6_iif() here, not tcp_v6_iif() */ sk = __inet6_lookup_established(dev_net(skb->dev), &tcp_hashinfo, &hdr->saddr, th->source, &hdr->daddr, ntohs(th->dest), inet6_iif(skb)); if (sk) { skb->sk = sk; skb->destructor = 
sock_edemux; if (sk_fullsock(sk)) { struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst); if (dst) dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie); if (dst && inet_sk(sk)->rx_dst_ifindex == skb->skb_iif) skb_dst_set_noref(skb, dst); } } } static struct timewait_sock_ops tcp6_timewait_sock_ops = { .twsk_obj_size = sizeof(struct tcp6_timewait_sock), .twsk_unique = tcp_twsk_unique, .twsk_destructor = tcp_twsk_destructor, }; static const struct inet_connection_sock_af_ops ipv6_specific = { .queue_xmit = inet6_csk_xmit, .send_check = tcp_v6_send_check, .rebuild_header = inet6_sk_rebuild_header, .sk_rx_dst_set = inet6_sk_rx_dst_set, .conn_request = tcp_v6_conn_request, .syn_recv_sock = tcp_v6_syn_recv_sock, .net_header_len = sizeof(struct ipv6hdr), .net_frag_header_len = sizeof(struct frag_hdr), .setsockopt = ipv6_setsockopt, .getsockopt = ipv6_getsockopt, .addr2sockaddr = inet6_csk_addr2sockaddr, .sockaddr_len = sizeof(struct sockaddr_in6), .bind_conflict = inet6_csk_bind_conflict, #ifdef CONFIG_COMPAT .compat_setsockopt = compat_ipv6_setsockopt, .compat_getsockopt = compat_ipv6_getsockopt, #endif .mtu_reduced = tcp_v6_mtu_reduced, }; #ifdef CONFIG_TCP_MD5SIG static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = { .md5_lookup = tcp_v6_md5_lookup, .calc_md5_hash = tcp_v6_md5_hash_skb, .md5_parse = tcp_v6_parse_md5_keys, }; #endif /* * TCP over IPv4 via INET6 API */ static const struct inet_connection_sock_af_ops ipv6_mapped = { .queue_xmit = ip_queue_xmit, .send_check = tcp_v4_send_check, .rebuild_header = inet_sk_rebuild_header, .sk_rx_dst_set = inet_sk_rx_dst_set, .conn_request = tcp_v6_conn_request, .syn_recv_sock = tcp_v6_syn_recv_sock, .net_header_len = sizeof(struct iphdr), .setsockopt = ipv6_setsockopt, .getsockopt = ipv6_getsockopt, .addr2sockaddr = inet6_csk_addr2sockaddr, .sockaddr_len = sizeof(struct sockaddr_in6), .bind_conflict = inet6_csk_bind_conflict, #ifdef CONFIG_COMPAT .compat_setsockopt = compat_ipv6_setsockopt, .compat_getsockopt = 
compat_ipv6_getsockopt, #endif .mtu_reduced = tcp_v4_mtu_reduced, }; #ifdef CONFIG_TCP_MD5SIG static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = { .md5_lookup = tcp_v4_md5_lookup, .calc_md5_hash = tcp_v4_md5_hash_skb, .md5_parse = tcp_v6_parse_md5_keys, }; #endif /* NOTE: A lot of things set to zero explicitly by call to * sk_alloc() so need not be done here. */ static int tcp_v6_init_sock(struct sock *sk) { struct inet_connection_sock *icsk = inet_csk(sk); tcp_init_sock(sk); icsk->icsk_af_ops = &ipv6_specific; #ifdef CONFIG_TCP_MD5SIG tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific; #endif return 0; } static void tcp_v6_destroy_sock(struct sock *sk) { tcp_v4_destroy_sock(sk); inet6_destroy_sock(sk); } #ifdef CONFIG_PROC_FS /* Proc filesystem TCPv6 sock list dumping. */ static void get_openreq6(struct seq_file *seq, const struct request_sock *req, int i) { long ttd = req->rsk_timer.expires - jiffies; const struct in6_addr *src = &inet_rsk(req)->ir_v6_loc_addr; const struct in6_addr *dest = &inet_rsk(req)->ir_v6_rmt_addr; if (ttd < 0) ttd = 0; seq_printf(seq, "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X " "%02X %08X:%08X %02X:%08lX %08X %5u %8d %d %d %pK\n", i, src->s6_addr32[0], src->s6_addr32[1], src->s6_addr32[2], src->s6_addr32[3], inet_rsk(req)->ir_num, dest->s6_addr32[0], dest->s6_addr32[1], dest->s6_addr32[2], dest->s6_addr32[3], ntohs(inet_rsk(req)->ir_rmt_port), TCP_SYN_RECV, 0, 0, /* could print option size, but that is af dependent. 
*/ 1, /* timers active (only the expire timer) */ jiffies_to_clock_t(ttd), req->num_timeout, from_kuid_munged(seq_user_ns(seq), sock_i_uid(req->rsk_listener)), 0, /* non standard timer */ 0, /* open_requests have no inode */ 0, req); } static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i) { const struct in6_addr *dest, *src; __u16 destp, srcp; int timer_active; unsigned long timer_expires; const struct inet_sock *inet = inet_sk(sp); const struct tcp_sock *tp = tcp_sk(sp); const struct inet_connection_sock *icsk = inet_csk(sp); const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq; int rx_queue; int state; dest = &sp->sk_v6_daddr; src = &sp->sk_v6_rcv_saddr; destp = ntohs(inet->inet_dport); srcp = ntohs(inet->inet_sport); if (icsk->icsk_pending == ICSK_TIME_RETRANS || icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS || icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) { timer_active = 1; timer_expires = icsk->icsk_timeout; } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) { timer_active = 4; timer_expires = icsk->icsk_timeout; } else if (timer_pending(&sp->sk_timer)) { timer_active = 2; timer_expires = sp->sk_timer.expires; } else { timer_active = 0; timer_expires = jiffies; } state = sk_state_load(sp); if (state == TCP_LISTEN) rx_queue = sp->sk_ack_backlog; else /* Because we don't lock the socket, * we might find a transient negative value. 
*/ rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0); seq_printf(seq, "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X " "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %lu %lu %u %u %d\n", i, src->s6_addr32[0], src->s6_addr32[1], src->s6_addr32[2], src->s6_addr32[3], srcp, dest->s6_addr32[0], dest->s6_addr32[1], dest->s6_addr32[2], dest->s6_addr32[3], destp, state, tp->write_seq - tp->snd_una, rx_queue, timer_active, jiffies_delta_to_clock_t(timer_expires - jiffies), icsk->icsk_retransmits, from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)), icsk->icsk_probes_out, sock_i_ino(sp), atomic_read(&sp->sk_refcnt), sp, jiffies_to_clock_t(icsk->icsk_rto), jiffies_to_clock_t(icsk->icsk_ack.ato), (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong, tp->snd_cwnd, state == TCP_LISTEN ? fastopenq->max_qlen : (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh) ); } static void get_timewait6_sock(struct seq_file *seq, struct inet_timewait_sock *tw, int i) { long delta = tw->tw_timer.expires - jiffies; const struct in6_addr *dest, *src; __u16 destp, srcp; dest = &tw->tw_v6_daddr; src = &tw->tw_v6_rcv_saddr; destp = ntohs(tw->tw_dport); srcp = ntohs(tw->tw_sport); seq_printf(seq, "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X " "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n", i, src->s6_addr32[0], src->s6_addr32[1], src->s6_addr32[2], src->s6_addr32[3], srcp, dest->s6_addr32[0], dest->s6_addr32[1], dest->s6_addr32[2], dest->s6_addr32[3], destp, tw->tw_substate, 0, 0, 3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0, atomic_read(&tw->tw_refcnt), tw); } static int tcp6_seq_show(struct seq_file *seq, void *v) { struct tcp_iter_state *st; struct sock *sk = v; if (v == SEQ_START_TOKEN) { seq_puts(seq, " sl " "local_address " "remote_address " "st tx_queue rx_queue tr tm->when retrnsmt" " uid timeout inode\n"); goto out; } st = seq->private; if (sk->sk_state == TCP_TIME_WAIT) get_timewait6_sock(seq, v, st->num); else if (sk->sk_state == TCP_NEW_SYN_RECV) 
get_openreq6(seq, v, st->num); else get_tcp6_sock(seq, v, st->num); out: return 0; } static const struct file_operations tcp6_afinfo_seq_fops = { .owner = THIS_MODULE, .open = tcp_seq_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release_net }; static struct tcp_seq_afinfo tcp6_seq_afinfo = { .name = "tcp6", .family = AF_INET6, .seq_fops = &tcp6_afinfo_seq_fops, .seq_ops = { .show = tcp6_seq_show, }, }; int __net_init tcp6_proc_init(struct net *net) { return tcp_proc_register(net, &tcp6_seq_afinfo); } void tcp6_proc_exit(struct net *net) { tcp_proc_unregister(net, &tcp6_seq_afinfo); } #endif struct proto tcpv6_prot = { .name = "TCPv6", .owner = THIS_MODULE, .close = tcp_close, .connect = tcp_v6_connect, .disconnect = tcp_disconnect, .accept = inet_csk_accept, .ioctl = tcp_ioctl, .init = tcp_v6_init_sock, .destroy = tcp_v6_destroy_sock, .shutdown = tcp_shutdown, .setsockopt = tcp_setsockopt, .getsockopt = tcp_getsockopt, .recvmsg = tcp_recvmsg, .sendmsg = tcp_sendmsg, .sendpage = tcp_sendpage, .backlog_rcv = tcp_v6_do_rcv, .release_cb = tcp_release_cb, .hash = inet6_hash, .unhash = inet_unhash, .get_port = inet_csk_get_port, .enter_memory_pressure = tcp_enter_memory_pressure, .stream_memory_free = tcp_stream_memory_free, .sockets_allocated = &tcp_sockets_allocated, .memory_allocated = &tcp_memory_allocated, .memory_pressure = &tcp_memory_pressure, .orphan_count = &tcp_orphan_count, .sysctl_mem = sysctl_tcp_mem, .sysctl_wmem = sysctl_tcp_wmem, .sysctl_rmem = sysctl_tcp_rmem, .max_header = MAX_TCP_HEADER, .obj_size = sizeof(struct tcp6_sock), .slab_flags = SLAB_DESTROY_BY_RCU, .twsk_prot = &tcp6_timewait_sock_ops, .rsk_prot = &tcp6_request_sock_ops, .h.hashinfo = &tcp_hashinfo, .no_autobind = true, #ifdef CONFIG_COMPAT .compat_setsockopt = compat_tcp_setsockopt, .compat_getsockopt = compat_tcp_getsockopt, #endif .diag_destroy = tcp_abort, }; static const struct inet6_protocol tcpv6_protocol = { .early_demux = tcp_v6_early_demux, .handler = tcp_v6_rcv, 
.err_handler = tcp_v6_err, .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL, }; static struct inet_protosw tcpv6_protosw = { .type = SOCK_STREAM, .protocol = IPPROTO_TCP, .prot = &tcpv6_prot, .ops = &inet6_stream_ops, .flags = INET_PROTOSW_PERMANENT | INET_PROTOSW_ICSK, }; static int __net_init tcpv6_net_init(struct net *net) { return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6, SOCK_RAW, IPPROTO_TCP, net); } static void __net_exit tcpv6_net_exit(struct net *net) { inet_ctl_sock_destroy(net->ipv6.tcp_sk); } static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list) { inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET6); } static struct pernet_operations tcpv6_net_ops = { .init = tcpv6_net_init, .exit = tcpv6_net_exit, .exit_batch = tcpv6_net_exit_batch, }; int __init tcpv6_init(void) { int ret; ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP); if (ret) goto out; /* register inet6 protocol */ ret = inet6_register_protosw(&tcpv6_protosw); if (ret) goto out_tcpv6_protocol; ret = register_pernet_subsys(&tcpv6_net_ops); if (ret) goto out_tcpv6_protosw; out: return ret; out_tcpv6_protosw: inet6_unregister_protosw(&tcpv6_protosw); out_tcpv6_protocol: inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP); goto out; } void tcpv6_exit(void) { unregister_pernet_subsys(&tcpv6_net_ops); inet6_unregister_protosw(&tcpv6_protosw); inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP); }
./CrossVul/dataset_final_sorted/CWE-284/c/bad_5349_2
crossvul-cpp_data_bad_3852_0
/* * NETLINK Kernel-user communication protocol. * * Authors: Alan Cox <alan@lxorguk.ukuu.org.uk> * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * Tue Jun 26 14:36:48 MEST 2001 Herbert "herp" Rosmanith * added netlink_proto_exit * Tue Jan 22 18:32:44 BRST 2002 Arnaldo C. de Melo <acme@conectiva.com.br> * use nlk_sk, as sk->protinfo is on a diet 8) * Fri Jul 22 19:51:12 MEST 2005 Harald Welte <laforge@gnumonks.org> * - inc module use count of module that owns * the kernel socket in case userspace opens * socket of same protocol * - remove all module support, since netlink is * mandatory if CONFIG_NET=y these days */ #include <linux/module.h> #include <linux/capability.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/signal.h> #include <linux/sched.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/stat.h> #include <linux/socket.h> #include <linux/un.h> #include <linux/fcntl.h> #include <linux/termios.h> #include <linux/sockios.h> #include <linux/net.h> #include <linux/fs.h> #include <linux/slab.h> #include <asm/uaccess.h> #include <linux/skbuff.h> #include <linux/netdevice.h> #include <linux/rtnetlink.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/notifier.h> #include <linux/security.h> #include <linux/jhash.h> #include <linux/jiffies.h> #include <linux/random.h> #include <linux/bitops.h> #include <linux/mm.h> #include <linux/types.h> #include <linux/audit.h> #include <linux/mutex.h> #include <net/net_namespace.h> #include <net/sock.h> #include <net/scm.h> #include <net/netlink.h> #define NLGRPSZ(x) (ALIGN(x, sizeof(unsigned long) * 8) / 8) #define NLGRPLONGS(x) (NLGRPSZ(x)/sizeof(unsigned long)) struct netlink_sock { /* struct sock has to be the first member 
of netlink_sock */ struct sock sk; u32 pid; u32 dst_pid; u32 dst_group; u32 flags; u32 subscriptions; u32 ngroups; unsigned long *groups; unsigned long state; wait_queue_head_t wait; struct netlink_callback *cb; struct mutex *cb_mutex; struct mutex cb_def_mutex; void (*netlink_rcv)(struct sk_buff *skb); void (*netlink_bind)(int group); struct module *module; }; struct listeners { struct rcu_head rcu; unsigned long masks[0]; }; #define NETLINK_KERNEL_SOCKET 0x1 #define NETLINK_RECV_PKTINFO 0x2 #define NETLINK_BROADCAST_SEND_ERROR 0x4 #define NETLINK_RECV_NO_ENOBUFS 0x8 static inline struct netlink_sock *nlk_sk(struct sock *sk) { return container_of(sk, struct netlink_sock, sk); } static inline int netlink_is_kernel(struct sock *sk) { return nlk_sk(sk)->flags & NETLINK_KERNEL_SOCKET; } struct nl_pid_hash { struct hlist_head *table; unsigned long rehash_time; unsigned int mask; unsigned int shift; unsigned int entries; unsigned int max_shift; u32 rnd; }; struct netlink_table { struct nl_pid_hash hash; struct hlist_head mc_list; struct listeners __rcu *listeners; unsigned int nl_nonroot; unsigned int groups; struct mutex *cb_mutex; struct module *module; void (*bind)(int group); int registered; }; static struct netlink_table *nl_table; static DECLARE_WAIT_QUEUE_HEAD(nl_table_wait); static int netlink_dump(struct sock *sk); static DEFINE_RWLOCK(nl_table_lock); static atomic_t nl_table_users = ATOMIC_INIT(0); static ATOMIC_NOTIFIER_HEAD(netlink_chain); static inline u32 netlink_group_mask(u32 group) { return group ? 
1 << (group - 1) : 0; } static inline struct hlist_head *nl_pid_hashfn(struct nl_pid_hash *hash, u32 pid) { return &hash->table[jhash_1word(pid, hash->rnd) & hash->mask]; } static void netlink_destroy_callback(struct netlink_callback *cb) { kfree_skb(cb->skb); kfree(cb); } static void netlink_consume_callback(struct netlink_callback *cb) { consume_skb(cb->skb); kfree(cb); } static void netlink_sock_destruct(struct sock *sk) { struct netlink_sock *nlk = nlk_sk(sk); if (nlk->cb) { if (nlk->cb->done) nlk->cb->done(nlk->cb); netlink_destroy_callback(nlk->cb); } skb_queue_purge(&sk->sk_receive_queue); if (!sock_flag(sk, SOCK_DEAD)) { printk(KERN_ERR "Freeing alive netlink socket %p\n", sk); return; } WARN_ON(atomic_read(&sk->sk_rmem_alloc)); WARN_ON(atomic_read(&sk->sk_wmem_alloc)); WARN_ON(nlk_sk(sk)->groups); } /* This lock without WQ_FLAG_EXCLUSIVE is good on UP and it is _very_ bad on * SMP. Look, when several writers sleep and reader wakes them up, all but one * immediately hit write lock and grab all the cpus. Exclusive sleep solves * this, _but_ remember, it adds useless work on UP machines. 
*/ void netlink_table_grab(void) __acquires(nl_table_lock) { might_sleep(); write_lock_irq(&nl_table_lock); if (atomic_read(&nl_table_users)) { DECLARE_WAITQUEUE(wait, current); add_wait_queue_exclusive(&nl_table_wait, &wait); for (;;) { set_current_state(TASK_UNINTERRUPTIBLE); if (atomic_read(&nl_table_users) == 0) break; write_unlock_irq(&nl_table_lock); schedule(); write_lock_irq(&nl_table_lock); } __set_current_state(TASK_RUNNING); remove_wait_queue(&nl_table_wait, &wait); } } void netlink_table_ungrab(void) __releases(nl_table_lock) { write_unlock_irq(&nl_table_lock); wake_up(&nl_table_wait); } static inline void netlink_lock_table(void) { /* read_lock() synchronizes us to netlink_table_grab */ read_lock(&nl_table_lock); atomic_inc(&nl_table_users); read_unlock(&nl_table_lock); } static inline void netlink_unlock_table(void) { if (atomic_dec_and_test(&nl_table_users)) wake_up(&nl_table_wait); } static struct sock *netlink_lookup(struct net *net, int protocol, u32 pid) { struct nl_pid_hash *hash = &nl_table[protocol].hash; struct hlist_head *head; struct sock *sk; struct hlist_node *node; read_lock(&nl_table_lock); head = nl_pid_hashfn(hash, pid); sk_for_each(sk, node, head) { if (net_eq(sock_net(sk), net) && (nlk_sk(sk)->pid == pid)) { sock_hold(sk); goto found; } } sk = NULL; found: read_unlock(&nl_table_lock); return sk; } static struct hlist_head *nl_pid_hash_zalloc(size_t size) { if (size <= PAGE_SIZE) return kzalloc(size, GFP_ATOMIC); else return (struct hlist_head *) __get_free_pages(GFP_ATOMIC | __GFP_ZERO, get_order(size)); } static void nl_pid_hash_free(struct hlist_head *table, size_t size) { if (size <= PAGE_SIZE) kfree(table); else free_pages((unsigned long)table, get_order(size)); } static int nl_pid_hash_rehash(struct nl_pid_hash *hash, int grow) { unsigned int omask, mask, shift; size_t osize, size; struct hlist_head *otable, *table; int i; omask = mask = hash->mask; osize = size = (mask + 1) * sizeof(*table); shift = hash->shift; if (grow) { if 
(++shift > hash->max_shift) return 0; mask = mask * 2 + 1; size *= 2; } table = nl_pid_hash_zalloc(size); if (!table) return 0; otable = hash->table; hash->table = table; hash->mask = mask; hash->shift = shift; get_random_bytes(&hash->rnd, sizeof(hash->rnd)); for (i = 0; i <= omask; i++) { struct sock *sk; struct hlist_node *node, *tmp; sk_for_each_safe(sk, node, tmp, &otable[i]) __sk_add_node(sk, nl_pid_hashfn(hash, nlk_sk(sk)->pid)); } nl_pid_hash_free(otable, osize); hash->rehash_time = jiffies + 10 * 60 * HZ; return 1; } static inline int nl_pid_hash_dilute(struct nl_pid_hash *hash, int len) { int avg = hash->entries >> hash->shift; if (unlikely(avg > 1) && nl_pid_hash_rehash(hash, 1)) return 1; if (unlikely(len > avg) && time_after(jiffies, hash->rehash_time)) { nl_pid_hash_rehash(hash, 0); return 1; } return 0; } static const struct proto_ops netlink_ops; static void netlink_update_listeners(struct sock *sk) { struct netlink_table *tbl = &nl_table[sk->sk_protocol]; struct hlist_node *node; unsigned long mask; unsigned int i; for (i = 0; i < NLGRPLONGS(tbl->groups); i++) { mask = 0; sk_for_each_bound(sk, node, &tbl->mc_list) { if (i < NLGRPLONGS(nlk_sk(sk)->ngroups)) mask |= nlk_sk(sk)->groups[i]; } tbl->listeners->masks[i] = mask; } /* this function is only called with the netlink table "grabbed", which * makes sure updates are visible before bind or setsockopt return. 
*/ } static int netlink_insert(struct sock *sk, struct net *net, u32 pid) { struct nl_pid_hash *hash = &nl_table[sk->sk_protocol].hash; struct hlist_head *head; int err = -EADDRINUSE; struct sock *osk; struct hlist_node *node; int len; netlink_table_grab(); head = nl_pid_hashfn(hash, pid); len = 0; sk_for_each(osk, node, head) { if (net_eq(sock_net(osk), net) && (nlk_sk(osk)->pid == pid)) break; len++; } if (node) goto err; err = -EBUSY; if (nlk_sk(sk)->pid) goto err; err = -ENOMEM; if (BITS_PER_LONG > 32 && unlikely(hash->entries >= UINT_MAX)) goto err; if (len && nl_pid_hash_dilute(hash, len)) head = nl_pid_hashfn(hash, pid); hash->entries++; nlk_sk(sk)->pid = pid; sk_add_node(sk, head); err = 0; err: netlink_table_ungrab(); return err; } static void netlink_remove(struct sock *sk) { netlink_table_grab(); if (sk_del_node_init(sk)) nl_table[sk->sk_protocol].hash.entries--; if (nlk_sk(sk)->subscriptions) __sk_del_bind_node(sk); netlink_table_ungrab(); } static struct proto netlink_proto = { .name = "NETLINK", .owner = THIS_MODULE, .obj_size = sizeof(struct netlink_sock), }; static int __netlink_create(struct net *net, struct socket *sock, struct mutex *cb_mutex, int protocol) { struct sock *sk; struct netlink_sock *nlk; sock->ops = &netlink_ops; sk = sk_alloc(net, PF_NETLINK, GFP_KERNEL, &netlink_proto); if (!sk) return -ENOMEM; sock_init_data(sock, sk); nlk = nlk_sk(sk); if (cb_mutex) { nlk->cb_mutex = cb_mutex; } else { nlk->cb_mutex = &nlk->cb_def_mutex; mutex_init(nlk->cb_mutex); } init_waitqueue_head(&nlk->wait); sk->sk_destruct = netlink_sock_destruct; sk->sk_protocol = protocol; return 0; } static int netlink_create(struct net *net, struct socket *sock, int protocol, int kern) { struct module *module = NULL; struct mutex *cb_mutex; struct netlink_sock *nlk; void (*bind)(int group); int err = 0; sock->state = SS_UNCONNECTED; if (sock->type != SOCK_RAW && sock->type != SOCK_DGRAM) return -ESOCKTNOSUPPORT; if (protocol < 0 || protocol >= MAX_LINKS) return 
-EPROTONOSUPPORT; netlink_lock_table(); #ifdef CONFIG_MODULES if (!nl_table[protocol].registered) { netlink_unlock_table(); request_module("net-pf-%d-proto-%d", PF_NETLINK, protocol); netlink_lock_table(); } #endif if (nl_table[protocol].registered && try_module_get(nl_table[protocol].module)) module = nl_table[protocol].module; else err = -EPROTONOSUPPORT; cb_mutex = nl_table[protocol].cb_mutex; bind = nl_table[protocol].bind; netlink_unlock_table(); if (err < 0) goto out; err = __netlink_create(net, sock, cb_mutex, protocol); if (err < 0) goto out_module; local_bh_disable(); sock_prot_inuse_add(net, &netlink_proto, 1); local_bh_enable(); nlk = nlk_sk(sock->sk); nlk->module = module; nlk->netlink_bind = bind; out: return err; out_module: module_put(module); goto out; } static int netlink_release(struct socket *sock) { struct sock *sk = sock->sk; struct netlink_sock *nlk; if (!sk) return 0; netlink_remove(sk); sock_orphan(sk); nlk = nlk_sk(sk); /* * OK. Socket is unlinked, any packets that arrive now * will be purged. 
*/ sock->sk = NULL; wake_up_interruptible_all(&nlk->wait); skb_queue_purge(&sk->sk_write_queue); if (nlk->pid) { struct netlink_notify n = { .net = sock_net(sk), .protocol = sk->sk_protocol, .pid = nlk->pid, }; atomic_notifier_call_chain(&netlink_chain, NETLINK_URELEASE, &n); } module_put(nlk->module); netlink_table_grab(); if (netlink_is_kernel(sk)) { BUG_ON(nl_table[sk->sk_protocol].registered == 0); if (--nl_table[sk->sk_protocol].registered == 0) { kfree(nl_table[sk->sk_protocol].listeners); nl_table[sk->sk_protocol].module = NULL; nl_table[sk->sk_protocol].registered = 0; } } else if (nlk->subscriptions) { netlink_update_listeners(sk); } netlink_table_ungrab(); kfree(nlk->groups); nlk->groups = NULL; local_bh_disable(); sock_prot_inuse_add(sock_net(sk), &netlink_proto, -1); local_bh_enable(); sock_put(sk); return 0; } static int netlink_autobind(struct socket *sock) { struct sock *sk = sock->sk; struct net *net = sock_net(sk); struct nl_pid_hash *hash = &nl_table[sk->sk_protocol].hash; struct hlist_head *head; struct sock *osk; struct hlist_node *node; s32 pid = task_tgid_vnr(current); int err; static s32 rover = -4097; retry: cond_resched(); netlink_table_grab(); head = nl_pid_hashfn(hash, pid); sk_for_each(osk, node, head) { if (!net_eq(sock_net(osk), net)) continue; if (nlk_sk(osk)->pid == pid) { /* Bind collision, search negative pid values. */ pid = rover--; if (rover > -4097) rover = -4097; netlink_table_ungrab(); goto retry; } } netlink_table_ungrab(); err = netlink_insert(sk, net, pid); if (err == -EADDRINUSE) goto retry; /* If 2 threads race to autobind, that is fine. 
*/ if (err == -EBUSY) err = 0; return err; } static inline int netlink_capable(const struct socket *sock, unsigned int flag) { return (nl_table[sock->sk->sk_protocol].nl_nonroot & flag) || capable(CAP_NET_ADMIN); } static void netlink_update_subscriptions(struct sock *sk, unsigned int subscriptions) { struct netlink_sock *nlk = nlk_sk(sk); if (nlk->subscriptions && !subscriptions) __sk_del_bind_node(sk); else if (!nlk->subscriptions && subscriptions) sk_add_bind_node(sk, &nl_table[sk->sk_protocol].mc_list); nlk->subscriptions = subscriptions; } static int netlink_realloc_groups(struct sock *sk) { struct netlink_sock *nlk = nlk_sk(sk); unsigned int groups; unsigned long *new_groups; int err = 0; netlink_table_grab(); groups = nl_table[sk->sk_protocol].groups; if (!nl_table[sk->sk_protocol].registered) { err = -ENOENT; goto out_unlock; } if (nlk->ngroups >= groups) goto out_unlock; new_groups = krealloc(nlk->groups, NLGRPSZ(groups), GFP_ATOMIC); if (new_groups == NULL) { err = -ENOMEM; goto out_unlock; } memset((char *)new_groups + NLGRPSZ(nlk->ngroups), 0, NLGRPSZ(groups) - NLGRPSZ(nlk->ngroups)); nlk->groups = new_groups; nlk->ngroups = groups; out_unlock: netlink_table_ungrab(); return err; } static int netlink_bind(struct socket *sock, struct sockaddr *addr, int addr_len) { struct sock *sk = sock->sk; struct net *net = sock_net(sk); struct netlink_sock *nlk = nlk_sk(sk); struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr; int err; if (nladdr->nl_family != AF_NETLINK) return -EINVAL; /* Only superuser is allowed to listen multicasts */ if (nladdr->nl_groups) { if (!netlink_capable(sock, NL_NONROOT_RECV)) return -EPERM; err = netlink_realloc_groups(sk); if (err) return err; } if (nlk->pid) { if (nladdr->nl_pid != nlk->pid) return -EINVAL; } else { err = nladdr->nl_pid ? 
netlink_insert(sk, net, nladdr->nl_pid) : netlink_autobind(sock); if (err) return err; } if (!nladdr->nl_groups && (nlk->groups == NULL || !(u32)nlk->groups[0])) return 0; netlink_table_grab(); netlink_update_subscriptions(sk, nlk->subscriptions + hweight32(nladdr->nl_groups) - hweight32(nlk->groups[0])); nlk->groups[0] = (nlk->groups[0] & ~0xffffffffUL) | nladdr->nl_groups; netlink_update_listeners(sk); netlink_table_ungrab(); if (nlk->netlink_bind && nlk->groups[0]) { int i; for (i=0; i<nlk->ngroups; i++) { if (test_bit(i, nlk->groups)) nlk->netlink_bind(i); } } return 0; } static int netlink_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags) { int err = 0; struct sock *sk = sock->sk; struct netlink_sock *nlk = nlk_sk(sk); struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr; if (alen < sizeof(addr->sa_family)) return -EINVAL; if (addr->sa_family == AF_UNSPEC) { sk->sk_state = NETLINK_UNCONNECTED; nlk->dst_pid = 0; nlk->dst_group = 0; return 0; } if (addr->sa_family != AF_NETLINK) return -EINVAL; /* Only superuser is allowed to send multicasts */ if (nladdr->nl_groups && !netlink_capable(sock, NL_NONROOT_SEND)) return -EPERM; if (!nlk->pid) err = netlink_autobind(sock); if (err == 0) { sk->sk_state = NETLINK_CONNECTED; nlk->dst_pid = nladdr->nl_pid; nlk->dst_group = ffs(nladdr->nl_groups); } return err; } static int netlink_getname(struct socket *sock, struct sockaddr *addr, int *addr_len, int peer) { struct sock *sk = sock->sk; struct netlink_sock *nlk = nlk_sk(sk); DECLARE_SOCKADDR(struct sockaddr_nl *, nladdr, addr); nladdr->nl_family = AF_NETLINK; nladdr->nl_pad = 0; *addr_len = sizeof(*nladdr); if (peer) { nladdr->nl_pid = nlk->dst_pid; nladdr->nl_groups = netlink_group_mask(nlk->dst_group); } else { nladdr->nl_pid = nlk->pid; nladdr->nl_groups = nlk->groups ? 
nlk->groups[0] : 0; } return 0; } static void netlink_overrun(struct sock *sk) { struct netlink_sock *nlk = nlk_sk(sk); if (!(nlk->flags & NETLINK_RECV_NO_ENOBUFS)) { if (!test_and_set_bit(0, &nlk_sk(sk)->state)) { sk->sk_err = ENOBUFS; sk->sk_error_report(sk); } } atomic_inc(&sk->sk_drops); } static struct sock *netlink_getsockbypid(struct sock *ssk, u32 pid) { struct sock *sock; struct netlink_sock *nlk; sock = netlink_lookup(sock_net(ssk), ssk->sk_protocol, pid); if (!sock) return ERR_PTR(-ECONNREFUSED); /* Don't bother queuing skb if kernel socket has no input function */ nlk = nlk_sk(sock); if (sock->sk_state == NETLINK_CONNECTED && nlk->dst_pid != nlk_sk(ssk)->pid) { sock_put(sock); return ERR_PTR(-ECONNREFUSED); } return sock; } struct sock *netlink_getsockbyfilp(struct file *filp) { struct inode *inode = filp->f_path.dentry->d_inode; struct sock *sock; if (!S_ISSOCK(inode->i_mode)) return ERR_PTR(-ENOTSOCK); sock = SOCKET_I(inode)->sk; if (sock->sk_family != AF_NETLINK) return ERR_PTR(-EINVAL); sock_hold(sock); return sock; } /* * Attach a skb to a netlink socket. * The caller must hold a reference to the destination socket. On error, the * reference is dropped. The skb is not send to the destination, just all * all error checks are performed and memory in the queue is reserved. * Return values: * < 0: error. skb freed, reference to sock dropped. * 0: continue * 1: repeat lookup - reference dropped while waiting for socket memory. 
*/ int netlink_attachskb(struct sock *sk, struct sk_buff *skb, long *timeo, struct sock *ssk) { struct netlink_sock *nlk; nlk = nlk_sk(sk); if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf || test_bit(0, &nlk->state)) { DECLARE_WAITQUEUE(wait, current); if (!*timeo) { if (!ssk || netlink_is_kernel(ssk)) netlink_overrun(sk); sock_put(sk); kfree_skb(skb); return -EAGAIN; } __set_current_state(TASK_INTERRUPTIBLE); add_wait_queue(&nlk->wait, &wait); if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf || test_bit(0, &nlk->state)) && !sock_flag(sk, SOCK_DEAD)) *timeo = schedule_timeout(*timeo); __set_current_state(TASK_RUNNING); remove_wait_queue(&nlk->wait, &wait); sock_put(sk); if (signal_pending(current)) { kfree_skb(skb); return sock_intr_errno(*timeo); } return 1; } skb_set_owner_r(skb, sk); return 0; } static int __netlink_sendskb(struct sock *sk, struct sk_buff *skb) { int len = skb->len; skb_queue_tail(&sk->sk_receive_queue, skb); sk->sk_data_ready(sk, len); return len; } int netlink_sendskb(struct sock *sk, struct sk_buff *skb) { int len = __netlink_sendskb(sk, skb); sock_put(sk); return len; } void netlink_detachskb(struct sock *sk, struct sk_buff *skb) { kfree_skb(skb); sock_put(sk); } static struct sk_buff *netlink_trim(struct sk_buff *skb, gfp_t allocation) { int delta; skb_orphan(skb); delta = skb->end - skb->tail; if (delta * 2 < skb->truesize) return skb; if (skb_shared(skb)) { struct sk_buff *nskb = skb_clone(skb, allocation); if (!nskb) return skb; consume_skb(skb); skb = nskb; } if (!pskb_expand_head(skb, 0, -delta, allocation)) skb->truesize -= delta; return skb; } static void netlink_rcv_wake(struct sock *sk) { struct netlink_sock *nlk = nlk_sk(sk); if (skb_queue_empty(&sk->sk_receive_queue)) clear_bit(0, &nlk->state); if (!test_bit(0, &nlk->state)) wake_up_interruptible(&nlk->wait); } static int netlink_unicast_kernel(struct sock *sk, struct sk_buff *skb) { int ret; struct netlink_sock *nlk = nlk_sk(sk); ret = -ECONNREFUSED; if (nlk->netlink_rcv 
!= NULL) { ret = skb->len; skb_set_owner_r(skb, sk); nlk->netlink_rcv(skb); consume_skb(skb); } else { kfree_skb(skb); } sock_put(sk); return ret; } int netlink_unicast(struct sock *ssk, struct sk_buff *skb, u32 pid, int nonblock) { struct sock *sk; int err; long timeo; skb = netlink_trim(skb, gfp_any()); timeo = sock_sndtimeo(ssk, nonblock); retry: sk = netlink_getsockbypid(ssk, pid); if (IS_ERR(sk)) { kfree_skb(skb); return PTR_ERR(sk); } if (netlink_is_kernel(sk)) return netlink_unicast_kernel(sk, skb); if (sk_filter(sk, skb)) { err = skb->len; kfree_skb(skb); sock_put(sk); return err; } err = netlink_attachskb(sk, skb, &timeo, ssk); if (err == 1) goto retry; if (err) return err; return netlink_sendskb(sk, skb); } EXPORT_SYMBOL(netlink_unicast); int netlink_has_listeners(struct sock *sk, unsigned int group) { int res = 0; struct listeners *listeners; BUG_ON(!netlink_is_kernel(sk)); rcu_read_lock(); listeners = rcu_dereference(nl_table[sk->sk_protocol].listeners); if (group - 1 < nl_table[sk->sk_protocol].groups) res = test_bit(group - 1, listeners->masks); rcu_read_unlock(); return res; } EXPORT_SYMBOL_GPL(netlink_has_listeners); static int netlink_broadcast_deliver(struct sock *sk, struct sk_buff *skb) { struct netlink_sock *nlk = nlk_sk(sk); if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf && !test_bit(0, &nlk->state)) { skb_set_owner_r(skb, sk); __netlink_sendskb(sk, skb); return atomic_read(&sk->sk_rmem_alloc) > (sk->sk_rcvbuf >> 1); } return -1; } struct netlink_broadcast_data { struct sock *exclude_sk; struct net *net; u32 pid; u32 group; int failure; int delivery_failure; int congested; int delivered; gfp_t allocation; struct sk_buff *skb, *skb2; int (*tx_filter)(struct sock *dsk, struct sk_buff *skb, void *data); void *tx_data; }; static int do_one_broadcast(struct sock *sk, struct netlink_broadcast_data *p) { struct netlink_sock *nlk = nlk_sk(sk); int val; if (p->exclude_sk == sk) goto out; if (nlk->pid == p->pid || p->group - 1 >= nlk->ngroups || 
!test_bit(p->group - 1, nlk->groups)) goto out; if (!net_eq(sock_net(sk), p->net)) goto out; if (p->failure) { netlink_overrun(sk); goto out; } sock_hold(sk); if (p->skb2 == NULL) { if (skb_shared(p->skb)) { p->skb2 = skb_clone(p->skb, p->allocation); } else { p->skb2 = skb_get(p->skb); /* * skb ownership may have been set when * delivered to a previous socket. */ skb_orphan(p->skb2); } } if (p->skb2 == NULL) { netlink_overrun(sk); /* Clone failed. Notify ALL listeners. */ p->failure = 1; if (nlk->flags & NETLINK_BROADCAST_SEND_ERROR) p->delivery_failure = 1; } else if (p->tx_filter && p->tx_filter(sk, p->skb2, p->tx_data)) { kfree_skb(p->skb2); p->skb2 = NULL; } else if (sk_filter(sk, p->skb2)) { kfree_skb(p->skb2); p->skb2 = NULL; } else if ((val = netlink_broadcast_deliver(sk, p->skb2)) < 0) { netlink_overrun(sk); if (nlk->flags & NETLINK_BROADCAST_SEND_ERROR) p->delivery_failure = 1; } else { p->congested |= val; p->delivered = 1; p->skb2 = NULL; } sock_put(sk); out: return 0; } int netlink_broadcast_filtered(struct sock *ssk, struct sk_buff *skb, u32 pid, u32 group, gfp_t allocation, int (*filter)(struct sock *dsk, struct sk_buff *skb, void *data), void *filter_data) { struct net *net = sock_net(ssk); struct netlink_broadcast_data info; struct hlist_node *node; struct sock *sk; skb = netlink_trim(skb, allocation); info.exclude_sk = ssk; info.net = net; info.pid = pid; info.group = group; info.failure = 0; info.delivery_failure = 0; info.congested = 0; info.delivered = 0; info.allocation = allocation; info.skb = skb; info.skb2 = NULL; info.tx_filter = filter; info.tx_data = filter_data; /* While we sleep in clone, do not allow to change socket list */ netlink_lock_table(); sk_for_each_bound(sk, node, &nl_table[ssk->sk_protocol].mc_list) do_one_broadcast(sk, &info); consume_skb(skb); netlink_unlock_table(); if (info.delivery_failure) { kfree_skb(info.skb2); return -ENOBUFS; } consume_skb(info.skb2); if (info.delivered) { if (info.congested && (allocation & 
__GFP_WAIT)) yield(); return 0; } return -ESRCH; } EXPORT_SYMBOL(netlink_broadcast_filtered); int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, u32 pid, u32 group, gfp_t allocation) { return netlink_broadcast_filtered(ssk, skb, pid, group, allocation, NULL, NULL); } EXPORT_SYMBOL(netlink_broadcast); struct netlink_set_err_data { struct sock *exclude_sk; u32 pid; u32 group; int code; }; static int do_one_set_err(struct sock *sk, struct netlink_set_err_data *p) { struct netlink_sock *nlk = nlk_sk(sk); int ret = 0; if (sk == p->exclude_sk) goto out; if (!net_eq(sock_net(sk), sock_net(p->exclude_sk))) goto out; if (nlk->pid == p->pid || p->group - 1 >= nlk->ngroups || !test_bit(p->group - 1, nlk->groups)) goto out; if (p->code == ENOBUFS && nlk->flags & NETLINK_RECV_NO_ENOBUFS) { ret = 1; goto out; } sk->sk_err = p->code; sk->sk_error_report(sk); out: return ret; } /** * netlink_set_err - report error to broadcast listeners * @ssk: the kernel netlink socket, as returned by netlink_kernel_create() * @pid: the PID of a process that we want to skip (if any) * @groups: the broadcast group that will notice the error * @code: error code, must be negative (as usual in kernelspace) * * This function returns the number of broadcast listeners that have set the * NETLINK_RECV_NO_ENOBUFS socket option. 
*/ int netlink_set_err(struct sock *ssk, u32 pid, u32 group, int code) { struct netlink_set_err_data info; struct hlist_node *node; struct sock *sk; int ret = 0; info.exclude_sk = ssk; info.pid = pid; info.group = group; /* sk->sk_err wants a positive error value */ info.code = -code; read_lock(&nl_table_lock); sk_for_each_bound(sk, node, &nl_table[ssk->sk_protocol].mc_list) ret += do_one_set_err(sk, &info); read_unlock(&nl_table_lock); return ret; } EXPORT_SYMBOL(netlink_set_err); /* must be called with netlink table grabbed */ static void netlink_update_socket_mc(struct netlink_sock *nlk, unsigned int group, int is_new) { int old, new = !!is_new, subscriptions; old = test_bit(group - 1, nlk->groups); subscriptions = nlk->subscriptions - old + new; if (new) __set_bit(group - 1, nlk->groups); else __clear_bit(group - 1, nlk->groups); netlink_update_subscriptions(&nlk->sk, subscriptions); netlink_update_listeners(&nlk->sk); } static int netlink_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen) { struct sock *sk = sock->sk; struct netlink_sock *nlk = nlk_sk(sk); unsigned int val = 0; int err; if (level != SOL_NETLINK) return -ENOPROTOOPT; if (optlen >= sizeof(int) && get_user(val, (unsigned int __user *)optval)) return -EFAULT; switch (optname) { case NETLINK_PKTINFO: if (val) nlk->flags |= NETLINK_RECV_PKTINFO; else nlk->flags &= ~NETLINK_RECV_PKTINFO; err = 0; break; case NETLINK_ADD_MEMBERSHIP: case NETLINK_DROP_MEMBERSHIP: { if (!netlink_capable(sock, NL_NONROOT_RECV)) return -EPERM; err = netlink_realloc_groups(sk); if (err) return err; if (!val || val - 1 >= nlk->ngroups) return -EINVAL; netlink_table_grab(); netlink_update_socket_mc(nlk, val, optname == NETLINK_ADD_MEMBERSHIP); netlink_table_ungrab(); if (nlk->netlink_bind) nlk->netlink_bind(val); err = 0; break; } case NETLINK_BROADCAST_ERROR: if (val) nlk->flags |= NETLINK_BROADCAST_SEND_ERROR; else nlk->flags &= ~NETLINK_BROADCAST_SEND_ERROR; err = 0; break; 
case NETLINK_NO_ENOBUFS: if (val) { nlk->flags |= NETLINK_RECV_NO_ENOBUFS; clear_bit(0, &nlk->state); wake_up_interruptible(&nlk->wait); } else { nlk->flags &= ~NETLINK_RECV_NO_ENOBUFS; } err = 0; break; default: err = -ENOPROTOOPT; } return err; } static int netlink_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen) { struct sock *sk = sock->sk; struct netlink_sock *nlk = nlk_sk(sk); int len, val, err; if (level != SOL_NETLINK) return -ENOPROTOOPT; if (get_user(len, optlen)) return -EFAULT; if (len < 0) return -EINVAL; switch (optname) { case NETLINK_PKTINFO: if (len < sizeof(int)) return -EINVAL; len = sizeof(int); val = nlk->flags & NETLINK_RECV_PKTINFO ? 1 : 0; if (put_user(len, optlen) || put_user(val, optval)) return -EFAULT; err = 0; break; case NETLINK_BROADCAST_ERROR: if (len < sizeof(int)) return -EINVAL; len = sizeof(int); val = nlk->flags & NETLINK_BROADCAST_SEND_ERROR ? 1 : 0; if (put_user(len, optlen) || put_user(val, optval)) return -EFAULT; err = 0; break; case NETLINK_NO_ENOBUFS: if (len < sizeof(int)) return -EINVAL; len = sizeof(int); val = nlk->flags & NETLINK_RECV_NO_ENOBUFS ? 
1 : 0; if (put_user(len, optlen) || put_user(val, optval)) return -EFAULT; err = 0; break; default: err = -ENOPROTOOPT; } return err; } static void netlink_cmsg_recv_pktinfo(struct msghdr *msg, struct sk_buff *skb) { struct nl_pktinfo info; info.group = NETLINK_CB(skb).dst_group; put_cmsg(msg, SOL_NETLINK, NETLINK_PKTINFO, sizeof(info), &info); } static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock, struct msghdr *msg, size_t len) { struct sock_iocb *siocb = kiocb_to_siocb(kiocb); struct sock *sk = sock->sk; struct netlink_sock *nlk = nlk_sk(sk); struct sockaddr_nl *addr = msg->msg_name; u32 dst_pid; u32 dst_group; struct sk_buff *skb; int err; struct scm_cookie scm; if (msg->msg_flags&MSG_OOB) return -EOPNOTSUPP; if (NULL == siocb->scm) siocb->scm = &scm; err = scm_send(sock, msg, siocb->scm, true); if (err < 0) return err; if (msg->msg_namelen) { err = -EINVAL; if (addr->nl_family != AF_NETLINK) goto out; dst_pid = addr->nl_pid; dst_group = ffs(addr->nl_groups); err = -EPERM; if (dst_group && !netlink_capable(sock, NL_NONROOT_SEND)) goto out; } else { dst_pid = nlk->dst_pid; dst_group = nlk->dst_group; } if (!nlk->pid) { err = netlink_autobind(sock); if (err) goto out; } err = -EMSGSIZE; if (len > sk->sk_sndbuf - 32) goto out; err = -ENOBUFS; skb = alloc_skb(len, GFP_KERNEL); if (skb == NULL) goto out; NETLINK_CB(skb).pid = nlk->pid; NETLINK_CB(skb).dst_group = dst_group; memcpy(NETLINK_CREDS(skb), &siocb->scm->creds, sizeof(struct ucred)); err = -EFAULT; if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) { kfree_skb(skb); goto out; } err = security_netlink_send(sk, skb); if (err) { kfree_skb(skb); goto out; } if (dst_group) { atomic_inc(&skb->users); netlink_broadcast(sk, skb, dst_pid, dst_group, GFP_KERNEL); } err = netlink_unicast(sk, skb, dst_pid, msg->msg_flags&MSG_DONTWAIT); out: scm_destroy(siocb->scm); return err; } static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock, struct msghdr *msg, size_t len, int flags) { 
struct sock_iocb *siocb = kiocb_to_siocb(kiocb); struct scm_cookie scm; struct sock *sk = sock->sk; struct netlink_sock *nlk = nlk_sk(sk); int noblock = flags&MSG_DONTWAIT; size_t copied; struct sk_buff *skb, *data_skb; int err, ret; if (flags&MSG_OOB) return -EOPNOTSUPP; copied = 0; skb = skb_recv_datagram(sk, flags, noblock, &err); if (skb == NULL) goto out; data_skb = skb; #ifdef CONFIG_COMPAT_NETLINK_MESSAGES if (unlikely(skb_shinfo(skb)->frag_list)) { /* * If this skb has a frag_list, then here that means that we * will have to use the frag_list skb's data for compat tasks * and the regular skb's data for normal (non-compat) tasks. * * If we need to send the compat skb, assign it to the * 'data_skb' variable so that it will be used below for data * copying. We keep 'skb' for everything else, including * freeing both later. */ if (flags & MSG_CMSG_COMPAT) data_skb = skb_shinfo(skb)->frag_list; } #endif msg->msg_namelen = 0; copied = data_skb->len; if (len < copied) { msg->msg_flags |= MSG_TRUNC; copied = len; } skb_reset_transport_header(data_skb); err = skb_copy_datagram_iovec(data_skb, 0, msg->msg_iov, copied); if (msg->msg_name) { struct sockaddr_nl *addr = (struct sockaddr_nl *)msg->msg_name; addr->nl_family = AF_NETLINK; addr->nl_pad = 0; addr->nl_pid = NETLINK_CB(skb).pid; addr->nl_groups = netlink_group_mask(NETLINK_CB(skb).dst_group); msg->msg_namelen = sizeof(*addr); } if (nlk->flags & NETLINK_RECV_PKTINFO) netlink_cmsg_recv_pktinfo(msg, skb); if (NULL == siocb->scm) { memset(&scm, 0, sizeof(scm)); siocb->scm = &scm; } siocb->scm->creds = *NETLINK_CREDS(skb); if (flags & MSG_TRUNC) copied = data_skb->len; skb_free_datagram(sk, skb); if (nlk->cb && atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2) { ret = netlink_dump(sk); if (ret) { sk->sk_err = ret; sk->sk_error_report(sk); } } scm_recv(sock, msg, siocb->scm, flags); out: netlink_rcv_wake(sk); return err ? 
: copied; } static void netlink_data_ready(struct sock *sk, int len) { BUG(); } /* * We export these functions to other modules. They provide a * complete set of kernel non-blocking support for message * queueing. */ struct sock * netlink_kernel_create(struct net *net, int unit, struct module *module, struct netlink_kernel_cfg *cfg) { struct socket *sock; struct sock *sk; struct netlink_sock *nlk; struct listeners *listeners = NULL; struct mutex *cb_mutex = cfg ? cfg->cb_mutex : NULL; unsigned int groups; BUG_ON(!nl_table); if (unit < 0 || unit >= MAX_LINKS) return NULL; if (sock_create_lite(PF_NETLINK, SOCK_DGRAM, unit, &sock)) return NULL; /* * We have to just have a reference on the net from sk, but don't * get_net it. Besides, we cannot get and then put the net here. * So we create one inside init_net and the move it to net. */ if (__netlink_create(&init_net, sock, cb_mutex, unit) < 0) goto out_sock_release_nosk; sk = sock->sk; sk_change_net(sk, net); if (!cfg || cfg->groups < 32) groups = 32; else groups = cfg->groups; listeners = kzalloc(sizeof(*listeners) + NLGRPSZ(groups), GFP_KERNEL); if (!listeners) goto out_sock_release; sk->sk_data_ready = netlink_data_ready; if (cfg && cfg->input) nlk_sk(sk)->netlink_rcv = cfg->input; if (netlink_insert(sk, net, 0)) goto out_sock_release; nlk = nlk_sk(sk); nlk->flags |= NETLINK_KERNEL_SOCKET; netlink_table_grab(); if (!nl_table[unit].registered) { nl_table[unit].groups = groups; rcu_assign_pointer(nl_table[unit].listeners, listeners); nl_table[unit].cb_mutex = cb_mutex; nl_table[unit].module = module; nl_table[unit].bind = cfg ? 
cfg->bind : NULL; nl_table[unit].registered = 1; } else { kfree(listeners); nl_table[unit].registered++; } netlink_table_ungrab(); return sk; out_sock_release: kfree(listeners); netlink_kernel_release(sk); return NULL; out_sock_release_nosk: sock_release(sock); return NULL; } EXPORT_SYMBOL(netlink_kernel_create); void netlink_kernel_release(struct sock *sk) { sk_release_kernel(sk); } EXPORT_SYMBOL(netlink_kernel_release); int __netlink_change_ngroups(struct sock *sk, unsigned int groups) { struct listeners *new, *old; struct netlink_table *tbl = &nl_table[sk->sk_protocol]; if (groups < 32) groups = 32; if (NLGRPSZ(tbl->groups) < NLGRPSZ(groups)) { new = kzalloc(sizeof(*new) + NLGRPSZ(groups), GFP_ATOMIC); if (!new) return -ENOMEM; old = rcu_dereference_protected(tbl->listeners, 1); memcpy(new->masks, old->masks, NLGRPSZ(tbl->groups)); rcu_assign_pointer(tbl->listeners, new); kfree_rcu(old, rcu); } tbl->groups = groups; return 0; } /** * netlink_change_ngroups - change number of multicast groups * * This changes the number of multicast groups that are available * on a certain netlink family. Note that it is not possible to * change the number of groups to below 32. Also note that it does * not implicitly call netlink_clear_multicast_users() when the * number of groups is reduced. * * @sk: The kernel netlink socket, as returned by netlink_kernel_create(). * @groups: The new number of groups. */ int netlink_change_ngroups(struct sock *sk, unsigned int groups) { int err; netlink_table_grab(); err = __netlink_change_ngroups(sk, groups); netlink_table_ungrab(); return err; } void __netlink_clear_multicast_users(struct sock *ksk, unsigned int group) { struct sock *sk; struct hlist_node *node; struct netlink_table *tbl = &nl_table[ksk->sk_protocol]; sk_for_each_bound(sk, node, &tbl->mc_list) netlink_update_socket_mc(nlk_sk(sk), group, 0); } /** * netlink_clear_multicast_users - kick off multicast listeners * * This function removes all listeners from the given group. 
* @ksk: The kernel netlink socket, as returned by * netlink_kernel_create(). * @group: The multicast group to clear. */ void netlink_clear_multicast_users(struct sock *ksk, unsigned int group) { netlink_table_grab(); __netlink_clear_multicast_users(ksk, group); netlink_table_ungrab(); } void netlink_set_nonroot(int protocol, unsigned int flags) { if ((unsigned int)protocol < MAX_LINKS) nl_table[protocol].nl_nonroot = flags; } EXPORT_SYMBOL(netlink_set_nonroot); struct nlmsghdr * __nlmsg_put(struct sk_buff *skb, u32 pid, u32 seq, int type, int len, int flags) { struct nlmsghdr *nlh; int size = NLMSG_LENGTH(len); nlh = (struct nlmsghdr*)skb_put(skb, NLMSG_ALIGN(size)); nlh->nlmsg_type = type; nlh->nlmsg_len = size; nlh->nlmsg_flags = flags; nlh->nlmsg_pid = pid; nlh->nlmsg_seq = seq; if (!__builtin_constant_p(size) || NLMSG_ALIGN(size) - size != 0) memset(NLMSG_DATA(nlh) + len, 0, NLMSG_ALIGN(size) - size); return nlh; } EXPORT_SYMBOL(__nlmsg_put); /* * It looks a bit ugly. * It would be better to create kernel thread. 
*/ static int netlink_dump(struct sock *sk) { struct netlink_sock *nlk = nlk_sk(sk); struct netlink_callback *cb; struct sk_buff *skb = NULL; struct nlmsghdr *nlh; int len, err = -ENOBUFS; int alloc_size; mutex_lock(nlk->cb_mutex); cb = nlk->cb; if (cb == NULL) { err = -EINVAL; goto errout_skb; } alloc_size = max_t(int, cb->min_dump_alloc, NLMSG_GOODSIZE); skb = sock_rmalloc(sk, alloc_size, 0, GFP_KERNEL); if (!skb) goto errout_skb; len = cb->dump(skb, cb); if (len > 0) { mutex_unlock(nlk->cb_mutex); if (sk_filter(sk, skb)) kfree_skb(skb); else __netlink_sendskb(sk, skb); return 0; } nlh = nlmsg_put_answer(skb, cb, NLMSG_DONE, sizeof(len), NLM_F_MULTI); if (!nlh) goto errout_skb; nl_dump_check_consistent(cb, nlh); memcpy(nlmsg_data(nlh), &len, sizeof(len)); if (sk_filter(sk, skb)) kfree_skb(skb); else __netlink_sendskb(sk, skb); if (cb->done) cb->done(cb); nlk->cb = NULL; mutex_unlock(nlk->cb_mutex); netlink_consume_callback(cb); return 0; errout_skb: mutex_unlock(nlk->cb_mutex); kfree_skb(skb); return err; } int netlink_dump_start(struct sock *ssk, struct sk_buff *skb, const struct nlmsghdr *nlh, struct netlink_dump_control *control) { struct netlink_callback *cb; struct sock *sk; struct netlink_sock *nlk; int ret; cb = kzalloc(sizeof(*cb), GFP_KERNEL); if (cb == NULL) return -ENOBUFS; cb->dump = control->dump; cb->done = control->done; cb->nlh = nlh; cb->data = control->data; cb->min_dump_alloc = control->min_dump_alloc; atomic_inc(&skb->users); cb->skb = skb; sk = netlink_lookup(sock_net(ssk), ssk->sk_protocol, NETLINK_CB(skb).pid); if (sk == NULL) { netlink_destroy_callback(cb); return -ECONNREFUSED; } nlk = nlk_sk(sk); /* A dump is in progress... 
*/ mutex_lock(nlk->cb_mutex); if (nlk->cb) { mutex_unlock(nlk->cb_mutex); netlink_destroy_callback(cb); sock_put(sk); return -EBUSY; } nlk->cb = cb; mutex_unlock(nlk->cb_mutex); ret = netlink_dump(sk); sock_put(sk); if (ret) return ret; /* We successfully started a dump, by returning -EINTR we * signal not to send ACK even if it was requested. */ return -EINTR; } EXPORT_SYMBOL(netlink_dump_start); void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err) { struct sk_buff *skb; struct nlmsghdr *rep; struct nlmsgerr *errmsg; size_t payload = sizeof(*errmsg); /* error messages get the original request appened */ if (err) payload += nlmsg_len(nlh); skb = nlmsg_new(payload, GFP_KERNEL); if (!skb) { struct sock *sk; sk = netlink_lookup(sock_net(in_skb->sk), in_skb->sk->sk_protocol, NETLINK_CB(in_skb).pid); if (sk) { sk->sk_err = ENOBUFS; sk->sk_error_report(sk); sock_put(sk); } return; } rep = __nlmsg_put(skb, NETLINK_CB(in_skb).pid, nlh->nlmsg_seq, NLMSG_ERROR, payload, 0); errmsg = nlmsg_data(rep); errmsg->error = err; memcpy(&errmsg->msg, nlh, err ? 
nlh->nlmsg_len : sizeof(*nlh)); netlink_unicast(in_skb->sk, skb, NETLINK_CB(in_skb).pid, MSG_DONTWAIT); } EXPORT_SYMBOL(netlink_ack); int netlink_rcv_skb(struct sk_buff *skb, int (*cb)(struct sk_buff *, struct nlmsghdr *)) { struct nlmsghdr *nlh; int err; while (skb->len >= nlmsg_total_size(0)) { int msglen; nlh = nlmsg_hdr(skb); err = 0; if (nlh->nlmsg_len < NLMSG_HDRLEN || skb->len < nlh->nlmsg_len) return 0; /* Only requests are handled by the kernel */ if (!(nlh->nlmsg_flags & NLM_F_REQUEST)) goto ack; /* Skip control messages */ if (nlh->nlmsg_type < NLMSG_MIN_TYPE) goto ack; err = cb(skb, nlh); if (err == -EINTR) goto skip; ack: if (nlh->nlmsg_flags & NLM_F_ACK || err) netlink_ack(skb, nlh, err); skip: msglen = NLMSG_ALIGN(nlh->nlmsg_len); if (msglen > skb->len) msglen = skb->len; skb_pull(skb, msglen); } return 0; } EXPORT_SYMBOL(netlink_rcv_skb); /** * nlmsg_notify - send a notification netlink message * @sk: netlink socket to use * @skb: notification message * @pid: destination netlink pid for reports or 0 * @group: destination multicast group or 0 * @report: 1 to report back, 0 to disable * @flags: allocation flags */ int nlmsg_notify(struct sock *sk, struct sk_buff *skb, u32 pid, unsigned int group, int report, gfp_t flags) { int err = 0; if (group) { int exclude_pid = 0; if (report) { atomic_inc(&skb->users); exclude_pid = pid; } /* errors reported via destination sk->sk_err, but propagate * delivery errors if NETLINK_BROADCAST_ERROR flag is set */ err = nlmsg_multicast(sk, skb, exclude_pid, group, flags); } if (report) { int err2; err2 = nlmsg_unicast(sk, skb, pid); if (!err || err == -ESRCH) err = err2; } return err; } EXPORT_SYMBOL(nlmsg_notify); #ifdef CONFIG_PROC_FS struct nl_seq_iter { struct seq_net_private p; int link; int hash_idx; }; static struct sock *netlink_seq_socket_idx(struct seq_file *seq, loff_t pos) { struct nl_seq_iter *iter = seq->private; int i, j; struct sock *s; struct hlist_node *node; loff_t off = 0; for (i = 0; i < MAX_LINKS; 
i++) { struct nl_pid_hash *hash = &nl_table[i].hash; for (j = 0; j <= hash->mask; j++) { sk_for_each(s, node, &hash->table[j]) { if (sock_net(s) != seq_file_net(seq)) continue; if (off == pos) { iter->link = i; iter->hash_idx = j; return s; } ++off; } } } return NULL; } static void *netlink_seq_start(struct seq_file *seq, loff_t *pos) __acquires(nl_table_lock) { read_lock(&nl_table_lock); return *pos ? netlink_seq_socket_idx(seq, *pos - 1) : SEQ_START_TOKEN; } static void *netlink_seq_next(struct seq_file *seq, void *v, loff_t *pos) { struct sock *s; struct nl_seq_iter *iter; int i, j; ++*pos; if (v == SEQ_START_TOKEN) return netlink_seq_socket_idx(seq, 0); iter = seq->private; s = v; do { s = sk_next(s); } while (s && sock_net(s) != seq_file_net(seq)); if (s) return s; i = iter->link; j = iter->hash_idx + 1; do { struct nl_pid_hash *hash = &nl_table[i].hash; for (; j <= hash->mask; j++) { s = sk_head(&hash->table[j]); while (s && sock_net(s) != seq_file_net(seq)) s = sk_next(s); if (s) { iter->link = i; iter->hash_idx = j; return s; } } j = 0; } while (++i < MAX_LINKS); return NULL; } static void netlink_seq_stop(struct seq_file *seq, void *v) __releases(nl_table_lock) { read_unlock(&nl_table_lock); } static int netlink_seq_show(struct seq_file *seq, void *v) { if (v == SEQ_START_TOKEN) { seq_puts(seq, "sk Eth Pid Groups " "Rmem Wmem Dump Locks Drops Inode\n"); } else { struct sock *s = v; struct netlink_sock *nlk = nlk_sk(s); seq_printf(seq, "%pK %-3d %-6d %08x %-8d %-8d %pK %-8d %-8d %-8lu\n", s, s->sk_protocol, nlk->pid, nlk->groups ? 
(u32)nlk->groups[0] : 0, sk_rmem_alloc_get(s), sk_wmem_alloc_get(s), nlk->cb, atomic_read(&s->sk_refcnt), atomic_read(&s->sk_drops), sock_i_ino(s) ); } return 0; } static const struct seq_operations netlink_seq_ops = { .start = netlink_seq_start, .next = netlink_seq_next, .stop = netlink_seq_stop, .show = netlink_seq_show, }; static int netlink_seq_open(struct inode *inode, struct file *file) { return seq_open_net(inode, file, &netlink_seq_ops, sizeof(struct nl_seq_iter)); } static const struct file_operations netlink_seq_fops = { .owner = THIS_MODULE, .open = netlink_seq_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release_net, }; #endif int netlink_register_notifier(struct notifier_block *nb) { return atomic_notifier_chain_register(&netlink_chain, nb); } EXPORT_SYMBOL(netlink_register_notifier); int netlink_unregister_notifier(struct notifier_block *nb) { return atomic_notifier_chain_unregister(&netlink_chain, nb); } EXPORT_SYMBOL(netlink_unregister_notifier); static const struct proto_ops netlink_ops = { .family = PF_NETLINK, .owner = THIS_MODULE, .release = netlink_release, .bind = netlink_bind, .connect = netlink_connect, .socketpair = sock_no_socketpair, .accept = sock_no_accept, .getname = netlink_getname, .poll = datagram_poll, .ioctl = sock_no_ioctl, .listen = sock_no_listen, .shutdown = sock_no_shutdown, .setsockopt = netlink_setsockopt, .getsockopt = netlink_getsockopt, .sendmsg = netlink_sendmsg, .recvmsg = netlink_recvmsg, .mmap = sock_no_mmap, .sendpage = sock_no_sendpage, }; static const struct net_proto_family netlink_family_ops = { .family = PF_NETLINK, .create = netlink_create, .owner = THIS_MODULE, /* for consistency 8) */ }; static int __net_init netlink_net_init(struct net *net) { #ifdef CONFIG_PROC_FS if (!proc_net_fops_create(net, "netlink", 0, &netlink_seq_fops)) return -ENOMEM; #endif return 0; } static void __net_exit netlink_net_exit(struct net *net) { #ifdef CONFIG_PROC_FS proc_net_remove(net, "netlink"); #endif } static 
void __init netlink_add_usersock_entry(void) { struct listeners *listeners; int groups = 32; listeners = kzalloc(sizeof(*listeners) + NLGRPSZ(groups), GFP_KERNEL); if (!listeners) panic("netlink_add_usersock_entry: Cannot allocate listeners\n"); netlink_table_grab(); nl_table[NETLINK_USERSOCK].groups = groups; rcu_assign_pointer(nl_table[NETLINK_USERSOCK].listeners, listeners); nl_table[NETLINK_USERSOCK].module = THIS_MODULE; nl_table[NETLINK_USERSOCK].registered = 1; netlink_table_ungrab(); } static struct pernet_operations __net_initdata netlink_net_ops = { .init = netlink_net_init, .exit = netlink_net_exit, }; static int __init netlink_proto_init(void) { struct sk_buff *dummy_skb; int i; unsigned long limit; unsigned int order; int err = proto_register(&netlink_proto, 0); if (err != 0) goto out; BUILD_BUG_ON(sizeof(struct netlink_skb_parms) > sizeof(dummy_skb->cb)); nl_table = kcalloc(MAX_LINKS, sizeof(*nl_table), GFP_KERNEL); if (!nl_table) goto panic; if (totalram_pages >= (128 * 1024)) limit = totalram_pages >> (21 - PAGE_SHIFT); else limit = totalram_pages >> (23 - PAGE_SHIFT); order = get_bitmask_order(limit) - 1 + PAGE_SHIFT; limit = (1UL << order) / sizeof(struct hlist_head); order = get_bitmask_order(min(limit, (unsigned long)UINT_MAX)) - 1; for (i = 0; i < MAX_LINKS; i++) { struct nl_pid_hash *hash = &nl_table[i].hash; hash->table = nl_pid_hash_zalloc(1 * sizeof(*hash->table)); if (!hash->table) { while (i-- > 0) nl_pid_hash_free(nl_table[i].hash.table, 1 * sizeof(*hash->table)); kfree(nl_table); goto panic; } hash->max_shift = order; hash->shift = 0; hash->mask = 0; hash->rehash_time = jiffies; } netlink_add_usersock_entry(); sock_register(&netlink_family_ops); register_pernet_subsys(&netlink_net_ops); /* The netlink device handler may be needed early. */ rtnetlink_init(); out: return err; panic: panic("netlink_init: Cannot allocate nl_table\n"); } core_initcall(netlink_proto_init);
./CrossVul/dataset_final_sorted/CWE-284/c/bad_3852_0
crossvul-cpp_data_good_880_1
/* * Copyright (C) 2014-2019 Firejail Authors * * This file is part of firejail project * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #include "firejail.h" #include "../include/ldd_utils.h" #include <sys/mount.h> #include <sys/stat.h> #include <sys/types.h> #include <unistd.h> #include <dirent.h> #include <glob.h> #define MAXBUF 4096 extern void fslib_install_stdc(void); extern void fslib_install_system(void); static int lib_cnt = 0; static int dir_cnt = 0; static void report_duplication(const char *full_path) { char *fname = strrchr(full_path, '/'); if (fname && *(++fname) != '\0') { // report the file on all bin paths int i = 0; while (default_lib_paths[i]) { char *p; if (asprintf(&p, "%s/%s", default_lib_paths[i], fname) == -1) errExit("asprintf"); fs_logger2("clone", p); free(p); i++; } } } static char *build_dest_dir(const char *full_path) { assert(full_path); if (strstr(full_path, "/x86_64-linux-gnu/")) return RUN_LIB_DIR "/x86_64-linux-gnu"; return RUN_LIB_DIR; } // copy fname in private_run_dir void fslib_duplicate(const char *full_path) { assert(full_path); struct stat s; if (stat(full_path, &s) != 0 || s.st_uid != 0 || access(full_path, R_OK)) return; char *dest_dir = build_dest_dir(full_path); // don't copy it if the file is already there char *ptr = strrchr(full_path, '/'); if (!ptr) return; ptr++; if 
(*ptr == '\0') return; char *name; if (asprintf(&name, "%s/%s", dest_dir, ptr) == -1) errExit("asprintf"); if (stat(name, &s) == 0) { free(name); return; } free(name); if (arg_debug || arg_debug_private_lib) printf(" copying %s to private %s\n", full_path, dest_dir); sbox_run(SBOX_ROOT| SBOX_SECCOMP, 4, PATH_FCOPY, "--follow-link", full_path, dest_dir); report_duplication(full_path); lib_cnt++; } // requires full path for lib // it could be a library or an executable // lib is not copied, only libraries used by it void fslib_copy_libs(const char *full_path) { assert(full_path); if (arg_debug || arg_debug_private_lib) printf(" fslib_copy_libs %s\n", full_path); // if library/executable does not exist or the user does not have read access to it // print a warning and exit the function. if (access(full_path, R_OK)) { if (arg_debug || arg_debug_private_lib) printf("cannot find %s for private-lib, skipping...\n", full_path); return; } // create an empty RUN_LIB_FILE and allow the user to write to it unlink(RUN_LIB_FILE); // in case is there create_empty_file_as_root(RUN_LIB_FILE, 0644); if (chown(RUN_LIB_FILE, getuid(), getgid())) errExit("chown"); // run fldd to extract the list of files if (arg_debug || arg_debug_private_lib) printf(" running fldd %s\n", full_path); sbox_run(SBOX_USER | SBOX_SECCOMP | SBOX_CAPS_NONE, 3, PATH_FLDD, full_path, RUN_LIB_FILE); // open the list of libraries and install them on by one FILE *fp = fopen(RUN_LIB_FILE, "r"); if (!fp) errExit("fopen"); char buf[MAXBUF]; while (fgets(buf, MAXBUF, fp)) { // remove \n char *ptr = strchr(buf, '\n'); if (ptr) *ptr = '\0'; fslib_duplicate(buf); } fclose(fp); unlink(RUN_LIB_FILE); } void fslib_copy_dir(const char *full_path) { assert(full_path); if (arg_debug || arg_debug_private_lib) printf(" fslib_copy_dir %s\n", full_path); // do nothing if the directory does not exist or is not owned by root struct stat s; if (stat(full_path, &s) != 0 || s.st_uid != 0 || !S_ISDIR(s.st_mode) || access(full_path, 
R_OK)) return; char *dir_name = strrchr(full_path, '/'); assert(dir_name); dir_name++; assert(*dir_name != '\0'); // do nothing if the directory is already there char *dest; if (asprintf(&dest, "%s/%s", build_dest_dir(full_path), dir_name) == -1) errExit("asprintf"); if (stat(dest, &s) == 0) { free(dest); return; } // create new directory and mount the original on top of it mkdir_attr(dest, 0755, 0, 0); if (mount(full_path, dest, NULL, MS_BIND|MS_REC, NULL) < 0 || mount(NULL, dest, NULL, MS_BIND|MS_REMOUNT|MS_NOSUID|MS_NODEV|MS_REC, NULL) < 0) errExit("mount bind"); fs_logger2("clone", full_path); fs_logger2("mount", full_path); dir_cnt++; free(dest); } // fname should be a vallid full path at this point static void load_library(const char *fname) { assert(fname); assert(*fname == '/'); // existing file owned by root, read access struct stat s; if (stat(fname, &s) == 0 && s.st_uid == 0 && !access(fname, R_OK)) { // load directories, regular 64 bit libraries, and 64 bit executables if (is_dir(fname) || is_lib_64(fname)) { if (is_dir(fname)) fslib_copy_dir(fname); else { if (strstr(fname, ".so") || access(fname, X_OK) != 0) // don't duplicate executables, just install the libraries fslib_duplicate(fname); fslib_copy_libs(fname); } } } } static void install_list_entry(const char *lib) { assert(lib); // filename check int len = strlen(lib); if (strcspn(lib, "\\&!?\"'<>%^(){}[];,") != (size_t)len || strstr(lib, "..")) { fprintf(stderr, "Error: \"%s\" is an invalid library\n", lib); exit(1); } // if this is a full path, use it as is if (*lib == '/') return load_library(lib); // find the library int i; for (i = 0; default_lib_paths[i]; i++) { char *fname = NULL; if (asprintf(&fname, "%s/%s", default_lib_paths[i], lib) == -1) errExit("asprintf"); #define DO_GLOBBING #ifdef DO_GLOBBING // globbing glob_t globbuf; int globerr = glob(fname, GLOB_NOCHECK | GLOB_NOSORT | GLOB_PERIOD, NULL, &globbuf); if (globerr) { fprintf(stderr, "Error: failed to glob private-lib pattern 
%s\n", fname); exit(1); } size_t j; for (j = 0; j < globbuf.gl_pathc; j++) { assert(globbuf.gl_pathv[j]); //printf("glob %s\n", globbuf.gl_pathv[j]); // GLOB_NOCHECK - no pattern matched returns the original pattern; try to load it anyway load_library(globbuf.gl_pathv[j]); } globfree(&globbuf); #else load_library(fname); #endif free(fname); } // fwarning("%s library not found, skipping...\n", lib); return; } void fslib_install_list(const char *lib_list) { assert(lib_list); if (arg_debug || arg_debug_private_lib) printf(" fslib_install_list %s\n", lib_list); char *dlist = strdup(lib_list); if (!dlist) errExit("strdup"); char *ptr = strtok(dlist, ","); if (!ptr) { fprintf(stderr, "Error: invalid private-lib argument\n"); exit(1); } install_list_entry(ptr); while ((ptr = strtok(NULL, ",")) != NULL) install_list_entry(ptr); free(dlist); fs_logger_print(); } static void mount_directories(void) { if (arg_debug || arg_debug_private_lib) printf("Mount-bind %s on top of /lib /lib64 /usr/lib\n", RUN_LIB_DIR); if (is_dir("/lib")) { if (mount(RUN_LIB_DIR, "/lib", NULL, MS_BIND|MS_REC, NULL) < 0 || mount(NULL, "/lib", NULL, MS_BIND|MS_REMOUNT|MS_NOSUID|MS_NODEV|MS_REC, NULL) < 0) errExit("mount bind"); fs_logger2("tmpfs", "/lib"); fs_logger("mount /lib"); } if (is_dir("/lib64")) { if (mount(RUN_LIB_DIR, "/lib64", NULL, MS_BIND|MS_REC, NULL) < 0 || mount(NULL, "/lib64", NULL, MS_BIND|MS_REMOUNT|MS_NOSUID|MS_NODEV|MS_REC, NULL) < 0) errExit("mount bind"); fs_logger2("tmpfs", "/lib64"); fs_logger("mount /lib64"); } if (is_dir("/usr/lib")) { if (mount(RUN_LIB_DIR, "/usr/lib", NULL, MS_BIND|MS_REC, NULL) < 0 || mount(NULL, "/usr/lib", NULL, MS_BIND|MS_REMOUNT|MS_NOSUID|MS_NODEV|MS_REC, NULL) < 0) errExit("mount bind"); fs_logger2("tmpfs", "/usr/lib"); fs_logger("mount /usr/lib"); } // for amd64 only - we'll deal with i386 later if (is_dir("/lib32")) { if (mount(RUN_RO_DIR, "/lib32", "none", MS_BIND, "mode=400,gid=0") < 0) errExit("disable file"); fs_logger("blacklist-nolog /lib32"); 
} if (is_dir("/libx32")) { if (mount(RUN_RO_DIR, "/libx32", "none", MS_BIND, "mode=400,gid=0") < 0) errExit("disable file"); fs_logger("blacklist-nolog /libx32"); } } void fs_private_lib(void) { #ifndef __x86_64__ fwarning("private-lib feature is currently available only on amd64 platforms\n"); return; #endif char *private_list = cfg.lib_private_keep; if (arg_debug || arg_debug_private_lib) printf("Starting private-lib processing: program %s, shell %s\n", (cfg.original_program_index > 0)? cfg.original_argv[cfg.original_program_index]: "none", (arg_shell_none)? "none": cfg.shell); // create /run/firejail/mnt/lib directory mkdir_attr(RUN_LIB_DIR, 0755, 0, 0); // install standard C libraries if (arg_debug || arg_debug_private_lib) printf("Installing standard C library\n"); fslib_install_stdc(); // start timetrace timetrace_start(); // copy the libs in the new lib directory for the main exe if (cfg.original_program_index > 0) { if (arg_debug || arg_debug_private_lib) printf("Installing sandboxed program libraries\n"); fslib_install_list(cfg.original_argv[cfg.original_program_index]); } // for the shell if (!arg_shell_none) { if (arg_debug || arg_debug_private_lib) printf("Installing shell libraries\n"); fslib_install_list(cfg.shell); // a shell is useless without some basic commands fslib_install_list("/bin/ls,/bin/cat,/bin/mv,/bin/rm"); } // for the listed libs and directories if (private_list && *private_list != '\0') { if (arg_debug || arg_debug_private_lib) printf("Processing private-lib files\n"); fslib_install_list(private_list); } // for private-bin files if (arg_private_bin && cfg.bin_private_lib && *cfg.bin_private_lib != '\0') { if (arg_debug || arg_debug_private_lib) printf("Processing private-bin files\n"); fslib_install_list(cfg.bin_private_lib); } fmessage("Program libraries installed in %0.2f ms\n", timetrace_end()); // install the reset of the system libraries if (arg_debug || arg_debug_private_lib) printf("Installing system libraries\n"); 
fslib_install_system(); // bring in firejail directory for --trace and seccomp post exec // bring in firejail executable libraries in case we are redirected here by a firejail symlink from /usr/local/bin/firejail fslib_install_list("/usr/bin/firejail,firejail"); // todo: use the installed path for the executable fmessage("Installed %d %s and %d %s\n", lib_cnt, (lib_cnt == 1)? "library": "libraries", dir_cnt, (dir_cnt == 1)? "directory": "directories"); // mount lib filesystem mount_directories(); }
./CrossVul/dataset_final_sorted/CWE-284/c/good_880_1
crossvul-cpp_data_good_5396_2
/* * The Python Imaging Library * $Id$ * * imaging storage object * * This baseline implementation is designed to efficiently handle * large images, provided they fit into the available memory. * * history: * 1995-06-15 fl Created * 1995-09-12 fl Updated API, compiles silently under ANSI C++ * 1995-11-26 fl Compiles silently under Borland 4.5 as well * 1996-05-05 fl Correctly test status from Prologue * 1997-05-12 fl Increased THRESHOLD (to speed up Tk interface) * 1997-05-30 fl Added support for floating point images * 1997-11-17 fl Added support for "RGBX" images * 1998-01-11 fl Added support for integer images * 1998-03-05 fl Exported Prologue/Epilogue functions * 1998-07-01 fl Added basic "YCrCb" support * 1998-07-03 fl Attach palette in prologue for "P" images * 1998-07-09 hk Don't report MemoryError on zero-size images * 1998-07-12 fl Change "YCrCb" to "YCbCr" (!) * 1998-10-26 fl Added "I;16" and "I;16B" storage modes (experimental) * 1998-12-29 fl Fixed allocation bug caused by previous fix * 1999-02-03 fl Added "RGBa" and "BGR" modes (experimental) * 2001-04-22 fl Fixed potential memory leak in ImagingCopyInfo * 2003-09-26 fl Added "LA" and "PA" modes (experimental) * 2005-10-02 fl Added image counter * * Copyright (c) 1998-2005 by Secret Labs AB * Copyright (c) 1995-2005 by Fredrik Lundh * * See the README file for information on usage and redistribution. */ #include "Imaging.h" #include <string.h> int ImagingNewCount = 0; /* -------------------------------------------------------------------- * Standard image object. 
*/ Imaging ImagingNewPrologueSubtype(const char *mode, int xsize, int ysize, int size) { Imaging im; ImagingSectionCookie cookie; im = (Imaging) calloc(1, size); if (!im) return (Imaging) ImagingError_MemoryError(); /* linesize overflow check, roughly the current largest space req'd */ if (xsize > (INT_MAX / 4) - 1) { return (Imaging) ImagingError_MemoryError(); } /* Setup image descriptor */ im->xsize = xsize; im->ysize = ysize; im->type = IMAGING_TYPE_UINT8; if (strcmp(mode, "1") == 0) { /* 1-bit images */ im->bands = im->pixelsize = 1; im->linesize = xsize; } else if (strcmp(mode, "P") == 0) { /* 8-bit palette mapped images */ im->bands = im->pixelsize = 1; im->linesize = xsize; im->palette = ImagingPaletteNew("RGB"); } else if (strcmp(mode, "PA") == 0) { /* 8-bit palette with alpha */ im->bands = 2; im->pixelsize = 4; /* store in image32 memory */ im->linesize = xsize * 4; im->palette = ImagingPaletteNew("RGB"); } else if (strcmp(mode, "L") == 0) { /* 8-bit greyscale (luminance) images */ im->bands = im->pixelsize = 1; im->linesize = xsize; } else if (strcmp(mode, "LA") == 0) { /* 8-bit greyscale (luminance) with alpha */ im->bands = 2; im->pixelsize = 4; /* store in image32 memory */ im->linesize = xsize * 4; } else if (strcmp(mode, "La") == 0) { /* 8-bit greyscale (luminance) with premultiplied alpha */ im->bands = 2; im->pixelsize = 4; /* store in image32 memory */ im->linesize = xsize * 4; } else if (strcmp(mode, "F") == 0) { /* 32-bit floating point images */ im->bands = 1; im->pixelsize = 4; im->linesize = xsize * 4; im->type = IMAGING_TYPE_FLOAT32; } else if (strcmp(mode, "I") == 0) { /* 32-bit integer images */ im->bands = 1; im->pixelsize = 4; im->linesize = xsize * 4; im->type = IMAGING_TYPE_INT32; } else if (strcmp(mode, "I;16") == 0 || strcmp(mode, "I;16L") == 0 \ || strcmp(mode, "I;16B") == 0 || strcmp(mode, "I;16N") == 0) { /* EXPERIMENTAL */ /* 16-bit raw integer images */ im->bands = 1; im->pixelsize = 2; im->linesize = xsize * 2; im->type = 
IMAGING_TYPE_SPECIAL; } else if (strcmp(mode, "RGB") == 0) { /* 24-bit true colour images */ im->bands = 3; im->pixelsize = 4; im->linesize = xsize * 4; } else if (strcmp(mode, "BGR;15") == 0) { /* EXPERIMENTAL */ /* 15-bit true colour */ im->bands = 1; im->pixelsize = 2; im->linesize = (xsize*2 + 3) & -4; im->type = IMAGING_TYPE_SPECIAL; } else if (strcmp(mode, "BGR;16") == 0) { /* EXPERIMENTAL */ /* 16-bit reversed true colour */ im->bands = 1; im->pixelsize = 2; im->linesize = (xsize*2 + 3) & -4; im->type = IMAGING_TYPE_SPECIAL; } else if (strcmp(mode, "BGR;24") == 0) { /* EXPERIMENTAL */ /* 24-bit reversed true colour */ im->bands = 1; im->pixelsize = 3; im->linesize = (xsize*3 + 3) & -4; im->type = IMAGING_TYPE_SPECIAL; } else if (strcmp(mode, "BGR;32") == 0) { /* EXPERIMENTAL */ /* 32-bit reversed true colour */ im->bands = 1; im->pixelsize = 4; im->linesize = (xsize*4 + 3) & -4; im->type = IMAGING_TYPE_SPECIAL; } else if (strcmp(mode, "RGBX") == 0) { /* 32-bit true colour images with padding */ im->bands = im->pixelsize = 4; im->linesize = xsize * 4; } else if (strcmp(mode, "RGBA") == 0) { /* 32-bit true colour images with alpha */ im->bands = im->pixelsize = 4; im->linesize = xsize * 4; } else if (strcmp(mode, "RGBa") == 0) { /* EXPERIMENTAL */ /* 32-bit true colour images with premultiplied alpha */ im->bands = im->pixelsize = 4; im->linesize = xsize * 4; } else if (strcmp(mode, "CMYK") == 0) { /* 32-bit colour separation */ im->bands = im->pixelsize = 4; im->linesize = xsize * 4; } else if (strcmp(mode, "YCbCr") == 0) { /* 24-bit video format */ im->bands = 3; im->pixelsize = 4; im->linesize = xsize * 4; } else if (strcmp(mode, "LAB") == 0) { /* 24-bit color, luminance, + 2 color channels */ /* L is uint8, a,b are int8 */ im->bands = 3; im->pixelsize = 4; im->linesize = xsize * 4; } else if (strcmp(mode, "HSV") == 0) { /* 24-bit color, luminance, + 2 color channels */ /* L is uint8, a,b are int8 */ im->bands = 3; im->pixelsize = 4; im->linesize = xsize * 
    4;   /* tail of the "HSV" branch of the mode-dispatch chain in
            ImagingNewPrologueSubtype (the head of that chain is above
            this chunk) */
    } else {
        /* no mode branch matched: release the descriptor and reject */
        free(im);
        return (Imaging) ImagingError_ValueError("unrecognized mode");
    }

    /* Setup image descriptor */
    /* NOTE(review): assumes mode fits in im->mode -- presumably
       validated earlier in this function; confirm against the head of
       the mode chain. */
    strcpy(im->mode, mode);

    ImagingSectionEnter(&cookie);

    /* Pointer array (allocate at least one line, to avoid MemoryError
       exceptions on platforms where calloc(0, x) returns NULL) */
    im->image = (char **) calloc((ysize > 0) ? ysize : 1, sizeof(void *));

    ImagingSectionLeave(&cookie);

    if (!im->image) {
        free(im);
        return (Imaging) ImagingError_MemoryError();
    }

    ImagingNewCount++;

    return im;
}

/* Allocate an image descriptor of the default instance size.  Thin
   wrapper over ImagingNewPrologueSubtype; raster storage is attached
   later by one of the storage allocators below. */
Imaging
ImagingNewPrologue(const char *mode, int xsize, int ysize)
{
    return ImagingNewPrologueSubtype(
        mode, xsize, ysize, sizeof(struct ImagingMemoryInstance)
        );
}

/* Finish construction after a storage allocator has run: verify the
   allocator installed a destructor (its success marker) and set up the
   typed alias pointers (image8/image32) over the line-pointer table. */
Imaging
ImagingNewEpilogue(Imaging im)
{
    /* If the raster data allocator didn't setup a destructor,
       assume that it couldn't allocate the required amount of
       memory. */
    if (!im->destroy)
        return (Imaging) ImagingError_MemoryError();

    /* Initialize alias pointers to pixel data. */
    switch (im->pixelsize) {
    case 1: case 2: case 3:
        im->image8 = (UINT8 **) im->image;
        break;
    case 4:
        im->image32 = (INT32 **) im->image;
        break;
    }

    return im;
}

/* Release an image: palette, raster data (via the storage-specific
   destructor), the line-pointer table, then the descriptor itself.
   Safe to call with NULL. */
void
ImagingDelete(Imaging im)
{
    if (!im)
        return;

    if (im->palette)
        ImagingPaletteDelete(im->palette);

    if (im->destroy)
        im->destroy(im);

    if (im->image)
        free(im->image);

    free(im);
}


/* Array Storage Type */
/* ------------------ */
/* Allocate image as an array of line buffers.
*/ static void ImagingDestroyArray(Imaging im) { int y; if (im->image) for (y = 0; y < im->ysize; y++) if (im->image[y]) free(im->image[y]); } Imaging ImagingNewArray(const char *mode, int xsize, int ysize) { Imaging im; ImagingSectionCookie cookie; int y; char* p; im = ImagingNewPrologue(mode, xsize, ysize); if (!im) return NULL; ImagingSectionEnter(&cookie); /* Allocate image as an array of lines */ for (y = 0; y < im->ysize; y++) { /* malloc check linesize checked in prologue */ p = (char *) calloc(1, im->linesize); if (!p) { ImagingDestroyArray(im); break; } im->image[y] = p; } ImagingSectionLeave(&cookie); if (y == im->ysize) im->destroy = ImagingDestroyArray; return ImagingNewEpilogue(im); } /* Block Storage Type */ /* ------------------ */ /* Allocate image as a single block. */ static void ImagingDestroyBlock(Imaging im) { if (im->block) free(im->block); } Imaging ImagingNewBlock(const char *mode, int xsize, int ysize) { Imaging im; Py_ssize_t y, i; im = ImagingNewPrologue(mode, xsize, ysize); if (!im) return NULL; /* We shouldn't overflow, since the threshold defined below says that we're only going to allocate max 4M here before going to the array allocator. Check anyway. */ if (im->linesize && im->ysize > INT_MAX / im->linesize) { /* punt if we're going to overflow */ return NULL; } if (im->ysize * im->linesize <= 0) { /* some platforms return NULL for malloc(0); this fix prevents MemoryError on zero-sized images on such platforms */ im->block = (char *) malloc(1); } else { /* malloc check ok, overflow check above */ im->block = (char *) calloc(im->ysize, im->linesize); } if (im->block) { for (y = i = 0; y < im->ysize; y++) { im->image[y] = im->block + i; i += im->linesize; } im->destroy = ImagingDestroyBlock; } return ImagingNewEpilogue(im); } /* -------------------------------------------------------------------- * Create a new, internally allocated, image. 
*/ #if defined(IMAGING_SMALL_MODEL) #define THRESHOLD 16384L #else #define THRESHOLD (2048*2048*4L) #endif Imaging ImagingNew(const char* mode, int xsize, int ysize) { int bytes; Imaging im; if (strlen(mode) == 1) { if (mode[0] == 'F' || mode[0] == 'I') bytes = 4; else bytes = 1; } else bytes = strlen(mode); /* close enough */ if (xsize < 0 || ysize < 0) { return (Imaging) ImagingError_ValueError("bad image size"); } if ((int64_t) xsize * (int64_t) ysize <= THRESHOLD / bytes) { im = ImagingNewBlock(mode, xsize, ysize); if (im) return im; /* assume memory error; try allocating in array mode instead */ ImagingError_Clear(); } return ImagingNewArray(mode, xsize, ysize); } Imaging ImagingNew2(const char* mode, Imaging imOut, Imaging imIn) { /* allocate or validate output image */ if (imOut) { /* make sure images match */ if (strcmp(imOut->mode, mode) != 0 || imOut->xsize != imIn->xsize || imOut->ysize != imIn->ysize) { return ImagingError_Mismatch(); } } else { /* create new image */ imOut = ImagingNew(mode, imIn->xsize, imIn->ysize); if (!imOut) return NULL; } return imOut; } void ImagingCopyInfo(Imaging destination, Imaging source) { if (source->palette) { if (destination->palette) ImagingPaletteDelete(destination->palette); destination->palette = ImagingPaletteDuplicate(source->palette); } }
./CrossVul/dataset_final_sorted/CWE-284/c/good_5396_2
crossvul-cpp_data_bad_4786_1
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % QQQ U U AAA N N TTTTT U U M M % % Q Q U U A A NN N T U U MM MM % % Q Q U U AAAAA N N N T U U M M M % % Q QQ U U A A N NN T U U M M % % QQQQ UUU A A N N T UUU M M % % % % IIIII M M PPPP OOO RRRR TTTTT % % I MM MM P P O O R R T % % I M M M PPPP O O RRRR T % % I M M P O O R R T % % IIIII M M P OOO R R T % % % % MagickCore Methods to Import Quantum Pixels % % % % Software Design % % Cristy % % October 1998 % % % % % % Copyright 1999-2016 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % http://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/property.h" #include "MagickCore/blob.h" #include "MagickCore/blob-private.h" #include "MagickCore/color-private.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/cache.h" #include "MagickCore/constitute.h" #include "MagickCore/delegate.h" #include "MagickCore/geometry.h" #include "MagickCore/list.h" #include "MagickCore/magick.h" #include "MagickCore/memory_.h" #include "MagickCore/monitor.h" #include "MagickCore/option.h" #include "MagickCore/pixel.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/pixel-private.h" #include "MagickCore/quantum.h" #include "MagickCore/quantum-private.h" #include "MagickCore/resource_.h" #include "MagickCore/semaphore.h" #include "MagickCore/statistic.h" #include "MagickCore/stream.h" #include "MagickCore/string_.h" #include "MagickCore/utility.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I m p o r t Q u a n t u m P i x e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ImportQuantumPixels() transfers one or more pixel components from a user % supplied buffer into the image pixel cache of an image. The pixels are % expected in network byte order. It returns MagickTrue if the pixels are % successfully transferred, otherwise MagickFalse. % % The format of the ImportQuantumPixels method is: % % size_t ImportQuantumPixels(const Image *image,CacheView *image_view, % QuantumInfo *quantum_info,const QuantumType quantum_type, % const unsigned char *magick_restrict pixels,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o image_view: the image cache view. % % o quantum_info: the quantum info. % % o quantum_type: Declare which pixel components to transfer (red, green, % blue, opacity, RGB, or RGBA). % % o pixels: The pixel components are transferred from this buffer. 
%
%    o exception: return any errors or warnings in this structure.
%
*/

/* Validate a colormap index against image->colors: return it as a
   Quantum, or flag *range_exception and return 0 when out of range. */
static inline Quantum PushColormapIndex(const Image *image,const size_t index,
  MagickBooleanType *range_exception)
{
  if (index < image->colors)
    return((Quantum) index);
  *range_exception=MagickTrue;
  return((Quantum) 0);
}

/* Decode one 8-byte double from the stream in quantum_info->endian
   byte order, apply the minimum/scale normalization, and return the
   advanced stream pointer. */
static inline const unsigned char *PushDoublePixel(QuantumInfo *quantum_info,
  const unsigned char *magick_restrict pixels,double *pixel)
{
  double
    *p;

  unsigned char
    quantum[8];

  if (quantum_info->endian == LSBEndian)
    {
      quantum[0]=(*pixels++);
      quantum[1]=(*pixels++);
      quantum[2]=(*pixels++);
      quantum[3]=(*pixels++);
      quantum[4]=(*pixels++);
      quantum[5]=(*pixels++);
      quantum[6]=(*pixels++);
      quantum[7]=(*pixels++);
      /* NOTE(review): reinterprets the local byte buffer as a double;
         relies on quantum[] being suitably aligned -- confirm against
         upstream, which later switched to a memcpy-style decode. */
      p=(double *) quantum;
      *pixel=(*p);
      *pixel-=quantum_info->minimum;
      *pixel*=quantum_info->scale;
      return(pixels);
    }
  /* MSB-endian: fill the buffer back-to-front */
  quantum[7]=(*pixels++);
  quantum[6]=(*pixels++);
  quantum[5]=(*pixels++);
  quantum[4]=(*pixels++);
  quantum[3]=(*pixels++);
  quantum[2]=(*pixels++);
  quantum[1]=(*pixels++);
  quantum[0]=(*pixels++);
  p=(double *) quantum;
  *pixel=(*p);
  *pixel-=quantum_info->minimum;
  *pixel*=quantum_info->scale;
  return(pixels);
}

/* Decode one 4-byte float from the stream in quantum_info->endian
   byte order, apply the minimum/scale normalization, and return the
   advanced stream pointer. */
static inline const unsigned char *PushFloatPixel(QuantumInfo *quantum_info,
  const unsigned char *magick_restrict pixels,float *pixel)
{
  float
    *p;

  unsigned char
    quantum[4];

  if (quantum_info->endian == LSBEndian)
    {
      quantum[0]=(*pixels++);
      quantum[1]=(*pixels++);
      quantum[2]=(*pixels++);
      quantum[3]=(*pixels++);
      /* NOTE(review): same type-punned decode as PushDoublePixel above */
      p=(float *) quantum;
      *pixel=(*p);
      *pixel-=quantum_info->minimum;
      *pixel*=quantum_info->scale;
      return(pixels);
    }
  quantum[3]=(*pixels++);
  quantum[2]=(*pixels++);
  quantum[1]=(*pixels++);
  quantum[0]=(*pixels++);
  p=(float *) quantum;
  *pixel=(*p);
  *pixel-=quantum_info->minimum;
  *pixel*=quantum_info->scale;
  return(pixels);
}

/* Extract the next sample of quantum_info->depth bits from the
   bit-packed byte stream, carrying the partial-byte position in
   quantum_info->state between calls.  Bits are consumed MSB-first
   within each byte. */
static inline const unsigned char *PushQuantumPixel(QuantumInfo *quantum_info,
  const unsigned char *magick_restrict pixels,unsigned int *quantum)
{
  register ssize_t
    i;

  register size_t
    quantum_bits;

  *quantum=(QuantumAny) 0;
  for (i=(ssize_t) quantum_info->depth; i > 0L; )
  {
    /* refill the one-byte bit reservoir when empty */
    if (quantum_info->state.bits == 0UL)
      {
        quantum_info->state.pixel=(*pixels++);
        quantum_info->state.bits=8UL;
      }
    /* take as many bits as remain wanted, capped by what the
       reservoir holds */
    quantum_bits=(size_t) i;
    if (quantum_bits > quantum_info->state.bits)
      quantum_bits=quantum_info->state.bits;
    i-=(ssize_t) quantum_bits;
    quantum_info->state.bits-=quantum_bits;
    *quantum=(unsigned int) ((*quantum << quantum_bits) |
      ((quantum_info->state.pixel >> quantum_info->state.bits) &~ ((~0UL) <<
      quantum_bits)));
  }
  return(pixels);
}

/* As PushQuantumPixel, but the bit reservoir is refilled 32 bits at a
   time via PushLongPixel and bits are assembled LSB-first using the
   precomputed mask table in quantum_info->state.mask. */
static inline const unsigned char *PushQuantumLongPixel(
  QuantumInfo *quantum_info,const unsigned char *magick_restrict pixels,
  unsigned int *quantum)
{
  register ssize_t
    i;

  register size_t
    quantum_bits;

  *quantum=0UL;
  for (i=(ssize_t) quantum_info->depth; i > 0; )
  {
    if (quantum_info->state.bits == 0)
      {
        pixels=PushLongPixel(quantum_info->endian,pixels,
          &quantum_info->state.pixel);
        quantum_info->state.bits=32U;
      }
    quantum_bits=(size_t) i;
    if (quantum_bits > quantum_info->state.bits)
      quantum_bits=quantum_info->state.bits;
    *quantum|=(((quantum_info->state.pixel >> (32U-quantum_info->state.bits)) &
      quantum_info->state.mask[quantum_bits]) << (quantum_info->depth-i));
    i-=(ssize_t) quantum_bits;
    quantum_info->state.bits-=quantum_bits;
  }
  return(pixels);
}

/* Import an alpha-only scanline of number_pixels samples from p into
   the pixel cache at q, dispatching on quantum_info->depth and
   quantum_info->format.  (The body continues past the end of this
   chunk.) */
static void ImportAlphaQuantum(const Image *image,QuantumInfo *quantum_info,
  const MagickSizeType number_pixels,const unsigned char *magick_restrict p,
  Quantum *magick_restrict q,ExceptionInfo *exception)
{
  QuantumAny
    range;

  register ssize_t
    x;

  unsigned int
    pixel;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  switch (quantum_info->depth)
  {
    case 8:
    {
      unsigned char
        pixel;

      for (x=0; x < (ssize_t) number_pixels; x++)
      {
        p=PushCharPixel(p,&pixel);
        SetPixelAlpha(image,ScaleCharToQuantum(pixel),q);
        p+=quantum_info->pad;
        q+=GetPixelChannels(image);
      }
      break;
    }
    case 16:
    {
      unsigned short
        pixel;

      if (quantum_info->format == FloatingPointQuantumFormat)
        {
          for (x=0; x < (ssize_t) number_pixels; x++)
          {
p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelAlpha(image,ClampToQuantum(QuantumRange* HalfToSinglePrecision(pixel)),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } for (x=0; x < (ssize_t) number_pixels; x++) { p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelAlpha(image,ScaleShortToQuantum(pixel),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } case 32: { unsigned int pixel; if (quantum_info->format == FloatingPointQuantumFormat) { float pixel; for (x=0; x < (ssize_t) number_pixels; x++) { p=PushFloatPixel(quantum_info,p,&pixel); SetPixelAlpha(image,ClampToQuantum(pixel),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } for (x=0; x < (ssize_t) number_pixels; x++) { p=PushLongPixel(quantum_info->endian,p,&pixel); SetPixelAlpha(image,ScaleLongToQuantum(pixel),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } case 64: { if (quantum_info->format == FloatingPointQuantumFormat) { double pixel; for (x=0; x < (ssize_t) number_pixels; x++) { p=PushDoublePixel(quantum_info,p,&pixel); SetPixelAlpha(image,ClampToQuantum(pixel),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } } default: { range=GetQuantumRange(quantum_info->depth); for (x=0; x < (ssize_t) number_pixels; x++) { p=PushQuantumPixel(quantum_info,p,&pixel); SetPixelAlpha(image,ScaleAnyToQuantum(pixel,range),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } } } static void ImportBGRQuantum(const Image *image,QuantumInfo *quantum_info, const MagickSizeType number_pixels,const unsigned char *magick_restrict p, Quantum *magick_restrict q,ExceptionInfo *exception) { QuantumAny range; register ssize_t x; ssize_t bit; unsigned int pixel; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); switch (quantum_info->depth) { case 8: { unsigned char pixel; for (x=0; x < (ssize_t) number_pixels; x++) { p=PushCharPixel(p,&pixel); SetPixelBlue(image,ScaleCharToQuantum(pixel),q); 
p=PushCharPixel(p,&pixel); SetPixelGreen(image,ScaleCharToQuantum(pixel),q); p=PushCharPixel(p,&pixel); SetPixelRed(image,ScaleCharToQuantum(pixel),q); SetPixelAlpha(image,OpaqueAlpha,q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } case 10: { range=GetQuantumRange(quantum_info->depth); if (quantum_info->pack == MagickFalse) { for (x=0; x < (ssize_t) number_pixels; x++) { p=PushLongPixel(quantum_info->endian,p,&pixel); SetPixelRed(image,ScaleAnyToQuantum((pixel >> 22) & 0x3ff,range),q); SetPixelGreen(image,ScaleAnyToQuantum((pixel >> 12) & 0x3ff,range), q); SetPixelBlue(image,ScaleAnyToQuantum((pixel >> 2) & 0x3ff,range),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } if (quantum_info->quantum == 32U) { for (x=0; x < (ssize_t) number_pixels; x++) { p=PushQuantumLongPixel(quantum_info,p,&pixel); SetPixelBlue(image,ScaleAnyToQuantum(pixel,range),q); p=PushQuantumLongPixel(quantum_info,p,&pixel); SetPixelGreen(image,ScaleAnyToQuantum(pixel,range),q); p=PushQuantumLongPixel(quantum_info,p,&pixel); SetPixelRed(image,ScaleAnyToQuantum(pixel,range),q); q+=GetPixelChannels(image); } break; } for (x=0; x < (ssize_t) number_pixels; x++) { p=PushQuantumPixel(quantum_info,p,&pixel); SetPixelBlue(image,ScaleAnyToQuantum(pixel,range),q); p=PushQuantumPixel(quantum_info,p,&pixel); SetPixelGreen(image,ScaleAnyToQuantum(pixel,range),q); p=PushQuantumPixel(quantum_info,p,&pixel); SetPixelRed(image,ScaleAnyToQuantum(pixel,range),q); q+=GetPixelChannels(image); } break; } case 12: { range=GetQuantumRange(quantum_info->depth); if (quantum_info->pack == MagickFalse) { unsigned short pixel; for (x=0; x < (ssize_t) (3*number_pixels-1); x+=2) { p=PushShortPixel(quantum_info->endian,p,&pixel); switch (x % 3) { default: case 0: { SetPixelRed(image,ScaleAnyToQuantum((QuantumAny) (pixel >> 4), range),q); break; } case 1: { SetPixelGreen(image,ScaleAnyToQuantum((QuantumAny) (pixel >> 4), range),q); break; } case 2: { 
SetPixelBlue(image,ScaleAnyToQuantum((QuantumAny) (pixel >> 4), range),q); q+=GetPixelChannels(image); break; } } p=PushShortPixel(quantum_info->endian,p,&pixel); switch ((x+1) % 3) { default: case 0: { SetPixelRed(image,ScaleAnyToQuantum((QuantumAny) (pixel >> 4), range),q); break; } case 1: { SetPixelGreen(image,ScaleAnyToQuantum((QuantumAny) (pixel >> 4), range),q); break; } case 2: { SetPixelBlue(image,ScaleAnyToQuantum((QuantumAny) (pixel >> 4), range),q); q+=GetPixelChannels(image); break; } } p+=quantum_info->pad; } for (bit=0; bit < (ssize_t) (3*number_pixels % 2); bit++) { p=PushShortPixel(quantum_info->endian,p,&pixel); switch ((x+bit) % 3) { default: case 0: { SetPixelRed(image,ScaleAnyToQuantum((QuantumAny) (pixel >> 4), range),q); break; } case 1: { SetPixelGreen(image,ScaleAnyToQuantum((QuantumAny) (pixel >> 4), range),q); break; } case 2: { SetPixelBlue(image,ScaleAnyToQuantum((QuantumAny) (pixel >> 4), range),q); q+=GetPixelChannels(image); break; } } p+=quantum_info->pad; } if (bit != 0) p++; break; } if (quantum_info->quantum == 32U) { for (x=0; x < (ssize_t) number_pixels; x++) { p=PushQuantumLongPixel(quantum_info,p,&pixel); SetPixelBlue(image,ScaleAnyToQuantum(pixel,range),q); p=PushQuantumLongPixel(quantum_info,p,&pixel); SetPixelGreen(image,ScaleAnyToQuantum(pixel,range),q); p=PushQuantumLongPixel(quantum_info,p,&pixel); SetPixelRed(image,ScaleAnyToQuantum(pixel,range),q); q+=GetPixelChannels(image); } break; } for (x=0; x < (ssize_t) number_pixels; x++) { p=PushQuantumPixel(quantum_info,p,&pixel); SetPixelBlue(image,ScaleAnyToQuantum(pixel,range),q); p=PushQuantumPixel(quantum_info,p,&pixel); SetPixelGreen(image,ScaleAnyToQuantum(pixel,range),q); p=PushQuantumPixel(quantum_info,p,&pixel); SetPixelRed(image,ScaleAnyToQuantum(pixel,range),q); q+=GetPixelChannels(image); } break; } case 16: { unsigned short pixel; if (quantum_info->format == FloatingPointQuantumFormat) { for (x=0; x < (ssize_t) number_pixels; x++) { 
p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelRed(image,ClampToQuantum(QuantumRange* HalfToSinglePrecision(pixel)),q); p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelGreen(image,ClampToQuantum(QuantumRange* HalfToSinglePrecision(pixel)),q); p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelBlue(image,ClampToQuantum(QuantumRange* HalfToSinglePrecision(pixel)),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } for (x=0; x < (ssize_t) number_pixels; x++) { p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelBlue(image,ScaleShortToQuantum(pixel),q); p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelGreen(image,ScaleShortToQuantum(pixel),q); p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelRed(image,ScaleShortToQuantum(pixel),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } case 32: { unsigned int pixel; if (quantum_info->format == FloatingPointQuantumFormat) { float pixel; for (x=0; x < (ssize_t) number_pixels; x++) { p=PushFloatPixel(quantum_info,p,&pixel); SetPixelRed(image,ClampToQuantum(pixel),q); p=PushFloatPixel(quantum_info,p,&pixel); SetPixelGreen(image,ClampToQuantum(pixel),q); p=PushFloatPixel(quantum_info,p,&pixel); SetPixelBlue(image,ClampToQuantum(pixel),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } for (x=0; x < (ssize_t) number_pixels; x++) { p=PushLongPixel(quantum_info->endian,p,&pixel); SetPixelBlue(image,ScaleLongToQuantum(pixel),q); p=PushLongPixel(quantum_info->endian,p,&pixel); SetPixelGreen(image,ScaleLongToQuantum(pixel),q); p=PushLongPixel(quantum_info->endian,p,&pixel); SetPixelRed(image,ScaleLongToQuantum(pixel),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } case 64: { if (quantum_info->format == FloatingPointQuantumFormat) { double pixel; for (x=0; x < (ssize_t) number_pixels; x++) { p=PushDoublePixel(quantum_info,p,&pixel); SetPixelRed(image,ClampToQuantum(pixel),q); p=PushDoublePixel(quantum_info,p,&pixel); 
SetPixelGreen(image,ClampToQuantum(pixel),q); p=PushDoublePixel(quantum_info,p,&pixel); SetPixelBlue(image,ClampToQuantum(pixel),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } } default: { range=GetQuantumRange(quantum_info->depth); for (x=0; x < (ssize_t) number_pixels; x++) { p=PushQuantumPixel(quantum_info,p,&pixel); SetPixelBlue(image,ScaleAnyToQuantum(pixel,range),q); p=PushQuantumPixel(quantum_info,p,&pixel); SetPixelGreen(image,ScaleAnyToQuantum(pixel,range),q); p=PushQuantumPixel(quantum_info,p,&pixel); SetPixelRed(image,ScaleAnyToQuantum(pixel,range),q); q+=GetPixelChannels(image); } break; } } } static void ImportBGRAQuantum(const Image *image,QuantumInfo *quantum_info, const MagickSizeType number_pixels,const unsigned char *magick_restrict p, Quantum *magick_restrict q,ExceptionInfo *exception) { QuantumAny range; register ssize_t x; unsigned int pixel; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); switch (quantum_info->depth) { case 8: { unsigned char pixel; for (x=0; x < (ssize_t) number_pixels; x++) { p=PushCharPixel(p,&pixel); SetPixelBlue(image,ScaleCharToQuantum(pixel),q); p=PushCharPixel(p,&pixel); SetPixelGreen(image,ScaleCharToQuantum(pixel),q); p=PushCharPixel(p,&pixel); SetPixelRed(image,ScaleCharToQuantum(pixel),q); p=PushCharPixel(p,&pixel); SetPixelAlpha(image,ScaleCharToQuantum(pixel),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } case 10: { pixel=0; if (quantum_info->pack == MagickFalse) { register ssize_t i; size_t quantum; ssize_t n; n=0; quantum=0; for (x=0; x < (ssize_t) number_pixels; x++) { for (i=0; i < 4; i++) { switch (n % 3) { case 0: { p=PushLongPixel(quantum_info->endian,p,&pixel); quantum=(size_t) (ScaleShortToQuantum((unsigned short) (((pixel >> 22) & 0x3ff) << 6))); break; } case 1: { quantum=(size_t) (ScaleShortToQuantum((unsigned short) (((pixel >> 12) & 0x3ff) << 6))); break; } case 2: { quantum=(size_t) (ScaleShortToQuantum((unsigned short) (((pixel 
>> 2) & 0x3ff) << 6))); break; } } switch (i) { case 0: SetPixelRed(image,(Quantum) quantum,q); break; case 1: SetPixelGreen(image,(Quantum) quantum,q); break; case 2: SetPixelBlue(image,(Quantum) quantum,q); break; case 3: SetPixelAlpha(image,(Quantum) quantum,q); break; } n++; } p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } for (x=0; x < (ssize_t) number_pixels; x++) { p=PushQuantumPixel(quantum_info,p,&pixel); SetPixelRed(image,ScaleShortToQuantum((unsigned short) (pixel << 6)),q); p=PushQuantumPixel(quantum_info,p,&pixel); SetPixelGreen(image,ScaleShortToQuantum((unsigned short) (pixel << 6)), q); p=PushQuantumPixel(quantum_info,p,&pixel); SetPixelBlue(image,ScaleShortToQuantum((unsigned short) (pixel << 6)), q); p=PushQuantumPixel(quantum_info,p,&pixel); SetPixelAlpha(image,ScaleShortToQuantum((unsigned short) (pixel << 6)), q); q+=GetPixelChannels(image); } break; } case 16: { unsigned short pixel; if (quantum_info->format == FloatingPointQuantumFormat) { for (x=0; x < (ssize_t) number_pixels; x++) { p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelRed(image,ClampToQuantum(QuantumRange* HalfToSinglePrecision(pixel)),q); p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelGreen(image,ClampToQuantum(QuantumRange* HalfToSinglePrecision(pixel)),q); p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelBlue(image,ClampToQuantum(QuantumRange* HalfToSinglePrecision(pixel)),q); p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelAlpha(image,ClampToQuantum(QuantumRange* HalfToSinglePrecision(pixel)),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } for (x=0; x < (ssize_t) number_pixels; x++) { p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelBlue(image,ScaleShortToQuantum(pixel),q); p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelGreen(image,ScaleShortToQuantum(pixel),q); p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelRed(image,ScaleShortToQuantum(pixel),q); 
p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelAlpha(image,ScaleShortToQuantum(pixel),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } case 32: { unsigned int pixel; if (quantum_info->format == FloatingPointQuantumFormat) { float pixel; for (x=0; x < (ssize_t) number_pixels; x++) { p=PushFloatPixel(quantum_info,p,&pixel); SetPixelRed(image,ClampToQuantum(pixel),q); p=PushFloatPixel(quantum_info,p,&pixel); SetPixelGreen(image,ClampToQuantum(pixel),q); p=PushFloatPixel(quantum_info,p,&pixel); SetPixelBlue(image,ClampToQuantum(pixel),q); p=PushFloatPixel(quantum_info,p,&pixel); SetPixelAlpha(image,ClampToQuantum(pixel),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } for (x=0; x < (ssize_t) number_pixels; x++) { p=PushLongPixel(quantum_info->endian,p,&pixel); SetPixelBlue(image,ScaleLongToQuantum(pixel),q); p=PushLongPixel(quantum_info->endian,p,&pixel); SetPixelGreen(image,ScaleLongToQuantum(pixel),q); p=PushLongPixel(quantum_info->endian,p,&pixel); SetPixelRed(image,ScaleLongToQuantum(pixel),q); p=PushLongPixel(quantum_info->endian,p,&pixel); SetPixelAlpha(image,ScaleLongToQuantum(pixel),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } case 64: { if (quantum_info->format == FloatingPointQuantumFormat) { double pixel; for (x=0; x < (ssize_t) number_pixels; x++) { p=PushDoublePixel(quantum_info,p,&pixel); SetPixelRed(image,ClampToQuantum(pixel),q); p=PushDoublePixel(quantum_info,p,&pixel); SetPixelGreen(image,ClampToQuantum(pixel),q); p=PushDoublePixel(quantum_info,p,&pixel); SetPixelBlue(image,ClampToQuantum(pixel),q); p=PushDoublePixel(quantum_info,p,&pixel); SetPixelAlpha(image,ClampToQuantum(pixel),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } } default: { range=GetQuantumRange(quantum_info->depth); for (x=0; x < (ssize_t) number_pixels; x++) { p=PushQuantumPixel(quantum_info,p,&pixel); SetPixelBlue(image,ScaleAnyToQuantum(pixel,range),q); p=PushQuantumPixel(quantum_info,p,&pixel); 
SetPixelGreen(image,ScaleAnyToQuantum(pixel,range),q); p=PushQuantumPixel(quantum_info,p,&pixel); SetPixelRed(image,ScaleAnyToQuantum(pixel,range),q); p=PushQuantumPixel(quantum_info,p,&pixel); SetPixelAlpha(image,ScaleAnyToQuantum(pixel,range),q); q+=GetPixelChannels(image); } break; } } } static void ImportBGROQuantum(const Image *image,QuantumInfo *quantum_info, const MagickSizeType number_pixels,const unsigned char *magick_restrict p, Quantum *magick_restrict q,ExceptionInfo *exception) { QuantumAny range; register ssize_t x; unsigned int pixel; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); switch (quantum_info->depth) { case 8: { unsigned char pixel; for (x=0; x < (ssize_t) number_pixels; x++) { p=PushCharPixel(p,&pixel); SetPixelBlue(image,ScaleCharToQuantum(pixel),q); p=PushCharPixel(p,&pixel); SetPixelGreen(image,ScaleCharToQuantum(pixel),q); p=PushCharPixel(p,&pixel); SetPixelRed(image,ScaleCharToQuantum(pixel),q); p=PushCharPixel(p,&pixel); SetPixelOpacity(image,ScaleCharToQuantum(pixel),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } case 10: { pixel=0; if (quantum_info->pack == MagickFalse) { register ssize_t i; size_t quantum; ssize_t n; n=0; quantum=0; for (x=0; x < (ssize_t) number_pixels; x++) { for (i=0; i < 4; i++) { switch (n % 3) { case 0: { p=PushLongPixel(quantum_info->endian,p,&pixel); quantum=(size_t) (ScaleShortToQuantum((unsigned short) (((pixel >> 22) & 0x3ff) << 6))); break; } case 1: { quantum=(size_t) (ScaleShortToQuantum((unsigned short) (((pixel >> 12) & 0x3ff) << 6))); break; } case 2: { quantum=(size_t) (ScaleShortToQuantum((unsigned short) (((pixel >> 2) & 0x3ff) << 6))); break; } } switch (i) { case 0: SetPixelRed(image,(Quantum) quantum,q); break; case 1: SetPixelGreen(image,(Quantum) quantum,q); break; case 2: SetPixelBlue(image,(Quantum) quantum,q); break; case 3: SetPixelOpacity(image,(Quantum) quantum,q); break; } n++; } p+=quantum_info->pad; q+=GetPixelChannels(image); } 
break; } for (x=0; x < (ssize_t) number_pixels; x++) { p=PushQuantumPixel(quantum_info,p,&pixel); SetPixelRed(image,ScaleShortToQuantum((unsigned short) (pixel << 6)),q); p=PushQuantumPixel(quantum_info,p,&pixel); SetPixelGreen(image,ScaleShortToQuantum((unsigned short) (pixel << 6)), q); p=PushQuantumPixel(quantum_info,p,&pixel); SetPixelBlue(image,ScaleShortToQuantum((unsigned short) (pixel << 6)), q); p=PushQuantumPixel(quantum_info,p,&pixel); SetPixelOpacity(image,ScaleShortToQuantum((unsigned short) (pixel << 6)), q); q+=GetPixelChannels(image); } break; } case 16: { unsigned short pixel; if (quantum_info->format == FloatingPointQuantumFormat) { for (x=0; x < (ssize_t) number_pixels; x++) { p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelRed(image,ClampToQuantum(QuantumRange* HalfToSinglePrecision(pixel)),q); p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelGreen(image,ClampToQuantum(QuantumRange* HalfToSinglePrecision(pixel)),q); p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelBlue(image,ClampToQuantum(QuantumRange* HalfToSinglePrecision(pixel)),q); p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelOpacity(image,ClampToQuantum(QuantumRange* HalfToSinglePrecision(pixel)),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } for (x=0; x < (ssize_t) number_pixels; x++) { p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelBlue(image,ScaleShortToQuantum(pixel),q); p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelGreen(image,ScaleShortToQuantum(pixel),q); p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelRed(image,ScaleShortToQuantum(pixel),q); p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelOpacity(image,ScaleShortToQuantum(pixel),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } case 32: { unsigned int pixel; if (quantum_info->format == FloatingPointQuantumFormat) { float pixel; for (x=0; x < (ssize_t) number_pixels; x++) { p=PushFloatPixel(quantum_info,p,&pixel); 
SetPixelRed(image,ClampToQuantum(pixel),q); p=PushFloatPixel(quantum_info,p,&pixel); SetPixelGreen(image,ClampToQuantum(pixel),q); p=PushFloatPixel(quantum_info,p,&pixel); SetPixelBlue(image,ClampToQuantum(pixel),q); p=PushFloatPixel(quantum_info,p,&pixel); SetPixelOpacity(image,ClampToQuantum(pixel),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } for (x=0; x < (ssize_t) number_pixels; x++) { p=PushLongPixel(quantum_info->endian,p,&pixel); SetPixelBlue(image,ScaleLongToQuantum(pixel),q); p=PushLongPixel(quantum_info->endian,p,&pixel); SetPixelGreen(image,ScaleLongToQuantum(pixel),q); p=PushLongPixel(quantum_info->endian,p,&pixel); SetPixelRed(image,ScaleLongToQuantum(pixel),q); p=PushLongPixel(quantum_info->endian,p,&pixel); SetPixelOpacity(image,ScaleLongToQuantum(pixel),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } case 64: { if (quantum_info->format == FloatingPointQuantumFormat) { double pixel; for (x=0; x < (ssize_t) number_pixels; x++) { p=PushDoublePixel(quantum_info,p,&pixel); SetPixelRed(image,ClampToQuantum(pixel),q); p=PushDoublePixel(quantum_info,p,&pixel); SetPixelGreen(image,ClampToQuantum(pixel),q); p=PushDoublePixel(quantum_info,p,&pixel); SetPixelBlue(image,ClampToQuantum(pixel),q); p=PushDoublePixel(quantum_info,p,&pixel); SetPixelOpacity(image,ClampToQuantum(pixel),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } } default: { range=GetQuantumRange(quantum_info->depth); for (x=0; x < (ssize_t) number_pixels; x++) { p=PushQuantumPixel(quantum_info,p,&pixel); SetPixelBlue(image,ScaleAnyToQuantum(pixel,range),q); p=PushQuantumPixel(quantum_info,p,&pixel); SetPixelGreen(image,ScaleAnyToQuantum(pixel,range),q); p=PushQuantumPixel(quantum_info,p,&pixel); SetPixelRed(image,ScaleAnyToQuantum(pixel,range),q); p=PushQuantumPixel(quantum_info,p,&pixel); SetPixelOpacity(image,ScaleAnyToQuantum(pixel,range),q); q+=GetPixelChannels(image); } break; } } } static void ImportBlackQuantum(const Image 
*image,QuantumInfo *quantum_info,
  const MagickSizeType number_pixels,const unsigned char *magick_restrict p,
  Quantum *magick_restrict q,ExceptionInfo *exception)
{
  /* ImportBlackQuantum: import a black (K) channel scanline.  Only
     meaningful for CMYK images: any other colorspace raises
     ColorSeparatedImageRequired and imports nothing. */
  QuantumAny
    range;

  register ssize_t
    x;

  unsigned int
    pixel;

  if (image->colorspace != CMYKColorspace)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),ImageError,
        "ColorSeparatedImageRequired","`%s'",image->filename);
      return;
    }
  switch (quantum_info->depth)
  {
    case 8:
    {
      unsigned char
        pixel;

      for (x=0; x < (ssize_t) number_pixels; x++)
      {
        p=PushCharPixel(p,&pixel);
        SetPixelBlack(image,ScaleCharToQuantum(pixel),q);
        p+=quantum_info->pad;
        q+=GetPixelChannels(image);
      }
      break;
    }
    case 16:
    {
      unsigned short
        pixel;

      /* half-float samples when the quantum format is floating point */
      if (quantum_info->format == FloatingPointQuantumFormat)
        {
          for (x=0; x < (ssize_t) number_pixels; x++)
          {
            p=PushShortPixel(quantum_info->endian,p,&pixel);
            SetPixelBlack(image,ClampToQuantum(QuantumRange*
              HalfToSinglePrecision(pixel)),q);
            p+=quantum_info->pad;
            q+=GetPixelChannels(image);
          }
          break;
        }
      for (x=0; x < (ssize_t) number_pixels; x++)
      {
        p=PushShortPixel(quantum_info->endian,p,&pixel);
        SetPixelBlack(image,ScaleShortToQuantum(pixel),q);
        p+=quantum_info->pad;
        q+=GetPixelChannels(image);
      }
      break;
    }
    case 32:
    {
      unsigned int
        pixel;

      if (quantum_info->format == FloatingPointQuantumFormat)
        {
          float
            pixel;

          for (x=0; x < (ssize_t) number_pixels; x++)
          {
            p=PushFloatPixel(quantum_info,p,&pixel);
            SetPixelBlack(image,ClampToQuantum(pixel),q);
            p+=quantum_info->pad;
            q+=GetPixelChannels(image);
          }
          break;
        }
      for (x=0; x < (ssize_t) number_pixels; x++)
      {
        p=PushLongPixel(quantum_info->endian,p,&pixel);
        SetPixelBlack(image,ScaleLongToQuantum(pixel),q);
        p+=quantum_info->pad;
        q+=GetPixelChannels(image);
      }
      break;
    }
    case 64:
    {
      if (quantum_info->format == FloatingPointQuantumFormat)
        {
          double
            pixel;

          for (x=0; x < (ssize_t) number_pixels; x++)
          {
            p=PushDoublePixel(quantum_info,p,&pixel);
            SetPixelBlack(image,ClampToQuantum(pixel),q);
            p+=quantum_info->pad;
            q+=GetPixelChannels(image);
          }
          break;
        }
      /* no break: 64-bit integer samples fall through to the generic
         bit-depth path below.  NOTE(review): matches the visible code;
         confirm against upstream that this fall-through is intended. */
    }
    default:
    {
      /* generic path: arbitrary bit depths via PushQuantumPixel */
      range=GetQuantumRange(quantum_info->depth);
      for (x=0; x < (ssize_t) number_pixels; x++)
      {
        p=PushQuantumPixel(quantum_info,p,&pixel);
        SetPixelBlack(image,ScaleAnyToQuantum(pixel,range),q);
        p+=quantum_info->pad;
        q+=GetPixelChannels(image);
      }
      break;
    }
  }
}

/* Import a blue-channel scanline of number_pixels samples from p into
   the pixel cache at q, dispatching on quantum_info->depth.  (The body
   continues past the end of this chunk.) */
static void ImportBlueQuantum(const Image *image,QuantumInfo *quantum_info,
  const MagickSizeType number_pixels,const unsigned char *magick_restrict p,
  Quantum *magick_restrict q,ExceptionInfo *exception)
{
  QuantumAny
    range;

  register ssize_t
    x;

  unsigned int
    pixel;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  switch (quantum_info->depth)
  {
    case 8:
    {
      unsigned char
        pixel;

      for (x=0; x < (ssize_t) number_pixels; x++)
      {
        p=PushCharPixel(p,&pixel);
        SetPixelBlue(image,ScaleCharToQuantum(pixel),q);
        p+=quantum_info->pad;
        q+=GetPixelChannels(image);
      }
      break;
    }
    case 16:
    {
      unsigned short
        pixel;

      if (quantum_info->format == FloatingPointQuantumFormat)
        {
          for (x=0; x < (ssize_t) number_pixels; x++)
          {
            p=PushShortPixel(quantum_info->endian,p,&pixel);
            SetPixelBlue(image,ClampToQuantum(QuantumRange*
              HalfToSinglePrecision(pixel)),q);
            p+=quantum_info->pad;
            q+=GetPixelChannels(image);
          }
          break;
        }
      for (x=0; x < (ssize_t) number_pixels; x++)
      {
        p=PushShortPixel(quantum_info->endian,p,&pixel);
        SetPixelBlue(image,ScaleShortToQuantum(pixel),q);
        p+=quantum_info->pad;
        q+=GetPixelChannels(image);
      }
      break;
    }
    case 32:
    {
      unsigned int
        pixel;

      if (quantum_info->format == FloatingPointQuantumFormat)
        {
          float
            pixel;

          for (x=0; x < (ssize_t) number_pixels; x++)
          {
            p=PushFloatPixel(quantum_info,p,&pixel);
            SetPixelBlue(image,ClampToQuantum(pixel),q);
            p+=quantum_info->pad;
            q+=GetPixelChannels(image);
          }
          break;
        }
      for (x=0; x < (ssize_t) number_pixels; x++)
      {
        p=PushLongPixel(quantum_info->endian,p,&pixel);
        SetPixelBlue(image,ScaleLongToQuantum(pixel),q);
        p+=quantum_info->pad;
        q+=GetPixelChannels(image);
      }
      break;
    }
    case 64:
    {
      if (quantum_info->format == FloatingPointQuantumFormat)
        {
          double
            pixel;

          for (x=0; x < (ssize_t) number_pixels;
x++) { p=PushDoublePixel(quantum_info,p,&pixel); SetPixelBlue(image,ClampToQuantum(pixel),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } } default: { range=GetQuantumRange(quantum_info->depth); for (x=0; x < (ssize_t) number_pixels; x++) { p=PushQuantumPixel(quantum_info,p,&pixel); SetPixelBlue(image,ScaleAnyToQuantum(pixel,range),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } } } static void ImportCbYCrYQuantum(const Image *image,QuantumInfo *quantum_info, const MagickSizeType number_pixels,const unsigned char *magick_restrict p, Quantum *magick_restrict q,ExceptionInfo *exception) { QuantumAny range; register ssize_t x; unsigned int pixel; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); switch (quantum_info->depth) { case 10: { Quantum cbcr[4]; pixel=0; if (quantum_info->pack == MagickFalse) { register ssize_t i; size_t quantum; ssize_t n; n=0; quantum=0; for (x=0; x < (ssize_t) number_pixels; x+=4) { for (i=0; i < 4; i++) { switch (n % 3) { case 0: { p=PushLongPixel(quantum_info->endian,p,&pixel); quantum=(size_t) (ScaleShortToQuantum((unsigned short) (((pixel >> 22) & 0x3ff) << 6))); break; } case 1: { quantum=(size_t) (ScaleShortToQuantum((unsigned short) (((pixel >> 12) & 0x3ff) << 6))); break; } case 2: { quantum=(size_t) (ScaleShortToQuantum((unsigned short) (((pixel >> 2) & 0x3ff) << 6))); break; } } cbcr[i]=(Quantum) (quantum); n++; } p+=quantum_info->pad; SetPixelRed(image,cbcr[1],q); SetPixelGreen(image,cbcr[0],q); SetPixelBlue(image,cbcr[2],q); q+=GetPixelChannels(image); SetPixelRed(image,cbcr[3],q); SetPixelGreen(image,cbcr[0],q); SetPixelBlue(image,cbcr[2],q); q+=GetPixelChannels(image); } break; } } default: { range=GetQuantumRange(quantum_info->depth); for (x=0; x < (ssize_t) number_pixels; x++) { p=PushQuantumPixel(quantum_info,p,&pixel); SetPixelRed(image,ScaleAnyToQuantum(pixel,range),q); p=PushQuantumPixel(quantum_info,p,&pixel); 
SetPixelGreen(image,ScaleAnyToQuantum(pixel,range),q); q+=GetPixelChannels(image); } break; } } } static void ImportCMYKQuantum(const Image *image,QuantumInfo *quantum_info, const MagickSizeType number_pixels,const unsigned char *magick_restrict p, Quantum *magick_restrict q,ExceptionInfo *exception) { QuantumAny range; register ssize_t x; unsigned int pixel; if (image->colorspace != CMYKColorspace) { (void) ThrowMagickException(exception,GetMagickModule(),ImageError, "ColorSeparatedImageRequired","`%s'",image->filename); return; } switch (quantum_info->depth) { case 8: { unsigned char pixel; for (x=0; x < (ssize_t) number_pixels; x++) { p=PushCharPixel(p,&pixel); SetPixelRed(image,ScaleCharToQuantum(pixel),q); p=PushCharPixel(p,&pixel); SetPixelGreen(image,ScaleCharToQuantum(pixel),q); p=PushCharPixel(p,&pixel); SetPixelBlue(image,ScaleCharToQuantum(pixel),q); p=PushCharPixel(p,&pixel); SetPixelBlack(image,ScaleCharToQuantum(pixel),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } case 16: { unsigned short pixel; if (quantum_info->format == FloatingPointQuantumFormat) { for (x=0; x < (ssize_t) number_pixels; x++) { p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelRed(image,ClampToQuantum(QuantumRange* HalfToSinglePrecision(pixel)),q); p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelGreen(image,ClampToQuantum(QuantumRange* HalfToSinglePrecision(pixel)),q); p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelBlue(image,ClampToQuantum(QuantumRange* HalfToSinglePrecision(pixel)),q); p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelBlack(image,ClampToQuantum(QuantumRange* HalfToSinglePrecision(pixel)),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } for (x=0; x < (ssize_t) number_pixels; x++) { p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelRed(image,ScaleShortToQuantum(pixel),q); p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelGreen(image,ScaleShortToQuantum(pixel),q); 
p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelBlue(image,ScaleShortToQuantum(pixel),q); p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelBlack(image,ScaleShortToQuantum(pixel),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } case 32: { unsigned int pixel; if (quantum_info->format == FloatingPointQuantumFormat) { float pixel; for (x=0; x < (ssize_t) number_pixels; x++) { p=PushFloatPixel(quantum_info,p,&pixel); SetPixelRed(image,ClampToQuantum(pixel),q); p=PushFloatPixel(quantum_info,p,&pixel); SetPixelGreen(image,ClampToQuantum(pixel),q); p=PushFloatPixel(quantum_info,p,&pixel); SetPixelBlue(image,ClampToQuantum(pixel),q); p=PushFloatPixel(quantum_info,p,&pixel); SetPixelBlack(image,ClampToQuantum(pixel),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } for (x=0; x < (ssize_t) number_pixels; x++) { p=PushLongPixel(quantum_info->endian,p,&pixel); SetPixelRed(image,ScaleLongToQuantum(pixel),q); p=PushLongPixel(quantum_info->endian,p,&pixel); SetPixelGreen(image,ScaleLongToQuantum(pixel),q); p=PushLongPixel(quantum_info->endian,p,&pixel); SetPixelBlue(image,ScaleLongToQuantum(pixel),q); p=PushLongPixel(quantum_info->endian,p,&pixel); SetPixelBlack(image,ScaleLongToQuantum(pixel),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } case 64: { if (quantum_info->format == FloatingPointQuantumFormat) { double pixel; for (x=0; x < (ssize_t) number_pixels; x++) { p=PushDoublePixel(quantum_info,p,&pixel); SetPixelRed(image,ClampToQuantum(pixel),q); p=PushDoublePixel(quantum_info,p,&pixel); SetPixelGreen(image,ClampToQuantum(pixel),q); p=PushDoublePixel(quantum_info,p,&pixel); SetPixelBlue(image,ClampToQuantum(pixel),q); p=PushDoublePixel(quantum_info,p,&pixel); SetPixelBlack(image,ClampToQuantum(pixel),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } } default: { range=GetQuantumRange(quantum_info->depth); for (x=0; x < (ssize_t) number_pixels; x++) { p=PushQuantumPixel(quantum_info,p,&pixel); 
SetPixelRed(image,ScaleAnyToQuantum(pixel,range),q); p=PushQuantumPixel(quantum_info,p,&pixel); SetPixelGreen(image,ScaleAnyToQuantum(pixel,range),q); p=PushQuantumPixel(quantum_info,p,&pixel); SetPixelBlue(image,ScaleAnyToQuantum(pixel,range),q); p=PushQuantumPixel(quantum_info,p,&pixel); SetPixelBlack(image,ScaleAnyToQuantum(pixel,range),q); q+=GetPixelChannels(image); } break; } } } static void ImportCMYKAQuantum(const Image *image,QuantumInfo *quantum_info, const MagickSizeType number_pixels,const unsigned char *magick_restrict p, Quantum *magick_restrict q,ExceptionInfo *exception) { QuantumAny range; register ssize_t x; unsigned int pixel; if (image->colorspace != CMYKColorspace) { (void) ThrowMagickException(exception,GetMagickModule(),ImageError, "ColorSeparatedImageRequired","`%s'",image->filename); return; } switch (quantum_info->depth) { case 8: { unsigned char pixel; for (x=0; x < (ssize_t) number_pixels; x++) { p=PushCharPixel(p,&pixel); SetPixelRed(image,ScaleCharToQuantum(pixel),q); p=PushCharPixel(p,&pixel); SetPixelGreen(image,ScaleCharToQuantum(pixel),q); p=PushCharPixel(p,&pixel); SetPixelBlue(image,ScaleCharToQuantum(pixel),q); p=PushCharPixel(p,&pixel); SetPixelBlack(image,ScaleCharToQuantum(pixel),q); p=PushCharPixel(p,&pixel); SetPixelAlpha(image,ScaleCharToQuantum(pixel),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } case 16: { unsigned short pixel; if (quantum_info->format == FloatingPointQuantumFormat) { for (x=0; x < (ssize_t) number_pixels; x++) { p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelRed(image,ClampToQuantum(QuantumRange* HalfToSinglePrecision(pixel)),q); p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelGreen(image,ClampToQuantum(QuantumRange* HalfToSinglePrecision(pixel)),q); p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelBlue(image,ClampToQuantum(QuantumRange* HalfToSinglePrecision(pixel)),q); p=PushShortPixel(quantum_info->endian,p,&pixel); 
SetPixelBlack(image,ClampToQuantum(QuantumRange* HalfToSinglePrecision(pixel)),q); p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelAlpha(image,ClampToQuantum(QuantumRange* HalfToSinglePrecision(pixel)),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } for (x=0; x < (ssize_t) number_pixels; x++) { p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelRed(image,ScaleShortToQuantum(pixel),q); p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelGreen(image,ScaleShortToQuantum(pixel),q); p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelBlue(image,ScaleShortToQuantum(pixel),q); p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelBlack(image,ScaleShortToQuantum(pixel),q); p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelAlpha(image,ScaleShortToQuantum(pixel),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } case 32: { unsigned int pixel; if (quantum_info->format == FloatingPointQuantumFormat) { float pixel; for (x=0; x < (ssize_t) number_pixels; x++) { p=PushFloatPixel(quantum_info,p,&pixel); SetPixelRed(image,ClampToQuantum(pixel),q); p=PushFloatPixel(quantum_info,p,&pixel); SetPixelGreen(image,ClampToQuantum(pixel),q); p=PushFloatPixel(quantum_info,p,&pixel); SetPixelBlue(image,ClampToQuantum(pixel),q); p=PushFloatPixel(quantum_info,p,&pixel); SetPixelBlack(image,ClampToQuantum(pixel),q); p=PushFloatPixel(quantum_info,p,&pixel); SetPixelAlpha(image,ClampToQuantum(pixel),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } for (x=0; x < (ssize_t) number_pixels; x++) { p=PushLongPixel(quantum_info->endian,p,&pixel); SetPixelRed(image,ScaleLongToQuantum(pixel),q); p=PushLongPixel(quantum_info->endian,p,&pixel); SetPixelGreen(image,ScaleLongToQuantum(pixel),q); p=PushLongPixel(quantum_info->endian,p,&pixel); SetPixelBlue(image,ScaleLongToQuantum(pixel),q); p=PushLongPixel(quantum_info->endian,p,&pixel); SetPixelBlack(image,ScaleLongToQuantum(pixel),q); 
p=PushLongPixel(quantum_info->endian,p,&pixel); SetPixelAlpha(image,ScaleLongToQuantum(pixel),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } case 64: { if (quantum_info->format == FloatingPointQuantumFormat) { double pixel; for (x=0; x < (ssize_t) number_pixels; x++) { p=PushDoublePixel(quantum_info,p,&pixel); SetPixelRed(image,ClampToQuantum(pixel),q); p=PushDoublePixel(quantum_info,p,&pixel); SetPixelGreen(image,ClampToQuantum(pixel),q); p=PushDoublePixel(quantum_info,p,&pixel); SetPixelBlue(image,ClampToQuantum(pixel),q); p=PushDoublePixel(quantum_info,p,&pixel); SetPixelBlack(image,ClampToQuantum(pixel),q); p=PushDoublePixel(quantum_info,p,&pixel); SetPixelAlpha(image,ClampToQuantum(pixel),q); p=PushDoublePixel(quantum_info,p,&pixel); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } } default: { range=GetQuantumRange(quantum_info->depth); for (x=0; x < (ssize_t) number_pixels; x++) { p=PushQuantumPixel(quantum_info,p,&pixel); SetPixelRed(image,ScaleAnyToQuantum(pixel,range),q); p=PushQuantumPixel(quantum_info,p,&pixel); SetPixelGreen(image,ScaleAnyToQuantum(pixel,range),q); p=PushQuantumPixel(quantum_info,p,&pixel); SetPixelBlue(image,ScaleAnyToQuantum(pixel,range),q); p=PushQuantumPixel(quantum_info,p,&pixel); SetPixelBlack(image,ScaleAnyToQuantum(pixel,range),q); p=PushQuantumPixel(quantum_info,p,&pixel); SetPixelAlpha(image,ScaleAnyToQuantum(pixel,range),q); q+=GetPixelChannels(image); } break; } } } static void ImportCMYKOQuantum(const Image *image,QuantumInfo *quantum_info, const MagickSizeType number_pixels,const unsigned char *magick_restrict p, Quantum *magick_restrict q,ExceptionInfo *exception) { QuantumAny range; register ssize_t x; unsigned int pixel; if (image->colorspace != CMYKColorspace) { (void) ThrowMagickException(exception,GetMagickModule(),ImageError, "ColorSeparatedImageRequired","`%s'",image->filename); return; } switch (quantum_info->depth) { case 8: { unsigned char pixel; for (x=0; x < (ssize_t) 
number_pixels; x++) { p=PushCharPixel(p,&pixel); SetPixelRed(image,ScaleCharToQuantum(pixel),q); p=PushCharPixel(p,&pixel); SetPixelGreen(image,ScaleCharToQuantum(pixel),q); p=PushCharPixel(p,&pixel); SetPixelBlue(image,ScaleCharToQuantum(pixel),q); p=PushCharPixel(p,&pixel); SetPixelBlack(image,ScaleCharToQuantum(pixel),q); p=PushCharPixel(p,&pixel); SetPixelOpacity(image,ScaleCharToQuantum(pixel),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } case 16: { unsigned short pixel; if (quantum_info->format == FloatingPointQuantumFormat) { for (x=0; x < (ssize_t) number_pixels; x++) { p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelRed(image,ClampToQuantum(QuantumRange* HalfToSinglePrecision(pixel)),q); p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelGreen(image,ClampToQuantum(QuantumRange* HalfToSinglePrecision(pixel)),q); p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelBlue(image,ClampToQuantum(QuantumRange* HalfToSinglePrecision(pixel)),q); p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelBlack(image,ClampToQuantum(QuantumRange* HalfToSinglePrecision(pixel)),q); p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelOpacity(image,ClampToQuantum(QuantumRange* HalfToSinglePrecision(pixel)),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } for (x=0; x < (ssize_t) number_pixels; x++) { p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelRed(image,ScaleShortToQuantum(pixel),q); p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelGreen(image,ScaleShortToQuantum(pixel),q); p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelBlue(image,ScaleShortToQuantum(pixel),q); p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelBlack(image,ScaleShortToQuantum(pixel),q); p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelOpacity(image,ScaleShortToQuantum(pixel),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } case 32: { unsigned int pixel; if (quantum_info->format == 
FloatingPointQuantumFormat) { float pixel; for (x=0; x < (ssize_t) number_pixels; x++) { p=PushFloatPixel(quantum_info,p,&pixel); SetPixelRed(image,ClampToQuantum(pixel),q); p=PushFloatPixel(quantum_info,p,&pixel); SetPixelGreen(image,ClampToQuantum(pixel),q); p=PushFloatPixel(quantum_info,p,&pixel); SetPixelBlue(image,ClampToQuantum(pixel),q); p=PushFloatPixel(quantum_info,p,&pixel); SetPixelBlack(image,ClampToQuantum(pixel),q); p=PushFloatPixel(quantum_info,p,&pixel); SetPixelOpacity(image,ClampToQuantum(pixel),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } for (x=0; x < (ssize_t) number_pixels; x++) { p=PushLongPixel(quantum_info->endian,p,&pixel); SetPixelRed(image,ScaleLongToQuantum(pixel),q); p=PushLongPixel(quantum_info->endian,p,&pixel); SetPixelGreen(image,ScaleLongToQuantum(pixel),q); p=PushLongPixel(quantum_info->endian,p,&pixel); SetPixelBlue(image,ScaleLongToQuantum(pixel),q); p=PushLongPixel(quantum_info->endian,p,&pixel); SetPixelBlack(image,ScaleLongToQuantum(pixel),q); p=PushLongPixel(quantum_info->endian,p,&pixel); SetPixelOpacity(image,ScaleLongToQuantum(pixel),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } case 64: { if (quantum_info->format == FloatingPointQuantumFormat) { double pixel; for (x=0; x < (ssize_t) number_pixels; x++) { p=PushDoublePixel(quantum_info,p,&pixel); SetPixelRed(image,ClampToQuantum(pixel),q); p=PushDoublePixel(quantum_info,p,&pixel); SetPixelGreen(image,ClampToQuantum(pixel),q); p=PushDoublePixel(quantum_info,p,&pixel); SetPixelBlue(image,ClampToQuantum(pixel),q); p=PushDoublePixel(quantum_info,p,&pixel); SetPixelBlack(image,ClampToQuantum(pixel),q); p=PushDoublePixel(quantum_info,p,&pixel); SetPixelOpacity(image,ClampToQuantum(pixel),q); p=PushDoublePixel(quantum_info,p,&pixel); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } } default: { range=GetQuantumRange(quantum_info->depth); for (x=0; x < (ssize_t) number_pixels; x++) { p=PushQuantumPixel(quantum_info,p,&pixel); 
SetPixelRed(image,ScaleAnyToQuantum(pixel,range),q); p=PushQuantumPixel(quantum_info,p,&pixel); SetPixelGreen(image,ScaleAnyToQuantum(pixel,range),q); p=PushQuantumPixel(quantum_info,p,&pixel); SetPixelBlue(image,ScaleAnyToQuantum(pixel,range),q); p=PushQuantumPixel(quantum_info,p,&pixel); SetPixelBlack(image,ScaleAnyToQuantum(pixel,range),q); p=PushQuantumPixel(quantum_info,p,&pixel); SetPixelOpacity(image,ScaleAnyToQuantum(pixel,range),q); q+=GetPixelChannels(image); } break; } } } static void ImportGrayQuantum(const Image *image,QuantumInfo *quantum_info, const MagickSizeType number_pixels,const unsigned char *magick_restrict p, Quantum *magick_restrict q,ExceptionInfo *exception) { QuantumAny range; register ssize_t x; ssize_t bit; unsigned int pixel; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); switch (quantum_info->depth) { case 1: { register Quantum black, white; black=0; white=QuantumRange; if (quantum_info->min_is_white != MagickFalse) { black=QuantumRange; white=0; } for (x=0; x < ((ssize_t) number_pixels-7); x+=8) { for (bit=0; bit < 8; bit++) { SetPixelGray(image,((*p) & (1 << (7-bit))) == 0 ? black : white,q); q+=GetPixelChannels(image); } p++; } for (bit=0; bit < (ssize_t) (number_pixels % 8); bit++) { SetPixelGray(image,((*p) & (0x01 << (7-bit))) == 0 ? 
black : white,q); q+=GetPixelChannels(image); } if (bit != 0) p++; break; } case 4: { register unsigned char pixel; range=GetQuantumRange(quantum_info->depth); for (x=0; x < ((ssize_t) number_pixels-1); x+=2) { pixel=(unsigned char) ((*p >> 4) & 0xf); SetPixelGray(image,ScaleAnyToQuantum(pixel,range),q); q+=GetPixelChannels(image); pixel=(unsigned char) ((*p) & 0xf); SetPixelGray(image,ScaleAnyToQuantum(pixel,range),q); p++; q+=GetPixelChannels(image); } for (bit=0; bit < (ssize_t) (number_pixels % 2); bit++) { pixel=(unsigned char) (*p++ >> 4); SetPixelGray(image,ScaleAnyToQuantum(pixel,range),q); q+=GetPixelChannels(image); } break; } case 8: { unsigned char pixel; if (quantum_info->min_is_white != MagickFalse) { for (x=0; x < (ssize_t) number_pixels; x++) { p=PushCharPixel(p,&pixel); SetPixelGray(image,ScaleCharToQuantum(pixel),q); SetPixelAlpha(image,OpaqueAlpha,q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } for (x=0; x < (ssize_t) number_pixels; x++) { p=PushCharPixel(p,&pixel); SetPixelGray(image,ScaleCharToQuantum(pixel),q); SetPixelAlpha(image,OpaqueAlpha,q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } case 10: { range=GetQuantumRange(quantum_info->depth); if (quantum_info->pack == MagickFalse) { if (image->endian == LSBEndian) { for (x=0; x < (ssize_t) (number_pixels-2); x+=3) { p=PushLongPixel(quantum_info->endian,p,&pixel); SetPixelGray(image,ScaleAnyToQuantum((pixel >> 22) & 0x3ff, range),q); q+=GetPixelChannels(image); SetPixelGray(image,ScaleAnyToQuantum((pixel >> 12) & 0x3ff, range),q); q+=GetPixelChannels(image); SetPixelGray(image,ScaleAnyToQuantum((pixel >> 2) & 0x3ff, range),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } p=PushLongPixel(quantum_info->endian,p,&pixel); if (x++ < (ssize_t) (number_pixels-1)) { SetPixelGray(image,ScaleAnyToQuantum((pixel >> 22) & 0x3ff, range),q); q+=GetPixelChannels(image); } if (x++ < (ssize_t) number_pixels) { SetPixelGray(image,ScaleAnyToQuantum((pixel >> 12) & 0x3ff, 
range),q); q+=GetPixelChannels(image); } break; } for (x=0; x < (ssize_t) (number_pixels-2); x+=3) { p=PushLongPixel(quantum_info->endian,p,&pixel); SetPixelGray(image,ScaleAnyToQuantum((pixel >> 2) & 0x3ff,range), q); q+=GetPixelChannels(image); SetPixelGray(image,ScaleAnyToQuantum((pixel >> 12) & 0x3ff,range), q); q+=GetPixelChannels(image); SetPixelGray(image,ScaleAnyToQuantum((pixel >> 22) & 0x3ff,range), q); p+=quantum_info->pad; q+=GetPixelChannels(image); } p=PushLongPixel(quantum_info->endian,p,&pixel); if (x++ < (ssize_t) (number_pixels-1)) { SetPixelGray(image,ScaleAnyToQuantum((pixel >> 2) & 0x3ff, range),q); q+=GetPixelChannels(image); } if (x++ < (ssize_t) number_pixels) { SetPixelGray(image,ScaleAnyToQuantum((pixel >> 12) & 0x3ff, range),q); q+=GetPixelChannels(image); } break; } for (x=0; x < (ssize_t) number_pixels; x++) { p=PushQuantumPixel(quantum_info,p,&pixel); SetPixelGray(image,ScaleAnyToQuantum(pixel,range),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } case 12: { range=GetQuantumRange(quantum_info->depth); if (quantum_info->pack == MagickFalse) { unsigned short pixel; for (x=0; x < (ssize_t) (number_pixels-1); x+=2) { p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelGray(image,ScaleAnyToQuantum((QuantumAny) (pixel >> 4), range),q); q+=GetPixelChannels(image); p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelGray(image,ScaleAnyToQuantum((QuantumAny) (pixel >> 4), range),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } for (bit=0; bit < (ssize_t) (number_pixels % 2); bit++) { p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelGray(image,ScaleAnyToQuantum((QuantumAny) (pixel >> 4), range),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } if (bit != 0) p++; break; } for (x=0; x < (ssize_t) number_pixels; x++) { p=PushQuantumPixel(quantum_info,p,&pixel); SetPixelGray(image,ScaleAnyToQuantum(pixel,range),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } case 16: { unsigned short 
pixel; if (quantum_info->min_is_white != MagickFalse) { for (x=0; x < (ssize_t) number_pixels; x++) { p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelGray(image,ScaleShortToQuantum(pixel),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } if (quantum_info->format == FloatingPointQuantumFormat) { for (x=0; x < (ssize_t) number_pixels; x++) { p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelGray(image,ClampToQuantum(QuantumRange* HalfToSinglePrecision(pixel)),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } for (x=0; x < (ssize_t) number_pixels; x++) { p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelGray(image,ScaleShortToQuantum(pixel),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } case 32: { unsigned int pixel; if (quantum_info->format == FloatingPointQuantumFormat) { float pixel; for (x=0; x < (ssize_t) number_pixels; x++) { p=PushFloatPixel(quantum_info,p,&pixel); SetPixelGray(image,ClampToQuantum(pixel),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } for (x=0; x < (ssize_t) number_pixels; x++) { p=PushLongPixel(quantum_info->endian,p,&pixel); SetPixelGray(image,ScaleLongToQuantum(pixel),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } case 64: { if (quantum_info->format == FloatingPointQuantumFormat) { double pixel; for (x=0; x < (ssize_t) number_pixels; x++) { p=PushDoublePixel(quantum_info,p,&pixel); SetPixelGray(image,ClampToQuantum(pixel),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } } default: { range=GetQuantumRange(quantum_info->depth); for (x=0; x < (ssize_t) number_pixels; x++) { p=PushQuantumPixel(quantum_info,p,&pixel); SetPixelGray(image,ScaleAnyToQuantum(pixel,range),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } } } static void ImportGrayAlphaQuantum(const Image *image,QuantumInfo *quantum_info, const MagickSizeType number_pixels,const unsigned char *magick_restrict p, Quantum *magick_restrict 
q,ExceptionInfo *exception) { QuantumAny range; register ssize_t x; ssize_t bit; unsigned int pixel; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); switch (quantum_info->depth) { case 1: { register unsigned char pixel; bit=0; for (x=((ssize_t) number_pixels-3); x > 0; x-=4) { for (bit=0; bit < 8; bit+=2) { pixel=(unsigned char) (((*p) & (1 << (7-bit))) != 0 ? 0x00 : 0x01); SetPixelGray(image,(Quantum) (pixel == 0 ? 0 : QuantumRange),q); SetPixelAlpha(image,((*p) & (1UL << (unsigned char) (6-bit))) == 0 ? TransparentAlpha : OpaqueAlpha,q); q+=GetPixelChannels(image); } p++; } if ((number_pixels % 4) != 0) for (bit=3; bit >= (ssize_t) (4-(number_pixels % 4)); bit-=2) { pixel=(unsigned char) (((*p) & (1 << (7-bit))) != 0 ? 0x00 : 0x01); SetPixelGray(image,(Quantum) (pixel != 0 ? 0 : QuantumRange),q); SetPixelAlpha(image,((*p) & (1UL << (unsigned char) (6-bit))) == 0 ? TransparentAlpha : OpaqueAlpha,q); q+=GetPixelChannels(image); } if (bit != 0) p++; break; } case 4: { register unsigned char pixel; range=GetQuantumRange(quantum_info->depth); for (x=0; x < (ssize_t) number_pixels; x++) { pixel=(unsigned char) ((*p >> 4) & 0xf); SetPixelGray(image,ScaleAnyToQuantum(pixel,range),q); pixel=(unsigned char) ((*p) & 0xf); SetPixelAlpha(image,ScaleAnyToQuantum(pixel,range),q); p++; q+=GetPixelChannels(image); } break; } case 8: { unsigned char pixel; for (x=0; x < (ssize_t) number_pixels; x++) { p=PushCharPixel(p,&pixel); SetPixelGray(image,ScaleCharToQuantum(pixel),q); p=PushCharPixel(p,&pixel); SetPixelAlpha(image,ScaleCharToQuantum(pixel),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } case 10: { range=GetQuantumRange(quantum_info->depth); for (x=0; x < (ssize_t) number_pixels; x++) { p=PushQuantumPixel(quantum_info,p,&pixel); SetPixelGray(image,ScaleAnyToQuantum(pixel,range),q); p=PushQuantumPixel(quantum_info,p,&pixel); SetPixelAlpha(image,ScaleAnyToQuantum(pixel,range),q); p+=quantum_info->pad; 
q+=GetPixelChannels(image); } break; } case 12: { range=GetQuantumRange(quantum_info->depth); for (x=0; x < (ssize_t) number_pixels; x++) { p=PushQuantumPixel(quantum_info,p,&pixel); SetPixelGray(image,ScaleAnyToQuantum(pixel,range),q); p=PushQuantumPixel(quantum_info,p,&pixel); SetPixelAlpha(image,ScaleAnyToQuantum(pixel,range),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } case 16: { unsigned short pixel; if (quantum_info->format == FloatingPointQuantumFormat) { for (x=0; x < (ssize_t) number_pixels; x++) { p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelGray(image,ClampToQuantum(QuantumRange* HalfToSinglePrecision(pixel)),q); p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelAlpha(image,ClampToQuantum(QuantumRange* HalfToSinglePrecision(pixel)),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } for (x=0; x < (ssize_t) number_pixels; x++) { p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelGray(image,ScaleShortToQuantum(pixel),q); p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelAlpha(image,ScaleShortToQuantum(pixel),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } case 32: { unsigned int pixel; if (quantum_info->format == FloatingPointQuantumFormat) { float pixel; for (x=0; x < (ssize_t) number_pixels; x++) { p=PushFloatPixel(quantum_info,p,&pixel); SetPixelGray(image,ClampToQuantum(pixel),q); p=PushFloatPixel(quantum_info,p,&pixel); SetPixelAlpha(image,ClampToQuantum(pixel),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } for (x=0; x < (ssize_t) number_pixels; x++) { p=PushLongPixel(quantum_info->endian,p,&pixel); SetPixelGray(image,ScaleLongToQuantum(pixel),q); p=PushLongPixel(quantum_info->endian,p,&pixel); SetPixelAlpha(image,ScaleLongToQuantum(pixel),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } case 64: { if (quantum_info->format == FloatingPointQuantumFormat) { double pixel; for (x=0; x < (ssize_t) number_pixels; x++) { 
/*
  NOTE(review): this chunk begins inside ImportGrayAlphaQuantum(); the lines
  below are the tail of its 64-bit floating-point case followed by the
  arbitrary-depth default case.
*/
        p=PushDoublePixel(quantum_info,p,&pixel);
        SetPixelGray(image,ClampToQuantum(pixel),q);
        p=PushDoublePixel(quantum_info,p,&pixel);
        SetPixelAlpha(image,ClampToQuantum(pixel),q);
        p+=quantum_info->pad;
        q+=GetPixelChannels(image);
      }
      break;
    }
  }
  default:
  {
    /* Arbitrary bit depth: scale each sample up from [0,2^depth-1]. */
    range=GetQuantumRange(quantum_info->depth);
    for (x=0; x < (ssize_t) number_pixels; x++)
    {
      p=PushQuantumPixel(quantum_info,p,&pixel);
      SetPixelGray(image,ScaleAnyToQuantum(pixel,range),q);
      p=PushQuantumPixel(quantum_info,p,&pixel);
      SetPixelAlpha(image,ScaleAnyToQuantum(pixel,range),q);
      p+=quantum_info->pad;
      q+=GetPixelChannels(image);
    }
    break;
  }
  }
}

/*
  ImportGreenQuantum() decodes a stream of green-channel samples from the
  raw buffer p into the pixel queue q.  The sample layout (depth, endianness,
  float vs. integer format, per-pixel pad bytes) is described by quantum_info.
  Note the 64-bit case deliberately falls through to the generic default path
  when the format is not floating point.
*/
static void ImportGreenQuantum(const Image *image,QuantumInfo *quantum_info,
  const MagickSizeType number_pixels,const unsigned char *magick_restrict p,
  Quantum *magick_restrict q,ExceptionInfo *exception)
{
  QuantumAny
    range;

  register ssize_t
    x;

  unsigned int
    pixel;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  switch (quantum_info->depth)
  {
    case 8:
    {
      unsigned char
        pixel;

      for (x=0; x < (ssize_t) number_pixels; x++)
      {
        p=PushCharPixel(p,&pixel);
        SetPixelGreen(image,ScaleCharToQuantum(pixel),q);
        p+=quantum_info->pad;
        q+=GetPixelChannels(image);
      }
      break;
    }
    case 16:
    {
      unsigned short
        pixel;

      if (quantum_info->format == FloatingPointQuantumFormat)
        {
          /* Half-precision floats stored in 16-bit samples. */
          for (x=0; x < (ssize_t) number_pixels; x++)
          {
            p=PushShortPixel(quantum_info->endian,p,&pixel);
            SetPixelGreen(image,ClampToQuantum(QuantumRange*
              HalfToSinglePrecision(pixel)),q);
            p+=quantum_info->pad;
            q+=GetPixelChannels(image);
          }
          break;
        }
      for (x=0; x < (ssize_t) number_pixels; x++)
      {
        p=PushShortPixel(quantum_info->endian,p,&pixel);
        SetPixelGreen(image,ScaleShortToQuantum(pixel),q);
        p+=quantum_info->pad;
        q+=GetPixelChannels(image);
      }
      break;
    }
    case 32:
    {
      unsigned int
        pixel;

      if (quantum_info->format == FloatingPointQuantumFormat)
        {
          float
            pixel;

          for (x=0; x < (ssize_t) number_pixels; x++)
          {
            p=PushFloatPixel(quantum_info,p,&pixel);
            SetPixelGreen(image,ClampToQuantum(pixel),q);
            p+=quantum_info->pad;
            q+=GetPixelChannels(image);
          }
          break;
        }
      for (x=0; x < (ssize_t) number_pixels; x++)
      {
        p=PushLongPixel(quantum_info->endian,p,&pixel);
        SetPixelGreen(image,ScaleLongToQuantum(pixel),q);
        p+=quantum_info->pad;
        q+=GetPixelChannels(image);
      }
      break;
    }
    case 64:
    {
      if (quantum_info->format == FloatingPointQuantumFormat)
        {
          double
            pixel;

          for (x=0; x < (ssize_t) number_pixels; x++)
          {
            p=PushDoublePixel(quantum_info,p,&pixel);
            SetPixelGreen(image,ClampToQuantum(pixel),q);
            p+=quantum_info->pad;
            q+=GetPixelChannels(image);
          }
          break;
        }
      /* non-float 64-bit: fall through to the generic default path */
    }
    default:
    {
      range=GetQuantumRange(quantum_info->depth);
      for (x=0; x < (ssize_t) number_pixels; x++)
      {
        p=PushQuantumPixel(quantum_info,p,&pixel);
        SetPixelGreen(image,ScaleAnyToQuantum(pixel,range),q);
        p+=quantum_info->pad;
        q+=GetPixelChannels(image);
      }
      break;
    }
  }
}

/*
  ImportIndexQuantum() decodes colormap indexes from the raw buffer p,
  storing the index and the corresponding colormap RGB value into each
  destination pixel.  Requires a PseudoClass image; out-of-range indexes
  are detected by PushColormapIndex() and reported once at the end.
*/
static void ImportIndexQuantum(const Image *image,QuantumInfo *quantum_info,
  const MagickSizeType number_pixels,const unsigned char *magick_restrict p,
  Quantum *magick_restrict q,ExceptionInfo *exception)
{
  MagickBooleanType
    range_exception;

  register ssize_t
    x;

  ssize_t
    bit;

  unsigned int
    pixel;

  if (image->storage_class != PseudoClass)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),ImageError,
        "ColormappedImageRequired","`%s'",image->filename);
      return;
    }
  range_exception=MagickFalse;
  switch (quantum_info->depth)
  {
    case 1:
    {
      register unsigned char
        pixel;

      /* 8 one-bit indexes per byte, MSB first; min_is_white inverts. */
      for (x=0; x < ((ssize_t) number_pixels-7); x+=8)
      {
        for (bit=0; bit < 8; bit++)
        {
          if (quantum_info->min_is_white == MagickFalse)
            pixel=(unsigned char) (((*p) & (1 << (7-bit))) == 0 ?
              0x00 : 0x01);
          else
            pixel=(unsigned char) (((*p) & (1 << (7-bit))) != 0 ?
              0x00 : 0x01);
          SetPixelIndex(image,PushColormapIndex(image,pixel,&range_exception),
            q);
          SetPixelViaPixelInfo(image,image->colormap+(ssize_t)
            GetPixelIndex(image,q),q);
          q+=GetPixelChannels(image);
        }
        p++;
      }
      /* Remaining (< 8) pixels in the final partial byte. */
      for (bit=0; bit < (ssize_t) (number_pixels % 8); bit++)
      {
        if (quantum_info->min_is_white == MagickFalse)
          pixel=(unsigned char) (((*p) & (1 << (7-bit))) == 0 ?
            0x00 : 0x01);
        else
          pixel=(unsigned char) (((*p) & (1 << (7-bit))) != 0 ?
            0x00 : 0x01);
        SetPixelIndex(image,PushColormapIndex(image,pixel,&range_exception),q);
        SetPixelViaPixelInfo(image,image->colormap+(ssize_t)
          GetPixelIndex(image,q),q);
        q+=GetPixelChannels(image);
      }
      break;
    }
    case 4:
    {
      register unsigned char
        pixel;

      /* Two 4-bit indexes per byte: high nibble first. */
      for (x=0; x < ((ssize_t) number_pixels-1); x+=2)
      {
        pixel=(unsigned char) ((*p >> 4) & 0xf);
        SetPixelIndex(image,PushColormapIndex(image,pixel,&range_exception),q);
        SetPixelViaPixelInfo(image,image->colormap+(ssize_t)
          GetPixelIndex(image,q),q);
        q+=GetPixelChannels(image);
        pixel=(unsigned char) ((*p) & 0xf);
        SetPixelIndex(image,PushColormapIndex(image,pixel,&range_exception),q);
        SetPixelViaPixelInfo(image,image->colormap+(ssize_t)
          GetPixelIndex(image,q),q);
        p++;
        q+=GetPixelChannels(image);
      }
      for (bit=0; bit < (ssize_t) (number_pixels % 2); bit++)
      {
        pixel=(unsigned char) ((*p++ >> 4) & 0xf);
        SetPixelIndex(image,PushColormapIndex(image,pixel,&range_exception),q);
        SetPixelViaPixelInfo(image,image->colormap+(ssize_t)
          GetPixelIndex(image,q),q);
        q+=GetPixelChannels(image);
      }
      break;
    }
    case 8:
    {
      unsigned char
        pixel;

      for (x=0; x < (ssize_t) number_pixels; x++)
      {
        p=PushCharPixel(p,&pixel);
        SetPixelIndex(image,PushColormapIndex(image,pixel,&range_exception),q);
        SetPixelViaPixelInfo(image,image->colormap+(ssize_t)
          GetPixelIndex(image,q),q);
        p+=quantum_info->pad;
        q+=GetPixelChannels(image);
      }
      break;
    }
    case 16:
    {
      unsigned short
        pixel;

      if (quantum_info->format == FloatingPointQuantumFormat)
        {
          for (x=0; x < (ssize_t) number_pixels; x++)
          {
            p=PushShortPixel(quantum_info->endian,p,&pixel);
            SetPixelIndex(image,PushColormapIndex(image,ClampToQuantum(
              (double) QuantumRange*HalfToSinglePrecision(pixel)),
              &range_exception),q);
            SetPixelViaPixelInfo(image,image->colormap+(ssize_t)
              GetPixelIndex(image,q),q);
            p+=quantum_info->pad;
            q+=GetPixelChannels(image);
          }
          break;
        }
      for (x=0; x < (ssize_t) number_pixels; x++)
      {
        p=PushShortPixel(quantum_info->endian,p,&pixel);
        SetPixelIndex(image,PushColormapIndex(image,pixel,&range_exception),q);
        SetPixelViaPixelInfo(image,image->colormap+(ssize_t)
          GetPixelIndex(image,q),q);
        p+=quantum_info->pad;
        q+=GetPixelChannels(image);
      }
      break;
    }
    case 32:
    {
      unsigned int
        pixel;

      if (quantum_info->format == FloatingPointQuantumFormat)
        {
          float
            pixel;

          for (x=0; x < (ssize_t) number_pixels; x++)
          {
            p=PushFloatPixel(quantum_info,p,&pixel);
            SetPixelIndex(image,PushColormapIndex(image,ClampToQuantum(pixel),
              &range_exception),q);
            SetPixelViaPixelInfo(image,image->colormap+(ssize_t)
              GetPixelIndex(image,q),q);
            p+=quantum_info->pad;
            q+=GetPixelChannels(image);
          }
          break;
        }
      for (x=0; x < (ssize_t) number_pixels; x++)
      {
        p=PushLongPixel(quantum_info->endian,p,&pixel);
        SetPixelIndex(image,PushColormapIndex(image,pixel,&range_exception),q);
        SetPixelViaPixelInfo(image,image->colormap+(ssize_t)
          GetPixelIndex(image,q),q);
        p+=quantum_info->pad;
        q+=GetPixelChannels(image);
      }
      break;
    }
    case 64:
    {
      if (quantum_info->format == FloatingPointQuantumFormat)
        {
          double
            pixel;

          for (x=0; x < (ssize_t) number_pixels; x++)
          {
            p=PushDoublePixel(quantum_info,p,&pixel);
            SetPixelIndex(image,PushColormapIndex(image,ClampToQuantum(pixel),
              &range_exception),q);
            SetPixelViaPixelInfo(image,image->colormap+(ssize_t)
              GetPixelIndex(image,q),q);
            p+=quantum_info->pad;
            q+=GetPixelChannels(image);
          }
          break;
        }
      /* non-float 64-bit: fall through to the generic default path */
    }
    default:
    {
      for (x=0; x < (ssize_t) number_pixels; x++)
      {
        p=PushQuantumPixel(quantum_info,p,&pixel);
        SetPixelIndex(image,PushColormapIndex(image,pixel,&range_exception),q);
        SetPixelViaPixelInfo(image,image->colormap+(ssize_t)
          GetPixelIndex(image,q),q);
        p+=quantum_info->pad;
        q+=GetPixelChannels(image);
      }
      break;
    }
  }
  if (range_exception != MagickFalse)
    (void) ThrowMagickException(exception,GetMagickModule(),CorruptImageError,
      "InvalidColormapIndex","`%s'",image->filename);
}

/*
  ImportIndexAlphaQuantum() decodes interleaved colormap-index + alpha sample
  pairs from the raw buffer p.  Requires a PseudoClass image; out-of-range
  indexes are detected by PushColormapIndex() and reported once at the end.
*/
static void ImportIndexAlphaQuantum(const Image *image,
  QuantumInfo *quantum_info,const MagickSizeType number_pixels,
  const unsigned char *magick_restrict p,Quantum *magick_restrict q,
  ExceptionInfo *exception)
{
  MagickBooleanType
    range_exception;

  QuantumAny
    range;

  register ssize_t
    x;

  ssize_t
    bit;

  unsigned int
    pixel;

  if (image->storage_class != PseudoClass)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),ImageError,
        "ColormappedImageRequired","`%s'",image->filename);
      return;
    }
  range_exception=MagickFalse;
  switch (quantum_info->depth)
  {
    case 1:
    {
      register unsigned char
        pixel;

      /*
        Two bits per pixel (index bit at 7-bit, alpha bit at 6-bit), i.e.
        4 pixels per byte.  NOTE(review): p is never advanced inside this
        outer loop, so every iteration appears to re-read the same byte --
        confirm against upstream whether a p++ is missing here.
      */
      for (x=((ssize_t) number_pixels-3); x > 0; x-=4)
      {
        for (bit=0; bit < 8; bit+=2)
        {
          if (quantum_info->min_is_white == MagickFalse)
            pixel=(unsigned char) (((*p) & (1 << (7-bit))) == 0 ?
              0x00 : 0x01);
          else
            pixel=(unsigned char) (((*p) & (1 << (7-bit))) != 0 ?
              0x00 : 0x01);
          SetPixelGray(image,(Quantum) (pixel == 0 ? 0 : QuantumRange),q);
          SetPixelAlpha(image,((*p) & (1UL << (unsigned char) (6-bit))) == 0 ?
            TransparentAlpha : OpaqueAlpha,q);
          SetPixelIndex(image,(Quantum) (pixel == 0 ? 0 : 1),q);
          q+=GetPixelChannels(image);
        }
      }
      if ((number_pixels % 4) != 0)
        for (bit=3; bit >= (ssize_t) (4-(number_pixels % 4)); bit-=2)
        {
          if (quantum_info->min_is_white == MagickFalse)
            pixel=(unsigned char) (((*p) & (1 << (7-bit))) == 0 ?
              0x00 : 0x01);
          else
            pixel=(unsigned char) (((*p) & (1 << (7-bit))) != 0 ?
              0x00 : 0x01);
          SetPixelIndex(image,(Quantum) (pixel == 0 ? 0 : 1),q);
          SetPixelGray(image,(Quantum) (pixel == 0 ? 0 : QuantumRange),q);
          SetPixelAlpha(image,((*p) & (1UL << (unsigned char) (6-bit))) == 0 ?
            TransparentAlpha : OpaqueAlpha,q);
          q+=GetPixelChannels(image);
        }
      break;
    }
    case 4:
    {
      register unsigned char
        pixel;

      /* High nibble is the index, low nibble the alpha sample. */
      range=GetQuantumRange(quantum_info->depth);
      for (x=0; x < (ssize_t) number_pixels; x++)
      {
        pixel=(unsigned char) ((*p >> 4) & 0xf);
        SetPixelIndex(image,PushColormapIndex(image,pixel,&range_exception),q);
        SetPixelViaPixelInfo(image,image->colormap+(ssize_t)
          GetPixelIndex(image,q),q);
        pixel=(unsigned char) ((*p) & 0xf);
        SetPixelAlpha(image,ScaleAnyToQuantum(pixel,range),q);
        p++;
        q+=GetPixelChannels(image);
      }
      break;
    }
    case 8:
    {
      unsigned char
        pixel;

      for (x=0; x < (ssize_t) number_pixels; x++)
      {
        p=PushCharPixel(p,&pixel);
        SetPixelIndex(image,PushColormapIndex(image,pixel,&range_exception),q);
        SetPixelViaPixelInfo(image,image->colormap+(ssize_t)
          GetPixelIndex(image,q),q);
        p=PushCharPixel(p,&pixel);
        SetPixelAlpha(image,ScaleCharToQuantum(pixel),q);
        p+=quantum_info->pad;
        q+=GetPixelChannels(image);
      }
      break;
    }
    case 16:
    {
      unsigned short
        pixel;

      if (quantum_info->format == FloatingPointQuantumFormat)
        {
          for (x=0; x < (ssize_t) number_pixels; x++)
          {
            p=PushShortPixel(quantum_info->endian,p,&pixel);
            SetPixelIndex(image,PushColormapIndex(image,ClampToQuantum(
              (double) QuantumRange*HalfToSinglePrecision(pixel)),
              &range_exception),q);
            SetPixelViaPixelInfo(image,image->colormap+(ssize_t)
              GetPixelIndex(image,q),q);
            p=PushShortPixel(quantum_info->endian,p,&pixel);
            SetPixelAlpha(image,ClampToQuantum(QuantumRange*
              HalfToSinglePrecision(pixel)),q);
            p+=quantum_info->pad;
            q+=GetPixelChannels(image);
          }
          break;
        }
      for (x=0; x < (ssize_t) number_pixels; x++)
      {
        p=PushShortPixel(quantum_info->endian,p,&pixel);
        SetPixelIndex(image,PushColormapIndex(image,pixel,&range_exception),q);
        SetPixelViaPixelInfo(image,image->colormap+(ssize_t)
          GetPixelIndex(image,q),q);
        p=PushShortPixel(quantum_info->endian,p,&pixel);
        SetPixelAlpha(image,ScaleShortToQuantum(pixel),q);
        p+=quantum_info->pad;
        q+=GetPixelChannels(image);
      }
      break;
    }
    case 32:
    {
      unsigned int
        pixel;

      if (quantum_info->format == FloatingPointQuantumFormat)
        {
          float
            pixel;

          for (x=0; x < (ssize_t) number_pixels; x++)
          {
            p=PushFloatPixel(quantum_info,p,&pixel);
            SetPixelIndex(image,PushColormapIndex(image,
              ClampToQuantum(pixel),&range_exception),q);
            SetPixelViaPixelInfo(image,image->colormap+(ssize_t)
              GetPixelIndex(image,q),q);
            p=PushFloatPixel(quantum_info,p,&pixel);
            SetPixelAlpha(image,ClampToQuantum(pixel),q);
            p+=quantum_info->pad;
            q+=GetPixelChannels(image);
          }
          break;
        }
      for (x=0; x < (ssize_t) number_pixels; x++)
      {
        p=PushLongPixel(quantum_info->endian,p,&pixel);
        SetPixelIndex(image,PushColormapIndex(image,pixel,&range_exception),q);
        SetPixelViaPixelInfo(image,image->colormap+(ssize_t)
          GetPixelIndex(image,q),q);
        p=PushLongPixel(quantum_info->endian,p,&pixel);
        SetPixelAlpha(image,ScaleLongToQuantum(pixel),q);
        p+=quantum_info->pad;
        q+=GetPixelChannels(image);
      }
      break;
    }
    case 64:
    {
      if (quantum_info->format == FloatingPointQuantumFormat)
        {
          double
            pixel;

          for (x=0; x < (ssize_t) number_pixels; x++)
          {
            p=PushDoublePixel(quantum_info,p,&pixel);
            SetPixelIndex(image,PushColormapIndex(image,ClampToQuantum(pixel),
              &range_exception),q);
            SetPixelViaPixelInfo(image,image->colormap+(ssize_t)
              GetPixelIndex(image,q),q);
            p=PushDoublePixel(quantum_info,p,&pixel);
            SetPixelAlpha(image,ClampToQuantum(pixel),q);
            p+=quantum_info->pad;
            q+=GetPixelChannels(image);
          }
          break;
        }
      /* non-float 64-bit: fall through to the generic default path */
    }
    default:
    {
      range=GetQuantumRange(quantum_info->depth);
      for (x=0; x < (ssize_t) number_pixels; x++)
      {
        p=PushQuantumPixel(quantum_info,p,&pixel);
        SetPixelIndex(image,PushColormapIndex(image,pixel,&range_exception),q);
        SetPixelViaPixelInfo(image,image->colormap+(ssize_t)
          GetPixelIndex(image,q),q);
        p=PushQuantumPixel(quantum_info,p,&pixel);
        SetPixelAlpha(image,ScaleAnyToQuantum(pixel,range),q);
        p+=quantum_info->pad;
        q+=GetPixelChannels(image);
      }
      break;
    }
  }
  if (range_exception != MagickFalse)
    (void) ThrowMagickException(exception,GetMagickModule(),CorruptImageError,
      "InvalidColormapIndex","`%s'",image->filename);
}

/*
  ImportOpacityQuantum() decodes a stream of opacity samples from the raw
  buffer p into the pixel queue q, handling 8/16/32/64-bit and arbitrary
  depths plus half/single/double float formats.
*/
static void ImportOpacityQuantum(const Image *image,QuantumInfo *quantum_info,
  const MagickSizeType number_pixels,const unsigned char *magick_restrict p,
  Quantum *magick_restrict q,ExceptionInfo *exception)
{
  QuantumAny
    range;

  register ssize_t
    x;

  unsigned int
    pixel;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  switch (quantum_info->depth)
  {
    case 8:
    {
      unsigned char
        pixel;

      for (x=0; x < (ssize_t) number_pixels; x++)
      {
        p=PushCharPixel(p,&pixel);
        SetPixelOpacity(image,ScaleCharToQuantum(pixel),q);
        p+=quantum_info->pad;
        q+=GetPixelChannels(image);
      }
      break;
    }
    case 16:
    {
      unsigned short
        pixel;

      if (quantum_info->format == FloatingPointQuantumFormat)
        {
          for (x=0; x < (ssize_t) number_pixels; x++)
          {
            p=PushShortPixel(quantum_info->endian,p,&pixel);
            SetPixelOpacity(image,ClampToQuantum(QuantumRange*
              HalfToSinglePrecision(pixel)),q);
            p+=quantum_info->pad;
            q+=GetPixelChannels(image);
          }
          break;
        }
      for (x=0; x < (ssize_t) number_pixels; x++)
      {
        p=PushShortPixel(quantum_info->endian,p,&pixel);
        SetPixelOpacity(image,ScaleShortToQuantum(pixel),q);
        p+=quantum_info->pad;
        q+=GetPixelChannels(image);
      }
      break;
    }
    case 32:
    {
      unsigned int
        pixel;

      if (quantum_info->format == FloatingPointQuantumFormat)
        {
          float
            pixel;

          for (x=0; x < (ssize_t) number_pixels; x++)
          {
            p=PushFloatPixel(quantum_info,p,&pixel);
            SetPixelOpacity(image,ClampToQuantum(pixel),q);
            p+=quantum_info->pad;
            q+=GetPixelChannels(image);
          }
          break;
        }
      for (x=0; x < (ssize_t) number_pixels; x++)
      {
        p=PushLongPixel(quantum_info->endian,p,&pixel);
        SetPixelOpacity(image,ScaleLongToQuantum(pixel),q);
        p+=quantum_info->pad;
        q+=GetPixelChannels(image);
      }
      break;
    }
    case 64:
    {
      if (quantum_info->format == FloatingPointQuantumFormat)
        {
          double
            pixel;

          for (x=0; x < (ssize_t) number_pixels; x++)
          {
            p=PushDoublePixel(quantum_info,p,&pixel);
            SetPixelOpacity(image,ClampToQuantum(pixel),q);
            p+=quantum_info->pad;
            q+=GetPixelChannels(image);
          }
          break;
        }
      /* non-float 64-bit: fall through to the generic default path */
    }
    default:
    {
      range=GetQuantumRange(quantum_info->depth);
      for (x=0; x < (ssize_t) number_pixels; x++)
      {
        p=PushQuantumPixel(quantum_info,p,&pixel);
        SetPixelOpacity(image,ScaleAnyToQuantum(pixel,range),q);
        p+=quantum_info->pad;
        q+=GetPixelChannels(image);
      }
      break;
    }
  }
}

/*
  ImportRedQuantum() decodes a stream of red-channel samples from the raw
  buffer p into the pixel queue q; structurally identical to
  ImportGreenQuantum() but targeting the red channel.
*/
static void ImportRedQuantum(const Image *image,QuantumInfo *quantum_info,
  const MagickSizeType number_pixels,const unsigned char *magick_restrict p,
  Quantum *magick_restrict q,ExceptionInfo *exception)
{
  QuantumAny
    range;

  register ssize_t
    x;

  unsigned int
    pixel;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  switch (quantum_info->depth)
  {
    case 8:
    {
      unsigned char
        pixel;

      for (x=0; x < (ssize_t) number_pixels; x++)
      {
        p=PushCharPixel(p,&pixel);
        SetPixelRed(image,ScaleCharToQuantum(pixel),q);
        p+=quantum_info->pad;
        q+=GetPixelChannels(image);
      }
      break;
    }
    case 16:
    {
      unsigned short
        pixel;

      if (quantum_info->format == FloatingPointQuantumFormat)
        {
          for (x=0; x < (ssize_t) number_pixels; x++)
          {
            p=PushShortPixel(quantum_info->endian,p,&pixel);
            SetPixelRed(image,ClampToQuantum(QuantumRange*
              HalfToSinglePrecision(pixel)),q);
            p+=quantum_info->pad;
            q+=GetPixelChannels(image);
          }
          break;
        }
      for (x=0; x < (ssize_t) number_pixels; x++)
      {
        p=PushShortPixel(quantum_info->endian,p,&pixel);
        SetPixelRed(image,ScaleShortToQuantum(pixel),q);
        p+=quantum_info->pad;
        q+=GetPixelChannels(image);
      }
      break;
    }
    case 32:
    {
      unsigned int
        pixel;

      if (quantum_info->format == FloatingPointQuantumFormat)
        {
          float
            pixel;

          for (x=0; x < (ssize_t) number_pixels; x++)
          {
            p=PushFloatPixel(quantum_info,p,&pixel);
            SetPixelRed(image,ClampToQuantum(pixel),q);
            p+=quantum_info->pad;
            q+=GetPixelChannels(image);
          }
          break;
        }
      for (x=0; x < (ssize_t) number_pixels; x++)
      {
        p=PushLongPixel(quantum_info->endian,p,&pixel);
        SetPixelRed(image,ScaleLongToQuantum(pixel),q);
        p+=quantum_info->pad;
        q+=GetPixelChannels(image);
      }
      break;
    }
    case 64:
    {
      if (quantum_info->format == FloatingPointQuantumFormat)
        {
          double
            pixel;

          for (x=0; x < (ssize_t) number_pixels; x++)
          {
            p=PushDoublePixel(quantum_info,p,&pixel);
            SetPixelRed(image,ClampToQuantum(pixel),q);
            p+=quantum_info->pad;
            q+=GetPixelChannels(image);
          }
          break;
        }
      /* non-float 64-bit: fall through to the generic default path */
    }
    default:
    {
      range=GetQuantumRange(quantum_info->depth);
      for (x=0; x < (ssize_t) number_pixels; x++)
      {
        p=PushQuantumPixel(quantum_info,p,&pixel);
        SetPixelRed(image,ScaleAnyToQuantum(pixel,range),q);
        p+=quantum_info->pad;
        q+=GetPixelChannels(image);
      }
      break;
    }
  }
}

/*
  ImportRGBQuantum() decodes interleaved R,G,B sample triples from the raw
  buffer p.  Besides the usual 8/16/32/64-bit paths it handles packed
  10-bit (3 samples per 32-bit word) and 12-bit (samples in the high 12
  bits of 16-bit words) layouts.
*/
static void ImportRGBQuantum(const Image *image,QuantumInfo *quantum_info,
  const MagickSizeType number_pixels,const unsigned char *magick_restrict p,
  Quantum *magick_restrict q,ExceptionInfo *exception)
{
  QuantumAny
    range;

  register ssize_t
    x;

  ssize_t
    bit;

  unsigned int
    pixel;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  switch (quantum_info->depth)
  {
    case 8:
    {
      unsigned char
        pixel;

      for (x=0; x < (ssize_t) number_pixels; x++)
      {
        p=PushCharPixel(p,&pixel);
        SetPixelRed(image,ScaleCharToQuantum(pixel),q);
        p=PushCharPixel(p,&pixel);
        SetPixelGreen(image,ScaleCharToQuantum(pixel),q);
        p=PushCharPixel(p,&pixel);
        SetPixelBlue(image,ScaleCharToQuantum(pixel),q);
        SetPixelAlpha(image,OpaqueAlpha,q);
        p+=quantum_info->pad;
        q+=GetPixelChannels(image);
      }
      break;
    }
    case 10:
    {
      range=GetQuantumRange(quantum_info->depth);
      if (quantum_info->pack == MagickFalse)
        {
          /* R/G/B packed into one 32-bit word at bits 22/12/2. */
          for (x=0; x < (ssize_t) number_pixels; x++)
          {
            p=PushLongPixel(quantum_info->endian,p,&pixel);
            SetPixelRed(image,ScaleAnyToQuantum((pixel >> 22) & 0x3ff,range),q);
            SetPixelGreen(image,ScaleAnyToQuantum((pixel >> 12) & 0x3ff,range),
              q);
            SetPixelBlue(image,ScaleAnyToQuantum((pixel >> 2) & 0x3ff,range),q);
            p+=quantum_info->pad;
            q+=GetPixelChannels(image);
          }
          break;
        }
      if (quantum_info->quantum == 32U)
        {
          for (x=0; x < (ssize_t) number_pixels; x++)
          {
            p=PushQuantumLongPixel(quantum_info,p,&pixel);
            SetPixelRed(image,ScaleAnyToQuantum(pixel,range),q);
            p=PushQuantumLongPixel(quantum_info,p,&pixel);
            SetPixelGreen(image,ScaleAnyToQuantum(pixel,range),q);
            p=PushQuantumLongPixel(quantum_info,p,&pixel);
            SetPixelBlue(image,ScaleAnyToQuantum(pixel,range),q);
            q+=GetPixelChannels(image);
          }
          break;
        }
      for (x=0; x < (ssize_t) number_pixels; x++)
      {
        p=PushQuantumPixel(quantum_info,p,&pixel);
        SetPixelRed(image,ScaleAnyToQuantum(pixel,range),q);
        p=PushQuantumPixel(quantum_info,p,&pixel);
        SetPixelGreen(image,ScaleAnyToQuantum(pixel,range),q);
        p=PushQuantumPixel(quantum_info,p,&pixel);
        SetPixelBlue(image,ScaleAnyToQuantum(pixel,range),q);
        q+=GetPixelChannels(image);
      }
      break;
    }
    case 12:
    {
      range=GetQuantumRange(quantum_info->depth);
      if (quantum_info->pack == MagickFalse)
        {
          unsigned short
            pixel;

          /*
            Two 12-bit samples per iteration, each in the high 12 bits of a
            16-bit word; channel chosen by the running sample index mod 3.
          */
          for (x=0; x < (ssize_t) (3*number_pixels-1); x+=2)
          {
            p=PushShortPixel(quantum_info->endian,p,&pixel);
            switch (x % 3)
            {
              default:
              case 0:
              {
                SetPixelRed(image,ScaleAnyToQuantum((QuantumAny) (pixel >> 4),
                  range),q);
                break;
              }
              case 1:
              {
                SetPixelGreen(image,ScaleAnyToQuantum((QuantumAny) (pixel >> 4),
                  range),q);
                break;
              }
              case 2:
              {
                SetPixelBlue(image,ScaleAnyToQuantum((QuantumAny) (pixel >> 4),
                  range),q);
                q+=GetPixelChannels(image);
                break;
              }
            }
            p=PushShortPixel(quantum_info->endian,p,&pixel);
            switch ((x+1) % 3)
            {
              default:
              case 0:
              {
                SetPixelRed(image,ScaleAnyToQuantum((QuantumAny) (pixel >> 4),
                  range),q);
                break;
              }
              case 1:
              {
                SetPixelGreen(image,ScaleAnyToQuantum((QuantumAny) (pixel >> 4),
                  range),q);
                break;
              }
              case 2:
              {
                SetPixelBlue(image,ScaleAnyToQuantum((QuantumAny) (pixel >> 4),
                  range),q);
                q+=GetPixelChannels(image);
                break;
              }
            }
            p+=quantum_info->pad;
          }
          for (bit=0; bit < (ssize_t) (3*number_pixels % 2); bit++)
          {
            p=PushShortPixel(quantum_info->endian,p,&pixel);
            switch ((x+bit) % 3)
            {
              default:
              case 0:
              {
                SetPixelRed(image,ScaleAnyToQuantum((QuantumAny) (pixel >> 4),
                  range),q);
                break;
              }
              case 1:
              {
                SetPixelGreen(image,ScaleAnyToQuantum((QuantumAny) (pixel >> 4),
                  range),q);
                break;
              }
              case 2:
              {
                SetPixelBlue(image,ScaleAnyToQuantum((QuantumAny) (pixel >> 4),
                  range),q);
                q+=GetPixelChannels(image);
                break;
              }
            }
            p+=quantum_info->pad;
          }
          if (bit != 0)
            p++;
          break;
        }
      if (quantum_info->quantum == 32U)
        {
          for (x=0; x < (ssize_t) number_pixels; x++)
          {
            p=PushQuantumLongPixel(quantum_info,p,&pixel);
            SetPixelRed(image,ScaleAnyToQuantum(pixel,range),q);
            p=PushQuantumLongPixel(quantum_info,p,&pixel);
            SetPixelGreen(image,ScaleAnyToQuantum(pixel,range),q);
            p=PushQuantumLongPixel(quantum_info,p,&pixel);
            SetPixelBlue(image,ScaleAnyToQuantum(pixel,range),q);
            q+=GetPixelChannels(image);
          }
          break;
        }
      for (x=0; x < (ssize_t) number_pixels; x++)
      {
        p=PushQuantumPixel(quantum_info,p,&pixel);
        SetPixelRed(image,ScaleAnyToQuantum(pixel,range),q);
        p=PushQuantumPixel(quantum_info,p,&pixel);
        SetPixelGreen(image,ScaleAnyToQuantum(pixel,range),q);
        p=PushQuantumPixel(quantum_info,p,&pixel);
        SetPixelBlue(image,ScaleAnyToQuantum(pixel,range),q);
        q+=GetPixelChannels(image);
      }
      break;
    }
    case 16:
    {
      unsigned short
        pixel;

      if (quantum_info->format == FloatingPointQuantumFormat)
        {
          for (x=0; x < (ssize_t) number_pixels; x++)
          {
            p=PushShortPixel(quantum_info->endian,p,&pixel);
            SetPixelRed(image,ClampToQuantum(QuantumRange*
              HalfToSinglePrecision(pixel)),q);
            p=PushShortPixel(quantum_info->endian,p,&pixel);
            SetPixelGreen(image,ClampToQuantum(QuantumRange*
              HalfToSinglePrecision(pixel)),q);
            p=PushShortPixel(quantum_info->endian,p,&pixel);
            SetPixelBlue(image,ClampToQuantum(QuantumRange*
              HalfToSinglePrecision(pixel)),q);
            p+=quantum_info->pad;
            q+=GetPixelChannels(image);
          }
          break;
        }
      for (x=0; x < (ssize_t) number_pixels; x++)
      {
        p=PushShortPixel(quantum_info->endian,p,&pixel);
        SetPixelRed(image,ScaleShortToQuantum(pixel),q);
        p=PushShortPixel(quantum_info->endian,p,&pixel);
        SetPixelGreen(image,ScaleShortToQuantum(pixel),q);
        p=PushShortPixel(quantum_info->endian,p,&pixel);
        SetPixelBlue(image,ScaleShortToQuantum(pixel),q);
        p+=quantum_info->pad;
        q+=GetPixelChannels(image);
      }
      break;
    }
    case 32:
    {
      unsigned int
        pixel;

      if (quantum_info->format == FloatingPointQuantumFormat)
        {
          float
            pixel;

          for (x=0; x < (ssize_t) number_pixels; x++)
          {
            p=PushFloatPixel(quantum_info,p,&pixel);
            SetPixelRed(image,ClampToQuantum(pixel),q);
            p=PushFloatPixel(quantum_info,p,&pixel);
            SetPixelGreen(image,ClampToQuantum(pixel),q);
            p=PushFloatPixel(quantum_info,p,&pixel);
            SetPixelBlue(image,ClampToQuantum(pixel),q);
            p+=quantum_info->pad;
            q+=GetPixelChannels(image);
          }
          break;
        }
      for (x=0; x < (ssize_t) number_pixels; x++)
      {
        p=PushLongPixel(quantum_info->endian,p,&pixel);
        SetPixelRed(image,ScaleLongToQuantum(pixel),q);
        p=PushLongPixel(quantum_info->endian,p,&pixel);
        SetPixelGreen(image,ScaleLongToQuantum(pixel),q);
        p=PushLongPixel(quantum_info->endian,p,&pixel);
        SetPixelBlue(image,ScaleLongToQuantum(pixel),q);
        p+=quantum_info->pad;
        q+=GetPixelChannels(image);
      }
      break;
    }
    case 64:
    {
      if (quantum_info->format == FloatingPointQuantumFormat)
        {
          double
            pixel;

          for (x=0; x < (ssize_t) number_pixels; x++)
          {
            p=PushDoublePixel(quantum_info,p,&pixel);
            SetPixelRed(image,ClampToQuantum(pixel),q);
            p=PushDoublePixel(quantum_info,p,&pixel);
            SetPixelGreen(image,ClampToQuantum(pixel),q);
            p=PushDoublePixel(quantum_info,p,&pixel);
            SetPixelBlue(image,ClampToQuantum(pixel),q);
            p+=quantum_info->pad;
            q+=GetPixelChannels(image);
          }
          break;
        }
      /* non-float 64-bit: fall through to the generic default path */
    }
    default:
    {
      range=GetQuantumRange(quantum_info->depth);
      for (x=0; x < (ssize_t) number_pixels; x++)
      {
        p=PushQuantumPixel(quantum_info,p,&pixel);
        SetPixelRed(image,ScaleAnyToQuantum(pixel,range),q);
        p=PushQuantumPixel(quantum_info,p,&pixel);
        SetPixelGreen(image,ScaleAnyToQuantum(pixel,range),q);
        p=PushQuantumPixel(quantum_info,p,&pixel);
        SetPixelBlue(image,ScaleAnyToQuantum(pixel,range),q);
        q+=GetPixelChannels(image);
      }
      break;
    }
  }
}

/*
  ImportRGBAQuantum() decodes interleaved R,G,B,A sample quadruples from the
  raw buffer p, including a packed 10-bit layout where three samples share
  each 32-bit word across pixel boundaries.
*/
static void ImportRGBAQuantum(const Image *image,QuantumInfo *quantum_info,
  const MagickSizeType number_pixels,const unsigned char *magick_restrict p,
  Quantum *magick_restrict q,ExceptionInfo *exception)
{
  QuantumAny
    range;

  register ssize_t
    x;

  unsigned int
    pixel;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  switch (quantum_info->depth)
  {
    case 8:
    {
      unsigned char
        pixel;

      for (x=0; x < (ssize_t) number_pixels; x++)
      {
        p=PushCharPixel(p,&pixel);
        SetPixelRed(image,ScaleCharToQuantum(pixel),q);
        p=PushCharPixel(p,&pixel);
        SetPixelGreen(image,ScaleCharToQuantum(pixel),q);
        p=PushCharPixel(p,&pixel);
        SetPixelBlue(image,ScaleCharToQuantum(pixel),q);
        p=PushCharPixel(p,&pixel);
        SetPixelAlpha(image,ScaleCharToQuantum(pixel),q);
        p+=quantum_info->pad;
        q+=GetPixelChannels(image);
      }
      break;
    }
    case 10:
    {
      pixel=0;
      if (quantum_info->pack == MagickFalse)
        {
          register ssize_t
            i;

          size_t
            quantum;

          ssize_t
            n;

          /*
            Packed 10-bit: each 32-bit word holds three samples (bits
            22/12/2); n tracks the running sample index across words.
          */
          n=0;
          quantum=0;
          for (x=0; x < (ssize_t) number_pixels; x++)
          {
            for (i=0; i < 4; i++)
            {
              switch (n % 3)
              {
                case 0:
                {
                  p=PushLongPixel(quantum_info->endian,p,&pixel);
                  quantum=(size_t) (ScaleShortToQuantum((unsigned short)
                    (((pixel >> 22) & 0x3ff) << 6)));
                  break;
                }
                case 1:
                {
                  quantum=(size_t) (ScaleShortToQuantum((unsigned short)
                    (((pixel >> 12) & 0x3ff) << 6)));
                  break;
                }
                case 2:
                {
                  quantum=(size_t) (ScaleShortToQuantum((unsigned short)
                    (((pixel >> 2) & 0x3ff) << 6)));
                  break;
                }
              }
              switch (i)
              {
                case 0: SetPixelRed(image,(Quantum) quantum,q); break;
                case 1: SetPixelGreen(image,(Quantum) quantum,q); break;
                case 2: SetPixelBlue(image,(Quantum) quantum,q); break;
                case 3: SetPixelAlpha(image,(Quantum) quantum,q); break;
              }
              n++;
            }
            p+=quantum_info->pad;
            q+=GetPixelChannels(image);
          }
          break;
        }
      for (x=0; x < (ssize_t) number_pixels; x++)
      {
        p=PushQuantumPixel(quantum_info,p,&pixel);
        SetPixelRed(image,ScaleShortToQuantum((unsigned short) (pixel << 6)),q);
        p=PushQuantumPixel(quantum_info,p,&pixel);
        SetPixelGreen(image,ScaleShortToQuantum((unsigned short) (pixel << 6)),
          q);
        p=PushQuantumPixel(quantum_info,p,&pixel);
        SetPixelBlue(image,ScaleShortToQuantum((unsigned short) (pixel << 6)),
          q);
        p=PushQuantumPixel(quantum_info,p,&pixel);
        SetPixelAlpha(image,ScaleShortToQuantum((unsigned short) (pixel << 6)),
          q);
        q+=GetPixelChannels(image);
      }
      break;
    }
    case 16:
    {
      unsigned short
        pixel;

      if (quantum_info->format == FloatingPointQuantumFormat)
        {
          for (x=0; x < (ssize_t) number_pixels; x++)
          {
            p=PushShortPixel(quantum_info->endian,p,&pixel);
            SetPixelRed(image,ClampToQuantum(QuantumRange*
              HalfToSinglePrecision(pixel)),q);
            p=PushShortPixel(quantum_info->endian,p,&pixel);
            SetPixelGreen(image,ClampToQuantum(QuantumRange*
              HalfToSinglePrecision(pixel)),q);
            p=PushShortPixel(quantum_info->endian,p,&pixel);
            SetPixelBlue(image,ClampToQuantum(QuantumRange*
              HalfToSinglePrecision(pixel)),q);
            p=PushShortPixel(quantum_info->endian,p,&pixel);
            SetPixelAlpha(image,ClampToQuantum(QuantumRange*
              HalfToSinglePrecision(pixel)),q);
            p+=quantum_info->pad;
            q+=GetPixelChannels(image);
          }
          break;
        }
      for (x=0; x < (ssize_t) number_pixels; x++)
      {
        p=PushShortPixel(quantum_info->endian,p,&pixel);
        SetPixelRed(image,ScaleShortToQuantum(pixel),q);
        p=PushShortPixel(quantum_info->endian,p,&pixel);
        SetPixelGreen(image,ScaleShortToQuantum(pixel),q);
        p=PushShortPixel(quantum_info->endian,p,&pixel);
        SetPixelBlue(image,ScaleShortToQuantum(pixel),q);
        p=PushShortPixel(quantum_info->endian,p,&pixel);
        SetPixelAlpha(image,ScaleShortToQuantum(pixel),q);
        p+=quantum_info->pad;
        q+=GetPixelChannels(image);
      }
      break;
    }
    case 32:
    {
      unsigned int
        pixel;

      if (quantum_info->format == FloatingPointQuantumFormat)
        {
          float
            pixel;

          for (x=0; x < (ssize_t) number_pixels; x++)
          {
            p=PushFloatPixel(quantum_info,p,&pixel);
            SetPixelRed(image,ClampToQuantum(pixel),q);
            p=PushFloatPixel(quantum_info,p,&pixel);
            SetPixelGreen(image,ClampToQuantum(pixel),q);
            p=PushFloatPixel(quantum_info,p,&pixel);
            SetPixelBlue(image,ClampToQuantum(pixel),q);
            p=PushFloatPixel(quantum_info,p,&pixel);
            SetPixelAlpha(image,ClampToQuantum(pixel),q);
            p+=quantum_info->pad;
            q+=GetPixelChannels(image);
          }
          break;
        }
      for (x=0; x < (ssize_t) number_pixels; x++)
      {
        p=PushLongPixel(quantum_info->endian,p,&pixel);
        SetPixelRed(image,ScaleLongToQuantum(pixel),q);
        p=PushLongPixel(quantum_info->endian,p,&pixel);
        SetPixelGreen(image,ScaleLongToQuantum(pixel),q);
        p=PushLongPixel(quantum_info->endian,p,&pixel);
        SetPixelBlue(image,ScaleLongToQuantum(pixel),q);
        p=PushLongPixel(quantum_info->endian,p,&pixel);
        SetPixelAlpha(image,ScaleLongToQuantum(pixel),q);
        p+=quantum_info->pad;
        q+=GetPixelChannels(image);
      }
      break;
    }
    case 64:
    {
      if (quantum_info->format == FloatingPointQuantumFormat)
        {
          double
            pixel;

          for (x=0; x < (ssize_t) number_pixels; x++)
          {
            p=PushDoublePixel(quantum_info,p,&pixel);
            SetPixelRed(image,ClampToQuantum(pixel),q);
            p=PushDoublePixel(quantum_info,p,&pixel);
            SetPixelGreen(image,ClampToQuantum(pixel),q);
            p=PushDoublePixel(quantum_info,p,&pixel);
            SetPixelBlue(image,ClampToQuantum(pixel),q);
            p=PushDoublePixel(quantum_info,p,&pixel);
            SetPixelAlpha(image,ClampToQuantum(pixel),q);
            p+=quantum_info->pad;
            q+=GetPixelChannels(image);
          }
          break;
        }
      /* non-float 64-bit: fall through to the generic default path */
    }
    default:
    {
      range=GetQuantumRange(quantum_info->depth);
      for (x=0; x < (ssize_t) number_pixels; x++)
      {
        p=PushQuantumPixel(quantum_info,p,&pixel);
        SetPixelRed(image,ScaleAnyToQuantum(pixel,range),q);
        p=PushQuantumPixel(quantum_info,p,&pixel);
        SetPixelGreen(image,ScaleAnyToQuantum(pixel,range),q);
        p=PushQuantumPixel(quantum_info,p,&pixel);
        SetPixelBlue(image,ScaleAnyToQuantum(pixel,range),q);
        p=PushQuantumPixel(quantum_info,p,&pixel);
        SetPixelAlpha(image,ScaleAnyToQuantum(pixel,range),q);
        q+=GetPixelChannels(image);
      }
      break;
    }
  }
}

/*
  ImportRGBOQuantum() decodes interleaved R,G,B,O sample quadruples from the
  raw buffer p; identical in structure to ImportRGBAQuantum() but the fourth
  sample is stored via SetPixelOpacity().
*/
static void ImportRGBOQuantum(const Image *image,QuantumInfo *quantum_info,
  const MagickSizeType number_pixels,const unsigned char *magick_restrict p,
  Quantum *magick_restrict q,ExceptionInfo *exception)
{
  QuantumAny
    range;

  register ssize_t
    x;

  unsigned int
    pixel;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  switch (quantum_info->depth)
  {
    case 8:
    {
      unsigned char
        pixel;

      for (x=0; x < (ssize_t) number_pixels; x++)
      {
        p=PushCharPixel(p,&pixel);
        SetPixelRed(image,ScaleCharToQuantum(pixel),q);
        p=PushCharPixel(p,&pixel);
        SetPixelGreen(image,ScaleCharToQuantum(pixel),q);
        p=PushCharPixel(p,&pixel);
        SetPixelBlue(image,ScaleCharToQuantum(pixel),q);
        p=PushCharPixel(p,&pixel);
        SetPixelOpacity(image,ScaleCharToQuantum(pixel),q);
        p+=quantum_info->pad;
        q+=GetPixelChannels(image);
      }
      break;
    }
    case 10:
    {
      pixel=0;
      if (quantum_info->pack == MagickFalse)
        {
          register ssize_t
            i;

          size_t
            quantum;

          ssize_t
            n;

          /* Same packed 10-bit layout as ImportRGBAQuantum() case 10. */
          n=0;
          quantum=0;
          for (x=0; x < (ssize_t) number_pixels; x++)
          {
            for (i=0; i < 4; i++)
            {
              switch (n % 3)
              {
                case 0:
                {
                  p=PushLongPixel(quantum_info->endian,p,&pixel);
                  quantum=(size_t) (ScaleShortToQuantum((unsigned short)
                    (((pixel >> 22) & 0x3ff) << 6)));
                  break;
                }
                case 1:
                {
                  quantum=(size_t) (ScaleShortToQuantum((unsigned short)
                    (((pixel >> 12) & 0x3ff) << 6)));
                  break;
                }
                case 2:
                {
                  quantum=(size_t) (ScaleShortToQuantum((unsigned short)
                    (((pixel >> 2) & 0x3ff) << 6)));
                  break;
                }
              }
              switch (i)
              {
                case 0: SetPixelRed(image,(Quantum) quantum,q); break;
                case 1: SetPixelGreen(image,(Quantum) quantum,q); break;
                case 2: SetPixelBlue(image,(Quantum) quantum,q); break;
                case 3: SetPixelOpacity(image,(Quantum) quantum,q); break;
              }
              n++;
            }
            p+=quantum_info->pad;
            q+=GetPixelChannels(image);
          }
          break;
        }
      for (x=0; x < (ssize_t) number_pixels; x++)
      {
        p=PushQuantumPixel(quantum_info,p,&pixel);
        SetPixelRed(image,ScaleShortToQuantum((unsigned short) (pixel << 6)),q);
        p=PushQuantumPixel(quantum_info,p,&pixel);
        SetPixelGreen(image,ScaleShortToQuantum((unsigned short) (pixel << 6)),
          q);
        p=PushQuantumPixel(quantum_info,p,&pixel);
        SetPixelBlue(image,ScaleShortToQuantum((unsigned short) (pixel << 6)),
          q);
        p=PushQuantumPixel(quantum_info,p,&pixel);
        SetPixelOpacity(image,ScaleShortToQuantum((unsigned short) (pixel << 6)),
          q);
        q+=GetPixelChannels(image);
      }
      break;
    }
    case 16:
    {
      unsigned short
        pixel;

      if (quantum_info->format == FloatingPointQuantumFormat)
        {
          for (x=0; x < (ssize_t) number_pixels; x++)
          {
            p=PushShortPixel(quantum_info->endian,p,&pixel);
            SetPixelRed(image,ClampToQuantum(QuantumRange*
              HalfToSinglePrecision(pixel)),q);
            p=PushShortPixel(quantum_info->endian,p,&pixel);
            SetPixelGreen(image,ClampToQuantum(QuantumRange*
              HalfToSinglePrecision(pixel)),q);
            p=PushShortPixel(quantum_info->endian,p,&pixel);
            SetPixelBlue(image,ClampToQuantum(QuantumRange*
              HalfToSinglePrecision(pixel)),q);
            p=PushShortPixel(quantum_info->endian,p,&pixel);
            SetPixelOpacity(image,ClampToQuantum(QuantumRange*
              HalfToSinglePrecision(pixel)),q);
            p+=quantum_info->pad;
            q+=GetPixelChannels(image);
          }
          break;
        }
      for (x=0; x < (ssize_t) number_pixels; x++)
      {
        p=PushShortPixel(quantum_info->endian,p,&pixel);
        SetPixelRed(image,ScaleShortToQuantum(pixel),q);
        p=PushShortPixel(quantum_info->endian,p,&pixel);
        SetPixelGreen(image,ScaleShortToQuantum(pixel),q);
        p=PushShortPixel(quantum_info->endian,p,&pixel);
        SetPixelBlue(image,ScaleShortToQuantum(pixel),q);
        p=PushShortPixel(quantum_info->endian,p,&pixel);
        SetPixelOpacity(image,ScaleShortToQuantum(pixel),q);
        p+=quantum_info->pad;
        q+=GetPixelChannels(image);
      }
      break;
    }
    case 32:
    {
      unsigned int
        pixel;

      if (quantum_info->format == FloatingPointQuantumFormat)
        {
          float
            pixel;

          for (x=0; x < (ssize_t) number_pixels; x++)
          {
            p=PushFloatPixel(quantum_info,p,&pixel);
            SetPixelRed(image,ClampToQuantum(pixel),q);
            p=PushFloatPixel(quantum_info,p,&pixel);
            SetPixelGreen(image,ClampToQuantum(pixel),q);
            p=PushFloatPixel(quantum_info,p,&pixel);
            SetPixelBlue(image,ClampToQuantum(pixel),q);
            p=PushFloatPixel(quantum_info,p,&pixel);
            SetPixelOpacity(image,ClampToQuantum(pixel),q);
            p+=quantum_info->pad;
            q+=GetPixelChannels(image);
          }
          break;
        }
      for (x=0; x < (ssize_t) number_pixels; x++)
      {
        p=PushLongPixel(quantum_info->endian,p,&pixel);
        SetPixelRed(image,ScaleLongToQuantum(pixel),q);
        p=PushLongPixel(quantum_info->endian,p,&pixel);
        SetPixelGreen(image,ScaleLongToQuantum(pixel),q);
        p=PushLongPixel(quantum_info->endian,p,&pixel);
        SetPixelBlue(image,ScaleLongToQuantum(pixel),q);
        p=PushLongPixel(quantum_info->endian,p,&pixel);
        SetPixelOpacity(image,ScaleLongToQuantum(pixel),q);
        p+=quantum_info->pad;
        q+=GetPixelChannels(image);
      }
      break;
    }
    case 64:
    {
      if (quantum_info->format == FloatingPointQuantumFormat)
        {
          double
            pixel;

          for (x=0; x < (ssize_t) number_pixels; x++)
          {
            p=PushDoublePixel(quantum_info,p,&pixel);
            SetPixelRed(image,ClampToQuantum(pixel),q);
            p=PushDoublePixel(quantum_info,p,&pixel);
            SetPixelGreen(image,ClampToQuantum(pixel),q);
            p=PushDoublePixel(quantum_info,p,&pixel);
            SetPixelBlue(image,ClampToQuantum(pixel),q);
            p=PushDoublePixel(quantum_info,p,&pixel);
            SetPixelOpacity(image,ClampToQuantum(pixel),q);
            p+=quantum_info->pad;
            q+=GetPixelChannels(image);
          }
          break;
        }
      /* non-float 64-bit: fall through to the generic default path */
    }
    default:
    {
      range=GetQuantumRange(quantum_info->depth);
      for (x=0; x < (ssize_t) number_pixels; x++)
      {
        p=PushQuantumPixel(quantum_info,p,&pixel);
        SetPixelRed(image,ScaleAnyToQuantum(pixel,range),q);
        p=PushQuantumPixel(quantum_info,p,&pixel);
        SetPixelGreen(image,ScaleAnyToQuantum(pixel,range),q);
        p=PushQuantumPixel(quantum_info,p,&pixel);
        SetPixelBlue(image,ScaleAnyToQuantum(pixel,range),q);
        p=PushQuantumPixel(quantum_info,p,&pixel);
        SetPixelOpacity(image,ScaleAnyToQuantum(pixel,range),q);
        q+=GetPixelChannels(image);
      }
      break;
    }
  }
}

/*
  ImportQuantumPixels() transfers one scanline of raw samples from pixels
  into the image's (or cache view's) authentic pixel queue, dispatching on
  quantum_type to the per-layout decoders above.  After decoding it swaps
  the red/green channels for CbYCr(A) layouts and, when the quantum's alpha
  type is DisassociatedQuantumAlpha, divides the color channels by alpha.
  Returns the number of bytes consumed per row (the quantum extent).
*/
MagickExport size_t ImportQuantumPixels(const Image *image,
  CacheView *image_view,QuantumInfo *quantum_info,
  const QuantumType quantum_type,const unsigned char *magick_restrict pixels,
  ExceptionInfo *exception)
{
  MagickSizeType
    number_pixels;

  register const unsigned char
    *magick_restrict p;

  register ssize_t
    x;

  register Quantum
    *magick_restrict q;

  size_t
    extent;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(quantum_info != (QuantumInfo *) NULL);
  assert(quantum_info->signature == MagickCoreSignature);
  if (pixels == (const unsigned char *) NULL)
    pixels=(const unsigned char *) GetQuantumPixels(quantum_info);
  x=0;
  p=pixels;
  if (image_view == (CacheView *) NULL)
    {
      number_pixels=GetImageExtent(image);
      q=GetAuthenticPixelQueue(image);
    }
  else
    {
      number_pixels=GetCacheViewExtent(image_view);
      q=GetCacheViewAuthenticPixelQueue(image_view);
    }
  ResetQuantumState(quantum_info);
  extent=GetQuantumExtent(image,quantum_info,quantum_type);
  switch (quantum_type)
  {
    case AlphaQuantum:
    {
      ImportAlphaQuantum(image,quantum_info,number_pixels,p,q,exception);
      break;
    }
    case BGRQuantum:
    {
      ImportBGRQuantum(image,quantum_info,number_pixels,p,q,exception);
      break;
    }
    case BGRAQuantum:
    {
      ImportBGRAQuantum(image,quantum_info,number_pixels,p,q,exception);
      break;
    }
    case BGROQuantum:
    {
      ImportBGROQuantum(image,quantum_info,number_pixels,p,q,exception);
      break;
    }
    case BlackQuantum:
    {
      ImportBlackQuantum(image,quantum_info,number_pixels,p,q,exception);
      break;
    }
    case BlueQuantum:
    case YellowQuantum:
    {
      ImportBlueQuantum(image,quantum_info,number_pixels,p,q,exception);
      break;
    }
    case CMYKQuantum:
    {
      ImportCMYKQuantum(image,quantum_info,number_pixels,p,q,exception);
      break;
    }
    case CMYKAQuantum:
    {
      ImportCMYKAQuantum(image,quantum_info,number_pixels,p,q,exception);
      break;
    }
    case CMYKOQuantum:
    {
      ImportCMYKOQuantum(image,quantum_info,number_pixels,p,q,exception);
      break;
    }
    case CbYCrYQuantum:
    {
      ImportCbYCrYQuantum(image,quantum_info,number_pixels,p,q,exception);
      break;
    }
    case GrayQuantum:
    {
      ImportGrayQuantum(image,quantum_info,number_pixels,p,q,exception);
      break;
    }
    case GrayAlphaQuantum:
    {
      ImportGrayAlphaQuantum(image,quantum_info,number_pixels,p,q,exception);
      break;
    }
    case GreenQuantum:
    case MagentaQuantum:
    {
      ImportGreenQuantum(image,quantum_info,number_pixels,p,q,exception);
      break;
    }
    case IndexQuantum:
    {
      ImportIndexQuantum(image,quantum_info,number_pixels,p,q,exception);
      break;
    }
    case IndexAlphaQuantum:
    {
      ImportIndexAlphaQuantum(image,quantum_info,number_pixels,p,q,exception);
      break;
    }
    case OpacityQuantum:
    {
      ImportOpacityQuantum(image,quantum_info,number_pixels,p,q,exception);
      break;
    }
    case RedQuantum:
    case CyanQuantum:
    {
      ImportRedQuantum(image,quantum_info,number_pixels,p,q,exception);
      break;
    }
    case RGBQuantum:
    case CbYCrQuantum:
    {
      ImportRGBQuantum(image,quantum_info,number_pixels,p,q,exception);
      break;
    }
    case RGBAQuantum:
    case CbYCrAQuantum:
    {
      ImportRGBAQuantum(image,quantum_info,number_pixels,p,q,exception);
      break;
    }
    case RGBOQuantum:
    {
      ImportRGBOQuantum(image,quantum_info,number_pixels,p,q,exception);
      break;
    }
    default:
      break;
  }
  if ((quantum_type == CbYCrQuantum) || (quantum_type == CbYCrAQuantum))
    {
      Quantum
        quantum;

      register Quantum
        *magick_restrict q;

      /* CbYCr layouts store Y in the red slot: swap red and green. */
      q=GetAuthenticPixelQueue(image);
      if (image_view != (CacheView *) NULL)
        q=GetCacheViewAuthenticPixelQueue(image_view);
      for (x=0; x < (ssize_t) number_pixels; x++)
      {
        quantum=GetPixelRed(image,q);
        SetPixelRed(image,GetPixelGreen(image,q),q);
        SetPixelGreen(image,quantum,q);
        q+=GetPixelChannels(image);
      }
    }
  if (quantum_info->alpha_type == DisassociatedQuantumAlpha)
    {
      double
        gamma,
        Sa;

      register Quantum
        *magick_restrict q;

      /*
        Disassociate alpha.
      */
      q=GetAuthenticPixelQueue(image);
      if (image_view != (CacheView *) NULL)
        q=GetCacheViewAuthenticPixelQueue(image_view);
      for (x=0; x < (ssize_t) number_pixels; x++)
      {
        register ssize_t
          i;

        if (GetPixelReadMask(image,q) == 0)
          {
            q+=GetPixelChannels(image);
            continue;
          }
        Sa=QuantumScale*GetPixelAlpha(image,q);
        gamma=PerceptibleReciprocal(Sa);
        for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
        {
          PixelChannel channel=GetPixelChannelChannel(image,i);
          PixelTrait traits=GetPixelChannelTraits(image,channel);
          if ((channel == AlphaPixelChannel) ||
              ((traits & UpdatePixelTrait) == 0))
            continue;
          q[i]=ClampToQuantum(gamma*q[i]);
        }
        q+=GetPixelChannels(image);
      }
    }
  return(extent);
}
./CrossVul/dataset_final_sorted/CWE-284/c/bad_4786_1
crossvul-cpp_data_bad_4811_0
/*
 * Copyright (C) the libgit2 contributors. All rights reserved.
 *
 * This file is part of libgit2, distributed under the GNU GPL v2 with
 * a Linking Exception. For full terms see the included COPYING file.
 */
#ifndef GIT_WINHTTP

#include "git2.h"
#include "http_parser.h"
#include "buffer.h"
#include "netops.h"
#include "global.h"
#include "remote.h"
#include "smart.h"
#include "auth.h"
#include "auth_negotiate.h"
#include "tls_stream.h"
#include "socket_stream.h"
#include "curl_stream.h"

/* Supported HTTP authentication schemes, in order of preference. */
git_http_auth_scheme auth_schemes[] = {
	{ GIT_AUTHTYPE_NEGOTIATE, "Negotiate", GIT_CREDTYPE_DEFAULT, git_http_auth_negotiate },
	{ GIT_AUTHTYPE_BASIC, "Basic", GIT_CREDTYPE_USERPASS_PLAINTEXT, git_http_auth_basic },
};

/* Smart-protocol service names and the URL suffixes used to reach them. */
static const char *upload_pack_service = "upload-pack";
static const char *upload_pack_ls_service_url = "/info/refs?service=git-upload-pack";
static const char *upload_pack_service_url = "/git-upload-pack";
static const char *receive_pack_service = "receive-pack";
static const char *receive_pack_ls_service_url = "/info/refs?service=git-receive-pack";
static const char *receive_pack_service_url = "/git-receive-pack";
static const char *get_verb = "GET";
static const char *post_verb = "POST";

#define OWNING_SUBTRANSPORT(s) ((http_subtransport *)(s)->parent.subtransport)

/* Parser-callback error codes stored in http_subtransport.parse_error. */
#define PARSE_ERROR_GENERIC -1
#define PARSE_ERROR_REPLAY -2
/** Look at the user field */
#define PARSE_ERROR_EXT -3

/* Size of the outgoing chunk buffer for chunked transfer encoding. */
#define CHUNK_SIZE 4096

/* Tracks which header callback fired last, so a FIELD after a VALUE
 * marks the previous name/value pair as complete. */
enum last_cb {
	NONE,
	FIELD,
	VALUE
}
last_cb;

typedef struct {
	git_smart_subtransport_stream parent;
	const char *service;
	const char *service_url;
	char *redirect_url;
	const char *verb;
	char *chunk_buffer;
	unsigned chunk_buffer_len;
	unsigned sent_request : 1,
		received_response : 1,
		chunked : 1,
		redirect_count : 3;
} http_stream;

typedef struct {
	git_smart_subtransport parent;
	transport_smart *owner;
	git_stream *io;
	gitno_connection_data connection_data;
	bool connected;

	/* Parser structures */
	http_parser parser;
	http_parser_settings settings;
	gitno_buffer parse_buffer;
	git_buf parse_header_name;
	git_buf parse_header_value;
	char parse_buffer_data[NETIO_BUFSIZE];
	char *content_type;
	char *location;
	git_vector www_authenticate;
	enum last_cb last_cb;
	int parse_error;
	int error;
	unsigned parse_finished : 1;

	/* Authentication */
	git_cred *cred;
	git_cred *url_cred;
	git_vector auth_contexts;
} http_subtransport;

typedef struct {
	http_stream *s;
	http_subtransport *t;

	/* Target buffer details from read() */
	char *buffer;
	size_t buf_size;
	size_t *bytes_read;
} parser_context;

/* Predicate: does this auth scheme accept the given credential type? */
static bool credtype_match(git_http_auth_scheme *scheme, void *data)
{
	unsigned int credtype = *(unsigned int *)data;

	return !!(scheme->credtypes & credtype);
}

/* Predicate: does this WWW-Authenticate challenge name the given scheme?
 * A match requires the scheme name followed by end-of-string or a space. */
static bool challenge_match(git_http_auth_scheme *scheme, void *data)
{
	const char *scheme_name = scheme->name;
	const char *challenge = (const char *)data;
	size_t scheme_len;

	scheme_len = strlen(scheme_name);
	return (strncasecmp(challenge, scheme_name, scheme_len) == 0 &&
		(challenge[scheme_len] == '\0' || challenge[scheme_len] == ' '));
}

/* Find (or lazily create) the auth context for the first scheme matched by
 * scheme_match(). *out is NULL when no scheme matches; returns -1 on
 * allocation/init failure. */
static int auth_context_match(
	git_http_auth_context **out,
	http_subtransport *t,
	bool (*scheme_match)(git_http_auth_scheme *scheme, void *data),
	void *data)
{
	git_http_auth_scheme *scheme = NULL;
	git_http_auth_context *context = NULL, *c;
	size_t i;

	*out = NULL;

	for (i = 0; i < ARRAY_SIZE(auth_schemes); i++) {
		if (scheme_match(&auth_schemes[i], data)) {
			scheme = &auth_schemes[i];
			break;
		}
	}

	if (!scheme)
		return 0;

	/* See if authentication has already started for this scheme */
	git_vector_foreach(&t->auth_contexts, i, c) {
		if (c->type == scheme->type) {
			context = c;
			break;
		}
	}

	if (!context) {
		if (scheme->init_context(&context, &t->connection_data) < 0)
			return -1;
		else if (!context)
			return 0;
		else if (git_vector_insert(&t->auth_contexts, context) < 0)
			return -1;
	}

	*out = context;

	return 0;
}

/* Append an authentication token (e.g. an Authorization header) to the
 * request being built in buf, using the stored credential or, failing
 * that, a user:pass pair embedded in the remote URL. */
static int apply_credentials(git_buf *buf, http_subtransport *t)
{
	git_cred *cred = t->cred;
	git_http_auth_context *context;

	/* Apply the credentials given to us in the URL */
	if (!cred &&
	    t->connection_data.user && t->connection_data.pass) {
		if (!t->url_cred &&
		    git_cred_userpass_plaintext_new(&t->url_cred,
			t->connection_data.user, t->connection_data.pass) < 0)
			return -1;
		cred = t->url_cred;
	}

	if (!cred)
		return 0;

	/* Get or create a context for the best scheme for this cred type */
	if (auth_context_match(&context, t, credtype_match, &cred->credtype) < 0)
		return -1;

	return context->next_token(buf, context, cred);
}

/* The User-Agent to advertise: a user-configured string if set,
 * otherwise the libgit2 default. */
static const char *user_agent(void)
{
	const char *custom = git_libgit2__user_agent();

	if (custom)
		return custom;

	return "libgit2 " LIBGIT2_VERSION;
}

/* Build the complete HTTP request header block for the stream's verb and
 * service.  content_length > 0 selects a fixed-length POST body; the
 * chunked flag selects Transfer-Encoding: chunked instead. */
static int gen_request(
	git_buf *buf,
	http_stream *s,
	size_t content_length)
{
	http_subtransport *t = OWNING_SUBTRANSPORT(s);
	const char *path = t->connection_data.path ? t->connection_data.path : "/";
	size_t i;

	git_buf_printf(buf, "%s %s%s HTTP/1.1\r\n", s->verb, path, s->service_url);

	git_buf_printf(buf, "User-Agent: git/1.0 (%s)\r\n", user_agent());
	git_buf_printf(buf, "Host: %s\r\n", t->connection_data.host);

	if (s->chunked || content_length > 0) {
		git_buf_printf(buf, "Accept: application/x-git-%s-result\r\n", s->service);
		git_buf_printf(buf, "Content-Type: application/x-git-%s-request\r\n", s->service);

		if (s->chunked)
			git_buf_puts(buf, "Transfer-Encoding: chunked\r\n");
		else
			git_buf_printf(buf, "Content-Length: %"PRIuZ "\r\n", content_length);
	} else
		git_buf_puts(buf, "Accept: */*\r\n");

	/* Any caller-supplied custom headers come after the standard ones. */
	for (i = 0; i < t->owner->custom_headers.count; i++) {
		if (t->owner->custom_headers.strings[i])
			git_buf_printf(buf, "%s\r\n", t->owner->custom_headers.strings[i]);
	}

	/* Apply credentials to the request */
	if (apply_credentials(buf, t) < 0)
		return -1;

	git_buf_puts(buf, "\r\n");

	if (git_buf_oom(buf))
		return -1;

	return 0;
}

/* Walk the collected WWW-Authenticate challenges, initialize an auth
 * context for each recognized scheme, and OR the accepted credential
 * types into *allowed_types. */
static int parse_authenticate_response(
	git_vector *www_authenticate,
	http_subtransport *t,
	int *allowed_types)
{
	git_http_auth_context *context;
	char *challenge;
	size_t i;

	git_vector_foreach(www_authenticate, i, challenge) {
		if (auth_context_match(&context, t, challenge_match,
		    challenge) < 0)
			return -1;
		else if (!context)
			continue;

		if (context->set_challenge &&
		    context->set_challenge(context, challenge) < 0)
			return -1;

		*allowed_types |= context->credtypes;
	}

	return 0;
}

/* A header name/value pair is complete: capture the headers this
 * transport cares about (Content-Type, WWW-Authenticate, Location). */
static int on_header_ready(http_subtransport *t)
{
	git_buf *name = &t->parse_header_name;
	git_buf *value = &t->parse_header_value;

	if (!strcasecmp("Content-Type", git_buf_cstr(name))) {
		/* Only the first Content-Type is kept. */
		if (!t->content_type) {
			t->content_type = git__strdup(git_buf_cstr(value));
			GITERR_CHECK_ALLOC(t->content_type);
		}
	}
	else if (!strcasecmp("WWW-Authenticate", git_buf_cstr(name))) {
		char *dup = git__strdup(git_buf_cstr(value));
		GITERR_CHECK_ALLOC(dup);

		git_vector_insert(&t->www_authenticate, dup);
	}
	else if (!strcasecmp("Location", git_buf_cstr(name))) {
		if (!t->location) {
			t->location = git__strdup(git_buf_cstr(value));
			GITERR_CHECK_ALLOC(t->location);
		}
	}

	return 0;
}

/* http_parser callback: accumulate header-name bytes (may arrive split). */
static int on_header_field(http_parser *parser, const char *str, size_t len)
{
	parser_context *ctx = (parser_context *) parser->data;
	http_subtransport *t = ctx->t;

	/* Both parse_header_name and parse_header_value are populated
	 * and ready for consumption */
	if (VALUE == t->last_cb)
		if (on_header_ready(t) < 0)
			return t->parse_error = PARSE_ERROR_GENERIC;

	if (NONE == t->last_cb || VALUE == t->last_cb)
		git_buf_clear(&t->parse_header_name);

	if (git_buf_put(&t->parse_header_name, str, len) < 0)
		return t->parse_error = PARSE_ERROR_GENERIC;

	t->last_cb = FIELD;
	return 0;
}

/* http_parser callback: accumulate header-value bytes. */
static int on_header_value(http_parser *parser, const char *str, size_t len)
{
	parser_context *ctx = (parser_context *) parser->data;
	http_subtransport *t = ctx->t;

	assert(NONE != t->last_cb);

	if (FIELD == t->last_cb)
		git_buf_clear(&t->parse_header_value);

	if (git_buf_put(&t->parse_header_value, str, len) < 0)
		return t->parse_error = PARSE_ERROR_GENERIC;

	t->last_cb = VALUE;
	return 0;
}

/* http_parser callback: all headers received.  Handles auth retries,
 * redirects and Content-Type validation (body continues below). */
static int on_headers_complete(http_parser *parser)
{
	parser_context *ctx = (parser_context *) parser->data;
	http_subtransport *t = ctx->t;
	http_stream *s = ctx->s;
	git_buf buf =
GIT_BUF_INIT; int error = 0, no_callback = 0, allowed_auth_types = 0; /* Both parse_header_name and parse_header_value are populated * and ready for consumption. */ if (VALUE == t->last_cb) if (on_header_ready(t) < 0) return t->parse_error = PARSE_ERROR_GENERIC; /* Capture authentication headers which may be a 401 (authentication * is not complete) or a 200 (simply informing us that auth *is* * complete.) */ if (parse_authenticate_response(&t->www_authenticate, t, &allowed_auth_types) < 0) return t->parse_error = PARSE_ERROR_GENERIC; /* Check for an authentication failure. */ if (parser->status_code == 401 && get_verb == s->verb) { if (!t->owner->cred_acquire_cb) { no_callback = 1; } else { if (allowed_auth_types) { if (t->cred) { t->cred->free(t->cred); t->cred = NULL; } error = t->owner->cred_acquire_cb(&t->cred, t->owner->url, t->connection_data.user, allowed_auth_types, t->owner->cred_acquire_payload); if (error == GIT_PASSTHROUGH) { no_callback = 1; } else if (error < 0) { t->error = error; return t->parse_error = PARSE_ERROR_EXT; } else { assert(t->cred); if (!(t->cred->credtype & allowed_auth_types)) { giterr_set(GITERR_NET, "credentials callback returned an invalid cred type"); return t->parse_error = PARSE_ERROR_GENERIC; } /* Successfully acquired a credential. */ t->parse_error = PARSE_ERROR_REPLAY; return 0; } } } if (no_callback) { giterr_set(GITERR_NET, "authentication required but no callback set"); return t->parse_error = PARSE_ERROR_GENERIC; } } /* Check for a redirect. * Right now we only permit a redirect to the same hostname. 
*/ if ((parser->status_code == 301 || parser->status_code == 302 || (parser->status_code == 303 && get_verb == s->verb) || parser->status_code == 307) && t->location) { if (s->redirect_count >= 7) { giterr_set(GITERR_NET, "Too many redirects"); return t->parse_error = PARSE_ERROR_GENERIC; } if (gitno_connection_data_from_url(&t->connection_data, t->location, s->service_url) < 0) return t->parse_error = PARSE_ERROR_GENERIC; /* Set the redirect URL on the stream. This is a transfer of * ownership of the memory. */ if (s->redirect_url) git__free(s->redirect_url); s->redirect_url = t->location; t->location = NULL; t->connected = 0; s->redirect_count++; t->parse_error = PARSE_ERROR_REPLAY; return 0; } /* Check for a 200 HTTP status code. */ if (parser->status_code != 200) { giterr_set(GITERR_NET, "Unexpected HTTP status code: %d", parser->status_code); return t->parse_error = PARSE_ERROR_GENERIC; } /* The response must contain a Content-Type header. */ if (!t->content_type) { giterr_set(GITERR_NET, "No Content-Type header in response"); return t->parse_error = PARSE_ERROR_GENERIC; } /* The Content-Type header must match our expectation. 
*/ if (get_verb == s->verb) git_buf_printf(&buf, "application/x-git-%s-advertisement", ctx->s->service); else git_buf_printf(&buf, "application/x-git-%s-result", ctx->s->service); if (git_buf_oom(&buf)) return t->parse_error = PARSE_ERROR_GENERIC; if (strcmp(t->content_type, git_buf_cstr(&buf))) { git_buf_free(&buf); giterr_set(GITERR_NET, "Invalid Content-Type: %s", t->content_type); return t->parse_error = PARSE_ERROR_GENERIC; } git_buf_free(&buf); return 0; } static int on_message_complete(http_parser *parser) { parser_context *ctx = (parser_context *) parser->data; http_subtransport *t = ctx->t; t->parse_finished = 1; return 0; } static int on_body_fill_buffer(http_parser *parser, const char *str, size_t len) { parser_context *ctx = (parser_context *) parser->data; http_subtransport *t = ctx->t; /* If our goal is to replay the request (either an auth failure or * a redirect) then don't bother buffering since we're ignoring the * content anyway. */ if (t->parse_error == PARSE_ERROR_REPLAY) return 0; if (ctx->buf_size < len) { giterr_set(GITERR_NET, "Can't fit data in the buffer"); return t->parse_error = PARSE_ERROR_GENERIC; } memcpy(ctx->buffer, str, len); *(ctx->bytes_read) += len; ctx->buffer += len; ctx->buf_size -= len; return 0; } static void clear_parser_state(http_subtransport *t) { http_parser_init(&t->parser, HTTP_RESPONSE); gitno_buffer_setup_fromstream(t->io, &t->parse_buffer, t->parse_buffer_data, sizeof(t->parse_buffer_data)); t->last_cb = NONE; t->parse_error = 0; t->parse_finished = 0; git_buf_free(&t->parse_header_name); git_buf_init(&t->parse_header_name, 0); git_buf_free(&t->parse_header_value); git_buf_init(&t->parse_header_value, 0); git__free(t->content_type); t->content_type = NULL; git__free(t->location); t->location = NULL; git_vector_free_deep(&t->www_authenticate); } static int write_chunk(git_stream *io, const char *buffer, size_t len) { git_buf buf = GIT_BUF_INIT; /* Chunk header */ git_buf_printf(&buf, "%" PRIxZ "\r\n", len); if 
(git_buf_oom(&buf)) return -1; if (git_stream_write(io, buf.ptr, buf.size, 0) < 0) { git_buf_free(&buf); return -1; } git_buf_free(&buf); /* Chunk body */ if (len > 0 && git_stream_write(io, buffer, len, 0) < 0) return -1; /* Chunk footer */ if (git_stream_write(io, "\r\n", 2, 0) < 0) return -1; return 0; } static int http_connect(http_subtransport *t) { int error; char *proxy_url; if (t->connected && http_should_keep_alive(&t->parser) && t->parse_finished) return 0; if (t->io) { git_stream_close(t->io); git_stream_free(t->io); t->io = NULL; t->connected = 0; } if (t->connection_data.use_ssl) { error = git_tls_stream_new(&t->io, t->connection_data.host, t->connection_data.port); } else { #ifdef GIT_CURL error = git_curl_stream_new(&t->io, t->connection_data.host, t->connection_data.port); #else error = git_socket_stream_new(&t->io, t->connection_data.host, t->connection_data.port); #endif } if (error < 0) return error; GITERR_CHECK_VERSION(t->io, GIT_STREAM_VERSION, "git_stream"); if (git_stream_supports_proxy(t->io) && !git_remote__get_http_proxy(t->owner->owner, !!t->connection_data.use_ssl, &proxy_url)) { error = git_stream_set_proxy(t->io, proxy_url); git__free(proxy_url); if (error < 0) return error; } error = git_stream_connect(t->io); #if defined(GIT_OPENSSL) || defined(GIT_SECURE_TRANSPORT) || defined(GIT_CURL) if ((!error || error == GIT_ECERTIFICATE) && t->owner->certificate_check_cb != NULL && git_stream_is_encrypted(t->io)) { git_cert *cert; int is_valid; if ((error = git_stream_certificate(&cert, t->io)) < 0) return error; giterr_clear(); is_valid = error != GIT_ECERTIFICATE; error = t->owner->certificate_check_cb(cert, is_valid, t->connection_data.host, t->owner->message_cb_payload); if (error < 0) { if (!giterr_last()) giterr_set(GITERR_NET, "user cancelled certificate check"); return error; } } #endif if (error < 0) return error; t->connected = 1; return 0; } static int http_stream_read( git_smart_subtransport_stream *stream, char *buffer, size_t 
	buf_size,
	size_t *bytes_read)
{
	http_stream *s = (http_stream *)stream;
	http_subtransport *t = OWNING_SUBTRANSPORT(s);
	parser_context ctx;
	size_t bytes_parsed;

replay:
	*bytes_read = 0;

	assert(t->connected);

	/* Lazily send the request headers on first read. */
	if (!s->sent_request) {
		git_buf request = GIT_BUF_INIT;

		clear_parser_state(t);

		if (gen_request(&request, s, 0) < 0)
			return -1;

		if (git_stream_write(t->io, request.ptr, request.size, 0) < 0) {
			git_buf_free(&request);
			return -1;
		}

		git_buf_free(&request);

		s->sent_request = 1;
	}

	if (!s->received_response) {
		if (s->chunked) {
			assert(s->verb == post_verb);

			/* Flush, if necessary */
			if (s->chunk_buffer_len > 0 &&
			    write_chunk(t->io, s->chunk_buffer, s->chunk_buffer_len) < 0)
				return -1;

			s->chunk_buffer_len = 0;

			/* Write the final chunk. */
			if (git_stream_write(t->io, "0\r\n\r\n", 5, 0) < 0)
				return -1;
		}

		s->received_response = 1;
	}

	while (!*bytes_read && !t->parse_finished) {
		size_t data_offset;
		int error;

		/*
		 * Make the parse_buffer think it's as full of data as
		 * the buffer, so it won't try to recv more data than
		 * we can put into it.
		 *
		 * data_offset is the actual data offset from which we
		 * should tell the parser to start reading.
		 */
		if (buf_size >= t->parse_buffer.len) {
			t->parse_buffer.offset = 0;
		} else {
			t->parse_buffer.offset = t->parse_buffer.len - buf_size;
		}

		data_offset = t->parse_buffer.offset;

		if (gitno_recv(&t->parse_buffer) < 0)
			return -1;

		/* This call to http_parser_execute will result in invocations of the
		 * on_* family of callbacks. The most interesting of these is
		 * on_body_fill_buffer, which is called when data is ready to be copied
		 * into the target buffer. We need to marshal the buffer, buf_size, and
		 * bytes_read parameters to this callback. */
		ctx.t = t;
		ctx.s = s;
		ctx.buffer = buffer;
		ctx.buf_size = buf_size;
		ctx.bytes_read = bytes_read;

		/* Set the context, call the parser, then unset the context. */
		t->parser.data = &ctx;

		bytes_parsed = http_parser_execute(&t->parser,
			&t->settings,
			t->parse_buffer.data + data_offset,
			t->parse_buffer.offset - data_offset);

		t->parser.data = NULL;

		/* If there was a handled authentication failure, then parse_error
		 * will have signaled us that we should replay the request. */
		if (PARSE_ERROR_REPLAY == t->parse_error) {
			s->sent_request = 0;

			if ((error = http_connect(t)) < 0)
				return error;

			goto replay;
		}

		if (t->parse_error == PARSE_ERROR_EXT) {
			return t->error;
		}

		if (t->parse_error < 0)
			return -1;

		if (bytes_parsed != t->parse_buffer.offset - data_offset) {
			giterr_set(GITERR_NET,
				"HTTP parser error: %s",
				http_errno_description((enum http_errno)t->parser.http_errno));
			return -1;
		}
	}

	return 0;
}

/* Buffer outgoing POST data into CHUNK_SIZE chunks, flushing full chunks
 * (or oversized writes) with Transfer-Encoding: chunked framing. */
static int http_stream_write_chunked(
	git_smart_subtransport_stream *stream,
	const char *buffer,
	size_t len)
{
	http_stream *s = (http_stream *)stream;
	http_subtransport *t = OWNING_SUBTRANSPORT(s);

	assert(t->connected);

	/* Send the request, if necessary */
	if (!s->sent_request) {
		git_buf request = GIT_BUF_INIT;

		clear_parser_state(t);

		if (gen_request(&request, s, 0) < 0)
			return -1;

		if (git_stream_write(t->io, request.ptr, request.size, 0) < 0) {
			git_buf_free(&request);
			return -1;
		}

		git_buf_free(&request);

		s->sent_request = 1;
	}

	if (len > CHUNK_SIZE) {
		/* Flush, if necessary */
		if (s->chunk_buffer_len > 0) {
			if (write_chunk(t->io, s->chunk_buffer, s->chunk_buffer_len) < 0)
				return -1;

			s->chunk_buffer_len = 0;
		}

		/* Write chunk directly */
		if (write_chunk(t->io, buffer, len) < 0)
			return -1;
	} else {
		/* Append as much to the buffer as we can */
		int count = min(CHUNK_SIZE - s->chunk_buffer_len, len);

		if (!s->chunk_buffer)
			s->chunk_buffer = git__malloc(CHUNK_SIZE);

		memcpy(s->chunk_buffer + s->chunk_buffer_len, buffer, count);
		s->chunk_buffer_len += count;
		buffer += count;
		len -= count;

		/* Is the buffer full? If so, then flush */
		if (CHUNK_SIZE == s->chunk_buffer_len) {
			if (write_chunk(t->io, s->chunk_buffer, s->chunk_buffer_len) < 0)
				return -1;

			s->chunk_buffer_len = 0;

			if (len > 0) {
				memcpy(s->chunk_buffer, buffer, len);
				s->chunk_buffer_len = len;
			}
		}
	}

	return 0;
}

/* One-shot writer: sends headers with an explicit Content-Length and the
 * whole body in a single call; a second write is an error. */
static int http_stream_write_single(
	git_smart_subtransport_stream *stream,
	const char *buffer,
	size_t len)
{
	http_stream *s = (http_stream *)stream;
	http_subtransport *t = OWNING_SUBTRANSPORT(s);
	git_buf request = GIT_BUF_INIT;

	assert(t->connected);

	if (s->sent_request) {
		giterr_set(GITERR_NET, "Subtransport configured for only one write");
		return -1;
	}

	clear_parser_state(t);

	if (gen_request(&request, s, len) < 0)
		return -1;

	if (git_stream_write(t->io, request.ptr, request.size, 0) < 0)
		goto on_error;

	if (len && git_stream_write(t->io, buffer, len, 0) < 0)
		goto on_error;

	git_buf_free(&request);
	s->sent_request = 1;

	return 0;

on_error:
	git_buf_free(&request);
	return -1;
}

static void http_stream_free(git_smart_subtransport_stream *stream)
{
	http_stream *s = (http_stream *)stream;

	if (s->chunk_buffer)
		git__free(s->chunk_buffer);

	if (s->redirect_url)
		git__free(s->redirect_url);

	git__free(s);
}

/* Allocate a zeroed http_stream with the single-write default writer;
 * callers then fill in service, URL and verb. */
static int http_stream_alloc(http_subtransport *t,
	git_smart_subtransport_stream **stream)
{
	http_stream *s;

	if (!stream)
		return -1;

	s = git__calloc(sizeof(http_stream), 1);
	GITERR_CHECK_ALLOC(s);

	s->parent.subtransport = &t->parent;
	s->parent.read = http_stream_read;
	s->parent.write = http_stream_write_single;
	s->parent.free = http_stream_free;

	*stream = (git_smart_subtransport_stream *)s;
	return 0;
}

/* GET /info/refs?service=git-upload-pack (ls-remote phase of fetch). */
static int http_uploadpack_ls(
	http_subtransport *t,
	git_smart_subtransport_stream **stream)
{
	http_stream *s;

	if (http_stream_alloc(t, stream) < 0)
		return -1;

	s = (http_stream *)*stream;

	s->service = upload_pack_service;
	s->service_url = upload_pack_ls_service_url;
	s->verb = get_verb;

	return 0;
}

/* POST /git-upload-pack (negotiation/pack download phase of fetch). */
static int http_uploadpack(
	http_subtransport *t,
	git_smart_subtransport_stream **stream)
{
	http_stream *s;

	if
	   (http_stream_alloc(t, stream) < 0)
		return -1;

	s = (http_stream *)*stream;

	s->service = upload_pack_service;
	s->service_url = upload_pack_service_url;
	s->verb = post_verb;

	return 0;
}

/* GET /info/refs?service=git-receive-pack (ls-remote phase of push). */
static int http_receivepack_ls(
	http_subtransport *t,
	git_smart_subtransport_stream **stream)
{
	http_stream *s;

	if (http_stream_alloc(t, stream) < 0)
		return -1;

	s = (http_stream *)*stream;

	s->service = receive_pack_service;
	s->service_url = receive_pack_ls_service_url;
	s->verb = get_verb;

	return 0;
}

/* POST /git-receive-pack (pack upload phase of push, chunked body). */
static int http_receivepack(
	http_subtransport *t,
	git_smart_subtransport_stream **stream)
{
	http_stream *s;

	if (http_stream_alloc(t, stream) < 0)
		return -1;

	s = (http_stream *)*stream;

	/* Use Transfer-Encoding: chunked for this request */
	s->chunked = 1;
	s->parent.write = http_stream_write_chunked;

	s->service = receive_pack_service;
	s->service_url = receive_pack_service_url;
	s->verb = post_verb;

	return 0;
}

/* Subtransport entry point: parse the URL on first use, connect, then
 * create the stream matching the requested smart-protocol action. */
static int http_action(
	git_smart_subtransport_stream **stream,
	git_smart_subtransport *subtransport,
	const char *url,
	git_smart_service_t action)
{
	http_subtransport *t = (http_subtransport *)subtransport;
	int ret;

	if (!stream)
		return -1;

	if ((!t->connection_data.host || !t->connection_data.port || !t->connection_data.path) &&
		 (ret = gitno_connection_data_from_url(&t->connection_data, url, NULL)) < 0)
		return ret;

	if ((ret = http_connect(t)) < 0)
		return ret;

	switch (action) {
	case GIT_SERVICE_UPLOADPACK_LS:
		return http_uploadpack_ls(t, stream);

	case GIT_SERVICE_UPLOADPACK:
		return http_uploadpack(t, stream);

	case GIT_SERVICE_RECEIVEPACK_LS:
		return http_receivepack_ls(t, stream);

	case GIT_SERVICE_RECEIVEPACK:
		return http_receivepack(t, stream);
	}

	*stream = NULL;
	return -1;
}

/* Tear down the connection and release all credentials, auth contexts
 * and parsed connection data; the subtransport object itself survives. */
static int http_close(git_smart_subtransport *subtransport)
{
	http_subtransport *t = (http_subtransport *) subtransport;
	git_http_auth_context *context;
	size_t i;

	clear_parser_state(t);

	if (t->io) {
		git_stream_close(t->io);
		git_stream_free(t->io);
		t->io = NULL;
	}

	if (t->cred) {
		t->cred->free(t->cred);
		t->cred = NULL;
	}

	if (t->url_cred) {
		t->url_cred->free(t->url_cred);
		t->url_cred = NULL;
	}

	git_vector_foreach(&t->auth_contexts, i, context) {
		if (context->free)
			context->free(context);
	}

	git_vector_clear(&t->auth_contexts);

	gitno_connection_data_free_ptrs(&t->connection_data);
	memset(&t->connection_data, 0x0, sizeof(gitno_connection_data));

	return 0;
}

static void http_free(git_smart_subtransport *subtransport)
{
	http_subtransport *t = (http_subtransport *) subtransport;

	http_close(subtransport);

	git_vector_free(&t->auth_contexts);
	git__free(t);
}

/* Factory: allocate the HTTP smart subtransport and wire the http_parser
 * callbacks to the on_* handlers defined above. */
int git_smart_subtransport_http(git_smart_subtransport **out, git_transport *owner, void *param)
{
	http_subtransport *t;

	GIT_UNUSED(param);

	if (!out)
		return -1;

	t = git__calloc(sizeof(http_subtransport), 1);
	GITERR_CHECK_ALLOC(t);

	t->owner = (transport_smart *)owner;
	t->parent.action = http_action;
	t->parent.close = http_close;
	t->parent.free = http_free;

	t->settings.on_header_field = on_header_field;
	t->settings.on_header_value = on_header_value;
	t->settings.on_headers_complete = on_headers_complete;
	t->settings.on_body = on_body_fill_buffer;
	t->settings.on_message_complete = on_message_complete;

	*out = (git_smart_subtransport *) t;
	return 0;
}

#endif /* !GIT_WINHTTP */
./CrossVul/dataset_final_sorted/CWE-284/c/bad_4811_0
crossvul-cpp_data_good_1571_8
/*
   Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; version 2 of the License.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301  USA */

/* Show databases, tables or columns */

#define SHOW_VERSION "9.10"

#include "client_priv.h"
#include "my_default.h"
#include <my_sys.h>
#include <m_string.h>
#include <mysql.h>
#include <mysqld_error.h>
#include <signal.h>
#include <stdarg.h>
#include <sslopt-vars.h>
#include <welcome_copyright_notice.h>   /* ORACLE_WELCOME_COPYRIGHT_NOTICE */

/* Command-line option state, filled in by get_options(). */
static char * host=0, *opt_password=0, *user=0;
static my_bool opt_show_keys= 0, opt_compress= 0, opt_count=0, opt_status= 0;
static my_bool tty_password= 0, opt_table_type= 0;
static my_bool debug_info_flag= 0, debug_check_flag= 0;
static uint my_end_arg= 0;
static uint opt_verbose=0;
static char *default_charset= (char*) MYSQL_AUTODETECT_CHARSET_NAME;
static char *opt_plugin_dir= 0, *opt_default_auth= 0;

#if defined (_WIN32) && !defined (EMBEDDED_LIBRARY)
static char *shared_memory_base_name=0;
#endif
static uint opt_protocol=0;
static char *opt_bind_addr = NULL;

static void get_options(int *argc,char ***argv);
static uint opt_mysql_port=0;
static int list_dbs(MYSQL *mysql,const char *wild);
static int list_tables(MYSQL *mysql,const char *db,const char *table);
static int list_table_status(MYSQL *mysql,const char *db,const char *table);
static int list_fields(MYSQL *mysql,const char *db,const char *table,
		       const char *field);
static void print_header(const char *header,uint head_length,...);
static void print_row(const char *header,uint head_length,...);
static void print_trailer(uint length,...);
static void print_res_header(MYSQL_RES *result);
static void print_res_top(MYSQL_RES *result);
static void print_res_row(MYSQL_RES *result,MYSQL_ROW cur);

static const char *load_default_groups[]= { "mysqlshow","client",0 };
static char * opt_mysql_unix_port=0;

/*
  Entry point: parse options, translate shell-style wildcards in the last
  argument into SQL LIKE patterns, connect to the server, then list
  databases / tables / fields depending on how many arguments remain.
*/
int main(int argc, char **argv)
{
  int error;
  my_bool first_argument_uses_wildcards=0;
  char *wild;
  MYSQL mysql;
  MY_INIT(argv[0]);
  my_getopt_use_args_separator= TRUE;
  if (load_defaults("my",load_default_groups,&argc,&argv))
    exit(1);
  my_getopt_use_args_separator= FALSE;

  get_options(&argc,&argv);

  wild=0;
  if (argc)
  {
    /* Rewrite shell wildcards (* ?) in the last argument to SQL (% _),
       honouring backslash escapes; the copy via `to` drops the escape
       character itself. */
    char *pos= argv[argc-1], *to;
    for (to= pos ; *pos ; pos++, to++)
    {
      switch (*pos) {
      case '*':
	*pos= '%';
	first_argument_uses_wildcards= 1;
	break;
      case '?':
	*pos= '_';
	first_argument_uses_wildcards= 1;
	break;
      case '%':
      case '_':
	first_argument_uses_wildcards= 1;
	break;
      case '\\':
	pos++;
      default: break;
      }
      *to= *pos;
    }
    *to= *pos; /* just to copy a '\0'  if '\\' was used */
  }
  if (first_argument_uses_wildcards)
    wild= argv[--argc];
  else if (argc == 3)			/* We only want one field */
    wild= argv[--argc];

  if (argc > 2)
  {
    fprintf(stderr,"%s: Too many arguments\n",my_progname);
    exit(1);
  }
  mysql_init(&mysql);
  if (opt_compress)
    mysql_options(&mysql,MYSQL_OPT_COMPRESS,NullS);
  SSL_SET_OPTIONS(&mysql);
  if (opt_protocol)
    mysql_options(&mysql,MYSQL_OPT_PROTOCOL,(char*)&opt_protocol);
  if (opt_bind_addr)
    mysql_options(&mysql,MYSQL_OPT_BIND,opt_bind_addr);
#if defined (_WIN32) && !defined (EMBEDDED_LIBRARY)
  if (shared_memory_base_name)
    mysql_options(&mysql,MYSQL_SHARED_MEMORY_BASE_NAME,shared_memory_base_name);
#endif
  mysql_options(&mysql, MYSQL_SET_CHARSET_NAME, default_charset);

  if (opt_plugin_dir && *opt_plugin_dir)
    mysql_options(&mysql, MYSQL_PLUGIN_DIR, opt_plugin_dir);

  if (opt_default_auth && *opt_default_auth)
    mysql_options(&mysql,
MYSQL_DEFAULT_AUTH, opt_default_auth); mysql_options(&mysql, MYSQL_OPT_CONNECT_ATTR_RESET, 0); mysql_options4(&mysql, MYSQL_OPT_CONNECT_ATTR_ADD, "program_name", "mysqlshow"); if (!(mysql_real_connect(&mysql,host,user,opt_password, (first_argument_uses_wildcards) ? "" : argv[0],opt_mysql_port,opt_mysql_unix_port, 0))) { fprintf(stderr,"%s: %s\n",my_progname,mysql_error(&mysql)); exit(1); } mysql.reconnect= 1; switch (argc) { case 0: error=list_dbs(&mysql,wild); break; case 1: if (opt_status) error=list_table_status(&mysql,argv[0],wild); else error=list_tables(&mysql,argv[0],wild); break; default: if (opt_status && ! wild) error=list_table_status(&mysql,argv[0],argv[1]); else error=list_fields(&mysql,argv[0],argv[1],wild); break; } mysql_close(&mysql); /* Close & free connection */ my_free(opt_password); #if defined (_WIN32) && !defined (EMBEDDED_LIBRARY) my_free(shared_memory_base_name); #endif my_end(my_end_arg); exit(error ? 1 : 0); return 0; /* No compiler warnings */ } static struct my_option my_long_options[] = { {"bind-address", 0, "IP address to bind to.", (uchar**) &opt_bind_addr, (uchar**) &opt_bind_addr, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"character-sets-dir", 'c', "Directory for character set files.", &charsets_dir, &charsets_dir, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"default-character-set", OPT_DEFAULT_CHARSET, "Set the default character set.", &default_charset, &default_charset, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"count", OPT_COUNT, "Show number of rows per table (may be slow for non-MyISAM tables).", &opt_count, &opt_count, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"compress", 'C', "Use compression in server/client protocol.", &opt_compress, &opt_compress, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"debug", '#', "Output debug log. 
Often this is 'd:t:o,filename'.", 0, 0, 0, GET_STR, OPT_ARG, 0, 0, 0, 0, 0, 0}, {"debug-check", OPT_DEBUG_CHECK, "Check memory and open file usage at exit.", &debug_check_flag, &debug_check_flag, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"debug-info", OPT_DEBUG_INFO, "Print some debug info at exit.", &debug_info_flag, &debug_info_flag, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"default_auth", OPT_DEFAULT_AUTH, "Default authentication client-side plugin to use.", &opt_default_auth, &opt_default_auth, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"help", '?', "Display this help and exit.", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, {"host", 'h', "Connect to host.", &host, &host, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"status", 'i', "Shows a lot of extra information about each table.", &opt_status, &opt_status, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"keys", 'k', "Show keys for table.", &opt_show_keys, &opt_show_keys, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"password", 'p', "Password to use when connecting to server. 
If password is not given, it's " "solicited on the tty.", 0, 0, 0, GET_PASSWORD, OPT_ARG, 0, 0, 0, 0, 0, 0}, {"plugin_dir", OPT_PLUGIN_DIR, "Directory for client-side plugins.", &opt_plugin_dir, &opt_plugin_dir, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"port", 'P', "Port number to use for connection or 0 for default to, in " "order of preference, my.cnf, $MYSQL_TCP_PORT, " #if MYSQL_PORT_DEFAULT == 0 "/etc/services, " #endif "built-in default (" STRINGIFY_ARG(MYSQL_PORT) ").", &opt_mysql_port, &opt_mysql_port, 0, GET_UINT, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, #ifdef _WIN32 {"pipe", 'W', "Use named pipes to connect to server.", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, #endif {"protocol", OPT_MYSQL_PROTOCOL, "The protocol to use for connection (tcp, socket, pipe, memory).", 0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, #if defined (_WIN32) && !defined (EMBEDDED_LIBRARY) {"shared-memory-base-name", OPT_SHARED_MEMORY_BASE_NAME, "Base name of shared memory.", &shared_memory_base_name, &shared_memory_base_name, 0, GET_STR_ALLOC, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, #endif {"show-table-type", 't', "Show table type column.", &opt_table_type, &opt_table_type, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"socket", 'S', "The socket file to use for connection.", &opt_mysql_unix_port, &opt_mysql_unix_port, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, #include <sslopt-longopts.h> {"user", 'u', "User for login if not current user.", &user, &user, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"verbose", 'v', "More verbose output; you can use this multiple times to get even more " "verbose output.", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, {"version", 'V', "Output version information and exit.", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0} }; static void print_version(void) { printf("%s Ver %s Distrib %s, for %s (%s)\n",my_progname,SHOW_VERSION, MYSQL_SERVER_VERSION,SYSTEM_TYPE,MACHINE_TYPE); } static void 
usage(void) { print_version(); puts(ORACLE_WELCOME_COPYRIGHT_NOTICE("2000")); puts("Shows the structure of a MySQL database (databases, tables, and columns).\n"); printf("Usage: %s [OPTIONS] [database [table [column]]]\n",my_progname); puts("\n\ If last argument contains a shell or SQL wildcard (*,?,% or _) then only\n\ what\'s matched by the wildcard is shown.\n\ If no database is given then all matching databases are shown.\n\ If no table is given, then all matching tables in database are shown.\n\ If no column is given, then all matching columns and column types in table\n\ are shown."); print_defaults("my",load_default_groups); my_print_help(my_long_options); my_print_variables(my_long_options); } static my_bool get_one_option(int optid, const struct my_option *opt __attribute__((unused)), char *argument) { switch(optid) { case 'v': opt_verbose++; break; case 'p': if (argument == disabled_my_option) argument= (char*) ""; /* Don't require password */ if (argument) { char *start=argument; my_free(opt_password); opt_password=my_strdup(PSI_NOT_INSTRUMENTED, argument,MYF(MY_FAE)); while (*argument) *argument++= 'x'; /* Destroy argument */ if (*start) start[1]=0; /* Cut length of argument */ tty_password= 0; } else tty_password=1; break; case 'W': #ifdef _WIN32 opt_protocol = MYSQL_PROTOCOL_PIPE; #endif break; case OPT_MYSQL_PROTOCOL: opt_protocol= find_type_or_exit(argument, &sql_protocol_typelib, opt->name); break; case '#': DBUG_PUSH(argument ? 
argument : "d:t:o"); debug_check_flag= 1; break; #include <sslopt-case.h> case 'V': print_version(); exit(0); break; case '?': case 'I': /* Info */ usage(); exit(0); } return 0; } static void get_options(int *argc,char ***argv) { int ho_error; if ((ho_error=handle_options(argc, argv, my_long_options, get_one_option))) exit(ho_error); if (tty_password) opt_password=get_tty_password(NullS); if (opt_count) { /* We need to set verbose to 2 as we need to change the output to include the number-of-rows column */ opt_verbose= 2; } if (debug_info_flag) my_end_arg= MY_CHECK_ERROR | MY_GIVE_INFO; if (debug_check_flag) my_end_arg= MY_CHECK_ERROR; return; } static int list_dbs(MYSQL *mysql,const char *wild) { const char *header; uint length, counter = 0; ulong rowcount = 0L; char tables[NAME_LEN+1], rows[NAME_LEN+1]; char query[NAME_LEN + 100]; MYSQL_FIELD *field; MYSQL_RES *result; MYSQL_ROW row= NULL, rrow; if (!(result=mysql_list_dbs(mysql,wild))) { fprintf(stderr,"%s: Cannot list databases: %s\n",my_progname, mysql_error(mysql)); return 1; } /* If a wildcard was used, but there was only one row and it's name is an exact match, we'll assume they really wanted to see the contents of that database. This is because it is fairly common for database names to contain the underscore (_), like INFORMATION_SCHEMA. 
*/ if (wild && mysql_num_rows(result) == 1) { row= mysql_fetch_row(result); if (!my_strcasecmp(&my_charset_latin1, row[0], wild)) { mysql_free_result(result); if (opt_status) return list_table_status(mysql, wild, NULL); else return list_tables(mysql, wild, NULL); } } if (wild) printf("Wildcard: %s\n",wild); header="Databases"; length=(uint) strlen(header); field=mysql_fetch_field(result); if (length < field->max_length) length=field->max_length; if (!opt_verbose) print_header(header,length,NullS); else if (opt_verbose == 1) print_header(header,length,"Tables",6,NullS); else print_header(header,length,"Tables",6,"Total Rows",12,NullS); /* The first row may have already been read up above. */ while (row || (row= mysql_fetch_row(result))) { counter++; if (opt_verbose) { if (!(mysql_select_db(mysql,row[0]))) { MYSQL_RES *tresult = mysql_list_tables(mysql,(char*)NULL); if (mysql_affected_rows(mysql) > 0) { sprintf(tables,"%6lu",(ulong) mysql_affected_rows(mysql)); rowcount = 0; if (opt_verbose > 1) { /* Print the count of tables and rows for each database */ MYSQL_ROW trow; while ((trow = mysql_fetch_row(tresult))) { my_snprintf(query, sizeof(query), "SELECT COUNT(*) FROM `%s`", trow[0]); if (!(mysql_query(mysql,query))) { MYSQL_RES *rresult; if ((rresult = mysql_store_result(mysql))) { rrow = mysql_fetch_row(rresult); rowcount += (ulong) strtoull(rrow[0], (char**) 0, 10); mysql_free_result(rresult); } } } sprintf(rows,"%12lu",rowcount); } } else { sprintf(tables,"%6d",0); sprintf(rows,"%12d",0); } mysql_free_result(tresult); } else { my_stpcpy(tables,"N/A"); my_stpcpy(rows,"N/A"); } } if (!opt_verbose) print_row(row[0],length,0); else if (opt_verbose == 1) print_row(row[0],length,tables,6,NullS); else print_row(row[0],length,tables,6,rows,12,NullS); row= NULL; } print_trailer(length, (opt_verbose > 0 ? 6 : 0), (opt_verbose > 1 ? 12 :0), 0); if (counter && opt_verbose) printf("%u row%s in set.\n",counter,(counter > 1) ? 
"s" : ""); mysql_free_result(result); return 0; } static int list_tables(MYSQL *mysql,const char *db,const char *table) { const char *header; uint head_length, counter = 0; char query[NAME_LEN + 100], rows[NAME_LEN], fields[16]; MYSQL_FIELD *field; MYSQL_RES *result; MYSQL_ROW row, rrow; if (mysql_select_db(mysql,db)) { fprintf(stderr,"%s: Cannot connect to db %s: %s\n",my_progname,db, mysql_error(mysql)); return 1; } if (table) { /* We just hijack the 'rows' variable for a bit to store the escaped table name */ mysql_real_escape_string(mysql, rows, table, (unsigned long)strlen(table)); my_snprintf(query, sizeof(query), "show%s tables like '%s'", opt_table_type ? " full" : "", rows); } else my_snprintf(query, sizeof(query), "show%s tables", opt_table_type ? " full" : ""); if (mysql_query(mysql, query) || !(result= mysql_store_result(mysql))) { fprintf(stderr,"%s: Cannot list tables in %s: %s\n",my_progname,db, mysql_error(mysql)); exit(1); } printf("Database: %s",db); if (table) printf(" Wildcard: %s",table); putchar('\n'); header="Tables"; head_length=(uint) strlen(header); field=mysql_fetch_field(result); if (head_length < field->max_length) head_length=field->max_length; if (opt_table_type) { if (!opt_verbose) print_header(header,head_length,"table_type",10,NullS); else if (opt_verbose == 1) print_header(header,head_length,"table_type",10,"Columns",8,NullS); else { print_header(header,head_length,"table_type",10,"Columns",8, "Total Rows",10,NullS); } } else { if (!opt_verbose) print_header(header,head_length,NullS); else if (opt_verbose == 1) print_header(header,head_length,"Columns",8,NullS); else print_header(header,head_length,"Columns",8, "Total Rows",10,NullS); } while ((row = mysql_fetch_row(result))) { counter++; if (opt_verbose > 0) { if (!(mysql_select_db(mysql,db))) { MYSQL_RES *rresult = mysql_list_fields(mysql,row[0],NULL); ulong rowcount=0L; if (!rresult) { my_stpcpy(fields,"N/A"); my_stpcpy(rows,"N/A"); } else { sprintf(fields,"%8u",(uint) 
mysql_num_fields(rresult)); mysql_free_result(rresult); if (opt_verbose > 1) { /* Print the count of rows for each table */ my_snprintf(query, sizeof(query), "SELECT COUNT(*) FROM `%s`", row[0]); if (!(mysql_query(mysql,query))) { if ((rresult = mysql_store_result(mysql))) { rrow = mysql_fetch_row(rresult); rowcount += (unsigned long) strtoull(rrow[0], (char**) 0, 10); mysql_free_result(rresult); } sprintf(rows,"%10lu",rowcount); } else sprintf(rows,"%10d",0); } } } else { my_stpcpy(fields,"N/A"); my_stpcpy(rows,"N/A"); } } if (opt_table_type) { if (!opt_verbose) print_row(row[0],head_length,row[1],10,NullS); else if (opt_verbose == 1) print_row(row[0],head_length,row[1],10,fields,8,NullS); else print_row(row[0],head_length,row[1],10,fields,8,rows,10,NullS); } else { if (!opt_verbose) print_row(row[0],head_length,NullS); else if (opt_verbose == 1) print_row(row[0],head_length, fields,8, NullS); else print_row(row[0],head_length, fields,8, rows,10, NullS); } } print_trailer(head_length, (opt_table_type ? 10 : opt_verbose > 0 ? 8 : 0), (opt_table_type ? (opt_verbose > 0 ? 8 : 0) : (opt_verbose > 1 ? 10 :0)), !opt_table_type ? 0 : opt_verbose > 1 ? 10 :0, 0); if (counter && opt_verbose) printf("%u row%s in set.\n\n",counter,(counter > 1) ? "s" : ""); mysql_free_result(result); return 0; } static int list_table_status(MYSQL *mysql,const char *db,const char *wild) { char query[NAME_LEN + 100]; int len; MYSQL_RES *result; MYSQL_ROW row; len= sizeof(query); len-= my_snprintf(query, len, "show table status from `%s`", db); if (wild && wild[0] && len) strxnmov(query + strlen(query), len - 1, " like '", wild, "'", NullS); if (mysql_query(mysql,query) || !(result=mysql_store_result(mysql))) { fprintf(stderr,"%s: Cannot get status for db: %s, table: %s: %s\n", my_progname,db,wild ? 
wild : "",mysql_error(mysql)); if (mysql_errno(mysql) == ER_PARSE_ERROR) fprintf(stderr,"This error probably means that your MySQL server doesn't support the\n\'show table status' command.\n"); return 1; } printf("Database: %s",db); if (wild) printf(" Wildcard: %s",wild); putchar('\n'); print_res_header(result); while ((row=mysql_fetch_row(result))) print_res_row(result,row); print_res_top(result); mysql_free_result(result); return 0; } /* list fields uses field interface as an example of how to parse a MYSQL FIELD */ static int list_fields(MYSQL *mysql,const char *db,const char *table, const char *wild) { char query[NAME_LEN + 100]; int len; MYSQL_RES *result; MYSQL_ROW row; ulong UNINIT_VAR(rows); if (mysql_select_db(mysql,db)) { fprintf(stderr,"%s: Cannot connect to db: %s: %s\n",my_progname,db, mysql_error(mysql)); return 1; } if (opt_count) { my_snprintf(query, sizeof(query), "select count(*) from `%s`", table); if (mysql_query(mysql,query) || !(result=mysql_store_result(mysql))) { fprintf(stderr,"%s: Cannot get record count for db: %s, table: %s: %s\n", my_progname,db,table,mysql_error(mysql)); return 1; } row= mysql_fetch_row(result); rows= (ulong) strtoull(row[0], (char**) 0, 10); mysql_free_result(result); } len= sizeof(query); len-= my_snprintf(query, len, "show /*!32332 FULL */ columns from `%s`", table); if (wild && wild[0] && len) strxnmov(query + strlen(query), len - 1, " like '", wild, "'", NullS); if (mysql_query(mysql,query) || !(result=mysql_store_result(mysql))) { fprintf(stderr,"%s: Cannot list columns in db: %s, table: %s: %s\n", my_progname,db,table,mysql_error(mysql)); return 1; } printf("Database: %s Table: %s", db, table); if (opt_count) printf(" Rows: %lu", rows); if (wild && wild[0]) printf(" Wildcard: %s",wild); putchar('\n'); print_res_header(result); while ((row=mysql_fetch_row(result))) print_res_row(result,row); print_res_top(result); if (opt_show_keys) { my_snprintf(query, sizeof(query), "show keys from `%s`", table); if 
(mysql_query(mysql,query) || !(result=mysql_store_result(mysql))) { fprintf(stderr,"%s: Cannot list keys in db: %s, table: %s: %s\n", my_progname,db,table,mysql_error(mysql)); return 1; } if (mysql_num_rows(result)) { print_res_header(result); while ((row=mysql_fetch_row(result))) print_res_row(result,row); print_res_top(result); } else puts("Table has no keys"); } mysql_free_result(result); return 0; } /***************************************************************************** General functions to print a nice ascii-table from data *****************************************************************************/ static void print_header(const char *header,uint head_length,...) { va_list args; uint length,i,str_length,pre_space; const char *field; va_start(args,head_length); putchar('+'); field=header; length=head_length; for (;;) { for (i=0 ; i < length+2 ; i++) putchar('-'); putchar('+'); if (!(field=va_arg(args,char *))) break; length=va_arg(args,uint); } va_end(args); putchar('\n'); va_start(args,head_length); field=header; length=head_length; putchar('|'); for (;;) { str_length=(uint) strlen(field); if (str_length > length) str_length=length+1; pre_space=(uint) (((int) length-(int) str_length)/2)+1; for (i=0 ; i < pre_space ; i++) putchar(' '); for (i = 0 ; i < str_length ; i++) putchar(field[i]); length=length+2-str_length-pre_space; for (i=0 ; i < length ; i++) putchar(' '); putchar('|'); if (!(field=va_arg(args,char *))) break; length=va_arg(args,uint); } va_end(args); putchar('\n'); va_start(args,head_length); putchar('+'); field=header; length=head_length; for (;;) { for (i=0 ; i < length+2 ; i++) putchar('-'); putchar('+'); if (!(field=va_arg(args,char *))) break; length=va_arg(args,uint); } va_end(args); putchar('\n'); } static void print_row(const char *header,uint head_length,...) 
{ va_list args; const char *field; uint i,length,field_length; va_start(args,head_length); field=header; length=head_length; for (;;) { putchar('|'); putchar(' '); fputs(field,stdout); field_length=(uint) strlen(field); for (i=field_length ; i <= length ; i++) putchar(' '); if (!(field=va_arg(args,char *))) break; length=va_arg(args,uint); } va_end(args); putchar('|'); putchar('\n'); } static void print_trailer(uint head_length,...) { va_list args; uint length,i; va_start(args,head_length); length=head_length; putchar('+'); for (;;) { for (i=0 ; i < length+2 ; i++) putchar('-'); putchar('+'); if (!(length=va_arg(args,uint))) break; } va_end(args); putchar('\n'); } static void print_res_header(MYSQL_RES *result) { MYSQL_FIELD *field; print_res_top(result); mysql_field_seek(result,0); putchar('|'); while ((field = mysql_fetch_field(result))) { printf(" %-*s|",(int) field->max_length+1,field->name); } putchar('\n'); print_res_top(result); } static void print_res_top(MYSQL_RES *result) { uint i,length; MYSQL_FIELD *field; putchar('+'); mysql_field_seek(result,0); while((field = mysql_fetch_field(result))) { if ((length=(uint) strlen(field->name)) > field->max_length) field->max_length=length; else length=field->max_length; for (i=length+2 ; i--> 0 ; ) putchar('-'); putchar('+'); } putchar('\n'); } static void print_res_row(MYSQL_RES *result,MYSQL_ROW cur) { uint i,length; MYSQL_FIELD *field; putchar('|'); mysql_field_seek(result,0); for (i=0 ; i < mysql_num_fields(result); i++) { field = mysql_fetch_field(result); length=field->max_length; printf(" %-*s|",length+1,cur[i] ? (char*) cur[i] : ""); } putchar('\n'); }
./CrossVul/dataset_final_sorted/CWE-284/c/good_1571_8
crossvul-cpp_data_good_4813_0
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % IIIIIIIIII PPPPPPPP LL % % II PP PP LL % % II PP PP LL % % II PP PP LL % % II PPPPPPPP LL % % II PP LL % % II PP LL % % IIIIIIIIII PP LLLLLLLL % % % % % % % % Read/Write Scanalytics IPLab Image Format % % Sean Burke % % 2008.05.07 % % v 0.9 % % % % Copyright 1999-2017 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % http://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. */ #include "magick/studio.h" #include "magick/blob.h" #include "magick/blob-private.h" #include "magick/cache.h" #include "magick/colorspace.h" #include "magick/colorspace-private.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/image.h" #include "magick/image-private.h" #include "magick/list.h" #include "magick/magick.h" #include "magick/memory_.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/option.h" #include "magick/pixel-accessor.h" #include "magick/property.h" #include "magick/quantum-private.h" #include "magick/static.h" #include "magick/string_.h" #include "magick/module.h" /* Typedef declarations. 
*/ typedef struct _IPLInfo { unsigned int tag, size, time, z, width, height, colors, depth, byteType; } IPLInfo; static MagickBooleanType WriteIPLImage(const ImageInfo *,Image *); /* static void increase (void *pixel, int byteType){ switch(byteType){ case 0:(*((unsigned char *) pixel))++; break; case 1:(*((signed int *) pixel))++; break; case 2:(*((unsigned int *) pixel))++; break; case 3:(*((signed long *) pixel))++; break; default:(*((unsigned int *) pixel))++; break; } } */ /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I s I P L % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IsIPL() returns MagickTrue if the image format type, identified by the % magick string, is IPL. % % The format of the IsIPL method is: % % MagickBooleanType IsIPL(const unsigned char *magick,const size_t length) % % A description of each parameter follows: % % o magick: compare image format pattern against these bytes. % % o length: Specifies the length of the magick string. % */ static MagickBooleanType IsIPL(const unsigned char *magick,const size_t length) { if (length < 4) return(MagickFalse); if (LocaleNCompare((const char *) magick,"data",4) == 0) return(MagickTrue); return(MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e a d I P L I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ReadIPLImage() reads a Scanalytics IPLab image file and returns it. It % allocates the memory necessary for the new Image structure and returns a % pointer to the new image. % % According to the IPLab spec, the data is blocked out in five dimensions: % { t, z, c, y, x }. When we return the image, the latter three are folded % into the standard "Image" structure. The "scenes" (image_info->scene) % correspond to the order: { {t0,z0}, {t0, z1}, ..., {t1,z0}, {t1,z1}... 
} % The number of scenes is t*z. % % The format of the ReadIPLImage method is: % % Image *ReadIPLImage(const ImageInfo *image_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o image_info: The image info. % % o exception: return any errors or warnings in this structure. % */ static void SetHeaderFromIPL(Image *image, IPLInfo *ipl){ image->columns = ipl->width; image->rows = ipl->height; image->depth = ipl->depth; image->x_resolution = 1; image->y_resolution = 1; } static Image *ReadIPLImage(const ImageInfo *image_info,ExceptionInfo *exception) { /* Declare variables. */ Image *image; MagickBooleanType status; register PixelPacket *q; unsigned char magick[12], *pixels; ssize_t count; ssize_t y; size_t t_count=0; size_t length; IPLInfo ipl_info; QuantumFormatType quantum_format; QuantumInfo *quantum_info; QuantumType quantum_type; /* Open Image */ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickSignature); if ( image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image_info->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); image=AcquireImage(image_info); status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception); if (status == MagickFalse) { image=DestroyImageList(image); return((Image *) NULL); } /* Read IPL image */ /* Determine endianness If we get back "iiii", we have LSB,"mmmm", MSB */ count=ReadBlob(image,4,magick); (void) count; if((LocaleNCompare((char *) magick,"iiii",4) == 0)) image->endian=LSBEndian; else{ if((LocaleNCompare((char *) magick,"mmmm",4) == 0)) image->endian=MSBEndian; else{ ThrowReaderException(CorruptImageError, "ImproperImageHeader"); } } /* Skip o'er the next 8 bytes (garbage) */ count=ReadBlob(image, 8, magick); /* Excellent, now we read the header unimpeded. 
*/ count=ReadBlob(image,4,magick); if((LocaleNCompare((char *) magick,"data",4) != 0)) ThrowReaderException(CorruptImageError, "ImproperImageHeader"); ipl_info.size=ReadBlobLong(image); ipl_info.width=ReadBlobLong(image); ipl_info.height=ReadBlobLong(image); if((ipl_info.width == 0UL) || (ipl_info.height == 0UL)) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); ipl_info.colors=ReadBlobLong(image); if(ipl_info.colors == 3){ SetImageColorspace(image,sRGBColorspace);} else { image->colorspace = GRAYColorspace; } ipl_info.z=ReadBlobLong(image); ipl_info.time=ReadBlobLong(image); ipl_info.byteType=ReadBlobLong(image); /* Initialize Quantum Info */ switch (ipl_info.byteType) { case 0: ipl_info.depth=8; quantum_format = UnsignedQuantumFormat; break; case 1: ipl_info.depth=16; quantum_format = SignedQuantumFormat; break; case 2: ipl_info.depth=16; quantum_format = UnsignedQuantumFormat; break; case 3: ipl_info.depth=32; quantum_format = SignedQuantumFormat; break; case 4: ipl_info.depth=32; quantum_format = FloatingPointQuantumFormat; break; case 5: ipl_info.depth=8; quantum_format = UnsignedQuantumFormat; break; case 6: ipl_info.depth=16; quantum_format = UnsignedQuantumFormat; break; case 10: ipl_info.depth=64; quantum_format = FloatingPointQuantumFormat; break; default: ipl_info.depth=16; quantum_format = UnsignedQuantumFormat; break; } /* Set number of scenes of image */ SetHeaderFromIPL(image, &ipl_info); /* Thats all we need if we are pinging. 
*/ if (image_info->ping != MagickFalse) { (void) CloseBlob(image); return(GetFirstImageInList(image)); } length=image->columns; quantum_type=GetQuantumType(image,exception); do { SetHeaderFromIPL(image, &ipl_info); if ((image_info->ping != MagickFalse) && (image_info->number_scenes != 0)) if (image->scene >= (image_info->scene+image_info->number_scenes-1)) break; status=SetImageExtent(image,image->columns,image->rows); if (status == MagickFalse) { InheritException(exception,&image->exception); return(DestroyImageList(image)); } /* printf("Length: %.20g, Memory size: %.20g\n", (double) length,(double) image->depth); */ quantum_info=AcquireQuantumInfo(image_info,image); if (quantum_info == (QuantumInfo *) NULL) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); status=SetQuantumFormat(image,quantum_info,quantum_format); if (status == MagickFalse) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); pixels=GetQuantumPixels(quantum_info); if(image->columns != ipl_info.width){ /* printf("Columns not set correctly! 
Wanted: %.20g, got: %.20g\n", (double) ipl_info.width, (double) image->columns); */ } /* Covert IPL binary to pixel packets */ if(ipl_info.colors == 1){ for(y = 0; y < (ssize_t) image->rows; y++){ (void) ReadBlob(image, length*image->depth/8, pixels); q=QueueAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) break; (void) ImportQuantumPixels(image,(CacheView *) NULL,quantum_info, GrayQuantum,pixels,exception); if (SyncAuthenticPixels(image,exception) == MagickFalse) break; } } else{ for(y = 0; y < (ssize_t) image->rows; y++){ (void) ReadBlob(image, length*image->depth/8, pixels); q=QueueAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) break; (void) ImportQuantumPixels(image,(CacheView *) NULL,quantum_info, RedQuantum,pixels,exception); if (SyncAuthenticPixels(image,exception) == MagickFalse) break; } for(y = 0; y < (ssize_t) image->rows; y++){ (void) ReadBlob(image, length*image->depth/8, pixels); q=QueueAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) break; (void) ImportQuantumPixels(image,(CacheView *) NULL,quantum_info, GreenQuantum,pixels,exception); if (SyncAuthenticPixels(image,exception) == MagickFalse) break; } for(y = 0; y < (ssize_t) image->rows; y++){ (void) ReadBlob(image, length*image->depth/8, pixels); q=QueueAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) break; (void) ImportQuantumPixels(image,(CacheView *) NULL,quantum_info, BlueQuantum,pixels,exception); if (SyncAuthenticPixels(image,exception) == MagickFalse) break; } } SetQuantumImageType(image,quantum_type); t_count++; quantum_info = DestroyQuantumInfo(quantum_info); if (EOFBlob(image) != MagickFalse) { ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile", image->filename); break; } if(t_count < ipl_info.z * ipl_info.time){ /* Proceed to next image. 
*/ AcquireNextImage(image_info, image); if (GetNextImageInList(image) == (Image *) NULL) { image=DestroyImageList(image); return((Image *) NULL); } image=SyncNextImageInList(image); status=SetImageProgress(image,LoadImagesTag,TellBlob(image), GetBlobSize(image)); if (status == MagickFalse) break; } } while (t_count < ipl_info.z*ipl_info.time); CloseBlob(image); return(GetFirstImageInList(image)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e g i s t e r I P L I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RegisterIPLImage() add attributes for the Scanalytics IPL image format to the % list of supported formats. % % */ ModuleExport size_t RegisterIPLImage(void) { MagickInfo *entry; entry=SetMagickInfo("IPL"); entry->decoder=(DecodeImageHandler *) ReadIPLImage; entry->encoder=(EncodeImageHandler *) WriteIPLImage; entry->magick=(IsImageFormatHandler *) IsIPL; entry->adjoin=MagickTrue; entry->description=ConstantString("IPL Image Sequence"); entry->module=ConstantString("IPL"); entry->endian_support=MagickTrue; (void) RegisterMagickInfo(entry); return(MagickImageCoderSignature); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % U n r e g i s t e r I P L I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % UnregisterIPLImage() removes format registrations made by the % IPL module from the list of supported formats. 
%
%  The format of the UnregisterIPLImage method is:
%
%      UnregisterIPLImage(void)
%
*/
ModuleExport void UnregisterIPLImage(void)
{
  /* Remove the "IPL" coder entry installed by RegisterIPLImage(). */
  (void) UnregisterMagickInfo("IPL");
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   W r i t e I P L I m a g e                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  WriteIPLImage() writes an image to a file in Scanalytics IPLabimage format.
%
%  The format of the WriteIPLImage method is:
%
%      MagickBooleanType WriteIPLImage(const ImageInfo *image_info,Image *image)
%
%  A description of each parameter follows.
%
%    o image_info: The image info.
%
%    o image:  The image.
%
*/
static MagickBooleanType WriteIPLImage(const ImageInfo *image_info,Image *image)
{
  ExceptionInfo
    *exception;

  IPLInfo
    ipl_info;

  MagickBooleanType
    status;

  MagickOffsetType
    scene;

  register const PixelPacket
    *p;

  QuantumInfo
    *quantum_info;

  ssize_t
    y;

  unsigned char
    *pixels;

  /*
    Open output image file.
  */
  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickSignature);
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  status=OpenBlob(image_info,image,WriteBinaryBlobMode,&image->exception);
  if (status == MagickFalse)
    return(status);
  scene=0;
  /*
    A single QuantumInfo is shared by every frame in the sequence; it must
    stay alive until the adjoin loop below has completed.
  */
  quantum_info=AcquireQuantumInfo(image_info, image);
  if (quantum_info == (QuantumInfo *) NULL)
    ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed");
  if ((quantum_info->format == UndefinedQuantumFormat) &&
      (IsHighDynamicRangeImage(image,&image->exception) != MagickFalse))
    SetQuantumFormat(image,quantum_info,FloatingPointQuantumFormat);
  /*
    Map the quantum depth/format onto the IPL header's byteType field.
  */
  switch(quantum_info->depth){
    case 8:
      ipl_info.byteType = 0;
      break;
    case 16:
      if(quantum_info->format == SignedQuantumFormat){
        ipl_info.byteType = 2;
      }
      else{
        ipl_info.byteType = 1;
      }
      break;
    case 32:
      if(quantum_info->format == FloatingPointQuantumFormat){
        ipl_info.byteType = 3;
      }
      else{
        ipl_info.byteType = 4;
      }
      break;
    case 64:
      ipl_info.byteType = 10;
      break;
    default:
      ipl_info.byteType = 2;
      break;
  }
  ipl_info.z = (unsigned int) GetImageListLength(image);
  /* There is no current method for detecting whether we have T or Z stacks */
  ipl_info.time = 1;
  ipl_info.width = (unsigned int) image->columns;
  ipl_info.height = (unsigned int) image->rows;
  (void) TransformImageColorspace(image,sRGBColorspace);
  if(IssRGBCompatibleColorspace(image->colorspace) != MagickFalse)
  {
    ipl_info.colors = 3;
  }
  else{
    ipl_info.colors = 1;
  }
  /*
    28 header bytes plus the raw pixel payload for every frame.
    NOTE(review): the payload term uses image->depth while byteType above is
    derived from quantum_info->depth -- confirm the two always agree.
  */
  ipl_info.size = (unsigned int) (28 +
    ((image->depth)/8)*ipl_info.height*ipl_info.width*ipl_info.colors*ipl_info.z);
  /* Ok!  Calculations are done.  Lets write this puppy down! */
  /*
    Write IPL header.
  */
  /*
    Shockingly (maybe not if you have used IPLab), IPLab itself CANNOT read
    MSBEndian files!  The reader above can, but they cannot.  For
    compatability reasons, I will leave the code in here, but it is all but
    useless if you want to use IPLab.
  */
  if(image_info->endian == MSBEndian)
    (void) WriteBlob(image, 4, (const unsigned char *) "mmmm");
  else{
    image->endian = LSBEndian;
    (void) WriteBlob(image, 4, (const unsigned char *) "iiii");
  }
  (void) WriteBlobLong(image, 4);
  (void) WriteBlob(image, 4, (const unsigned char *) "100f");
  (void) WriteBlob(image, 4, (const unsigned char *) "data");
  (void) WriteBlobLong(image, ipl_info.size);
  (void) WriteBlobLong(image, ipl_info.width);
  (void) WriteBlobLong(image, ipl_info.height);
  (void) WriteBlobLong(image, ipl_info.colors);
  if(image_info->adjoin == MagickFalse)
    (void) WriteBlobLong(image, 1);
  else
    (void) WriteBlobLong(image, ipl_info.z);
  (void) WriteBlobLong(image, ipl_info.time);
  (void) WriteBlobLong(image, ipl_info.byteType);
  exception=(&image->exception);
  do
  {
    /*
      Convert MIFF to IPL raster pixels.
    */
    pixels=GetQuantumPixels(quantum_info);
    if(ipl_info.colors == 1){
      /* Gray frame */
      for(y = 0; y < (ssize_t) ipl_info.height; y++){
        p=GetAuthenticPixels(image,0,y,image->columns,1,exception);
        if (p == (PixelPacket *) NULL)
          break;
        (void) ExportQuantumPixels(image,(const CacheView *) NULL,
          quantum_info, GrayQuantum, pixels,&image->exception);
        (void) WriteBlob(image, image->columns*image->depth/8, pixels);
      }
    }
    if(ipl_info.colors == 3){
      /* Red frame */
      for(y = 0; y < (ssize_t) ipl_info.height; y++){
        p=GetAuthenticPixels(image,0,y,image->columns,1,exception);
        if (p == (PixelPacket *) NULL)
          break;
        (void) ExportQuantumPixels(image,(const CacheView *) NULL,
          quantum_info, RedQuantum, pixels,&image->exception);
        (void) WriteBlob(image, image->columns*image->depth/8, pixels);
      }
      /* Green frame */
      for(y = 0; y < (ssize_t) ipl_info.height; y++){
        p=GetVirtualPixels(image,0,y,image->columns,1,&image->exception);
        if (p == (PixelPacket *) NULL)
          break;
        (void) ExportQuantumPixels(image,(const CacheView *) NULL,
          quantum_info, GreenQuantum, pixels,&image->exception);
        (void) WriteBlob(image, image->columns*image->depth/8, pixels);
      }
      /* Blue frame */
      for(y = 0; y < (ssize_t) ipl_info.height; y++){
        p=GetVirtualPixels(image,0,y,image->columns,1,&image->exception);
        if (p == (PixelPacket *) NULL)
          break;
        (void) ExportQuantumPixels(image,(const CacheView *) NULL,
          quantum_info, BlueQuantum, pixels,&image->exception);
        (void) WriteBlob(image, image->columns*image->depth/8, pixels);
        if (image->previous == (Image *) NULL)
          {
            status=SetImageProgress(image,SaveImageTag,(MagickOffsetType) y,
              image->rows);
            if (status == MagickFalse)
              break;
          }
      }
    }
    if (GetNextImageInList(image) == (Image *) NULL)
      break;
    image=SyncNextImageInList(image);
    status=SetImageProgress(image,SaveImagesTag,scene++,
      GetImageListLength(image));
    if (status == MagickFalse)
      break;
  } while (image_info->adjoin != MagickFalse);
  /*
    BUG FIX: the previous code destroyed quantum_info inside the adjoin loop,
    so the second and later frames of a multi-image write dereferenced freed
    memory via GetQuantumPixels()/ExportQuantumPixels().  Release it exactly
    once, after all frames have been written (every break path reaches here,
    so there is no leak either).
  */
  quantum_info=DestroyQuantumInfo(quantum_info);
  (void) WriteBlob(image, 4, (const unsigned char *) "fini");
  (void) WriteBlobLong(image, 0);
  CloseBlob(image);
  return(MagickTrue);
}
./CrossVul/dataset_final_sorted/CWE-284/c/good_4813_0
crossvul-cpp_data_good_1571_6
/* Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ /* mysqldump.c - Dump a tables contents and format to an ASCII file ** ** The author's original notes follow :- ** ** AUTHOR: Igor Romanenko (igor@frog.kiev.ua) ** DATE: December 3, 1994 ** WARRANTY: None, expressed, impressed, implied ** or other ** STATUS: Public domain ** Adapted and optimized for MySQL by ** Michael Widenius, Sinisa Milivojevic, Jani Tolonen ** -w --where added 9/10/98 by Jim Faucette ** slave code by David Saez Padros <david@ols.es> ** master/autocommit code by Brian Aker <brian@tangent.org> ** SSL by ** Andrei Errapart <andreie@no.spam.ee> ** Tõnu Samuel <tonu@please.do.not.remove.this.spam.ee> ** XML by Gary Huntress <ghuntress@mediaone.net> 10/10/01, cleaned up ** and adapted to mysqldump 05/11/01 by Jani Tolonen ** Added --single-transaction option 06/06/2002 by Peter Zaitsev ** 10 Jun 2003: SET NAMES and --no-set-names by Alexander Barkov */ #define DUMP_VERSION "10.13" #include <my_global.h> #include <my_sys.h> #include <my_user.h> #include <m_string.h> #include <m_ctype.h> #include <hash.h> #include <stdarg.h> #include "client_priv.h" #include "my_default.h" #include "mysql.h" #include "mysql_version.h" #include "mysqld_error.h" #include <welcome_copyright_notice.h> /* ORACLE_WELCOME_COPYRIGHT_NOTICE */ /* Exit codes */ #define EX_USAGE 
1 #define EX_MYSQLERR 2 #define EX_CONSCHECK 3 #define EX_EOM 4 #define EX_EOF 5 /* ferror for output file was got */ #define EX_ILLEGAL_TABLE 6 /* index into 'show fields from table' */ #define SHOW_FIELDNAME 0 #define SHOW_TYPE 1 #define SHOW_NULL 2 #define SHOW_DEFAULT 4 #define SHOW_EXTRA 5 /* Size of buffer for dump's select query */ #define QUERY_LENGTH 1536 /* Size of comment buffer. */ #define COMMENT_LENGTH 2048 /* ignore table flags */ #define IGNORE_NONE 0x00 /* no ignore */ #define IGNORE_DATA 0x01 /* don't dump data for this table */ static void add_load_option(DYNAMIC_STRING *str, const char *option, const char *option_value); static ulong find_set(TYPELIB *lib, const char *x, uint length, char **err_pos, uint *err_len); static char *alloc_query_str(ulong size); static void field_escape(DYNAMIC_STRING* in, const char *from); static my_bool verbose= 0, opt_no_create_info= 0, opt_no_data= 0, quick= 1, extended_insert= 1, lock_tables= 1, opt_force= 0, flush_logs= 0, flush_privileges= 0, opt_drop=1,opt_keywords=0,opt_lock=1,opt_compress=0, create_options=1,opt_quoted=0,opt_databases=0, opt_alldbs=0,opt_create_db=0,opt_lock_all_tables=0, opt_set_charset=0, opt_dump_date=1, opt_autocommit=0,opt_disable_keys=1,opt_xml=0, opt_delete_master_logs=0, tty_password=0, opt_single_transaction=0, opt_comments= 0, opt_compact= 0, opt_hex_blob=0, opt_order_by_primary=0, opt_ignore=0, opt_complete_insert= 0, opt_drop_database= 0, opt_replace_into= 0, opt_dump_triggers= 0, opt_routines=0, opt_tz_utc=1, opt_slave_apply= 0, opt_include_master_host_port= 0, opt_events= 0, opt_comments_used= 0, opt_alltspcs=0, opt_notspcs= 0, opt_drop_trigger= 0; static my_bool insert_pat_inited= 0, debug_info_flag= 0, debug_check_flag= 0; static ulong opt_max_allowed_packet, opt_net_buffer_length; static MYSQL mysql_connection,*mysql=0; static DYNAMIC_STRING insert_pat; static char *opt_password=0,*current_user=0, *current_host=0,*path=0,*fields_terminated=0, *lines_terminated=0, 
*enclosed=0, *opt_enclosed=0, *escaped=0, *where=0, *order_by=0, *opt_compatible_mode_str= 0, *err_ptr= 0, *opt_ignore_error= 0, *log_error_file= NULL; static char **defaults_argv= 0; static char compatible_mode_normal_str[255]; /* Server supports character_set_results session variable? */ static my_bool server_supports_switching_charsets= TRUE; static ulong opt_compatible_mode= 0; #define MYSQL_OPT_MASTER_DATA_EFFECTIVE_SQL 1 #define MYSQL_OPT_MASTER_DATA_COMMENTED_SQL 2 #define MYSQL_OPT_SLAVE_DATA_EFFECTIVE_SQL 1 #define MYSQL_OPT_SLAVE_DATA_COMMENTED_SQL 2 static uint opt_mysql_port= 0, opt_master_data; static uint opt_slave_data; static uint my_end_arg; static char * opt_mysql_unix_port=0; static char *opt_bind_addr = NULL; static int first_error=0; static DYNAMIC_STRING extended_row; #include <sslopt-vars.h> FILE *md_result_file= 0; FILE *stderror_file=0; const char *set_gtid_purged_mode_names[]= {"OFF", "AUTO", "ON", NullS}; static TYPELIB set_gtid_purged_mode_typelib= {array_elements(set_gtid_purged_mode_names) -1, "", set_gtid_purged_mode_names, NULL}; static enum enum_set_gtid_purged_mode { SET_GTID_PURGED_OFF= 0, SET_GTID_PURGED_AUTO =1, SET_GTID_PURGED_ON=2 } opt_set_gtid_purged_mode= SET_GTID_PURGED_AUTO; #if defined (_WIN32) && !defined (EMBEDDED_LIBRARY) static char *shared_memory_base_name=0; #endif static uint opt_protocol= 0; static char *opt_plugin_dir= 0, *opt_default_auth= 0; DYNAMIC_ARRAY ignore_error; static int parse_ignore_error(); /* Dynamic_string wrapper functions. In this file use these wrappers, they will terminate the process if there is an allocation failure. 
*/ static void init_dynamic_string_checked(DYNAMIC_STRING *str, const char *init_str, uint init_alloc, uint alloc_increment); static void dynstr_append_checked(DYNAMIC_STRING* dest, const char* src); static void dynstr_set_checked(DYNAMIC_STRING *str, const char *init_str); static void dynstr_append_mem_checked(DYNAMIC_STRING *str, const char *append, uint length); static void dynstr_realloc_checked(DYNAMIC_STRING *str, ulong additional_size); /* Constant for detection of default value of default_charset. If default_charset is equal to mysql_universal_client_charset, then it is the default value which assigned at the very beginning of main(). */ static const char *mysql_universal_client_charset= MYSQL_UNIVERSAL_CLIENT_CHARSET; static char *default_charset; static CHARSET_INFO *charset_info= &my_charset_latin1; const char *default_dbug_option="d:t:o,/tmp/mysqldump.trace"; /* have we seen any VIEWs during table scanning? */ my_bool seen_views= 0; const char *compatible_mode_names[]= { "MYSQL323", "MYSQL40", "POSTGRESQL", "ORACLE", "MSSQL", "DB2", "MAXDB", "NO_KEY_OPTIONS", "NO_TABLE_OPTIONS", "NO_FIELD_OPTIONS", "ANSI", NullS }; #define MASK_ANSI_QUOTES \ (\ (1<<2) | /* POSTGRESQL */\ (1<<3) | /* ORACLE */\ (1<<4) | /* MSSQL */\ (1<<5) | /* DB2 */\ (1<<6) | /* MAXDB */\ (1<<10) /* ANSI */\ ) TYPELIB compatible_mode_typelib= {array_elements(compatible_mode_names) - 1, "", compatible_mode_names, NULL}; HASH ignore_table; static struct my_option my_long_options[] = { {"all-databases", 'A', "Dump all the databases. 
This will be same as --databases with all databases selected.", &opt_alldbs, &opt_alldbs, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"all-tablespaces", 'Y', "Dump all the tablespaces.", &opt_alltspcs, &opt_alltspcs, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"no-tablespaces", 'y', "Do not dump any tablespace information.", &opt_notspcs, &opt_notspcs, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"add-drop-database", OPT_DROP_DATABASE, "Add a DROP DATABASE before each create.", &opt_drop_database, &opt_drop_database, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"add-drop-table", OPT_DROP, "Add a DROP TABLE before each create.", &opt_drop, &opt_drop, 0, GET_BOOL, NO_ARG, 1, 0, 0, 0, 0, 0}, {"add-drop-trigger", 0, "Add a DROP TRIGGER before each create.", &opt_drop_trigger, &opt_drop_trigger, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"add-locks", OPT_LOCKS, "Add locks around INSERT statements.", &opt_lock, &opt_lock, 0, GET_BOOL, NO_ARG, 1, 0, 0, 0, 0, 0}, {"allow-keywords", OPT_KEYWORDS, "Allow creation of column names that are keywords.", &opt_keywords, &opt_keywords, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"apply-slave-statements", OPT_MYSQLDUMP_SLAVE_APPLY, "Adds 'STOP SLAVE' prior to 'CHANGE MASTER' and 'START SLAVE' to bottom of dump.", &opt_slave_apply, &opt_slave_apply, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"bind-address", 0, "IP address to bind to.", (uchar**) &opt_bind_addr, (uchar**) &opt_bind_addr, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"character-sets-dir", OPT_CHARSETS_DIR, "Directory for character set files.", &charsets_dir, &charsets_dir, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"comments", 'i', "Write additional information.", &opt_comments, &opt_comments, 0, GET_BOOL, NO_ARG, 1, 0, 0, 0, 0, 0}, {"compatible", OPT_COMPATIBLE, "Change the dump to be compatible with a given mode. By default tables " "are dumped in a format optimized for MySQL. 
Legal modes are: ansi, " "mysql323, mysql40, postgresql, oracle, mssql, db2, maxdb, no_key_options, " "no_table_options, no_field_options. One can use several modes separated " "by commas. Note: Requires MySQL server version 4.1.0 or higher. " "This option is ignored with earlier server versions.", &opt_compatible_mode_str, &opt_compatible_mode_str, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"compact", OPT_COMPACT, "Give less verbose output (useful for debugging). Disables structure " "comments and header/footer constructs. Enables options --skip-add-" "drop-table --skip-add-locks --skip-comments --skip-disable-keys " "--skip-set-charset.", &opt_compact, &opt_compact, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"complete-insert", 'c', "Use complete insert statements.", &opt_complete_insert, &opt_complete_insert, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"compress", 'C', "Use compression in server/client protocol.", &opt_compress, &opt_compress, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"create-options", 'a', "Include all MySQL specific create options.", &create_options, &create_options, 0, GET_BOOL, NO_ARG, 1, 0, 0, 0, 0, 0}, {"databases", 'B', "Dump several databases. Note the difference in usage; in this case no tables are given. All name arguments are regarded as database names. 'USE db_name;' will be included in the output.", &opt_databases, &opt_databases, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, #ifdef DBUG_OFF {"debug", '#', "This is a non-debug version. 
Catch this and exit.", 0,0, 0, GET_DISABLED, OPT_ARG, 0, 0, 0, 0, 0, 0}, #else {"debug", '#', "Output debug log.", &default_dbug_option, &default_dbug_option, 0, GET_STR, OPT_ARG, 0, 0, 0, 0, 0, 0}, #endif {"debug-check", OPT_DEBUG_CHECK, "Check memory and open file usage at exit.", &debug_check_flag, &debug_check_flag, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"debug-info", OPT_DEBUG_INFO, "Print some debug info at exit.", &debug_info_flag, &debug_info_flag, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"default-character-set", OPT_DEFAULT_CHARSET, "Set the default character set.", &default_charset, &default_charset, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"delete-master-logs", OPT_DELETE_MASTER_LOGS, "Delete logs on master after backup. This automatically enables --master-data.", &opt_delete_master_logs, &opt_delete_master_logs, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"disable-keys", 'K', "'/*!40000 ALTER TABLE tb_name DISABLE KEYS */; and '/*!40000 ALTER " "TABLE tb_name ENABLE KEYS */; will be put in the output.", &opt_disable_keys, &opt_disable_keys, 0, GET_BOOL, NO_ARG, 1, 0, 0, 0, 0, 0}, {"dump-slave", OPT_MYSQLDUMP_SLAVE_DATA, "This causes the binary log position and filename of the master to be " "appended to the dumped data output. Setting the value to 1, will print" "it as a CHANGE MASTER command in the dumped data output; if equal" " to 2, that command will be prefixed with a comment symbol. " "This option will turn --lock-all-tables on, unless " "--single-transaction is specified too (in which case a " "global read lock is only taken a short time at the beginning of the dump " "- don't forget to read about --single-transaction below). In all cases " "any action on logs will happen at the exact moment of the dump." 
"Option automatically turns --lock-tables off.", &opt_slave_data, &opt_slave_data, 0, GET_UINT, OPT_ARG, 0, 0, MYSQL_OPT_SLAVE_DATA_COMMENTED_SQL, 0, 0, 0}, {"events", 'E', "Dump events.", &opt_events, &opt_events, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"extended-insert", 'e', "Use multiple-row INSERT syntax that include several VALUES lists.", &extended_insert, &extended_insert, 0, GET_BOOL, NO_ARG, 1, 0, 0, 0, 0, 0}, {"fields-terminated-by", OPT_FTB, "Fields in the output file are terminated by the given string.", &fields_terminated, &fields_terminated, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"fields-enclosed-by", OPT_ENC, "Fields in the output file are enclosed by the given character.", &enclosed, &enclosed, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0 ,0, 0}, {"fields-optionally-enclosed-by", OPT_O_ENC, "Fields in the output file are optionally enclosed by the given character.", &opt_enclosed, &opt_enclosed, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0 ,0, 0}, {"fields-escaped-by", OPT_ESC, "Fields in the output file are escaped by the given character.", &escaped, &escaped, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"flush-logs", 'F', "Flush logs file in server before starting dump. " "Note that if you dump many databases at once (using the option " "--databases= or --all-databases), the logs will be flushed for " "each database dumped. The exception is when using --lock-all-tables " "or --master-data: " "in this case the logs will be flushed only once, corresponding " "to the moment all tables are locked. So if you want your dump and " "the log flush to happen at the same exact moment you should use " "--lock-all-tables or --master-data with --flush-logs.", &flush_logs, &flush_logs, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"flush-privileges", OPT_ESC, "Emit a FLUSH PRIVILEGES statement " "after dumping the mysql database. 
This option should be used any " "time the dump contains the mysql database and any other database " "that depends on the data in the mysql database for proper restore. ", &flush_privileges, &flush_privileges, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"force", 'f', "Continue even if we get an SQL error.", &opt_force, &opt_force, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"help", '?', "Display this help message and exit.", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, {"hex-blob", OPT_HEXBLOB, "Dump binary strings (BINARY, " "VARBINARY, BLOB) in hexadecimal format.", &opt_hex_blob, &opt_hex_blob, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"host", 'h', "Connect to host.", &current_host, &current_host, 0, GET_STR_ALLOC, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"ignore-error", OPT_MYSQLDUMP_IGNORE_ERROR, "A comma-separated list of " "error numbers to be ignored if encountered during dump.", &opt_ignore_error, &opt_ignore_error, 0, GET_STR_ALLOC, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"ignore-table", OPT_IGNORE_TABLE, "Do not dump the specified table. To specify more than one table to ignore, " "use the directive multiple times, once for each table. Each table must " "be specified with both database and table names, e.g., " "--ignore-table=database.table.", 0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"include-master-host-port", OPT_MYSQLDUMP_INCLUDE_MASTER_HOST_PORT, "Adds 'MASTER_HOST=<host>, MASTER_PORT=<port>' to 'CHANGE MASTER TO..' " "in dump produced with --dump-slave.", &opt_include_master_host_port, &opt_include_master_host_port, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"insert-ignore", OPT_INSERT_IGNORE, "Insert rows with INSERT IGNORE.", &opt_ignore, &opt_ignore, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"lines-terminated-by", OPT_LTB, "Lines in the output file are terminated by the given string.", &lines_terminated, &lines_terminated, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"lock-all-tables", 'x', "Locks all tables across all databases. 
This " "is achieved by taking a global read lock for the duration of the whole " "dump. Automatically turns --single-transaction and --lock-tables off.", &opt_lock_all_tables, &opt_lock_all_tables, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"lock-tables", 'l', "Lock all tables for read.", &lock_tables, &lock_tables, 0, GET_BOOL, NO_ARG, 1, 0, 0, 0, 0, 0}, {"log-error", OPT_ERROR_LOG_FILE, "Append warnings and errors to given file.", &log_error_file, &log_error_file, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"master-data", OPT_MASTER_DATA, "This causes the binary log position and filename to be appended to the " "output. If equal to 1, will print it as a CHANGE MASTER command; if equal" " to 2, that command will be prefixed with a comment symbol. " "This option will turn --lock-all-tables on, unless " "--single-transaction is specified too (in which case a " "global read lock is only taken a short time at the beginning of the dump; " "don't forget to read about --single-transaction below). In all cases, " "any action on logs will happen at the exact moment of the dump. " "Option automatically turns --lock-tables off.", &opt_master_data, &opt_master_data, 0, GET_UINT, OPT_ARG, 0, 0, MYSQL_OPT_MASTER_DATA_COMMENTED_SQL, 0, 0, 0}, {"max_allowed_packet", OPT_MAX_ALLOWED_PACKET, "The maximum packet length to send to or receive from server.", &opt_max_allowed_packet, &opt_max_allowed_packet, 0, GET_ULONG, REQUIRED_ARG, 24*1024*1024, 4096, (longlong) 2L*1024L*1024L*1024L, MALLOC_OVERHEAD, 1024, 0}, {"net_buffer_length", OPT_NET_BUFFER_LENGTH, "The buffer size for TCP/IP and socket communication.", &opt_net_buffer_length, &opt_net_buffer_length, 0, GET_ULONG, REQUIRED_ARG, 1024*1024L-1025, 4096, 16*1024L*1024L, MALLOC_OVERHEAD-1024, 1024, 0}, {"no-autocommit", OPT_AUTOCOMMIT, "Wrap tables with autocommit/commit statements.", &opt_autocommit, &opt_autocommit, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"no-create-db", 'n', "Suppress the CREATE DATABASE ... 
IF EXISTS statement that normally is " "output for each dumped database if --all-databases or --databases is " "given.", &opt_create_db, &opt_create_db, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"no-create-info", 't', "Don't write table creation info.", &opt_no_create_info, &opt_no_create_info, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"no-data", 'd', "No row information.", &opt_no_data, &opt_no_data, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"no-set-names", 'N', "Same as --skip-set-charset.", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, {"opt", OPT_OPTIMIZE, "Same as --add-drop-table, --add-locks, --create-options, --quick, --extended-insert, --lock-tables, --set-charset, and --disable-keys. Enabled by default, disable with --skip-opt.", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, {"order-by-primary", OPT_ORDER_BY_PRIMARY, "Sorts each table's rows by primary key, or first unique key, if such a key exists. Useful when dumping a MyISAM table to be loaded into an InnoDB table, but will make the dump itself take considerably longer.", &opt_order_by_primary, &opt_order_by_primary, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"password", 'p', "Password to use when connecting to server. 
If password is not given it's solicited on the tty.", 0, 0, 0, GET_PASSWORD, OPT_ARG, 0, 0, 0, 0, 0, 0}, #ifdef _WIN32 {"pipe", 'W', "Use named pipes to connect to server.", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, #endif {"port", 'P', "Port number to use for connection.", &opt_mysql_port, &opt_mysql_port, 0, GET_UINT, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"protocol", OPT_MYSQL_PROTOCOL, "The protocol to use for connection (tcp, socket, pipe, memory).", 0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"quick", 'q', "Don't buffer query, dump directly to stdout.", &quick, &quick, 0, GET_BOOL, NO_ARG, 1, 0, 0, 0, 0, 0}, {"quote-names",'Q', "Quote table and column names with backticks (`).", &opt_quoted, &opt_quoted, 0, GET_BOOL, NO_ARG, 1, 0, 0, 0, 0, 0}, {"replace", OPT_MYSQL_REPLACE_INTO, "Use REPLACE INTO instead of INSERT INTO.", &opt_replace_into, &opt_replace_into, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"result-file", 'r', "Direct output to a given file. This option should be used in systems " "(e.g., DOS, Windows) that use carriage-return linefeed pairs (\\r\\n) " "to separate text lines. This option ensures that only a single newline " "is used.", 0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"routines", 'R', "Dump stored routines (functions and procedures).", &opt_routines, &opt_routines, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"set-charset", OPT_SET_CHARSET, "Add 'SET NAMES default_character_set' to the output.", &opt_set_charset, &opt_set_charset, 0, GET_BOOL, NO_ARG, 1, 0, 0, 0, 0, 0}, {"set-gtid-purged", OPT_SET_GTID_PURGED, "Add 'SET @@GLOBAL.GTID_PURGED' to the output. Possible values for " "this option are ON, OFF and AUTO. If ON is used and GTIDs " "are not enabled on the server, an error is generated. If OFF is " "used, this option does nothing. If AUTO is used and GTIDs are enabled " "on the server, 'SET @@GLOBAL.GTID_PURGED' is added to the output. " "If GTIDs are disabled, AUTO does nothing. 
Default is AUTO.", 0, 0, 0, GET_STR, OPT_ARG, 0, 0, 0, 0, 0, 0}, #if defined (_WIN32) && !defined (EMBEDDED_LIBRARY) {"shared-memory-base-name", OPT_SHARED_MEMORY_BASE_NAME, "Base name of shared memory.", &shared_memory_base_name, &shared_memory_base_name, 0, GET_STR_ALLOC, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, #endif /* Note that the combination --single-transaction --master-data will give bullet-proof binlog position only if server >=4.1.3. That's the old "FLUSH TABLES WITH READ LOCK does not block commit" fixed bug. */ {"single-transaction", OPT_TRANSACTION, "Creates a consistent snapshot by dumping all tables in a single " "transaction. Works ONLY for tables stored in storage engines which " "support multiversioning (currently only InnoDB does); the dump is NOT " "guaranteed to be consistent for other storage engines. " "While a --single-transaction dump is in process, to ensure a valid " "dump file (correct table contents and binary log position), no other " "connection should use the following statements: ALTER TABLE, DROP " "TABLE, RENAME TABLE, TRUNCATE TABLE, as consistent snapshot is not " "isolated from them. Option automatically turns off --lock-tables.", &opt_single_transaction, &opt_single_transaction, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"dump-date", OPT_DUMP_DATE, "Put a dump date to the end of the output.", &opt_dump_date, &opt_dump_date, 0, GET_BOOL, NO_ARG, 1, 0, 0, 0, 0, 0}, {"skip-opt", OPT_SKIP_OPTIMIZATION, "Disable --opt. Disables --add-drop-table, --add-locks, --create-options, --quick, --extended-insert, --lock-tables, --set-charset, and --disable-keys.", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, {"socket", 'S', "The socket file to use for connection.", &opt_mysql_unix_port, &opt_mysql_unix_port, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, #include <sslopt-longopts.h> {"tab",'T', "Create tab-separated textfile for each table to given path. (Create .sql " "and .txt files.) 
NOTE: This only works if mysqldump is run on the same " "machine as the mysqld server.", &path, &path, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"tables", OPT_TABLES, "Overrides option --databases (-B).", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, {"triggers", OPT_TRIGGERS, "Dump triggers for each dumped table.", &opt_dump_triggers, &opt_dump_triggers, 0, GET_BOOL, NO_ARG, 1, 0, 0, 0, 0, 0}, {"tz-utc", OPT_TZ_UTC, "SET TIME_ZONE='+00:00' at top of dump to allow dumping of TIMESTAMP data when a server has data in different time zones or data is being moved between servers with different time zones.", &opt_tz_utc, &opt_tz_utc, 0, GET_BOOL, NO_ARG, 1, 0, 0, 0, 0, 0}, {"user", 'u', "User for login if not current user.", &current_user, &current_user, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"verbose", 'v', "Print info about the various stages.", &verbose, &verbose, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"version",'V', "Output version information and exit.", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, {"where", 'w', "Dump only selected records. 
Quotes are mandatory.", &where, &where, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"xml", 'X', "Dump a database as well formed XML.", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, {"plugin_dir", OPT_PLUGIN_DIR, "Directory for client-side plugins.", &opt_plugin_dir, &opt_plugin_dir, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"default_auth", OPT_DEFAULT_AUTH, "Default authentication client-side plugin to use.", &opt_default_auth, &opt_default_auth, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0} }; static const char *load_default_groups[]= { "mysqldump","client",0 }; static void maybe_exit(int error); static void die(int error, const char* reason, ...); static void maybe_die(int error, const char* reason, ...); static void write_header(FILE *sql_file, char *db_name); static void print_value(FILE *file, MYSQL_RES *result, MYSQL_ROW row, const char *prefix,const char *name, int string_value); static int dump_selected_tables(char *db, char **table_names, int tables); static int dump_all_tables_in_db(char *db); static int init_dumping_views(char *); static int init_dumping_tables(char *); static int init_dumping(char *, int init_func(char*)); static int dump_databases(char **); static int dump_all_databases(); static char *quote_name(const char *name, char *buff, my_bool force); char check_if_ignore_table(const char *table_name, char *table_type); static char *primary_key_fields(const char *table_name); static my_bool get_view_structure(char *table, char* db); static my_bool dump_all_views_in_db(char *database); static int dump_all_tablespaces(); static int dump_tablespaces_for_tables(char *db, char **table_names, int tables); static int dump_tablespaces_for_databases(char** databases); static int dump_tablespaces(char* ts_where); static void print_comment(FILE *sql_file, my_bool is_error, const char *format, ...); /* Print the supplied message if in verbose mode SYNOPSIS verbose_msg() fmt format specifier ... 
variable number of parameters
*/

/* Write a printf-style message to stderr, but only when --verbose is set. */
static void verbose_msg(const char *fmt, ...)
{
  va_list args;
  DBUG_ENTER("verbose_msg");

  if (!verbose)
    DBUG_VOID_RETURN;

  va_start(args, fmt);
  vfprintf(stderr, fmt, args);
  va_end(args);
  fflush(stderr);                       /* keep progress visible immediately */
  DBUG_VOID_RETURN;
}


/*
  exit with message if ferror(file)

  SYNOPSIS
    check_io()
    file        - checked file
*/
void check_io(FILE *file)
{
  if (ferror(file))
    die(EX_EOF, "Got errno %d on write", errno);
}

/* Print the client version banner. */
static void print_version(void)
{
  printf("%s Ver %s Distrib %s, for %s (%s)\n",my_progname,DUMP_VERSION,
         MYSQL_SERVER_VERSION,SYSTEM_TYPE,MACHINE_TYPE);
} /* print_version */


/* Print the three invocation forms shared by usage() and short_usage(). */
static void short_usage_sub(void)
{
  printf("Usage: %s [OPTIONS] database [tables]\n", my_progname);
  printf("OR %s [OPTIONS] --databases [OPTIONS] DB1 [DB2 DB3...]\n",
         my_progname);
  printf("OR %s [OPTIONS] --all-databases [OPTIONS]\n", my_progname);
}


/* Full --help output: version, copyright, usage forms and all options. */
static void usage(void)
{
  print_version();
  puts(ORACLE_WELCOME_COPYRIGHT_NOTICE("2000"));
  puts("Dumping structure and contents of MySQL databases and tables.");
  short_usage_sub();
  print_defaults("my",load_default_groups);
  my_print_help(my_long_options);
  my_print_variables(my_long_options);
} /* usage */


/* Abbreviated usage message printed on argument errors. */
static void short_usage(void)
{
  short_usage_sub();
  printf("For more options, use %s --help\n", my_progname);
}


/*
  Write the dump prologue to sql_file: XML preamble in --xml mode, else the
  "-- MySQL dump" comment header plus the session-variable save/set
  statements (charset, time zone, checks, SQL mode).
*/
static void write_header(FILE *sql_file, char *db_name)
{
  if (opt_xml)
  {
    fputs("<?xml version=\"1.0\"?>\n", sql_file);
    /*
      Schema reference.  Allows use of xsi:nil for NULL values
      and xsi:type to define an element's data type.
    */
    fputs("<mysqldump ", sql_file);
    fputs("xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\"",
          sql_file);
    fputs(">\n", sql_file);
    check_io(sql_file);
  }
  else if (!opt_compact)
  {
    print_comment(sql_file, 0,
                  "-- MySQL dump %s Distrib %s, for %s (%s)\n--\n",
                  DUMP_VERSION, MYSQL_SERVER_VERSION,
                  SYSTEM_TYPE, MACHINE_TYPE);
    print_comment(sql_file, 0, "-- Host: %s Database: %s\n",
                  current_host ? current_host : "localhost", db_name ?
                  db_name : "");
    print_comment(sql_file, 0,
                  "-- ------------------------------------------------------\n"
                 );
    print_comment(sql_file, 0, "-- Server version\t%s\n",
                  mysql_get_server_info(&mysql_connection));

    /* Save the client's charset variables before switching them. */
    if (opt_set_charset)
      fprintf(sql_file,
"\n/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */;"
"\n/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */;"
"\n/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */;"
"\n/*!40101 SET NAMES %s */;\n",default_charset);

    if (opt_tz_utc)
    {
      fprintf(sql_file, "/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */;\n");
      fprintf(sql_file, "/*!40103 SET TIME_ZONE='+00:00' */;\n");
    }

    /* Only disable the checks when producing SQL output (not --tab files). */
    if (!path)
    {
      fprintf(md_result_file,"\
/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */;\n\
/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */;\n\
");
    }
    fprintf(sql_file,
            "/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='%s%s%s' */;\n"
            "/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */;\n",
            path?"":"NO_AUTO_VALUE_ON_ZERO",compatible_mode_normal_str[0]==0?"":",",
            compatible_mode_normal_str);
    check_io(sql_file);
  }
} /* write_header */


/*
  Write the dump epilogue: close the XML document in --xml mode, else
  restore every session variable saved by write_header() and emit the
  "Dump completed" trailer (with timestamp unless --skip-dump-date).
*/
static void write_footer(FILE *sql_file)
{
  if (opt_xml)
  {
    fputs("</mysqldump>\n", sql_file);
    check_io(sql_file);
  }
  else if (!opt_compact)
  {
    if (opt_tz_utc)
      fprintf(sql_file,"/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;\n");

    fprintf(sql_file,"\n/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;\n");
    if (!path)
    {
      fprintf(md_result_file,"\
/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */;\n\
/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */;\n");
    }
    if (opt_set_charset)
      fprintf(sql_file,
"/*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */;\n"
"/*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */;\n"
"/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */;\n");
    fprintf(sql_file,
            "/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;\n");
    fputs("\n", sql_file);

    if (opt_dump_date)
    {
      char time_str[20];
      get_date(time_str,
               GETDATE_DATE_TIME, 0);
      print_comment(sql_file, 0, "-- Dump completed on %s\n", time_str);
    }
    else
      print_comment(sql_file, 0, "-- Dump completed\n");
    check_io(sql_file);
  }
} /* write_footer */


/* my_hash callback: the whole stored string is the hash key. */
uchar* get_table_key(const char *entry, size_t *length,
                     my_bool not_used __attribute__((unused)))
{
  *length= strlen(entry);
  return (uchar*) entry;
}


/*
  my_getopt callback: process a single parsed command-line option.
  Returns 0 on success; may exit() directly for fatal argument errors.
*/
static my_bool
get_one_option(int optid, const struct my_option *opt __attribute__((unused)),
               char *argument)
{
  switch (optid) {
  case 'p':
    if (argument == disabled_my_option)
      argument= (char*) "";                     /* Don't require password */
    if (argument)
    {
      /* Copy the password, then scrub it from argv so `ps` can't show it. */
      char *start=argument;
      my_free(opt_password);
      opt_password=my_strdup(PSI_NOT_INSTRUMENTED,
                             argument,MYF(MY_FAE));
      while (*argument) *argument++= 'x';       /* Destroy argument */
      if (*start)
        start[1]=0;                             /* Cut length of argument */
      tty_password= 0;
    }
    else
      tty_password=1;                           /* prompt on the tty later */
    break;
  case 'r':
    if (!(md_result_file= my_fopen(argument, O_WRONLY | FILE_BINARY,
                                   MYF(MY_WME))))
      exit(1);
    break;
  case 'W':
#ifdef _WIN32
    opt_protocol= MYSQL_PROTOCOL_PIPE;
#endif
    break;
  case 'N':
    opt_set_charset= 0;
    break;
  case 'T':
    opt_disable_keys=0;
    if (strlen(argument) >= FN_REFLEN)
    {
      /*
        This check is made because the some the file functions below
        have FN_REFLEN sized stack allocated buffers and will cause
        a crash even if the input destination buffer is large enough
        to hold the output.
      */
      die(EX_USAGE, "Input filename too long: %s", argument);
    }
    break;
  case '#':
    DBUG_PUSH(argument ?
              argument : default_dbug_option);
    debug_check_flag= 1;
    break;
#include <sslopt-case.h>
  case 'V':
    print_version();
    exit(0);
  case 'X':
    /* --xml implies plain, row-per-element output. */
    opt_xml= 1;
    extended_insert= opt_drop= opt_lock=
      opt_disable_keys= opt_autocommit= opt_create_db= 0;
    break;
  case 'i':
    opt_comments_used= 1;
    break;
  case 'I':
  case '?':
    usage();
    exit(0);
  case (int) OPT_MASTER_DATA:
    if (!argument) /* work like in old versions */
      opt_master_data= MYSQL_OPT_MASTER_DATA_EFFECTIVE_SQL;
    break;
  case (int) OPT_MYSQLDUMP_SLAVE_DATA:
    if (!argument) /* work like in old versions */
      opt_slave_data= MYSQL_OPT_SLAVE_DATA_EFFECTIVE_SQL;
    break;
  case (int) OPT_OPTIMIZE:
    extended_insert= opt_drop= opt_lock= quick= create_options=
      opt_disable_keys= lock_tables= opt_set_charset= 1;
    break;
  case (int) OPT_SKIP_OPTIMIZATION:
    extended_insert= opt_drop= opt_lock= quick= create_options=
      opt_disable_keys= lock_tables= opt_set_charset= 0;
    break;
  case (int) OPT_COMPACT:
    if (opt_compact)
    {
      opt_comments= opt_drop= opt_disable_keys= opt_lock= 0;
      opt_set_charset= 0;
    }
    break;
  case (int) OPT_TABLES:
    opt_databases=0;
    break;
  case (int) OPT_IGNORE_TABLE:
  {
    /* The argument must be qualified as db.table. */
    if (!strchr(argument, '.'))
    {
      fprintf(stderr,
              "Illegal use of option --ignore-table=<database>.<table>\n");
      exit(1);
    }
    if (my_hash_insert(&ignore_table,
                       (uchar*)my_strdup(PSI_NOT_INSTRUMENTED,
                                         argument, MYF(0))))
      exit(EX_EOM);
    break;
  }
  case (int) OPT_COMPATIBLE:
    {
      char buff[255];
      char *end= compatible_mode_normal_str;
      int i;
      ulong mode;
      uint err_len;

      opt_quoted= 1;
      opt_set_charset= 0;
      opt_compatible_mode_str= argument;
      opt_compatible_mode= find_set(&compatible_mode_typelib,
                                    argument, (uint) strlen(argument),
                                    &err_ptr, &err_len);
      if (err_len)
      {
        strmake(buff, err_ptr, MY_MIN(sizeof(buff) - 1, err_len));
        fprintf(stderr, "Invalid mode to --compatible: %s\n", buff);
        exit(1);
      }
#if !defined(DBUG_OFF)
      {
        /* Debug-only: verify the normalized-mode buffer cannot overflow. */
        uint size_for_sql_mode= 0;
        const char **ptr;
        for (ptr= compatible_mode_names; *ptr; ptr++)
          size_for_sql_mode+= strlen(*ptr);
        size_for_sql_mode+= sizeof(compatible_mode_names)-1;
        DBUG_ASSERT(sizeof(compatible_mode_normal_str)>=size_for_sql_mode);
      }
#endif
      /* Expand the bit mask into a comma-separated mode-name string. */
      mode= opt_compatible_mode;
      for (i= 0, mode= opt_compatible_mode; mode; mode>>= 1, i++)
      {
        if (mode & 1)
        {
          end= my_stpcpy(end, compatible_mode_names[i]);
          end= my_stpcpy(end, ",");
        }
      }
      if (end!=compatible_mode_normal_str)
        end[-1]= 0;                   /* strip the trailing comma */
      /*
        Set charset to the default compiled value if it hasn't
        been reset yet by --default-character-set=xxx.
      */
      if (default_charset == mysql_universal_client_charset)
        default_charset= (char*) MYSQL_DEFAULT_CHARSET_NAME;
      break;
    }
  case (int) OPT_MYSQL_PROTOCOL:
    opt_protocol= find_type_or_exit(argument, &sql_protocol_typelib,
                                    opt->name);
    break;
  case (int) OPT_SET_GTID_PURGED:
    {
      opt_set_gtid_purged_mode= find_type_or_exit(argument,
                                                  &set_gtid_purged_mode_typelib,
                                                  opt->name)-1;
      break;
    }
  case (int) OPT_MYSQLDUMP_IGNORE_ERROR:
    /* Store the supplied list of errors into an array. */
    if (parse_ignore_error())
      exit(EX_EOM);
    break;
  }
  return 0;
}


/*
  Read option files and the command line, then validate and reconcile the
  resulting option set.  Returns 0 on success, an EX_* code on error.
*/
static int get_options(int *argc, char ***argv)
{
  int ho_error;
  MYSQL_PARAMETERS *mysql_params= mysql_get_parameters();

  opt_max_allowed_packet= *mysql_params->p_max_allowed_packet;
  opt_net_buffer_length= *mysql_params->p_net_buffer_length;

  md_result_file= stdout;
  my_getopt_use_args_separator= TRUE;
  if (load_defaults("my",load_default_groups,argc,argv))
    return 1;
  my_getopt_use_args_separator= FALSE;
  defaults_argv= *argv;

  if (my_hash_init(&ignore_table, charset_info, 16, 0, 0,
                   (my_hash_get_key) get_table_key, my_free, 0))
    return(EX_EOM);
  /* Don't copy internal log tables */
  if (my_hash_insert(&ignore_table,
                     (uchar*) my_strdup(PSI_NOT_INSTRUMENTED,
                                        "mysql.apply_status", MYF(MY_WME))) ||
      my_hash_insert(&ignore_table,
                     (uchar*) my_strdup(PSI_NOT_INSTRUMENTED,
                                        "mysql.schema", MYF(MY_WME))) ||
      my_hash_insert(&ignore_table,
                     (uchar*) my_strdup(PSI_NOT_INSTRUMENTED,
                                        "mysql.general_log", MYF(MY_WME))) ||
      my_hash_insert(&ignore_table,
                     (uchar*) my_strdup(PSI_NOT_INSTRUMENTED,
                                        "mysql.slow_log", MYF(MY_WME))))
    return(EX_EOM);

  if ((ho_error=
handle_options(argc, argv, my_long_options, get_one_option)))
    return(ho_error);

  *mysql_params->p_max_allowed_packet= opt_max_allowed_packet;
  *mysql_params->p_net_buffer_length= opt_net_buffer_length;
  if (debug_info_flag)
    my_end_arg= MY_CHECK_ERROR | MY_GIVE_INFO;
  if (debug_check_flag)
    my_end_arg= MY_CHECK_ERROR;

  /* The --fields-* / --lines-* options are only meaningful with --tab. */
  if (!path && (enclosed || opt_enclosed || escaped || lines_terminated ||
                fields_terminated))
  {
    fprintf(stderr,
            "%s: You must use option --tab with --fields-...\n", my_progname);
    return(EX_USAGE);
  }

  /* We don't delete master logs if slave data option */
  if (opt_slave_data)
  {
    opt_lock_all_tables= !opt_single_transaction;
    opt_master_data= 0;
    opt_delete_master_logs= 0;
  }

  /* Ensure consistency of the set of binlog & locking options */
  if (opt_delete_master_logs && !opt_master_data)
    opt_master_data= MYSQL_OPT_MASTER_DATA_COMMENTED_SQL;
  if (opt_single_transaction && opt_lock_all_tables)
  {
    fprintf(stderr, "%s: You can't use --single-transaction and "
            "--lock-all-tables at the same time.\n", my_progname);
    return(EX_USAGE);
  }
  if (opt_master_data)
  {
    opt_lock_all_tables= !opt_single_transaction;
    opt_slave_data= 0;
  }
  if (opt_single_transaction || opt_lock_all_tables)
    lock_tables= 0;
  if (enclosed && opt_enclosed)
  {
    fprintf(stderr, "%s: You can't use ..enclosed.. and ..optionally-enclosed.. at the same time.\n", my_progname);
    return(EX_USAGE);
  }
  if ((opt_databases || opt_alldbs) && path)
  {
    fprintf(stderr,
            "%s: --databases or --all-databases can't be used with --tab.\n",
            my_progname);
    return(EX_USAGE);
  }
  /* Switch the client charset if --default-character-set differs. */
  if (strcmp(default_charset, charset_info->csname) &&
      !(charset_info= get_charset_by_csname(default_charset,
                                            MY_CS_PRIMARY, MYF(MY_WME))))
    exit(1);
  /* A database argument is required unless --all-databases was given. */
  if ((*argc < 1 && !opt_alldbs) || (*argc > 0 && opt_alldbs))
  {
    short_usage();
    return EX_USAGE;
  }
  if (tty_password)
    opt_password=get_tty_password(NullS);
  return(0);
} /* get_options */


/*
** DB_error -- prints mysql error message and exits the program.
*/
static void DB_error(MYSQL *mysql_arg, const char *when)
{
  DBUG_ENTER("DB_error");
  maybe_die(EX_MYSQLERR, "Got error: %d: %s %s",
            mysql_errno(mysql_arg), mysql_error(mysql_arg), when);
  DBUG_VOID_RETURN;
}


/*
  Prints out an error message and kills the process.

  SYNOPSIS
    die()
    error_num   - process return value
    fmt_reason  - a format string for use by my_vsnprintf.
    ...         - variable arguments for above fmt_reason string

  DESCRIPTION
    This call prints out the formatted error message to stderr and then
    terminates the process.
*/
static void die(int error_num, const char* fmt_reason, ...)
{
  char buffer[1000];
  va_list args;
  va_start(args,fmt_reason);
  my_vsnprintf(buffer, sizeof(buffer), fmt_reason, args);
  va_end(args);

  fprintf(stderr, "%s: %s\n", my_progname, buffer);
  fflush(stderr);

  /* force the exit: neutralize --force and --ignore-error so
     maybe_exit() really terminates. */
  opt_force= 0;
  if (opt_ignore_error)
    my_free(opt_ignore_error);
  opt_ignore_error= 0;

  maybe_exit(error_num);
}


/*
  Prints out an error message and maybe kills the process.

  SYNOPSIS
    maybe_die()
    error_num   - process return value
    fmt_reason  - a format string for use by my_vsnprintf.
    ...         - variable arguments for above fmt_reason string

  DESCRIPTION
    This call prints out the formatted error message to stderr and then
    terminates the process, unless the --force command line option is used.

    This call should be used for non-fatal errors (such as database
    errors) that the code may still be able to continue to the next unit
    of work.
*/
static void maybe_die(int error_num, const char* fmt_reason, ...)
{
  char buffer[1000];
  va_list args;
  va_start(args,fmt_reason);
  my_vsnprintf(buffer, sizeof(buffer), fmt_reason, args);
  va_end(args);

  fprintf(stderr, "%s: %s\n", my_progname, buffer);
  fflush(stderr);

  maybe_exit(error_num);
}


/*
  Sends a query to server, optionally reads result, prints error message if
  some.
  SYNOPSIS
    mysql_query_with_error_report()
    mysql_con       connection to use
    res             if non zero, result will be put there with
                    mysql_store_result()
    query           query to send to server

  RETURN VALUES
    0               query sending and (if res!=0) result reading went ok
    1               error
*/
static int mysql_query_with_error_report(MYSQL *mysql_con, MYSQL_RES **res,
                                         const char *query)
{
  if (mysql_query(mysql_con, query) ||
      (res && !((*res)= mysql_store_result(mysql_con))))
  {
    maybe_die(EX_MYSQLERR, "Couldn't execute '%s': %s (%d)",
              query, mysql_error(mysql_con), mysql_errno(mysql_con));
    return 1;
  }
  return 0;
}


/*
  Switch the connection to db_name and read @@collation_database into
  db_cl_name (NUL-terminated, truncated to db_cl_size).
  Returns 0 on success, 1 on any query/result error.
*/
static int fetch_db_collation(const char *db_name,
                              char *db_cl_name,
                              int db_cl_size)
{
  my_bool err_status= FALSE;
  char query[QUERY_LENGTH];
  MYSQL_RES *db_cl_res;
  MYSQL_ROW db_cl_row;
  char quoted_database_buf[NAME_LEN*2+3];
  char *qdatabase= quote_name(db_name, quoted_database_buf, 1);

  my_snprintf(query, sizeof (query), "use %s", qdatabase);

  if (mysql_query_with_error_report(mysql, NULL, query))
    return 1;

  if (mysql_query_with_error_report(mysql, &db_cl_res,
                                    "select @@collation_database"))
    return 1;

  /* do/while(FALSE) gives a single exit point that frees the result. */
  do
  {
    if (mysql_num_rows(db_cl_res) != 1)
    {
      err_status= TRUE;
      break;
    }

    if (!(db_cl_row= mysql_fetch_row(db_cl_res)))
    {
      err_status= TRUE;
      break;
    }

    strncpy(db_cl_name, db_cl_row[0], db_cl_size);
    db_cl_name[db_cl_size - 1]= 0; /* just in case. */

  } while (FALSE);

  mysql_free_result(db_cl_res);

  return err_status ? 1 : 0;
}


/*
  Case-insensitive substring search using the latin1 collation.
  Returns a pointer just past the first match of token in str,
  or NULL when token does not occur.
*/
static char *my_case_str(const char *str,
                         uint str_len,
                         const char *token,
                         uint token_len)
{
  my_match_t match;

  uint status= my_charset_latin1.coll->instr(&my_charset_latin1,
                                             str, str_len,
                                             token, token_len,
                                             &match, 1);

  return status ?
         (char *) str + match.end : NULL;
}


/*
  When the current database collation differs from the required one,
  emit an ALTER DATABASE that switches it, and set *db_cl_altered so the
  caller knows to restore it later.  Returns 1 only when the required
  collation name is unknown.
*/
static int switch_db_collation(FILE *sql_file,
                               const char *db_name,
                               const char *delimiter,
                               const char *current_db_cl_name,
                               const char *required_db_cl_name,
                               int *db_cl_altered)
{
  if (strcmp(current_db_cl_name, required_db_cl_name) != 0)
  {
    char quoted_db_buf[NAME_LEN * 2 + 3];
    char *quoted_db_name= quote_name(db_name, quoted_db_buf, FALSE);

    CHARSET_INFO *db_cl= get_charset_by_name(required_db_cl_name, MYF(0));

    if (!db_cl)
      return 1;

    fprintf(sql_file,
            "ALTER DATABASE %s CHARACTER SET %s COLLATE %s %s\n",
            (const char *) quoted_db_name,
            (const char *) db_cl->csname,
            (const char *) db_cl->name,
            (const char *) delimiter);

    *db_cl_altered= 1;

    return 0;
  }

  *db_cl_altered= 0;

  return 0;
}


/*
  Emit the ALTER DATABASE that restores the collation previously changed
  by switch_db_collation().  Returns 1 when db_cl_name is unknown.
*/
static int restore_db_collation(FILE *sql_file,
                                const char *db_name,
                                const char *delimiter,
                                const char *db_cl_name)
{
  char quoted_db_buf[NAME_LEN * 2 + 3];
  char *quoted_db_name= quote_name(db_name, quoted_db_buf, FALSE);

  CHARSET_INFO *db_cl= get_charset_by_name(db_cl_name, MYF(0));

  if (!db_cl)
    return 1;

  fprintf(sql_file,
          "ALTER DATABASE %s CHARACTER SET %s COLLATE %s %s\n",
          (const char *) quoted_db_name,
          (const char *) db_cl->csname,
          (const char *) db_cl->name,
          (const char *) delimiter);

  return 0;
}


/*
  Emit /*!50003 ... *-/ statements that save the current session charset
  variables into @saved_* user variables and then set the given values.
  Paired with restore_cs_variables().
*/
static void switch_cs_variables(FILE *sql_file,
                                const char *delimiter,
                                const char *character_set_client,
                                const char *character_set_results,
                                const char *collation_connection)
{
  fprintf(sql_file,
          "/*!50003 SET @saved_cs_client = @@character_set_client */ %s\n"
          "/*!50003 SET @saved_cs_results = @@character_set_results */ %s\n"
          "/*!50003 SET @saved_col_connection = @@collation_connection */ %s\n"
          "/*!50003 SET character_set_client = %s */ %s\n"
          "/*!50003 SET character_set_results = %s */ %s\n"
          "/*!50003 SET collation_connection = %s */ %s\n",
          (const char *) delimiter,
          (const char *) delimiter,
          (const char *) delimiter,

          (const char *) character_set_client,
          (const char *) delimiter,

          (const char *) character_set_results,
          (const char *) delimiter,

          (const char *)
          collation_connection,
          (const char *) delimiter);
}


/* Restore the session charset variables saved by switch_cs_variables(). */
static void restore_cs_variables(FILE *sql_file,
                                 const char *delimiter)
{
  fprintf(sql_file,
          "/*!50003 SET character_set_client = @saved_cs_client */ %s\n"
          "/*!50003 SET character_set_results = @saved_cs_results */ %s\n"
          "/*!50003 SET collation_connection = @saved_col_connection */ %s\n",
          (const char *) delimiter,
          (const char *) delimiter,
          (const char *) delimiter);
}


/* Save @@sql_mode into @saved_sql_mode and set the given mode. */
static void switch_sql_mode(FILE *sql_file,
                            const char *delimiter,
                            const char *sql_mode)
{
  fprintf(sql_file,
          "/*!50003 SET @saved_sql_mode = @@sql_mode */ %s\n"
          "/*!50003 SET sql_mode = '%s' */ %s\n",
          (const char *) delimiter,

          (const char *) sql_mode,
          (const char *) delimiter);
}


/* Restore @@sql_mode from the value saved by switch_sql_mode(). */
static void restore_sql_mode(FILE *sql_file,
                             const char *delimiter)
{
  fprintf(sql_file,
          "/*!50003 SET sql_mode = @saved_sql_mode */ %s\n",
          (const char *) delimiter);
}


/* Save @@time_zone into @saved_time_zone and set the given zone. */
static void switch_time_zone(FILE *sql_file,
                             const char *delimiter,
                             const char *time_zone)
{
  fprintf(sql_file,
          "/*!50003 SET @saved_time_zone = @@time_zone */ %s\n"
          "/*!50003 SET time_zone = '%s' */ %s\n",
          (const char *) delimiter,

          (const char *) time_zone,
          (const char *) delimiter);
}


/* Restore @@time_zone from the value saved by switch_time_zone(). */
static void restore_time_zone(FILE *sql_file,
                              const char *delimiter)
{
  fprintf(sql_file,
          "/*!50003 SET time_zone = @saved_time_zone */ %s\n",
          (const char *) delimiter);
}


/**
  Switch charset for results to some specified charset.  If the server does
  not support character_set_results variable, nothing can be done here.  As
  for whether something should be done here, future new callers of this
  function should be aware that the server lacking the facility of switching
  charsets is treated as success.

  @note  If the server lacks support, then nothing is changed and no error
         condition is returned.

  @returns  whether there was an error or not
*/
static int switch_character_set_results(MYSQL *mysql, const char *cs_name)
{
  char query_buffer[QUERY_LENGTH];
  size_t query_length;

  /* Server lacks facility.  This is not an error, by arbitrary decision .
   */
  if (!server_supports_switching_charsets)
    return FALSE;

  query_length= my_snprintf(query_buffer,
                            sizeof (query_buffer),
                            "SET SESSION character_set_results = '%s'",
                            (const char *) cs_name);

  return mysql_real_query(mysql, query_buffer, query_length);
}


/**
  Rewrite statement, enclosing DEFINER clause in version-specific comment.

  This function parses any CREATE statement and encloses DEFINER-clause in
  version-specific comment:
    input query:     CREATE DEFINER=a@b FUNCTION ...
    rewritten query: CREATE * / / *!50020 DEFINER=a@b * / / *!50003 FUNCTION ...

  @note This function will go away when WL#3995 is implemented.

  @param[in] stmt_str                 CREATE statement string.
  @param[in] stmt_length              Length of the stmt_str.
  @param[in] definer_version_str      Minimal MySQL version number when
                                      DEFINER clause is supported in the
                                      given statement.
  @param[in] definer_version_length   Length of definer_version_str.
  @param[in] stmt_version_str         Minimal MySQL version number when the
                                      given statement is supported.
  @param[in] stmt_version_length      Length of stmt_version_str.
  @param[in] keyword_str              Keyword to look for after CREATE.
  @param[in] keyword_length           Length of keyword_str.

  @return pointer to the new allocated query string.
*/
static char *cover_definer_clause(const char *stmt_str,
                                  uint stmt_length,
                                  const char *definer_version_str,
                                  uint definer_version_length,
                                  const char *stmt_version_str,
                                  uint stmt_version_length,
                                  const char *keyword_str,
                                  uint keyword_length)
{
  char *definer_begin= my_case_str(stmt_str, stmt_length,
                                   C_STRING_WITH_LEN(" DEFINER"));
  char *definer_end= NULL;

  char *query_str= NULL;
  char *query_ptr;

  /* No DEFINER clause: nothing to rewrite. */
  if (!definer_begin)
    return NULL;

  definer_end= my_case_str(definer_begin, strlen(definer_begin),
                           keyword_str, keyword_length);

  if (!definer_end)
    return NULL;

  /*
    Allocate memory for new query string: original string
    from SHOW statement and version-specific comments.
  */
  query_str= alloc_query_str(stmt_length + 23);

  /* Splice: <prefix> * / / *!<ver> DEFINER=... * / / *!<ver> <rest>. */
  query_ptr= my_stpncpy(query_str, stmt_str, definer_begin - stmt_str);
  query_ptr= my_stpncpy(query_ptr, C_STRING_WITH_LEN("*/ /*!"));
  query_ptr= my_stpncpy(query_ptr, definer_version_str,
                        definer_version_length);
  query_ptr= my_stpncpy(query_ptr, definer_begin,
                        definer_end - definer_begin);
  query_ptr= my_stpncpy(query_ptr, C_STRING_WITH_LEN("*/ /*!"));
  query_ptr= my_stpncpy(query_ptr, stmt_version_str,
                        stmt_version_length);
  query_ptr= strxmov(query_ptr, definer_end, NullS);

  return query_str;
}


/*
  Open a new .sql file to dump the table or view into

  SYNOPSIS
    open_sql_file_for_table
    name      name of the table or view
    flags     flags (as per "man 2 open")

  RETURN VALUES
    0        Failed to open file
    > 0      Handle of the open file
*/
static FILE* open_sql_file_for_table(const char* table, int flags)
{
  FILE* res;
  char filename[FN_REFLEN], tmp_path[FN_REFLEN];
  convert_dirname(tmp_path,path,NullS);
  res= my_fopen(fn_format(filename, table, tmp_path, ".sql", 4),
                flags, MYF(MY_WME));
  return res;
}


/*
  Release every resource acquired during the run: the result file (unless
  it is stdout), the password copy, the ignore-table hash, dynamic strings
  and the ignore-error array.  Safe to call from the exit path.
*/
static void free_resources()
{
  if (md_result_file && md_result_file != stdout)
    my_fclose(md_result_file, MYF(0));
  my_free(opt_password);
  if (my_hash_inited(&ignore_table))
    my_hash_free(&ignore_table);
  if (extended_insert)
    dynstr_free(&extended_row);
  if (insert_pat_inited)
    dynstr_free(&insert_pat);
  if (defaults_argv)
    free_defaults(defaults_argv);
  if (opt_ignore_error)
    my_free(opt_ignore_error);
  delete_dynamic(&ignore_error);
  my_end(my_end_arg);
}


/**
  Parse the list of error numbers to be ignored and store into a dynamic
  array.
  @return Operation status
      @retval 0    Success
      @retval >0   Failure
*/
static int parse_ignore_error()
{
  const char *search= ",";
  char *token;
  uint my_err;

  DBUG_ENTER("parse_ignore_error");

  if (my_init_dynamic_array(&ignore_error, sizeof(uint), 12, 12))
    goto error;

  /* NOTE: strtok modifies opt_ignore_error in place while tokenizing. */
  token= strtok(opt_ignore_error, search);

  while (token != NULL)
  {
    my_err= atoi(token);
    // filter out 0s, if any
    if (my_err != 0)
    {
      if (insert_dynamic(&ignore_error, &my_err))
        goto error;
    }
    token= strtok(NULL, search);
  }
  DBUG_RETURN(0);

error:
  DBUG_RETURN(EX_EOM);
}


/**
  Check if the last error should be ignored.
      @retval 1     yes
              0     no
*/
static my_bool do_ignore_error()
{
  uint i, last_errno, *my_err;
  my_bool found= 0;

  DBUG_ENTER("do_ignore_error");

  last_errno= mysql_errno(mysql);

  if (last_errno == 0)
    goto done;

  /* Linear scan of the --ignore-error list built by parse_ignore_error(). */
  for (i= 0; i < ignore_error.elements; i++)
  {
    my_err= dynamic_element(&ignore_error, i, uint *);
    if (last_errno == *my_err)
    {
      found= 1;
      break;
    }
  }
done:
  DBUG_RETURN(found);
}


/*
  Record the first error code, then exit the process unless --force was
  given or the last MySQL error is in the --ignore-error list.
*/
static void maybe_exit(int error)
{
  if (!first_error)
    first_error= error;

  /*
    Return if --force is used; else return only if the
    last error number is in the list of error numbers
    specified using --ignore-error option.
  */
  if (opt_force || (opt_ignore_error && do_ignore_error()))
    return;
  if (mysql)
    mysql_close(mysql);
  free_resources();
  exit(error);
}


/*
  db_connect -- connects to the host and selects DB.
*/
static int connect_to_db(char *host, char *user,char *passwd)
{
  char buff[20+FN_REFLEN];
  DBUG_ENTER("connect_to_db");

  verbose_msg("-- Connecting to %s...\n", host ?
              host : "localhost");
  mysql_init(&mysql_connection);
  if (opt_compress)
    mysql_options(&mysql_connection,MYSQL_OPT_COMPRESS,NullS);
  SSL_SET_OPTIONS(&mysql_connection);
  if (opt_protocol)
    mysql_options(&mysql_connection,MYSQL_OPT_PROTOCOL,(char*)&opt_protocol);
  if (opt_bind_addr)
    mysql_options(&mysql_connection,MYSQL_OPT_BIND,opt_bind_addr);
#if defined (_WIN32) && !defined (EMBEDDED_LIBRARY)
  if (shared_memory_base_name)
    mysql_options(&mysql_connection,MYSQL_SHARED_MEMORY_BASE_NAME,shared_memory_base_name);
#endif
  mysql_options(&mysql_connection, MYSQL_SET_CHARSET_NAME, default_charset);

  if (opt_plugin_dir && *opt_plugin_dir)
    mysql_options(&mysql_connection, MYSQL_PLUGIN_DIR, opt_plugin_dir);

  if (opt_default_auth && *opt_default_auth)
    mysql_options(&mysql_connection, MYSQL_DEFAULT_AUTH, opt_default_auth);

  /* Identify this client to the server's connection-attribute tables. */
  mysql_options(&mysql_connection, MYSQL_OPT_CONNECT_ATTR_RESET, 0);
  mysql_options4(&mysql_connection, MYSQL_OPT_CONNECT_ATTR_ADD,
                 "program_name", "mysqldump");
  if (!(mysql= mysql_real_connect(&mysql_connection,host,user,passwd,
                                  NULL,opt_mysql_port,opt_mysql_unix_port,
                                  0)))
  {
    DB_error(&mysql_connection, "when trying to connect");
    DBUG_RETURN(1);
  }
  if ((mysql_get_server_version(&mysql_connection) < 40100) ||
      (opt_compatible_mode & 3))
  {
    /* Don't dump SET NAMES with a pre-4.1 server (bug#7997).  */
    opt_set_charset= 0;

    /* Don't switch charsets for 4.1 and earlier.  (bug#34192). */
    server_supports_switching_charsets= FALSE;
  }
  /*
    As we're going to set SQL_MODE, it would be lost on reconnect, so we
    cannot reconnect.
  */
  mysql->reconnect= 0;
  my_snprintf(buff, sizeof(buff), "/*!40100 SET @@SQL_MODE='%s' */",
              compatible_mode_normal_str);
  if (mysql_query_with_error_report(mysql, 0, buff))
    DBUG_RETURN(1);
  /*
    set time_zone to UTC to allow dumping date types between servers with
    different time zone settings
  */
  if (opt_tz_utc)
  {
    my_snprintf(buff, sizeof(buff), "/*!40103 SET TIME_ZONE='+00:00' */");
    if (mysql_query_with_error_report(mysql, 0, buff))
      DBUG_RETURN(1);
  }
  DBUG_RETURN(0);
} /* connect_to_db */


/*
** dbDisconnect -- disconnects from the host.
*/
static void dbDisconnect(char *host)
{
  verbose_msg("-- Disconnecting from %s...\n", host ? host : "localhost");
  mysql_close(mysql);
} /* dbDisconnect */


/*
  Write pos (length bytes) to file as a single-quoted SQL string literal,
  escaping it through mysql_real_escape_string().  Dies on OOM.
*/
static void unescape(FILE *file,char *pos,uint length)
{
  char *tmp;
  DBUG_ENTER("unescape");
  /* Worst case every byte escapes to two, plus the terminating NUL. */
  if (!(tmp=(char*) my_malloc(PSI_NOT_INSTRUMENTED,
                              length*2+1, MYF(MY_WME))))
    die(EX_MYSQLERR, "Couldn't allocate memory");

  mysql_real_escape_string(&mysql_connection, tmp, pos, length);
  fputc('\'', file);
  fputs(tmp, file);
  fputc('\'', file);
  check_io(file);
  my_free(tmp);
  DBUG_VOID_RETURN;
} /* unescape */


/* Return 1 when str contains a character that requires identifier quoting. */
static my_bool test_if_special_chars(const char *str)
{
#if MYSQL_VERSION_ID >= 32300
  for ( ; *str ; str++)
    if (!my_isvar(charset_info,*str) && *str != '$')
      return 1;
#endif
  return 0;
} /* test_if_special_chars */


/*
  quote_name(name, buff, force)

  Quotes char string, taking into account compatible mode

  Args

  name                 Unquoted string containing that which will be quoted
  buff                 The buffer that contains the quoted value, also returned
  force                Flag to make it ignore 'test_if_special_chars'

  Returns
    buff                 quoted string
*/
static char *quote_name(const char *name, char *buff, my_bool force)
{
  char *to= buff;
  char qtype= (opt_compatible_mode & MASK_ANSI_QUOTES) ?
'\"' : '`'; if (!force && !opt_quoted && !test_if_special_chars(name)) return (char*) name; *to++= qtype; while (*name) { if (*name == qtype) *to++= qtype; *to++= *name++; } to[0]= qtype; to[1]= 0; return buff; } /* quote_name */ /* Quote a table name so it can be used in "SHOW TABLES LIKE <tabname>" SYNOPSIS quote_for_like() name name of the table buff quoted name of the table DESCRIPTION Quote \, _, ' and % characters Note: Because MySQL uses the C escape syntax in strings (for example, '\n' to represent newline), you must double any '\' that you use in your LIKE strings. For example, to search for '\n', specify it as '\\n'. To search for '\', specify it as '\\\\' (the backslashes are stripped once by the parser and another time when the pattern match is done, leaving a single backslash to be matched). Example: "t\1" => "t\\\\1" */ static char *quote_for_like(const char *name, char *buff) { char *to= buff; *to++= '\''; while (*name) { if (*name == '\\') { *to++='\\'; *to++='\\'; *to++='\\'; } else if (*name == '\'' || *name == '_' || *name == '%') *to++= '\\'; *to++= *name++; } to[0]= '\''; to[1]= 0; return buff; } /** Quote and print a string. @param xml_file - Output file. @param str - String to print. @param len - Its length. @param is_attribute_name - A check for attribute name or value. @description Quote '<' '>' '&' '\"' chars and print a string to the xml_file. */ static void print_quoted_xml(FILE *xml_file, const char *str, ulong len, my_bool is_attribute_name) { const char *end; for (end= str + len; str != end; str++) { switch (*str) { case '<': fputs("&lt;", xml_file); break; case '>': fputs("&gt;", xml_file); break; case '&': fputs("&amp;", xml_file); break; case '\"': fputs("&quot;", xml_file); break; case ' ': /* Attribute names cannot contain spaces. */ if (is_attribute_name) { fputs("_", xml_file); break; } /* fall through */ default: fputc(*str, xml_file); break; } } check_io(xml_file); } /* Print xml tag. Optionally add attribute(s). 
  SYNOPSIS
    print_xml_tag(xml_file, sbeg, send, tag_name, first_attribute_name,
                    ..., attribute_name_n, attribute_value_n, NullS)
    xml_file    - output file
    sbeg        - line beginning
    line_end    - line ending
    tag_name    - XML tag name.
    first_attribute_name - tag and first attribute
    first_attribute_value - (Implied) value of first attribute
    attribute_name_n - attribute n
    attribute_value_n - value of attribute n

  DESCRIPTION
    Print XML tag with any number of attribute="value" pairs to the xml_file.

    Format is:
      sbeg<tag_name first_attribute_name="first_attribute_value" ...
      attribute_name_n="attribute_value_n">send
  NOTE
    Additional arguments must be present in attribute/value pairs.
    The last argument should be the null character pointer.
    All attribute_value arguments MUST be NULL terminated strings.
    All attribute_value arguments will be quoted before output.
*/
static void print_xml_tag(FILE * xml_file, const char* sbeg,
                          const char* line_end,
                          const char* tag_name,
                          const char* first_attribute_name, ...)
{
  va_list arg_list;
  const char *attribute_name, *attribute_value;

  fputs(sbeg, xml_file);
  fputc('<', xml_file);
  fputs(tag_name, xml_file);

  /* Walk the attribute-name / attribute-value pairs until NullS. */
  va_start(arg_list, first_attribute_name);
  attribute_name= first_attribute_name;
  while (attribute_name != NullS)
  {
    attribute_value= va_arg(arg_list, char *);
    DBUG_ASSERT(attribute_value != NullS);

    fputc(' ', xml_file);
    fputs(attribute_name, xml_file);
    fputc('\"', xml_file);

    print_quoted_xml(xml_file, attribute_value, strlen(attribute_value), 0);
    fputc('\"', xml_file);

    attribute_name= va_arg(arg_list, char *);
  }
  va_end(arg_list);

  fputc('>', xml_file);
  fputs(line_end, xml_file);
  check_io(xml_file);
}


/*
  Print xml tag with for a field that is null

  SYNOPSIS
    print_xml_null_tag()
    xml_file    - output file
    sbeg        - line beginning
    stag_atr    - tag and attribute
    sval        - value of attribute
    line_end    - line ending

  DESCRIPTION
    Print tag with one attribute to the xml_file. Format is:
      <stag_atr="sval" xsi:nil="true"/>
  NOTE
    sval MUST be a NULL terminated string.
    sval string will be qouted before output.
*/
static void print_xml_null_tag(FILE * xml_file, const char* sbeg,
                               const char* stag_atr, const char* sval,
                               const char* line_end)
{
  fputs(sbeg, xml_file);
  fputs("<", xml_file);
  fputs(stag_atr, xml_file);
  fputs("\"", xml_file);
  print_quoted_xml(xml_file, sval, strlen(sval), 0);
  fputs("\" xsi:nil=\"true\" />", xml_file);
  fputs(line_end, xml_file);
  check_io(xml_file);
}


/**
  Print xml CDATA section.

  @param xml_file    - output file
  @param str         - string to print
  @param len         - length of the string

  @note
    This function also takes care of the presence of '[[>'
    string in the str. If found, the CDATA section is broken
    into two CDATA sections, <![CDATA[]]]]> and <![CDATA[>]].
*/
static void print_xml_cdata(FILE *xml_file, const char *str, ulong len)
{
  const char *end;

  fputs("<![CDATA[\n", xml_file);
  for (end= str + len; str != end; str++)
  {
    switch(*str) {
    case ']':
      /* Split "]]>" across two CDATA sections so the XML stays valid. */
      if ((*(str + 1) == ']') && (*(str + 2) =='>'))
      {
        fputs("]]]]><![CDATA[>", xml_file);
        str += 2;
        continue;
      }
      /* fall through */
    default:
      fputc(*str, xml_file);
      break;
    }
  }
  fputs("\n]]>\n", xml_file);
  check_io(xml_file);
}


/*
  Print xml tag with many attributes.

  SYNOPSIS
    print_xml_row()
    xml_file    - output file
    row_name    - xml tag name
    tableRes    - query result
    row         - result row
    str_create  - create statement header string

  DESCRIPTION
    Print tag with many attribute to the xml_file. Format is:
      \t\t<row_name Atr1="Val1" Atr2="Val2"... />
  NOTE
    All atributes and values will be quoted before output.
*/
static void print_xml_row(FILE *xml_file, const char *row_name,
                          MYSQL_RES *tableRes, MYSQL_ROW *row,
                          const char *str_create)
{
  uint i;
  char *create_stmt_ptr= NULL;
  ulong create_stmt_len= 0;
  MYSQL_FIELD *field;
  ulong *lengths= mysql_fetch_lengths(tableRes);

  fprintf(xml_file, "\t\t<%s", row_name);
  check_io(xml_file);
  mysql_field_seek(tableRes, 0);
  for (i= 0; (field= mysql_fetch_field(tableRes)); i++)
  {
    if ((*row)[i])
    {
      /* For 'create' statements, dump using CDATA.
*/ if ((str_create) && (strcmp(str_create, field->name) == 0)) { create_stmt_ptr= (*row)[i]; create_stmt_len= lengths[i]; } else { fputc(' ', xml_file); print_quoted_xml(xml_file, field->name, field->name_length, 1); fputs("=\"", xml_file); print_quoted_xml(xml_file, (*row)[i], lengths[i], 0); fputc('"', xml_file); check_io(xml_file); } } } if (create_stmt_len) { fputs(">\n", xml_file); print_xml_cdata(xml_file, create_stmt_ptr, create_stmt_len); fprintf(xml_file, "\t\t</%s>\n", row_name); } else fputs(" />\n", xml_file); check_io(xml_file); } /** Print xml comments. @param xml_file - output file @param len - length of comment message @param comment_string - comment message @description Print the comment message in the format: "<!-- \n comment string \n -->\n" @note Any occurrence of continuous hyphens will be squeezed to a single hyphen. */ static void print_xml_comment(FILE *xml_file, ulong len, const char *comment_string) { const char* end; fputs("<!-- ", xml_file); for (end= comment_string + len; comment_string != end; comment_string++) { /* The string "--" (double-hyphen) MUST NOT occur within xml comments. */ switch (*comment_string) { case '-': if (*(comment_string + 1) == '-') /* Only one hyphen allowed. */ break; default: fputc(*comment_string, xml_file); break; } } fputs(" -->\n", xml_file); check_io(xml_file); } /* A common printing function for xml and non-xml modes. */ static void print_comment(FILE *sql_file, my_bool is_error, const char *format, ...) { static char comment_buff[COMMENT_LENGTH]; va_list args; /* If its an error message, print it ignoring opt_comments. 
*/ if (!is_error && !opt_comments) return; va_start(args, format); my_vsnprintf(comment_buff, COMMENT_LENGTH, format, args); va_end(args); if (!opt_xml) { fputs(comment_buff, sql_file); check_io(sql_file); return; } print_xml_comment(sql_file, strlen(comment_buff), comment_buff); } /* create_delimiter Generate a new (null-terminated) string that does not exist in query and is therefore suitable for use as a query delimiter. Store this delimiter in delimiter_buff . This is quite simple in that it doesn't even try to parse statements as an interpreter would. It merely returns a string that is not in the query, which is much more than adequate for constructing a delimiter. RETURN ptr to the delimiter on Success NULL on Failure */ static char *create_delimiter(char *query, char *delimiter_buff, int delimiter_max_size) { int proposed_length; char *presence; delimiter_buff[0]= ';'; /* start with one semicolon, and */ for (proposed_length= 2; proposed_length < delimiter_max_size; delimiter_max_size++) { delimiter_buff[proposed_length-1]= ';'; /* add semicolons, until */ delimiter_buff[proposed_length]= '\0'; presence = strstr(query, delimiter_buff); if (presence == NULL) { /* the proposed delimiter is not in the query. */ return delimiter_buff; } } return NULL; /* but if we run out of space, return nothing at all. */ } /* dump_events_for_db -- retrieves list of events for a given db, and prints out the CREATE EVENT statement into the output (the dump). 
RETURN 0 Success 1 Error */ static uint dump_events_for_db(char *db) { char query_buff[QUERY_LENGTH]; char db_name_buff[NAME_LEN*2+3], name_buff[NAME_LEN*2+3]; char *event_name; char delimiter[QUERY_LENGTH]; FILE *sql_file= md_result_file; MYSQL_RES *event_res, *event_list_res; MYSQL_ROW row, event_list_row; char db_cl_name[MY_CS_NAME_SIZE]; int db_cl_altered= FALSE; DBUG_ENTER("dump_events_for_db"); DBUG_PRINT("enter", ("db: '%s'", db)); mysql_real_escape_string(mysql, db_name_buff, db, strlen(db)); /* nice comments */ print_comment(sql_file, 0, "\n--\n-- Dumping events for database '%s'\n--\n", db); /* not using "mysql_query_with_error_report" because we may have not enough privileges to lock mysql.events. */ if (lock_tables) mysql_query(mysql, "LOCK TABLES mysql.event READ"); if (mysql_query_with_error_report(mysql, &event_list_res, "show events")) DBUG_RETURN(0); strcpy(delimiter, ";"); if (mysql_num_rows(event_list_res) > 0) { if (opt_xml) fputs("\t<events>\n", sql_file); else { fprintf(sql_file, "/*!50106 SET @save_time_zone= @@TIME_ZONE */ ;\n"); /* Get database collation. */ if (fetch_db_collation(db_name_buff, db_cl_name, sizeof (db_cl_name))) DBUG_RETURN(1); } if (switch_character_set_results(mysql, "binary")) DBUG_RETURN(1); while ((event_list_row= mysql_fetch_row(event_list_res)) != NULL) { event_name= quote_name(event_list_row[1], name_buff, 0); DBUG_PRINT("info", ("retrieving CREATE EVENT for %s", name_buff)); my_snprintf(query_buff, sizeof(query_buff), "SHOW CREATE EVENT %s", event_name); if (mysql_query_with_error_report(mysql, &event_res, query_buff)) DBUG_RETURN(1); while ((row= mysql_fetch_row(event_res)) != NULL) { if (opt_xml) { print_xml_row(sql_file, "event", event_res, &row, "Create Event"); continue; } /* if the user has EXECUTE privilege he can see event names, but not the event body! 
*/ if (strlen(row[3]) != 0) { char *query_str; if (opt_drop) fprintf(sql_file, "/*!50106 DROP EVENT IF EXISTS %s */%s\n", event_name, delimiter); if (create_delimiter(row[3], delimiter, sizeof(delimiter)) == NULL) { fprintf(stderr, "%s: Warning: Can't create delimiter for event '%s'\n", my_progname, event_name); DBUG_RETURN(1); } fprintf(sql_file, "DELIMITER %s\n", delimiter); if (mysql_num_fields(event_res) >= 7) { if (switch_db_collation(sql_file, db_name_buff, delimiter, db_cl_name, row[6], &db_cl_altered)) { DBUG_RETURN(1); } switch_cs_variables(sql_file, delimiter, row[4], /* character_set_client */ row[4], /* character_set_results */ row[5]); /* collation_connection */ } else { /* mysqldump is being run against the server, that does not provide character set information in SHOW CREATE statements. NOTE: the dump may be incorrect, since character set information is required in order to restore event properly. */ fprintf(sql_file, "--\n" "-- WARNING: old server version. " "The following dump may be incomplete.\n" "--\n"); } switch_sql_mode(sql_file, delimiter, row[1]); switch_time_zone(sql_file, delimiter, row[2]); query_str= cover_definer_clause(row[3], strlen(row[3]), C_STRING_WITH_LEN("50117"), C_STRING_WITH_LEN("50106"), C_STRING_WITH_LEN(" EVENT")); fprintf(sql_file, "/*!50106 %s */ %s\n", (const char *) (query_str != NULL ? 
query_str : row[3]), (const char *) delimiter); restore_time_zone(sql_file, delimiter); restore_sql_mode(sql_file, delimiter); if (mysql_num_fields(event_res) >= 7) { restore_cs_variables(sql_file, delimiter); if (db_cl_altered) { if (restore_db_collation(sql_file, db_name_buff, delimiter, db_cl_name)) DBUG_RETURN(1); } } } } /* end of event printing */ mysql_free_result(event_res); } /* end of list of events */ if (opt_xml) { fputs("\t</events>\n", sql_file); check_io(sql_file); } else { fprintf(sql_file, "DELIMITER ;\n"); fprintf(sql_file, "/*!50106 SET TIME_ZONE= @save_time_zone */ ;\n"); } if (switch_character_set_results(mysql, default_charset)) DBUG_RETURN(1); } mysql_free_result(event_list_res); if (lock_tables) (void) mysql_query_with_error_report(mysql, 0, "UNLOCK TABLES"); DBUG_RETURN(0); } /* Print hex value for blob data. SYNOPSIS print_blob_as_hex() output_file - output file str - string to print len - its length DESCRIPTION Print hex value for blob data. */ static void print_blob_as_hex(FILE *output_file, const char *str, ulong len) { /* sakaik got the idea to to provide blob's in hex notation. */ const char *ptr= str, *end= ptr + len; for (; ptr < end ; ptr++) fprintf(output_file, "%02X", *((uchar *)ptr)); check_io(output_file); } /* dump_routines_for_db -- retrieves list of routines for a given db, and prints out the CREATE PROCEDURE definition into the output (the dump). 
  This function has logic to print the appropriate syntax depending on whether
  this is a procedure or functions

  RETURN
    0  Success
    1  Error
*/
static uint dump_routines_for_db(char *db)
{
  char       query_buff[QUERY_LENGTH];
  const char *routine_type[]= {"FUNCTION", "PROCEDURE"};
  char       db_name_buff[NAME_LEN*2+3], name_buff[NAME_LEN*2+3];
  char       *routine_name;
  int        i;
  FILE       *sql_file= md_result_file;
  MYSQL_RES  *routine_res, *routine_list_res;
  MYSQL_ROW  row, routine_list_row;

  char       db_cl_name[MY_CS_NAME_SIZE];
  int        db_cl_altered= FALSE;

  DBUG_ENTER("dump_routines_for_db");
  DBUG_PRINT("enter", ("db: '%s'", db));

  mysql_real_escape_string(mysql, db_name_buff, db, strlen(db));

  /* nice comments */
  print_comment(sql_file, 0,
                "\n--\n-- Dumping routines for database '%s'\n--\n", db);

  /*
    not using "mysql_query_with_error_report" because we may have not
    enough privileges to lock mysql.proc.
  */
  if (lock_tables)
    mysql_query(mysql, "LOCK TABLES mysql.proc READ");

  /* Get database collation. */

  if (fetch_db_collation(db_name_buff, db_cl_name, sizeof (db_cl_name)))
    DBUG_RETURN(1);

  if (switch_character_set_results(mysql, "binary"))
    DBUG_RETURN(1);

  if (opt_xml)
    fputs("\t<routines>\n", sql_file);

  /* 0, retrieve and dump functions, 1, procedures */
  for (i= 0; i <= 1; i++)
  {
    my_snprintf(query_buff, sizeof(query_buff),
                "SHOW %s STATUS WHERE Db = '%s'",
                routine_type[i], db_name_buff);

    /*
      NOTE(review): the DBUG_RETURN(1) error exits below leak
      routine_list_res/routine_res and leave the LOCK TABLES and the
      "binary" character set in effect — confirm against later upstream
      cleanup before relying on error-path state.
    */
    if (mysql_query_with_error_report(mysql, &routine_list_res, query_buff))
      DBUG_RETURN(1);

    if (mysql_num_rows(routine_list_res))
    {
      while ((routine_list_row= mysql_fetch_row(routine_list_res)))
      {
        routine_name= quote_name(routine_list_row[1], name_buff, 0);
        DBUG_PRINT("info", ("retrieving CREATE %s for %s", routine_type[i],
                            name_buff));
        my_snprintf(query_buff, sizeof(query_buff), "SHOW CREATE %s %s",
                    routine_type[i], routine_name);

        if (mysql_query_with_error_report(mysql, &routine_res, query_buff))
          DBUG_RETURN(1);

        while ((row= mysql_fetch_row(routine_res)))
        {
          /*
            if the user has EXECUTE privilege he see routine names, but NOT the
            routine body of other routines that are not the creator of!
          */
          DBUG_PRINT("info",("length of body for %s row[2] '%s' is %d",
                             routine_name, row[2] ? row[2] : "(null)",
                             row[2] ? (int) strlen(row[2]) : 0));
          if (row[2] == NULL)
          {
            /* Body column is NULL: insufficient privileges to read it. */
            print_comment(sql_file, 1, "\n-- insufficient privileges to %s\n",
                          query_buff);
            print_comment(sql_file, 1,
                          "-- does %s have permissions on mysql.proc?\n\n",
                          current_user);
            maybe_die(EX_MYSQLERR,"%s has insufficent privileges to %s!",
                      current_user, query_buff);
          }
          else if (strlen(row[2]))
          {
            if (opt_xml)
            {
              if (i)                            // Procedures.
                print_xml_row(sql_file, "routine", routine_res, &row,
                              "Create Procedure");
              else                              // Functions.
                print_xml_row(sql_file, "routine", routine_res, &row,
                              "Create Function");
              continue;
            }
            if (opt_drop)
              fprintf(sql_file, "/*!50003 DROP %s IF EXISTS %s */;\n",
                      routine_type[i], routine_name);

            /* 6+ columns means the server reports character set info. */
            if (mysql_num_fields(routine_res) >= 6)
            {
              if (switch_db_collation(sql_file, db_name_buff, ";",
                                      db_cl_name, row[5], &db_cl_altered))
              {
                DBUG_RETURN(1);
              }

              switch_cs_variables(sql_file, ";",
                                  row[3],   /* character_set_client */
                                  row[3],   /* character_set_results */
                                  row[4]);  /* collation_connection */
            }
            else
            {
              /*
                mysqldump is being run against the server, that does not
                provide character set information in SHOW CREATE
                statements.

                NOTE: the dump may be incorrect, since character set
                information is required in order to restore stored
                procedure/function properly.
              */

              fprintf(sql_file,
                      "--\n"
                      "-- WARNING: old server version. "
                      "The following dump may be incomplete.\n"
                      "--\n");
            }

            switch_sql_mode(sql_file, ";", row[1]);

            fprintf(sql_file,
                    "DELIMITER ;;\n"
                    "%s ;;\n"
                    "DELIMITER ;\n",
                    (const char *) row[2]);

            restore_sql_mode(sql_file, ";");

            if (mysql_num_fields(routine_res) >= 6)
            {
              restore_cs_variables(sql_file, ";");

              if (db_cl_altered)
              {
                if (restore_db_collation(sql_file, db_name_buff, ";",
                                         db_cl_name))
                  DBUG_RETURN(1);
              }
            }
          }
        } /* end of routine printing */
        mysql_free_result(routine_res);
      } /* end of list of routines */
    }
    mysql_free_result(routine_list_res);
  } /* end of for i (0 .. 1) */

  if (opt_xml)
  {
    fputs("\t</routines>\n", sql_file);
    check_io(sql_file);
  }

  if (switch_character_set_results(mysql, default_charset))
    DBUG_RETURN(1);

  if (lock_tables)
    (void) mysql_query_with_error_report(mysql, 0, "UNLOCK TABLES");
  DBUG_RETURN(0);
}

/*
  general_log or slow_log tables under mysql database
*/
static inline my_bool general_log_or_slow_log_tables(const char *db,
                                                     const char *table)
{
  return (!my_strcasecmp(charset_info, db, "mysql")) &&
          (!my_strcasecmp(charset_info, table, "general_log") ||
           !my_strcasecmp(charset_info, table, "slow_log"));
}

/*
  get_table_structure -- retrievs database structure, prints out
  corresponding CREATE statement and fills out insert_pat if the table is
  the type we will be dumping.

  ARGS
    table       - table name
    db          - db name
    table_type  - table type, e.g.
"MyISAM" or "InnoDB", but also "VIEW" ignore_flag - what we must particularly ignore - see IGNORE_ defines above RETURN number of fields in table, 0 if error */ static uint get_table_structure(char *table, char *db, char *table_type, char *ignore_flag) { my_bool init=0, write_data, complete_insert; my_ulonglong num_fields; char *result_table, *opt_quoted_table; const char *insert_option; char name_buff[NAME_LEN+3],table_buff[NAME_LEN*2+3]; char table_buff2[NAME_LEN*2+3], query_buff[QUERY_LENGTH]; const char *show_fields_stmt= "SELECT `COLUMN_NAME` AS `Field`, " "`COLUMN_TYPE` AS `Type`, " "`IS_NULLABLE` AS `Null`, " "`COLUMN_KEY` AS `Key`, " "`COLUMN_DEFAULT` AS `Default`, " "`EXTRA` AS `Extra`, " "`COLUMN_COMMENT` AS `Comment` " "FROM `INFORMATION_SCHEMA`.`COLUMNS` WHERE " "TABLE_SCHEMA = '%s' AND TABLE_NAME = '%s'"; FILE *sql_file= md_result_file; int len; my_bool is_log_table; MYSQL_RES *result; MYSQL_ROW row; DBUG_ENTER("get_table_structure"); DBUG_PRINT("enter", ("db: %s table: %s", db, table)); *ignore_flag= check_if_ignore_table(table, table_type); complete_insert= 0; if ((write_data= !(*ignore_flag & IGNORE_DATA))) { complete_insert= opt_complete_insert; if (!insert_pat_inited) { insert_pat_inited= 1; init_dynamic_string_checked(&insert_pat, "", 1024, 1024); } else dynstr_set_checked(&insert_pat, ""); } insert_option= (opt_ignore ? 
" IGNORE " : ""); verbose_msg("-- Retrieving table structure for table %s...\n", table); len= my_snprintf(query_buff, sizeof(query_buff), "SET SQL_QUOTE_SHOW_CREATE=%d", (opt_quoted || opt_keywords)); if (!create_options) my_stpcpy(query_buff+len, "/*!40102 ,SQL_MODE=concat(@@sql_mode, _utf8 ',NO_KEY_OPTIONS,NO_TABLE_OPTIONS,NO_FIELD_OPTIONS') */"); result_table= quote_name(table, table_buff, 1); opt_quoted_table= quote_name(table, table_buff2, 0); if (opt_order_by_primary) order_by= primary_key_fields(result_table); if (!opt_xml && !mysql_query_with_error_report(mysql, 0, query_buff)) { /* using SHOW CREATE statement */ if (!opt_no_create_info) { /* Make an sql-file, if path was given iow. option -T was given */ char buff[20+FN_REFLEN]; MYSQL_FIELD *field; my_snprintf(buff, sizeof(buff), "show create table %s", result_table); if (switch_character_set_results(mysql, "binary") || mysql_query_with_error_report(mysql, &result, buff) || switch_character_set_results(mysql, default_charset)) DBUG_RETURN(0); if (path) { if (!(sql_file= open_sql_file_for_table(table, O_WRONLY))) DBUG_RETURN(0); write_header(sql_file, db); } if (strcmp (table_type, "VIEW") == 0) /* view */ print_comment(sql_file, 0, "\n--\n-- Temporary table structure for view %s\n--\n\n", result_table); else print_comment(sql_file, 0, "\n--\n-- Table structure for table %s\n--\n\n", result_table); if (opt_drop) { /* Even if the "table" is a view, we do a DROP TABLE here. The view-specific code below fills in the DROP VIEW. We will skip the DROP TABLE for general_log and slow_log, since those stmts will fail, in case we apply dump by enabling logging. 
*/ if (!general_log_or_slow_log_tables(db, table)) fprintf(sql_file, "DROP TABLE IF EXISTS %s;\n", opt_quoted_table); check_io(sql_file); } field= mysql_fetch_field_direct(result, 0); if (strcmp(field->name, "View") == 0) { char *scv_buff= NULL; my_ulonglong n_cols; verbose_msg("-- It's a view, create dummy table for view\n"); /* save "show create" statement for later */ if ((row= mysql_fetch_row(result)) && (scv_buff=row[1])) scv_buff= my_strdup(PSI_NOT_INSTRUMENTED, scv_buff, MYF(0)); mysql_free_result(result); /* Create a table with the same name as the view and with columns of the same name in order to satisfy views that depend on this view. The table will be removed when the actual view is created. The properties of each column, are not preserved in this temporary table, because they are not necessary. This will not be necessary once we can determine dependencies between views and can simply dump them in the appropriate order. */ my_snprintf(query_buff, sizeof(query_buff), "SHOW FIELDS FROM %s", result_table); if (switch_character_set_results(mysql, "binary") || mysql_query_with_error_report(mysql, &result, query_buff) || switch_character_set_results(mysql, default_charset)) { /* View references invalid or privileged table/col/fun (err 1356), so we cannot create a stand-in table. Be defensive and dump a comment with the view's 'show create' statement. (Bug #17371) */ if (mysql_errno(mysql) == ER_VIEW_INVALID) fprintf(sql_file, "\n-- failed on view %s: %s\n\n", result_table, scv_buff ? scv_buff : ""); my_free(scv_buff); DBUG_RETURN(0); } else my_free(scv_buff); n_cols= mysql_num_rows(result); if (0 != n_cols) { /* The actual formula is based on the column names and how the .FRM files are stored and is too volatile to be repeated here. Thus we simply warn the user if the columns exceed a limit we know works most of the time. 
*/ if (n_cols >= 1000) fprintf(stderr, "-- Warning: Creating a stand-in table for view %s may" " fail when replaying the dump file produced because " "of the number of columns exceeding 1000. Exercise " "caution when replaying the produced dump file.\n", table); if (opt_drop) { /* We have already dropped any table of the same name above, so here we just drop the view. */ fprintf(sql_file, "/*!50001 DROP VIEW IF EXISTS %s*/;\n", opt_quoted_table); check_io(sql_file); } fprintf(sql_file, "SET @saved_cs_client = @@character_set_client;\n" "SET character_set_client = utf8;\n" "/*!50001 CREATE TABLE %s (\n", result_table); /* Get first row, following loop will prepend comma - keeps from having to know if the row being printed is last to determine if there should be a _trailing_ comma. */ row= mysql_fetch_row(result); /* The actual column type doesn't matter anyway, since the table will be dropped at run time. We do tinyint to avoid hitting the row size limit. */ fprintf(sql_file, " %s tinyint NOT NULL", quote_name(row[0], name_buff, 0)); while((row= mysql_fetch_row(result))) { /* col name, col type */ fprintf(sql_file, ",\n %s tinyint NOT NULL", quote_name(row[0], name_buff, 0)); } /* Stand-in tables are always MyISAM tables as the default engine might have a column-limit that's lower than the number of columns in the view, and MyISAM support is guaranteed to be in the server anyway. */ fprintf(sql_file, "\n) ENGINE=MyISAM */;\n" "SET character_set_client = @saved_cs_client;\n"); check_io(sql_file); } mysql_free_result(result); if (path) my_fclose(sql_file, MYF(MY_WME)); seen_views= 1; DBUG_RETURN(0); } row= mysql_fetch_row(result); is_log_table= general_log_or_slow_log_tables(db, table); if (is_log_table) row[1]+= 13; /* strlen("CREATE TABLE ")= 13 */ if (opt_compatible_mode & 3) { fprintf(sql_file, is_log_table ? 
"CREATE TABLE IF NOT EXISTS %s;\n" : "%s;\n", row[1]); } else { fprintf(sql_file, "/*!40101 SET @saved_cs_client = @@character_set_client */;\n" "/*!40101 SET character_set_client = utf8 */;\n" "%s%s;\n" "/*!40101 SET character_set_client = @saved_cs_client */;\n", is_log_table ? "CREATE TABLE IF NOT EXISTS " : "", row[1]); } check_io(sql_file); mysql_free_result(result); } my_snprintf(query_buff, sizeof(query_buff), "show fields from %s", result_table); if (mysql_query_with_error_report(mysql, &result, query_buff)) { if (path) my_fclose(sql_file, MYF(MY_WME)); DBUG_RETURN(0); } /* If write_data is true, then we build up insert statements for the table's data. Note: in subsequent lines of code, this test will have to be performed each time we are appending to insert_pat. */ if (write_data) { if (opt_replace_into) dynstr_append_checked(&insert_pat, "REPLACE "); else dynstr_append_checked(&insert_pat, "INSERT "); dynstr_append_checked(&insert_pat, insert_option); dynstr_append_checked(&insert_pat, "INTO "); dynstr_append_checked(&insert_pat, opt_quoted_table); if (complete_insert) { dynstr_append_checked(&insert_pat, " ("); } else { dynstr_append_checked(&insert_pat, " VALUES "); if (!extended_insert) dynstr_append_checked(&insert_pat, "("); } } while ((row= mysql_fetch_row(result))) { if (complete_insert) { if (init) { dynstr_append_checked(&insert_pat, ", "); } init=1; dynstr_append_checked(&insert_pat, quote_name(row[SHOW_FIELDNAME], name_buff, 0)); } } num_fields= mysql_num_rows(result); mysql_free_result(result); } else { verbose_msg("%s: Warning: Can't set SQL_QUOTE_SHOW_CREATE option (%s)\n", my_progname, mysql_error(mysql)); my_snprintf(query_buff, sizeof(query_buff), show_fields_stmt, db, table); if (mysql_query_with_error_report(mysql, &result, query_buff)) DBUG_RETURN(0); /* Make an sql-file, if path was given iow. 
option -T was given */ if (!opt_no_create_info) { if (path) { if (!(sql_file= open_sql_file_for_table(table, O_WRONLY))) DBUG_RETURN(0); write_header(sql_file, db); } print_comment(sql_file, 0, "\n--\n-- Table structure for table %s\n--\n\n", result_table); if (opt_drop) fprintf(sql_file, "DROP TABLE IF EXISTS %s;\n", result_table); if (!opt_xml) fprintf(sql_file, "CREATE TABLE %s (\n", result_table); else print_xml_tag(sql_file, "\t", "\n", "table_structure", "name=", table, NullS); check_io(sql_file); } if (write_data) { if (opt_replace_into) dynstr_append_checked(&insert_pat, "REPLACE "); else dynstr_append_checked(&insert_pat, "INSERT "); dynstr_append_checked(&insert_pat, insert_option); dynstr_append_checked(&insert_pat, "INTO "); dynstr_append_checked(&insert_pat, result_table); if (complete_insert) dynstr_append_checked(&insert_pat, " ("); else { dynstr_append_checked(&insert_pat, " VALUES "); if (!extended_insert) dynstr_append_checked(&insert_pat, "("); } } while ((row= mysql_fetch_row(result))) { ulong *lengths= mysql_fetch_lengths(result); if (init) { if (!opt_xml && !opt_no_create_info) { fputs(",\n",sql_file); check_io(sql_file); } if (complete_insert) dynstr_append_checked(&insert_pat, ", "); } init=1; if (complete_insert) dynstr_append_checked(&insert_pat, quote_name(row[SHOW_FIELDNAME], name_buff, 0)); if (!opt_no_create_info) { if (opt_xml) { print_xml_row(sql_file, "field", result, &row, NullS); continue; } if (opt_keywords) fprintf(sql_file, " %s.%s %s", result_table, quote_name(row[SHOW_FIELDNAME],name_buff, 0), row[SHOW_TYPE]); else fprintf(sql_file, " %s %s", quote_name(row[SHOW_FIELDNAME], name_buff, 0), row[SHOW_TYPE]); if (row[SHOW_DEFAULT]) { fputs(" DEFAULT ", sql_file); unescape(sql_file, row[SHOW_DEFAULT], lengths[SHOW_DEFAULT]); } if (!row[SHOW_NULL][0]) fputs(" NOT NULL", sql_file); if (row[SHOW_EXTRA][0]) fprintf(sql_file, " %s",row[SHOW_EXTRA]); check_io(sql_file); } } num_fields= mysql_num_rows(result); mysql_free_result(result); 
if (!opt_no_create_info) { /* Make an sql-file, if path was given iow. option -T was given */ char buff[20+FN_REFLEN]; uint keynr,primary_key; my_snprintf(buff, sizeof(buff), "show keys from %s", result_table); if (mysql_query_with_error_report(mysql, &result, buff)) { if (mysql_errno(mysql) == ER_WRONG_OBJECT) { /* it is VIEW */ fputs("\t\t<options Comment=\"view\" />\n", sql_file); goto continue_xml; } fprintf(stderr, "%s: Can't get keys for table %s (%s)\n", my_progname, result_table, mysql_error(mysql)); if (path) my_fclose(sql_file, MYF(MY_WME)); DBUG_RETURN(0); } /* Find first which key is primary key */ keynr=0; primary_key=INT_MAX; while ((row= mysql_fetch_row(result))) { if (atoi(row[3]) == 1) { keynr++; if (!strcmp(row[2],"PRIMARY")) { primary_key=keynr; break; } } } mysql_data_seek(result,0); keynr=0; while ((row= mysql_fetch_row(result))) { if (opt_xml) { print_xml_row(sql_file, "key", result, &row, NullS); continue; } if (atoi(row[3]) == 1) { if (keynr++) putc(')', sql_file); if (atoi(row[1])) /* Test if duplicate key */ /* Duplicate allowed */ fprintf(sql_file, ",\n KEY %s (",quote_name(row[2],name_buff,0)); else if (keynr == primary_key) fputs(",\n PRIMARY KEY (",sql_file); /* First UNIQUE is primary */ else fprintf(sql_file, ",\n UNIQUE %s (",quote_name(row[2],name_buff, 0)); } else putc(',', sql_file); fputs(quote_name(row[4], name_buff, 0), sql_file); if (row[7]) fprintf(sql_file, " (%s)",row[7]); /* Sub key */ check_io(sql_file); } mysql_free_result(result); if (!opt_xml) { if (keynr) putc(')', sql_file); fputs("\n)",sql_file); check_io(sql_file); } /* Get MySQL specific create options */ if (create_options) { char show_name_buff[NAME_LEN*2+2+24]; /* Check memory for quote_for_like() */ my_snprintf(buff, sizeof(buff), "show table status like %s", quote_for_like(table, show_name_buff)); if (mysql_query_with_error_report(mysql, &result, buff)) { if (mysql_errno(mysql) != ER_PARSE_ERROR) { /* If old MySQL version */ verbose_msg("-- Warning: Couldn't 
get status information for " \ "table %s (%s)\n", result_table,mysql_error(mysql)); } } else if (!(row= mysql_fetch_row(result))) { fprintf(stderr, "Error: Couldn't read status information for table %s (%s)\n", result_table,mysql_error(mysql)); } else { if (opt_xml) print_xml_row(sql_file, "options", result, &row, NullS); else { fputs("/*!",sql_file); print_value(sql_file,result,row,"engine=","Engine",0); print_value(sql_file,result,row,"","Create_options",0); print_value(sql_file,result,row,"comment=","Comment",1); fputs(" */",sql_file); check_io(sql_file); } } mysql_free_result(result); /* Is always safe to free */ } continue_xml: if (!opt_xml) fputs(";\n", sql_file); else fputs("\t</table_structure>\n", sql_file); check_io(sql_file); } } if (complete_insert) { dynstr_append_checked(&insert_pat, ") VALUES "); if (!extended_insert) dynstr_append_checked(&insert_pat, "("); } if (sql_file != md_result_file) { fputs("\n", sql_file); write_footer(sql_file); my_fclose(sql_file, MYF(MY_WME)); } DBUG_RETURN((uint) num_fields); } /* get_table_structure */ static void dump_trigger_old(FILE *sql_file, MYSQL_RES *show_triggers_rs, MYSQL_ROW *show_trigger_row, const char *table_name) { char quoted_table_name_buf[NAME_LEN * 2 + 3]; char *quoted_table_name= quote_name(table_name, quoted_table_name_buf, 1); char name_buff[NAME_LEN * 4 + 3]; const char *xml_msg= "\nWarning! mysqldump being run against old server " "that does not\nsupport 'SHOW CREATE TRIGGERS' " "statement. Skipping..\n"; DBUG_ENTER("dump_trigger_old"); if (opt_xml) { print_xml_comment(sql_file, strlen(xml_msg), xml_msg); check_io(sql_file); DBUG_VOID_RETURN; } fprintf(sql_file, "--\n" "-- WARNING: old server version. 
" "The following dump may be incomplete.\n" "--\n"); if (opt_compact) fprintf(sql_file, "/*!50003 SET @OLD_SQL_MODE=@@SQL_MODE*/;\n"); if (opt_drop_trigger) fprintf(sql_file, "/*!50032 DROP TRIGGER IF EXISTS %s */;\n", (*show_trigger_row)[0]); fprintf(sql_file, "DELIMITER ;;\n" "/*!50003 SET SESSION SQL_MODE=\"%s\" */;;\n" "/*!50003 CREATE */ ", (*show_trigger_row)[6]); if (mysql_num_fields(show_triggers_rs) > 7) { /* mysqldump can be run against the server, that does not support definer in triggers (there is no DEFINER column in SHOW TRIGGERS output). So, we should check if we have this column before accessing it. */ size_t user_name_len; char user_name_str[USERNAME_LENGTH + 1]; char quoted_user_name_str[USERNAME_LENGTH * 2 + 3]; size_t host_name_len; char host_name_str[HOSTNAME_LENGTH + 1]; char quoted_host_name_str[HOSTNAME_LENGTH * 2 + 3]; parse_user((*show_trigger_row)[7], strlen((*show_trigger_row)[7]), user_name_str, &user_name_len, host_name_str, &host_name_len); fprintf(sql_file, "/*!50017 DEFINER=%s@%s */ ", quote_name(user_name_str, quoted_user_name_str, FALSE), quote_name(host_name_str, quoted_host_name_str, FALSE)); } fprintf(sql_file, "/*!50003 TRIGGER %s %s %s ON %s FOR EACH ROW%s%s */;;\n" "DELIMITER ;\n", quote_name((*show_trigger_row)[0], name_buff, 0), /* Trigger */ (*show_trigger_row)[4], /* Timing */ (*show_trigger_row)[1], /* Event */ quoted_table_name, (strchr(" \t\n\r", *((*show_trigger_row)[3]))) ? 
"" : " ", (*show_trigger_row)[3] /* Statement */); if (opt_compact) fprintf(sql_file, "/*!50003 SET SESSION SQL_MODE=@OLD_SQL_MODE */;\n"); DBUG_VOID_RETURN; } static int dump_trigger(FILE *sql_file, MYSQL_RES *show_create_trigger_rs, const char *db_name, const char *db_cl_name) { MYSQL_ROW row; char *query_str; int db_cl_altered= FALSE; DBUG_ENTER("dump_trigger"); while ((row= mysql_fetch_row(show_create_trigger_rs))) { if (opt_xml) { print_xml_row(sql_file, "trigger", show_create_trigger_rs, &row, "SQL Original Statement"); check_io(sql_file); continue; } query_str= cover_definer_clause(row[2], strlen(row[2]), C_STRING_WITH_LEN("50017"), C_STRING_WITH_LEN("50003"), C_STRING_WITH_LEN(" TRIGGER")); if (switch_db_collation(sql_file, db_name, ";", db_cl_name, row[5], &db_cl_altered)) DBUG_RETURN(TRUE); switch_cs_variables(sql_file, ";", row[3], /* character_set_client */ row[3], /* character_set_results */ row[4]); /* collation_connection */ switch_sql_mode(sql_file, ";", row[1]); if (opt_drop_trigger) fprintf(sql_file, "/*!50032 DROP TRIGGER IF EXISTS %s */;\n", row[0]); fprintf(sql_file, "DELIMITER ;;\n" "/*!50003 %s */;;\n" "DELIMITER ;\n", (const char *) (query_str != NULL ? query_str : row[2])); restore_sql_mode(sql_file, ";"); restore_cs_variables(sql_file, ";"); if (db_cl_altered) { if (restore_db_collation(sql_file, db_name, ";", db_cl_name)) DBUG_RETURN(TRUE); } my_free(query_str); } DBUG_RETURN(FALSE); } /** Dump the triggers for a given table. This should be called after the tables have been dumped in case a trigger depends on the existence of a table. @param[in] table_name @param[in] db_name @return Error status. @retval TRUE error has occurred. @retval FALSE operation succeed. 
*/

static int dump_triggers_for_table(char *table_name, char *db_name)
{
  char       name_buff[NAME_LEN*4+3];
  char       query_buff[QUERY_LENGTH];
  uint       old_opt_compatible_mode= opt_compatible_mode;
  MYSQL_RES  *show_triggers_rs;
  MYSQL_ROW  row;
  FILE      *sql_file= md_result_file;

  char       db_cl_name[MY_CS_NAME_SIZE];
  int        ret= TRUE;

  DBUG_ENTER("dump_triggers_for_table");
  DBUG_PRINT("enter", ("db: %s, table_name: %s", db_name, table_name));

  /* When dumping to per-table files (-T), append to the table's own file. */
  if (path && !(sql_file= open_sql_file_for_table(table_name,
                                                  O_WRONLY | O_APPEND)))
    DBUG_RETURN(1);

  /* Do not use ANSI_QUOTES on triggers in dump */
  opt_compatible_mode&= ~MASK_ANSI_QUOTES;

  if (switch_character_set_results(mysql, "binary"))
    goto done;

  /* Get database collation. */
  if (fetch_db_collation(db_name, db_cl_name, sizeof (db_cl_name)))
    goto done;

  /* Get list of triggers. */

  my_snprintf(query_buff, sizeof(query_buff),
              "SHOW TRIGGERS LIKE %s",
              quote_for_like(table_name, name_buff));

  if (mysql_query_with_error_report(mysql, &show_triggers_rs, query_buff))
    goto done;

  /* Dump triggers. */

  if (! mysql_num_rows(show_triggers_rs))
    goto skip;

  if (opt_xml)
    print_xml_tag(sql_file, "\t", "\n", "triggers", "name=",
                  table_name, NullS);

  while ((row= mysql_fetch_row(show_triggers_rs)))
  {
    my_snprintf(query_buff, sizeof (query_buff),
                "SHOW CREATE TRIGGER %s",
                quote_name(row[0], name_buff, TRUE));

    if (mysql_query(mysql, query_buff))
    {
      /*
        mysqldump is being run against old server, that does not support
        SHOW CREATE TRIGGER statement. We should use SHOW TRIGGERS output.

        NOTE: the dump may be incorrect, as old SHOW TRIGGERS does not
        provide all the necessary information to restore trigger properly.
      */
      dump_trigger_old(sql_file, show_triggers_rs, &row, table_name);
    }
    else
    {
      MYSQL_RES *show_create_trigger_rs= mysql_store_result(mysql);

      if (!show_create_trigger_rs ||
          dump_trigger(sql_file, show_create_trigger_rs, db_name, db_cl_name))
      {
        /*
          BUG FIX: both result sets were leaked on this error path; the
          plain "goto done" bypassed every mysql_free_result() call.
        */
        if (show_create_trigger_rs)
          mysql_free_result(show_create_trigger_rs);
        mysql_free_result(show_triggers_rs);
        goto done;
      }

      mysql_free_result(show_create_trigger_rs);
    }
  }

  if (opt_xml)
  {
    fputs("\t</triggers>\n", sql_file);
    check_io(sql_file);
  }

skip:
  mysql_free_result(show_triggers_rs);

  if (switch_character_set_results(mysql, default_charset))
    goto done;

  ret= FALSE;

done:
  /*
    BUG FIX: make sure to set back opt_compatible mode to original value
    on *every* exit path; error exits used to leave MASK_ANSI_QUOTES
    stripped for the remainder of the dump.
  */
  opt_compatible_mode= old_opt_compatible_mode;

  if (path)
    my_fclose(sql_file, MYF(0));

  DBUG_RETURN(ret);
}

/*
  Append a LOAD DATA option ("option_value" already quoted/escaped as
  needed) to str; a NULL option_value means the option is omitted.
*/
static void add_load_option(DYNAMIC_STRING *str, const char *option,
                            const char *option_value)
{
  if (!option_value)
  {
    /* Null value means we don't add this option. */
    return;
  }

  dynstr_append_checked(str, option);

  if (strncmp(option_value, "0x", sizeof("0x")-1) == 0)
  {
    /* It's a hex constant, don't escape */
    dynstr_append_checked(str, option_value);
  }
  else
  {
    /* char constant; escape */
    field_escape(str, option_value);
  }
}

/*
  Allow the user to specify field terminator strings like:
  "'", "\", "\\" (escaped backslash), "\t" (tab), "\n" (newline)
  This is done by doubling ' and add a end -\ if needed to avoid
  syntax errors from the SQL parser.
*/

static void field_escape(DYNAMIC_STRING* in, const char *from)
{
  uint end_backslashes= 0;

  dynstr_append_checked(in, "'");

  while (*from)
  {
    dynstr_append_mem_checked(in, from, 1);

    if (*from == '\\')
      end_backslashes^=1;    /* find odd number of backslashes */
    else
    {
      if (*from == '\'' && !end_backslashes)
      {
        /* We want a duplicate of "'" for MySQL */
        dynstr_append_checked(in, "\'");
      }
      end_backslashes=0;
    }
    from++;
  }
  /* Add missing backslashes if user has specified odd number of backs.*/
  if (end_backslashes)
    dynstr_append_checked(in, "\\");

  dynstr_append_checked(in, "'");
}


/* Allocate a query buffer of the requested size; die on out-of-memory. */
static char *alloc_query_str(ulong size)
{
  char *query;

  if (!(query= (char*) my_malloc(PSI_NOT_INSTRUMENTED,
                                 size, MYF(MY_WME))))
    die(EX_MYSQLERR, "Couldn't allocate a query string.");

  return query;
}


/*

 SYNOPSIS
  dump_table()

  dump_table saves database contents as a series of INSERT statements.

  ARGS
   table - table name
   db    - db name

   RETURNS
    void
*/

static void dump_table(char *table, char *db)
{
  char ignore_flag;
  char buf[200], table_buff[NAME_LEN+3];
  DYNAMIC_STRING query_string;
  char table_type[NAME_LEN];
  char *result_table, table_buff2[NAME_LEN*2+3], *opt_quoted_table;
  int error= 0;
  ulong         rownr, row_break, total_length, init_length;
  uint num_fields;
  MYSQL_RES     *res;
  MYSQL_FIELD   *field;
  MYSQL_ROW     row;
  DBUG_ENTER("dump_table");

  /*
    Make sure you get the create table info before the following check for
    --no-data flag below. Otherwise, the create table info won't be printed.
  */
  num_fields= get_table_structure(table, db, table_type, &ignore_flag);

  /*
    The "table" could be a view. If so, we don't do anything here.
  */
  if (strcmp(table_type, "VIEW") == 0)
    DBUG_VOID_RETURN;

  /* Check --no-data flag */
  if (opt_no_data)
  {
    verbose_msg("-- Skipping dump data for table '%s', --no-data was used\n",
                table);
    DBUG_VOID_RETURN;
  }

  DBUG_PRINT("info",
             ("ignore_flag: %x num_fields: %d", (int) ignore_flag,
              num_fields));
  /*
    If the table type is a merge table or any type that has to be
    _completely_ ignored and no data dumped
  */
  if (ignore_flag & IGNORE_DATA)
  {
    verbose_msg("-- Warning: Skipping data for table '%s' because " \
                "it's of type %s\n", table, table_type);
    DBUG_VOID_RETURN;
  }
  /* Check that there are any fields in the table */
  if (num_fields == 0)
  {
    verbose_msg("-- Skipping dump data for table '%s', it has no fields\n",
                table);
    DBUG_VOID_RETURN;
  }

  result_table= quote_name(table,table_buff, 1);
  opt_quoted_table= quote_name(table, table_buff2, 0);

  verbose_msg("-- Sending SELECT query...\n");

  init_dynamic_string_checked(&query_string, "", 1024, 1024);

  if (path)
  {
    /* --tab mode: dump data server-side with SELECT ... INTO OUTFILE. */
    char filename[FN_REFLEN], tmp_path[FN_REFLEN];

    /*
      Convert the path to native os format
      and resolve to the full filepath.
    */
    convert_dirname(tmp_path,path,NullS);
    my_load_path(tmp_path, tmp_path, NULL);
    fn_format(filename, table, tmp_path, ".txt", MYF(MY_UNPACK_FILENAME));

    /* Must delete the file that 'INTO OUTFILE' will write to */
    my_delete(filename, MYF(0));

    /* convert to a unix path name to stick into the query */
    to_unix_path(filename);

    /* now build the query string */

    dynstr_append_checked(&query_string, "SELECT /*!40001 SQL_NO_CACHE */ * INTO OUTFILE '");
    dynstr_append_checked(&query_string, filename);
    dynstr_append_checked(&query_string, "'");

    dynstr_append_checked(&query_string, " /*!50138 CHARACTER SET ");
    dynstr_append_checked(&query_string, default_charset ==
                          mysql_universal_client_charset ?
                          my_charset_bin.name : /* backward compatibility */
                          default_charset);
    dynstr_append_checked(&query_string, " */");

    if (fields_terminated || enclosed || opt_enclosed || escaped)
      dynstr_append_checked(&query_string, " FIELDS");

    add_load_option(&query_string, " TERMINATED BY ", fields_terminated);
    add_load_option(&query_string, " ENCLOSED BY ", enclosed);
    add_load_option(&query_string, " OPTIONALLY ENCLOSED BY ", opt_enclosed);
    add_load_option(&query_string, " ESCAPED BY ", escaped);
    add_load_option(&query_string, " LINES TERMINATED BY ", lines_terminated);

    dynstr_append_checked(&query_string, " FROM ");
    dynstr_append_checked(&query_string, result_table);

    if (where)
    {
      dynstr_append_checked(&query_string, " WHERE ");
      dynstr_append_checked(&query_string, where);
    }

    if (order_by)
    {
      dynstr_append_checked(&query_string, " ORDER BY ");
      dynstr_append_checked(&query_string, order_by);
    }

    if (mysql_real_query(mysql, query_string.str, query_string.length))
    {
      DB_error(mysql, "when executing 'SELECT INTO OUTFILE'");
      dynstr_free(&query_string);
      DBUG_VOID_RETURN;
    }
  }
  else
  {
    /* Normal mode: fetch the rows and emit INSERT statements ourselves. */
    print_comment(md_result_file, 0,
                  "\n--\n-- Dumping data for table %s\n--\n", result_table);

    dynstr_append_checked(&query_string, "SELECT /*!40001 SQL_NO_CACHE */ * FROM ");
    dynstr_append_checked(&query_string, result_table);

    if (where)
    {
      print_comment(md_result_file, 0, "-- WHERE: %s\n", where);
      dynstr_append_checked(&query_string, " WHERE ");
      dynstr_append_checked(&query_string, where);
    }
    if (order_by)
    {
      print_comment(md_result_file, 0, "-- ORDER BY: %s\n", order_by);
      dynstr_append_checked(&query_string, " ORDER BY ");
      dynstr_append_checked(&query_string, order_by);
    }

    if (!opt_xml && !opt_compact)
    {
      fputs("\n", md_result_file);
      check_io(md_result_file);
    }
    if (mysql_query_with_error_report(mysql, 0, query_string.str))
    {
      DB_error(mysql, "when retrieving data from server");
      goto err;
    }
    /* --quick streams rows one at a time instead of buffering them all. */
    if (quick)
      res=mysql_use_result(mysql);
    else
      res=mysql_store_result(mysql);
    if (!res)
    {
      DB_error(mysql, "when retrieving data from server");
      goto err;
    }

    verbose_msg("-- Retrieving rows...\n");
    if (mysql_num_fields(res) != num_fields)
    {
      fprintf(stderr,"%s: Error in field count for table: %s ! Aborting.\n",
              my_progname, result_table);
      error= EX_CONSCHECK;
      goto err;
    }

    if (opt_lock)
    {
      fprintf(md_result_file,"LOCK TABLES %s WRITE;\n", opt_quoted_table);
      check_io(md_result_file);
    }
    /* Moved disable keys to after lock per bug 15977 */
    if (opt_disable_keys)
    {
      fprintf(md_result_file, "/*!40000 ALTER TABLE %s DISABLE KEYS */;\n",
              opt_quoted_table);
      check_io(md_result_file);
    }

    total_length= opt_net_buffer_length;                /* Force row break */
    row_break=0;
    rownr=0;
    init_length=(uint) insert_pat.length+4;
    if (opt_xml)
      print_xml_tag(md_result_file, "\t", "\n", "table_data", "name=", table,
                    NullS);
    if (opt_autocommit)
    {
      fprintf(md_result_file, "set autocommit=0;\n");
      check_io(md_result_file);
    }

    while ((row= mysql_fetch_row(res)))
    {
      uint i;
      ulong *lengths= mysql_fetch_lengths(res);
      rownr++;
      if (!extended_insert && !opt_xml)
      {
        fputs(insert_pat.str,md_result_file);
        check_io(md_result_file);
      }
      mysql_field_seek(res,0);

      if (opt_xml)
      {
        fputs("\t<row>\n", md_result_file);
        check_io(md_result_file);
      }

      for (i= 0; i < mysql_num_fields(res); i++)
      {
        int is_blob;
        ulong length= lengths[i];

        if (!(field= mysql_fetch_field(res)))
          die(EX_CONSCHECK,
              "Not enough fields from table %s! Aborting.\n",
              result_table);

        /*
          63 is my_charset_bin. If charsetnr is not 63,
          we have not a BLOB but a TEXT column.
          we'll dump in hex only BLOB columns.
        */
        is_blob= (opt_hex_blob && field->charsetnr == 63 &&
                  (field->type == MYSQL_TYPE_BIT ||
                   field->type == MYSQL_TYPE_STRING ||
                   field->type == MYSQL_TYPE_VAR_STRING ||
                   field->type == MYSQL_TYPE_VARCHAR ||
                   field->type == MYSQL_TYPE_BLOB ||
                   field->type == MYSQL_TYPE_LONG_BLOB ||
                   field->type == MYSQL_TYPE_MEDIUM_BLOB ||
                   field->type == MYSQL_TYPE_TINY_BLOB)) ? 1 : 0;
        if (extended_insert && !opt_xml)
        {
          /* Build one value tuple of a multi-row INSERT in extended_row. */
          if (i == 0)
            dynstr_set_checked(&extended_row,"(");
          else
            dynstr_append_checked(&extended_row,",");

          if (row[i])
          {
            if (length)
            {
              if (!(field->flags & NUM_FLAG))
              {
                /*
                  "length * 2 + 2" is OK for both HEX and non-HEX modes:
                  - In HEX mode we need exactly 2 bytes per character
                  plus 2 bytes for '0x' prefix.
                  - In non-HEX mode we need up to 2 bytes per character,
                  plus 2 bytes for leading and trailing '\'' characters.
                  Also we need to reserve 1 byte for terminating '\0'.
                */
                dynstr_realloc_checked(&extended_row,length * 2 + 2 + 1);
                if (opt_hex_blob && is_blob)
                {
                  dynstr_append_checked(&extended_row, "0x");
                  extended_row.length+= mysql_hex_string(extended_row.str +
                                                         extended_row.length,
                                                         row[i], length);
                  DBUG_ASSERT(extended_row.length+1 <= extended_row.max_length);
                  /* mysql_hex_string() already terminated string by '\0' */
                  DBUG_ASSERT(extended_row.str[extended_row.length] == '\0');
                }
                else
                {
                  dynstr_append_checked(&extended_row,"'");
                  extended_row.length +=
                  mysql_real_escape_string(&mysql_connection,
                                           &extended_row.str[extended_row.length],
                                           row[i],length);
                  extended_row.str[extended_row.length]='\0';
                  dynstr_append_checked(&extended_row,"'");
                }
              }
              else
              {
                /* change any strings ("inf", "-inf", "nan") into NULL */
                char *ptr= row[i];
                if (my_isalpha(charset_info, *ptr) || (*ptr == '-' &&
                    my_isalpha(charset_info, ptr[1])))
                  dynstr_append_checked(&extended_row, "NULL");
                else
                {
                  if (field->type == MYSQL_TYPE_DECIMAL)
                  {
                    /* add " signs around */
                    dynstr_append_checked(&extended_row, "'");
                    dynstr_append_checked(&extended_row, ptr);
                    dynstr_append_checked(&extended_row, "'");
                  }
                  else
                    dynstr_append_checked(&extended_row, ptr);
                }
              }
            }
            else
              dynstr_append_checked(&extended_row,"''");
          }
          else
            dynstr_append_checked(&extended_row,"NULL");
        }
        else
        {
          /* Plain (one-row INSERT) or XML output for this column. */
          if (i && !opt_xml)
          {
            fputc(',', md_result_file);
            check_io(md_result_file);
          }
          if (row[i])
          {
            if (!(field->flags & NUM_FLAG))
            {
              if (opt_xml)
              {
                if (opt_hex_blob && is_blob && length)
                {
                  /* Define xsi:type="xs:hexBinary" for hex encoded data */
                  print_xml_tag(md_result_file, "\t\t", "", "field", "name=",
                                field->name, "xsi:type=", "xs:hexBinary", NullS);
                  print_blob_as_hex(md_result_file, row[i], length);
                }
                else
                {
                  print_xml_tag(md_result_file, "\t\t", "", "field", "name=",
                                field->name, NullS);
                  print_quoted_xml(md_result_file, row[i], length, 0);
                }
                fputs("</field>\n", md_result_file);
              }
              else if (opt_hex_blob && is_blob && length)
              {
                fputs("0x", md_result_file);
                print_blob_as_hex(md_result_file, row[i], length);
              }
              else
                unescape(md_result_file, row[i], length);
            }
            else
            {
              /* change any strings ("inf", "-inf", "nan") into NULL */
              char *ptr= row[i];
              if (opt_xml)
              {
                print_xml_tag(md_result_file, "\t\t", "", "field", "name=",
                              field->name, NullS);
                fputs(!my_isalpha(charset_info, *ptr) ? ptr: "NULL",
                      md_result_file);
                fputs("</field>\n", md_result_file);
              }
              else if (my_isalpha(charset_info, *ptr) ||
                       (*ptr == '-' && my_isalpha(charset_info, ptr[1])))
                fputs("NULL", md_result_file);
              else if (field->type == MYSQL_TYPE_DECIMAL)
              {
                /* add " signs around */
                fputc('\'', md_result_file);
                fputs(ptr, md_result_file);
                fputc('\'', md_result_file);
              }
              else
                fputs(ptr, md_result_file);
            }
          }
          else
          {
            /* The field value is NULL */
            if (!opt_xml)
              fputs("NULL", md_result_file);
            else
              print_xml_null_tag(md_result_file, "\t\t", "field name=",
                                 field->name, "\n");
          }
          check_io(md_result_file);
        }
      }

      if (opt_xml)
      {
        fputs("\t</row>\n", md_result_file);
        check_io(md_result_file);
      }

      if (extended_insert)
      {
        /*
          Flush the accumulated tuple; start a new INSERT statement when
          the current one would exceed opt_net_buffer_length.
        */
        ulong row_length;
        dynstr_append_checked(&extended_row,")");
        row_length= 2 + extended_row.length;
        if (total_length + row_length < opt_net_buffer_length)
        {
          total_length+= row_length;
          fputc(',',md_result_file);            /* Always row break */
          fputs(extended_row.str,md_result_file);
        }
        else
        {
          if (row_break)
            fputs(";\n", md_result_file);
          row_break=1;                          /* This is first row */

          fputs(insert_pat.str,md_result_file);
          fputs(extended_row.str,md_result_file);
          total_length= row_length+init_length;
        }
        check_io(md_result_file);
      }
      else if (!opt_xml)
      {
        fputs(");\n", md_result_file);
check_io(md_result_file);
      }
    }

    /* XML - close table tag and supress regular output */
    if (opt_xml)
      fputs("\t</table_data>\n", md_result_file);
    else if (extended_insert && row_break)
      fputs(";\n", md_result_file);             /* If not empty table */
    fflush(md_result_file);
    check_io(md_result_file);
    if (mysql_errno(mysql))
    {
      my_snprintf(buf, sizeof(buf),
                  "%s: Error %d: %s when dumping table %s at row: %ld\n",
                  my_progname,
                  mysql_errno(mysql),
                  mysql_error(mysql),
                  result_table,
                  rownr);
      fputs(buf,stderr);
      error= EX_CONSCHECK;
      goto err;
    }

    /* Moved enable keys to before unlock per bug 15977 */
    if (opt_disable_keys)
    {
      fprintf(md_result_file,"/*!40000 ALTER TABLE %s ENABLE KEYS */;\n",
              opt_quoted_table);
      check_io(md_result_file);
    }
    if (opt_lock)
    {
      fputs("UNLOCK TABLES;\n", md_result_file);
      check_io(md_result_file);
    }
    if (opt_autocommit)
    {
      fprintf(md_result_file, "commit;\n");
      check_io(md_result_file);
    }
    mysql_free_result(res);
  }
  dynstr_free(&query_string);
  DBUG_VOID_RETURN;

err:
  dynstr_free(&query_string);
  maybe_exit(error);
  DBUG_VOID_RETURN;
} /* dump_table */


/*
  Iterate over the tables of the current database.  The SHOW TABLES
  result set is cached between calls; pass reset != 0 when the list is
  exhausted to rewind it for another pass instead of freeing it.
*/
static char *getTableName(int reset)
{
  static MYSQL_RES *res= NULL;
  MYSQL_ROW    row;

  if (!res)
  {
    if (!(res= mysql_list_tables(mysql,NullS)))
      return(NULL);
  }
  if ((row= mysql_fetch_row(res)))
    return((char*) row[0]);

  if (reset)
    mysql_data_seek(res,0);      /* We want to read again */
  else
  {
    mysql_free_result(res);
    res= NULL;
  }
  return(NULL);
} /* getTableName */


/*
  dump all logfile groups and tablespaces
*/

static int dump_all_tablespaces()
{
  return dump_tablespaces(NULL);
}

/*
  Dump only the tablespaces used by the given tables of one database,
  by restricting dump_tablespaces() with an extra WHERE fragment built
  from INFORMATION_SCHEMA.PARTITIONS.
*/
static int dump_tablespaces_for_tables(char *db, char **table_names, int tables)
{
  DYNAMIC_STRING where;
  int r;
  int i;
  char name_buff[NAME_LEN*2+3];

  mysql_real_escape_string(mysql, name_buff, db, strlen(db));

  init_dynamic_string_checked(&where, " AND TABLESPACE_NAME IN ("
                      "SELECT DISTINCT TABLESPACE_NAME FROM"
                      " INFORMATION_SCHEMA.PARTITIONS"
                      " WHERE"
                      " TABLE_SCHEMA='", 256, 1024);
  dynstr_append_checked(&where, name_buff);
  dynstr_append_checked(&where, "' AND TABLE_NAME IN (");

  for (i=0 ; i<tables ; i++)
  {
    mysql_real_escape_string(mysql, name_buff,
                             table_names[i], strlen(table_names[i]));

    dynstr_append_checked(&where, "'");
    dynstr_append_checked(&where, name_buff);
    dynstr_append_checked(&where, "',");
  }
  dynstr_trunc(&where, 1);              /* chop the trailing comma */
  dynstr_append_checked(&where,"))");

  DBUG_PRINT("info",("Dump TS for Tables where: %s",where.str));
  r= dump_tablespaces(where.str);
  dynstr_free(&where);
  return r;
}

/*
  Dump only the tablespaces used by the given (NULL-terminated) list
  of databases.
*/
static int dump_tablespaces_for_databases(char** databases)
{
  DYNAMIC_STRING where;
  int r;
  int i;

  init_dynamic_string_checked(&where, " AND TABLESPACE_NAME IN ("
                      "SELECT DISTINCT TABLESPACE_NAME FROM"
                      " INFORMATION_SCHEMA.PARTITIONS"
                      " WHERE"
                      " TABLE_SCHEMA IN (", 256, 1024);

  for (i=0 ; databases[i]!=NULL ; i++)
  {
    char db_name_buff[NAME_LEN*2+3];
    mysql_real_escape_string(mysql, db_name_buff,
                             databases[i], strlen(databases[i]));
    dynstr_append_checked(&where, "'");
    dynstr_append_checked(&where, db_name_buff);
    dynstr_append_checked(&where, "',");
  }
  dynstr_trunc(&where, 1);              /* chop the trailing comma */
  dynstr_append_checked(&where,"))");

  DBUG_PRINT("info",("Dump TS for DBs where: %s",where.str));
  r= dump_tablespaces(where.str);
  dynstr_free(&where);
  return r;
}

/*
  Dump CREATE/ALTER LOGFILE GROUP and CREATE/ALTER TABLESPACE
  statements from INFORMATION_SCHEMA.FILES (NDB).  ts_where is an
  optional extra WHERE fragment (may be NULL) built by the helpers
  above.  Returns 0 on success, 1 on error.
*/
static int dump_tablespaces(char* ts_where)
{
  MYSQL_ROW row;
  MYSQL_RES *tableres;
  char buf[FN_REFLEN];
  DYNAMIC_STRING sqlbuf;
  int first= 0;
  /*
    The following are used for parsing the EXTRA field
  */
  char extra_format[]= "UNDO_BUFFER_SIZE=";
  char *ubs;
  char *endsemi;
  DBUG_ENTER("dump_tablespaces");

  init_dynamic_string_checked(&sqlbuf,
                      "SELECT LOGFILE_GROUP_NAME,"
                      " FILE_NAME,"
                      " TOTAL_EXTENTS,"
                      " INITIAL_SIZE,"
                      " ENGINE,"
                      " EXTRA"
                      " FROM INFORMATION_SCHEMA.FILES"
                      " WHERE FILE_TYPE = 'UNDO LOG'"
                      " AND FILE_NAME IS NOT NULL",
                      256, 1024);
  if(ts_where)
  {
    dynstr_append_checked(&sqlbuf,
                  " AND LOGFILE_GROUP_NAME IN ("
                  "SELECT DISTINCT LOGFILE_GROUP_NAME"
                  " FROM INFORMATION_SCHEMA.FILES"
                  " WHERE FILE_TYPE = 'DATAFILE'"
                  );
    dynstr_append_checked(&sqlbuf, ts_where);
    dynstr_append_checked(&sqlbuf, ")");
  }
  dynstr_append_checked(&sqlbuf,
                " GROUP BY LOGFILE_GROUP_NAME, FILE_NAME"
                ", ENGINE"
                " ORDER BY LOGFILE_GROUP_NAME");

  if (mysql_query(mysql, sqlbuf.str) ||
      !(tableres = mysql_store_result(mysql)))
  {
    dynstr_free(&sqlbuf);
    /* Pre-5.1 servers have no I_S.FILES table: silently skip. */
    if (mysql_errno(mysql) == ER_BAD_TABLE_ERROR ||
        mysql_errno(mysql) == ER_BAD_DB_ERROR ||
        mysql_errno(mysql) == ER_UNKNOWN_TABLE)
    {
      fprintf(md_result_file,
              "\n--\n-- Not dumping tablespaces as no INFORMATION_SCHEMA.FILES"
              " table on this server\n--\n");
      check_io(md_result_file);
      DBUG_RETURN(0);
    }

    my_printf_error(0, "Error: '%s' when trying to dump tablespaces",
                    MYF(0), mysql_error(mysql));
    DBUG_RETURN(1);
  }

  buf[0]= 0;
  while ((row= mysql_fetch_row(tableres)))
  {
    /* "first" means a new logfile group: emit CREATE, else ALTER. */
    if (strcmp(buf, row[0]) != 0)
      first= 1;
    if (first)
    {
      print_comment(md_result_file, 0, "\n--\n-- Logfile group: %s\n--\n",
                    row[0]);

      fprintf(md_result_file, "\nCREATE");
    }
    else
    {
      fprintf(md_result_file, "\nALTER");
    }
    fprintf(md_result_file,
            " LOGFILE GROUP %s\n"
            " ADD UNDOFILE '%s'\n",
            row[0],
            row[1]);
    if (first)
    {
      /* Extract UNDO_BUFFER_SIZE=... (up to ';') from the EXTRA column. */
      ubs= strstr(row[5],extra_format);
      if(!ubs)
        break;
      ubs+= strlen(extra_format);
      endsemi= strstr(ubs,";");
      if(endsemi)
        endsemi[0]= '\0';
      fprintf(md_result_file,
              " UNDO_BUFFER_SIZE %s\n",
              ubs);
    }
    fprintf(md_result_file,
            " INITIAL_SIZE %s\n"
            " ENGINE=%s;\n",
            row[3],
            row[4]);
    check_io(md_result_file);
    if (first)
    {
      first= 0;
      strxmov(buf, row[0], NullS);
    }
  }
  dynstr_free(&sqlbuf);
  mysql_free_result(tableres);

  init_dynamic_string_checked(&sqlbuf,
                      "SELECT DISTINCT TABLESPACE_NAME,"
                      " FILE_NAME,"
                      " LOGFILE_GROUP_NAME,"
                      " EXTENT_SIZE,"
                      " INITIAL_SIZE,"
                      " ENGINE"
                      " FROM INFORMATION_SCHEMA.FILES"
                      " WHERE FILE_TYPE = 'DATAFILE'",
                      256, 1024);

  if(ts_where)
    dynstr_append_checked(&sqlbuf, ts_where);

  dynstr_append_checked(&sqlbuf, " ORDER BY TABLESPACE_NAME, LOGFILE_GROUP_NAME");

  if (mysql_query_with_error_report(mysql, &tableres, sqlbuf.str))
  {
    dynstr_free(&sqlbuf);
    DBUG_RETURN(1);
  }

  buf[0]= 0;
  while ((row= mysql_fetch_row(tableres)))
  {
    /* Same CREATE-then-ALTER pattern for tablespaces/datafiles. */
    if (strcmp(buf, row[0]) != 0)
      first= 1;
    if (first)
    {
      print_comment(md_result_file, 0, "\n--\n-- Tablespace: %s\n--\n", row[0]);
      fprintf(md_result_file, "\nCREATE");
    }
    else
    {
      fprintf(md_result_file, "\nALTER");
    }
    fprintf(md_result_file,
            " TABLESPACE %s\n"
            " ADD DATAFILE '%s'\n",
            row[0],
            row[1]);
    if (first)
    {
      fprintf(md_result_file,
              " USE LOGFILE GROUP %s\n"
              " EXTENT_SIZE %s\n",
              row[2],
              row[3]);
    }
    fprintf(md_result_file,
            " INITIAL_SIZE %s\n"
            " ENGINE=%s;\n",
            row[4],
            row[5]);
    check_io(md_result_file);
    if (first)
    {
      first= 0;
      strxmov(buf, row[0], NullS);
    }
  }

  mysql_free_result(tableres);
  dynstr_free(&sqlbuf);
  DBUG_RETURN(0);
}

/*
  Return 1 if the given database is the "ndbinfo" schema of a server
  that has NDB installed (checked once via SHOW VARIABLES and cached);
  such databases are skipped by the dump.
*/
static int is_ndbinfo(MYSQL* mysql, const char* dbname)
{
  static int checked_ndbinfo= 0;
  static int have_ndbinfo= 0;

  if (!checked_ndbinfo)
  {
    MYSQL_RES *res;
    MYSQL_ROW row;
    char buf[32], query[64];

    my_snprintf(query, sizeof(query),
                "SHOW VARIABLES LIKE %s", quote_for_like("ndbinfo_version", buf));

    checked_ndbinfo= 1;

    if (mysql_query_with_error_report(mysql, &res, query))
      return 0;

    if (!(row= mysql_fetch_row(res)))
    {
      mysql_free_result(res);
      return 0;
    }

    have_ndbinfo= 1;
    mysql_free_result(res);
  }

  if (!have_ndbinfo)
    return 0;

  if (my_strcasecmp(&my_charset_latin1, dbname, "ndbinfo") == 0)
    return 1;

  return 0;
}

/*
  Dump every user database on the server (tables first, then a second
  pass over views if any were seen), skipping INFORMATION_SCHEMA,
  PERFORMANCE_SCHEMA and ndbinfo.
*/
static int dump_all_databases()
{
  MYSQL_ROW row;
  MYSQL_RES *tableres;
  int result=0;

  if (mysql_query_with_error_report(mysql, &tableres, "SHOW DATABASES"))
    return 1;
  while ((row= mysql_fetch_row(tableres)))
  {
    if (mysql_get_server_version(mysql) >= FIRST_INFORMATION_SCHEMA_VERSION &&
        !my_strcasecmp(&my_charset_latin1, row[0], INFORMATION_SCHEMA_DB_NAME))
      continue;

    if (mysql_get_server_version(mysql) >= FIRST_PERFORMANCE_SCHEMA_VERSION &&
        !my_strcasecmp(&my_charset_latin1, row[0], PERFORMANCE_SCHEMA_DB_NAME))
      continue;

    if (is_ndbinfo(mysql, row[0]))
      continue;

    if (dump_all_tables_in_db(row[0]))
      result=1;
  }
  if (seen_views)
  {
    /* Second pass: views, dumped after all tables exist. */
    if (mysql_query(mysql, "SHOW DATABASES") ||
        !(tableres= mysql_store_result(mysql)))
    {
      my_printf_error(0, "Error: Couldn't execute 'SHOW DATABASES': %s",
                      MYF(0), mysql_error(mysql));
      return 1;
    }
    while ((row=
mysql_fetch_row(tableres)))
    {
      if (mysql_get_server_version(mysql) >= FIRST_INFORMATION_SCHEMA_VERSION &&
          !my_strcasecmp(&my_charset_latin1, row[0], INFORMATION_SCHEMA_DB_NAME))
        continue;

      if (mysql_get_server_version(mysql) >= FIRST_PERFORMANCE_SCHEMA_VERSION &&
          !my_strcasecmp(&my_charset_latin1, row[0], PERFORMANCE_SCHEMA_DB_NAME))
        continue;

      if (is_ndbinfo(mysql, row[0]))
        continue;

      if (dump_all_views_in_db(row[0]))
        result=1;
    }
  }
  return result;
}
/* dump_all_databases */


/*
  Dump each database named in the NULL-terminated db_names list
  (tables first, then views).  Returns 0 on success, 1 if any database
  failed.
*/
static int dump_databases(char **db_names)
{
  int result=0;
  char **db;
  DBUG_ENTER("dump_databases");

  for (db= db_names ; *db ; db++)
  {
    if (dump_all_tables_in_db(*db))
      result=1;
  }
  if (!result && seen_views)
  {
    for (db= db_names ; *db ; db++)
    {
      if (dump_all_views_in_db(*db))
        result=1;
    }
  }
  DBUG_RETURN(result);
} /* dump_databases */


/*
View Specific database initalization.

SYNOPSIS
  init_dumping_views
  qdatabase      quoted name of the database

RETURN VALUES
  0        Success.
  1        Failure.
*/
int init_dumping_views(char *qdatabase __attribute__((unused)))
{
  /* Views need no CREATE DATABASE preamble; the tables pass emitted it. */
  return 0;
} /* init_dumping_views */


/*
Table Specific database initalization.

SYNOPSIS
  init_dumping_tables
  qdatabase      quoted name of the database

RETURN VALUES
  0        Success.
  1        Failure.
*/
int init_dumping_tables(char *qdatabase)
{
  DBUG_ENTER("init_dumping_tables");

  if (!opt_create_db)
  {
    char qbuf[256];
    MYSQL_ROW row;
    MYSQL_RES *dbinfo;

    my_snprintf(qbuf, sizeof(qbuf),
                "SHOW CREATE DATABASE IF NOT EXISTS %s", qdatabase);

    if (mysql_query(mysql, qbuf) || !(dbinfo = mysql_store_result(mysql)))
    {
      /* Old server version, dump generic CREATE DATABASE */
      if (opt_drop_database)
        fprintf(md_result_file,
                "\n/*!40000 DROP DATABASE IF EXISTS %s*/;\n",
                qdatabase);
      fprintf(md_result_file,
              "\nCREATE DATABASE /*!32312 IF NOT EXISTS*/ %s;\n",
              qdatabase);
    }
    else
    {
      if (opt_drop_database)
        fprintf(md_result_file,
                "\n/*!40000 DROP DATABASE IF EXISTS %s*/;\n",
                qdatabase);
      row = mysql_fetch_row(dbinfo);
      if (row[1])
      {
        fprintf(md_result_file,"\n%s;\n",row[1]);
      }
      mysql_free_result(dbinfo);
    }
  }
  DBUG_RETURN(0);
} /* init_dumping_tables */


/*
  Common per-database setup: select the database, optionally emit the
  CREATE DATABASE/USE preamble via init_func, and prepare extended_row.
  Returns 0 on success, 1 on error (callers continue under --force).
*/
static int init_dumping(char *database, int init_func(char*))
{
  if (is_ndbinfo(mysql, database))
  {
    verbose_msg("-- Skipping dump of ndbinfo database\n");
    return 0;
  }

  if (mysql_select_db(mysql, database))
  {
    DB_error(mysql, "when selecting the database");
    return 1;                   /* If --force */
  }
  if (!path && !opt_xml)
  {
    if (opt_databases || opt_alldbs)
    {
      /*
        length of table name * 2 (if name contains quotes), 2 quotes and 0
      */
      char quoted_database_buf[NAME_LEN*2+3];
      char *qdatabase= quote_name(database,quoted_database_buf,opt_quoted);

      print_comment(md_result_file, 0,
                    "\n--\n-- Current Database: %s\n--\n", qdatabase);

      /* Call the view or table specific function */
      init_func(qdatabase);

      fprintf(md_result_file,"\nUSE %s;\n", qdatabase);
      check_io(md_result_file);
    }
  }
  if (extended_insert)
    init_dynamic_string_checked(&extended_row, "", 1024, 1024);
  return 0;
} /* init_dumping */


/* Return 1 if we should copy the table */

my_bool include_table(const uchar *hash_key, size_t len)
{
  /* hash_key is "db.tablename"; ignore_table holds --ignore-table entries. */
  return ! my_hash_search(&ignore_table, hash_key, len);
}


/*
  Dump the structure, data, triggers, events and routines of every
  non-ignored table in the given database.  Returns 0 on success,
  1 on error.
*/
static int dump_all_tables_in_db(char *database)
{
  char *table;
  uint numrows;
  char table_buff[NAME_LEN*2+3];
  char hash_key[2*NAME_LEN+2];  /* "db.tablename" */
  char *afterdot;
  my_bool general_log_table_exists= 0, slow_log_table_exists=0;
  int using_mysql_db= !my_strcasecmp(charset_info, database, "mysql");
  DBUG_ENTER("dump_all_tables_in_db");

  afterdot= my_stpcpy(hash_key, database);
  *afterdot++= '.';

  if (init_dumping(database, init_dumping_tables))
    DBUG_RETURN(1);
  if (opt_xml)
    print_xml_tag(md_result_file, "", "\n", "database", "name=", database, NullS);

  if (lock_tables)
  {
    DYNAMIC_STRING query;
    init_dynamic_string_checked(&query, "LOCK TABLES ", 256, 1024);
    for (numrows= 0 ; (table= getTableName(1)) ; )
    {
      char *end= my_stpcpy(afterdot, table);
      if (include_table((uchar*) hash_key,end - hash_key))
      {
        numrows++;
        dynstr_append_checked(&query, quote_name(table, table_buff, 1));
        dynstr_append_checked(&query, " READ /*!32311 LOCAL */,");
      }
    }
    /* query.length-1 drops the trailing comma. */
    if (numrows && mysql_real_query(mysql, query.str, query.length-1))
      DB_error(mysql, "when using LOCK TABLES");
            /* We shall continue here, if --force was given */
    dynstr_free(&query);
  }
  if (flush_logs)
  {
    if (mysql_refresh(mysql, REFRESH_LOG))
      DB_error(mysql, "when doing refresh");
           /* We shall continue here, if --force was given */
    else
      verbose_msg("-- dump_all_tables_in_db : logs flushed successfully!\n");
  }
  while ((table= getTableName(0)))
  {
    char *end= my_stpcpy(afterdot, table);
    if (include_table((uchar*) hash_key, end - hash_key))
    {
      dump_table(table,database);
      my_free(order_by);
      order_by= 0;
      if (opt_dump_triggers && mysql_get_server_version(mysql) >= 50009)
      {
        if (dump_triggers_for_table(table, database))
        {
          /*
            trigger dump failed, error printed,
            now dump the rest of the tables
          */
          if (path)
            my_fclose(md_result_file, MYF(MY_WME));
          maybe_exit(EX_MYSQLERR);
        }
      }
    }
    else
    {
      /*
        If general_log and slow_log exists in the 'mysql' database,
        we should dump the table structure. But we cannot
        call get_table_structure() here as 'LOCK TABLES' query got executed
        above on the session and that 'LOCK TABLES' query does not contain
        'general_log' and 'slow_log' tables. (you cannot acquire lock on
        log tables). Hence mark the existence of these log tables here and
        after 'UNLOCK TABLES' query is executed on the session, get the
        table structure from server and dump it in the file.
      */
      if (using_mysql_db)
      {
        if (!my_strcasecmp(charset_info, table, "general_log"))
          general_log_table_exists= 1;
        else if (!my_strcasecmp(charset_info, table, "slow_log"))
          slow_log_table_exists= 1;
      }
    }
  }
  if (opt_events && mysql_get_server_version(mysql) >= 50106)
  {
    DBUG_PRINT("info", ("Dumping events for database %s", database));
    dump_events_for_db(database);
  }
  if (opt_routines && mysql_get_server_version(mysql) >= 50009)
  {
    DBUG_PRINT("info", ("Dumping routines for database %s", database));
    dump_routines_for_db(database);
  }
  if (opt_xml)
  {
    fputs("</database>\n", md_result_file);
    check_io(md_result_file);
  }
  if (lock_tables)
    (void) mysql_query_with_error_report(mysql, 0, "UNLOCK TABLES");
  if (using_mysql_db)
  {
    /* Now that tables are unlocked, dump the log tables' structure. */
    char table_type[NAME_LEN];
    char ignore_flag;
    if (general_log_table_exists)
    {
      if (!get_table_structure((char *) "general_log",
                               database, table_type, &ignore_flag) )
        verbose_msg("-- Warning: get_table_structure() failed with some internal "
                    "error for 'general_log' table\n");
    }
    if (slow_log_table_exists)
    {
      if (!get_table_structure((char *) "slow_log",
                               database, table_type, &ignore_flag) )
        verbose_msg("-- Warning: get_table_structure() failed with some internal "
                    "error for 'slow_log' table\n");
    }
  }
  if (flush_privileges && using_mysql_db)
  {
    fprintf(md_result_file,"\n--\n-- Flush Grant Tables \n--\n");
    fprintf(md_result_file,"\n/*! FLUSH PRIVILEGES */;\n");
  }
  DBUG_RETURN(0);
} /* dump_all_tables_in_db */


/*
   dump structure of views of database

   SYNOPSIS
     dump_all_views_in_db()
     database  database name

  RETURN
    0 OK
    1 ERROR
*/

static my_bool dump_all_views_in_db(char *database)
{
  char *table;
  uint numrows;
  char table_buff[NAME_LEN*2+3];
  char hash_key[2*NAME_LEN+2];  /* "db.tablename" */
  char *afterdot;

  afterdot= my_stpcpy(hash_key, database);
  *afterdot++= '.';

  if (init_dumping(database, init_dumping_views))
    return 1;
  if (opt_xml)
    print_xml_tag(md_result_file, "", "\n", "database", "name=", database, NullS);
  if (lock_tables)
  {
    DYNAMIC_STRING query;
    init_dynamic_string_checked(&query, "LOCK TABLES ", 256, 1024);
    for (numrows= 0 ; (table= getTableName(1)); )
    {
      char *end= my_stpcpy(afterdot, table);
      if (include_table((uchar*) hash_key,end - hash_key))
      {
        numrows++;
        dynstr_append_checked(&query, quote_name(table, table_buff, 1));
        dynstr_append_checked(&query, " READ /*!32311 LOCAL */,");
      }
    }
    if (numrows && mysql_real_query(mysql, query.str, query.length-1))
      DB_error(mysql, "when using LOCK TABLES");
            /* We shall continue here, if --force was given */
    dynstr_free(&query);
  }
  if (flush_logs)
  {
    if (mysql_refresh(mysql, REFRESH_LOG))
      DB_error(mysql, "when doing refresh");
           /* We shall continue here, if --force was given */
    else
      verbose_msg("-- dump_all_views_in_db : logs flushed successfully!\n");
  }
  while ((table= getTableName(0)))
  {
    char *end= my_stpcpy(afterdot, table);
    if (include_table((uchar*) hash_key, end - hash_key))
      get_view_structure(table, database);
  }
  if (opt_xml)
  {
    fputs("</database>\n", md_result_file);
    check_io(md_result_file);
  }
  if (lock_tables)
    (void) mysql_query_with_error_report(mysql, 0, "UNLOCK TABLES");
  return 0;
} /* dump_all_views_in_db */


/*
  get_actual_table_name -- executes a SHOW TABLES LIKE '%s' to get the actual
  table name from the server for the table name given on the command line.
  we do this because the table name given on the command line may be a
  different case (e.g.
  T1 vs t1)

  RETURN
    pointer to the table name
    0 if error
*/
static char *get_actual_table_name(const char *old_table_name, MEM_ROOT *root)
{
  char *name= 0;
  MYSQL_RES  *table_res;
  MYSQL_ROW  row;
  char query[50 + 2*NAME_LEN];
  char show_name_buff[FN_REFLEN];
  DBUG_ENTER("get_actual_table_name");

  /* Check memory for quote_for_like() */
  DBUG_ASSERT(2*sizeof(old_table_name) < sizeof(show_name_buff));
  my_snprintf(query, sizeof(query), "SHOW TABLES LIKE %s",
              quote_for_like(old_table_name, show_name_buff));

  if (mysql_query_with_error_report(mysql, 0, query))
    DBUG_RETURN(NullS);

  if ((table_res= mysql_store_result(mysql)))
  {
    my_ulonglong num_rows= mysql_num_rows(table_res);
    if (num_rows > 0)
    {
      ulong *lengths;
      /*
        Return first row
        TODO: Return all matching rows
      */
      row= mysql_fetch_row(table_res);
      lengths= mysql_fetch_lengths(table_res);
      /* Copy into the caller's MEM_ROOT so the name outlives the result. */
      name= strmake_root(root, row[0], lengths[0]);
    }
    mysql_free_result(table_res);
  }
  DBUG_PRINT("exit", ("new_table_name: %s", name));
  DBUG_RETURN(name);
}


/*
  Dump the explicitly named tables (and views) of one database, as
  given on the command line.  Table names are first resolved to their
  server-side case via get_actual_table_name().  Returns 0 on success,
  1 on error.
*/
static int dump_selected_tables(char *db, char **table_names, int tables)
{
  char table_buff[NAME_LEN*2+3];
  DYNAMIC_STRING lock_tables_query;
  MEM_ROOT root;
  char **dump_tables, **pos, **end;
  DBUG_ENTER("dump_selected_tables");

  if (init_dumping(db, init_dumping_tables))
    DBUG_RETURN(1);

  init_alloc_root(PSI_NOT_INSTRUMENTED, &root, 8192, 0);
  if (!(dump_tables= pos= (char**) alloc_root(&root, tables * sizeof(char *))))
     die(EX_EOM, "alloc_root failure.");

  init_dynamic_string_checked(&lock_tables_query, "LOCK TABLES ", 256, 1024);
  for (; tables > 0 ; tables-- , table_names++)
  {
    /* the table name passed on commandline may be wrong case */
    if ((*pos= get_actual_table_name(*table_names, &root)))
    {
      /* Add found table name to lock_tables_query */
      if (lock_tables)
      {
        dynstr_append_checked(&lock_tables_query, quote_name(*pos, table_buff, 1));
        dynstr_append_checked(&lock_tables_query, " READ /*!32311 LOCAL */,");
      }
      pos++;
    }
    else
    {
      if (!opt_force)
      {
        dynstr_free(&lock_tables_query);
        free_root(&root, MYF(0));
      }
      maybe_die(EX_ILLEGAL_TABLE, "Couldn't find table: \"%s\"", *table_names);
      /* We shall countinue here, if --force was given */
    }
  }
  end= pos;

  /* Can't LOCK TABLES in I_S / P_S, so don't try. */
  if (lock_tables &&
      !(mysql_get_server_version(mysql) >= FIRST_INFORMATION_SCHEMA_VERSION &&
        !my_strcasecmp(&my_charset_latin1, db, INFORMATION_SCHEMA_DB_NAME)) &&
      !(mysql_get_server_version(mysql) >= FIRST_PERFORMANCE_SCHEMA_VERSION &&
        !my_strcasecmp(&my_charset_latin1, db, PERFORMANCE_SCHEMA_DB_NAME)))
  {
    /* lock_tables_query.length-1 drops the trailing comma. */
    if (mysql_real_query(mysql, lock_tables_query.str,
                         lock_tables_query.length-1))
    {
      if (!opt_force)
      {
        dynstr_free(&lock_tables_query);
        free_root(&root, MYF(0));
      }
      DB_error(mysql, "when doing LOCK TABLES");
       /* We shall countinue here, if --force was given */
    }
  }
  dynstr_free(&lock_tables_query);
  if (flush_logs)
  {
    if (mysql_refresh(mysql, REFRESH_LOG))
    {
      if (!opt_force)
        free_root(&root, MYF(0));
      DB_error(mysql, "when doing refresh");
    }
     /* We shall countinue here, if --force was given */
    else
      verbose_msg("-- dump_selected_tables : logs flushed successfully!\n");
  }
  if (opt_xml)
    print_xml_tag(md_result_file, "", "\n", "database", "name=", db, NullS);

  /* Dump each selected table */
  for (pos= dump_tables; pos < end; pos++)
  {
    DBUG_PRINT("info",("Dumping table %s", *pos));
    dump_table(*pos, db);
    if (opt_dump_triggers &&
        mysql_get_server_version(mysql) >= 50009)
    {
      if (dump_triggers_for_table(*pos, db))
      {
        if (path)
          my_fclose(md_result_file, MYF(MY_WME));
        maybe_exit(EX_MYSQLERR);
      }
    }
  }

  /* Dump each selected view */
  if (seen_views)
  {
    for (pos= dump_tables; pos < end; pos++)
      get_view_structure(*pos, db);
  }
  if (opt_events && mysql_get_server_version(mysql) >= 50106)
  {
    DBUG_PRINT("info", ("Dumping events for database %s", db));
    dump_events_for_db(db);
  }
  /* obtain dump of routines (procs/functions) */
  if (opt_routines && mysql_get_server_version(mysql) >= 50009)
  {
    DBUG_PRINT("info", ("Dumping routines for database %s", db));
    dump_routines_for_db(db);
  }
  free_root(&root, MYF(0));
  my_free(order_by);
  order_by= 0;
  if (opt_xml)
  {
    fputs("</database>\n", md_result_file);
    check_io(md_result_file);
  }
  if (lock_tables)
    (void) mysql_query_with_error_report(mysql, 0, "UNLOCK TABLES");
  DBUG_RETURN(0);
} /* dump_selected_tables */


/*
  Emit a CHANGE MASTER statement for the binlog file/position reported
  by SHOW MASTER STATUS (commented out when --master-data=2).
  Returns 0 on success, 1 on error.
*/
static int do_show_master_status(MYSQL *mysql_con)
{
  MYSQL_ROW row;
  MYSQL_RES *master;
  const char *comment_prefix=
    (opt_master_data == MYSQL_OPT_MASTER_DATA_COMMENTED_SQL) ? "-- " : "";
  if (mysql_query_with_error_report(mysql_con, &master, "SHOW MASTER STATUS"))
  {
    return 1;
  }
  else
  {
    row= mysql_fetch_row(master);
    if (row && row[0] && row[1])
    {
      /* SHOW MASTER STATUS reports file and position */
      print_comment(md_result_file, 0,
                    "\n--\n-- Position to start replication or point-in-time "
                    "recovery from\n--\n\n");
      fprintf(md_result_file,
              "%sCHANGE MASTER TO MASTER_LOG_FILE='%s', MASTER_LOG_POS=%s;\n",
              comment_prefix, row[0], row[1]);
      check_io(md_result_file);
    }
    else if (!opt_force)
    {
      /* SHOW MASTER STATUS reports nothing and --force is not enabled */
      my_printf_error(0, "Error: Binlogging on server not active", MYF(0));
      mysql_free_result(master);
      maybe_exit(EX_MYSQLERR);
      return 1;
    }
    mysql_free_result(master);
  }
  return 0;
}

/*
  Stop the slave SQL thread if it is running, so the dump captures a
  consistent replication position.  Returns 0 on success, 1 on error.
*/
static int do_stop_slave_sql(MYSQL *mysql_con)
{
  MYSQL_RES *slave;
  /* We need to check if the slave sql is running in the first place */
  if (mysql_query_with_error_report(mysql_con, &slave, "SHOW SLAVE STATUS"))
    return(1);
  else
  {
    MYSQL_ROW row= mysql_fetch_row(slave);
    if (row && row[11])
    {
      /* if SLAVE SQL is not running, we don't stop it */
      if (!strcmp(row[11],"No"))
      {
        mysql_free_result(slave);
        /* Silently assume that they don't have the slave running */
        return(0);
      }
    }
  }
  mysql_free_result(slave);

  /* now, stop slave if running */
  if (mysql_query_with_error_report(mysql_con, 0, "STOP SLAVE SQL_THREAD"))
    return(1);

  return(0);
}

/* Write a STOP SLAVE statement at the top of the dump (--dump-slave). */
static int add_stop_slave(void)
{
  if (opt_comments)
    fprintf(md_result_file,
            "\n--\n-- stop slave statement to make a recovery dump)\n--\n\n");
  fprintf(md_result_file, "STOP SLAVE;\n");
  return(0);
}

/* Write a START SLAVE statement at the end of the dump (--dump-slave). */
static int add_slave_statements(void)
{
  if (opt_comments)
    fprintf(md_result_file,
            "\n--\n-- start slave statement to make a recovery dump)\n--\n\n");
  fprintf(md_result_file, "START SLAVE;\n");
  return(0);
}

/*
  Emit a CHANGE MASTER statement pointing at this slave's master, from
  SHOW SLAVE STATUS (commented out when --dump-slave=2).  row[9] is
  Master_Log_File, row[21] is Exec_Master_Log_Pos.
  Returns 0 on success, 1 on error.
*/
static int do_show_slave_status(MYSQL *mysql_con)
{
  MYSQL_RES *slave= NULL;
  const char *comment_prefix=
    (opt_slave_data == MYSQL_OPT_SLAVE_DATA_COMMENTED_SQL) ? "-- " : "";
  if (mysql_query_with_error_report(mysql_con, &slave, "SHOW SLAVE STATUS"))
  {
    if (!opt_force)
    {
      /* SHOW SLAVE STATUS reports nothing and --force is not enabled */
      my_printf_error(0, "Error: Slave not set up", MYF(0));
    }
    mysql_free_result(slave);
    return 1;
  }
  else
  {
    MYSQL_ROW row= mysql_fetch_row(slave);
    if (row && row[9] && row[21])
    {
      /* SHOW MASTER STATUS reports file and position */
      if (opt_comments)
        fprintf(md_result_file,
                "\n--\n-- Position to start replication or point-in-time "
                "recovery from (the master of this slave)\n--\n\n");

      fprintf(md_result_file, "%sCHANGE MASTER TO ", comment_prefix);

      if (opt_include_master_host_port)
      {
        if (row[1])
          fprintf(md_result_file, "MASTER_HOST='%s', ", row[1]);
        if (row[3])
          fprintf(md_result_file, "MASTER_PORT=%s, ", row[3]);
      }
      fprintf(md_result_file,
              "MASTER_LOG_FILE='%s', MASTER_LOG_POS=%s;\n", row[9], row[21]);

      check_io(md_result_file);
    }
    mysql_free_result(slave);
  }
  return 0;
}

/*
  Restart the slave SQL thread if it was stopped by do_stop_slave_sql().
  Returns 0 on success, 1 on error.
*/
static int do_start_slave_sql(MYSQL *mysql_con)
{
  MYSQL_RES *slave;
  /* We need to check if the slave sql is stopped in the first place */
  if (mysql_query_with_error_report(mysql_con, &slave, "SHOW SLAVE STATUS"))
    return(1);
  else
  {
    MYSQL_ROW row= mysql_fetch_row(slave);
    if (row && row[11])
    {
      /* if SLAVE SQL is not running, we don't start it */
      if (!strcmp(row[11],"Yes"))
      {
        mysql_free_result(slave);
        /* Silently assume that they don't have the slave running */
        return(0);
      }
    }
  }
  mysql_free_result(slave);

  /* now, start slave if stopped */
  if (mysql_query_with_error_report(mysql_con, 0, "START SLAVE"))
  {
    my_printf_error(0, "Error: Unable to start slave", MYF(0));
    return 1;
  }
  return(0);
}

static int
do_flush_tables_read_lock(MYSQL *mysql_con) { /* We do first a FLUSH TABLES. If a long update is running, the FLUSH TABLES will wait but will not stall the whole mysqld, and when the long update is done the FLUSH TABLES WITH READ LOCK will start and succeed quickly. So, FLUSH TABLES is to lower the probability of a stage where both mysqldump and most client connections are stalled. Of course, if a second long update starts between the two FLUSHes, we have that bad stall. */ return ( mysql_query_with_error_report(mysql_con, 0, ((opt_master_data != 0) ? "FLUSH /*!40101 LOCAL */ TABLES" : "FLUSH TABLES")) || mysql_query_with_error_report(mysql_con, 0, "FLUSH TABLES WITH READ LOCK") ); } static int do_unlock_tables(MYSQL *mysql_con) { return mysql_query_with_error_report(mysql_con, 0, "UNLOCK TABLES"); } static int get_bin_log_name(MYSQL *mysql_con, char* buff_log_name, uint buff_len) { MYSQL_RES *res; MYSQL_ROW row; if (mysql_query(mysql_con, "SHOW MASTER STATUS") || !(res= mysql_store_result(mysql))) return 1; if (!(row= mysql_fetch_row(res))) { mysql_free_result(res); return 1; } /* Only one row is returned, and the first column is the name of the active log. */ strmake(buff_log_name, row[0], buff_len - 1); mysql_free_result(res); return 0; } static int purge_bin_logs_to(MYSQL *mysql_con, char* log_name) { DYNAMIC_STRING str; int err; init_dynamic_string_checked(&str, "PURGE BINARY LOGS TO '", 1024, 1024); dynstr_append_checked(&str, log_name); dynstr_append_checked(&str, "'"); err = mysql_query_with_error_report(mysql_con, 0, str.str); dynstr_free(&str); return err; } static int start_transaction(MYSQL *mysql_con) { verbose_msg("-- Starting transaction...\n"); /* We use BEGIN for old servers. --single-transaction --master-data will fail on old servers, but that's ok as it was already silently broken (it didn't do a consistent read, so better tell people frankly, with the error). 
    We want the first consistent read to be used for all tables to dump so we
    need the REPEATABLE READ level (not anything lower, for example READ
    COMMITTED would give one new consistent read per dumped table).
  */
  if ((mysql_get_server_version(mysql_con) < 40100) && opt_master_data)
  {
    fprintf(stderr, "-- %s: the combination of --single-transaction and "
            "--master-data requires a MySQL server version of at least 4.1 "
            "(current server's version is %s). %s\n",
            opt_force ? "Warning" : "Error",
            mysql_con->server_version ? mysql_con->server_version : "unknown",
            opt_force ? "Continuing due to --force, backup may not be "
            "consistent across all tables!" : "Aborting.");
    if (!opt_force)
      exit(EX_MYSQLERR);
  }

  return (mysql_query_with_error_report(mysql_con, 0,
                                        "SET SESSION TRANSACTION ISOLATION "
                                        "LEVEL REPEATABLE READ") ||
          mysql_query_with_error_report(mysql_con, 0,
                                        "START TRANSACTION "
                                        "/*!40100 WITH CONSISTENT SNAPSHOT */"));
}


/*
  Parse the comma-separated list in x (of given length) against typelib
  'lib' and return the matched elements OR-ed together as a bit mask
  (bit N-1 set for element N of the typelib).  On an unknown element,
  *err_pos / *err_len identify the offending token; parsing continues,
  so the caller must test *err_pos afterwards.
*/
static ulong find_set(TYPELIB *lib, const char *x, uint length,
                      char **err_pos, uint *err_len)
{
  const char *end= x + length;
  ulong found= 0;
  uint find;
  char buff[255];

  *err_pos= 0;                  /* No error yet */
  /* Strip trailing whitespace before parsing */
  while (end > x && my_isspace(charset_info, end[-1]))
    end--;

  *err_len= 0;
  if (x != end)
  {
    const char *start= x;
    for (;;)
    {
      const char *pos= start;
      uint var_len;

      /* Advance to the next comma (or end of input) */
      for (; pos != end && *pos != ','; pos++) ;
      var_len= (uint) (pos - start);
      strmake(buff, start, MY_MIN(sizeof(buff) - 1, var_len));
      find= find_type(buff, lib, FIND_TYPE_BASIC);
      if (!find)
      {
        *err_pos= (char*) start;
        *err_len= var_len;
      }
      else
        found|= ((longlong) 1 << (find - 1));
      if (pos == end)
        break;
      start= pos + 1;
    }
  }
  return found;
}


/*
  Print a value with a prefix on file.
  Scans the field metadata of 'result' for the column called 'name' and
  prints the corresponding value from 'row' (unescaped if string_value),
  skipping empty/"0" defaults.
*/
static void print_value(FILE *file, MYSQL_RES *result, MYSQL_ROW row,
                        const char *prefix, const char *name,
                        int string_value)
{
  MYSQL_FIELD *field;

  mysql_field_seek(result, 0);
  for ( ; (field= mysql_fetch_field(result)) ; row++)
  {
    if (!strcmp(field->name,name))
    {
      if (row[0] && row[0][0] && strcmp(row[0],"0")) /* Skip default */
      {
        fputc(' ',file);
        fputs(prefix, file);
        if (string_value)
          unescape(file,row[0],(uint) strlen(row[0]));
        else
          fputs(row[0], file);
        check_io(file);
        return;
      }
    }
  }
  return;                                       /* This shouldn't happen */
} /* print_value */


/*
  SYNOPSIS

  Check if the table is one of the table types that should be ignored:
  MRG_ISAM, MRG_MYISAM.

  If the table should be altogether ignored, it returns a TRUE, FALSE if it
  should not be ignored.

  ARGS

    check_if_ignore_table()
    table_name                  Table name to check
    table_type                  Type of table

  GLOBAL VARIABLES
    mysql                       MySQL connection
    verbose                     Write warning messages

  RETURN
    char (bit value)            See IGNORE_ values at top
*/
char check_if_ignore_table(const char *table_name, char *table_type)
{
  char result= IGNORE_NONE;
  char buff[FN_REFLEN+80], show_name_buff[FN_REFLEN];
  MYSQL_RES *res= NULL;
  MYSQL_ROW row;
  DBUG_ENTER("check_if_ignore_table");

  /* Check memory for quote_for_like() */
  DBUG_ASSERT(2*sizeof(table_name) < sizeof(show_name_buff));
  my_snprintf(buff, sizeof(buff), "show table status like %s",
              quote_for_like(table_name, show_name_buff));
  if (mysql_query_with_error_report(mysql, &res, buff))
  {
    if (mysql_errno(mysql) != ER_PARSE_ERROR)
    {                                   /* If old MySQL version */
      verbose_msg("-- Warning: Couldn't get status information for "
                  "table %s (%s)\n", table_name, mysql_error(mysql));
      DBUG_RETURN(result);                       /* assume table is ok */
    }
    /* NOTE(review): on ER_PARSE_ERROR we fall through with res still
       NULL, and mysql_fetch_row(NULL) below looks unsafe -- verify
       against mysql_query_with_error_report()'s contract. */
  }
  if (!(row= mysql_fetch_row(res)))
  {
    fprintf(stderr,
            "Error: Couldn't read status information for table %s (%s)\n",
            table_name, mysql_error(mysql));
    mysql_free_result(res);
    DBUG_RETURN(result);                         /* assume table is ok */
  }
  /* A NULL engine column (row[1]) means the "table" is a view. */
  if (!(row[1]))
    strmake(table_type, "VIEW", NAME_LEN-1);
  else
  {
    strmake(table_type, row[1], NAME_LEN-1);

    /*  If these two types, we want to skip dumping the table.
 */
    if (!opt_no_data &&
        (!my_strcasecmp(&my_charset_latin1, table_type, "MRG_MyISAM") ||
         !strcmp(table_type,"MRG_ISAM") ||
         !strcmp(table_type,"FEDERATED")))
      result= IGNORE_DATA;
  }
  mysql_free_result(res);
  DBUG_RETURN(result);
}


/*
  Get string of comma-separated primary key field names

  SYNOPSIS
    char *primary_key_fields(const char *table_name)
    RETURNS     pointer to allocated buffer (must be freed by caller)
    table_name  quoted table name

  DESCRIPTION
    Use SHOW KEYS FROM table_name, allocate a buffer to hold the
    field names, and then build that string and return the pointer
    to that buffer.

    Returns NULL if there is no PRIMARY or UNIQUE key on the table,
    or if there is some failure.  It is better to continue to dump
    the table unsorted, rather than exit without dumping the data.
*/
static char *primary_key_fields(const char *table_name)
{
  MYSQL_RES  *res= NULL;
  MYSQL_ROW  row;
  /* SHOW KEYS FROM + table name * 2 (escaped) + 2 quotes + \0 */
  char show_keys_buff[15 + NAME_LEN * 2 + 3];
  uint result_length= 0;
  char *result= 0;
  char buff[NAME_LEN * 2 + 3];
  char *quoted_field;

  my_snprintf(show_keys_buff, sizeof(show_keys_buff),
              "SHOW KEYS FROM %s", table_name);
  if (mysql_query(mysql, show_keys_buff) ||
      !(res= mysql_store_result(mysql)))
  {
    fprintf(stderr, "Warning: Couldn't read keys from table %s;"
            " records are NOT sorted (%s)\n",
            table_name, mysql_error(mysql));
    /* Don't exit, because it's better to print out unsorted records */
    goto cleanup;
  }

  /*
   * Figure out the length of the ORDER BY clause result.
   * Note that SHOW KEYS is ordered:  a PRIMARY key is always the first
   * row, and UNIQUE keys come before others.  So we only need to check
   * the first key, not all keys.
   */
  /* row[1] is Non_unique; 0 means PRIMARY or UNIQUE.  row[3] is
     Seq_in_index, so the loop collects the columns of that first key. */
  if ((row= mysql_fetch_row(res)) && atoi(row[1]) == 0)
  {
    /* Key is unique */
    do
    {
      quoted_field= quote_name(row[4], buff, 0);
      result_length+= strlen(quoted_field) + 1; /* + 1 for ',' or \0 */
    } while ((row= mysql_fetch_row(res)) && atoi(row[3]) > 1);
  }

  /* Build the ORDER BY clause result */
  if (result_length)
  {
    char *end;
    /* result (terminating \0 is already in result_length) */
    result= my_malloc(PSI_NOT_INSTRUMENTED, result_length + 10, MYF(MY_WME));
    if (!result)
    {
      fprintf(stderr, "Error: Not enough memory to store ORDER BY clause\n");
      goto cleanup;
    }
    /* Second pass over the same rows to build the string. */
    mysql_data_seek(res, 0);
    row= mysql_fetch_row(res);
    quoted_field= quote_name(row[4], buff, 0);
    end= my_stpcpy(result, quoted_field);
    while ((row= mysql_fetch_row(res)) && atoi(row[3]) > 1)
    {
      quoted_field= quote_name(row[4], buff, 0);
      end= strxmov(end, ",", quoted_field, NullS);
    }
  }

cleanup:
  if (res)
    mysql_free_result(res);

  return result;
}


/*
  Replace a substring

  SYNOPSIS
    replace
    ds_str      The string to search and perform the replace in
    search_str  The string to search for
    search_len  Length of the string to search for
    replace_str The string to replace with
    replace_len Length of the string to replace with

  RETURN
    0 String replaced
    1 Could not find search_str in str
*/
static int replace(DYNAMIC_STRING *ds_str,
                   const char *search_str, ulong search_len,
                   const char *replace_str, ulong replace_len)
{
  DYNAMIC_STRING ds_tmp;
  const char *start= strstr(ds_str->str, search_str);
  if (!start)
    return 1;
  /* Rebuild the string as prefix + replacement + suffix. */
  init_dynamic_string_checked(&ds_tmp, "",
                              ds_str->length + replace_len, 256);
  dynstr_append_mem_checked(&ds_tmp, ds_str->str, start - ds_str->str);
  dynstr_append_mem_checked(&ds_tmp, replace_str, replace_len);
  dynstr_append_checked(&ds_tmp, start + search_len);
  dynstr_set_checked(ds_str, ds_tmp.str);
  dynstr_free(&ds_tmp);
  return 0;
}


/**
  This function sets the session binlog in the dump file.
  When --set-gtid-purged is used, this function is called to
  disable the session binlog and at the end of the dump, to restore
  the session binlog.
@note: md_result_file should have been opened, before this function is called. @param[in] flag If FALSE, disable binlog. If TRUE and binlog disabled previously, restore the session binlog. */ static void set_session_binlog(my_bool flag) { static my_bool is_binlog_disabled= FALSE; if (!flag && !is_binlog_disabled) { fprintf(md_result_file, "SET @MYSQLDUMP_TEMP_LOG_BIN = @@SESSION.SQL_LOG_BIN;\n"); fprintf(md_result_file, "SET @@SESSION.SQL_LOG_BIN= 0;\n"); is_binlog_disabled= 1; } else if (flag && is_binlog_disabled) { fprintf(md_result_file, "SET @@SESSION.SQL_LOG_BIN = @MYSQLDUMP_TEMP_LOG_BIN;\n"); is_binlog_disabled= 0; } } /** This function gets the GTID_EXECUTED sets from the server and assigns those sets to GTID_PURGED in the dump file. @param[in] mysql_con connection to the server @retval FALSE succesfully printed GTID_PURGED sets in the dump file. @retval TRUE failed. */ static my_bool add_set_gtid_purged(MYSQL *mysql_con) { MYSQL_RES *gtid_purged_res; MYSQL_ROW gtid_set; ulong num_sets, idx; /* query to get the GTID_EXECUTED */ if (mysql_query_with_error_report(mysql_con, &gtid_purged_res, "SELECT @@GLOBAL.GTID_EXECUTED")) return TRUE; /* Proceed only if gtid_purged_res is non empty */ if ((num_sets= mysql_num_rows(gtid_purged_res)) > 0) { if (opt_comments) fprintf(md_result_file, "\n--\n-- GTID state at the beginning of the backup \n--\n\n"); fprintf(md_result_file,"SET @@GLOBAL.GTID_PURGED='"); /* formatting is not required, even for multiple gtid sets */ for (idx= 0; idx< num_sets-1; idx++) { gtid_set= mysql_fetch_row(gtid_purged_res); fprintf(md_result_file,"%s,", (char*)gtid_set[0]); } /* for the last set */ gtid_set= mysql_fetch_row(gtid_purged_res); /* close the SET expression */ fprintf(md_result_file,"%s';\n", (char*)gtid_set[0]); } return FALSE; /*success */ } /** This function processes the opt_set_gtid_purged option. This function also calls set_session_binlog() function before setting the SET @@GLOBAL.GTID_PURGED in the output. 
@param[in] mysql_con the connection to the server @retval FALSE successful according to the value of opt_set_gtid_purged. @retval TRUE fail. */ static my_bool process_set_gtid_purged(MYSQL* mysql_con) { MYSQL_RES *gtid_mode_res; MYSQL_ROW gtid_mode_row; char *gtid_mode_val= 0; char buf[32], query[64]; if (opt_set_gtid_purged_mode == SET_GTID_PURGED_OFF) return FALSE; /* nothing to be done */ /* Check if the server has the knowledge of GTIDs(pre mysql-5.6) or if the gtid_mode is ON or OFF. */ my_snprintf(query, sizeof(query), "SHOW VARIABLES LIKE %s", quote_for_like("gtid_mode", buf)); if (mysql_query_with_error_report(mysql_con, &gtid_mode_res, query)) return TRUE; gtid_mode_row = mysql_fetch_row(gtid_mode_res); /* gtid_mode_row is NULL for pre 5.6 versions. For versions >= 5.6, get the gtid_mode value from the second column. */ gtid_mode_val = gtid_mode_row ? (char*)gtid_mode_row[1] : NULL; if (gtid_mode_val && strcmp(gtid_mode_val, "OFF")) { /* For any gtid_mode !=OFF and irrespective of --set-gtid-purged being AUTO or ON, add GTID_PURGED in the output. */ if (opt_databases || !opt_alldbs || !opt_dump_triggers || !opt_routines || !opt_events) { fprintf(stderr,"Warning: A partial dump from a server that has GTIDs will " "by default include the GTIDs of all transactions, even " "those that changed suppressed parts of the database. If " "you don't want to restore GTIDs, pass " "--set-gtid-purged=OFF. To make a complete dump, pass " "--all-databases --triggers --routines --events. 
\n"); } set_session_binlog(FALSE); if (add_set_gtid_purged(mysql_con)) return TRUE; } else /* gtid_mode is off */ { if (opt_set_gtid_purged_mode == SET_GTID_PURGED_ON) { fprintf(stderr, "Error: Server has GTIDs disabled.\n"); return TRUE; } } return FALSE; } /* Getting VIEW structure SYNOPSIS get_view_structure() table view name db db name RETURN 0 OK 1 ERROR */ static my_bool get_view_structure(char *table, char* db) { MYSQL_RES *table_res; MYSQL_ROW row; MYSQL_FIELD *field; char *result_table, *opt_quoted_table; char table_buff[NAME_LEN*2+3]; char table_buff2[NAME_LEN*2+3]; char query[QUERY_LENGTH]; FILE *sql_file= md_result_file; DBUG_ENTER("get_view_structure"); if (opt_no_create_info) /* Don't write table creation info */ DBUG_RETURN(0); verbose_msg("-- Retrieving view structure for table %s...\n", table); result_table= quote_name(table, table_buff, 1); opt_quoted_table= quote_name(table, table_buff2, 0); if (switch_character_set_results(mysql, "binary")) DBUG_RETURN(1); my_snprintf(query, sizeof(query), "SHOW CREATE TABLE %s", result_table); if (mysql_query_with_error_report(mysql, &table_res, query)) { switch_character_set_results(mysql, default_charset); DBUG_RETURN(0); } /* Check if this is a view */ field= mysql_fetch_field_direct(table_res, 0); if (strcmp(field->name, "View") != 0) { switch_character_set_results(mysql, default_charset); verbose_msg("-- It's base table, skipped\n"); DBUG_RETURN(0); } /* If requested, open separate .sql file for this view */ if (path) { if (!(sql_file= open_sql_file_for_table(table, O_WRONLY))) DBUG_RETURN(1); write_header(sql_file, db); } print_comment(sql_file, 0, "\n--\n-- Final view structure for view %s\n--\n\n", result_table); /* Table might not exist if this view was dumped with --tab. 
 */
  fprintf(sql_file, "/*!50001 DROP TABLE IF EXISTS %s*/;\n", opt_quoted_table);
  if (opt_drop)
  {
    fprintf(sql_file, "/*!50001 DROP VIEW IF EXISTS %s*/;\n",
            opt_quoted_table);
    check_io(sql_file);
  }

  /* Ask information_schema for the view's metadata; on failure we fall
     back to the raw SHOW CREATE TABLE output below. */
  my_snprintf(query, sizeof(query),
              "SELECT CHECK_OPTION, DEFINER, SECURITY_TYPE, "
              " CHARACTER_SET_CLIENT, COLLATION_CONNECTION "
              "FROM information_schema.views "
              "WHERE table_name=\"%s\" AND table_schema=\"%s\"", table, db);

  if (mysql_query(mysql, query))
  {
    /*
      Use the raw output from SHOW CREATE TABLE if
      information_schema query fails.
    */
    row= mysql_fetch_row(table_res);
    fprintf(sql_file, "/*!50001 %s */;\n", row[1]);
    check_io(sql_file);
    mysql_free_result(table_res);
  }
  else
  {
    char *ptr;
    ulong *lengths;
    char search_buf[256], replace_buf[256];
    ulong search_len, replace_len;
    DYNAMIC_STRING ds_view;

    /* Save the result of SHOW CREATE TABLE in ds_view */
    row= mysql_fetch_row(table_res);
    lengths= mysql_fetch_lengths(table_res);
    init_dynamic_string_checked(&ds_view, row[1], lengths[1] + 1, 1024);
    mysql_free_result(table_res);

    /* Get the result from "select ... information_schema" */
    if (!(table_res= mysql_store_result(mysql)) ||
        !(row= mysql_fetch_row(table_res)))
    {
      if (table_res)
        mysql_free_result(table_res);
      dynstr_free(&ds_view);
      DB_error(mysql, "when trying to save the result of SHOW CREATE TABLE in ds_view.");
      DBUG_RETURN(1);
    }

    lengths= mysql_fetch_lengths(table_res);

    /*
      "WITH %s CHECK OPTION" is available from 5.0.2
      Surround it with !50002 comments
    */
    if (strcmp(row[0], "NONE"))
    {
      ptr= search_buf;
      search_len=
        (ulong)(strxmov(ptr, "WITH ", row[0], " CHECK OPTION", NullS) - ptr);
      ptr= replace_buf;
      replace_len=
        (ulong)(strxmov(ptr, "*/\n/*!50002 WITH ", row[0],
                        " CHECK OPTION", NullS) - ptr);
      replace(&ds_view, search_buf, search_len, replace_buf, replace_len);
    }

    /*
      "DEFINER=%s SQL SECURITY %s" is available from 5.0.13
      Surround it with !50013 comments
    */
    {
      size_t     user_name_len;
      char       user_name_str[USERNAME_LENGTH + 1];
      char       quoted_user_name_str[USERNAME_LENGTH * 2 + 3];
      size_t     host_name_len;
      char       host_name_str[HOSTNAME_LENGTH + 1];
      char       quoted_host_name_str[HOSTNAME_LENGTH * 2 + 3];

      parse_user(row[1], lengths[1], user_name_str, &user_name_len,
                 host_name_str, &host_name_len);

      ptr= search_buf;
      search_len=
        (ulong)(strxmov(ptr, "DEFINER=",
                        quote_name(user_name_str, quoted_user_name_str, FALSE),
                        "@",
                        quote_name(host_name_str, quoted_host_name_str, FALSE),
                        " SQL SECURITY ", row[2], NullS) - ptr);
      ptr= replace_buf;
      replace_len=
        (ulong)(strxmov(ptr, "*/\n/*!50013 DEFINER=",
                        quote_name(user_name_str, quoted_user_name_str, FALSE),
                        "@",
                        quote_name(host_name_str, quoted_host_name_str, FALSE),
                        " SQL SECURITY ", row[2],
                        " */\n/*!50001", NullS) - ptr);
      replace(&ds_view, search_buf, search_len, replace_buf, replace_len);
    }

    /* Dump view structure to file, preserving and restoring the
       client's character-set/collation session state around it. */
    fprintf(sql_file,
            "/*!50001 SET @saved_cs_client = @@character_set_client */;\n"
            "/*!50001 SET @saved_cs_results = @@character_set_results */;\n"
            "/*!50001 SET @saved_col_connection = @@collation_connection */;\n"
            "/*!50001 SET character_set_client = %s */;\n"
            "/*!50001 SET character_set_results = %s */;\n"
            "/*!50001 SET collation_connection = %s */;\n"
            "/*!50001 %s */;\n"
            "/*!50001 SET character_set_client = @saved_cs_client */;\n"
            "/*!50001 SET character_set_results = @saved_cs_results */;\n"
            "/*!50001 SET collation_connection = @saved_col_connection */;\n",
            (const char *) row[3],
            (const char *) row[3],
            (const char *) row[4],
            (const char *) ds_view.str);

    check_io(sql_file);
    mysql_free_result(table_res);
    dynstr_free(&ds_view);
  }

  if (switch_character_set_results(mysql, default_charset))
    DBUG_RETURN(1);

  /* If a separate .sql file was opened, close it now */
  if (sql_file != md_result_file)
  {
    fputs("\n", sql_file);
    write_footer(sql_file);
    my_fclose(sql_file, MYF(MY_WME));
  }
  DBUG_RETURN(0);
}

/*
  The following functions are wrappers for the dynamic string functions
  and if they fail, the wrappers will terminate the current process.
*/

#define DYNAMIC_STR_ERROR_MSG "Couldn't perform DYNAMIC_STRING operation"

static void init_dynamic_string_checked(DYNAMIC_STRING *str, const char *init_str,
                                        uint init_alloc, uint alloc_increment)
{
  if (init_dynamic_string(str, init_str, init_alloc, alloc_increment))
    die(EX_MYSQLERR, DYNAMIC_STR_ERROR_MSG);
}

static void dynstr_append_checked(DYNAMIC_STRING* dest, const char* src)
{
  if (dynstr_append(dest, src))
    die(EX_MYSQLERR, DYNAMIC_STR_ERROR_MSG);
}

static void dynstr_set_checked(DYNAMIC_STRING *str, const char *init_str)
{
  if (dynstr_set(str, init_str))
    die(EX_MYSQLERR, DYNAMIC_STR_ERROR_MSG);
}

static void dynstr_append_mem_checked(DYNAMIC_STRING *str, const char *append,
                                      uint length)
{
  if (dynstr_append_mem(str, append, length))
    die(EX_MYSQLERR, DYNAMIC_STR_ERROR_MSG);
}

static void dynstr_realloc_checked(DYNAMIC_STRING *str, ulong additional_size)
{
  if (dynstr_realloc(str, additional_size))
    die(EX_MYSQLERR, DYNAMIC_STR_ERROR_MSG);
}


int main(int argc, char **argv)
{
  char bin_log_name[FN_REFLEN];
  int exit_code;
  MY_INIT("mysqldump");

  compatible_mode_normal_str[0]= 0;
  default_charset= (char
*)mysql_universal_client_charset;
  memset(&ignore_table, 0, sizeof(ignore_table));

  exit_code= get_options(&argc, &argv);
  if (exit_code)
  {
    free_resources();
    exit(exit_code);
  }

  /*
    Disable comments in xml mode if 'comments' option is not explicitly used.
  */
  if (opt_xml && !opt_comments_used)
    opt_comments= 0;

  if (log_error_file)
  {
    if(!(stderror_file= freopen(log_error_file, "a+", stderr)))
    {
      free_resources();
      exit(EX_MYSQLERR);
    }
  }

  if (connect_to_db(current_host, current_user, opt_password))
  {
    free_resources();
    exit(EX_MYSQLERR);
  }

  if (!path)
    write_header(md_result_file, *argv);

  /* Stop the slave SQL thread first so --dump-slave positions stay stable. */
  if (opt_slave_data && do_stop_slave_sql(mysql))
    goto err;

  if ((opt_lock_all_tables || opt_master_data ||
       (opt_single_transaction && flush_logs)) &&
      do_flush_tables_read_lock(mysql))
    goto err;

  /*
    Flush logs before starting transaction since
    this causes implicit commit starting mysql-5.5.
  */
  if (opt_lock_all_tables || opt_master_data ||
      (opt_single_transaction && flush_logs) ||
      opt_delete_master_logs)
  {
    if (flush_logs || opt_delete_master_logs)
    {
      if (mysql_refresh(mysql, REFRESH_LOG))
        goto err;
      verbose_msg("-- main : logs flushed successfully!\n");
    }

    /* Not anymore! That would not be sensible. */
    flush_logs= 0;
  }

  if (opt_delete_master_logs)
  {
    /* Remember the active binlog so it can be purged after a clean dump. */
    if (get_bin_log_name(mysql, bin_log_name, sizeof(bin_log_name)))
      goto err;
  }

  if (opt_single_transaction && start_transaction(mysql))
    goto err;

  /* Add 'STOP SLAVE to beginning of dump */
  if (opt_slave_apply && add_stop_slave())
    goto err;

  /* Process opt_set_gtid_purged and add SET @@GLOBAL.GTID_PURGED if required. */
  if (process_set_gtid_purged(mysql))
    goto err;

  if (opt_master_data && do_show_master_status(mysql))
    goto err;
  if (opt_slave_data && do_show_slave_status(mysql))
    goto err;
  if (opt_single_transaction &&
      do_unlock_tables(mysql))                  /* unlock but no commit! */
    goto err;

  if (opt_alltspcs)
    dump_all_tablespaces();

  if (opt_alldbs)
  {
    if (!opt_alltspcs && !opt_notspcs)
      dump_all_tablespaces();
    dump_all_databases();
  }
  else if (argc > 1 && !opt_databases)
  {
    /* Only one database and selected table(s) */
    if (!opt_alltspcs && !opt_notspcs)
      dump_tablespaces_for_tables(*argv, (argv + 1), (argc -1));
    dump_selected_tables(*argv, (argv + 1), (argc - 1));
  }
  else
  {
    /* One or more databases, all tables */
    if (!opt_alltspcs && !opt_notspcs)
      dump_tablespaces_for_databases(argv);
    dump_databases(argv);
  }

  /* if --dump-slave , start the slave sql thread */
  if (opt_slave_data && do_start_slave_sql(mysql))
    goto err;

  /*
    if --set-gtid-purged, restore binlog at the end of the session
    if required.
  */
  set_session_binlog(TRUE);

  /* add 'START SLAVE' to end of dump */
  if (opt_slave_apply && add_slave_statements())
    goto err;

  /* ensure dumped data flushed */
  if (md_result_file && fflush(md_result_file))
  {
    if (!first_error)
      first_error= EX_MYSQLERR;
    goto err;
  }
  /* everything successful, purge the old logs files */
  if (opt_delete_master_logs && purge_bin_logs_to(mysql, bin_log_name))
    goto err;

#if defined (_WIN32) && !defined (EMBEDDED_LIBRARY)
  my_free(shared_memory_base_name);
#endif
  /*
    No reason to explicitely COMMIT the transaction, neither to explicitely
    UNLOCK TABLES: these will be automatically be done by the server when we
    disconnect now. Saves some code here, some network trips, adds nothing to
    server.
  */
err:
  dbDisconnect(current_host);
  if (!path)
    write_footer(md_result_file);
  free_resources();

  if (stderror_file)
    fclose(stderror_file);

  return(first_error);
} /* main */
./CrossVul/dataset_final_sorted/CWE-284/c/good_1571_6
crossvul-cpp_data_bad_4770_1
/*****************************************************************************\ * src/slurmd/slurmd/req.c - slurmd request handling ***************************************************************************** * Copyright (C) 2002-2007 The Regents of the University of California. * Copyright (C) 2008-2010 Lawrence Livermore National Security. * Portions Copyright (C) 2010-2016 SchedMD LLC. * Portions copyright (C) 2015 Mellanox Technologies Inc. * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). * Written by Mark Grondona <mgrondona@llnl.gov>. * CODE-OCEC-09-009. All rights reserved. * * This file is part of SLURM, a resource management program. * For details, see <http://slurm.schedmd.com/>. * Please also read the included file: DISCLAIMER. * * SLURM is free software; you can redistribute it and/or modify it under * the terms of the GNU General Public License as published by the Free * Software Foundation; either version 2 of the License, or (at your option) * any later version. * * In addition, as a special exception, the copyright holders give permission * to link the code of portions of this program with the OpenSSL library under * certain conditions as described in each individual source file, and * distribute linked combinations including the two. You must obey the GNU * General Public License in all respects for all of the code used other than * OpenSSL. If you modify file(s) with this exception, you may extend this * exception to your version of the file(s), but you are not obligated to do * so. If you do not wish to do so, delete this exception statement from your * version. If you delete this exception statement from all source files in * the program, then also delete it here. * * SLURM is distributed in the hope that it will be useful, but WITHOUT ANY * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more * details. 
* * You should have received a copy of the GNU General Public License along * with SLURM; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. \*****************************************************************************/ #if HAVE_CONFIG_H # include "config.h" #endif #include <fcntl.h> #include <grp.h> #include <pthread.h> #include <sched.h> #include <signal.h> #include <stdlib.h> #include <string.h> #include <time.h> #include <sys/param.h> #include <poll.h> #include <sys/stat.h> #include <sys/types.h> #include <sys/un.h> #include <sys/wait.h> #include <unistd.h> #include <utime.h> #include "src/common/callerid.h" #include "src/common/cpu_frequency.h" #include "src/common/env.h" #include "src/common/fd.h" #include "src/common/forward.h" #include "src/common/gres.h" #include "src/common/hostlist.h" #include "src/common/list.h" #include "src/common/log.h" #include "src/common/macros.h" #include "src/common/msg_aggr.h" #include "src/common/node_features.h" #include "src/common/node_select.h" #include "src/common/plugstack.h" #include "src/common/read_config.h" #include "src/common/siphash.h" #include "src/common/slurm_auth.h" #include "src/common/slurm_cred.h" #include "src/common/slurm_acct_gather_energy.h" #include "src/common/slurm_jobacct_gather.h" #include "src/common/slurm_protocol_defs.h" #include "src/common/slurm_protocol_api.h" #include "src/common/slurm_protocol_interface.h" #include "src/common/slurm_strcasestr.h" #include "src/common/stepd_api.h" #include "src/common/uid.h" #include "src/common/util-net.h" #include "src/common/xstring.h" #include "src/common/xmalloc.h" #include "src/bcast/file_bcast.h" #include "src/slurmd/slurmd/get_mach_stat.h" #include "src/slurmd/slurmd/slurmd.h" #include "src/slurmd/common/job_container_plugin.h" #include "src/slurmd/common/proctrack.h" #include "src/slurmd/common/run_script.h" #include "src/slurmd/common/reverse_tree_math.h" #include 
"src/slurmd/common/slurmstepd_init.h"
#include "src/slurmd/common/task_plugin.h"

#define _LIMIT_INFO 0

#define RETRY_DELAY 15		/* retry every 15 seconds */
#define MAX_RETRY   240		/* retry 240 times (one hour max) */
#define EPIL_RETRY_MAX 2	/* max retries of epilog complete message */

#ifndef MAXHOSTNAMELEN
#define MAXHOSTNAMELEN	64
#endif

/* Group-ID list for a user, as sent to the slurmstepd */
typedef struct {
	int ngids;		/* number of entries in gids[] */
	gid_t *gids;
} gids_t;

/* Per-step memory limit record kept in job_limits_list */
typedef struct {
	uint32_t job_id;
	uint32_t step_id;
	uint32_t job_mem;
	uint32_t step_mem;
} job_mem_limits_t;

/* Identifies a step whose slurmstepd is still starting up */
typedef struct {
	uint32_t job_id;
	uint32_t step_id;
} starting_step_t;

typedef struct {
	uint32_t job_id;
	uint16_t msg_timeout;
	bool *prolog_fini;
	pthread_cond_t *timer_cond;
	pthread_mutex_t *timer_mutex;
} timer_struct_t;

/* Environment/context handed to _run_prolog()/_run_epilog() */
typedef struct {
	uint32_t jobid;
	uint32_t step_id;
	char *node_list;
	char *partition;
	char *resv_id;
	char **spank_job_env;
	uint32_t spank_job_env_size;
	uid_t uid;
	char *user_name;
} job_env_t;

/* Forward declarations for the static helpers and RPC handlers below */
static int  _abort_step(uint32_t job_id, uint32_t step_id);
static char **_build_env(job_env_t *job_env);
static void _delay_rpc(int host_inx, int host_cnt, int usec_per_rpc);
static void _destroy_env(char **env);
static bool _is_batch_job_finished(uint32_t job_id);
static void _job_limits_free(void *x);
static int  _job_limits_match(void *x, void *key);
static bool _job_still_running(uint32_t job_id);
static int  _kill_all_active_steps(uint32_t jobid, int sig, bool batch);
static void _launch_complete_add(uint32_t job_id);
static void _launch_complete_log(char *type, uint32_t job_id);
static void _launch_complete_rm(uint32_t job_id);
static void _launch_complete_wait(uint32_t job_id);
static int  _launch_job_fail(uint32_t job_id, uint32_t slurm_rc);
static bool _launch_job_test(uint32_t job_id);
static void _note_batch_job_finished(uint32_t job_id);
static int  _prolog_is_running (uint32_t jobid);
static int  _step_limits_match(void *x, void *key);
static int  _terminate_all_steps(uint32_t jobid, bool batch);
static void _rpc_launch_tasks(slurm_msg_t *);
static void _rpc_abort_job(slurm_msg_t *);
static void _rpc_batch_job(slurm_msg_t *msg, bool new_msg);
static void _rpc_prolog(slurm_msg_t *msg);
static void _rpc_job_notify(slurm_msg_t *);
static void _rpc_signal_tasks(slurm_msg_t *);
static void _rpc_checkpoint_tasks(slurm_msg_t *);
static void _rpc_complete_batch(slurm_msg_t *);
static void _rpc_terminate_tasks(slurm_msg_t *);
static void _rpc_timelimit(slurm_msg_t *);
static void _rpc_reattach_tasks(slurm_msg_t *);
static void _rpc_signal_job(slurm_msg_t *);
static void _rpc_suspend_job(slurm_msg_t *msg);
static void _rpc_terminate_job(slurm_msg_t *);
static void _rpc_update_time(slurm_msg_t *);
static void _rpc_shutdown(slurm_msg_t *msg);
static void _rpc_reconfig(slurm_msg_t *msg);
static void _rpc_reboot(slurm_msg_t *msg);
static void _rpc_pid2jid(slurm_msg_t *msg);
static int  _rpc_file_bcast(slurm_msg_t *msg);
static void _file_bcast_cleanup(void);
static int  _file_bcast_register_file(slurm_msg_t *msg,
				      file_bcast_info_t *key);
static int  _rpc_ping(slurm_msg_t *);
static int  _rpc_health_check(slurm_msg_t *);
static int  _rpc_acct_gather_update(slurm_msg_t *);
static int  _rpc_acct_gather_energy(slurm_msg_t *);
static int  _rpc_step_complete(slurm_msg_t *msg);
static int  _rpc_step_complete_aggr(slurm_msg_t *msg);
static int  _rpc_stat_jobacct(slurm_msg_t *msg);
static int  _rpc_list_pids(slurm_msg_t *msg);
static int  _rpc_daemon_status(slurm_msg_t *msg);
static int  _run_epilog(job_env_t *job_env);
static int  _run_prolog(job_env_t *job_env, slurm_cred_t *cred);
static void _rpc_forward_data(slurm_msg_t *msg);
static int  _rpc_network_callerid(slurm_msg_t *msg);
static void _dealloc_gids(gids_t *p);
static bool _pause_for_job_completion(uint32_t jobid, char *nodes,
				      int maxtime);
static bool _slurm_authorized_user(uid_t uid);
static void _sync_messages_kill(kill_job_msg_t *req);
static int  _waiter_init (uint32_t jobid);
static int  _waiter_complete (uint32_t jobid);
static bool _steps_completed_now(uint32_t jobid);
static int  _valid_sbcast_cred(file_bcast_msg_t *req, uid_t req_uid,
			       uint16_t block_no, uint32_t *job_id);
static void _wait_state_completed(uint32_t jobid, int max_delay);
static uid_t _get_job_uid(uint32_t jobid);
static gids_t *_gids_cache_lookup(char *user, gid_t gid);
static int  _add_starting_step(uint16_t type, void *req);
static int  _remove_starting_step(uint16_t type, void *req);
static int  _compare_starting_steps(void *s0, void *s1);
static int  _wait_for_starting_step(uint32_t job_id, uint32_t step_id);
static bool _step_is_starting(uint32_t job_id, uint32_t step_id);
static void _add_job_running_prolog(uint32_t job_id);
static void _remove_job_running_prolog(uint32_t job_id);
static int  _match_jobid(void *s0, void *s1);
static void _wait_for_job_running_prolog(uint32_t job_id);
static bool _requeue_setup_env_fail(void);

/*
 * List of threads waiting for jobs to complete
 */
static List waiters;

static pthread_mutex_t launch_mutex = PTHREAD_MUTEX_INITIALIZER;
static time_t startup = 0;		/* daemon startup time */
static time_t last_slurmctld_msg = 0;

/* Guards job_limits_list and job_limits_loaded */
static pthread_mutex_t job_limits_mutex = PTHREAD_MUTEX_INITIALIZER;
static List job_limits_list = NULL;
static bool job_limits_loaded = false;

/* Ring buffer of recently-finished job IDs, guarded by fini_mutex */
#define FINI_JOB_CNT 32
static pthread_mutex_t fini_mutex = PTHREAD_MUTEX_INITIALIZER;
static uint32_t fini_job_id[FINI_JOB_CNT];
static int next_fini_job_inx = 0;

/* NUM_PARALLEL_SUSP_JOBS controls the number of jobs that can be suspended or
 * resumed at one time. */
#define NUM_PARALLEL_SUSP_JOBS 64

/* NUM_PARALLEL_SUSP_STEPS controls the number of steps per job that can be
 * suspended at one time.
 */
#define NUM_PARALLEL_SUSP_STEPS 8
static pthread_mutex_t suspend_mutex = PTHREAD_MUTEX_INITIALIZER;
static uint32_t job_suspend_array[NUM_PARALLEL_SUSP_JOBS];
static int job_suspend_size = 0;

/* Fixed-size table of active job IDs, guarded by job_state_mutex and
 * signalled via job_state_cond */
#define JOB_STATE_CNT 64
static pthread_mutex_t job_state_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  job_state_cond  = PTHREAD_COND_INITIALIZER;
static uint32_t active_job_id[JOB_STATE_CNT];

static pthread_mutex_t prolog_mutex = PTHREAD_MUTEX_INITIALIZER;

/* State for the file broadcast (sbcast) reader/writer locking scheme */
#define FILE_BCAST_TIMEOUT 300
static pthread_mutex_t file_bcast_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  file_bcast_cond  = PTHREAD_COND_INITIALIZER;
static int fb_read_lock = 0, fb_write_wait_lock = 0, fb_write_lock = 0;
static List file_bcast_list = NULL;

/*
 * Top-level RPC dispatcher for slurmd.  Routes each incoming message to
 * its handler based on msg_type; most slurmctld-originated RPCs also
 * refresh last_slurmctld_msg.  A NULL msg is a reset request: record
 * the startup time and release cached waiter/limit state.
 */
void slurmd_req(slurm_msg_t *msg)
{
	int rc;

	if (msg == NULL) {
		/* Daemon reset: drop cached lists, remember startup time */
		if (startup == 0)
			startup = time(NULL);
		FREE_NULL_LIST(waiters);
		slurm_mutex_lock(&job_limits_mutex);
		if (job_limits_list) {
			FREE_NULL_LIST(job_limits_list);
			job_limits_loaded = false;
		}
		slurm_mutex_unlock(&job_limits_mutex);
		return;
	}
	switch (msg->msg_type) {
	case REQUEST_LAUNCH_PROLOG:
		debug2("Processing RPC: REQUEST_LAUNCH_PROLOG");
		_rpc_prolog(msg);
		last_slurmctld_msg = time(NULL);
		break;
	case REQUEST_BATCH_JOB_LAUNCH:
		debug2("Processing RPC: REQUEST_BATCH_JOB_LAUNCH");
		/* Mutex locking moved into _rpc_batch_job() due to
		 * very slow prolog on Blue Gene system. Only batch
		 * jobs are supported on Blue Gene (no job steps). */
		_rpc_batch_job(msg, true);
		last_slurmctld_msg = time(NULL);
		break;
	case REQUEST_LAUNCH_TASKS:
		debug2("Processing RPC: REQUEST_LAUNCH_TASKS");
		/* launch_mutex serializes task launches */
		slurm_mutex_lock(&launch_mutex);
		_rpc_launch_tasks(msg);
		slurm_mutex_unlock(&launch_mutex);
		break;
	case REQUEST_SIGNAL_TASKS:
		debug2("Processing RPC: REQUEST_SIGNAL_TASKS");
		_rpc_signal_tasks(msg);
		break;
	case REQUEST_CHECKPOINT_TASKS:
		debug2("Processing RPC: REQUEST_CHECKPOINT_TASKS");
		_rpc_checkpoint_tasks(msg);
		break;
	case REQUEST_TERMINATE_TASKS:
		debug2("Processing RPC: REQUEST_TERMINATE_TASKS");
		_rpc_terminate_tasks(msg);
		break;
	case REQUEST_KILL_PREEMPTED:
		debug2("Processing RPC: REQUEST_KILL_PREEMPTED");
		last_slurmctld_msg = time(NULL);
		_rpc_timelimit(msg);
		break;
	case REQUEST_KILL_TIMELIMIT:
		debug2("Processing RPC: REQUEST_KILL_TIMELIMIT");
		last_slurmctld_msg = time(NULL);
		_rpc_timelimit(msg);
		break;
	case REQUEST_REATTACH_TASKS:
		debug2("Processing RPC: REQUEST_REATTACH_TASKS");
		_rpc_reattach_tasks(msg);
		break;
	case REQUEST_SIGNAL_JOB:
		debug2("Processing RPC: REQUEST_SIGNAL_JOB");
		_rpc_signal_job(msg);
		break;
	case REQUEST_SUSPEND_INT:
		debug2("Processing RPC: REQUEST_SUSPEND_INT");
		_rpc_suspend_job(msg);
		last_slurmctld_msg = time(NULL);
		break;
	case REQUEST_ABORT_JOB:
		debug2("Processing RPC: REQUEST_ABORT_JOB");
		last_slurmctld_msg = time(NULL);
		_rpc_abort_job(msg);
		break;
	case REQUEST_TERMINATE_JOB:
		debug2("Processing RPC: REQUEST_TERMINATE_JOB");
		last_slurmctld_msg = time(NULL);
		_rpc_terminate_job(msg);
		break;
	case REQUEST_COMPLETE_BATCH_SCRIPT:
		debug2("Processing RPC: REQUEST_COMPLETE_BATCH_SCRIPT");
		_rpc_complete_batch(msg);
		break;
	case REQUEST_UPDATE_JOB_TIME:
		debug2("Processing RPC: REQUEST_UPDATE_JOB_TIME");
		_rpc_update_time(msg);
		last_slurmctld_msg = time(NULL);
		break;
	case REQUEST_SHUTDOWN:
		debug2("Processing RPC: REQUEST_SHUTDOWN");
		_rpc_shutdown(msg);
		break;
	case REQUEST_RECONFIGURE:
		debug2("Processing RPC: REQUEST_RECONFIGURE");
		_rpc_reconfig(msg);
		last_slurmctld_msg = time(NULL);
		break;
	case REQUEST_REBOOT_NODES:
		debug2("Processing RPC: REQUEST_REBOOT_NODES");
		_rpc_reboot(msg);
		break;
	case REQUEST_NODE_REGISTRATION_STATUS:
		debug2("Processing RPC: REQUEST_NODE_REGISTRATION_STATUS");
		/* Treat as ping (for slurmctld agent, just return SUCCESS) */
		rc = _rpc_ping(msg);
		last_slurmctld_msg = time(NULL);
		/* Then initiate a separate node registration */
		if (rc == SLURM_SUCCESS)
			send_registration_msg(SLURM_SUCCESS, true);
		break;
	case REQUEST_PING:
		_rpc_ping(msg);
		last_slurmctld_msg = time(NULL);
		break;
	case REQUEST_HEALTH_CHECK:
		debug2("Processing RPC: REQUEST_HEALTH_CHECK");
		_rpc_health_check(msg);
		last_slurmctld_msg = time(NULL);
		break;
	case REQUEST_ACCT_GATHER_UPDATE:
		debug2("Processing RPC: REQUEST_ACCT_GATHER_UPDATE");
		_rpc_acct_gather_update(msg);
		last_slurmctld_msg = time(NULL);
		break;
	case REQUEST_ACCT_GATHER_ENERGY:
		debug2("Processing RPC: REQUEST_ACCT_GATHER_ENERGY");
		_rpc_acct_gather_energy(msg);
		break;
	case REQUEST_JOB_ID:
		_rpc_pid2jid(msg);
		break;
	case REQUEST_FILE_BCAST:
		rc = _rpc_file_bcast(msg);
		slurm_send_rc_msg(msg, rc);
		break;
	case REQUEST_STEP_COMPLETE:
		(void) _rpc_step_complete(msg);
		break;
	case REQUEST_STEP_COMPLETE_AGGR:
		(void) _rpc_step_complete_aggr(msg);
		break;
	case REQUEST_JOB_STEP_STAT:
		(void) _rpc_stat_jobacct(msg);
		break;
	case REQUEST_JOB_STEP_PIDS:
		(void) _rpc_list_pids(msg);
		break;
	case REQUEST_DAEMON_STATUS:
		_rpc_daemon_status(msg);
		break;
	case REQUEST_JOB_NOTIFY:
		_rpc_job_notify(msg);
		break;
	case REQUEST_FORWARD_DATA:
		_rpc_forward_data(msg);
		break;
	case REQUEST_NETWORK_CALLERID:
		debug2("Processing RPC: REQUEST_NETWORK_CALLERID");
		_rpc_network_callerid(msg);
		break;
	case MESSAGE_COMPOSITE:
		error("Processing RPC: MESSAGE_COMPOSITE: "
		      "This should never happen");
		msg_aggr_add_msg(msg, 0, NULL);
		break;
	case RESPONSE_MESSAGE_COMPOSITE:
		debug2("Processing RPC: RESPONSE_MESSAGE_COMPOSITE");
		msg_aggr_resp(msg);
		break;
	default:
		error("slurmd_req: invalid request msg type %d",
		      msg->msg_type);
		slurm_send_rc_msg(msg, EINVAL);
		break;
	}
	return;
}

/*
 * Pack an abbreviated slurmd configuration and write it (length-prefixed)
 * to fd.  Returns 0 on success, -1 if a write fails (via safe_write's
 * rwfail label).
 */
static int _send_slurmd_conf_lite (int fd, slurmd_conf_t *cf)
{
	int len;
	Buf buffer = init_buf(0);
	slurm_mutex_lock(&cf->config_mutex);
	pack_slurmd_conf_lite(cf, buffer);
	slurm_mutex_unlock(&cf->config_mutex);
	len = get_buf_offset(buffer);
	safe_write(fd, &len, sizeof(int));
	safe_write(fd, get_buf_data(buffer), len);
	free_buf(buffer);
	return (0);

rwfail:
	return (-1);
}

/*
 * Stream the initialization data a newly forked slurmstepd needs over fd:
 * request type, reverse-tree layout, slurmd config, addresses, GRES and
 * cpu-frequency info, the packed launch request, and the user's group list.
 * Returns 0 on success, errno-style code on failure.
 */
static int
_send_slurmstepd_init(int fd, int type, void *req,
		      slurm_addr_t *cli, slurm_addr_t *self,
		      hostset_t step_hset, uint16_t protocol_version)
{
	int len = 0;
	Buf buffer = NULL;
	slurm_msg_t msg;
	uid_t uid = (uid_t)-1;
	/* NOTE(review): initializer casts to uid_t rather than gid_t;
	 * likely a copy/paste slip, though both are "all bits set" */
	gid_t gid = (uid_t)-1;
	gids_t *gids = NULL;

	int rank, proto;
	int parent_rank, children, depth, max_depth;
	char *parent_alias = NULL;
	char *user_name = NULL;
	slurm_addr_t parent_addr = {0};
	char pwd_buffer[PW_BUF_SIZE];
	struct passwd pwd, *pwd_result;

	slurm_msg_t_init(&msg);
	/* send type over to slurmstepd */
	safe_write(fd, &type, sizeof(int));

	/* step_hset can be NULL for batch scripts OR if the job was submitted
	 * by SlurmUser or root using the --no-allocate/-Z option and the job
	 * job credential validation by _check_job_credential() failed. If the
	 * job credential did not validate, then it did not come from slurmctld
	 * and there is no reason to send step completion messages to slurmctld.
	 */
	if (step_hset == NULL) {
		bool send_error = false;
		if (type == LAUNCH_TASKS) {
			launch_tasks_request_msg_t *launch_req;
			launch_req = (launch_tasks_request_msg_t *) req;
			if (launch_req->job_step_id != SLURM_EXTERN_CONT)
				send_error = true;
		}
		if (send_error) {
			info("task rank unavailable due to invalid job "
			     "credential, step completion RPC impossible");
		}
		rank = -1;
		parent_rank = -1;
		children = 0;
		depth = 0;
		max_depth = 0;
	} else if ((type == LAUNCH_TASKS) &&
		   (((launch_tasks_request_msg_t *)req)->alias_list)) {
		/* In the cloud, each task talks directly to the slurmctld
		 * since node addressing is abnormal */
		rank = 0;
		parent_rank = -1;
		children = 0;
		depth = 0;
		max_depth = 0;
	} else {
#ifndef HAVE_FRONT_END
		int count;
		count = hostset_count(step_hset);
		rank = hostset_find(step_hset, conf->node_name);
		reverse_tree_info(rank, count, REVERSE_TREE_WIDTH,
				  &parent_rank, &children,
				  &depth, &max_depth);
		if (rank > 0) { /* rank 0 talks directly to the slurmctld */
			int rc;
			/* Find the slurm_addr_t of this node's parent slurmd
			 * in the step host list */
			parent_alias = hostset_nth(step_hset, parent_rank);
			rc = slurm_conf_get_addr(parent_alias, &parent_addr);
			if (rc != SLURM_SUCCESS) {
				error("Failed looking up address for "
				      "NodeName %s", parent_alias);
				/* parent_rank = -1; */
			}
		}
#else
		/* In FRONT_END mode, one slurmd pretends to be all
		 * NodeNames, so we can't compare conf->node_name
		 * to the NodeNames in step_hset. Just send step complete
		 * RPC directly to the controller.
		 */
		rank = 0;
		parent_rank = -1;
		children = 0;
		depth = 0;
		max_depth = 0;
#endif
	}
	debug3("slurmstepd rank %d (%s), parent rank %d (%s), "
	       "children %d, depth %d, max_depth %d",
	       rank, conf->node_name, parent_rank,
	       parent_alias ? parent_alias : "NONE",
	       children, depth, max_depth);
	if (parent_alias)
		free(parent_alias);

	/* send reverse-tree info to the slurmstepd */
	safe_write(fd, &rank, sizeof(int));
	safe_write(fd, &parent_rank, sizeof(int));
	safe_write(fd, &children, sizeof(int));
	safe_write(fd, &depth, sizeof(int));
	safe_write(fd, &max_depth, sizeof(int));
	safe_write(fd, &parent_addr, sizeof(slurm_addr_t));

	/* send conf over to slurmstepd */
	if (_send_slurmd_conf_lite(fd, conf) < 0)
		goto rwfail;

	/* send cli address over to slurmstepd */
	buffer = init_buf(0);
	slurm_pack_slurm_addr(cli, buffer);
	len = get_buf_offset(buffer);
	safe_write(fd, &len, sizeof(int));
	safe_write(fd, get_buf_data(buffer), len);
	free_buf(buffer);
	buffer = NULL;

	/* send self address over to slurmstepd; a zero length signals
	 * "no self address" to the reader */
	if (self) {
		buffer = init_buf(0);
		slurm_pack_slurm_addr(self, buffer);
		len = get_buf_offset(buffer);
		safe_write(fd, &len, sizeof(int));
		safe_write(fd, get_buf_data(buffer), len);
		free_buf(buffer);
		buffer = NULL;
	} else {
		len = 0;
		safe_write(fd, &len, sizeof(int));
	}

	/* Send GRES information to slurmstepd */
	gres_plugin_send_stepd(fd);

	/* send cpu_frequency info to slurmstepd */
	cpu_freq_send_info(fd);

	/* send req over to slurmstepd */
	switch (type) {
	case LAUNCH_BATCH_JOB:
		gid = (uid_t)((batch_job_launch_msg_t *)req)->gid;
		uid = (uid_t)((batch_job_launch_msg_t *)req)->uid;
		user_name = ((batch_job_launch_msg_t *)req)->user_name;
		msg.msg_type = REQUEST_BATCH_JOB_LAUNCH;
		break;
	case LAUNCH_TASKS:
		/*
		 * The validity of req->uid was verified against the
		 * auth credential in _rpc_launch_tasks(). req->gid
		 * has NOT yet been checked!
		 */
		gid = (uid_t)((launch_tasks_request_msg_t *)req)->gid;
		uid = (uid_t)((launch_tasks_request_msg_t *)req)->uid;
		user_name = ((launch_tasks_request_msg_t *)req)->user_name;
		msg.msg_type = REQUEST_LAUNCH_TASKS;
		break;
	default:
		error("Was sent a task I didn't understand");
		break;
	}

	/* Pack the launch request at the agreed protocol version and ship
	 * it length-prefixed, preceded by the version itself */
	buffer = init_buf(0);
	msg.data = req;
	if (protocol_version == (uint16_t)NO_VAL)
		proto = SLURM_PROTOCOL_VERSION;
	else
		proto = protocol_version;
	msg.protocol_version = (uint16_t)proto;
	pack_msg(&msg, buffer);
	len = get_buf_offset(buffer);
	safe_write(fd, &proto, sizeof(int));
	safe_write(fd, &len, sizeof(int));
	safe_write(fd, get_buf_data(buffer), len);
	free_buf(buffer);
	buffer = NULL;

#ifdef HAVE_NATIVE_CRAY
	/* Try to avoid calling this on a system which is a native
	 * cray.  getpwuid_r is slow on the compute nodes and this has
	 * in theory been verified earlier.
	 */
	if (!user_name) {
#endif
	/* send cached group ids array for the relevant uid */
	debug3("_send_slurmstepd_init: call to getpwuid_r");
	if (slurm_getpwuid_r(uid, &pwd, pwd_buffer, PW_BUF_SIZE,
			     &pwd_result) || (pwd_result == NULL)) {
		/* Write a zero-length group list so the stepd side does
		 * not block, then report the lookup failure */
		error("%s: getpwuid_r: %m", __func__);
		len = 0;
		safe_write(fd, &len, sizeof(int));
		errno = ESLURMD_UID_NOT_FOUND;
		return errno;
	}
	debug3("%s: return from getpwuid_r", __func__);
	if (gid != pwd_result->pw_gid) {
		debug("%s: Changing gid from %d to %d",
		      __func__, gid, pwd_result->pw_gid);
	}
	/* The passwd-file gid overrides the (unchecked) request gid */
	gid = pwd_result->pw_gid;
	if (!user_name)
		user_name = pwd_result->pw_name;
#ifdef HAVE_NATIVE_CRAY
	}
#endif
	if (!user_name) {
		/* Sanity check since gids_cache_lookup will fail
		 * with a NULL.
		 */
		error("%s: No user name for %d: %m", __func__, uid);
		len = 0;
		safe_write(fd, &len, sizeof(int));
		errno = ESLURMD_UID_NOT_FOUND;
		return errno;
	}

	/* Send the (possibly cached) supplementary group list; zero
	 * length means "no groups available" */
	if ((gids = _gids_cache_lookup(user_name, gid))) {
		int i;
		uint32_t tmp32;
		safe_write(fd, &gids->ngids, sizeof(int));
		for (i = 0; i < gids->ngids; i++) {
			tmp32 = (uint32_t)gids->gids[i];
			safe_write(fd, &tmp32, sizeof(uint32_t));
		}
		_dealloc_gids(gids);
	} else {
		len = 0;
		safe_write(fd, &len, sizeof(int));
	}
	return 0;

rwfail:
	if (buffer)
		free_buf(buffer);
	error("_send_slurmstepd_init failed");
	return errno;
}

/*
 * Fork and exec the slurmstepd, then send the slurmstepd its
 * initialization data.  Then wait for slurmstepd to send an "ok"
 * message before returning.  When the "ok" message is received,
 * the slurmstepd has created and begun listening on its unix
 * domain socket.
 *
 * Note that this code forks twice and it is the grandchild that
 * becomes the slurmstepd process, so the slurmstepd's parent process
 * will be init, not slurmd.
 */
static int
_forkexec_slurmstepd(uint16_t type, void *req, slurm_addr_t *cli,
		     slurm_addr_t *self, const hostset_t step_hset,
		     uint16_t protocol_version)
{
	pid_t pid;
	int to_stepd[2] = {-1, -1};	/* pipe: slurmd -> stepd */
	int to_slurmd[2] = {-1, -1};	/* pipe: stepd -> slurmd */

	if (pipe(to_stepd) < 0 || pipe(to_slurmd) < 0) {
		error("_forkexec_slurmstepd pipe failed: %m");
		return SLURM_FAILURE;
	}

	if (_add_starting_step(type, req)) {
		error("_forkexec_slurmstepd failed in _add_starting_step: %m");
		return SLURM_FAILURE;
	}

	if ((pid = fork()) < 0) {
		error("_forkexec_slurmstepd: fork: %m");
		close(to_stepd[0]);
		close(to_stepd[1]);
		close(to_slurmd[0]);
		close(to_slurmd[1]);
		_remove_starting_step(type, req);
		return SLURM_FAILURE;
	} else if (pid > 0) {
		int rc = SLURM_SUCCESS;
#if (SLURMSTEPD_MEMCHECK == 0)
		int i;
		time_t start_time = time(NULL);
#endif
		/*
		 * Parent sends initialization data to the slurmstepd
		 * over the to_stepd pipe, and waits for the return code
		 * reply on the to_slurmd pipe.
		 */
		if (close(to_stepd[0]) < 0)
			error("Unable to close read to_stepd in parent: %m");
		if (close(to_slurmd[1]) < 0)
			error("Unable to close write to_slurmd in parent: %m");

		if ((rc = _send_slurmstepd_init(to_stepd[1], type, req,
						cli, self, step_hset,
						protocol_version)) != 0) {
			error("Unable to init slurmstepd");
			goto done;
		}

		/* If running under valgrind/memcheck, this pipe doesn't work
		 * correctly so just skip it. */
#if (SLURMSTEPD_MEMCHECK == 0)
		i = read(to_slurmd[0], &rc, sizeof(int));
		if (i < 0) {
			error("%s: Can not read return code from slurmstepd "
			      "got %d: %m", __func__, i);
			rc = SLURM_FAILURE;
		} else if (i != sizeof(int)) {
			error("%s: slurmstepd failed to send return code "
			      "got %d: %m", __func__, i);
			rc = SLURM_FAILURE;
		} else {
			int delta_time = time(NULL) - start_time;
			int cc;
			if (delta_time > 5) {
				info("Warning: slurmstepd startup took %d sec, "
				     "possible file system problem or full "
				     "memory", delta_time);
			}
			if (rc != SLURM_SUCCESS)
				error("slurmstepd return code %d", rc);
			/* Ack the stepd so it can proceed */
			cc = SLURM_SUCCESS;
			cc = write(to_stepd[1], &cc, sizeof(int));
			if (cc != sizeof(int)) {
				error("%s: failed to send ack to stepd %d: %m",
				      __func__, cc);
			}
		}
#endif
	done:
		if (_remove_starting_step(type, req))
			error("Error cleaning up starting_step list");

		/* Reap child */
		if (waitpid(pid, NULL, 0) < 0)
			error("Unable to reap slurmd child process");
		if (close(to_stepd[1]) < 0)
			error("close write to_stepd in parent: %m");
		if (close(to_slurmd[0]) < 0)
			error("close read to_slurmd in parent: %m");
		return rc;
	} else {
#if (SLURMSTEPD_MEMCHECK == 1)
		/* memcheck test of slurmstepd, option #1 */
		char *const argv[3] = {"memcheck",
				       (char *)conf->stepd_loc, NULL};
#elif (SLURMSTEPD_MEMCHECK == 2)
		/* valgrind test of slurmstepd, option #2 */
		uint32_t job_id = 0, step_id = 0;
		char log_file[256];
		char *const argv[13] = {"valgrind", "--tool=memcheck",
					"--error-limit=no",
					"--leak-check=summary",
					"--show-reachable=yes",
					"--max-stackframe=16777216",
					"--num-callers=20",
					"--child-silent-after-fork=yes",
					"--track-origins=yes",
					log_file,
					(char *)conf->stepd_loc, NULL};
		if (type == LAUNCH_BATCH_JOB) {
			job_id = ((batch_job_launch_msg_t *)req)->job_id;
			step_id = ((batch_job_launch_msg_t *)req)->step_id;
		} else if (type == LAUNCH_TASKS) {
			job_id = ((launch_tasks_request_msg_t *)req)->job_id;
			step_id = ((launch_tasks_request_msg_t *)req)->job_step_id;
		}
		snprintf(log_file, sizeof(log_file),
			 "--log-file=/tmp/slurmstepd_valgrind_%u.%u",
			 job_id, step_id);
#elif (SLURMSTEPD_MEMCHECK == 3)
		/* valgrind/drd test of slurmstepd, option #3 */
		uint32_t job_id = 0, step_id = 0;
		char log_file[256];
		char *const argv[10] = {"valgrind", "--tool=drd",
					"--error-limit=no",
					"--max-stackframe=16777216",
					"--num-callers=20",
					"--child-silent-after-fork=yes",
					log_file,
					(char *)conf->stepd_loc, NULL};
		if (type == LAUNCH_BATCH_JOB) {
			job_id = ((batch_job_launch_msg_t *)req)->job_id;
			step_id = ((batch_job_launch_msg_t *)req)->step_id;
		} else if (type == LAUNCH_TASKS) {
			job_id = ((launch_tasks_request_msg_t *)req)->job_id;
			step_id = ((launch_tasks_request_msg_t *)req)->job_step_id;
		}
		snprintf(log_file, sizeof(log_file),
			 "--log-file=/tmp/slurmstepd_valgrind_%u.%u",
			 job_id, step_id);
#elif (SLURMSTEPD_MEMCHECK == 4)
		/* valgrind/helgrind test of slurmstepd, option #4 */
		uint32_t job_id = 0, step_id = 0;
		char log_file[256];
		char *const argv[10] = {"valgrind", "--tool=helgrind",
					"--error-limit=no",
					"--max-stackframe=16777216",
					"--num-callers=20",
					"--child-silent-after-fork=yes",
					log_file,
					(char *)conf->stepd_loc, NULL};
		if (type == LAUNCH_BATCH_JOB) {
			job_id = ((batch_job_launch_msg_t *)req)->job_id;
			step_id = ((batch_job_launch_msg_t *)req)->step_id;
		} else if (type == LAUNCH_TASKS) {
			job_id = ((launch_tasks_request_msg_t *)req)->job_id;
			step_id = ((launch_tasks_request_msg_t *)req)->job_step_id;
		}
		snprintf(log_file, sizeof(log_file),
			 "--log-file=/tmp/slurmstepd_valgrind_%u.%u",
			 job_id, step_id);
#else
		/* no memory checking, default */
		char *const argv[2] = { (char *)conf->stepd_loc, NULL};
#endif
		int i;
		int failed = 0;

		/* inform slurmstepd about our config */
		setenv("SLURM_CONF", conf->conffile, 1);

		/*
		 * Child forks and exits
		 */
		if (setsid() < 0) {
			error("_forkexec_slurmstepd: setsid: %m");
			failed = 1;
		}
		if ((pid = fork()) < 0) {
			error("_forkexec_slurmstepd: "
			      "Unable to fork grandchild: %m");
			failed = 2;
		} else if (pid > 0) { /* child */
			exit(0);
		}

		/*
		 * Just incase we (or someone we are linking to)
		 * opened a file and didn't do a close on exec.  This
		 * is needed mostly to protect us against libs we link
		 * to that don't set the flag as we should already be
		 * setting it for those that we open.  The number 256
		 * is an arbitrary number based off test7.9.
		 */
		for (i=3; i<256; i++) {
			(void) fcntl(i, F_SETFD, FD_CLOEXEC);
		}

		/*
		 * Grandchild exec's the slurmstepd
		 *
		 * If the slurmd is being shutdown/restarted before
		 * the pipe happens the old conf->lfd could be reused
		 * and if we close it the dup2 below will fail.
		 */
		if ((to_stepd[0] != conf->lfd) && (to_slurmd[1] != conf->lfd))
			slurm_shutdown_msg_engine(conf->lfd);

		if (close(to_stepd[1]) < 0)
			error("close write to_stepd in grandchild: %m");
		if (close(to_slurmd[0]) < 0)
			error("close read to_slurmd in parent: %m");

		/* Wire the pipe ends to stdin/stdout so the stepd can use
		 * the standard descriptors for its handshake */
		(void) close(STDIN_FILENO); /* ignore return */
		if (dup2(to_stepd[0], STDIN_FILENO) == -1) {
			error("dup2 over STDIN_FILENO: %m");
			exit(1);
		}
		fd_set_close_on_exec(to_stepd[0]);
		(void) close(STDOUT_FILENO); /* ignore return */
		if (dup2(to_slurmd[1], STDOUT_FILENO) == -1) {
			error("dup2 over STDOUT_FILENO: %m");
			exit(1);
		}
		fd_set_close_on_exec(to_slurmd[1]);
		(void) close(STDERR_FILENO); /* ignore return */
		if (dup2(devnull, STDERR_FILENO) == -1) {
			error("dup2 /dev/null to STDERR_FILENO: %m");
			exit(1);
		}
		fd_set_noclose_on_exec(STDERR_FILENO);
		log_fini();
		if (!failed) {
			if (conf->chos_loc && !access(conf->chos_loc, X_OK))
				execvp(conf->chos_loc, argv);
			else
				execvp(argv[0], argv);
			error("exec of slurmstepd failed: %m");
		}
		exit(2);
	}
}

/*
 * The job(step) credential is the only place to get a definitive
 * list of the nodes
 * allocated to a job step.  We need to return
 * a hostset_t of the nodes. Validate the incoming RPC, updating
 * job_mem needed.
 */
static int
_check_job_credential(launch_tasks_request_msg_t *req, uid_t uid,
		      int node_id, hostset_t *step_hset,
		      uint16_t protocol_version)
{
	slurm_cred_arg_t arg;
	hostset_t	s_hset = NULL;
	bool		user_ok = _slurm_authorized_user(uid);
	bool		verified = true;
	int		host_index = -1;
	int		rc;
	slurm_cred_t    *cred = req->cred;
	uint32_t	jobid = req->job_id;
	uint32_t	stepid = req->job_step_id;
	int		tasks_to_launch = req->tasks_to_launch[node_id];
	uint32_t	job_cpus = 0, step_cpus = 0;

	/*
	 * First call slurm_cred_verify() so that all valid
	 * credentials are checked
	 */
	rc = slurm_cred_verify(conf->vctx, cred, &arg, protocol_version);
	if (rc < 0) {
		verified = false;
		/* NOTE(review): for SlurmUser/root an invalid credential is
		 * tolerated (--no-allocate support) and validation below is
		 * skipped entirely */
		if ((!user_ok) || (errno != ESLURMD_INVALID_JOB_CREDENTIAL))
			return SLURM_ERROR;
		else {
			debug("_check_job_credential slurm_cred_verify failed:"
			      " %m, but continuing anyway.");
		}
	}

	/* If uid is the SlurmUser or root and the credential is bad,
	 * then do not attempt validating the credential */
	if (!verified) {
		*step_hset = NULL;
		if (rc >= 0) {
			if ((s_hset = hostset_create(arg.step_hostlist)))
				*step_hset = s_hset;
			slurm_cred_free_args(&arg);
		}
		return SLURM_SUCCESS;
	}

	if ((arg.jobid != jobid) || (arg.stepid != stepid)) {
		error("job credential for %u.%u, expected %u.%u",
		      arg.jobid, arg.stepid, jobid, stepid);
		goto fail;
	}

	if (arg.uid != uid) {
		error("job credential created for uid %ld, expected %ld",
		      (long) arg.uid, (long) uid);
		goto fail;
	}

	/*
	 * Check that credential is valid for this host
	 */
	if (!(s_hset = hostset_create(arg.step_hostlist))) {
		error("Unable to parse credential hostlist: `%s'",
		      arg.step_hostlist);
		goto fail;
	}

	if (!hostset_within(s_hset, conf->node_name)) {
		error("Invalid job %u.%u credential for user %u: "
		      "host %s not in hostset %s",
		      arg.jobid, arg.stepid, arg.uid, conf->node_name,
		      arg.step_hostlist);
		goto fail;
	}

	if ((arg.job_nhosts > 0) && (tasks_to_launch > 0)) {
		uint32_t hi, i, i_first_bit=0, i_last_bit=0, j;
		bool cpu_log = slurm_get_debug_flags() & DEBUG_FLAG_CPU_BIND;

#ifdef HAVE_FRONT_END
		host_index = 0;	/* It is always 0 for front end systems */
#else
		hostset_t j_hset;
		/* Determine the CPU count based upon this node's index into
		 * the _job's_ allocation (job's hostlist and core_bitmap) */
		if (!(j_hset = hostset_create(arg.job_hostlist))) {
			error("Unable to parse credential hostlist: `%s'",
			      arg.job_hostlist);
			goto fail;
		}
		host_index = hostset_find(j_hset, conf->node_name);
		hostset_destroy(j_hset);

		if ((host_index < 0) || (host_index >= arg.job_nhosts)) {
			error("job cr credential invalid host_index %d for "
			      "job %u", host_index, arg.jobid);
			goto fail;
		}
#endif

		if (cpu_log) {
			char *per_job = "", *per_step = "";
			uint32_t job_mem  = arg.job_mem_limit;
			uint32_t step_mem = arg.step_mem_limit;
			if (job_mem & MEM_PER_CPU) {
				job_mem &= (~MEM_PER_CPU);
				per_job = "_per_CPU";
			}
			if (step_mem & MEM_PER_CPU) {
				step_mem &= (~MEM_PER_CPU);
				per_step = "_per_CPU";
			}
			info("====================");
			info("step_id:%u.%u job_mem:%uMB%s step_mem:%uMB%s",
			     arg.jobid, arg.stepid, job_mem, per_job,
			     step_mem, per_step);
		}

		/* Locate this node's slice of the packed core bitmap using
		 * the (sockets, cores, repeat-count) run-length encoding */
		hi = host_index + 1;	/* change from 0-origin to 1-origin */
		for (i=0; hi; i++) {
			if (hi > arg.sock_core_rep_count[i]) {
				i_first_bit += arg.sockets_per_node[i] *
					       arg.cores_per_socket[i] *
					       arg.sock_core_rep_count[i];
				hi -= arg.sock_core_rep_count[i];
			} else {
				i_first_bit += arg.sockets_per_node[i] *
					       arg.cores_per_socket[i] *
					       (hi - 1);
				i_last_bit = i_first_bit +
					     arg.sockets_per_node[i] *
					     arg.cores_per_socket[i];
				break;
			}
		}

		/* Now count the allocated processors */
		for (i=i_first_bit, j=0; i<i_last_bit; i++, j++) {
			char *who_has = NULL;
			if (bit_test(arg.job_core_bitmap, i)) {
				job_cpus++;
				who_has = "Job";
			}
			if (bit_test(arg.step_core_bitmap, i)) {
				step_cpus++;
				who_has = "Step";
			}
			if (cpu_log && who_has) {
				info("JobNode[%u] CPU[%u] %s alloc",
				     host_index, j, who_has);
			}
		}
		if (cpu_log)
			info("====================");
		if (step_cpus == 0) {
			error("cons_res: zero processors allocated to step");
			step_cpus = 1;
		}
		/* NOTE: step_cpus is the count of allocated resources
		 * (typically cores).  Convert to CPU count as needed */
		if (i_last_bit <= i_first_bit)
			error("step credential has no CPUs selected");
		else {
			i = conf->cpus / (i_last_bit - i_first_bit);
			if (i > 1) {
				if (cpu_log)
					info("Scaling CPU count by factor of "
					     "%d (%u/(%u-%u))",
					     i, conf->cpus,
					     i_last_bit, i_first_bit);
				step_cpus *= i;
				job_cpus *= i;
			}
		}
		if (tasks_to_launch > step_cpus) {
			/* This is expected with the --overcommit option
			 * or hyperthreads */
			debug("cons_res: More than one tasks per logical "
			      "processor (%d > %u) on host [%u.%u %ld %s] ",
			      tasks_to_launch, step_cpus, arg.jobid,
			      arg.stepid, (long) arg.uid, arg.step_hostlist);
		}
	} else {
		step_cpus = 1;
		job_cpus  = 1;
	}

	/* Overwrite any memory limits in the RPC with contents of the
	 * memory limit within the credential.
	 * Reset the CPU count on this node to correct value. */
	if (arg.step_mem_limit) {
		if (arg.step_mem_limit & MEM_PER_CPU) {
			req->step_mem_lim  = arg.step_mem_limit &
					     (~MEM_PER_CPU);
			req->step_mem_lim *= step_cpus;
		} else
			req->step_mem_lim  = arg.step_mem_limit;
	} else {
		if (arg.job_mem_limit & MEM_PER_CPU) {
			req->step_mem_lim  = arg.job_mem_limit &
					     (~MEM_PER_CPU);
			req->step_mem_lim *= job_cpus;
		} else
			req->step_mem_lim  = arg.job_mem_limit;
	}
	if (arg.job_mem_limit & MEM_PER_CPU) {
		req->job_mem_lim  = arg.job_mem_limit & (~MEM_PER_CPU);
		req->job_mem_lim *= job_cpus;
	} else
		req->job_mem_lim  = arg.job_mem_limit;
	req->job_core_spec = arg.job_core_spec;
	req->node_cpus = step_cpus;
#if 0
	info("%u.%u node_id:%d mem orig:%u cpus:%u limit:%u",
	     jobid, stepid, node_id, arg.job_mem_limit,
	     step_cpus, req->job_mem_lim);
#endif

	*step_hset = s_hset;
	slurm_cred_free_args(&arg);
	return SLURM_SUCCESS;

fail:
	if (s_hset)
		hostset_destroy(s_hset);
	*step_hset = NULL;
	slurm_cred_free_args(&arg);
	slurm_seterrno_ret(ESLURMD_INVALID_JOB_CREDENTIAL);
}

/*
 * Handle REQUEST_LAUNCH_TASKS: validate the requester and credential,
 * run the prolog on first use of the job on this node, record memory
 * limits, then fork/exec a slurmstepd for the step.
 */
static void
_rpc_launch_tasks(slurm_msg_t *msg)
{
	int      errnum = SLURM_SUCCESS;
	uint16_t port;
	char     host[MAXHOSTNAMELEN];
	uid_t    req_uid;
	launch_tasks_request_msg_t *req = msg->data;
	bool     super_user = false;
#ifndef HAVE_FRONT_END
	bool     first_job_run;
#endif
	slurm_addr_t self;
	slurm_addr_t *cli = &msg->orig_addr;
	hostset_t step_hset = NULL;
	job_mem_limits_t *job_limits_ptr;
	int nodeid = 0;

#ifndef HAVE_FRONT_END
	/* It is always 0 for front end systems */
	nodeid = nodelist_find(req->complete_nodelist, conf->node_name);
#endif
	req_uid = g_slurm_auth_get_uid(msg->auth_cred, conf->auth_info);
	memcpy(&req->orig_addr, &msg->orig_addr, sizeof(slurm_addr_t));

	super_user = _slurm_authorized_user(req_uid);

	/* Only the owner (per the auth credential) or SlurmUser/root may
	 * launch tasks for this uid */
	if ((super_user == false) && (req_uid != req->uid)) {
		error("launch task request from uid %u",
		      (unsigned int) req_uid);
		errnum = ESLURM_USER_ID_MISSING;	/* or invalid user */
		goto done;
	}

	slurm_get_ip_str(cli, &port, host, sizeof(host));
	info("launch task %u.%u request from %u.%u@%s (port %hu)",
	     req->job_id, req->job_step_id, req->uid, req->gid,
	     host, port);

	/* this could be set previously and needs to be overwritten by
	 * this call for messages to work correctly for the new call */
	env_array_overwrite(&req->env, "SLURM_SRUN_COMM_HOST", host);
	req->envc = envcount(req->env);

#ifndef HAVE_FRONT_END
	/* prolog_mutex is held across the credential check so the
	 * first-run decision and jobid caching are atomic */
	slurm_mutex_lock(&prolog_mutex);
	first_job_run = !slurm_cred_jobid_cached(conf->vctx, req->job_id);
#endif
	if (_check_job_credential(req, req_uid, nodeid, &step_hset,
				  msg->protocol_version) < 0) {
		errnum = errno;
		error("Invalid job credential from %ld@%s: %m",
		      (long) req_uid, host);
#ifndef HAVE_FRONT_END
		slurm_mutex_unlock(&prolog_mutex);
#endif
		goto done;
	}

	/* Must follow _check_job_credential(), which sets some req fields */
	task_g_slurmd_launch_request(req->job_id, req, nodeid);

#ifndef HAVE_FRONT_END
	if (first_job_run) {
		int rc;
		job_env_t job_env;

		slurm_cred_insert_jobid(conf->vctx, req->job_id);
		_add_job_running_prolog(req->job_id);
		slurm_mutex_unlock(&prolog_mutex);

		if (container_g_create(req->job_id))
			error("container_g_create(%u): %m", req->job_id);

		/* First step of this job on this node: run the prolog */
		memset(&job_env, 0, sizeof(job_env_t));
		job_env.jobid = req->job_id;
		job_env.step_id = req->job_step_id;
		job_env.node_list = req->complete_nodelist;
		job_env.partition = req->partition;
		job_env.spank_job_env = req->spank_job_env;
		job_env.spank_job_env_size = req->spank_job_env_size;
		job_env.uid = req->uid;
		job_env.user_name = req->user_name;
		rc =  _run_prolog(&job_env, req->cred);
		if (rc) {
			int term_sig, exit_status;
			if (WIFSIGNALED(rc)) {
				exit_status = 0;
				term_sig    = WTERMSIG(rc);
			} else {
				exit_status = WEXITSTATUS(rc);
				term_sig    = 0;
			}
			error("[job %u] prolog failed status=%d:%d",
			      req->job_id, exit_status, term_sig);
			errnum = ESLURMD_PROLOG_FAILED;
			goto done;
		}
		/* Since the job could have been killed while the prolog was
		 * running, test if the credential has since been revoked
		 * and exit as needed. */
		if (slurm_cred_revoked(conf->vctx, req->cred)) {
			info("Job %u already killed, do not launch step %u.%u",
			     req->job_id, req->job_id, req->job_step_id);
			errnum = ESLURMD_CREDENTIAL_REVOKED;
			goto done;
		}
	} else {
		slurm_mutex_unlock(&prolog_mutex);
		_wait_for_job_running_prolog(req->job_id);
	}
#endif

	/* Record the credential-derived memory limits for this step so
	 * they can be enforced later */
	if (req->job_mem_lim || req->step_mem_lim) {
		step_loc_t step_info;
		slurm_mutex_lock(&job_limits_mutex);
		if (!job_limits_list)
			job_limits_list = list_create(_job_limits_free);
		step_info.jobid  = req->job_id;
		step_info.stepid = req->job_step_id;
		job_limits_ptr = list_find_first (job_limits_list,
						  _step_limits_match,
						  &step_info);
		if (!job_limits_ptr) {
			job_limits_ptr = xmalloc(sizeof(job_mem_limits_t));
			job_limits_ptr->job_id   = req->job_id;
			job_limits_ptr->job_mem  = req->job_mem_lim;
			job_limits_ptr->step_id  = req->job_step_id;
			job_limits_ptr->step_mem = req->step_mem_lim;
#if _LIMIT_INFO
			info("AddLim step:%u.%u job_mem:%u step_mem:%u",
			     job_limits_ptr->job_id, job_limits_ptr->step_id,
			     job_limits_ptr->job_mem,
			     job_limits_ptr->step_mem);
#endif
			list_append(job_limits_list, job_limits_ptr);
		}
		slurm_mutex_unlock(&job_limits_mutex);
	}

	slurm_get_stream_addr(msg->conn_fd, &self);

	debug3("_rpc_launch_tasks: call to _forkexec_slurmstepd");
	errnum = _forkexec_slurmstepd(LAUNCH_TASKS, (void *)req, cli, &self,
				      step_hset, msg->protocol_version);
	debug3("_rpc_launch_tasks: return from _forkexec_slurmstepd");

	_launch_complete_add(req->job_id);

done:
	if (step_hset)
		hostset_destroy(step_hset);

	if (slurm_send_rc_msg(msg, errnum) < 0) {
		char addr_str[32];
		slurm_print_slurm_addr(&msg->address, addr_str,
				       sizeof(addr_str));
		error("_rpc_launch_tasks: unable to send return code to "
		      "address:port=%s msg_type=%u: %m",
		      addr_str, msg->msg_type);

		/*
		 * Rewind credential so that srun may perform retry
		 */
		slurm_cred_rewind(conf->vctx, req->cred); /* ignore errors */

	} else if (errnum == SLURM_SUCCESS) {
		save_cred_state(conf->vctx);
		task_g_slurmd_reserve_resources(req->job_id, req, nodeid);
	}

	/*
	 *  If job prolog failed, indicate failure to slurmctld
	 */
	if (errnum == ESLURMD_PROLOG_FAILED)
		send_registration_msg(errnum, false);
}

/*
 * Record a prolog failure in the job's stderr/stdout file (or a
 * slurm-<jobid>.out fallback) so the user can see why the job failed.
 */
static void
_prolog_error(batch_job_launch_msg_t *req, int rc)
{
	char *err_name_ptr, err_name[256], path_name[MAXPATHLEN];
	char *fmt_char;
	int fd;

	if (req->std_err || req->std_out) {
		/* NOTE(review): strncpy does not NUL-terminate when the
		 * source is >= sizeof(err_name); the strchr/snprintf below
		 * would then read past the buffer — TODO confirm upstream
		 * bounds on std_err/std_out */
		if (req->std_err)
			strncpy(err_name, req->std_err, sizeof(err_name));
		else
			strncpy(err_name, req->std_out, sizeof(err_name));
		/* Expand a single "%j" file-name pattern to the job id */
		if ((fmt_char = strchr(err_name, (int) '%')) &&
		    (fmt_char[1] == 'j') && !strchr(fmt_char+1, (int) '%')) {
			char tmp_name[256];
			fmt_char[1] = 'u';
			snprintf(tmp_name, sizeof(tmp_name), err_name,
				 req->job_id);
			strncpy(err_name, tmp_name, sizeof(err_name));
		}
	} else {
		snprintf(err_name, sizeof(err_name), "slurm-%u.out",
			 req->job_id);
	}
	err_name_ptr = err_name;
	if (err_name_ptr[0] == '/')
		snprintf(path_name, MAXPATHLEN, "%s", err_name_ptr);
	else if (req->work_dir)
		snprintf(path_name, MAXPATHLEN, "%s/%s",
			 req->work_dir, err_name_ptr);
	else
		snprintf(path_name, MAXPATHLEN, "/%s", err_name_ptr);

	/* NOTE(review): path_name derives from user-supplied fields and is
	 * opened here with slurmd's privileges, without O_NOFOLLOW/O_EXCL —
	 * verify this cannot be abused via symlinks (cf. the later upstream
	 * hardening of this function) */
	if ((fd = open(path_name, (O_CREAT|O_APPEND|O_WRONLY), 0644)) == -1) {
		error("Unable to open %s: %s",
		      path_name, slurm_strerror(errno));
		return;
	}
snprintf(err_name, sizeof(err_name), "Error running slurm prolog: %d\n", WEXITSTATUS(rc)); safe_write(fd, err_name, strlen(err_name)); if (fchown(fd, (uid_t) req->uid, (gid_t) req->gid) == -1) { snprintf(err_name, sizeof(err_name), "Couldn't change fd owner to %u:%u: %m\n", req->uid, req->gid); } rwfail: close(fd); } /* load the user's environment on this machine if requested * SLURM_GET_USER_ENV environment variable is set */ static int _get_user_env(batch_job_launch_msg_t *req) { struct passwd pwd, *pwd_ptr = NULL; char pwd_buf[PW_BUF_SIZE]; char **new_env; int i; static time_t config_update = 0; static bool no_env_cache = false; if (config_update != conf->last_update) { char *sched_params = slurm_get_sched_params(); no_env_cache = (sched_params && strstr(sched_params, "no_env_cache")); xfree(sched_params); config_update = conf->last_update; } for (i=0; i<req->envc; i++) { if (xstrcmp(req->environment[i], "SLURM_GET_USER_ENV=1") == 0) break; } if (i >= req->envc) return 0; /* don't need to load env */ if (slurm_getpwuid_r(req->uid, &pwd, pwd_buf, PW_BUF_SIZE, &pwd_ptr) || (pwd_ptr == NULL)) { error("%s: getpwuid_r(%u):%m", __func__, req->uid); return -1; } verbose("%s: get env for user %s here", __func__, pwd.pw_name); /* Permit up to 120 second delay before using cache file */ new_env = env_array_user_default(pwd.pw_name, 120, 0, no_env_cache); if (! new_env) { error("%s: Unable to get user's local environment%s", __func__, no_env_cache ? 
"" : ", running only with passed environment"); return -1; } env_array_merge(&new_env, (const char **) req->environment); env_array_free(req->environment); req->environment = new_env; req->envc = envcount(new_env); return 0; } /* The RPC currently contains a memory size limit, but we load the * value from the job credential to be certain it has not been * altered by the user */ static void _set_batch_job_limits(slurm_msg_t *msg) { int i; uint32_t alloc_lps = 0, last_bit = 0; bool cpu_log = slurm_get_debug_flags() & DEBUG_FLAG_CPU_BIND; slurm_cred_arg_t arg; batch_job_launch_msg_t *req = (batch_job_launch_msg_t *)msg->data; if (slurm_cred_get_args(req->cred, &arg) != SLURM_SUCCESS) return; req->job_core_spec = arg.job_core_spec; /* Prevent user reset */ if (cpu_log) { char *per_job = ""; uint32_t job_mem = arg.job_mem_limit; if (job_mem & MEM_PER_CPU) { job_mem &= (~MEM_PER_CPU); per_job = "_per_CPU"; } info("===================="); info("batch_job:%u job_mem:%uMB%s", req->job_id, job_mem, per_job); } if (cpu_log || (arg.job_mem_limit & MEM_PER_CPU)) { if (arg.job_nhosts > 0) { last_bit = arg.sockets_per_node[0] * arg.cores_per_socket[0]; for (i=0; i<last_bit; i++) { if (!bit_test(arg.job_core_bitmap, i)) continue; if (cpu_log) info("JobNode[0] CPU[%u] Job alloc",i); alloc_lps++; } } if (cpu_log) info("===================="); if (alloc_lps == 0) { error("_set_batch_job_limit: alloc_lps is zero"); alloc_lps = 1; } /* NOTE: alloc_lps is the count of allocated resources * (typically cores). 
Convert to CPU count as needed */
		if (last_bit < 1)
			error("Batch job credential allocates no CPUs");
		else {
			/* Scale allocated cores up to CPUs when the node
			 * reports more CPUs (threads) than core-map bits. */
			i = conf->cpus / last_bit;
			if (i > 1)
				alloc_lps *= i;
		}
	}

	/* Use the limit recorded in the signed credential, not the
	 * (user-supplied) value in the RPC; expand per-CPU limits to a
	 * per-node value. */
	if (arg.job_mem_limit & MEM_PER_CPU) {
		req->job_mem = arg.job_mem_limit & (~MEM_PER_CPU);
		req->job_mem *= alloc_lps;
	} else
		req->job_mem = arg.job_mem_limit;
	slurm_cred_free_args(&arg);
}

/* These functions prevent a possible race condition if the batch script's
 * complete RPC is processed before it's launch_successful response. This
 * */
/* Return true if job_id is recorded in the (file-scope) ring buffer of
 * recently finished batch jobs; access is serialized by fini_mutex. */
static bool _is_batch_job_finished(uint32_t job_id)
{
	bool found_job = false;
	int i;

	slurm_mutex_lock(&fini_mutex);
	for (i = 0; i < FINI_JOB_CNT; i++) {
		if (fini_job_id[i] == job_id) {
			found_job = true;
			break;
		}
	}
	slurm_mutex_unlock(&fini_mutex);

	return found_job;
}

/* Record job_id in the fixed-size ring buffer of finished batch jobs,
 * overwriting the oldest entry once FINI_JOB_CNT entries exist. */
static void _note_batch_job_finished(uint32_t job_id)
{
	slurm_mutex_lock(&fini_mutex);
	fini_job_id[next_fini_job_inx] = job_id;
	if (++next_fini_job_inx >= FINI_JOB_CNT)
		next_fini_job_inx = 0;	/* wrap the ring index */
	slurm_mutex_unlock(&fini_mutex);
}

/* Send notification to slurmctld we are finished running the prolog.
 * This is needed on system that don't use srun to launch their tasks.
*/
/*
 * Tell slurmctld the prolog has completed on this node, reporting
 * prolog_return_code.  Errors are logged but otherwise ignored.
 */
static void _notify_slurmctld_prolog_fini(
	uint32_t job_id, uint32_t prolog_return_code)
{
	int rc;
	slurm_msg_t req_msg;
	complete_prolog_msg_t req;

	slurm_msg_t_init(&req_msg);
	req.job_id	= job_id;
	req.prolog_rc	= prolog_return_code;

	req_msg.msg_type= REQUEST_COMPLETE_PROLOG;
	req_msg.data	= &req;

	if ((slurm_send_recv_controller_rc_msg(&req_msg, &rc) < 0) ||
	    (rc != SLURM_SUCCESS))
		error("Error sending prolog completion notification: %m");
}

/* Convert memory limits from per-CPU to per-node */
static void _convert_job_mem(slurm_msg_t *msg)
{
	prolog_launch_msg_t *req = (prolog_launch_msg_t *)msg->data;
	slurm_cred_arg_t arg;
	hostset_t j_hset = NULL;
	int rc, hi, host_index, job_cpus;
	int i, i_first_bit = 0, i_last_bit = 0;

	rc = slurm_cred_verify(conf->vctx, req->cred, &arg,
			       msg->protocol_version);
	if (rc < 0) {
		error("%s: slurm_cred_verify failed: %m", __func__);
		req->nnodes = 1;	/* best guess */
		return;
	}

	req->nnodes = arg.job_nhosts;

	if (arg.job_mem_limit == 0)
		goto fini;
	/* Already a per-node limit: pass it through unchanged */
	if ((arg.job_mem_limit & MEM_PER_CPU) == 0) {
		req->job_mem_limit = arg.job_mem_limit;
		goto fini;
	}

	/* Assume 1 CPU on error */
	req->job_mem_limit = arg.job_mem_limit & (~MEM_PER_CPU);
	if (!(j_hset = hostset_create(arg.job_hostlist))) {
		/* Bug fix: log the hostlist we actually failed to parse
		 * (job_hostlist), not step_hostlist. */
		error("%s: Unable to parse credential hostlist: `%s'",
		      __func__, arg.job_hostlist);
		goto fini;
	}
	host_index = hostset_find(j_hset, conf->node_name);
	hostset_destroy(j_hset);
	if (host_index < 0) {
		/* Bug fix: if this node is absent from the credential's
		 * hostlist the bit-range loop below never executes, the
		 * caller would count zero allocated CPUs, and multiplying
		 * by that would silently ZERO (i.e. remove) the memory
		 * limit.  Keep the per-CPU base value instead, matching
		 * the other error paths above. */
		error("%s: host %s not found in credential hostlist `%s'",
		      __func__, conf->node_name, arg.job_hostlist);
		goto fini;
	}

	/* Locate this node's slice of the job's core bitmap */
	hi = host_index + 1;	/* change from 0-origin to 1-origin */
	for (i = 0; hi; i++) {
		if (hi > arg.sock_core_rep_count[i]) {
			i_first_bit += arg.sockets_per_node[i] *
				       arg.cores_per_socket[i] *
				       arg.sock_core_rep_count[i];
			i_last_bit = i_first_bit +
				     arg.sockets_per_node[i] *
				     arg.cores_per_socket[i] *
				     arg.sock_core_rep_count[i];
			hi -= arg.sock_core_rep_count[i];
		} else {
			i_first_bit += arg.sockets_per_node[i] *
				       arg.cores_per_socket[i] * (hi - 1);
			i_last_bit = i_first_bit +
				     arg.sockets_per_node[i] *
				     arg.cores_per_socket[i];
			break;
		}
	}

	/* Now count the allocated processors on this node */
	job_cpus = 0;
for (i = i_first_bit; i < i_last_bit; i++) { if (bit_test(arg.job_core_bitmap, i)) job_cpus++; } /* NOTE: alloc_lps is the count of allocated resources * (typically cores). Convert to CPU count as needed */ if (i_last_bit > i_first_bit) { i = conf->cpus / (i_last_bit - i_first_bit); if (i > 1) job_cpus *= i; } req->job_mem_limit *= job_cpus; fini: slurm_cred_free_args(&arg); } static void _make_prolog_mem_container(slurm_msg_t *msg) { prolog_launch_msg_t *req = (prolog_launch_msg_t *)msg->data; job_mem_limits_t *job_limits_ptr; step_loc_t step_info; _convert_job_mem(msg); /* Convert per-CPU mem limit */ if (req->job_mem_limit) { slurm_mutex_lock(&job_limits_mutex); if (!job_limits_list) job_limits_list = list_create(_job_limits_free); step_info.jobid = req->job_id; step_info.stepid = SLURM_EXTERN_CONT; job_limits_ptr = list_find_first (job_limits_list, _step_limits_match, &step_info); if (!job_limits_ptr) { job_limits_ptr = xmalloc(sizeof(job_mem_limits_t)); job_limits_ptr->job_id = req->job_id; job_limits_ptr->job_mem = req->job_mem_limit; job_limits_ptr->step_id = SLURM_EXTERN_CONT; job_limits_ptr->step_mem = req->job_mem_limit; #if _LIMIT_INFO info("AddLim step:%u.%u job_mem:%u step_mem:%u", job_limits_ptr->job_id, job_limits_ptr->step_id, job_limits_ptr->job_mem, job_limits_ptr->step_mem); #endif list_append(job_limits_list, job_limits_ptr); } slurm_mutex_unlock(&job_limits_mutex); } } static void _spawn_prolog_stepd(slurm_msg_t *msg) { prolog_launch_msg_t *req = (prolog_launch_msg_t *)msg->data; launch_tasks_request_msg_t *launch_req; slurm_addr_t self; slurm_addr_t *cli = &msg->orig_addr; int i; launch_req = xmalloc(sizeof(launch_tasks_request_msg_t)); launch_req->alias_list = req->alias_list; launch_req->complete_nodelist = req->nodes; launch_req->cpus_per_task = 1; launch_req->cred = req->cred; launch_req->cwd = req->work_dir; launch_req->efname = "/dev/null"; launch_req->gid = req->gid; launch_req->global_task_ids = xmalloc(sizeof(uint32_t *) * 
req->nnodes); launch_req->ifname = "/dev/null"; launch_req->job_id = req->job_id; launch_req->job_mem_lim = req->job_mem_limit; launch_req->job_step_id = SLURM_EXTERN_CONT; launch_req->nnodes = req->nnodes; launch_req->ntasks = req->nnodes; launch_req->ofname = "/dev/null"; launch_req->partition = req->partition; launch_req->spank_job_env_size = req->spank_job_env_size; launch_req->spank_job_env = req->spank_job_env; launch_req->step_mem_lim = req->job_mem_limit; launch_req->tasks_to_launch = xmalloc(sizeof(uint16_t) * req->nnodes); launch_req->uid = req->uid; for (i = 0; i < req->nnodes; i++) { uint32_t *tmp32 = xmalloc(sizeof(uint32_t)); *tmp32 = i; launch_req->global_task_ids[i] = tmp32; launch_req->tasks_to_launch[i] = 1; } slurm_get_stream_addr(msg->conn_fd, &self); /* Since job could have been killed while the prolog was * running (especially on BlueGene, which can take minutes * for partition booting). Test if the credential has since * been revoked and exit as needed. */ if (slurm_cred_revoked(conf->vctx, req->cred)) { info("Job %u already killed, do not launch extern step", req->job_id); } else { hostset_t step_hset = hostset_create(req->nodes); debug3("%s: call to _forkexec_slurmstepd", __func__); (void) _forkexec_slurmstepd( LAUNCH_TASKS, (void *)launch_req, cli, &self, step_hset, msg->protocol_version); debug3("%s: return from _forkexec_slurmstepd", __func__); if (step_hset) hostset_destroy(step_hset); } for (i = 0; i < req->nnodes; i++) xfree(launch_req->global_task_ids[i]); xfree(launch_req->global_task_ids); xfree(launch_req->tasks_to_launch); xfree(launch_req); } static void _rpc_prolog(slurm_msg_t *msg) { int rc = SLURM_SUCCESS; prolog_launch_msg_t *req = (prolog_launch_msg_t *)msg->data; job_env_t job_env; bool first_job_run; uid_t req_uid; if (req == NULL) return; req_uid = g_slurm_auth_get_uid(msg->auth_cred, conf->auth_info); if (!_slurm_authorized_user(req_uid)) { error("REQUEST_LAUNCH_PROLOG request from uid %u", (unsigned int) req_uid); 
return; } if (slurm_send_rc_msg(msg, rc) < 0) { error("Error starting prolog: %m"); } if (rc) { int term_sig, exit_status; if (WIFSIGNALED(rc)) { exit_status = 0; term_sig = WTERMSIG(rc); } else { exit_status = WEXITSTATUS(rc); term_sig = 0; } error("[job %u] prolog start failed status=%d:%d", req->job_id, exit_status, term_sig); rc = ESLURMD_PROLOG_FAILED; } slurm_mutex_lock(&prolog_mutex); first_job_run = !slurm_cred_jobid_cached(conf->vctx, req->job_id); if (first_job_run) { if (slurmctld_conf.prolog_flags & PROLOG_FLAG_CONTAIN) _make_prolog_mem_container(msg); if (container_g_create(req->job_id)) error("container_g_create(%u): %m", req->job_id); slurm_cred_insert_jobid(conf->vctx, req->job_id); _add_job_running_prolog(req->job_id); slurm_mutex_unlock(&prolog_mutex); memset(&job_env, 0, sizeof(job_env_t)); job_env.jobid = req->job_id; job_env.step_id = 0; /* not available */ job_env.node_list = req->nodes; job_env.partition = req->partition; job_env.spank_job_env = req->spank_job_env; job_env.spank_job_env_size = req->spank_job_env_size; job_env.uid = req->uid; job_env.user_name = req->user_name; #if defined(HAVE_BG) select_g_select_jobinfo_get(req->select_jobinfo, SELECT_JOBDATA_BLOCK_ID, &job_env.resv_id); #elif defined(HAVE_ALPS_CRAY) job_env.resv_id = select_g_select_jobinfo_xstrdup( req->select_jobinfo, SELECT_PRINT_RESV_ID); #endif rc = _run_prolog(&job_env, req->cred); if (rc) { int term_sig, exit_status; if (WIFSIGNALED(rc)) { exit_status = 0; term_sig = WTERMSIG(rc); } else { exit_status = WEXITSTATUS(rc); term_sig = 0; } error("[job %u] prolog failed status=%d:%d", req->job_id, exit_status, term_sig); rc = ESLURMD_PROLOG_FAILED; } } else slurm_mutex_unlock(&prolog_mutex); if (!(slurmctld_conf.prolog_flags & PROLOG_FLAG_NOHOLD)) _notify_slurmctld_prolog_fini(req->job_id, rc); if (rc == SLURM_SUCCESS) { if (slurmctld_conf.prolog_flags & PROLOG_FLAG_CONTAIN) _spawn_prolog_stepd(msg); } else { _launch_job_fail(req->job_id, rc); /* * If job prolog failed or 
we could not reply, * initiate message to slurmctld with current state */ if ((rc == ESLURMD_PROLOG_FAILED) || (rc == SLURM_COMMUNICATIONS_SEND_ERROR) || (rc == ESLURMD_SETUP_ENVIRONMENT_ERROR)) send_registration_msg(rc, false); } } static void _rpc_batch_job(slurm_msg_t *msg, bool new_msg) { batch_job_launch_msg_t *req = (batch_job_launch_msg_t *)msg->data; bool first_job_run; int rc = SLURM_SUCCESS; bool replied = false, revoked; slurm_addr_t *cli = &msg->orig_addr; if (new_msg) { uid_t req_uid = g_slurm_auth_get_uid(msg->auth_cred, conf->auth_info); if (!_slurm_authorized_user(req_uid)) { error("Security violation, batch launch RPC from uid %d", req_uid); rc = ESLURM_USER_ID_MISSING; /* or bad in this case */ goto done; } } if (_launch_job_test(req->job_id)) { error("Job %u already running, do not launch second copy", req->job_id); rc = ESLURM_DUPLICATE_JOB_ID; /* job already running */ _launch_job_fail(req->job_id, rc); goto done; } slurm_cred_handle_reissue(conf->vctx, req->cred); if (slurm_cred_revoked(conf->vctx, req->cred)) { error("Job %u already killed, do not launch batch job", req->job_id); rc = ESLURMD_CREDENTIAL_REVOKED; /* job already ran */ goto done; } task_g_slurmd_batch_request(req->job_id, req); /* determine task affinity */ slurm_mutex_lock(&prolog_mutex); first_job_run = !slurm_cred_jobid_cached(conf->vctx, req->job_id); /* BlueGene prolog waits for partition boot and is very slow. * On any system we might need to load environment variables * for Moab (see --get-user-env), which could also be slow. * Just reply now and send a separate kill job request if the * prolog or launch fail. */ replied = true; if (new_msg && (slurm_send_rc_msg(msg, rc) < 1)) { /* The slurmctld is no longer waiting for a reply. * This typically indicates that the slurmd was * blocked from memory and/or CPUs and the slurmctld * has requeued the batch job request. 
*/ error("Could not confirm batch launch for job %u, " "aborting request", req->job_id); rc = SLURM_COMMUNICATIONS_SEND_ERROR; slurm_mutex_unlock(&prolog_mutex); goto done; } /* * Insert jobid into credential context to denote that * we've now "seen" an instance of the job */ if (first_job_run) { job_env_t job_env; slurm_cred_insert_jobid(conf->vctx, req->job_id); _add_job_running_prolog(req->job_id); slurm_mutex_unlock(&prolog_mutex); memset(&job_env, 0, sizeof(job_env_t)); job_env.jobid = req->job_id; job_env.step_id = req->step_id; job_env.node_list = req->nodes; job_env.partition = req->partition; job_env.spank_job_env = req->spank_job_env; job_env.spank_job_env_size = req->spank_job_env_size; job_env.uid = req->uid; job_env.user_name = req->user_name; /* * Run job prolog on this node */ #if defined(HAVE_BG) select_g_select_jobinfo_get(req->select_jobinfo, SELECT_JOBDATA_BLOCK_ID, &job_env.resv_id); #elif defined(HAVE_ALPS_CRAY) job_env.resv_id = select_g_select_jobinfo_xstrdup( req->select_jobinfo, SELECT_PRINT_RESV_ID); #endif if (container_g_create(req->job_id)) error("container_g_create(%u): %m", req->job_id); rc = _run_prolog(&job_env, req->cred); xfree(job_env.resv_id); if (rc) { int term_sig, exit_status; if (WIFSIGNALED(rc)) { exit_status = 0; term_sig = WTERMSIG(rc); } else { exit_status = WEXITSTATUS(rc); term_sig = 0; } error("[job %u] prolog failed status=%d:%d", req->job_id, exit_status, term_sig); _prolog_error(req, rc); rc = ESLURMD_PROLOG_FAILED; goto done; } } else { slurm_mutex_unlock(&prolog_mutex); _wait_for_job_running_prolog(req->job_id); } if (_get_user_env(req) < 0) { bool requeue = _requeue_setup_env_fail(); if (requeue) { rc = ESLURMD_SETUP_ENVIRONMENT_ERROR; goto done; } } _set_batch_job_limits(msg); /* Since job could have been killed while the prolog was * running (especially on BlueGene, which can take minutes * for partition booting). Test if the credential has since * been revoked and exit as needed. 
*/ if (slurm_cred_revoked(conf->vctx, req->cred)) { info("Job %u already killed, do not launch batch job", req->job_id); rc = ESLURMD_CREDENTIAL_REVOKED; /* job already ran */ goto done; } slurm_mutex_lock(&launch_mutex); if (req->step_id == SLURM_BATCH_SCRIPT) info("Launching batch job %u for UID %d", req->job_id, req->uid); else info("Launching batch job %u.%u for UID %d", req->job_id, req->step_id, req->uid); debug3("_rpc_batch_job: call to _forkexec_slurmstepd"); rc = _forkexec_slurmstepd(LAUNCH_BATCH_JOB, (void *)req, cli, NULL, (hostset_t)NULL, SLURM_PROTOCOL_VERSION); debug3("_rpc_batch_job: return from _forkexec_slurmstepd: %d", rc); slurm_mutex_unlock(&launch_mutex); _launch_complete_add(req->job_id); /* On a busy system, slurmstepd may take a while to respond, * if the job was cancelled in the interim, run through the * abort logic below. */ revoked = slurm_cred_revoked(conf->vctx, req->cred); if (revoked) _launch_complete_rm(req->job_id); if (revoked && _is_batch_job_finished(req->job_id)) { /* If configured with select/serial and the batch job already * completed, consider the job sucessfully launched and do * not repeat termination logic below, which in the worst case * just slows things down with another message. */ revoked = false; } if (revoked) { info("Job %u killed while launch was in progress", req->job_id); sleep(1); /* give slurmstepd time to create * the communication socket */ _terminate_all_steps(req->job_id, true); rc = ESLURMD_CREDENTIAL_REVOKED; goto done; } done: if (!replied) { if (new_msg && (slurm_send_rc_msg(msg, rc) < 1)) { /* The slurmctld is no longer waiting for a reply. * This typically indicates that the slurmd was * blocked from memory and/or CPUs and the slurmctld * has requeued the batch job request. 
*/ error("Could not confirm batch launch for job %u, " "aborting request", req->job_id); rc = SLURM_COMMUNICATIONS_SEND_ERROR; } else { /* No need to initiate separate reply below */ rc = SLURM_SUCCESS; } } if (rc != SLURM_SUCCESS) { /* prolog or job launch failure, * tell slurmctld that the job failed */ if (req->step_id == SLURM_BATCH_SCRIPT) _launch_job_fail(req->job_id, rc); else _abort_step(req->job_id, req->step_id); } /* * If job prolog failed or we could not reply, * initiate message to slurmctld with current state */ if ((rc == ESLURMD_PROLOG_FAILED) || (rc == SLURM_COMMUNICATIONS_SEND_ERROR) || (rc == ESLURMD_SETUP_ENVIRONMENT_ERROR)) { send_registration_msg(rc, false); } } /* * Send notification message to batch job */ static void _rpc_job_notify(slurm_msg_t *msg) { job_notify_msg_t *req = msg->data; uid_t req_uid = g_slurm_auth_get_uid(msg->auth_cred, conf->auth_info); uid_t job_uid; List steps; ListIterator i; step_loc_t *stepd = NULL; int step_cnt = 0; int fd; debug("_rpc_job_notify, uid = %d, jobid = %u", req_uid, req->job_id); job_uid = _get_job_uid(req->job_id); if ((int)job_uid < 0) goto no_job; /* * check that requesting user ID is the SLURM UID or root */ if ((req_uid != job_uid) && (!_slurm_authorized_user(req_uid))) { error("Security violation: job_notify(%u) from uid %d", req->job_id, req_uid); return; } steps = stepd_available(conf->spooldir, conf->node_name); i = list_iterator_create(steps); while ((stepd = list_next(i))) { if ((stepd->jobid != req->job_id) || (stepd->stepid != SLURM_BATCH_SCRIPT)) { continue; } step_cnt++; fd = stepd_connect(stepd->directory, stepd->nodename, stepd->jobid, stepd->stepid, &stepd->protocol_version); if (fd == -1) { debug3("Unable to connect to step %u.%u", stepd->jobid, stepd->stepid); continue; } info("send notification to job %u.%u", stepd->jobid, stepd->stepid); if (stepd_notify_job(fd, stepd->protocol_version, req->message) < 0) debug("notify jobid=%u failed: %m", stepd->jobid); close(fd); } 
list_iterator_destroy(i); FREE_NULL_LIST(steps); no_job: if (step_cnt == 0) { debug2("Can't find jobid %u to send notification message", req->job_id); } } static int _launch_job_fail(uint32_t job_id, uint32_t slurm_rc) { complete_batch_script_msg_t comp_msg; struct requeue_msg req_msg; slurm_msg_t resp_msg; int rc = 0, rpc_rc; static time_t config_update = 0; static bool requeue_no_hold = false; if (config_update != conf->last_update) { char *sched_params = slurm_get_sched_params(); requeue_no_hold = (sched_params && strstr( sched_params, "nohold_on_prolog_fail")); xfree(sched_params); config_update = conf->last_update; } slurm_msg_t_init(&resp_msg); if (slurm_rc == ESLURMD_CREDENTIAL_REVOKED) { comp_msg.job_id = job_id; comp_msg.job_rc = INFINITE; comp_msg.slurm_rc = slurm_rc; comp_msg.node_name = conf->node_name; comp_msg.jobacct = NULL; /* unused */ resp_msg.msg_type = REQUEST_COMPLETE_BATCH_SCRIPT; resp_msg.data = &comp_msg; } else { req_msg.job_id = job_id; req_msg.job_id_str = NULL; if (requeue_no_hold) { req_msg.state = JOB_PENDING; } else { req_msg.state = (JOB_REQUEUE_HOLD|JOB_LAUNCH_FAILED); } resp_msg.msg_type = REQUEST_JOB_REQUEUE; resp_msg.data = &req_msg; } rpc_rc = slurm_send_recv_controller_rc_msg(&resp_msg, &rc); if ((resp_msg.msg_type == REQUEST_JOB_REQUEUE) && ((rc == ESLURM_DISABLED) || (rc == ESLURM_BATCH_ONLY))) { info("Could not launch job %u and not able to requeue it, " "cancelling job", job_id); if ((slurm_rc == ESLURMD_PROLOG_FAILED) && (rc == ESLURM_BATCH_ONLY)) { char *buf = NULL; xstrfmtcat(buf, "Prolog failure on node %s", conf->node_name); slurm_notify_job(job_id, buf); xfree(buf); } comp_msg.job_id = job_id; comp_msg.job_rc = INFINITE; comp_msg.slurm_rc = slurm_rc; comp_msg.node_name = conf->node_name; comp_msg.jobacct = NULL; /* unused */ resp_msg.msg_type = REQUEST_COMPLETE_BATCH_SCRIPT; resp_msg.data = &comp_msg; rpc_rc = slurm_send_recv_controller_rc_msg(&resp_msg, &rc); } return rpc_rc; } static int _abort_step(uint32_t job_id, 
uint32_t step_id) { step_complete_msg_t resp; slurm_msg_t resp_msg; slurm_msg_t_init(&resp_msg); int rc, rc2; resp.job_id = job_id; resp.job_step_id = step_id; resp.range_first = 0; resp.range_last = 0; resp.step_rc = 1; resp.jobacct = jobacctinfo_create(NULL); resp_msg.msg_type = REQUEST_STEP_COMPLETE; resp_msg.data = &resp; rc2 = slurm_send_recv_controller_rc_msg(&resp_msg, &rc); /* Note: we are ignoring the RPC return code */ jobacctinfo_destroy(resp.jobacct); return rc2; } static void _rpc_reconfig(slurm_msg_t *msg) { uid_t req_uid = g_slurm_auth_get_uid(msg->auth_cred, conf->auth_info); if (!_slurm_authorized_user(req_uid)) error("Security violation, reconfig RPC from uid %d", req_uid); else kill(conf->pid, SIGHUP); forward_wait(msg); /* Never return a message, slurmctld does not expect one */ } static void _rpc_shutdown(slurm_msg_t *msg) { uid_t req_uid = g_slurm_auth_get_uid(msg->auth_cred, conf->auth_info); forward_wait(msg); if (!_slurm_authorized_user(req_uid)) error("Security violation, shutdown RPC from uid %d", req_uid); else { if (kill(conf->pid, SIGTERM) != 0) error("kill(%u,SIGTERM): %m", conf->pid); } /* Never return a message, slurmctld does not expect one */ } static void _rpc_reboot(slurm_msg_t *msg) { char *reboot_program, *cmd = NULL, *sp; reboot_msg_t *reboot_msg; slurm_ctl_conf_t *cfg; uid_t req_uid = g_slurm_auth_get_uid(msg->auth_cred, conf->auth_info); int exit_code; if (!_slurm_authorized_user(req_uid)) error("Security violation, reboot RPC from uid %d", req_uid); else { cfg = slurm_conf_lock(); reboot_program = cfg->reboot_program; if (reboot_program) { sp = strchr(reboot_program, ' '); if (sp) sp = xstrndup(reboot_program, (sp - reboot_program)); else sp = xstrdup(reboot_program); reboot_msg = (reboot_msg_t *) msg->data; if (reboot_msg && reboot_msg->features) { info("Node reboot request with features %s being processed", reboot_msg->features); (void) node_features_g_node_set( reboot_msg->features); if (reboot_msg->features[0]) { 
xstrfmtcat(cmd, "%s %s", sp, reboot_msg->features); } else { cmd = xstrdup(sp); } } else { cmd = xstrdup(sp); info("Node reboot request being processed"); } if (access(sp, R_OK | X_OK) < 0) error("Cannot run RebootProgram [%s]: %m", sp); else if ((exit_code = system(cmd))) error("system(%s) returned %d", reboot_program, exit_code); xfree(sp); xfree(cmd); } else error("RebootProgram isn't defined in config"); slurm_conf_unlock(); } /* Never return a message, slurmctld does not expect one */ /* slurm_send_rc_msg(msg, rc); */ } static void _job_limits_free(void *x) { xfree(x); } static int _job_limits_match(void *x, void *key) { job_mem_limits_t *job_limits_ptr = (job_mem_limits_t *) x; uint32_t *job_id = (uint32_t *) key; if (job_limits_ptr->job_id == *job_id) return 1; return 0; } static int _step_limits_match(void *x, void *key) { job_mem_limits_t *job_limits_ptr = (job_mem_limits_t *) x; step_loc_t *step_ptr = (step_loc_t *) key; if ((job_limits_ptr->job_id == step_ptr->jobid) && (job_limits_ptr->step_id == step_ptr->stepid)) return 1; return 0; } /* Call only with job_limits_mutex locked */ static void _load_job_limits(void) { List steps; ListIterator step_iter; step_loc_t *stepd; int fd; job_mem_limits_t *job_limits_ptr; slurmstepd_mem_info_t stepd_mem_info; if (!job_limits_list) job_limits_list = list_create(_job_limits_free); job_limits_loaded = true; steps = stepd_available(conf->spooldir, conf->node_name); step_iter = list_iterator_create(steps); while ((stepd = list_next(step_iter))) { job_limits_ptr = list_find_first(job_limits_list, _step_limits_match, stepd); if (job_limits_ptr) /* already processed */ continue; fd = stepd_connect(stepd->directory, stepd->nodename, stepd->jobid, stepd->stepid, &stepd->protocol_version); if (fd == -1) continue; /* step completed */ if (stepd_get_mem_limits(fd, stepd->protocol_version, &stepd_mem_info) != SLURM_SUCCESS) { error("Error reading step %u.%u memory limits from " "slurmstepd", stepd->jobid, stepd->stepid); 
close(fd); continue; } if ((stepd_mem_info.job_mem_limit || stepd_mem_info.step_mem_limit)) { /* create entry for this job */ job_limits_ptr = xmalloc(sizeof(job_mem_limits_t)); job_limits_ptr->job_id = stepd->jobid; job_limits_ptr->step_id = stepd->stepid; job_limits_ptr->job_mem = stepd_mem_info.job_mem_limit; job_limits_ptr->step_mem = stepd_mem_info.step_mem_limit; #if _LIMIT_INFO info("RecLim step:%u.%u job_mem:%u step_mem:%u", job_limits_ptr->job_id, job_limits_ptr->step_id, job_limits_ptr->job_mem, job_limits_ptr->step_mem); #endif list_append(job_limits_list, job_limits_ptr); } close(fd); } list_iterator_destroy(step_iter); FREE_NULL_LIST(steps); } static void _cancel_step_mem_limit(uint32_t job_id, uint32_t step_id) { slurm_msg_t msg; job_notify_msg_t notify_req; job_step_kill_msg_t kill_req; /* NOTE: Batch jobs may have no srun to get this message */ slurm_msg_t_init(&msg); notify_req.job_id = job_id; notify_req.job_step_id = step_id; notify_req.message = "Exceeded job memory limit"; msg.msg_type = REQUEST_JOB_NOTIFY; msg.data = &notify_req; slurm_send_only_controller_msg(&msg); memset(&kill_req, 0, sizeof(job_step_kill_msg_t)); kill_req.job_id = job_id; kill_req.job_step_id = step_id; kill_req.signal = SIGKILL; kill_req.flags = (uint16_t) 0; msg.msg_type = REQUEST_CANCEL_JOB_STEP; msg.data = &kill_req; slurm_send_only_controller_msg(&msg); } /* Enforce job memory limits here in slurmd. Step memory limits are * enforced within slurmstepd (using jobacct_gather plugin). 
*/ static void _enforce_job_mem_limit(void) { List steps; ListIterator step_iter, job_limits_iter; job_mem_limits_t *job_limits_ptr; step_loc_t *stepd; int fd, i, job_inx, job_cnt; uint16_t vsize_factor; uint64_t step_rss, step_vsize; job_step_id_msg_t acct_req; job_step_stat_t *resp = NULL; struct job_mem_info { uint32_t job_id; uint32_t mem_limit; /* MB */ uint32_t mem_used; /* MB */ uint32_t vsize_limit; /* MB */ uint32_t vsize_used; /* MB */ }; struct job_mem_info *job_mem_info_ptr = NULL; /* If users have configured MemLimitEnforce=no * in their slurm.conf keep going. */ if (conf->mem_limit_enforce == false) return; slurm_mutex_lock(&job_limits_mutex); if (!job_limits_loaded) _load_job_limits(); if (list_count(job_limits_list) == 0) { slurm_mutex_unlock(&job_limits_mutex); return; } /* Build table of job limits, use highest mem limit recorded */ job_mem_info_ptr = xmalloc((list_count(job_limits_list) + 1) * sizeof(struct job_mem_info)); job_cnt = 0; job_limits_iter = list_iterator_create(job_limits_list); while ((job_limits_ptr = list_next(job_limits_iter))) { if (job_limits_ptr->job_mem == 0) /* no job limit */ continue; for (i=0; i<job_cnt; i++) { if (job_mem_info_ptr[i].job_id != job_limits_ptr->job_id) continue; job_mem_info_ptr[i].mem_limit = MAX( job_mem_info_ptr[i].mem_limit, job_limits_ptr->job_mem); break; } if (i < job_cnt) /* job already found & recorded */ continue; job_mem_info_ptr[job_cnt].job_id = job_limits_ptr->job_id; job_mem_info_ptr[job_cnt].mem_limit = job_limits_ptr->job_mem; job_cnt++; } list_iterator_destroy(job_limits_iter); slurm_mutex_unlock(&job_limits_mutex); vsize_factor = slurm_get_vsize_factor(); for (i=0; i<job_cnt; i++) { job_mem_info_ptr[i].vsize_limit = job_mem_info_ptr[i]. 
mem_limit; job_mem_info_ptr[i].vsize_limit *= (vsize_factor / 100.0); } steps = stepd_available(conf->spooldir, conf->node_name); step_iter = list_iterator_create(steps); while ((stepd = list_next(step_iter))) { for (job_inx=0; job_inx<job_cnt; job_inx++) { if (job_mem_info_ptr[job_inx].job_id == stepd->jobid) break; } if (job_inx >= job_cnt) continue; /* job/step not being tracked */ fd = stepd_connect(stepd->directory, stepd->nodename, stepd->jobid, stepd->stepid, &stepd->protocol_version); if (fd == -1) continue; /* step completed */ acct_req.job_id = stepd->jobid; acct_req.step_id = stepd->stepid; resp = xmalloc(sizeof(job_step_stat_t)); if ((!stepd_stat_jobacct( fd, stepd->protocol_version, &acct_req, resp)) && (resp->jobacct)) { /* resp->jobacct is NULL if account is disabled */ jobacctinfo_getinfo((struct jobacctinfo *) resp->jobacct, JOBACCT_DATA_TOT_RSS, &step_rss, stepd->protocol_version); jobacctinfo_getinfo((struct jobacctinfo *) resp->jobacct, JOBACCT_DATA_TOT_VSIZE, &step_vsize, stepd->protocol_version); #if _LIMIT_INFO info("Step:%u.%u RSS:%"PRIu64" KB VSIZE:%"PRIu64" KB", stepd->jobid, stepd->stepid, step_rss, step_vsize); #endif step_rss /= 1024; /* KB to MB */ step_rss = MAX(step_rss, 1); job_mem_info_ptr[job_inx].mem_used += step_rss; step_vsize /= 1024; /* KB to MB */ step_vsize = MAX(step_vsize, 1); job_mem_info_ptr[job_inx].vsize_used += step_vsize; } slurm_free_job_step_stat(resp); close(fd); } list_iterator_destroy(step_iter); FREE_NULL_LIST(steps); for (i=0; i<job_cnt; i++) { if (job_mem_info_ptr[i].mem_used == 0) { /* no steps found, * purge records for all steps of this job */ slurm_mutex_lock(&job_limits_mutex); list_delete_all(job_limits_list, _job_limits_match, &job_mem_info_ptr[i].job_id); slurm_mutex_unlock(&job_limits_mutex); break; } if ((job_mem_info_ptr[i].mem_limit != 0) && (job_mem_info_ptr[i].mem_used > job_mem_info_ptr[i].mem_limit)) { info("Job %u exceeded memory limit (%u>%u), " "cancelling it", job_mem_info_ptr[i].job_id, 
job_mem_info_ptr[i].mem_used, job_mem_info_ptr[i].mem_limit); _cancel_step_mem_limit(job_mem_info_ptr[i].job_id, NO_VAL); } else if ((job_mem_info_ptr[i].vsize_limit != 0) && (job_mem_info_ptr[i].vsize_used > job_mem_info_ptr[i].vsize_limit)) { info("Job %u exceeded virtual memory limit (%u>%u), " "cancelling it", job_mem_info_ptr[i].job_id, job_mem_info_ptr[i].vsize_used, job_mem_info_ptr[i].vsize_limit); _cancel_step_mem_limit(job_mem_info_ptr[i].job_id, NO_VAL); } } xfree(job_mem_info_ptr); } static int _rpc_ping(slurm_msg_t *msg) { int rc = SLURM_SUCCESS; uid_t req_uid = g_slurm_auth_get_uid(msg->auth_cred, conf->auth_info); static bool first_msg = true; if (!_slurm_authorized_user(req_uid)) { error("Security violation, ping RPC from uid %d", req_uid); if (first_msg) { error("Do you have SlurmUser configured as uid %d?", req_uid); } rc = ESLURM_USER_ID_MISSING; /* or bad in this case */ } first_msg = false; if (rc != SLURM_SUCCESS) { /* Return result. If the reply can't be sent this indicates * 1. The network is broken OR * 2. slurmctld has died OR * 3. slurmd was paged out due to full memory * If the reply request fails, we send an registration message * to slurmctld in hopes of avoiding having the node set DOWN * due to slurmd paging and not being able to respond in a * timely fashion. 
*/ if (slurm_send_rc_msg(msg, rc) < 0) { error("Error responding to ping: %m"); send_registration_msg(SLURM_SUCCESS, false); } } else { slurm_msg_t resp_msg; ping_slurmd_resp_msg_t ping_resp; get_cpu_load(&ping_resp.cpu_load); get_free_mem(&ping_resp.free_mem); slurm_msg_t_copy(&resp_msg, msg); resp_msg.msg_type = RESPONSE_PING_SLURMD; resp_msg.data = &ping_resp; slurm_send_node_msg(msg->conn_fd, &resp_msg); } /* Take this opportunity to enforce any job memory limits */ _enforce_job_mem_limit(); /* Clear up any stalled file transfers as well */ _file_bcast_cleanup(); return rc; } static int _rpc_health_check(slurm_msg_t *msg) { int rc = SLURM_SUCCESS; uid_t req_uid = g_slurm_auth_get_uid(msg->auth_cred, conf->auth_info); if (!_slurm_authorized_user(req_uid)) { error("Security violation, health check RPC from uid %d", req_uid); rc = ESLURM_USER_ID_MISSING; /* or bad in this case */ } /* Return result. If the reply can't be sent this indicates that * 1. The network is broken OR * 2. slurmctld has died OR * 3. slurmd was paged out due to full memory * If the reply request fails, we send an registration message to * slurmctld in hopes of avoiding having the node set DOWN due to * slurmd paging and not being able to respond in a timely fashion. 
*/ if (slurm_send_rc_msg(msg, rc) < 0) { error("Error responding to health check: %m"); send_registration_msg(SLURM_SUCCESS, false); } if (rc == SLURM_SUCCESS) rc = run_script_health_check(); /* Take this opportunity to enforce any job memory limits */ _enforce_job_mem_limit(); /* Clear up any stalled file transfers as well */ _file_bcast_cleanup(); return rc; } static int _rpc_acct_gather_update(slurm_msg_t *msg) { int rc = SLURM_SUCCESS; uid_t req_uid = g_slurm_auth_get_uid(msg->auth_cred, conf->auth_info); static bool first_msg = true; if (!_slurm_authorized_user(req_uid)) { error("Security violation, acct_gather_update RPC from uid %d", req_uid); if (first_msg) { error("Do you have SlurmUser configured as uid %d?", req_uid); } rc = ESLURM_USER_ID_MISSING; /* or bad in this case */ } first_msg = false; if (rc != SLURM_SUCCESS) { /* Return result. If the reply can't be sent this indicates * 1. The network is broken OR * 2. slurmctld has died OR * 3. slurmd was paged out due to full memory * If the reply request fails, we send an registration message * to slurmctld in hopes of avoiding having the node set DOWN * due to slurmd paging and not being able to respond in a * timely fashion. 
*/ if (slurm_send_rc_msg(msg, rc) < 0) { error("Error responding to account gather: %m"); send_registration_msg(SLURM_SUCCESS, false); } } else { slurm_msg_t resp_msg; acct_gather_node_resp_msg_t acct_msg; /* Update node energy usage data */ acct_gather_energy_g_update_node_energy(); memset(&acct_msg, 0, sizeof(acct_gather_node_resp_msg_t)); acct_msg.node_name = conf->node_name; acct_msg.sensor_cnt = 1; acct_msg.energy = acct_gather_energy_alloc(acct_msg.sensor_cnt); acct_gather_energy_g_get_data( ENERGY_DATA_NODE_ENERGY, acct_msg.energy); slurm_msg_t_copy(&resp_msg, msg); resp_msg.msg_type = RESPONSE_ACCT_GATHER_UPDATE; resp_msg.data = &acct_msg; slurm_send_node_msg(msg->conn_fd, &resp_msg); acct_gather_energy_destroy(acct_msg.energy); } return rc; } static int _rpc_acct_gather_energy(slurm_msg_t *msg) { int rc = SLURM_SUCCESS; uid_t req_uid = g_slurm_auth_get_uid(msg->auth_cred, conf->auth_info); static bool first_msg = true; if (!_slurm_authorized_user(req_uid)) { error("Security violation, acct_gather_update RPC from uid %d", req_uid); if (first_msg) { error("Do you have SlurmUser configured as uid %d?", req_uid); } rc = ESLURM_USER_ID_MISSING; /* or bad in this case */ } first_msg = false; if (rc != SLURM_SUCCESS) { if (slurm_send_rc_msg(msg, rc) < 0) error("Error responding to energy request: %m"); } else { slurm_msg_t resp_msg; acct_gather_node_resp_msg_t acct_msg; time_t now = time(NULL), last_poll = 0; int data_type = ENERGY_DATA_STRUCT; uint16_t sensor_cnt; acct_gather_energy_req_msg_t *req = msg->data; acct_gather_energy_g_get_data(ENERGY_DATA_LAST_POLL, &last_poll); acct_gather_energy_g_get_data(ENERGY_DATA_SENSOR_CNT, &sensor_cnt); /* If we polled later than delta seconds then force a new poll. 
*/ if ((now - last_poll) > req->delta) data_type = ENERGY_DATA_JOULES_TASK; memset(&acct_msg, 0, sizeof(acct_gather_node_resp_msg_t)); acct_msg.sensor_cnt = sensor_cnt; acct_msg.energy = acct_gather_energy_alloc(acct_msg.sensor_cnt); acct_gather_energy_g_get_data(data_type, acct_msg.energy); slurm_msg_t_copy(&resp_msg, msg); resp_msg.msg_type = RESPONSE_ACCT_GATHER_ENERGY; resp_msg.data = &acct_msg; slurm_send_node_msg(msg->conn_fd, &resp_msg); acct_gather_energy_destroy(acct_msg.energy); } return rc; } static int _signal_jobstep(uint32_t jobid, uint32_t stepid, uid_t req_uid, uint32_t signal) { int fd, rc = SLURM_SUCCESS; uid_t uid; uint16_t protocol_version; /* There will be no stepd if the prolog is still running * Return failure so caller can retry. */ if (_prolog_is_running (jobid)) { info ("signal %d req for %u.%u while prolog is running." " Returning failure.", signal, jobid, stepid); return SLURM_FAILURE; } fd = stepd_connect(conf->spooldir, conf->node_name, jobid, stepid, &protocol_version); if (fd == -1) { debug("signal for nonexistent %u.%u stepd_connect failed: %m", jobid, stepid); return ESLURM_INVALID_JOB_ID; } if ((int)(uid = stepd_get_uid(fd, protocol_version)) < 0) { debug("_signal_jobstep: couldn't read from the step %u.%u: %m", jobid, stepid); rc = ESLURM_INVALID_JOB_ID; goto done2; } if ((req_uid != uid) && (!_slurm_authorized_user(req_uid))) { debug("kill req from uid %ld for job %u.%u owned by uid %ld", (long) req_uid, jobid, stepid, (long) uid); rc = ESLURM_USER_ID_MISSING; /* or bad in this case */ goto done2; } #ifdef HAVE_AIX # ifdef SIGMIGRATE # ifdef SIGSOUND /* SIGMIGRATE and SIGSOUND are used to initiate job checkpoint on AIX. * These signals are not sent to the entire process group, but just a * single process, namely the PMD. 
*/ if (signal == SIGMIGRATE || signal == SIGSOUND) { rc = stepd_signal_task_local(fd, protocol_version, signal, 0); goto done2; } # endif # endif #endif rc = stepd_signal_container(fd, protocol_version, signal); if (rc == -1) rc = ESLURMD_JOB_NOTRUNNING; done2: close(fd); return rc; } static void _rpc_signal_tasks(slurm_msg_t *msg) { int rc = SLURM_SUCCESS; uid_t req_uid = g_slurm_auth_get_uid(msg->auth_cred, conf->auth_info); kill_tasks_msg_t *req = (kill_tasks_msg_t *) msg->data; uint32_t flag; uint32_t sig; flag = req->signal >> 24; sig = req->signal & 0xfff; if (flag & KILL_FULL_JOB) { debug("%s: sending signal %u to entire job %u flag %u", __func__, sig, req->job_id, flag); _kill_all_active_steps(req->job_id, sig, true); } else if (flag & KILL_STEPS_ONLY) { debug("%s: sending signal %u to all steps job %u flag %u", __func__, sig, req->job_id, flag); _kill_all_active_steps(req->job_id, sig, false); } else { debug("%s: sending signal %u to step %u.%u flag %u", __func__, sig, req->job_id, req->job_step_id, flag); rc = _signal_jobstep(req->job_id, req->job_step_id, req_uid, req->signal); } slurm_send_rc_msg(msg, rc); } static void _rpc_checkpoint_tasks(slurm_msg_t *msg) { int fd; int rc = SLURM_SUCCESS; uid_t req_uid = g_slurm_auth_get_uid(msg->auth_cred, conf->auth_info); checkpoint_tasks_msg_t *req = (checkpoint_tasks_msg_t *) msg->data; uint16_t protocol_version; uid_t uid; fd = stepd_connect(conf->spooldir, conf->node_name, req->job_id, req->job_step_id, &protocol_version); if (fd == -1) { debug("checkpoint for nonexistent %u.%u stepd_connect " "failed: %m", req->job_id, req->job_step_id); rc = ESLURM_INVALID_JOB_ID; goto done; } if ((int)(uid = stepd_get_uid(fd, protocol_version)) < 0) { debug("_rpc_checkpoint_tasks: couldn't read from the " "step %u.%u: %m", req->job_id, req->job_step_id); rc = ESLURM_INVALID_JOB_ID; goto done2; } if ((req_uid != uid) && (!_slurm_authorized_user(req_uid))) { debug("checkpoint req from uid %ld for job %u.%u owned by " "uid 
%ld", (long) req_uid, req->job_id, req->job_step_id, (long) uid); rc = ESLURM_USER_ID_MISSING; /* or bad in this case */ goto done2; } rc = stepd_checkpoint(fd, protocol_version, req->timestamp, req->image_dir); if (rc == -1) rc = ESLURMD_JOB_NOTRUNNING; done2: close(fd); done: slurm_send_rc_msg(msg, rc); } static void _rpc_terminate_tasks(slurm_msg_t *msg) { kill_tasks_msg_t *req = (kill_tasks_msg_t *) msg->data; int rc = SLURM_SUCCESS; int fd; uid_t req_uid, uid; uint16_t protocol_version; debug3("Entering _rpc_terminate_tasks"); fd = stepd_connect(conf->spooldir, conf->node_name, req->job_id, req->job_step_id, &protocol_version); if (fd == -1) { debug("kill for nonexistent job %u.%u stepd_connect " "failed: %m", req->job_id, req->job_step_id); rc = ESLURM_INVALID_JOB_ID; goto done; } if ((int)(uid = stepd_get_uid(fd, protocol_version)) < 0) { debug("terminate_tasks couldn't read from the step %u.%u: %m", req->job_id, req->job_step_id); rc = ESLURM_INVALID_JOB_ID; goto done2; } req_uid = g_slurm_auth_get_uid(msg->auth_cred, conf->auth_info); if ((req_uid != uid) && (!_slurm_authorized_user(req_uid))) { debug("kill req from uid %ld for job %u.%u owned by uid %ld", (long) req_uid, req->job_id, req->job_step_id, (long) uid); rc = ESLURM_USER_ID_MISSING; /* or bad in this case */ goto done2; } rc = stepd_terminate(fd, protocol_version); if (rc == -1) rc = ESLURMD_JOB_NOTRUNNING; done2: close(fd); done: slurm_send_rc_msg(msg, rc); } static int _rpc_step_complete(slurm_msg_t *msg) { step_complete_msg_t *req = (step_complete_msg_t *)msg->data; int rc = SLURM_SUCCESS; int fd; uid_t req_uid; uint16_t protocol_version; debug3("Entering _rpc_step_complete"); fd = stepd_connect(conf->spooldir, conf->node_name, req->job_id, req->job_step_id, &protocol_version); if (fd == -1) { error("stepd_connect to %u.%u failed: %m", req->job_id, req->job_step_id); rc = ESLURM_INVALID_JOB_ID; goto done; } /* step completion messages are only allowed from other slurmstepd, so only root or 
   SlurmUser is allowed here */
	req_uid = g_slurm_auth_get_uid(msg->auth_cred, conf->auth_info);
	if (!_slurm_authorized_user(req_uid)) {
		/* Reject: requester is neither root nor SlurmUser. */
		debug("step completion from uid %ld for job %u.%u",
		      (long) req_uid, req->job_id, req->job_step_id);
		rc = ESLURM_USER_ID_MISSING;     /* or bad in this case */
		goto done2;
	}

	/* Forward the completion record to the managing slurmstepd. */
	rc = stepd_completion(fd, protocol_version, req);
	if (rc == -1)
		rc = ESLURMD_JOB_NOTRUNNING;

done2:
	close(fd);
done:
	slurm_send_rc_msg(msg, rc);
	return rc;
}

/* Initialize *msg as a REQUEST_STEP_COMPLETE message wrapping data. */
static void _setup_step_complete_msg(slurm_msg_t *msg, void *data)
{
	slurm_msg_t_init(msg);
	msg->msg_type = REQUEST_STEP_COMPLETE;
	msg->data = data;
}

/* This step_complete RPC came from slurmstepd because we are using
 * message aggregation configured and we are at the head of the tree.
 * This just adds the message to the list and goes on it's merry way. */
static int
_rpc_step_complete_aggr(slurm_msg_t *msg)
{
	int rc;
	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, conf->auth_info);

	/* Only root/SlurmUser (i.e. another slurmstepd) may send this. */
	if (!_slurm_authorized_user(uid)) {
		error("Security violation: step_complete_aggr from uid %d",
		      uid);
		if (msg->conn_fd >= 0)
			slurm_send_rc_msg(msg, ESLURM_USER_ID_MISSING);
		return SLURM_ERROR;
	}

	if (conf->msg_aggr_window_msgs > 1) {
		/* Aggregation enabled: queue the message (ownership of
		 * msg->data moves into the aggregation layer). */
		slurm_msg_t *req = xmalloc_nz(sizeof(slurm_msg_t));
		_setup_step_complete_msg(req, msg->data);
		msg->data = NULL;
		msg_aggr_add_msg(req, 1, NULL);
	} else {
		/* No aggregation: deliver directly to slurmctld,
		 * retrying until the send succeeds. */
		slurm_msg_t req;
		_setup_step_complete_msg(&req, msg->data);
		while (slurm_send_recv_controller_rc_msg(&req, &rc) < 0) {
			error("Unable to send step complete, "
			      "trying again in a minute: %m");
		}
	}

	/* Finish communication with the stepd, we have to wait for
	 * the message back from the slurmctld or we will cause a race
	 * condition with srun.
	 */
	slurm_send_rc_msg(msg, SLURM_SUCCESS);

	return SLURM_SUCCESS;
}

/* Get list of active jobs and steps, xfree returned value */
static char *
_get_step_list(void)
{
	char tmp[64];
	char *step_list = NULL;
	List steps;
	ListIterator i;
	step_loc_t *stepd;

	/* Walk every stepd socket in the spool dir and collect the
	 * "jobid" or "jobid.stepid" of each step that is still running. */
	steps = stepd_available(conf->spooldir, conf->node_name);
	i = list_iterator_create(steps);
	while ((stepd = list_next(i))) {
		int fd;
		fd = stepd_connect(stepd->directory, stepd->nodename,
				   stepd->jobid, stepd->stepid,
				   &stepd->protocol_version);
		if (fd == -1)
			continue;	/* presumably step completed */

		if (stepd_state(fd, stepd->protocol_version)
		    == SLURMSTEPD_NOT_RUNNING) {
			debug("stale domain socket for stepd %u.%u ",
			      stepd->jobid, stepd->stepid);
			close(fd);
			continue;
		}
		close(fd);

		if (step_list)
			xstrcat(step_list, ", ");
		if (stepd->stepid == NO_VAL) {
			snprintf(tmp, sizeof(tmp), "%u",
				 stepd->jobid);
			xstrcat(step_list, tmp);
		} else {
			snprintf(tmp, sizeof(tmp), "%u.%u",
				 stepd->jobid, stepd->stepid);
			xstrcat(step_list, tmp);
		}
	}
	list_iterator_destroy(i);
	FREE_NULL_LIST(steps);

	if (step_list == NULL)
		xstrcat(step_list, "NONE");
	return step_list;
}

/* Handle REQUEST_DAEMON_STATUS: report this slurmd's configuration,
 * state and active step list back to the requester. */
static int
_rpc_daemon_status(slurm_msg_t *msg)
{
	slurm_msg_t      resp_msg;
	slurmd_status_t *resp = NULL;

	resp = xmalloc(sizeof(slurmd_status_t));
	resp->actual_cpus        = conf->actual_cpus;
	resp->actual_boards      = conf->actual_boards;
	resp->actual_sockets     = conf->actual_sockets;
	resp->actual_cores       = conf->actual_cores;
	resp->actual_threads     = conf->actual_threads;
	resp->actual_real_mem    = conf->real_memory_size;
	resp->actual_tmp_disk    = conf->tmp_disk_space;
	resp->booted             = startup;
	resp->hostname           = xstrdup(conf->node_name);
	resp->step_list          = _get_step_list();
	resp->last_slurmctld_msg = last_slurmctld_msg;
	resp->pid                = conf->pid;
	resp->slurmd_debug       = conf->debug_level;
	resp->slurmd_logfile     = xstrdup(conf->logfile);
	resp->version            = xstrdup(SLURM_VERSION_STRING);

	slurm_msg_t_copy(&resp_msg, msg);
	resp_msg.msg_type = RESPONSE_SLURMD_STATUS;
	resp_msg.data     = resp;
	slurm_send_node_msg(msg->conn_fd, &resp_msg);
slurm_free_slurmd_status(resp); return SLURM_SUCCESS; } static int _rpc_stat_jobacct(slurm_msg_t *msg) { job_step_id_msg_t *req = (job_step_id_msg_t *)msg->data; slurm_msg_t resp_msg; job_step_stat_t *resp = NULL; int fd; uid_t req_uid, uid; uint16_t protocol_version; debug3("Entering _rpc_stat_jobacct"); /* step completion messages are only allowed from other slurmstepd, so only root or SlurmUser is allowed here */ req_uid = g_slurm_auth_get_uid(msg->auth_cred, conf->auth_info); fd = stepd_connect(conf->spooldir, conf->node_name, req->job_id, req->step_id, &protocol_version); if (fd == -1) { error("stepd_connect to %u.%u failed: %m", req->job_id, req->step_id); slurm_send_rc_msg(msg, ESLURM_INVALID_JOB_ID); return ESLURM_INVALID_JOB_ID; } if ((int)(uid = stepd_get_uid(fd, protocol_version)) < 0) { debug("stat_jobacct couldn't read from the step %u.%u: %m", req->job_id, req->step_id); close(fd); if (msg->conn_fd >= 0) slurm_send_rc_msg(msg, ESLURM_INVALID_JOB_ID); return ESLURM_INVALID_JOB_ID; } /* * check that requesting user ID is the SLURM UID or root */ if ((req_uid != uid) && (!_slurm_authorized_user(req_uid))) { error("stat_jobacct from uid %ld for job %u " "owned by uid %ld", (long) req_uid, req->job_id, (long) uid); if (msg->conn_fd >= 0) { slurm_send_rc_msg(msg, ESLURM_USER_ID_MISSING); close(fd); return ESLURM_USER_ID_MISSING;/* or bad in this case */ } } resp = xmalloc(sizeof(job_step_stat_t)); resp->step_pids = xmalloc(sizeof(job_step_pids_t)); resp->step_pids->node_name = xstrdup(conf->node_name); slurm_msg_t_copy(&resp_msg, msg); resp->return_code = SLURM_SUCCESS; if (stepd_stat_jobacct(fd, protocol_version, req, resp) == SLURM_ERROR) { debug("accounting for nonexistent job %u.%u requested", req->job_id, req->step_id); } /* FIX ME: This should probably happen in the stepd_stat_jobacct to get more information about the pids. 
*/ if (stepd_list_pids(fd, protocol_version, &resp->step_pids->pid, &resp->step_pids->pid_cnt) == SLURM_ERROR) { debug("No pids for nonexistent job %u.%u requested", req->job_id, req->step_id); } close(fd); resp_msg.msg_type = RESPONSE_JOB_STEP_STAT; resp_msg.data = resp; slurm_send_node_msg(msg->conn_fd, &resp_msg); slurm_free_job_step_stat(resp); return SLURM_SUCCESS; } static int _callerid_find_job(callerid_conn_t conn, uint32_t *job_id) { ino_t inode; pid_t pid; int rc; rc = callerid_find_inode_by_conn(conn, &inode); if (rc != SLURM_SUCCESS) { debug3("network_callerid inode not found"); return ESLURM_INVALID_JOB_ID; } debug3("network_callerid found inode %lu", (long unsigned int)inode); rc = find_pid_by_inode(&pid, inode); if (rc != SLURM_SUCCESS) { debug3("network_callerid process not found"); return ESLURM_INVALID_JOB_ID; } debug3("network_callerid found process %d", (pid_t)pid); rc = slurm_pid2jobid(pid, job_id); if (rc != SLURM_SUCCESS) { debug3("network_callerid job not found"); return ESLURM_INVALID_JOB_ID; } debug3("network_callerid found job %u", *job_id); return SLURM_SUCCESS; } static int _rpc_network_callerid(slurm_msg_t *msg) { network_callerid_msg_t *req = (network_callerid_msg_t *)msg->data; slurm_msg_t resp_msg; network_callerid_resp_t *resp = NULL; uid_t req_uid = -1; uid_t job_uid = -1; uint32_t job_id = (uint32_t)NO_VAL; callerid_conn_t conn; int rc = ESLURM_INVALID_JOB_ID; char ip_src_str[INET6_ADDRSTRLEN]; char ip_dst_str[INET6_ADDRSTRLEN]; debug3("Entering _rpc_network_callerid"); resp = xmalloc(sizeof(network_callerid_resp_t)); slurm_msg_t_copy(&resp_msg, msg); /* Ideally this would be in an if block only when debug3 is enabled */ inet_ntop(req->af, req->ip_src, ip_src_str, INET6_ADDRSTRLEN); inet_ntop(req->af, req->ip_dst, ip_dst_str, INET6_ADDRSTRLEN); debug3("network_callerid checking %s:%u => %s:%u", ip_src_str, req->port_src, ip_dst_str, req->port_dst); /* My remote is the other's source */ memcpy((void*)&conn.ip_dst, 
(void*)&req->ip_src, 16); memcpy((void*)&conn.ip_src, (void*)&req->ip_dst, 16); conn.port_src = req->port_dst; conn.port_dst = req->port_src; conn.af = req->af; /* Find the job id */ rc = _callerid_find_job(conn, &job_id); if (rc == SLURM_SUCCESS) { /* We found the job */ req_uid = g_slurm_auth_get_uid(msg->auth_cred, conf->auth_info); if (!_slurm_authorized_user(req_uid)) { /* Requestor is not root or SlurmUser */ job_uid = _get_job_uid(job_id); if (job_uid != req_uid) { /* RPC call sent by non-root user who does not * own this job. Do not send them the job ID. */ error("Security violation, REQUEST_NETWORK_CALLERID from uid=%d", req_uid); job_id = NO_VAL; rc = ESLURM_INVALID_JOB_ID; } } } resp->job_id = job_id; resp->node_name = xstrdup(conf->node_name); resp_msg.msg_type = RESPONSE_NETWORK_CALLERID; resp_msg.data = resp; slurm_send_node_msg(msg->conn_fd, &resp_msg); slurm_free_network_callerid_resp(resp); return rc; } static int _rpc_list_pids(slurm_msg_t *msg) { job_step_id_msg_t *req = (job_step_id_msg_t *)msg->data; slurm_msg_t resp_msg; job_step_pids_t *resp = NULL; int fd; uid_t req_uid; uid_t job_uid; uint16_t protocol_version = 0; debug3("Entering _rpc_list_pids"); /* step completion messages are only allowed from other slurmstepd, * so only root or SlurmUser is allowed here */ req_uid = g_slurm_auth_get_uid(msg->auth_cred, conf->auth_info); job_uid = _get_job_uid(req->job_id); if ((int)job_uid < 0) { error("stat_pid for invalid job_id: %u", req->job_id); if (msg->conn_fd >= 0) slurm_send_rc_msg(msg, ESLURM_INVALID_JOB_ID); return ESLURM_INVALID_JOB_ID; } /* * check that requesting user ID is the SLURM UID or root */ if ((req_uid != job_uid) && (!_slurm_authorized_user(req_uid))) { error("stat_pid from uid %ld for job %u " "owned by uid %ld", (long) req_uid, req->job_id, (long) job_uid); if (msg->conn_fd >= 0) { slurm_send_rc_msg(msg, ESLURM_USER_ID_MISSING); return ESLURM_USER_ID_MISSING;/* or bad in this case */ } } resp = 
xmalloc(sizeof(job_step_pids_t)); slurm_msg_t_copy(&resp_msg, msg); resp->node_name = xstrdup(conf->node_name); resp->pid_cnt = 0; resp->pid = NULL; fd = stepd_connect(conf->spooldir, conf->node_name, req->job_id, req->step_id, &protocol_version); if (fd == -1) { error("stepd_connect to %u.%u failed: %m", req->job_id, req->step_id); slurm_send_rc_msg(msg, ESLURM_INVALID_JOB_ID); slurm_free_job_step_pids(resp); return ESLURM_INVALID_JOB_ID; } if (stepd_list_pids(fd, protocol_version, &resp->pid, &resp->pid_cnt) == SLURM_ERROR) { debug("No pids for nonexistent job %u.%u requested", req->job_id, req->step_id); } close(fd); resp_msg.msg_type = RESPONSE_JOB_STEP_PIDS; resp_msg.data = resp; slurm_send_node_msg(msg->conn_fd, &resp_msg); slurm_free_job_step_pids(resp); return SLURM_SUCCESS; } /* * For the specified job_id: reply to slurmctld, * sleep(configured kill_wait), then send SIGKILL */ static void _rpc_timelimit(slurm_msg_t *msg) { uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, conf->auth_info); kill_job_msg_t *req = msg->data; int nsteps, rc; if (!_slurm_authorized_user(uid)) { error ("Security violation: rpc_timelimit req from uid %d", uid); slurm_send_rc_msg(msg, ESLURM_USER_ID_MISSING); return; } /* * Indicate to slurmctld that we've received the message */ slurm_send_rc_msg(msg, SLURM_SUCCESS); slurm_close(msg->conn_fd); msg->conn_fd = -1; if (req->step_id != NO_VAL) { slurm_ctl_conf_t *cf; int delay; /* A jobstep has timed out: * - send the container a SIG_TIME_LIMIT or SIG_PREEMPTED * to log the event * - send a SIGCONT to resume any suspended tasks * - send a SIGTERM to begin termination * - sleep KILL_WAIT * - send a SIGKILL to clean up */ if (msg->msg_type == REQUEST_KILL_TIMELIMIT) { rc = _signal_jobstep(req->job_id, req->step_id, uid, SIG_TIME_LIMIT); } else { rc = _signal_jobstep(req->job_id, req->step_id, uid, SIG_PREEMPTED); } if (rc != SLURM_SUCCESS) return; rc = _signal_jobstep(req->job_id, req->step_id, uid, SIGCONT); if (rc != SLURM_SUCCESS) 
			return;
		rc = _signal_jobstep(req->job_id, req->step_id, uid,
				     SIGTERM);
		if (rc != SLURM_SUCCESS)
			return;
		/* Give tasks kill_wait seconds (at least 5) to exit
		 * before the final SIGKILL. */
		cf = slurm_conf_lock();
		delay = MAX(cf->kill_wait, 5);
		slurm_conf_unlock();
		sleep(delay);
		_signal_jobstep(req->job_id, req->step_id, uid, SIGKILL);
		return;
	}

	/* Whole job timed out (step_id == NO_VAL): notify all steps of
	 * the reason, then SIGTERM every active step. */
	if (msg->msg_type == REQUEST_KILL_TIMELIMIT)
		_kill_all_active_steps(req->job_id, SIG_TIME_LIMIT, true);
	else /* (msg->type == REQUEST_KILL_PREEMPTED) */
		_kill_all_active_steps(req->job_id, SIG_PREEMPTED, true);
	nsteps = _kill_all_active_steps(req->job_id, SIGTERM, false);
	verbose( "Job %u: timeout: sent SIGTERM to %d active steps",
		 req->job_id, nsteps );

	/* Revoke credential, send SIGKILL, run epilog, etc. */
	_rpc_terminate_job(msg);
}

/* Handle REQUEST_JOB_ID: find which local job contains the given pid
 * by asking each slurmstepd on this node, and reply with the job id
 * (or ESLURM_INVALID_JOB_ID when no step claims the pid). */
static void  _rpc_pid2jid(slurm_msg_t *msg)
{
	job_id_request_msg_t *req = (job_id_request_msg_t *) msg->data;
	slurm_msg_t           resp_msg;
	job_id_response_msg_t resp;
	bool         found = false;
	List         steps;
	ListIterator i;
	step_loc_t *stepd;

	steps = stepd_available(conf->spooldir, conf->node_name);
	i = list_iterator_create(steps);
	while ((stepd = list_next(i))) {
		int fd;
		fd = stepd_connect(stepd->directory, stepd->nodename,
				   stepd->jobid, stepd->stepid,
				   &stepd->protocol_version);
		if (fd == -1)
			continue;	/* presumably the step completed */

		/* A match is either a task pid inside the step's
		 * container or the stepd daemon's own pid. */
		if (stepd_pid_in_container(
			    fd, stepd->protocol_version,
			    req->job_pid)
		    || req->job_pid == stepd_daemon_pid(
			    fd, stepd->protocol_version)) {
			slurm_msg_t_copy(&resp_msg, msg);
			resp.job_id = stepd->jobid;
			resp.return_code = SLURM_SUCCESS;
			found = true;
			close(fd);
			break;
		}
		close(fd);
	}
	list_iterator_destroy(i);
	FREE_NULL_LIST(steps);

	if (found) {
		debug3("_rpc_pid2jid: pid(%u) found in %u",
		       req->job_pid, resp.job_id);
		resp_msg.address      = msg->address;
		resp_msg.msg_type     = RESPONSE_JOB_ID;
		resp_msg.data         = &resp;

		slurm_send_node_msg(msg->conn_fd, &resp_msg);
	} else {
		debug3("_rpc_pid2jid: pid(%u) not found", req->job_pid);
		slurm_send_rc_msg(msg, ESLURM_INVALID_JOB_ID);
	}
}

/* Validate sbcast credential.
* NOTE: We can only perform the full credential validation once with * Munge without generating a credential replay error * RET SLURM_SUCCESS or an error code */ static int _valid_sbcast_cred(file_bcast_msg_t *req, uid_t req_uid, uint16_t block_no, uint32_t *job_id) { int rc = SLURM_SUCCESS; char *nodes = NULL; hostset_t hset = NULL; *job_id = NO_VAL; rc = extract_sbcast_cred(conf->vctx, req->cred, block_no, job_id, &nodes); if (rc != 0) { error("Security violation: Invalid sbcast_cred from uid %d", req_uid); return ESLURMD_INVALID_JOB_CREDENTIAL; } if (!(hset = hostset_create(nodes))) { error("Unable to parse sbcast_cred hostlist %s", nodes); rc = ESLURMD_INVALID_JOB_CREDENTIAL; } else if (!hostset_within(hset, conf->node_name)) { error("Security violation: sbcast_cred from %d has " "bad hostset %s", req_uid, nodes); rc = ESLURMD_INVALID_JOB_CREDENTIAL; } if (hset) hostset_destroy(hset); xfree(nodes); /* print_sbcast_cred(req->cred); */ return rc; } static void _fb_rdlock(void) { slurm_mutex_lock(&file_bcast_mutex); while (1) { if ((fb_write_wait_lock == 0) && (fb_write_lock == 0)) { fb_read_lock++; break; } else { /* wait for state change and retry */ pthread_cond_wait(&file_bcast_cond, &file_bcast_mutex); } } slurm_mutex_unlock(&file_bcast_mutex); } static void _fb_rdunlock(void) { slurm_mutex_lock(&file_bcast_mutex); fb_read_lock--; pthread_cond_broadcast(&file_bcast_cond); slurm_mutex_unlock(&file_bcast_mutex); } static void _fb_wrlock(void) { slurm_mutex_lock(&file_bcast_mutex); fb_write_wait_lock++; while (1) { if ((fb_read_lock == 0) && (fb_write_lock == 0)) { fb_write_lock++; fb_write_wait_lock--; break; } else { /* wait for state change and retry */ pthread_cond_wait(&file_bcast_cond, &file_bcast_mutex); } } slurm_mutex_unlock(&file_bcast_mutex); } static void _fb_wrunlock(void) { slurm_mutex_lock(&file_bcast_mutex); fb_write_lock--; pthread_cond_broadcast(&file_bcast_cond); slurm_mutex_unlock(&file_bcast_mutex); } static int _bcast_find_in_list(void *x, 
void *y) { file_bcast_info_t *info = (file_bcast_info_t *)x; file_bcast_info_t *key = (file_bcast_info_t *)y; /* uid, job_id, and fname must match */ return ((info->uid == key->uid) && (info->job_id == key->job_id) && (!xstrcmp(info->fname, key->fname))); } /* must have read lock */ static file_bcast_info_t *_bcast_lookup_file(file_bcast_info_t *key) { return list_find_first(file_bcast_list, _bcast_find_in_list, key); } /* must not have read lock, will get write lock */ static void _file_bcast_close_file(file_bcast_info_t *key) { _fb_wrlock(); list_delete_all(file_bcast_list, _bcast_find_in_list, key); _fb_wrunlock(); } static void _free_file_bcast_info_t(file_bcast_info_t *f) { xfree(f->fname); if (f->fd) close(f->fd); xfree(f); } static int _bcast_find_in_list_to_remove(void *x, void *y) { file_bcast_info_t *f = (file_bcast_info_t *)x; time_t *now = (time_t *) y; if (f->last_update + FILE_BCAST_TIMEOUT < *now) { error("Removing stalled file_bcast transfer from uid " "%u to file `%s`", f->uid, f->fname); return true; } return false; } /* remove transfers that have stalled */ static void _file_bcast_cleanup(void) { time_t now = time(NULL); _fb_wrlock(); list_delete_all(file_bcast_list, _bcast_find_in_list_to_remove, &now); _fb_wrunlock(); } void file_bcast_init(void) { /* skip locks during slurmd init */ file_bcast_list = list_create((ListDelF) _free_file_bcast_info_t); } void file_bcast_purge(void) { _fb_wrlock(); list_destroy(file_bcast_list); /* destroying list before exit, no need to unlock */ } static int _rpc_file_bcast(slurm_msg_t *msg) { int rc, offset, inx; file_bcast_info_t *file_info; file_bcast_msg_t *req = msg->data; file_bcast_info_t key; key.uid = g_slurm_auth_get_uid(msg->auth_cred, conf->auth_info); key.gid = g_slurm_auth_get_gid(msg->auth_cred, conf->auth_info); key.fname = req->fname; rc = _valid_sbcast_cred(req, key.uid, req->block_no, &key.job_id); if ((rc != SLURM_SUCCESS) && !_slurm_authorized_user(key.uid)) return rc; #if 0 
info("last_block=%u force=%u modes=%o", req->last_block, req->force, req->modes); info("uid=%u gid=%u atime=%lu mtime=%lu block_len[0]=%u", req->uid, req->gid, req->atime, req->mtime, req->block_len); #if 0 /* when the file being transferred is binary, the following line * can break the terminal output for slurmd */ info("req->block[0]=%s, @ %lu", \ req->block[0], (unsigned long) &req->block); #endif #endif if (req->block_no == 1) { info("sbcast req_uid=%u job_id=%u fname=%s block_no=%u", key.uid, key.job_id, key.fname, req->block_no); } else { debug("sbcast req_uid=%u job_id=%u fname=%s block_no=%u", key.uid, key.job_id, key.fname, req->block_no); } /* first block must register the file and open fd/mmap */ if (req->block_no == 1) { if ((rc = _file_bcast_register_file(msg, &key))) return rc; } _fb_rdlock(); if (!(file_info = _bcast_lookup_file(&key))) { error("No registered file transfer for uid %u file `%s`.", key.uid, key.fname); _fb_rdunlock(); return SLURM_ERROR; } /* now decompress file */ if (bcast_decompress_data(req) < 0) { error("sbcast: data decompression error for UID %u, file %s", key.uid, key.fname); _fb_rdunlock(); return SLURM_FAILURE; } offset = 0; while (req->block_len - offset) { inx = write(file_info->fd, &req->block[offset], (req->block_len - offset)); if (inx == -1) { if ((errno == EINTR) || (errno == EAGAIN)) continue; error("sbcast: uid:%u can't write `%s`: %m", key.uid, key.fname); _fb_rdunlock(); return SLURM_FAILURE; } offset += inx; } file_info->last_update = time(NULL); if (req->last_block && fchmod(file_info->fd, (req->modes & 0777))) { error("sbcast: uid:%u can't chmod `%s`: %m", key.uid, key.fname); } if (req->last_block && fchown(file_info->fd, key.uid, key.gid)) { error("sbcast: uid:%u gid:%u can't chown `%s`: %m", key.uid, key.gid, key.fname); } if (req->last_block && req->atime) { struct utimbuf time_buf; time_buf.actime = req->atime; time_buf.modtime = req->mtime; if (utime(key.fname, &time_buf)) { error("sbcast: uid:%u can't 
utime `%s`: %m", key.uid, key.fname); } } _fb_rdunlock(); if (req->last_block) { _file_bcast_close_file(&key); } return SLURM_SUCCESS; } /* pass an open file descriptor back to the parent process */ static void _send_back_fd(int socket, int fd) { struct msghdr msg = { 0 }; struct cmsghdr *cmsg; char buf[CMSG_SPACE(sizeof(fd))]; memset(buf, '\0', sizeof(buf)); msg.msg_iov = NULL; msg.msg_iovlen = 0; msg.msg_control = buf; msg.msg_controllen = sizeof(buf); cmsg = CMSG_FIRSTHDR(&msg); cmsg->cmsg_level = SOL_SOCKET; cmsg->cmsg_type = SCM_RIGHTS; cmsg->cmsg_len = CMSG_LEN(sizeof(fd)); memmove(CMSG_DATA(cmsg), &fd, sizeof(fd)); msg.msg_controllen = cmsg->cmsg_len; if (sendmsg(socket, &msg, 0) < 0) error("%s: failed to send fd: %m", __func__); } /* receive an open file descriptor from fork()'d child over unix socket */ static int _receive_fd(int socket) { struct msghdr msg = {0}; struct cmsghdr *cmsg; int fd; msg.msg_iov = NULL; msg.msg_iovlen = 0; char c_buffer[256]; msg.msg_control = c_buffer; msg.msg_controllen = sizeof(c_buffer); if (recvmsg(socket, &msg, 0) < 0) { error("%s: failed to receive fd: %m", __func__); return -1; } cmsg = CMSG_FIRSTHDR(&msg); memmove(&fd, CMSG_DATA(cmsg), sizeof(fd)); return fd; } static int _file_bcast_register_file(slurm_msg_t *msg, file_bcast_info_t *key) { file_bcast_msg_t *req = msg->data; int fd, flags, rc; int pipe[2]; gids_t *gids; pid_t child; file_bcast_info_t *file_info; if (!(gids = _gids_cache_lookup(req->user_name, key->gid))) { error("sbcast: gids_cache_lookup for %s failed", req->user_name); return SLURM_ERROR; } if ((rc = container_g_create(key->job_id))) { error("sbcast: container_g_create(%u): %m", key->job_id); _dealloc_gids(gids); return rc; } /* child process will setuid to the user, register the process * with the container, and open the file for us. 
	 */
	if (socketpair(AF_UNIX, SOCK_DGRAM, 0, pipe) != 0) {
		error("%s: Failed to open pipe: %m", __func__);
		_dealloc_gids(gids);
		return SLURM_ERROR;
	}

	child = fork();
	if (child == -1) {
		error("sbcast: fork failure");
		_dealloc_gids(gids);
		close(pipe[0]);
		close(pipe[1]);
		return errno;
	} else if (child > 0) {
		/* get fd back from pipe */
		close(pipe[0]);
		/* NOTE(review): rc here is the raw wait status, not an
		 * exit code; WEXITSTATUS() below assumes a normal exit. */
		waitpid(child, &rc, 0);
		_dealloc_gids(gids);
		if (rc) {
			close(pipe[1]);
			return WEXITSTATUS(rc);
		}
		fd = _receive_fd(pipe[1]);
		close(pipe[1]);

		/* NOTE(review): fd may be -1 here if the descriptor
		 * transfer failed; it is recorded without a check. */
		file_info = xmalloc(sizeof(file_bcast_info_t));
		file_info->fd = fd;
		file_info->fname = xstrdup(req->fname);
		file_info->uid = key->uid;
		file_info->gid = key->gid;
		file_info->job_id = key->job_id;
		file_info->start_time = time(NULL);

		//TODO: mmap the file here

		_fb_wrlock();
		list_append(file_bcast_list, file_info);
		_fb_wrunlock();

		return SLURM_SUCCESS;
	}

	/* child process below here */

	close(pipe[1]);

	/* container_g_add_pid needs to be called in the forked process
	   part of the fork to avoid a race condition where if this
	   process makes a file or detacts itself from a child before
	   we add the pid to the container in the parent of the fork. */
	if (container_g_add_pid(key->job_id, getpid(), key->uid)) {
		error("container_g_add_pid(%u): %m", key->job_id);
		exit(SLURM_ERROR);
	}

	/* The child actually performs the I/O and exits with
	 * a return code, do not return! */

	/*********************************************************************\
	 * NOTE: It would be best to do an exec() immediately after the fork()
	 * in order to help prevent a possible deadlock in the child process
	 * due to locks being set at the time of the fork and being freed by
	 * the parent process, but not freed by the child process. Performing
	 * the work inline is done for simplicity. Note that the logging
	 * performed by error() should be safe due to the use of
	 * atfork_install_handlers() as defined in src/common/log.c.
	 * Change the code below with caution.
	\*********************************************************************/

	/* Drop to the requesting user's identity: supplementary groups
	 * first, then gid, then uid (order matters). */
	if (setgroups(gids->ngids, gids->gids) < 0) {
		error("sbcast: uid: %u setgroups failed: %m", key->uid);
		exit(errno);
	}
	_dealloc_gids(gids);

	if (setgid(key->gid) < 0) {
		error("sbcast: uid:%u setgid(%u): %m", key->uid, key->gid);
		exit(errno);
	}
	/* NOTE(review): the message below says "getuid" but the failing
	 * call is setuid(). */
	if (setuid(key->uid) < 0) {
		error("sbcast: getuid(%u): %m", key->uid);
		exit(errno);
	}

	/* O_EXCL unless the client requested --force (then truncate) */
	flags = O_WRONLY | O_CREAT;
	if (req->force)
		flags |= O_TRUNC;
	else
		flags |= O_EXCL;

	fd = open(key->fname, flags, 0700);
	if (fd == -1) {
		error("sbcast: uid:%u can't open `%s`: %m",
		      key->uid, key->fname);
		exit(errno);
	}
	/* hand the open descriptor back to the parent, then exit */
	_send_back_fd(pipe[0], fd);
	close(fd);
	exit(SLURM_SUCCESS);
}

/* Attach an sattach client to an existing job step: validate the
 * requester, then ask the slurmstepd to connect its I/O back to the
 * addresses derived from the client's connection. */
static void _rpc_reattach_tasks(slurm_msg_t *msg)
{
	reattach_tasks_request_msg_t  *req = msg->data;
	reattach_tasks_response_msg_t *resp =
		xmalloc(sizeof(reattach_tasks_response_msg_t));
	slurm_msg_t resp_msg;
	int rc = SLURM_SUCCESS;
	uint16_t port = 0;
	char host[MAXHOSTNAMELEN];
	slurm_addr_t ioaddr;
	void *job_cred_sig;
	uint32_t len;
	int fd;
	uid_t req_uid;
	slurm_addr_t *cli = &msg->orig_addr;
	uint32_t nodeid = (uint32_t)NO_VAL;
	uid_t uid = -1;
	uint16_t protocol_version;

	slurm_msg_t_copy(&resp_msg, msg);
	fd = stepd_connect(conf->spooldir, conf->node_name,
			   req->job_id, req->job_step_id, &protocol_version);
	if (fd == -1) {
		debug("reattach for nonexistent job %u.%u stepd_connect"
		      " failed: %m", req->job_id, req->job_step_id);
		rc = ESLURM_INVALID_JOB_ID;
		goto done;
	}

	if ((int)(uid = stepd_get_uid(fd, protocol_version)) < 0) {
		debug("_rpc_reattach_tasks couldn't read from the "
		      "step %u.%u: %m", req->job_id, req->job_step_id);
		rc = ESLURM_INVALID_JOB_ID;
		goto done2;
	}

	nodeid = stepd_get_nodeid(fd, protocol_version);

	debug2("_rpc_reattach_tasks: nodeid %d in the job step", nodeid);

	/* requester must be the step owner or a Slurm administrator */
	req_uid = g_slurm_auth_get_uid(msg->auth_cred, conf->auth_info);
	if ((req_uid != uid) && (!_slurm_authorized_user(req_uid))) {
		error("uid %ld attempt to attach to job %u.%u owned by %ld",
		      (long) req_uid, req->job_id,
		      req->job_step_id, (long) uid);
		rc = EPERM;
		goto done2;
	}

	memset(resp, 0, sizeof(reattach_tasks_response_msg_t));
	slurm_get_ip_str(cli, &port, host, sizeof(host));

	/*
	 * Set response address by resp_port and client address
	 */
	memcpy(&resp_msg.address, cli, sizeof(slurm_addr_t));
	if (req->num_resp_port > 0) {
		port = req->resp_port[nodeid % req->num_resp_port];
		slurm_set_addr(&resp_msg.address, port, NULL);
	}

	/*
	 * Set IO address by io_port and client address
	 */
	memcpy(&ioaddr, cli, sizeof(slurm_addr_t));
	if (req->num_io_port > 0) {
		port = req->io_port[nodeid % req->num_io_port];
		slurm_set_addr(&ioaddr, port, NULL);
	}

	/*
	 * Get the signature of the job credential. slurmstepd will need
	 * this to prove its identity when it connects back to srun.
	 */
	slurm_cred_get_signature(req->cred, (char **)(&job_cred_sig), &len);
	if (len != SLURM_IO_KEY_SIZE) {
		/* NOTE(review): rc is still SLURM_SUCCESS on this error
		 * path, so the client is sent a success return code. */
		error("Incorrect slurm cred signature length");
		goto done2;
	}

	resp->gtids = NULL;
	resp->local_pids = NULL;

	/* NOTE: We need to use the protocol_version from
	 * sattach here since responses will be sent back to it. */
	if (msg->protocol_version < protocol_version)
		protocol_version = msg->protocol_version;

	/* Following call fills in gtids and local_pids when successful.
	 */
	rc = stepd_attach(fd, protocol_version, &ioaddr,
			  &resp_msg.address, job_cred_sig, resp);
	if (rc != SLURM_SUCCESS) {
		debug2("stepd_attach call failed");
		goto done2;
	}

done2:
	close(fd);
done:
	debug2("update step addrs rc = %d", rc);
	resp_msg.data         = resp;
	resp_msg.msg_type     = RESPONSE_REATTACH_TASKS;
	resp->node_name       = xstrdup(conf->node_name);
	resp->return_code     = rc;
	debug2("node %s sending rc = %d", conf->node_name, rc);

	slurm_send_node_msg(msg->conn_fd, &resp_msg);
	slurm_free_reattach_tasks_response_msg(resp);
}

/* Return the uid of the first step of the given job that can be
 * queried, or (uid_t) -1 if none can be reached. */
static uid_t _get_job_uid(uint32_t jobid)
{
	List steps;
	ListIterator i;
	step_loc_t *stepd;
	uid_t uid = -1;
	int fd;

	steps = stepd_available(conf->spooldir, conf->node_name);
	i = list_iterator_create(steps);
	while ((stepd = list_next(i))) {
		if (stepd->jobid != jobid) {
			/* multiple jobs expected on shared nodes */
			continue;
		}
		fd = stepd_connect(stepd->directory, stepd->nodename,
				   stepd->jobid, stepd->stepid,
				   &stepd->protocol_version);
		if (fd == -1) {
			debug3("Unable to connect to step %u.%u",
			       stepd->jobid, stepd->stepid);
			continue;
		}
		uid = stepd_get_uid(fd, stepd->protocol_version);

		close(fd);
		if ((int)uid < 0) {
			debug("stepd_get_uid failed %u.%u: %m",
			      stepd->jobid, stepd->stepid);
			continue;
		}
		break;
	}
	list_iterator_destroy(i);
	FREE_NULL_LIST(steps);

	return uid;
}

/*
 * _kill_all_active_steps - signals the container of all steps of a job
 *   jobid IN - id of job to signal
 *   sig   IN - signal to send
 *   batch IN - if true signal batch script, otherwise skip it
 * RET count of signaled job steps (plus batch script, if applicable)
 */
static int _kill_all_active_steps(uint32_t jobid, int sig, bool batch)
{
	List steps;
	ListIterator i;
	step_loc_t *stepd;
	int step_cnt  = 0;
	int fd;

	steps = stepd_available(conf->spooldir, conf->node_name);
	i = list_iterator_create(steps);
	while ((stepd = list_next(i))) {
		if (stepd->jobid != jobid) {
			/* multiple jobs expected on shared nodes */
			debug3("Step from other job: jobid=%u (this jobid=%u)",
			       stepd->jobid, jobid);
			continue;
		}

		if ((stepd->stepid == SLURM_BATCH_SCRIPT) && (!batch))
			continue;

		/* counted even if the connect below fails */
		step_cnt++;

		fd = stepd_connect(stepd->directory, stepd->nodename,
				   stepd->jobid, stepd->stepid,
				   &stepd->protocol_version);
		if (fd == -1) {
			debug3("Unable to connect to step %u.%u",
			       stepd->jobid, stepd->stepid);
			continue;
		}

		debug2("container signal %d to job %u.%u",
		       sig, jobid, stepd->stepid);
		if (stepd_signal_container(
			    fd, stepd->protocol_version, sig) < 0)
			debug("kill jobid=%u failed: %m", jobid);
		close(fd);
	}
	list_iterator_destroy(i);
	FREE_NULL_LIST(steps);

	if (step_cnt == 0)
		debug2("No steps in jobid %u to send signal %d", jobid, sig);
	return step_cnt;
}

/*
 * _terminate_all_steps - signals the container of all steps of a job
 *   jobid IN - id of job to signal
 *   batch IN - if true signal batch script, otherwise skip it
 * RET count of signaled job steps (plus batch script, if applicable)
 */
static int _terminate_all_steps(uint32_t jobid, bool batch)
{
	List steps;
	ListIterator i;
	step_loc_t *stepd;
	int step_cnt  = 0;
	int fd;

	steps = stepd_available(conf->spooldir, conf->node_name);
	i = list_iterator_create(steps);
	while ((stepd = list_next(i))) {
		if (stepd->jobid != jobid) {
			/* multiple jobs expected on shared nodes */
			debug3("Step from other job: jobid=%u (this jobid=%u)",
			       stepd->jobid, jobid);
			continue;
		}

		if ((stepd->stepid == SLURM_BATCH_SCRIPT) && (!batch))
			continue;

		step_cnt++;

		fd = stepd_connect(stepd->directory, stepd->nodename,
				   stepd->jobid, stepd->stepid,
				   &stepd->protocol_version);
		if (fd == -1) {
			debug3("Unable to connect to step %u.%u",
			       stepd->jobid, stepd->stepid);
			continue;
		}

		debug2("terminate job step %u.%u", jobid, stepd->stepid);
		if (stepd_terminate(fd, stepd->protocol_version) < 0)
			debug("kill jobid=%u.%u failed: %m",
			      jobid, stepd->stepid);
		close(fd);
	}
	list_iterator_destroy(i);
	FREE_NULL_LIST(steps);
	if (step_cnt == 0)
		debug2("No steps in job %u to terminate", jobid);
	return step_cnt;
}

/* Return true if any step of the given job reports a state other than
 * SLURMSTEPD_NOT_RUNNING; steps we can not contact are skipped. */
static bool _job_still_running(uint32_t job_id)
{
	bool        retval = false;
	List        steps;
	ListIterator i;
	step_loc_t *s = NULL;

	steps = stepd_available(conf->spooldir, conf->node_name);
	i = list_iterator_create(steps);
	while ((s = list_next(i))) {
		if (s->jobid == job_id) {
			int fd;
			fd = stepd_connect(s->directory, s->nodename,
					   s->jobid, s->stepid,
					   &s->protocol_version);
			if (fd == -1)
				continue;

			if (stepd_state(fd, s->protocol_version)
			    != SLURMSTEPD_NOT_RUNNING) {
				retval = true;
				close(fd);
				break;
			}
			close(fd);
		}
	}
	list_iterator_destroy(i);
	FREE_NULL_LIST(steps);

	return retval;
}

/*
 * Wait until all job steps are in SLURMSTEPD_NOT_RUNNING state.
 * This indicates that switch_g_job_postfini has completed and
 * freed the switch windows (as needed only for Federation switch).
 * Polls once per second for up to max_delay seconds.
 */
static void _wait_state_completed(uint32_t jobid, int max_delay)
{
	int i;

	for (i=0; i<max_delay; i++) {
		if (_steps_completed_now(jobid))
			break;
		sleep(1);
	}
	if (i >= max_delay)
		error("timed out waiting for job %u to complete", jobid);
}

/* Return true if every reachable step of the job reports
 * SLURMSTEPD_NOT_RUNNING (unreachable steps are skipped). */
static bool _steps_completed_now(uint32_t jobid)
{
	List steps;
	ListIterator i;
	step_loc_t *stepd;
	bool rc = true;

	steps = stepd_available(conf->spooldir, conf->node_name);
	i = list_iterator_create(steps);
	while ((stepd = list_next(i))) {
		if (stepd->jobid == jobid) {
			int fd;
			fd = stepd_connect(stepd->directory, stepd->nodename,
					   stepd->jobid, stepd->stepid,
					   &stepd->protocol_version);
			if (fd == -1)
				continue;

			if (stepd_state(fd, stepd->protocol_version)
			    != SLURMSTEPD_NOT_RUNNING) {
				rc = false;
				close(fd);
				break;
			}
			close(fd);
		}
	}
	list_iterator_destroy(i);
	FREE_NULL_LIST(steps);

	return rc;
}

/* Fill in an epilog complete message for the given job/return code.
 * Note: req->node_name points at conf->node_name (not a copy). */
static void _epilog_complete_msg_setup(
	slurm_msg_t *msg, epilog_complete_msg_t *req, uint32_t jobid, int rc)
{
	slurm_msg_t_init(msg);
	memset(req, 0, sizeof(epilog_complete_msg_t));

	req->job_id      = jobid;
	req->return_code = rc;
	req->node_name   = conf->node_name;

	msg->msg_type    = MESSAGE_EPILOG_COMPLETE;
	msg->data        = req;
}

/*
 * Send epilog complete message to currently active controller.
 * If enabled, use message aggregation.
 * Returns SLURM_SUCCESS if message sent successfully,
 * SLURM_FAILURE if epilog complete message fails to be sent.
 */
static int _epilog_complete(uint32_t jobid, int rc)
{
	int ret = SLURM_SUCCESS;

	if (conf->msg_aggr_window_msgs > 1) {
		/* message aggregation is enabled */
		slurm_msg_t *msg = xmalloc(sizeof(slurm_msg_t));
		epilog_complete_msg_t *req =
			xmalloc(sizeof(epilog_complete_msg_t));

		_epilog_complete_msg_setup(msg, req, jobid, rc);

		/* we need to copy this symbol */
		req->node_name = xstrdup(conf->node_name);

		msg_aggr_add_msg(msg, 0, NULL);
	} else {
		slurm_msg_t msg;
		epilog_complete_msg_t req;

		_epilog_complete_msg_setup(&msg, &req, jobid, rc);

		/* Note: No return code to message, slurmctld will resend
		 * TERMINATE_JOB request if message send fails */
		if (slurm_send_only_controller_msg(&msg) < 0) {
			error("Unable to send epilog complete message: %m");
			ret = SLURM_ERROR;
		} else {
			debug("Job %u: sent epilog complete msg: rc = %d",
			      jobid, rc);
		}
	}
	return ret;
}

/*
 * Send a signal through the appropriate slurmstepds for each job step
 * belonging to a given job allocation.
 */
static void _rpc_signal_job(slurm_msg_t *msg)
{
	signal_job_msg_t *req = msg->data;
	uid_t req_uid = g_slurm_auth_get_uid(msg->auth_cred,
					     conf->auth_info);
	uid_t job_uid;
	List steps;
	ListIterator i;
	step_loc_t *stepd = NULL;
	int step_cnt  = 0;
	int fd;

	debug("_rpc_signal_job, uid = %d, signal = %d", req_uid, req->signal);
	job_uid = _get_job_uid(req->job_id);
	if ((int)job_uid < 0)
		goto no_job;

	/*
	 * check that requesting user ID is the SLURM UID or root
	 */
	if ((req_uid != job_uid) && (!_slurm_authorized_user(req_uid))) {
		error("Security violation: kill_job(%u) from uid %d",
		      req->job_id, req_uid);
		if (msg->conn_fd >= 0) {
			slurm_send_rc_msg(msg, ESLURM_USER_ID_MISSING);
			if (slurm_close(msg->conn_fd) < 0)
				error("_rpc_signal_job: close(%d): %m",
				      msg->conn_fd);
			msg->conn_fd = -1;
		}
		return;
	}

	/*
	 * Loop through all job steps for this job and signal the
	 * step's process group through the slurmstepd.
	 */
	steps = stepd_available(conf->spooldir, conf->node_name);
	i = list_iterator_create(steps);
	while ((stepd = list_next(i))) {
		if (stepd->jobid != req->job_id) {
			/* multiple jobs expected on shared nodes */
			debug3("Step from other job: jobid=%u (this jobid=%u)",
			       stepd->jobid, req->job_id);
			continue;
		}

		if (stepd->stepid == SLURM_BATCH_SCRIPT) {
			debug2("batch script itself not signalled");
			continue;
		}

		step_cnt++;

		fd = stepd_connect(stepd->directory, stepd->nodename,
				   stepd->jobid, stepd->stepid,
				   &stepd->protocol_version);
		if (fd == -1) {
			debug3("Unable to connect to step %u.%u",
			       stepd->jobid, stepd->stepid);
			continue;
		}

		debug2(" signal %d to job %u.%u", req->signal,
		       stepd->jobid, stepd->stepid);
		if (stepd_signal_container(
			    fd, stepd->protocol_version, req->signal) < 0)
			debug("signal jobid=%u failed: %m", stepd->jobid);
		close(fd);
	}
	list_iterator_destroy(i);
	FREE_NULL_LIST(steps);

no_job:
	if (step_cnt == 0) {
		debug2("No steps in jobid %u to send signal %d",
		       req->job_id, req->signal);
	}

	/*
	 * At this point, if connection still open, we send controller
	 * a "success" reply to indicate that we've recvd the msg.
	 */
	if (msg->conn_fd >= 0) {
		slurm_send_rc_msg(msg, SLURM_SUCCESS);
		if (slurm_close(msg->conn_fd) < 0)
			error("_rpc_signal_job: close(%d): %m", msg->conn_fd);
		msg->conn_fd = -1;
	}
}

/* if a lock is granted to the job then return 1; else return 0 if
 * the lock for the job is already taken or there's no more locks */
static int _get_suspend_job_lock(uint32_t job_id)
{
	static bool logged = false;
	int i, empty_loc = -1, rc = 0;

	slurm_mutex_lock(&suspend_mutex);
	for (i = 0; i < job_suspend_size; i++) {
		if (job_suspend_array[i] == 0) {
			empty_loc = i;
			continue;
		}
		if (job_suspend_array[i] == job_id) {
			/* another thread already a lock for this job ID */
			slurm_mutex_unlock(&suspend_mutex);
			return rc;
		}
	}

	if (empty_loc != -1) {
		/* nobody has the lock and here's an available used lock */
		job_suspend_array[empty_loc] = job_id;
		rc = 1;
	} else if (job_suspend_size < NUM_PARALLEL_SUSP_JOBS) {
		/* a new lock is available */
		job_suspend_array[job_suspend_size++] = job_id;
		rc = 1;
	} else if (!logged) {
		error("Simultaneous job suspend/resume limit reached (%d). "
		      "Configure SchedulerTimeSlice higher.",
		      NUM_PARALLEL_SUSP_JOBS);
		logged = true;
	}
	slurm_mutex_unlock(&suspend_mutex);
	return rc;
}

/* Release the suspend/resume lock (if any) held for the given job */
static void _unlock_suspend_job(uint32_t job_id)
{
	int i;
	slurm_mutex_lock(&suspend_mutex);
	for (i = 0; i < job_suspend_size; i++) {
		if (job_suspend_array[i] == job_id)
			job_suspend_array[i] = 0;
	}
	slurm_mutex_unlock(&suspend_mutex);
}

/* Add record for every launched job so we know they are ready for suspend */
extern void record_launched_jobs(void)
{
	List steps;
	ListIterator i;
	step_loc_t *stepd;

	steps = stepd_available(conf->spooldir, conf->node_name);
	i = list_iterator_create(steps);
	while ((stepd = list_next(i))) {
		_launch_complete_add(stepd->jobid);
	}
	list_iterator_destroy(i);
	FREE_NULL_LIST(steps);
}

/*
 * Send a job suspend/resume request through the appropriate slurmstepds for
 * each job step belonging to a given job allocation.
 */
static void _rpc_suspend_job(slurm_msg_t *msg)
{
	int time_slice = -1;
	suspend_int_msg_t *req = msg->data;
	uid_t req_uid = g_slurm_auth_get_uid(msg->auth_cred,
					     conf->auth_info);
	List steps;
	ListIterator i;
	step_loc_t *stepd;
	int step_cnt  = 0;
	int rc = SLURM_SUCCESS;
	DEF_TIMERS;

	/* NOTE(review): time_slice is a plain local, so this guard is
	 * always true and the config value is fetched on every call. */
	if (time_slice == -1)
		time_slice = slurm_get_time_slice();
	if ((req->op != SUSPEND_JOB) && (req->op != RESUME_JOB)) {
		error("REQUEST_SUSPEND_INT: bad op code %u", req->op);
		rc = ESLURM_NOT_SUPPORTED;
	}

	/*
	 * check that requesting user ID is the SLURM UID or root
	 */
	if (!_slurm_authorized_user(req_uid)) {
		error("Security violation: suspend_job(%u) from uid %d",
		      req->job_id, req_uid);
		rc = ESLURM_USER_ID_MISSING;
	}

	/* send a response now, which will include any errors
	 * detected with the request */
	if (msg->conn_fd >= 0) {
		slurm_send_rc_msg(msg, rc);
		if (slurm_close(msg->conn_fd) < 0)
			error("_rpc_suspend_job: close(%d): %m",
			      msg->conn_fd);
		msg->conn_fd = -1;
	}
	if (rc != SLURM_SUCCESS)
		return;

	/* now we can focus on performing the requested action,
	 * which could take a few seconds to complete */
	debug("_rpc_suspend_job jobid=%u uid=%d action=%s", req->job_id,
	      req_uid, req->op == SUSPEND_JOB ? "suspend" : "resume");

	/* Try to get a thread lock for this job. If the lock
	 * is not available then sleep and try again */
	while (!_get_suspend_job_lock(req->job_id)) {
		debug3("suspend lock sleep for %u", req->job_id);
		usleep(10000);
	}
	START_TIMER;

	/* Defer suspend until job prolog and launch complete */
	if (req->op == SUSPEND_JOB)
		_launch_complete_wait(req->job_id);

	if ((req->op == SUSPEND_JOB) && (req->indf_susp))
		switch_g_job_suspend(req->switch_info, 5);

	/* Release or reclaim resources bound to these tasks (task affinity) */
	if (req->op == SUSPEND_JOB) {
		(void) task_g_slurmd_suspend_job(req->job_id);
	} else {
		(void) task_g_slurmd_resume_job(req->job_id);
	}

	/*
	 * Loop through all job steps and call stepd_suspend or stepd_resume
	 * as appropriate. Since the "suspend" action may contains a sleep
	 * (if the launch is in progress) suspend multiple jobsteps in
	 * parallel, up to NUM_PARALLEL_SUSP_STEPS connections per batch.
	 */
	steps = stepd_available(conf->spooldir, conf->node_name);
	i = list_iterator_create(steps);

	while (1) {
		int x, fdi, fd[NUM_PARALLEL_SUSP_STEPS];
		uint16_t protocol_version[NUM_PARALLEL_SUSP_STEPS];
		fdi = 0;
		/* gather a batch of open step connections */
		while ((stepd = list_next(i))) {
			if (stepd->jobid != req->job_id) {
				/* multiple jobs expected on shared nodes */
				debug3("Step from other job: jobid=%u "
				       "(this jobid=%u)",
				       stepd->jobid, req->job_id);
				continue;
			}
			step_cnt++;

			fd[fdi] = stepd_connect(stepd->directory,
						stepd->nodename,
						stepd->jobid, stepd->stepid,
						&protocol_version[fdi]);
			if (fd[fdi] == -1) {
				debug3("Unable to connect to step %u.%u",
				       stepd->jobid, stepd->stepid);
				continue;
			}

			fdi++;
			if (fdi >= NUM_PARALLEL_SUSP_STEPS)
				break;
		}
		/* check for open connections */
		if (fdi == 0)
			break;

		if (req->op == SUSPEND_JOB) {
			int susp_fail_count = 0;
			/* The suspend RPCs are processed in parallel for
			 * every step in the job: phase 0 sends the request,
			 * phase 1 collects the replies. */
			for (x = 0; x < fdi; x++) {
				(void) stepd_suspend(fd[x],
						     protocol_version[x],
						     req, 0);
			}
			for (x = 0; x < fdi; x++) {
				if (stepd_suspend(fd[x],
						  protocol_version[x],
						  req, 1) < 0) {
					susp_fail_count++;
				} else {
					close(fd[x]);
					fd[x] = -1;
				}
			}
			/* Suspend RPCs can fail at step startup, so retry */
			if (susp_fail_count) {
				sleep(1);
				for (x = 0; x < fdi; x++) {
					if (fd[x] == -1)
						continue;
					(void) stepd_suspend(
						fd[x],
						protocol_version[x],
						req, 0);
					if (stepd_suspend(
						    fd[x],
						    protocol_version[x],
						    req, 1) >= 0)
						continue;
					debug("Suspend of job %u failed: %m",
					      req->job_id);
				}
			}
		} else {
			/* The resume RPCs are processed in parallel for
			 * every step in the job */
			for (x = 0; x < fdi; x++) {
				(void) stepd_resume(fd[x],
						    protocol_version[x],
						    req, 0);
			}
			for (x = 0; x < fdi; x++) {
				if (stepd_resume(fd[x],
						 protocol_version[x],
						 req, 1) < 0) {
					debug("Resume of job %u failed: %m",
					      req->job_id);
				}
			}
		}
		for (x = 0; x < fdi; x++) {
			/* fd may have been closed by stepd_suspend */
			if (fd[x] != -1)
				close(fd[x]);
		}

		/* check for no more jobs */
		if (fdi < NUM_PARALLEL_SUSP_STEPS)
			break;
	}
	list_iterator_destroy(i);
	FREE_NULL_LIST(steps);

	if ((req->op == RESUME_JOB) && (req->indf_susp))
		switch_g_job_resume(req->switch_info, 5);

	_unlock_suspend_job(req->job_id);

	END_TIMER;
	if (DELTA_TIMER >= (time_slice * 1000000)) {
		if (req->op == SUSPEND_JOB) {
			info("Suspend time for job_id %u was %s. "
			     "Configure SchedulerTimeSlice higher.",
			     req->job_id, TIME_STR);
		} else {
			info("Resume time for job_id %u was %s. "
			     "Configure SchedulerTimeSlice higher.",
			     req->job_id, TIME_STR);
		}
	}

	if (step_cnt == 0) {
		debug2("No steps in jobid %u to suspend/resume",
		       req->job_id);
	}
}

/* Job shouldn't even be running here, abort it immediately */
static void _rpc_abort_job(slurm_msg_t *msg)
{
	kill_job_msg_t *req    = msg->data;
	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, conf->auth_info);
	job_env_t job_env;

	debug("_rpc_abort_job, uid = %d", uid);
	/*
	 * check that requesting user ID is the SLURM UID
	 */
	if (!_slurm_authorized_user(uid)) {
		error("Security violation: abort_job(%u) from uid %d",
		      req->job_id, uid);
		if (msg->conn_fd >= 0)
			slurm_send_rc_msg(msg, ESLURM_USER_ID_MISSING);
		return;
	}

	task_g_slurmd_release_resources(req->job_id);

	/*
	 * "revoke" all future credentials for this jobid
	 */
	if (slurm_cred_revoke(conf->vctx, req->job_id, req->time,
			      req->start_time) < 0) {
		debug("revoking cred for job %u: %m", req->job_id);
	} else {
		save_cred_state(conf->vctx);
		debug("credential for job %u revoked", req->job_id);
	}

	/*
	 * At this point, if connection still open, we send controller
	 * a "success" reply to indicate that we've recvd the msg.
	 */
	if (msg->conn_fd >= 0) {
		slurm_send_rc_msg(msg, SLURM_SUCCESS);
		if (slurm_close(msg->conn_fd) < 0)
			error("rpc_abort_job: close(%d): %m", msg->conn_fd);
		msg->conn_fd = -1;
	}

	if (_kill_all_active_steps(req->job_id, SIG_ABORT, true)) {
		/*
		 * Block until all user processes are complete.
		 */
		_pause_for_job_completion (req->job_id, req->nodes, 0);
	}

	/*
	 * Begin expiration period for cached information about job.
	 * If expiration period has already begun, then do not run
	 * the epilog again, as that script has already been executed
	 * for this job.
	 */
	if (slurm_cred_begin_expiration(conf->vctx, req->job_id) < 0) {
		debug("Not running epilog for jobid %d: %m", req->job_id);
		return;
	}

	save_cred_state(conf->vctx);

	memset(&job_env, 0, sizeof(job_env_t));

	job_env.jobid = req->job_id;
	job_env.node_list = req->nodes;
	job_env.spank_job_env = req->spank_job_env;
	job_env.spank_job_env_size = req->spank_job_env_size;
	job_env.uid = req->job_uid;
#if defined(HAVE_BG)
	select_g_select_jobinfo_get(req->select_jobinfo,
				    SELECT_JOBDATA_BLOCK_ID,
				    &job_env.resv_id);
#elif defined(HAVE_ALPS_CRAY)
	job_env.resv_id = select_g_select_jobinfo_xstrdup(req->select_jobinfo,
							  SELECT_PRINT_RESV_ID);
#endif
	_run_epilog(&job_env);

	if (container_g_delete(req->job_id))
		error("container_g_delete(%u): %m", req->job_id);
	_launch_complete_rm(req->job_id);

	xfree(job_env.resv_id);
}

/* This is a variant of _rpc_terminate_job for use with select/serial */
static void _rpc_terminate_batch_job(uint32_t job_id, uint32_t user_id,
				     char *node_name)
{
	int             rc     = SLURM_SUCCESS;
	int             nsteps = 0;
	int             delay;
	time_t          now = time(NULL);
	slurm_ctl_conf_t *cf;
	job_env_t job_env;

	task_g_slurmd_release_resources(job_id);

	/* only one thread may wait on a given job's termination */
	if (_waiter_init(job_id) == SLURM_ERROR)
		return;

	/*
	 * "revoke" all future credentials for this jobid
	 */
	_note_batch_job_finished(job_id);
	if (slurm_cred_revoke(conf->vctx, job_id, now, now) < 0) {
		debug("revoking cred for job %u: %m", job_id);
	} else {
		save_cred_state(conf->vctx);
		debug("credential for job %u revoked", job_id);
	}

	/*
	 * Tasks might be stopped (possibly by a debugger)
	 * so send SIGCONT first.
	 */
	_kill_all_active_steps(job_id, SIGCONT, true);
	if (errno == ESLURMD_STEP_SUSPENDED) {
		/*
		 * If the job step is currently suspended, we don't
		 * bother with a "nice" termination.
		 */
		debug2("Job is currently suspended, terminating");
		nsteps = _terminate_all_steps(job_id, true);
	} else {
		nsteps = _kill_all_active_steps(job_id, SIGTERM, true);
	}

#ifndef HAVE_AIX
	/* no steps and no epilog: nothing left to do, clean up now */
	if ((nsteps == 0) && !conf->epilog) {
		slurm_cred_begin_expiration(conf->vctx, job_id);
		save_cred_state(conf->vctx);
		_waiter_complete(job_id);
		if (container_g_delete(job_id))
			error("container_g_delete(%u): %m", job_id);
		_launch_complete_rm(job_id);
		return;
	}
#endif

	/*
	 * Check for corpses
	 */
	cf = slurm_conf_lock();
	delay = MAX(cf->kill_wait, 5);
	slurm_conf_unlock();
	if (!_pause_for_job_completion(job_id, NULL, delay) &&
	    _terminate_all_steps(job_id, true) ) {
		/*
		 * Block until all user processes are complete.
		 */
		_pause_for_job_completion(job_id, NULL, 0);
	}

	/*
	 * Begin expiration period for cached information about job.
	 * If expiration period has already begun, then do not run
	 * the epilog again, as that script has already been executed
	 * for this job.
	 */
	if (slurm_cred_begin_expiration(conf->vctx, job_id) < 0) {
		debug("Not running epilog for jobid %d: %m", job_id);
		goto done;
	}

	save_cred_state(conf->vctx);

	memset(&job_env, 0, sizeof(job_env_t));

	job_env.jobid = job_id;
	job_env.node_list = node_name;
	job_env.uid = (uid_t)user_id;
	/* NOTE: We lack the job's SPANK environment variables */
	rc = _run_epilog(&job_env);

	if (rc) {
		int term_sig, exit_status;
		if (WIFSIGNALED(rc)) {
			exit_status = 0;
			term_sig    = WTERMSIG(rc);
		} else {
			exit_status = WEXITSTATUS(rc);
			term_sig    = 0;
		}
		error("[job %u] epilog failed status=%d:%d",
		      job_id, exit_status, term_sig);
	} else
		debug("completed epilog for jobid %u", job_id);
	if (container_g_delete(job_id))
		error("container_g_delete(%u): %m", job_id);
	_launch_complete_rm(job_id);

done:
	_wait_state_completed(job_id, 5);
	_waiter_complete(job_id);
}

/* Dispatch a REQUEST_BATCH_JOB_LAUNCH received as the controller's
 * response to a batch completion RPC (select/serial back-to-back
 * scheduling); any other message type is rejected. */
static void _handle_old_batch_job_launch(slurm_msg_t *msg)
{
	if (msg->msg_type != REQUEST_BATCH_JOB_LAUNCH) {
		error("_handle_batch_job_launch: "
		      "Invalid response msg_type (%u)", msg->msg_type);
		return;
	}

	/* (resp_msg.msg_type == REQUEST_BATCH_JOB_LAUNCH) */
	debug2("Processing RPC: REQUEST_BATCH_JOB_LAUNCH");
	last_slurmctld_msg = time(NULL);
	_rpc_batch_job(msg, false);
	slurm_free_job_launch_msg(msg->data);
	msg->data = NULL;
}

/* This complete batch RPC came from slurmstepd because we have select/serial
 * configured. Terminate the job here. Forward the batch completion RPC to
 * slurmctld and possible get a new batch launch RPC in response. */
static void _rpc_complete_batch(slurm_msg_t *msg)
{
	int             i, rc, msg_rc;
	slurm_msg_t     resp_msg;
	uid_t           uid    = g_slurm_auth_get_uid(msg->auth_cred,
						      conf->auth_info);
	complete_batch_script_msg_t *req = msg->data;
	static int      running_serial = -1;
	uint16_t msg_type;

	/* cache the select plugin type on first use */
	if (running_serial == -1) {
		char *select_type = slurm_get_select_type();
		if (!xstrcmp(select_type, "select/serial"))
			running_serial = 1;
		else
			running_serial = 0;
		xfree(select_type);
	}

	if (!_slurm_authorized_user(uid)) {
		error("Security violation: complete_batch(%u) from uid %d",
		      req->job_id, uid);
		if (msg->conn_fd >= 0)
			slurm_send_rc_msg(msg, ESLURM_USER_ID_MISSING);
		return;
	}

	slurm_send_rc_msg(msg, SLURM_SUCCESS);

	if (running_serial) {
		_rpc_terminate_batch_job(
			req->job_id, req->user_id, req->node_name);
		msg_type = REQUEST_COMPLETE_BATCH_JOB;
	} else
		msg_type = msg->msg_type;

	for (i = 0; i <= MAX_RETRY; i++) {
		if (conf->msg_aggr_window_msgs > 1) {
			/* message aggregation: hand the message off and
			 * let the callback process any launch response */
			slurm_msg_t *req_msg =
				xmalloc_nz(sizeof(slurm_msg_t));
			slurm_msg_t_init(req_msg);
			req_msg->msg_type = msg_type;
			req_msg->data = msg->data;
			msg->data = NULL;

			msg_aggr_add_msg(req_msg, 1,
					 _handle_old_batch_job_launch);
			return;
		} else {
			slurm_msg_t req_msg;
			slurm_msg_t_init(&req_msg);
			req_msg.msg_type = msg_type;
			req_msg.data	 = msg->data;
			msg_rc = slurm_send_recv_controller_msg(
				&req_msg, &resp_msg);

			if (msg_rc == SLURM_SUCCESS)
				break;
		}
		info("Retrying job complete RPC for job %u", req->job_id);
		sleep(RETRY_DELAY);
	}
	if (i > MAX_RETRY) {
		error("Unable to send job complete message: %m");
		return;
	}

	if (resp_msg.msg_type == RESPONSE_SLURM_RC) {
		last_slurmctld_msg = time(NULL);
		rc = ((return_code_msg_t *) resp_msg.data)->return_code;
		slurm_free_return_code_msg(resp_msg.data);
		if (rc) {
			error("complete_batch for job %u: %s", req->job_id,
			      slurm_strerror(rc));
		}
		return;
	}

	_handle_old_batch_job_launch(&resp_msg);
}

/* Terminate all steps of a job, run the epilog, and notify slurmctld
 * via MESSAGE_EPILOG_COMPLETE when done. */
static void _rpc_terminate_job(slurm_msg_t *msg)
{
#ifndef HAVE_AIX
	bool		have_spank = false;
#endif
	int             rc     = SLURM_SUCCESS;
	kill_job_msg_t *req    = msg->data;
	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, conf->auth_info);
	int             nsteps = 0;
	int		delay;
//	slurm_ctl_conf_t *cf;
//	struct stat	stat_buf;
	job_env_t       job_env;

	debug("_rpc_terminate_job, uid = %d", uid);
	/*
	 * check that requesting user ID is the SLURM UID
	 */
	if (!_slurm_authorized_user(uid)) {
		error("Security violation: kill_job(%u) from uid %d",
		      req->job_id, uid);
		if (msg->conn_fd >= 0)
			slurm_send_rc_msg(msg, ESLURM_USER_ID_MISSING);
		return;
	}

	task_g_slurmd_release_resources(req->job_id);

	/*
	 * Initialize a "waiter" thread for this jobid. If another
	 * thread is already waiting on termination of this job,
	 * _waiter_init() will return SLURM_ERROR. In this case, just
	 * notify slurmctld that we recvd the message successfully,
	 * then exit this thread.
	 */
	if (_waiter_init(req->job_id) == SLURM_ERROR) {
		if (msg->conn_fd >= 0) {
			/* No matter if the step hasn't started yet or
			 * not just send a success to let the
			 * controller know we got this request.
			 */
			slurm_send_rc_msg (msg, SLURM_SUCCESS);
		}
		return;
	}

	/*
	 * "revoke" all future credentials for this jobid
	 */
	if (slurm_cred_revoke(conf->vctx, req->job_id, req->time,
			      req->start_time) < 0) {
		debug("revoking cred for job %u: %m", req->job_id);
	} else {
		save_cred_state(conf->vctx);
		debug("credential for job %u revoked", req->job_id);
	}

	/*
	 * Before signalling steps, if the job has any steps that are still
	 * in the process of fork/exec/check in with slurmd, wait on a
	 * condition var for the start. Otherwise a slow-starting step can
	 * miss the job termination message and run indefinitely.
	 */
	if (_step_is_starting(req->job_id, NO_VAL)) {
		if (msg->conn_fd >= 0) {
			/* If the step hasn't started yet just send a
			 * success to let the controller know we got
			 * this request.
			 */
			debug("sent SUCCESS, waiting for step to start");
			slurm_send_rc_msg (msg, SLURM_SUCCESS);
			if (slurm_close(msg->conn_fd) < 0)
				error ( "rpc_kill_job: close(%d): %m",
					msg->conn_fd);
			msg->conn_fd = -1;
		}
		if (_wait_for_starting_step(req->job_id, NO_VAL)) {
			/*
			 * There's currently no case in which we enter this
			 * error condition. If there was, it's hard to say
			 * whether to to proceed with the job termination.
			 */
			error("Error in _wait_for_starting_step");
		}
	}

	/* advise the steps of the reason for the termination first */
	if (IS_JOB_NODE_FAILED(req))
		_kill_all_active_steps(req->job_id, SIG_NODE_FAIL, true);
	if (IS_JOB_PENDING(req))
		_kill_all_active_steps(req->job_id, SIG_REQUEUED, true);
	else if (IS_JOB_FAILED(req))
		_kill_all_active_steps(req->job_id, SIG_FAILURE, true);

	/*
	 * Tasks might be stopped (possibly by a debugger)
	 * so send SIGCONT first.
	 */
	_kill_all_active_steps(req->job_id, SIGCONT, true);
	if (errno == ESLURMD_STEP_SUSPENDED) {
		/*
		 * If the job step is currently suspended, we don't
		 * bother with a "nice" termination.
		 */
		debug2("Job is currently suspended, terminating");
		nsteps = _terminate_all_steps(req->job_id, true);
	} else {
		nsteps = _kill_all_active_steps(req->job_id, SIGTERM, true);
	}

#ifndef HAVE_AIX
	if ((nsteps == 0) && !conf->epilog) {
		struct stat stat_buf;
		if (conf->plugstack &&
		    (stat(conf->plugstack, &stat_buf) == 0))
			have_spank = true;
	}
	/*
	 * If there are currently no active job steps and no
	 * configured epilog to run, bypass asynchronous reply and
	 * notify slurmctld that we have already completed this
	 * request. We need to send current switch state on AIX
	 * systems, so this bypass can not be used.
	 */
	if ((nsteps == 0) && !conf->epilog && !have_spank) {
		debug4("sent ALREADY_COMPLETE");
		if (msg->conn_fd >= 0) {
			slurm_send_rc_msg(msg,
					  ESLURMD_KILL_JOB_ALREADY_COMPLETE);
		}
		slurm_cred_begin_expiration(conf->vctx, req->job_id);
		save_cred_state(conf->vctx);
		_waiter_complete(req->job_id);

		/*
		 * The controller needs to get MESSAGE_EPILOG_COMPLETE to bring
		 * the job out of "completing" state. Otherwise, the job
		 * could remain "completing" unnecessarily, until the request
		 * to terminate is resent.
		 */
		_sync_messages_kill(req);
		if (msg->conn_fd < 0) {
			/* The epilog complete message processing on
			 * slurmctld is equivalent to that of a
			 * ESLURMD_KILL_JOB_ALREADY_COMPLETE reply above */
			_epilog_complete(req->job_id, rc);
		}
		if (container_g_delete(req->job_id))
			error("container_g_delete(%u): %m", req->job_id);
		_launch_complete_rm(req->job_id);
		return;
	}
#endif

	/*
	 * At this point, if connection still open, we send controller
	 * a "success" reply to indicate that we've recvd the msg.
	 */
	if (msg->conn_fd >= 0) {
		debug4("sent SUCCESS");
		slurm_send_rc_msg(msg, SLURM_SUCCESS);
		if (slurm_close(msg->conn_fd) < 0)
			error ("rpc_kill_job: close(%d): %m", msg->conn_fd);
		msg->conn_fd = -1;
	}

	/*
	 * Check for corpses
	 */
	delay = MAX(conf->kill_wait, 5);
	if ( !_pause_for_job_completion (req->job_id, req->nodes, delay) &&
	     _terminate_all_steps(req->job_id, true) ) {
		/*
		 * Block until all user processes are complete.
		 */
		_pause_for_job_completion (req->job_id, req->nodes, 0);
	}

	/*
	 * Begin expiration period for cached information about job.
	 * If expiration period has already begun, then do not run
	 * the epilog again, as that script has already been executed
	 * for this job.
	 */
	if (slurm_cred_begin_expiration(conf->vctx, req->job_id) < 0) {
		debug("Not running epilog for jobid %d: %m", req->job_id);
		goto done;
	}

	save_cred_state(conf->vctx);

	memset(&job_env, 0, sizeof(job_env_t));

	job_env.jobid = req->job_id;
	job_env.node_list = req->nodes;
	job_env.spank_job_env = req->spank_job_env;
	job_env.spank_job_env_size = req->spank_job_env_size;
	job_env.uid = req->job_uid;
#if defined(HAVE_BG)
	select_g_select_jobinfo_get(req->select_jobinfo,
				    SELECT_JOBDATA_BLOCK_ID,
				    &job_env.resv_id);
#elif defined(HAVE_ALPS_CRAY)
	job_env.resv_id = select_g_select_jobinfo_xstrdup(
		req->select_jobinfo, SELECT_PRINT_RESV_ID);
#endif
	rc = _run_epilog(&job_env);
	xfree(job_env.resv_id);

	if (rc) {
		int term_sig, exit_status;
		if (WIFSIGNALED(rc)) {
			exit_status = 0;
			term_sig    = WTERMSIG(rc);
		} else {
			exit_status = WEXITSTATUS(rc);
			term_sig    = 0;
		}
		error("[job %u] epilog failed status=%d:%d",
		      req->job_id, exit_status, term_sig);
		rc = ESLURMD_EPILOG_FAILED;
	} else
		debug("completed epilog for jobid %u", req->job_id);
	if (container_g_delete(req->job_id))
		error("container_g_delete(%u): %m", req->job_id);
	_launch_complete_rm(req->job_id);

done:
	_wait_state_completed(req->job_id, 5);
	_waiter_complete(req->job_id);
	_sync_messages_kill(req);

	_epilog_complete(req->job_id, rc);
}

/* On a parallel job, every slurmd may send the EPILOG_COMPLETE
 * message to the slurmctld at the same time, resulting in lost
 * messages. We add a delay here to spead out the message traffic
 * assuming synchronized clocks across the cluster.
 * Allow 10 msec processing time in slurmctld for each RPC.
*/
/*
 * Stagger this node's EPILOG_COMPLETE traffic on large jobs so that all
 * slurmds do not flood slurmctld simultaneously.  Only engages when the
 * job spans more than 64 nodes; the delay slot is this node's position
 * within the job's node list.
 *
 * NOTE(review): the NULL guard tests conf->hostname but the comparison
 * below uses conf->node_name — presumably both are always set together;
 * confirm against slurmd initialization.
 */
static void _sync_messages_kill(kill_job_msg_t *req)
{
    int host_cnt, host_inx;
    char *host;
    hostset_t hosts;
    int epilog_msg_time;

    hosts = hostset_create(req->nodes);
    host_cnt = hostset_count(hosts);
    if (host_cnt <= 64)
        goto fini;          /* small job: no staggering needed */
    if (conf->hostname == NULL)
        goto fini;          /* should never happen */

    /* Find this node's index within the job's node list. */
    for (host_inx = 0; host_inx < host_cnt; host_inx++) {
        host = hostset_shift(hosts);
        if (host == NULL)
            break;
        if (xstrcmp(host, conf->node_name) == 0) {
            free(host);
            break;
        }
        free(host);
    }
    epilog_msg_time = slurm_get_epilog_msg_time();
    _delay_rpc(host_inx, host_cnt, epilog_msg_time);

fini:
    hostset_destroy(hosts);
}

/* Delay a message based upon the host index, total host count and RPC_TIME.
 * This logic depends upon synchronized clocks across the cluster.
 *
 * NOTE(review): tot_time = host_cnt * usec_per_rpc is used as a modulus;
 * if slurm_get_epilog_msg_time() can return 0 this divides by zero —
 * confirm the configured minimum. */
static void _delay_rpc(int host_inx, int host_cnt, int usec_per_rpc)
{
    struct timeval tv1;
    uint32_t cur_time;    /* current time in usec (just 9 digits) */
    uint32_t tot_time;    /* total time expected for all RPCs */
    uint32_t offset_time; /* relative time within tot_time */
    uint32_t target_time; /* desired time to issue the RPC */
    uint32_t delta_time;

again:
    if (gettimeofday(&tv1, NULL)) {
        /* Clock unavailable: fall back to a simple per-index delay. */
        usleep(host_inx * usec_per_rpc);
        return;
    }

    cur_time = ((tv1.tv_sec % 1000) * 1000000) + tv1.tv_usec;
    tot_time = host_cnt * usec_per_rpc;
    offset_time = cur_time % tot_time;
    target_time = host_inx * usec_per_rpc;
    /* Unsigned wraparound is intentional: if our slot already passed in
     * this cycle, wait for the same slot in the next cycle. */
    if (target_time < offset_time)
        delta_time = target_time - offset_time + tot_time;
    else
        delta_time = target_time - offset_time;
    if (usleep(delta_time)) {
        if (errno == EINVAL) /* usleep for more than 1 sec */
            usleep(900000);
        /* errno == EINTR */
        goto again;
    }
}

/*
 * Returns true if "uid" is a "slurm authorized user" - i.e. uid == 0
 * or uid == slurm user id at this time.
*/
static bool _slurm_authorized_user(uid_t uid)
{
    return ((uid == (uid_t) 0) || (uid == conf->slurm_user_id));
}

/* One entry per in-flight terminate-job request; identifies the thread
 * currently handling termination for a given job id. */
struct waiter {
    uint32_t jobid;
    pthread_t thd;
};

static struct waiter *_waiter_create(uint32_t jobid)
{
    struct waiter *wp = xmalloc(sizeof(struct waiter));

    wp->jobid = jobid;
    wp->thd = pthread_self();

    return wp;
}

/* ListFindF: match a waiter by job id. */
static int _find_waiter(struct waiter *w, uint32_t *jp)
{
    return (w->jobid == *jp);
}

static void _waiter_destroy(struct waiter *wp)
{
    xfree(wp);
}

/* Register the calling thread as the single waiter for this job.
 * Returns SLURM_ERROR if another thread is already waiting on it.
 *
 * NOTE(review): no lock around the lazy creation/lookup of the global
 * "waiters" list is visible in this chunk — confirm callers serialize
 * access or that a lock exists elsewhere in the file. */
static int _waiter_init (uint32_t jobid)
{
    if (!waiters)
        waiters = list_create((ListDelF) _waiter_destroy);

    /*
     * Exit this thread if another thread is waiting on job
     */
    if (list_find_first (waiters, (ListFindF) _find_waiter, &jobid))
        return SLURM_ERROR;
    else
        list_append(waiters, _waiter_create(jobid));

    return (SLURM_SUCCESS);
}

/* Drop the waiter registration for this job. */
static int _waiter_complete (uint32_t jobid)
{
    return (list_delete_all (waiters, (ListFindF) _find_waiter, &jobid));
}

/*
 * Like _wait_for_procs(), but only wait for up to max_time seconds
 * if max_time == 0, send SIGKILL to tasks repeatedly
 *
 * Returns true if all job processes are gone
 */
static bool
_pause_for_job_completion (uint32_t job_id, char *nodes, int max_time)
{
    int sec = 0;
    int pause = 1;
    bool rc = false;

    while ((sec < max_time) || (max_time == 0)) {
        rc = _job_still_running (job_id);
        if (!rc)
            break;
        /* In the unbounded case, keep re-sending SIGKILL after the
         * first couple of seconds. */
        if ((max_time == 0) && (sec > 1)) {
            _terminate_all_steps(job_id, true);
        }
        if (sec > 10) {
            /* Reduce logging frequency about unkillable tasks */
            if (max_time)
                pause = MIN((max_time - sec), 10);
            else
                pause = 10;
        }
        sleep(pause);
        sec += pause;
    }

    /*
     * Return true if job is NOT running
     */
    return (!rc);
}

/*
 * Does nothing and returns SLURM_SUCCESS (if uid authenticates).
 *
 * Timelimit is not currently used in the slurmd or slurmstepd.
*/
static void _rpc_update_time(slurm_msg_t *msg)
{
    int   rc      = SLURM_SUCCESS;
    uid_t req_uid = g_slurm_auth_get_uid(msg->auth_cred, conf->auth_info);

    /* Only root or the configured SlurmUser may issue this RPC. */
    if ((req_uid != conf->slurm_user_id) && (req_uid != 0)) {
        rc = ESLURM_USER_ID_MISSING;
        error("Security violation, uid %d can't update time limit",
              req_uid);
        goto done;
    }

/*  if (shm_update_job_timelimit(req->job_id, req->expiration_time) < 0) { */
/*      error("updating lifetime for job %u: %m", req->job_id); */
/*      rc = ESLURM_INVALID_JOB_ID; */
/*  } else */
/*      debug("reset job %u lifetime", req->job_id); */

done:
    slurm_send_rc_msg(msg, rc);
}

/* Build the environment handed to prolog/epilog scripts from job_env.
 * NOTE: call _destroy_env() to free returned value.
 *
 * NOTE(review): conf->conffile is passed to setenvf() in the format-string
 * position (no "%s") unlike every other call here — safe only if the
 * config path can never contain '%'; confirm. */
static char **
_build_env(job_env_t *job_env)
{
    char **env = xmalloc(sizeof(char *));
    bool user_name_set = 0;

    env[0] = NULL;

    /* SPANK-provided environment is validated before use. */
    if (!valid_spank_job_env(job_env->spank_job_env,
                             job_env->spank_job_env_size,
                             job_env->uid)) {
        /* If SPANK job environment is bad, log it and do not use */
        job_env->spank_job_env_size = 0;
        job_env->spank_job_env = (char **) NULL;
    }
    if (job_env->spank_job_env_size) {
        env_array_merge_spank(&env,
                              (const char **) job_env->spank_job_env);
    }

    /* config_mutex guards the conf fields read here. */
    slurm_mutex_lock(&conf->config_mutex);
    setenvf(&env, "SLURMD_NODENAME", "%s", conf->node_name);
    setenvf(&env, "SLURM_CONF", conf->conffile);
    slurm_mutex_unlock(&conf->config_mutex);

    setenvf(&env, "SLURM_CLUSTER_NAME", "%s", conf->cluster_name);
    setenvf(&env, "SLURM_JOB_ID", "%u", job_env->jobid);
    setenvf(&env, "SLURM_JOB_UID", "%u", job_env->uid);

#ifndef HAVE_NATIVE_CRAY
    /* uid_to_string on a cray is a heavy call, so try to avoid it */
    if (!job_env->user_name) {
        job_env->user_name = uid_to_string(job_env->uid);
        user_name_set = 1;
    }
#endif

    setenvf(&env, "SLURM_JOB_USER", "%s", job_env->user_name);
    if (user_name_set)
        xfree(job_env->user_name);

    setenvf(&env, "SLURM_JOBID", "%u", job_env->jobid);
    setenvf(&env, "SLURM_UID", "%u", job_env->uid);
    if (job_env->node_list)
        setenvf(&env, "SLURM_NODELIST", "%s", job_env->node_list);

    if (job_env->partition)
        setenvf(&env, "SLURM_JOB_PARTITION", "%s", job_env->partition);

    if (job_env->resv_id) {
#if defined(HAVE_BG)
        setenvf(&env, "MPIRUN_PARTITION", "%s", job_env->resv_id);
# ifdef HAVE_BGP
        /* Needed for HTC jobs */
        setenvf(&env, "SUBMIT_POOL", "%s", job_env->resv_id);
# endif
#elif defined(HAVE_ALPS_CRAY)
        setenvf(&env, "BASIL_RESERVATION_ID", "%s", job_env->resv_id);
#endif
    }

    return env;
}

/* Free a NULL-terminated environment array built by _build_env(). */
static void
_destroy_env(char **env)
{
    int i = 0;

    if (env) {
        for (i = 0; env[i]; i++) {
            xfree(env[i]);
        }
        xfree(env);
    }

    return;
}

/* Trigger srun of spank prolog or epilog in slurmstepd.
 * Forks "slurmstepd spank <mode>", feeds it the slurmd configuration over
 * a pipe attached to its stdin, then waits (bounded) for it to finish. */
static int
_run_spank_job_script (const char *mode, char **env, uint32_t job_id,
                       uid_t uid)
{
    pid_t cpid;
    int status = 0, timeout;
    int pfds[2];

    if (pipe (pfds) < 0) {
        error ("_run_spank_job_script: pipe: %m");
        return (-1);
    }

    /* Write end must not leak into the exec'd child. */
    fd_set_close_on_exec (pfds[1]);

    debug ("Calling %s spank %s", conf->stepd_loc, mode);
    if ((cpid = fork ()) < 0) {
        error ("executing spank %s: %m", mode);
        return (-1);
    }
    if (cpid == 0) {
        /* Run slurmstepd spank [prolog|epilog] */
        char *argv[4] = {
            (char *) conf->stepd_loc,
            "spank",
            (char *) mode,
            NULL
        };

        /* container_g_add_pid needs to be called in the forked process
         * part of the fork to avoid a race condition where this process
         * could create a file or detach itself from a child before the
         * pid is added to the container by the parent of the fork. */
        if (container_g_add_pid(job_id, getpid(), getuid())
            != SLURM_SUCCESS)
            error("container_g_add_pid(%u): %m", job_id);

        if (dup2 (pfds[0], STDIN_FILENO) < 0)
            fatal ("dup2: %m");
#ifdef SETPGRP_TWO_ARGS
        setpgrp(0, 0);
#else
        setpgrp();
#endif
        /* Prefer the chos wrapper when configured and executable. */
        if (conf->chos_loc && !access(conf->chos_loc, X_OK))
            execve(conf->chos_loc, argv, env);
        else
            execve(argv[0], argv, env);
        error ("execve(%s): %m", argv[0]);
        exit (127);
    }

    close (pfds[0]);

    /* Ship the slurmd configuration to the stepd over its stdin pipe. */
    if (_send_slurmd_conf_lite (pfds[1], conf) < 0)
        error ("Failed to send slurmd conf to slurmstepd\n");
    close (pfds[1]);

    timeout = MAX(slurm_get_prolog_timeout(), 120); /* 120 secs in v15.08 */
    if (waitpid_timeout (mode, cpid, &status, timeout) < 0) {
        error ("spank/%s timed out after %u secs", mode, timeout);
        return (-1);
    }

    if (status)
        error ("spank/%s returned status 0x%04x", mode, status);

    /*
     * No longer need SPANK option env vars in environment
     */
    spank_clear_remote_options_env (env);

    return (status);
}

static int
_run_job_script (const char *name, const char *path, uint32_t jobid,
                 int timeout, char **env, uid_t uid)
{
    struct stat stat_buf;
    int status = 0, rc;

    /*
     * Always run both spank prolog/epilog and real prolog/epilog script,
     * even if spank plugins fail. (May want to alter this in the future)
     * If both "script" mechanisms fail, prefer to return the "real"
     * prolog/epilog status.
*/
    if (conf->plugstack && (stat(conf->plugstack, &stat_buf) == 0))
        status = _run_spank_job_script(name, env, jobid, uid);
    if ((rc = run_script(name, path, jobid, timeout, env, uid)))
        status = rc;    /* "real" script status wins when it fails */

    return (status);
}

#ifdef HAVE_BG
/* a slow prolog is expected on bluegene systems: run it without any
 * timeout (-1) and without a hang-detection timer */
static int _run_prolog(job_env_t *job_env, slurm_cred_t *cred)
{
    int rc;
    char *my_prolog;
    char **my_env;

    my_env = _build_env(job_env);
    setenvf(&my_env, "SLURM_STEP_ID", "%u", job_env->step_id);

    slurm_mutex_lock(&conf->config_mutex);
    my_prolog = xstrdup(conf->prolog);
    slurm_mutex_unlock(&conf->config_mutex);

    rc = _run_job_script("prolog", my_prolog, job_env->jobid,
                         -1, my_env, job_env->uid);
    _remove_job_running_prolog(job_env->jobid);
    xfree(my_prolog);
    _destroy_env(my_env);

    return rc;
}
#else
/* Watchdog thread for _run_prolog(): if the prolog has not finished
 * within (msg_timeout - 2) seconds, notify srun that it is hung. */
static void *_prolog_timer(void *x)
{
    int delay_time, rc = SLURM_SUCCESS;
    struct timespec ts;
    struct timeval now;
    slurm_msg_t msg;
    job_notify_msg_t notify_req;
    char srun_msg[128];
    timer_struct_t *timer_struct = (timer_struct_t *) x;

    delay_time = MAX(2, (timer_struct->msg_timeout - 2));
    gettimeofday(&now, NULL);
    ts.tv_sec = now.tv_sec + delay_time;
    ts.tv_nsec = now.tv_usec * 1000;
    slurm_mutex_lock(timer_struct->timer_mutex);
    if (!timer_struct->prolog_fini) {
        rc = pthread_cond_timedwait(timer_struct->timer_cond,
                                    timer_struct->timer_mutex, &ts);
    }
    slurm_mutex_unlock(timer_struct->timer_mutex);

    /* Prolog finished in time: nothing to report. */
    if (rc != ETIMEDOUT)
        return NULL;

    slurm_msg_t_init(&msg);
    snprintf(srun_msg, sizeof(srun_msg), "Prolog hung on node %s",
             conf->node_name);
    notify_req.job_id      = timer_struct->job_id;
    notify_req.job_step_id = NO_VAL;
    notify_req.message     = srun_msg;
    msg.msg_type = REQUEST_JOB_NOTIFY;
    msg.data     = &notify_req;
    slurm_send_only_controller_msg(&msg);
    return NULL;
}

/* Run the configured Prolog script for a job, with a watchdog timer
 * that warns srun if the script hangs. */
static int _run_prolog(job_env_t *job_env, slurm_cred_t *cred)
{
    DEF_TIMERS;
    int rc, diff_time;
    char *my_prolog;
    time_t start_time = time(NULL);
    static uint16_t msg_timeout = 0;    /* cached config values */
    static uint16_t timeout;
    pthread_t timer_id;
    pthread_attr_t timer_attr;
    pthread_cond_t  timer_cond  = PTHREAD_COND_INITIALIZER;
    pthread_mutex_t timer_mutex = PTHREAD_MUTEX_INITIALIZER;
    timer_struct_t timer_struct;
    bool prolog_fini = false;
    char **my_env;

    my_env = _build_env(job_env);
    setenvf(&my_env, "SLURM_STEP_ID", "%u", job_env->step_id);
    if (cred) {
        /* Export job constraints and GRES details from the credential. */
        slurm_cred_arg_t cred_arg;
        slurm_cred_get_args(cred, &cred_arg);
        setenvf(&my_env, "SLURM_JOB_CONSTRAINTS", "%s",
                cred_arg.job_constraints);
        gres_plugin_job_set_env(&my_env, cred_arg.job_gres_list);
        slurm_cred_free_args(&cred_arg);
    }

    if (msg_timeout == 0)
        msg_timeout = slurm_get_msg_timeout();
    if (timeout == 0)
        timeout = slurm_get_prolog_timeout();

    slurm_mutex_lock(&conf->config_mutex);
    my_prolog = xstrdup(conf->prolog);
    slurm_mutex_unlock(&conf->config_mutex);

    slurm_attr_init(&timer_attr);
    timer_struct.job_id      = job_env->jobid;
    timer_struct.msg_timeout = msg_timeout;
    timer_struct.prolog_fini = &prolog_fini;
    timer_struct.timer_cond  = &timer_cond;
    timer_struct.timer_mutex = &timer_mutex;
    pthread_create(&timer_id, &timer_attr, &_prolog_timer, &timer_struct);

    START_TIMER;
    if (timeout == (uint16_t)NO_VAL)
        rc = _run_job_script("prolog", my_prolog, job_env->jobid,
                             -1, my_env, job_env->uid);
    else
        rc = _run_job_script("prolog", my_prolog, job_env->jobid,
                             timeout, my_env, job_env->uid);
    END_TIMER;
    info("%s: run job script took %s", __func__, TIME_STR);

    /* Wake the watchdog so it exits without sending a notification. */
    slurm_mutex_lock(&timer_mutex);
    prolog_fini = true;
    pthread_cond_broadcast(&timer_cond);
    slurm_mutex_unlock(&timer_mutex);

    diff_time = difftime(time(NULL), start_time);
    info("%s: prolog with lock for job %u ran for %d seconds",
         __func__, job_env->jobid, diff_time);
    if (diff_time >= (msg_timeout / 2)) {
        info("prolog for job %u ran for %d seconds",
             job_env->jobid, diff_time);
    }

    _remove_job_running_prolog(job_env->jobid);
    xfree(my_prolog);
    _destroy_env(my_env);
    pthread_join(timer_id, NULL);

    return rc;
}
#endif

/* Run the configured Epilog script after waiting for any still-running
 * prolog of the same job to finish.
 * NOTE(review): the epilog reuses slurm_get_prolog_timeout() — presumably
 * intentional (shared timeout); confirm. */
static int
_run_epilog(job_env_t *job_env)
{
    time_t start_time = time(NULL);
    static uint16_t msg_timeout = 0;    /* cached config values */
    static uint16_t timeout;
    int error_code, diff_time;
    char *my_epilog;
    char **my_env = _build_env(job_env);

    if (msg_timeout == 0)
        msg_timeout = slurm_get_msg_timeout();
    if (timeout == 0)
        timeout = slurm_get_prolog_timeout();

    slurm_mutex_lock(&conf->config_mutex);
    my_epilog = xstrdup(conf->epilog);
    slurm_mutex_unlock(&conf->config_mutex);

    _wait_for_job_running_prolog(job_env->jobid);

    if (timeout == (uint16_t)NO_VAL)
        error_code = _run_job_script("epilog", my_epilog, job_env->jobid,
                                     -1, my_env, job_env->uid);
    else
        error_code = _run_job_script("epilog", my_epilog, job_env->jobid,
                                     timeout, my_env, job_env->uid);

    xfree(my_epilog);
    _destroy_env(my_env);

    diff_time = difftime(time(NULL), start_time);
    if (diff_time >= (msg_timeout / 2)) {
        info("epilog for job %u ran for %d seconds",
             job_env->jobid, diff_time);
    }

    return error_code;
}

/**********************************************************************/
/* Because calling initgroups(2)/getgrouplist(3) can be expensive and */
/* is not cached by sssd or nscd, we cache the group access list.     */
/**********************************************************************/

/* One cache entry; entries hash by user name and chain via "next". */
typedef struct gid_cache_s {
    char *user;
    time_t timestamp;       /* when the entry was created/refreshed */
    gid_t gid;
    gids_t *gids;
    struct gid_cache_s *next;
} gids_cache_t;

#define GIDS_HASH_LEN 64
static gids_cache_t *gids_hashtbl[GIDS_HASH_LEN] = {NULL};
/* gids_mutex guards gids_hashtbl and all entries hanging off it. */
static pthread_mutex_t gids_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Wrap an existing gid array (takes ownership of "gids"). */
static gids_t *
_alloc_gids(int n, gid_t *gids)
{
    gids_t *new;

    new = (gids_t *)xmalloc(sizeof(gids_t));
    new->ngids = n;
    new->gids = gids;
    return new;
}

static void
_dealloc_gids(gids_t *p)
{
    xfree(p->gids);
    xfree(p);
}

/* Duplicate a gids_t struct.
*/ static gids_t * _gids_dup(gids_t *g) { int buf_size; gids_t *n = xmalloc(sizeof(gids_t)); n->ngids = g->ngids; buf_size = g->ngids * sizeof(gid_t); n->gids = xmalloc(buf_size); memcpy(n->gids, g->gids, buf_size); return n; } static gids_cache_t * _alloc_gids_cache(char *user, gid_t gid, gids_t *gids, gids_cache_t *next) { gids_cache_t *p; p = (gids_cache_t *)xmalloc(sizeof(gids_cache_t)); p->user = xstrdup(user); p->timestamp = time(NULL); p->gid = gid; p->gids = gids; p->next = next; return p; } static void _dealloc_gids_cache(gids_cache_t *p) { xfree(p->user); _dealloc_gids(p->gids); xfree(p); } static size_t _gids_hashtbl_idx(const char *user) { uint64_t x = siphash_str(user); return x % GIDS_HASH_LEN; } void gids_cache_purge(void) { int i; gids_cache_t *p, *q; slurm_mutex_lock(&gids_mutex); for (i=0; i<GIDS_HASH_LEN; i++) { p = gids_hashtbl[i]; while (p) { q = p->next; _dealloc_gids_cache(p); p = q; } gids_hashtbl[i] = NULL; } slurm_mutex_unlock(&gids_mutex); } static void _gids_cache_register(char *user, gid_t gid, gids_t *gids) { size_t idx; gids_cache_t *p, *q; idx = _gids_hashtbl_idx(user); q = gids_hashtbl[idx]; p = _alloc_gids_cache(user, gid, gids, q); gids_hashtbl[idx] = p; debug2("Cached group access list for %s/%d", user, gid); } /* how many groups to use by default to avoid repeated calls to getgrouplist */ #define NGROUPS_START 64 static gids_t *_gids_cache_lookup(char *user, gid_t gid) { size_t idx; gids_cache_t *p; bool found_but_old = false; time_t now = 0; int ngroups = NGROUPS_START; gid_t *groups; gids_t *ret_gids = NULL; idx = _gids_hashtbl_idx(user); slurm_mutex_lock(&gids_mutex); p = gids_hashtbl[idx]; while (p) { if (xstrcmp(p->user, user) == 0 && p->gid == gid) { slurm_ctl_conf_t *cf = slurm_conf_lock(); int group_ttl = cf->group_info & GROUP_TIME_MASK; slurm_conf_unlock(); if (!group_ttl) { ret_gids = _gids_dup(p->gids); goto done; } now = time(NULL); if (difftime(now, p->timestamp) < group_ttl) { ret_gids = _gids_dup(p->gids); goto 
done; } else { found_but_old = true; break; } } p = p->next; } /* Cache lookup failed or cached value was too old, fetch new * value and insert it into cache. */ groups = xmalloc(ngroups * sizeof(gid_t)); while (getgrouplist(user, gid, groups, &ngroups) == -1) { /* group list larger than array, resize array to fit */ groups = xrealloc(groups, ngroups * sizeof(gid_t)); } if (found_but_old) { xfree(p->gids->gids); p->gids->gids = groups; p->gids->ngids = ngroups; p->timestamp = now; ret_gids = _gids_dup(p->gids); } else { gids_t *gids = _alloc_gids(ngroups, groups); _gids_cache_register(user, gid, gids); ret_gids = _gids_dup(gids); } done: slurm_mutex_unlock(&gids_mutex); return ret_gids; } extern void destroy_starting_step(void *x) { xfree(x); } static int _add_starting_step(uint16_t type, void *req) { starting_step_t *starting_step; int rc = SLURM_SUCCESS; /* Add the step info to a list of starting processes that cannot reliably be contacted. */ slurm_mutex_lock(&conf->starting_steps_lock); starting_step = xmalloc(sizeof(starting_step_t)); if (!starting_step) { error("%s failed to allocate memory", __func__); rc = SLURM_FAILURE; goto fail; } switch (type) { case LAUNCH_BATCH_JOB: starting_step->job_id = ((batch_job_launch_msg_t *)req)->job_id; starting_step->step_id = ((batch_job_launch_msg_t *)req)->step_id; break; case LAUNCH_TASKS: starting_step->job_id = ((launch_tasks_request_msg_t *)req)->job_id; starting_step->step_id = ((launch_tasks_request_msg_t *)req)->job_step_id; break; case REQUEST_LAUNCH_PROLOG: starting_step->job_id = ((prolog_launch_msg_t *)req)->job_id; starting_step->step_id = SLURM_EXTERN_CONT; break; default: error("%s called with an invalid type: %u", __func__, type); rc = SLURM_FAILURE; xfree(starting_step); goto fail; } if (!list_append(conf->starting_steps, starting_step)) { error("%s failed to allocate memory for list", __func__); rc = SLURM_FAILURE; xfree(starting_step); goto fail; } fail: slurm_mutex_unlock(&conf->starting_steps_lock); 
return rc; } static int _remove_starting_step(uint16_t type, void *req) { uint32_t job_id, step_id; ListIterator iter; starting_step_t *starting_step; int rc = SLURM_SUCCESS; bool found = false; slurm_mutex_lock(&conf->starting_steps_lock); switch(type) { case LAUNCH_BATCH_JOB: job_id = ((batch_job_launch_msg_t *)req)->job_id; step_id = ((batch_job_launch_msg_t *)req)->step_id; break; case LAUNCH_TASKS: job_id = ((launch_tasks_request_msg_t *)req)->job_id; step_id = ((launch_tasks_request_msg_t *)req)->job_step_id; break; default: error("%s called with an invalid type: %u", __func__, type); rc = SLURM_FAILURE; goto fail; } iter = list_iterator_create(conf->starting_steps); while ((starting_step = list_next(iter))) { if (starting_step->job_id == job_id && starting_step->step_id == step_id) { starting_step = list_remove(iter); xfree(starting_step); found = true; pthread_cond_broadcast(&conf->starting_steps_cond); break; } } if (!found) { error("%s: step %u.%u not found", __func__, job_id, step_id); rc = SLURM_FAILURE; } fail: slurm_mutex_unlock(&conf->starting_steps_lock); return rc; } static int _compare_starting_steps(void *listentry, void *key) { starting_step_t *step0 = (starting_step_t *)listentry; starting_step_t *step1 = (starting_step_t *)key; if (step1->step_id != NO_VAL) return (step0->job_id == step1->job_id && step0->step_id == step1->step_id); else return (step0->job_id == step1->job_id); } /* Wait for a step to get far enough in the launch process to have a socket open, ready to handle RPC calls. Pass step_id = NO_VAL to wait on any step for the given job. 
*/
static int _wait_for_starting_step(uint32_t job_id, uint32_t step_id)
{
    starting_step_t key;
    bool waited = false;

    key.job_id = job_id;
    key.step_id = step_id;

    slurm_mutex_lock(&conf->starting_steps_lock);

    /* Sleep on the condition variable until no matching entry remains
     * in the starting-steps list; log once when first blocked. */
    for (;;) {
        if (!list_find_first(conf->starting_steps,
                             &_compare_starting_steps, &key))
            break;
        if (!waited) {
            waited = true;
            if (step_id != NO_VAL)
                debug("Blocked waiting for step %d.%d",
                      job_id, step_id);
            else
                debug("Blocked waiting for job %d, all steps",
                      job_id);
        }
        pthread_cond_wait(&conf->starting_steps_cond,
                          &conf->starting_steps_lock);
    }

    if (waited) {
        if (step_id != NO_VAL)
            debug("Finished wait for step %d.%d", job_id, step_id);
        else
            debug("Finished wait for job %d, all steps", job_id);
    }

    slurm_mutex_unlock(&conf->starting_steps_lock);

    return SLURM_SUCCESS;
}

/* Return true if the step has not yet confirmed that its socket to
   handle RPC calls has been created.  Pass step_id = NO_VAL to return
   true if any of the job's steps are still starting.
*/ static bool _step_is_starting(uint32_t job_id, uint32_t step_id) { starting_step_t starting_step; starting_step.job_id = job_id; starting_step.step_id = step_id; bool ret = false; slurm_mutex_lock(&conf->starting_steps_lock); if (list_find_first( conf->starting_steps, &_compare_starting_steps, &starting_step )) { ret = true; } slurm_mutex_unlock(&conf->starting_steps_lock); return ret; } /* Add this job to the list of jobs currently running their prolog */ static void _add_job_running_prolog(uint32_t job_id) { uint32_t *job_running_prolog; /* Add the job to a list of jobs whose prologs are running */ slurm_mutex_lock(&conf->prolog_running_lock); job_running_prolog = xmalloc(sizeof(uint32_t)); if (!job_running_prolog) { error("_add_job_running_prolog failed to allocate memory"); goto fail; } *job_running_prolog = job_id; if (!list_append(conf->prolog_running_jobs, job_running_prolog)) { error("_add_job_running_prolog failed to append job to list"); xfree(job_running_prolog); } fail: slurm_mutex_unlock(&conf->prolog_running_lock); } /* Remove this job from the list of jobs currently running their prolog */ static void _remove_job_running_prolog(uint32_t job_id) { ListIterator iter; uint32_t *job_running_prolog; bool found = false; slurm_mutex_lock(&conf->prolog_running_lock); iter = list_iterator_create(conf->prolog_running_jobs); while ((job_running_prolog = list_next(iter))) { if (*job_running_prolog == job_id) { job_running_prolog = list_remove(iter); xfree(job_running_prolog); found = true; pthread_cond_broadcast(&conf->prolog_running_cond); break; } } if (!found) error("_remove_job_running_prolog: job not found"); slurm_mutex_unlock(&conf->prolog_running_lock); } static int _match_jobid(void *listentry, void *key) { uint32_t *job0 = (uint32_t *)listentry; uint32_t *job1 = (uint32_t *)key; return (*job0 == *job1); } static int _prolog_is_running (uint32_t jobid) { int rc = 0; if (list_find_first (conf->prolog_running_jobs, (ListFindF) _match_jobid, &jobid)) rc 
= 1; return (rc); } /* Wait for the job's prolog to complete */ static void _wait_for_job_running_prolog(uint32_t job_id) { debug( "Waiting for job %d's prolog to complete", job_id); slurm_mutex_lock(&conf->prolog_running_lock); while (_prolog_is_running (job_id)) { pthread_cond_wait(&conf->prolog_running_cond, &conf->prolog_running_lock); } slurm_mutex_unlock(&conf->prolog_running_lock); debug( "Finished wait for job %d's prolog to complete", job_id); } static void _rpc_forward_data(slurm_msg_t *msg) { forward_data_msg_t *req = (forward_data_msg_t *)msg->data; uint32_t req_uid; struct sockaddr_un sa; int fd = -1, rc = 0; debug3("Entering _rpc_forward_data, address: %s, len: %u", req->address, req->len); /* sanity check */ if (strlen(req->address) > sizeof(sa.sun_path) - 1) { slurm_seterrno(EINVAL); rc = errno; goto done; } /* connect to specified address */ fd = socket(AF_UNIX, SOCK_STREAM, 0); if (fd < 0) { rc = errno; error("failed creating UNIX domain socket: %m"); goto done; } memset(&sa, 0, sizeof(sa)); sa.sun_family = AF_UNIX; strcpy(sa.sun_path, req->address); while (((rc = connect(fd, (struct sockaddr *)&sa, SUN_LEN(&sa))) < 0) && (errno == EINTR)); if (rc < 0) { rc = errno; debug2("failed connecting to specified socket '%s': %m", req->address); goto done; } req_uid = (uint32_t)g_slurm_auth_get_uid(msg->auth_cred, conf->auth_info); /* * although always in localhost, we still convert it to network * byte order, to make it consistent with pack/unpack. 
*/ req_uid = htonl(req_uid); safe_write(fd, &req_uid, sizeof(uint32_t)); req_uid = htonl(req->len); safe_write(fd, &req_uid, sizeof(uint32_t)); safe_write(fd, req->data, req->len); rwfail: done: if (fd >= 0){ close(fd); } slurm_send_rc_msg(msg, rc); } static void _launch_complete_add(uint32_t job_id) { int j, empty; slurm_mutex_lock(&job_state_mutex); empty = -1; for (j = 0; j < JOB_STATE_CNT; j++) { if (job_id == active_job_id[j]) break; if ((active_job_id[j] == 0) && (empty == -1)) empty = j; } if (j >= JOB_STATE_CNT || job_id != active_job_id[j]) { if (empty == -1) /* Discard oldest job */ empty = 0; for (j = empty + 1; j < JOB_STATE_CNT; j++) { active_job_id[j - 1] = active_job_id[j]; } active_job_id[JOB_STATE_CNT - 1] = 0; for (j = 0; j < JOB_STATE_CNT; j++) { if (active_job_id[j] == 0) { active_job_id[j] = job_id; break; } } } pthread_cond_signal(&job_state_cond); slurm_mutex_unlock(&job_state_mutex); _launch_complete_log("job add", job_id); } static void _launch_complete_log(char *type, uint32_t job_id) { #if 0 int j; info("active %s %u", type, job_id); slurm_mutex_lock(&job_state_mutex); for (j = 0; j < JOB_STATE_CNT; j++) { if (active_job_id[j] != 0) { info("active_job_id[%d]=%u", j, active_job_id[j]); } } slurm_mutex_unlock(&job_state_mutex); #endif } /* Test if we have a specific job ID still running */ static bool _launch_job_test(uint32_t job_id) { bool found = false; int j; slurm_mutex_lock(&job_state_mutex); for (j = 0; j < JOB_STATE_CNT; j++) { if (job_id == active_job_id[j]) { found = true; break; } } slurm_mutex_unlock(&job_state_mutex); return found; } static void _launch_complete_rm(uint32_t job_id) { int j; slurm_mutex_lock(&job_state_mutex); for (j = 0; j < JOB_STATE_CNT; j++) { if (job_id == active_job_id[j]) break; } if (j < JOB_STATE_CNT && job_id == active_job_id[j]) { for (j = j + 1; j < JOB_STATE_CNT; j++) { active_job_id[j - 1] = active_job_id[j]; } active_job_id[JOB_STATE_CNT - 1] = 0; } slurm_mutex_unlock(&job_state_mutex); 
_launch_complete_log("job remove", job_id); } static void _launch_complete_wait(uint32_t job_id) { int i, j, empty; time_t start = time(NULL); struct timeval now; struct timespec timeout; slurm_mutex_lock(&job_state_mutex); for (i = 0; ; i++) { empty = -1; for (j = 0; j < JOB_STATE_CNT; j++) { if (job_id == active_job_id[j]) break; if ((active_job_id[j] == 0) && (empty == -1)) empty = j; } if (j < JOB_STATE_CNT) /* Found job, ready to return */ break; if (difftime(time(NULL), start) <= 9) { /* Retry for 9 secs */ debug2("wait for launch of job %u before suspending it", job_id); gettimeofday(&now, NULL); timeout.tv_sec = now.tv_sec + 1; timeout.tv_nsec = now.tv_usec * 1000; pthread_cond_timedwait(&job_state_cond,&job_state_mutex, &timeout); continue; } if (empty == -1) /* Discard oldest job */ empty = 0; for (j = empty + 1; j < JOB_STATE_CNT; j++) { active_job_id[j - 1] = active_job_id[j]; } active_job_id[JOB_STATE_CNT - 1] = 0; for (j = 0; j < JOB_STATE_CNT; j++) { if (active_job_id[j] == 0) { active_job_id[j] = job_id; break; } } break; } slurm_mutex_unlock(&job_state_mutex); _launch_complete_log("job wait", job_id); } static bool _requeue_setup_env_fail(void) { static time_t config_update = 0; static bool requeue = false; if (config_update != conf->last_update) { char *sched_params = slurm_get_sched_params(); requeue = (sched_params && (strstr(sched_params, "no_env_cache") || strstr(sched_params, "requeue_setup_env_fail"))); xfree(sched_params); config_update = conf->last_update; } return requeue; }
./CrossVul/dataset_final_sorted/CWE-284/c/bad_4770_1
crossvul-cpp_data_bad_1822_0
/* * Derived from "arch/i386/kernel/process.c" * Copyright (C) 1995 Linus Torvalds * * Updated and modified by Cort Dougan (cort@cs.nmt.edu) and * Paul Mackerras (paulus@cs.anu.edu.au) * * PowerPC version * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/errno.h> #include <linux/sched.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/smp.h> #include <linux/stddef.h> #include <linux/unistd.h> #include <linux/ptrace.h> #include <linux/slab.h> #include <linux/user.h> #include <linux/elf.h> #include <linux/prctl.h> #include <linux/init_task.h> #include <linux/export.h> #include <linux/kallsyms.h> #include <linux/mqueue.h> #include <linux/hardirq.h> #include <linux/utsname.h> #include <linux/ftrace.h> #include <linux/kernel_stat.h> #include <linux/personality.h> #include <linux/random.h> #include <linux/hw_breakpoint.h> #include <linux/uaccess.h> #include <asm/pgtable.h> #include <asm/io.h> #include <asm/processor.h> #include <asm/mmu.h> #include <asm/prom.h> #include <asm/machdep.h> #include <asm/time.h> #include <asm/runlatch.h> #include <asm/syscalls.h> #include <asm/switch_to.h> #include <asm/tm.h> #include <asm/debug.h> #ifdef CONFIG_PPC64 #include <asm/firmware.h> #endif #include <asm/code-patching.h> #include <linux/kprobes.h> #include <linux/kdebug.h> /* Transactional Memory debug */ #ifdef TM_DEBUG_SW #define TM_DEBUG(x...) printk(KERN_INFO x) #else #define TM_DEBUG(x...) 
do { } while(0)
#endif

extern unsigned long _get_SP(void);

#ifndef CONFIG_SMP
/* Lazy FP/VMX/VSX/SPE state switching (UP only): the task whose state
 * currently lives in the CPU registers, or NULL. */
struct task_struct *last_task_used_math = NULL;
struct task_struct *last_task_used_altivec = NULL;
struct task_struct *last_task_used_vsx = NULL;
struct task_struct *last_task_used_spe = NULL;
#endif

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
void giveup_fpu_maybe_transactional(struct task_struct *tsk)
{
    /*
     * If we are saving the current thread's registers, and the
     * thread is in a transactional state, set the TIF_RESTORE_TM
     * bit so that we know to restore the registers before
     * returning to userspace.
     */
    if (tsk == current && tsk->thread.regs &&
        MSR_TM_ACTIVE(tsk->thread.regs->msr) &&
        !test_thread_flag(TIF_RESTORE_TM)) {
        tsk->thread.ckpt_regs.msr = tsk->thread.regs->msr;
        set_thread_flag(TIF_RESTORE_TM);
    }

    giveup_fpu(tsk);
}

void giveup_altivec_maybe_transactional(struct task_struct *tsk)
{
    /*
     * If we are saving the current thread's registers, and the
     * thread is in a transactional state, set the TIF_RESTORE_TM
     * bit so that we know to restore the registers before
     * returning to userspace.
     */
    if (tsk == current && tsk->thread.regs &&
        MSR_TM_ACTIVE(tsk->thread.regs->msr) &&
        !test_thread_flag(TIF_RESTORE_TM)) {
        tsk->thread.ckpt_regs.msr = tsk->thread.regs->msr;
        set_thread_flag(TIF_RESTORE_TM);
    }

    giveup_altivec(tsk);
}

#else
/* Without TM support the wrappers degenerate to the plain giveup calls. */
#define giveup_fpu_maybe_transactional(tsk) giveup_fpu(tsk)
#define giveup_altivec_maybe_transactional(tsk) giveup_altivec(tsk)
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */

#ifdef CONFIG_PPC_FPU
/*
 * Make sure the floating-point register state in the
 * the thread_struct is up to date for task tsk.
 */
void flush_fp_to_thread(struct task_struct *tsk)
{
    if (tsk->thread.regs) {
        /*
         * We need to disable preemption here because if we didn't,
         * another process could get scheduled after the regs->msr
         * test but before we have finished saving the FP registers
         * to the thread_struct.  That process could take over the
         * FPU, and then when we get scheduled again we would store
         * bogus values for the remaining FP registers.
         */
        preempt_disable();
        if (tsk->thread.regs->msr & MSR_FP) {
#ifdef CONFIG_SMP
            /*
             * This should only ever be called for current or
             * for a stopped child process.  Since we save away
             * the FP register state on context switch on SMP,
             * there is something wrong if a stopped child appears
             * to still have its FP state in the CPU registers.
             */
            BUG_ON(tsk != current);
#endif
            giveup_fpu_maybe_transactional(tsk);
        }
        preempt_enable();
    }
}
EXPORT_SYMBOL_GPL(flush_fp_to_thread);
#endif /* CONFIG_PPC_FPU */

void enable_kernel_fp(void)
{
    WARN_ON(preemptible());

#ifdef CONFIG_SMP
    if (current->thread.regs && (current->thread.regs->msr & MSR_FP))
        giveup_fpu_maybe_transactional(current);
    else
        giveup_fpu(NULL);   /* just enables FP for kernel */
#else
    giveup_fpu_maybe_transactional(last_task_used_math);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_fp);

#ifdef CONFIG_ALTIVEC
void enable_kernel_altivec(void)
{
    WARN_ON(preemptible());

#ifdef CONFIG_SMP
    if (current->thread.regs && (current->thread.regs->msr & MSR_VEC))
        giveup_altivec_maybe_transactional(current);
    else
        giveup_altivec_notask();
#else
    giveup_altivec_maybe_transactional(last_task_used_altivec);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_altivec);

/*
 * Make sure the VMX/Altivec register state in the
 * the thread_struct is up to date for task tsk.
 */
void flush_altivec_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		/* Same preemption-window reasoning as flush_fp_to_thread(). */
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_VEC) {
#ifdef CONFIG_SMP
			BUG_ON(tsk != current);
#endif
			giveup_altivec_maybe_transactional(tsk);
		}
		preempt_enable();
	}
}
EXPORT_SYMBOL_GPL(flush_altivec_to_thread);
#endif /* CONFIG_ALTIVEC */

#ifdef CONFIG_VSX
/*
 * Let the kernel use VSX; any live user VSX state is saved off first.
 */
void enable_kernel_vsx(void)
{
	WARN_ON(preemptible());

#ifdef CONFIG_SMP
	if (current->thread.regs && (current->thread.regs->msr & MSR_VSX))
		giveup_vsx(current);
	else
		giveup_vsx(NULL);	/* just enable vsx for kernel - force */
#else
	giveup_vsx(last_task_used_vsx);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_vsx);

/*
 * Save tsk's VSX state.  VSX spans the FP and VMX register files, so
 * both must be given up before the VSX-specific part.
 */
void giveup_vsx(struct task_struct *tsk)
{
	giveup_fpu_maybe_transactional(tsk);
	giveup_altivec_maybe_transactional(tsk);
	__giveup_vsx(tsk);
}
EXPORT_SYMBOL(giveup_vsx);

/*
 * Make sure the VSX register state in the thread_struct is up to date
 * for task tsk.
 */
void flush_vsx_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_VSX) {
#ifdef CONFIG_SMP
			BUG_ON(tsk != current);
#endif
			giveup_vsx(tsk);
		}
		preempt_enable();
	}
}
EXPORT_SYMBOL_GPL(flush_vsx_to_thread);
#endif /* CONFIG_VSX */

#ifdef CONFIG_SPE
/*
 * Let the kernel use the SPE unit; mirrors enable_kernel_fp() above.
 */
void enable_kernel_spe(void)
{
	WARN_ON(preemptible());

#ifdef CONFIG_SMP
	if (current->thread.regs && (current->thread.regs->msr & MSR_SPE))
		giveup_spe(current);
	else
		giveup_spe(NULL);	/* just enable SPE for kernel - force */
#else
	giveup_spe(last_task_used_spe);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_spe);

/*
 * Make sure the SPE register state (including SPEFSCR) in the
 * thread_struct is up to date for task tsk.
 */
void flush_spe_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_SPE) {
#ifdef CONFIG_SMP
			BUG_ON(tsk != current);
#endif
			tsk->thread.spefscr = mfspr(SPRN_SPEFSCR);
			giveup_spe(tsk);
		}
		preempt_enable();
	}
}
#endif /* CONFIG_SPE */

#ifndef CONFIG_SMP
/*
 * If we are doing lazy switching of CPU state (FP, altivec or SPE),
 * and the current task has some state, discard it.
 */
void discard_lazy_cpu_state(void)
{
	preempt_disable();
	if (last_task_used_math == current)
		last_task_used_math = NULL;
#ifdef CONFIG_ALTIVEC
	if (last_task_used_altivec == current)
		last_task_used_altivec = NULL;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
	if (last_task_used_vsx == current)
		last_task_used_vsx = NULL;
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
	if (last_task_used_spe == current)
		last_task_used_spe = NULL;
#endif
	preempt_enable();
}
#endif /* CONFIG_SMP */

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
/*
 * Report a hardware debug (IAC/DAC) match to userspace as a SIGTRAP,
 * unless a die notifier consumes the event first.
 */
void do_send_trap(struct pt_regs *regs, unsigned long address,
		  unsigned long error_code, int signal_code, int breakpt)
{
	siginfo_t info;

	current->thread.trap_nr = signal_code;
	if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
			11, SIGSEGV) == NOTIFY_STOP)
		return;

	/* Deliver the signal to userspace */
	info.si_signo = SIGTRAP;
	info.si_errno = breakpt;	/* breakpoint or watchpoint id */
	info.si_code = signal_code;
	info.si_addr = (void __user *)address;
	force_sig_info(SIGTRAP, &info, current);
}
#else	/* !CONFIG_PPC_ADV_DEBUG_REGS */
/*
 * Handle a DABR/DAWR breakpoint match: give notifiers and the debugger
 * a chance to claim it, otherwise disable the breakpoint and deliver
 * SIGTRAP/TRAP_HWBKPT to userspace.
 */
void do_break (struct pt_regs *regs, unsigned long address,
		    unsigned long error_code)
{
	siginfo_t info;

	current->thread.trap_nr = TRAP_HWBKPT;
	if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
			11, SIGSEGV) == NOTIFY_STOP)
		return;

	if (debugger_break_match(regs))
		return;

	/* Clear the breakpoint */
	hw_breakpoint_disable();

	/* Deliver the signal to userspace */
	info.si_signo = SIGTRAP;
	info.si_errno = 0;
	info.si_code = TRAP_HWBKPT;
	info.si_addr = (void __user *)address;
	force_sig_info(SIGTRAP, &info, current);
}
#endif	/* CONFIG_PPC_ADV_DEBUG_REGS */

/* The breakpoint currently programmed into each CPU's debug registers. */
static DEFINE_PER_CPU(struct arch_hw_breakpoint, current_brk);

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
/*
 * Set the debug registers back to their default "safe" values.
 */
static void set_debug_reg_defaults(struct thread_struct *thread)
{
	thread->debug.iac1 = thread->debug.iac2 = 0;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
	thread->debug.iac3 = thread->debug.iac4 = 0;
#endif
	thread->debug.dac1 = thread->debug.dac2 = 0;
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
	thread->debug.dvc1 = thread->debug.dvc2 = 0;
#endif
	thread->debug.dbcr0 = 0;
#ifdef CONFIG_BOOKE
	/*
	 * Force User/Supervisor bits to b11 (user-only MSR[PR]=1)
	 */
	thread->debug.dbcr1 = DBCR1_IAC1US | DBCR1_IAC2US |
			DBCR1_IAC3US | DBCR1_IAC4US;
	/*
	 * Force Data Address Compare User/Supervisor bits to be User-only
	 * (0b11 MSR[PR]=1) and set all other bits in DBCR2 register to be 0.
	 */
	thread->debug.dbcr2 = DBCR2_DAC1US | DBCR2_DAC2US;
#else
	thread->debug.dbcr1 = 0;
#endif
}

/*
 * Load the given saved debug-register state into the hardware debug
 * SPRs (IACn/DACn/DVCn/DBCRn).
 */
static void prime_debug_regs(struct debug_reg *debug)
{
	/*
	 * We could have inherited MSR_DE from userspace, since
	 * it doesn't get cleared on exception entry.  Make sure
	 * MSR_DE is clear before we enable any debug events.
	 */
	mtmsr(mfmsr() & ~MSR_DE);

	mtspr(SPRN_IAC1, debug->iac1);
	mtspr(SPRN_IAC2, debug->iac2);
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
	mtspr(SPRN_IAC3, debug->iac3);
	mtspr(SPRN_IAC4, debug->iac4);
#endif
	mtspr(SPRN_DAC1, debug->dac1);
	mtspr(SPRN_DAC2, debug->dac2);
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
	mtspr(SPRN_DVC1, debug->dvc1);
	mtspr(SPRN_DVC2, debug->dvc2);
#endif
	mtspr(SPRN_DBCR0, debug->dbcr0);
	mtspr(SPRN_DBCR1, debug->dbcr1);
#ifdef CONFIG_BOOKE
	mtspr(SPRN_DBCR2, debug->dbcr2);
#endif
}
/*
 * Unless neither the old or new thread are making use of the
 * debug registers, set the debug registers from the values
 * stored in the new thread.
 */
void switch_booke_debug_regs(struct debug_reg *new_debug)
{
	if ((current->thread.debug.dbcr0 & DBCR0_IDM)
		|| (new_debug->dbcr0 & DBCR0_IDM))
			prime_debug_regs(new_debug);
}
EXPORT_SYMBOL_GPL(switch_booke_debug_regs);
#else	/* !CONFIG_PPC_ADV_DEBUG_REGS */
#ifndef CONFIG_HAVE_HW_BREAKPOINT
/*
 * Clear any DABR/DAWR breakpoint for this thread and program the
 * (now empty) breakpoint into the hardware.
 */
static void set_debug_reg_defaults(struct thread_struct *thread)
{
	thread->hw_brk.address = 0;
	thread->hw_brk.type = 0;
	set_breakpoint(&thread->hw_brk);
}
#endif /* !CONFIG_HAVE_HW_BREAKPOINT */
#endif	/* CONFIG_PPC_ADV_DEBUG_REGS */

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
/* Book-E style: the data breakpoint lives in DAC1. */
static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
{
	mtspr(SPRN_DAC1, dabr);
#ifdef CONFIG_PPC_47x
	isync();
#endif
	return 0;
}
#elif defined(CONFIG_PPC_BOOK3S)
/* Book3S: classic DABR, with DABRX where the CPU supports it. */
static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
{
	mtspr(SPRN_DABR, dabr);
	if (cpu_has_feature(CPU_FTR_DABRX))
		mtspr(SPRN_DABRX, dabrx);
	return 0;
}
#else
/* No hardware data breakpoint support on this platform. */
static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
{
	return -EINVAL;
}
#endif

/*
 * Encode an arch_hw_breakpoint into DABR/DABRX values and program them,
 * going through the platform hook when one is provided.
 */
static inline int set_dabr(struct arch_hw_breakpoint *brk)
{
	unsigned long dabr, dabrx;

	dabr = brk->address | (brk->type & HW_BRK_TYPE_DABR);
	dabrx = ((brk->type >> 3) & 0x7);

	if (ppc_md.set_dabr)
		return ppc_md.set_dabr(dabr, dabrx);

	return __set_dabr(dabr, dabrx);
}

/*
 * Encode an arch_hw_breakpoint into DAWR/DAWRX values and program them.
 */
static inline int set_dawr(struct arch_hw_breakpoint *brk)
{
	unsigned long dawr, dawrx, mrd;

	dawr = brk->address;

	dawrx  = (brk->type & (HW_BRK_TYPE_READ | HW_BRK_TYPE_WRITE)) \
	                           << (63 - 58); /* read/write bits */
	dawrx |= ((brk->type & (HW_BRK_TYPE_TRANSLATE)) >> 2) \
	                           << (63 - 59); /* translate */
	dawrx |= (brk->type & (HW_BRK_TYPE_PRIV_ALL)) \
	                           >> 3; /* PRIM bits */
	/* dawr length is stored in field MDR bits 48:53.  Matches range in
	   doublewords (64 bits) biased by -1 eg. 0b000000=1DW and
	   0b111111=64DW.
	   brk->len is in bytes.
	   This aligns up to double word size, shifts and does the bias.
	*/
	mrd = ((brk->len + 7) >> 3) - 1;
	dawrx |= (mrd & 0x3f) << (63 - 53);

	if (ppc_md.set_dawr)
		return ppc_md.set_dawr(dawr, dawrx);
	mtspr(SPRN_DAWR, dawr);
	mtspr(SPRN_DAWRX, dawrx);
	return 0;
}

/*
 * Program brk into this CPU's watchpoint hardware (DAWR when
 * available, classic DABR otherwise) and record it in current_brk.
 * Caller must not be preemptible.
 */
void __set_breakpoint(struct arch_hw_breakpoint *brk)
{
	memcpy(this_cpu_ptr(&current_brk), brk, sizeof(*brk));

	if (cpu_has_feature(CPU_FTR_DAWR))
		set_dawr(brk);
	else
		set_dabr(brk);
}

/* Preemption-safe wrapper around __set_breakpoint(). */
void set_breakpoint(struct arch_hw_breakpoint *brk)
{
	preempt_disable();
	__set_breakpoint(brk);
	preempt_enable();
}

#ifdef CONFIG_PPC64
DEFINE_PER_CPU(struct cpu_usage, cpu_usage_array);
#endif

/* Field-by-field comparison of two breakpoint descriptions. */
static inline bool hw_brk_match(struct arch_hw_breakpoint *a,
			      struct arch_hw_breakpoint *b)
{
	if (a->address != b->address)
		return false;
	if (a->type != b->type)
		return false;
	if (a->len != b->len)
		return false;
	return true;
}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * Reclaim the transactional state of thr, preserving any FP/VMX state
 * that was already flushed to the thread_struct before the reclaim.
 */
static void tm_reclaim_thread(struct thread_struct *thr,
			      struct thread_info *ti, uint8_t cause)
{
	unsigned long msr_diff = 0;

	/*
	 * If FP/VSX registers have been already saved to the
	 * thread_struct, move them to the transact_fp array.
	 * We clear the TIF_RESTORE_TM bit since after the reclaim
	 * the thread will no longer be transactional.
	 */
	if (test_ti_thread_flag(ti, TIF_RESTORE_TM)) {
		msr_diff = thr->ckpt_regs.msr & ~thr->regs->msr;
		if (msr_diff & MSR_FP)
			memcpy(&thr->transact_fp, &thr->fp_state,
			       sizeof(struct thread_fp_state));
		if (msr_diff & MSR_VEC)
			memcpy(&thr->transact_vr, &thr->vr_state,
			       sizeof(struct thread_vr_state));
		clear_ti_thread_flag(ti, TIF_RESTORE_TM);
		msr_diff &= MSR_FP | MSR_VEC | MSR_VSX | MSR_FE0 | MSR_FE1;
	}

	tm_reclaim(thr, thr->regs->msr, cause);

	/* Having done the reclaim, we now have the checkpointed
	 * FP/VSX values in the registers.  These might be valid
	 * even if we have previously called enable_kernel_fp() or
	 * flush_fp_to_thread(), so update thr->regs->msr to
	 * indicate their current validity.
	 */
	thr->regs->msr |= msr_diff;
}

/* Reclaim the current task's transactional state. */
void tm_reclaim_current(uint8_t cause)
{
	tm_enable();
	tm_reclaim_thread(&current->thread, current_thread_info(), cause);
}

static inline void tm_reclaim_task(struct task_struct *tsk)
{
	/* We have to work out if we're switching from/to a task that's in the
	 * middle of a transaction.
	 *
	 * In switching we need to maintain a 2nd register state as
	 * oldtask->thread.ckpt_regs.  We tm_reclaim(oldproc); this saves the
	 * checkpointed (tbegin) state in ckpt_regs and saves the transactional
	 * (current) FPRs into oldtask->thread.transact_fpr[].
	 *
	 * We also context switch (save) TFHAR/TEXASR/TFIAR in here.
	 */
	struct thread_struct *thr = &tsk->thread;

	if (!thr->regs)
		return;

	/* Not transactional?  Skip the reclaim, just save the SPRs below. */
	if (!MSR_TM_ACTIVE(thr->regs->msr))
		goto out_and_saveregs;

	/* Stash the original thread MSR, as giveup_fpu et al will
	 * modify it.  We hold onto it to see whether the task used
	 * FP & vector regs.  If the TIF_RESTORE_TM flag is set,
	 * ckpt_regs.msr is already set.
	 */
	if (!test_ti_thread_flag(task_thread_info(tsk), TIF_RESTORE_TM))
		thr->ckpt_regs.msr = thr->regs->msr;

	TM_DEBUG("--- tm_reclaim on pid %d (NIP=%lx, "
		 "ccr=%lx, msr=%lx, trap=%lx)\n",
		 tsk->pid, thr->regs->nip,
		 thr->regs->ccr, thr->regs->msr,
		 thr->regs->trap);

	tm_reclaim_thread(thr, task_thread_info(tsk), TM_CAUSE_RESCHED);

	TM_DEBUG("--- tm_reclaim on pid %d complete\n",
		 tsk->pid);

out_and_saveregs:
	/* Always save the regs here, even if a transaction's not active.
	 * This context-switches a thread's TM info SPRs.  We do it here to
	 * be consistent with the restore path (in recheckpoint) which
	 * cannot happen later in _switch().
	 */
	tm_save_sprs(thr);
}

extern void __tm_recheckpoint(struct thread_struct *thread,
			      unsigned long orig_msr);

void tm_recheckpoint(struct thread_struct *thread,
		     unsigned long orig_msr)
{
	unsigned long flags;

	/* We really can't be interrupted here as the TEXASR registers can't
	 * change and later in the trecheckpoint code, we have a userspace R1.
	 * So let's hard disable over this region.
	 */
	local_irq_save(flags);
	hard_irq_disable();

	/* The TM SPRs are restored here, so that TEXASR.FS can be set
	 * before the trecheckpoint and no explosion occurs.
	 */
	tm_restore_sprs(thread);

	__tm_recheckpoint(thread, orig_msr);

	local_irq_restore(flags);
}

/*
 * Recheckpoint the transactional state of the task we are switching to,
 * reloading both its checkpointed and speculative FP/VMX/VSX state.
 */
static inline void tm_recheckpoint_new_task(struct task_struct *new)
{
	unsigned long msr;

	if (!cpu_has_feature(CPU_FTR_TM))
		return;

	/* Recheckpoint the registers of the thread we're about to switch to.
	 *
	 * If the task was using FP, we non-lazily reload both the original and
	 * the speculative FP register states.  This is because the kernel
	 * doesn't see if/when a TM rollback occurs, so if we take an FP
	 * unavailable later, we are unable to determine which set of FP regs
	 * need to be restored.
	 */
	if (!new->thread.regs)
		return;

	if (!MSR_TM_ACTIVE(new->thread.regs->msr)){
		tm_restore_sprs(&new->thread);
		return;
	}
	msr = new->thread.ckpt_regs.msr;
	/* Recheckpoint to restore original checkpointed register state. */
	TM_DEBUG("*** tm_recheckpoint of pid %d "
		 "(new->msr 0x%lx, new->origmsr 0x%lx)\n",
		 new->pid, new->thread.regs->msr, msr);

	/* This loads the checkpointed FP/VEC state, if used */
	tm_recheckpoint(&new->thread, msr);

	/* This loads the speculative FP/VEC state, if used */
	if (msr & MSR_FP) {
		do_load_up_transact_fpu(&new->thread);
		new->thread.regs->msr |=
			(MSR_FP | new->thread.fpexc_mode);
	}
#ifdef CONFIG_ALTIVEC
	if (msr & MSR_VEC) {
		do_load_up_transact_altivec(&new->thread);
		new->thread.regs->msr |= MSR_VEC;
	}
#endif
	/* We may as well turn on VSX too since all the state is restored now */
	if (msr & MSR_VSX)
		new->thread.regs->msr |= MSR_VSX;

	TM_DEBUG("*** tm_recheckpoint of pid %d complete "
		 "(kernel msr 0x%lx)\n",
		 new->pid, mfmsr());
}

/* Reclaim the outgoing task's transactional state on context switch. */
static inline void __switch_to_tm(struct task_struct *prev)
{
	if (cpu_has_feature(CPU_FTR_TM)) {
		tm_enable();
		tm_reclaim_task(prev);
	}
}

/*
 * This is called if we are on the way out to userspace and the
 * TIF_RESTORE_TM flag is set.
 * It checks if we need to reload
 * FP and/or vector state and does so if necessary.
 * If userspace is inside a transaction (whether active or
 * suspended) and FP/VMX/VSX instructions have ever been enabled
 * inside that transaction, then we have to keep them enabled
 * and keep the FP/VMX/VSX state loaded while ever the transaction
 * continues.  The reason is that if we didn't, and subsequently
 * got a FP/VMX/VSX unavailable interrupt inside a transaction,
 * we don't know whether it's the same transaction, and thus we
 * don't know which of the checkpointed state and the transactional
 * state to use.
 */
void restore_tm_state(struct pt_regs *regs)
{
	unsigned long msr_diff;

	clear_thread_flag(TIF_RESTORE_TM);
	if (!MSR_TM_ACTIVE(regs->msr))
		return;

	/* Bits enabled at checkpoint time but currently off need reloading. */
	msr_diff = current->thread.ckpt_regs.msr & ~regs->msr;
	msr_diff &= MSR_FP | MSR_VEC | MSR_VSX;
	if (msr_diff & MSR_FP) {
		fp_enable();
		load_fp_state(&current->thread.fp_state);
		regs->msr |= current->thread.fpexc_mode;
	}
	if (msr_diff & MSR_VEC) {
		vec_enable();
		load_vr_state(&current->thread.vr_state);
	}
	regs->msr |= msr_diff;
}

#else
#define tm_recheckpoint_new_task(new)
#define __switch_to_tm(prev)
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */

/*
 * Switch CPU context from task prev to task new; returns the task we
 * switched away from (the value produced by _switch).
 */
struct task_struct *__switch_to(struct task_struct *prev,
	struct task_struct *new)
{
	struct thread_struct *new_thread, *old_thread;
	struct task_struct *last;
#ifdef CONFIG_PPC_BOOK3S_64
	struct ppc64_tlb_batch *batch;
#endif

	WARN_ON(!irqs_disabled());

	/* Back up the TAR and DSCR across context switches.
	 * Note that the TAR is not available for use in the kernel.  (To
	 * provide this, the TAR should be backed up/restored on exception
	 * entry/exit instead, and be in pt_regs.  FIXME, this should be in
	 * pt_regs anyway (for debug).)
	 * Save the TAR and DSCR here before we do treclaim/trecheckpoint as
	 * these will change them.
	 */
	save_early_sprs(&prev->thread);

	/* Reclaim any in-flight transaction of the outgoing task. */
	__switch_to_tm(prev);

#ifdef CONFIG_SMP
	/* avoid complexity of lazy save/restore of fpu
	 * by just saving it every time we switch out if
	 * this task used the fpu during the last quantum.
	 *
	 * If it tries to use the fpu again, it'll trap and
	 * reload its fp regs.  So we don't have to do a restore
	 * every switch, just a save.
	 *  -- Cort
	 */
	if (prev->thread.regs && (prev->thread.regs->msr & MSR_FP))
		giveup_fpu(prev);
#ifdef CONFIG_ALTIVEC
	/*
	 * If the previous thread used altivec in the last quantum
	 * (thus changing altivec regs) then save them.
	 * We used to check the VRSAVE register but not all apps
	 * set it, so we don't rely on it now (and in fact we need
	 * to save & restore VSCR even if VRSAVE == 0).  -- paulus
	 *
	 * On SMP we always save/restore altivec regs just to avoid the
	 * complexity of changing processors.
	 *  -- Cort
	 */
	if (prev->thread.regs && (prev->thread.regs->msr & MSR_VEC))
		giveup_altivec(prev);
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
	if (prev->thread.regs && (prev->thread.regs->msr & MSR_VSX))
		/* VMX and FPU registers are already save here */
		__giveup_vsx(prev);
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
	/*
	 * If the previous thread used spe in the last quantum
	 * (thus changing spe regs) then save them.
	 *
	 * On SMP we always save/restore spe regs just to avoid the
	 * complexity of changing processors.
	 */
	if ((prev->thread.regs && (prev->thread.regs->msr & MSR_SPE)))
		giveup_spe(prev);
#endif /* CONFIG_SPE */

#else  /* CONFIG_SMP */
#ifdef CONFIG_ALTIVEC
	/* Avoid the trap.  On smp this never happens since
	 * we don't set last_task_used_altivec -- Cort
	 */
	if (new->thread.regs && last_task_used_altivec == new)
		new->thread.regs->msr |= MSR_VEC;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
	if (new->thread.regs && last_task_used_vsx == new)
		new->thread.regs->msr |= MSR_VSX;
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
	/* Avoid the trap.  On smp this never happens since
	 * we don't set last_task_used_spe
	 */
	if (new->thread.regs && last_task_used_spe == new)
		new->thread.regs->msr |= MSR_SPE;
#endif /* CONFIG_SPE */

#endif /* CONFIG_SMP */

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
	switch_booke_debug_regs(&new->thread.debug);
#else
/*
 * For PPC_BOOK3S_64, we use the hw-breakpoint interfaces that would
 * schedule DABR
 */
#ifndef CONFIG_HAVE_HW_BREAKPOINT
	/* Only reprogram the watchpoint hardware if it actually changed. */
	if (unlikely(!hw_brk_match(this_cpu_ptr(&current_brk), &new->thread.hw_brk)))
		__set_breakpoint(&new->thread.hw_brk);
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
#endif

	new_thread = &new->thread;
	old_thread = &current->thread;

#ifdef CONFIG_PPC64
	/*
	 * Collect processor utilization data per process
	 */
	if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
		struct cpu_usage *cu = this_cpu_ptr(&cpu_usage_array);
		long unsigned start_tb, current_tb;
		start_tb = old_thread->start_tb;
		cu->current_tb = current_tb = mfspr(SPRN_PURR);
		old_thread->accum_tb += (current_tb - start_tb);
		new_thread->start_tb = current_tb;
	}
#endif /* CONFIG_PPC64 */

#ifdef CONFIG_PPC_BOOK3S_64
	/* Flush any pending lazy TLB batch before switching stacks. */
	batch = this_cpu_ptr(&ppc64_tlb_batch);
	if (batch->active) {
		current_thread_info()->local_flags |= _TLF_LAZY_MMU;
		if (batch->index)
			__flush_tlb_pending(batch);
		batch->active = 0;
	}
#endif /* CONFIG_PPC_BOOK3S_64 */

	/*
	 * We can't take a PMU exception inside _switch() since there is a
	 * window where the kernel stack SLB and the kernel stack are out
	 * of sync. Hard disable here.
	 */
	hard_irq_disable();

	tm_recheckpoint_new_task(new);

	last = _switch(old_thread, new_thread);

#ifdef CONFIG_PPC_BOOK3S_64
	/* Re-activate the lazy TLB batch disabled above, if it was active. */
	if (current_thread_info()->local_flags & _TLF_LAZY_MMU) {
		current_thread_info()->local_flags &= ~_TLF_LAZY_MMU;
		batch = this_cpu_ptr(&ppc64_tlb_batch);
		batch->active = 1;
	}
#endif /* CONFIG_PPC_BOOK3S_64 */

	return last;
}

static int instructions_to_print = 16;

/*
 * Dump the instructions around regs->nip for an oops/debug report,
 * printing XXXXXXXX for words that can't be read.
 */
static void show_instructions(struct pt_regs *regs)
{
	int i;
	/* Start roughly 3/4 of the window before NIP so NIP is visible. */
	unsigned long pc = regs->nip - (instructions_to_print * 3 / 4 *
			sizeof(int));

	printk("Instruction dump:");

	for (i = 0; i < instructions_to_print; i++) {
		int instr;

		if (!(i % 8))
			printk("\n");

#if !defined(CONFIG_BOOKE)
		/* If executing with the IMMU off, adjust pc rather
		 * than print XXXXXXXX.
		 */
		if (!(regs->msr & MSR_IR))
			pc = (unsigned long)phys_to_virt(pc);
#endif

		if (!__kernel_text_address(pc) ||
		     probe_kernel_address((unsigned int __user *)pc, instr)) {
			printk(KERN_CONT "XXXXXXXX ");
		} else {
			/* Mark the faulting instruction with <...>. */
			if (regs->nip == pc)
				printk(KERN_CONT "<%08x> ", instr);
			else
				printk(KERN_CONT "%08x ", instr);
		}

		pc += sizeof(int);
	}

	printk("\n");
}

/* MSR bit names used by printbits() in show_regs(). */
static struct regbit {
	unsigned long bit;
	const char *name;
} msr_bits[] = {
#if defined(CONFIG_PPC64) && !defined(CONFIG_BOOKE)
	{MSR_SF,	"SF"},
	{MSR_HV,	"HV"},
#endif
	{MSR_VEC,	"VEC"},
	{MSR_VSX,	"VSX"},
#ifdef CONFIG_BOOKE
	{MSR_CE,	"CE"},
#endif
	{MSR_EE,	"EE"},
	{MSR_PR,	"PR"},
	{MSR_FP,	"FP"},
	{MSR_ME,	"ME"},
#ifdef CONFIG_BOOKE
	{MSR_DE,	"DE"},
#else
	{MSR_SE,	"SE"},
	{MSR_BE,	"BE"},
#endif
	{MSR_IR,	"IR"},
	{MSR_DR,	"DR"},
	{MSR_PMM,	"PMM"},
#ifndef CONFIG_BOOKE
	{MSR_RI,	"RI"},
	{MSR_LE,	"LE"},
#endif
	{0,		NULL}
};

/* Print the set bits of val as a comma-separated <NAME,...> list. */
static void printbits(unsigned long val, struct regbit *bits)
{
	const char *sep = "";

	printk("<");
	for (; bits->bit; ++bits)
		if (val & bits->bit) {
			printk("%s%s", sep, bits->name);
			sep = ",";
		}
	printk(">");
}

#ifdef CONFIG_PPC64
#define REG		"%016lx"
#define REGS_PER_LINE	4
#define LAST_VOLATILE	13
#else
#define REG		"%08lx"
#define REGS_PER_LINE	8
#define LAST_VOLATILE	12
#endif

void show_regs(struct
 pt_regs * regs)
{
	int i, trap;

	show_regs_print_info(KERN_DEFAULT);

	printk("NIP: "REG" LR: "REG" CTR: "REG"\n",
	       regs->nip, regs->link, regs->ctr);
	printk("REGS: %p TRAP: %04lx %s (%s)\n",
	       regs, regs->trap, print_tainted(), init_utsname()->release);
	printk("MSR: "REG" ", regs->msr);
	printbits(regs->msr, msr_bits);
	printk(" CR: %08lx XER: %08lx\n", regs->ccr, regs->xer);
	trap = TRAP(regs);
	/* NOTE(review): CFAR appears to be saved in the orig_gpr3 slot
	 * here — confirm against the exception entry code. */
	if ((regs->trap != 0xc00) && cpu_has_feature(CPU_FTR_CFAR))
		printk("CFAR: "REG" ", regs->orig_gpr3);
	if (trap == 0x200 || trap == 0x300 || trap == 0x600)
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
		printk("DEAR: "REG" ESR: "REG" ", regs->dar, regs->dsisr);
#else
		printk("DAR: "REG" DSISR: %08lx ", regs->dar, regs->dsisr);
#endif
#ifdef CONFIG_PPC64
	printk("SOFTE: %ld ", regs->softe);
#endif
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	if (MSR_TM_ACTIVE(regs->msr))
		printk("\nPACATMSCRATCH: %016llx ", get_paca()->tm_scratch);
#endif

	for (i = 0;  i < 32;  i++) {
		if ((i % REGS_PER_LINE) == 0)
			printk("\nGPR%02d: ", i);
		printk(REG " ", regs->gpr[i]);
		/* Only the volatile GPRs are valid on a partial-regs frame. */
		if (i == LAST_VOLATILE && !FULL_REGS(regs))
			break;
	}
	printk("\n");
#ifdef CONFIG_KALLSYMS
	/*
	 * Lookup NIP late so we have the best chance of getting the
	 * above info out without failing
	 */
	printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
	printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
#endif
	show_stack(current, (unsigned long *) regs->gpr[1]);
	if (!user_mode(regs))
		show_instructions(regs);
}

void exit_thread(void)
{
	discard_lazy_cpu_state();
}

/*
 * Reset per-thread state for exec: drop lazy FP/vector state and put
 * the debug registers back to their defaults.
 */
void flush_thread(void)
{
	discard_lazy_cpu_state();

#ifdef CONFIG_HAVE_HW_BREAKPOINT
	flush_ptrace_hw_breakpoint(current);
#else /* CONFIG_HAVE_HW_BREAKPOINT */
	set_debug_reg_defaults(&current->thread);
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
}

void
release_thread(struct task_struct *t)
{
}

/*
 * this gets called so that we can store coprocessor state into memory and
 * copy the current task into the new thread.
 */
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	/* Flush all live register state into src's thread_struct first. */
	flush_fp_to_thread(src);
	flush_altivec_to_thread(src);
	flush_vsx_to_thread(src);
	flush_spe_to_thread(src);
	/*
	 * Flush TM state out so we can copy it.  __switch_to_tm() does this
	 * flush but it removes the checkpointed state from the current CPU and
	 * transitions the CPU out of TM mode.  Hence we need to call
	 * tm_recheckpoint_new_task() (on the same task) to restore the
	 * checkpointed state back and the TM mode.
	 */
	__switch_to_tm(src);
	tm_recheckpoint_new_task(src);

	*dst = *src;

	clear_task_ebb(dst);

	return 0;
}

/*
 * Precompute the SLB VSID for the new task's kernel stack so _switch()
 * can install it cheaply (hash-MMU 64-bit only).
 */
static void setup_ksp_vsid(struct task_struct *p, unsigned long sp)
{
#ifdef CONFIG_PPC_STD_MMU_64
	unsigned long sp_vsid;
	unsigned long llp = mmu_psize_defs[mmu_linear_psize].sllp;

	if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
		sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_1T)
			<< SLB_VSID_SHIFT_1T;
	else
		sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_256M)
			<< SLB_VSID_SHIFT;
	sp_vsid |= SLB_VSID_KERNEL | llp;
	p->thread.ksp_vsid = sp_vsid;
#endif
}

/*
 * Copy a thread..
 */

/*
 * Copy architecture-specific thread state
 */
int copy_thread(unsigned long clone_flags, unsigned long usp,
		unsigned long kthread_arg, struct task_struct *p)
{
	struct pt_regs *childregs, *kregs;
	extern void ret_from_fork(void);
	extern void ret_from_kernel_thread(void);
	void (*f)(void);
	unsigned long sp = (unsigned long)task_stack_page(p) + THREAD_SIZE;

	/* Copy registers */
	sp -= sizeof(struct pt_regs);
	childregs = (struct pt_regs *) sp;
	if (unlikely(p->flags & PF_KTHREAD)) {
		/* kernel thread */
		struct thread_info *ti = (void *)task_stack_page(p);
		memset(childregs, 0, sizeof(struct pt_regs));
		childregs->gpr[1] = sp + sizeof(struct pt_regs);
		/* function */
		if (usp)
			childregs->gpr[14] = ppc_function_entry((void *)usp);
#ifdef CONFIG_PPC64
		clear_tsk_thread_flag(p, TIF_32BIT);
		childregs->softe = 1;
#endif
		childregs->gpr[15] = kthread_arg;
		p->thread.regs = NULL;	/* no user register state */
		ti->flags |= _TIF_RESTOREALL;
		f = ret_from_kernel_thread;
	} else {
		/* user thread */
		struct pt_regs *regs = current_pt_regs();
		CHECK_FULL_REGS(regs);
		*childregs = *regs;
		if (usp)
			childregs->gpr[1] = usp;
		p->thread.regs = childregs;
		childregs->gpr[3] = 0;  /* Result from fork() */
		if (clone_flags & CLONE_SETTLS) {
#ifdef CONFIG_PPC64
			if (!is_32bit_task())
				childregs->gpr[13] = childregs->gpr[6];
			else
#endif
				childregs->gpr[2] = childregs->gpr[6];
		}

		f = ret_from_fork;
	}
	sp -= STACK_FRAME_OVERHEAD;

	/*
	 * The way this works is that at some point in the future
	 * some task will call _switch to switch to the new task.
	 * That will pop off the stack frame created below and start
	 * the new task running at ret_from_fork.  The new task will
	 * do some house keeping and then return from the fork or clone
	 * system call, using the stack frame created above.
	 */
	((unsigned long *)sp)[0] = 0;
	sp -= sizeof(struct pt_regs);
	kregs = (struct pt_regs *) sp;
	sp -= STACK_FRAME_OVERHEAD;
	p->thread.ksp = sp;
#ifdef CONFIG_PPC32
	p->thread.ksp_limit = (unsigned long)task_stack_page(p) +
				_ALIGN_UP(sizeof(struct thread_info), 16);
#endif
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	p->thread.ptrace_bps[0] = NULL;
#endif

	p->thread.fp_save_area = NULL;
#ifdef CONFIG_ALTIVEC
	p->thread.vr_save_area = NULL;
#endif

	setup_ksp_vsid(p, sp);

#ifdef CONFIG_PPC64
	if (cpu_has_feature(CPU_FTR_DSCR)) {
		p->thread.dscr_inherit = current->thread.dscr_inherit;
		p->thread.dscr = current->thread.dscr;
	}
	if (cpu_has_feature(CPU_FTR_HAS_PPR))
		p->thread.ppr = INIT_PPR;
#endif
	/* The new thread starts executing at f (set above). */
	kregs->nip = ppc_function_entry(f);
	return 0;
}

/*
 * Set up a thread for executing a new program
 */
void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
{
#ifdef CONFIG_PPC64
	unsigned long load_addr = regs->gpr[2];	/* saved by ELF_PLAT_INIT */
#endif

	/*
	 * If we exec out of a kernel thread then thread.regs will not be
	 * set.  Do it now.
	 */
	if (!current->thread.regs) {
		struct pt_regs *regs = task_stack_page(current) + THREAD_SIZE;
		current->thread.regs = regs - 1;
	}

	memset(regs->gpr, 0, sizeof(regs->gpr));
	regs->ctr = 0;
	regs->link = 0;
	regs->xer = 0;
	regs->ccr = 0;
	regs->gpr[1] = sp;

	/*
	 * We have just cleared all the nonvolatile GPRs, so make
	 * FULL_REGS(regs) return true.  This is necessary to allow
	 * ptrace to examine the thread immediately after exec.
	 */
	regs->trap &= ~1UL;

#ifdef CONFIG_PPC32
	regs->mq = 0;
	regs->nip = start;
	regs->msr = MSR_USER;
#else
	if (!is_32bit_task()) {
		unsigned long entry;

		if (is_elf2_task()) {
			/* Look ma, no function descriptors! */
			entry = start;

			/*
			 * Ulrich says:
			 *   The latest iteration of the ABI requires that when
			 *   calling a function (at its global entry point),
			 *   the caller must ensure r12 holds the entry point
			 *   address (so that the function can quickly
			 *   establish addressability).
			 */
			regs->gpr[12] = start;
			/* Make sure that's restored on entry to userspace. */
			set_thread_flag(TIF_RESTOREALL);
		} else {
			unsigned long toc;

			/* start is a relocated pointer to the function
			 * descriptor for the elf _start routine.  The first
			 * entry in the function descriptor is the entry
			 * address of _start and the second entry is the TOC
			 * value we need to use.
			 */
			__get_user(entry, (unsigned long __user *)start);
			__get_user(toc, (unsigned long __user *)start+1);

			/* Check whether the e_entry function descriptor entries
			 * need to be relocated before we can use them.
			 */
			if (load_addr != 0) {
				entry += load_addr;
				toc   += load_addr;
			}
			regs->gpr[2] = toc;
		}
		regs->nip = entry;
		regs->msr = MSR_USER64;
	} else {
		regs->nip = start;
		regs->gpr[2] = 0;
		regs->msr = MSR_USER32;
	}
#endif
	discard_lazy_cpu_state();
#ifdef CONFIG_VSX
	current->thread.used_vsr = 0;
#endif
	/* Clear all FP/vector/SPE state for the fresh program image. */
	memset(&current->thread.fp_state, 0, sizeof(current->thread.fp_state));
	current->thread.fp_save_area = NULL;
#ifdef CONFIG_ALTIVEC
	memset(&current->thread.vr_state, 0, sizeof(current->thread.vr_state));
	current->thread.vr_state.vscr.u[3] = 0x00010000; /* Java mode disabled */
	current->thread.vr_save_area = NULL;
	current->thread.vrsave = 0;
	current->thread.used_vr = 0;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
	memset(current->thread.evr, 0, sizeof(current->thread.evr));
	current->thread.acc = 0;
	current->thread.spefscr = 0;
	current->thread.used_spe = 0;
#endif /* CONFIG_SPE */
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	if (cpu_has_feature(CPU_FTR_TM))
		regs->msr |= MSR_TM;
	current->thread.tm_tfhar = 0;
	current->thread.tm_texasr = 0;
	current->thread.tm_tfiar = 0;
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
}
EXPORT_SYMBOL(start_thread);

#define PR_FP_ALL_EXCEPT (PR_FP_EXC_DIV | PR_FP_EXC_OVF | PR_FP_EXC_UND \
		| PR_FP_EXC_RES | PR_FP_EXC_INV)

/*
 * prctl(PR_SET_FPEXC): set the FP exception mode for tsk.
 */
int set_fpexc_mode(struct task_struct *tsk, unsigned int val)
{
	struct pt_regs *regs = tsk->thread.regs;

	/* This is a bit hairy.
	 * If we are an SPE enabled  processor
	 * (have embedded fp) we store the IEEE exception enable flags in
	 * fpexc_mode.  fpexc_mode is also used for setting FP exception
	 * mode (asyn, precise, disabled) for 'Classic' FP. */
	if (val & PR_FP_EXC_SW_ENABLE) {
#ifdef CONFIG_SPE
		if (cpu_has_feature(CPU_FTR_SPE)) {
			/*
			 * When the sticky exception bits are set
			 * directly by userspace, it must call prctl
			 * with PR_GET_FPEXC (with PR_FP_EXC_SW_ENABLE
			 * in the existing prctl settings) or
			 * PR_SET_FPEXC (with PR_FP_EXC_SW_ENABLE in
			 * the bits being set).  <fenv.h> functions
			 * saving and restoring the whole
			 * floating-point environment need to do so
			 * anyway to restore the prctl settings from
			 * the saved environment.
			 */
			tsk->thread.spefscr_last = mfspr(SPRN_SPEFSCR);
			tsk->thread.fpexc_mode = val &
				(PR_FP_EXC_SW_ENABLE | PR_FP_ALL_EXCEPT);
			return 0;
		} else {
			return -EINVAL;
		}
#else
		return -EINVAL;
#endif
	}

	/* on a CONFIG_SPE this does not hurt us.  The bits that
	 * __pack_fe01 use do not overlap with bits used for
	 * PR_FP_EXC_SW_ENABLE.  Additionally, the MSR[FE0,FE1] bits
	 * on CONFIG_SPE implementations are reserved so writing to
	 * them does not change anything */
	if (val > PR_FP_EXC_PRECISE)
		return -EINVAL;
	tsk->thread.fpexc_mode = __pack_fe01(val);
	if (regs != NULL && (regs->msr & MSR_FP) != 0)
		regs->msr = (regs->msr & ~(MSR_FE0|MSR_FE1))
			| tsk->thread.fpexc_mode;
	return 0;
}

/*
 * prctl(PR_GET_FPEXC): report the FP exception mode of tsk to userspace
 * via the adr pointer.
 */
int get_fpexc_mode(struct task_struct *tsk, unsigned long adr)
{
	unsigned int val;

	if (tsk->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE)
#ifdef CONFIG_SPE
		if (cpu_has_feature(CPU_FTR_SPE)) {
			/*
			 * When the sticky exception bits are set
			 * directly by userspace, it must call prctl
			 * with PR_GET_FPEXC (with PR_FP_EXC_SW_ENABLE
			 * in the existing prctl settings) or
			 * PR_SET_FPEXC (with PR_FP_EXC_SW_ENABLE in
			 * the bits being set).  <fenv.h> functions
			 * saving and restoring the whole
			 * floating-point environment need to do so
			 * anyway to restore the prctl settings from
			 * the saved environment.
			 */
			tsk->thread.spefscr_last = mfspr(SPRN_SPEFSCR);
			val = tsk->thread.fpexc_mode;
		} else
			return -EINVAL;
#else
		return -EINVAL;
#endif
	else
		val = __unpack_fe01(tsk->thread.fpexc_mode);
	return put_user(val, (unsigned int __user *) adr);
}

/*
 * prctl(PR_SET_ENDIAN): set the endian mode (MSR[LE]) of tsk's
 * userspace registers, if the CPU supports the requested mode.
 */
int set_endian(struct task_struct *tsk, unsigned int val)
{
	struct pt_regs *regs = tsk->thread.regs;

	if ((val == PR_ENDIAN_LITTLE && !cpu_has_feature(CPU_FTR_REAL_LE)) ||
	    (val == PR_ENDIAN_PPC_LITTLE && !cpu_has_feature(CPU_FTR_PPC_LE)))
		return -EINVAL;

	if (regs == NULL)
		return -EINVAL;

	if (val == PR_ENDIAN_BIG)
		regs->msr &= ~MSR_LE;
	else if (val == PR_ENDIAN_LITTLE || val == PR_ENDIAN_PPC_LITTLE)
		regs->msr |= MSR_LE;
	else
		return -EINVAL;

	return 0;
}

/*
 * prctl(PR_GET_ENDIAN): report tsk's endian mode to userspace via adr.
 */
int get_endian(struct task_struct *tsk, unsigned long adr)
{
	struct pt_regs *regs = tsk->thread.regs;
	unsigned int val;

	if (!cpu_has_feature(CPU_FTR_PPC_LE) &&
	    !cpu_has_feature(CPU_FTR_REAL_LE))
		return -EINVAL;

	if (regs == NULL)
		return -EINVAL;

	if (regs->msr & MSR_LE) {
		if (cpu_has_feature(CPU_FTR_REAL_LE))
			val = PR_ENDIAN_LITTLE;
		else
			val = PR_ENDIAN_PPC_LITTLE;
	} else
		val = PR_ENDIAN_BIG;

	return put_user(val, (unsigned int __user *)adr);
}

/* prctl(PR_SET_UNALIGN): record the unaligned-access control setting. */
int set_unalign_ctl(struct task_struct *tsk, unsigned int val)
{
	tsk->thread.align_ctl = val;
	return 0;
}

/* prctl(PR_GET_UNALIGN): report the unaligned-access control setting. */
int get_unalign_ctl(struct task_struct *tsk, unsigned long adr)
{
	return put_user(tsk->thread.align_ctl, (unsigned int __user *)adr);
}

/*
 * Check whether sp points at least nbytes into one of this CPU's
 * hard/soft IRQ stacks.
 */
static inline int valid_irq_stack(unsigned long sp, struct task_struct *p,
				  unsigned long nbytes)
{
	unsigned long stack_page;
	unsigned long cpu = task_cpu(p);

	/*
	 * Avoid crashing if the stack has overflowed and corrupted
	 * task_cpu(p), which is in the thread_info struct.
*/
	if (cpu < NR_CPUS && cpu_possible(cpu)) {
		stack_page = (unsigned long) hardirq_ctx[cpu];
		if (sp >= stack_page + sizeof(struct thread_struct)
		    && sp <= stack_page + THREAD_SIZE - nbytes)
			return 1;

		stack_page = (unsigned long) softirq_ctx[cpu];
		if (sp >= stack_page + sizeof(struct thread_struct)
		    && sp <= stack_page + THREAD_SIZE - nbytes)
			return 1;
	}
	return 0;
}

/*
 * Return 1 if 'sp' is a plausible stack pointer for task 'p' with at
 * least 'nbytes' of room: either inside the task's own kernel stack or
 * inside one of the IRQ stacks (see valid_irq_stack above).
 */
int validate_sp(unsigned long sp, struct task_struct *p,
		unsigned long nbytes)
{
	unsigned long stack_page = (unsigned long)task_stack_page(p);

	if (sp >= stack_page + sizeof(struct thread_struct)
	    && sp <= stack_page + THREAD_SIZE - nbytes)
		return 1;

	return valid_irq_stack(sp, p, nbytes);
}

EXPORT_SYMBOL(validate_sp);

/*
 * Walk the saved stack frames of a sleeping task and return the first
 * return address that is not inside the scheduler, i.e. where the task
 * is waiting.  Returns 0 for the current/running task or on any invalid
 * frame; gives up after 16 frames.
 */
unsigned long get_wchan(struct task_struct *p)
{
	unsigned long ip, sp;
	int count = 0;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;

	sp = p->thread.ksp;
	if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD))
		return 0;

	do {
		/* Follow the back-chain pointer stored at the frame base. */
		sp = *(unsigned long *)sp;
		if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD))
			return 0;
		/* Skip the first frame: its LR save slot is not yet valid. */
		if (count > 0) {
			ip = ((unsigned long *)sp)[STACK_FRAME_LR_SAVE];
			if (!in_sched_functions(ip))
				return ip;
		}
	} while (count++ < 16);
	return 0;
}

static int kstack_depth_to_print = CONFIG_PRINT_STACK_DEPTH;

/*
 * Print a kernel call trace for 'tsk' starting from 'stack' (or from
 * the task's current stack pointer when 'stack' is NULL/0).  Exception
 * frames are detected via the STACK_FRAME_REGS_MARKER and reported
 * with their trap number, NIP and LR.
 */
void show_stack(struct task_struct *tsk, unsigned long *stack)
{
	unsigned long sp, ip, lr, newsp;
	int count = 0;
	int firstframe = 1;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	int curr_frame = current->curr_ret_stack;
	extern void return_to_handler(void);
	unsigned long rth = (unsigned long)return_to_handler;
#endif

	sp = (unsigned long) stack;
	if (tsk == NULL)
		tsk = current;
	if (sp == 0) {
		if (tsk == current)
			sp = current_stack_pointer();
		else
			sp = tsk->thread.ksp;
	}

	lr = 0;
	printk("Call Trace:\n");
	do {
		if (!validate_sp(sp, tsk, STACK_FRAME_OVERHEAD))
			return;

		stack = (unsigned long *) sp;
		newsp = stack[0];
		ip = stack[STACK_FRAME_LR_SAVE];
		if (!firstframe || ip != lr) {
			printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
			/* Frames hijacked by the graph tracer show
			 * return_to_handler; print the real return
			 * address saved on the ret_stack instead. */
			if ((ip == rth) && curr_frame >= 0) {
				printk(" (%pS)",
				       (void *)current->ret_stack[curr_frame].ret);
				curr_frame--;
			}
#endif
			if (firstframe)
				printk(" (unreliable)");
			printk("\n");
		}
		firstframe = 0;

		/*
		 * See if this is an exception frame.
		 * We look for the "regshere" marker in the current frame.
		 */
		if (validate_sp(sp, tsk, STACK_INT_FRAME_SIZE)
		    && stack[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) {
			struct pt_regs *regs = (struct pt_regs *)
				(sp + STACK_FRAME_OVERHEAD);
			lr = regs->link;
			printk("--- interrupt: %lx at %pS\n LR = %pS\n",
			       regs->trap, (void *)regs->nip, (void *)lr);
			firstframe = 1;
		}

		sp = newsp;
	} while (count++ < kstack_depth_to_print);
}

#ifdef CONFIG_PPC64
/* Called with hard IRQs off */
/* Set the CTRL run latch to tell the hypervisor/hardware this thread
 * is doing useful work; mirrored in the thread-local flag. */
void notrace __ppc64_runlatch_on(void)
{
	struct thread_info *ti = current_thread_info();
	unsigned long ctrl;

	ctrl = mfspr(SPRN_CTRLF);
	ctrl |= CTRL_RUNLATCH;
	mtspr(SPRN_CTRLT, ctrl);

	ti->local_flags |= _TLF_RUNLATCH;
}

/* Called with hard IRQs off */
/* Clear the run latch (e.g. when idling); flag is cleared first so the
 * state never claims the latch is on while the SPR write is pending. */
void notrace __ppc64_runlatch_off(void)
{
	struct thread_info *ti = current_thread_info();
	unsigned long ctrl;

	ti->local_flags &= ~_TLF_RUNLATCH;

	ctrl = mfspr(SPRN_CTRLF);
	ctrl &= ~CTRL_RUNLATCH;
	mtspr(SPRN_CTRLT, ctrl);
}
#endif /* CONFIG_PPC64 */

/*
 * Randomize the initial stack pointer within one page (unless ASLR is
 * disabled), then align it to 16 bytes as the ABI requires.
 */
unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() & ~PAGE_MASK;
	return sp & ~0xf;
}

/* Page-aligned random offset for the heap start: up to 8MB for 32-bit
 * tasks, up to 1GB for 64-bit tasks. */
static inline unsigned long brk_rnd(void)
{
	unsigned long rnd = 0;

	/* 8MB for 32bit, 1GB for 64bit */
	if (is_32bit_task())
		rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
	else
		rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));

	return rnd << PAGE_SHIFT;
}

/*
 * Pick a randomized brk (heap start) for a new process image; never
 * returns an address below the current mm->brk.
 */
unsigned long arch_randomize_brk(struct mm_struct *mm)
{
	unsigned long base = mm->brk;
	unsigned long ret;

#ifdef CONFIG_PPC_STD_MMU_64
	/*
	 * If we are using 1TB segments and we are allowed to randomise
	 * the heap, we can put it above 1TB so it is backed by a 1TB
	 * segment. Otherwise the heap will be in the bottom 1TB
	 * which always uses 256MB segments and this may result in a
	 * performance penalty.
	 */
	if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
		base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
#endif

	ret = PAGE_ALIGN(base + brk_rnd());

	if (ret < mm->brk)
		return mm->brk;

	return ret;
}
./CrossVul/dataset_final_sorted/CWE-284/c/bad_1822_0
crossvul-cpp_data_bad_1571_6
/* Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ /* mysqldump.c - Dump a tables contents and format to an ASCII file ** ** The author's original notes follow :- ** ** AUTHOR: Igor Romanenko (igor@frog.kiev.ua) ** DATE: December 3, 1994 ** WARRANTY: None, expressed, impressed, implied ** or other ** STATUS: Public domain ** Adapted and optimized for MySQL by ** Michael Widenius, Sinisa Milivojevic, Jani Tolonen ** -w --where added 9/10/98 by Jim Faucette ** slave code by David Saez Padros <david@ols.es> ** master/autocommit code by Brian Aker <brian@tangent.org> ** SSL by ** Andrei Errapart <andreie@no.spam.ee> ** Tõnu Samuel <tonu@please.do.not.remove.this.spam.ee> ** XML by Gary Huntress <ghuntress@mediaone.net> 10/10/01, cleaned up ** and adapted to mysqldump 05/11/01 by Jani Tolonen ** Added --single-transaction option 06/06/2002 by Peter Zaitsev ** 10 Jun 2003: SET NAMES and --no-set-names by Alexander Barkov */ #define DUMP_VERSION "10.13" #include <my_global.h> #include <my_sys.h> #include <my_user.h> #include <m_string.h> #include <m_ctype.h> #include <hash.h> #include <stdarg.h> #include "client_priv.h" #include "my_default.h" #include "mysql.h" #include "mysql_version.h" #include "mysqld_error.h" #include <welcome_copyright_notice.h> /* ORACLE_WELCOME_COPYRIGHT_NOTICE */ /* Exit codes */ #define EX_USAGE 
1 #define EX_MYSQLERR 2 #define EX_CONSCHECK 3 #define EX_EOM 4 #define EX_EOF 5 /* ferror for output file was got */ #define EX_ILLEGAL_TABLE 6 /* index into 'show fields from table' */ #define SHOW_FIELDNAME 0 #define SHOW_TYPE 1 #define SHOW_NULL 2 #define SHOW_DEFAULT 4 #define SHOW_EXTRA 5 /* Size of buffer for dump's select query */ #define QUERY_LENGTH 1536 /* Size of comment buffer. */ #define COMMENT_LENGTH 2048 /* ignore table flags */ #define IGNORE_NONE 0x00 /* no ignore */ #define IGNORE_DATA 0x01 /* don't dump data for this table */ static void add_load_option(DYNAMIC_STRING *str, const char *option, const char *option_value); static ulong find_set(TYPELIB *lib, const char *x, uint length, char **err_pos, uint *err_len); static char *alloc_query_str(ulong size); static void field_escape(DYNAMIC_STRING* in, const char *from); static my_bool verbose= 0, opt_no_create_info= 0, opt_no_data= 0, quick= 1, extended_insert= 1, lock_tables= 1, opt_force= 0, flush_logs= 0, flush_privileges= 0, opt_drop=1,opt_keywords=0,opt_lock=1,opt_compress=0, create_options=1,opt_quoted=0,opt_databases=0, opt_alldbs=0,opt_create_db=0,opt_lock_all_tables=0, opt_set_charset=0, opt_dump_date=1, opt_autocommit=0,opt_disable_keys=1,opt_xml=0, opt_delete_master_logs=0, tty_password=0, opt_single_transaction=0, opt_comments= 0, opt_compact= 0, opt_hex_blob=0, opt_order_by_primary=0, opt_ignore=0, opt_complete_insert= 0, opt_drop_database= 0, opt_replace_into= 0, opt_dump_triggers= 0, opt_routines=0, opt_tz_utc=1, opt_slave_apply= 0, opt_include_master_host_port= 0, opt_events= 0, opt_comments_used= 0, opt_alltspcs=0, opt_notspcs= 0, opt_drop_trigger= 0; static my_bool insert_pat_inited= 0, debug_info_flag= 0, debug_check_flag= 0; static ulong opt_max_allowed_packet, opt_net_buffer_length; static MYSQL mysql_connection,*mysql=0; static DYNAMIC_STRING insert_pat; static char *opt_password=0,*current_user=0, *current_host=0,*path=0,*fields_terminated=0, *lines_terminated=0, 
*enclosed=0, *opt_enclosed=0, *escaped=0, *where=0, *order_by=0, *opt_compatible_mode_str= 0, *err_ptr= 0, *opt_ignore_error= 0, *log_error_file= NULL; static char **defaults_argv= 0; static char compatible_mode_normal_str[255]; /* Server supports character_set_results session variable? */ static my_bool server_supports_switching_charsets= TRUE; static ulong opt_compatible_mode= 0; #define MYSQL_OPT_MASTER_DATA_EFFECTIVE_SQL 1 #define MYSQL_OPT_MASTER_DATA_COMMENTED_SQL 2 #define MYSQL_OPT_SLAVE_DATA_EFFECTIVE_SQL 1 #define MYSQL_OPT_SLAVE_DATA_COMMENTED_SQL 2 static uint opt_mysql_port= 0, opt_master_data; static uint opt_slave_data; static uint my_end_arg; static char * opt_mysql_unix_port=0; static char *opt_bind_addr = NULL; static int first_error=0; static DYNAMIC_STRING extended_row; #include <sslopt-vars.h> FILE *md_result_file= 0; FILE *stderror_file=0; const char *set_gtid_purged_mode_names[]= {"OFF", "AUTO", "ON", NullS}; static TYPELIB set_gtid_purged_mode_typelib= {array_elements(set_gtid_purged_mode_names) -1, "", set_gtid_purged_mode_names, NULL}; static enum enum_set_gtid_purged_mode { SET_GTID_PURGED_OFF= 0, SET_GTID_PURGED_AUTO =1, SET_GTID_PURGED_ON=2 } opt_set_gtid_purged_mode= SET_GTID_PURGED_AUTO; #if defined (_WIN32) && !defined (EMBEDDED_LIBRARY) static char *shared_memory_base_name=0; #endif static uint opt_protocol= 0; static char *opt_plugin_dir= 0, *opt_default_auth= 0; DYNAMIC_ARRAY ignore_error; static int parse_ignore_error(); /* Dynamic_string wrapper functions. In this file use these wrappers, they will terminate the process if there is an allocation failure. 
*/ static void init_dynamic_string_checked(DYNAMIC_STRING *str, const char *init_str, uint init_alloc, uint alloc_increment); static void dynstr_append_checked(DYNAMIC_STRING* dest, const char* src); static void dynstr_set_checked(DYNAMIC_STRING *str, const char *init_str); static void dynstr_append_mem_checked(DYNAMIC_STRING *str, const char *append, uint length); static void dynstr_realloc_checked(DYNAMIC_STRING *str, ulong additional_size); /* Constant for detection of default value of default_charset. If default_charset is equal to mysql_universal_client_charset, then it is the default value which assigned at the very beginning of main(). */ static const char *mysql_universal_client_charset= MYSQL_UNIVERSAL_CLIENT_CHARSET; static char *default_charset; static CHARSET_INFO *charset_info= &my_charset_latin1; const char *default_dbug_option="d:t:o,/tmp/mysqldump.trace"; /* have we seen any VIEWs during table scanning? */ my_bool seen_views= 0; const char *compatible_mode_names[]= { "MYSQL323", "MYSQL40", "POSTGRESQL", "ORACLE", "MSSQL", "DB2", "MAXDB", "NO_KEY_OPTIONS", "NO_TABLE_OPTIONS", "NO_FIELD_OPTIONS", "ANSI", NullS }; #define MASK_ANSI_QUOTES \ (\ (1<<2) | /* POSTGRESQL */\ (1<<3) | /* ORACLE */\ (1<<4) | /* MSSQL */\ (1<<5) | /* DB2 */\ (1<<6) | /* MAXDB */\ (1<<10) /* ANSI */\ ) TYPELIB compatible_mode_typelib= {array_elements(compatible_mode_names) - 1, "", compatible_mode_names, NULL}; HASH ignore_table; static struct my_option my_long_options[] = { {"all-databases", 'A', "Dump all the databases. 
This will be same as --databases with all databases selected.", &opt_alldbs, &opt_alldbs, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"all-tablespaces", 'Y', "Dump all the tablespaces.", &opt_alltspcs, &opt_alltspcs, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"no-tablespaces", 'y', "Do not dump any tablespace information.", &opt_notspcs, &opt_notspcs, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"add-drop-database", OPT_DROP_DATABASE, "Add a DROP DATABASE before each create.", &opt_drop_database, &opt_drop_database, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"add-drop-table", OPT_DROP, "Add a DROP TABLE before each create.", &opt_drop, &opt_drop, 0, GET_BOOL, NO_ARG, 1, 0, 0, 0, 0, 0}, {"add-drop-trigger", 0, "Add a DROP TRIGGER before each create.", &opt_drop_trigger, &opt_drop_trigger, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"add-locks", OPT_LOCKS, "Add locks around INSERT statements.", &opt_lock, &opt_lock, 0, GET_BOOL, NO_ARG, 1, 0, 0, 0, 0, 0}, {"allow-keywords", OPT_KEYWORDS, "Allow creation of column names that are keywords.", &opt_keywords, &opt_keywords, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"apply-slave-statements", OPT_MYSQLDUMP_SLAVE_APPLY, "Adds 'STOP SLAVE' prior to 'CHANGE MASTER' and 'START SLAVE' to bottom of dump.", &opt_slave_apply, &opt_slave_apply, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"bind-address", 0, "IP address to bind to.", (uchar**) &opt_bind_addr, (uchar**) &opt_bind_addr, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"character-sets-dir", OPT_CHARSETS_DIR, "Directory for character set files.", &charsets_dir, &charsets_dir, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"comments", 'i', "Write additional information.", &opt_comments, &opt_comments, 0, GET_BOOL, NO_ARG, 1, 0, 0, 0, 0, 0}, {"compatible", OPT_COMPATIBLE, "Change the dump to be compatible with a given mode. By default tables " "are dumped in a format optimized for MySQL. 
Legal modes are: ansi, " "mysql323, mysql40, postgresql, oracle, mssql, db2, maxdb, no_key_options, " "no_table_options, no_field_options. One can use several modes separated " "by commas. Note: Requires MySQL server version 4.1.0 or higher. " "This option is ignored with earlier server versions.", &opt_compatible_mode_str, &opt_compatible_mode_str, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"compact", OPT_COMPACT, "Give less verbose output (useful for debugging). Disables structure " "comments and header/footer constructs. Enables options --skip-add-" "drop-table --skip-add-locks --skip-comments --skip-disable-keys " "--skip-set-charset.", &opt_compact, &opt_compact, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"complete-insert", 'c', "Use complete insert statements.", &opt_complete_insert, &opt_complete_insert, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"compress", 'C', "Use compression in server/client protocol.", &opt_compress, &opt_compress, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"create-options", 'a', "Include all MySQL specific create options.", &create_options, &create_options, 0, GET_BOOL, NO_ARG, 1, 0, 0, 0, 0, 0}, {"databases", 'B', "Dump several databases. Note the difference in usage; in this case no tables are given. All name arguments are regarded as database names. 'USE db_name;' will be included in the output.", &opt_databases, &opt_databases, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, #ifdef DBUG_OFF {"debug", '#', "This is a non-debug version. 
Catch this and exit.", 0,0, 0, GET_DISABLED, OPT_ARG, 0, 0, 0, 0, 0, 0}, #else {"debug", '#', "Output debug log.", &default_dbug_option, &default_dbug_option, 0, GET_STR, OPT_ARG, 0, 0, 0, 0, 0, 0}, #endif {"debug-check", OPT_DEBUG_CHECK, "Check memory and open file usage at exit.", &debug_check_flag, &debug_check_flag, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"debug-info", OPT_DEBUG_INFO, "Print some debug info at exit.", &debug_info_flag, &debug_info_flag, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"default-character-set", OPT_DEFAULT_CHARSET, "Set the default character set.", &default_charset, &default_charset, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"delete-master-logs", OPT_DELETE_MASTER_LOGS, "Delete logs on master after backup. This automatically enables --master-data.", &opt_delete_master_logs, &opt_delete_master_logs, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"disable-keys", 'K', "'/*!40000 ALTER TABLE tb_name DISABLE KEYS */; and '/*!40000 ALTER " "TABLE tb_name ENABLE KEYS */; will be put in the output.", &opt_disable_keys, &opt_disable_keys, 0, GET_BOOL, NO_ARG, 1, 0, 0, 0, 0, 0}, {"dump-slave", OPT_MYSQLDUMP_SLAVE_DATA, "This causes the binary log position and filename of the master to be " "appended to the dumped data output. Setting the value to 1, will print" "it as a CHANGE MASTER command in the dumped data output; if equal" " to 2, that command will be prefixed with a comment symbol. " "This option will turn --lock-all-tables on, unless " "--single-transaction is specified too (in which case a " "global read lock is only taken a short time at the beginning of the dump " "- don't forget to read about --single-transaction below). In all cases " "any action on logs will happen at the exact moment of the dump." 
"Option automatically turns --lock-tables off.", &opt_slave_data, &opt_slave_data, 0, GET_UINT, OPT_ARG, 0, 0, MYSQL_OPT_SLAVE_DATA_COMMENTED_SQL, 0, 0, 0}, {"events", 'E', "Dump events.", &opt_events, &opt_events, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"extended-insert", 'e', "Use multiple-row INSERT syntax that include several VALUES lists.", &extended_insert, &extended_insert, 0, GET_BOOL, NO_ARG, 1, 0, 0, 0, 0, 0}, {"fields-terminated-by", OPT_FTB, "Fields in the output file are terminated by the given string.", &fields_terminated, &fields_terminated, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"fields-enclosed-by", OPT_ENC, "Fields in the output file are enclosed by the given character.", &enclosed, &enclosed, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0 ,0, 0}, {"fields-optionally-enclosed-by", OPT_O_ENC, "Fields in the output file are optionally enclosed by the given character.", &opt_enclosed, &opt_enclosed, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0 ,0, 0}, {"fields-escaped-by", OPT_ESC, "Fields in the output file are escaped by the given character.", &escaped, &escaped, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"flush-logs", 'F', "Flush logs file in server before starting dump. " "Note that if you dump many databases at once (using the option " "--databases= or --all-databases), the logs will be flushed for " "each database dumped. The exception is when using --lock-all-tables " "or --master-data: " "in this case the logs will be flushed only once, corresponding " "to the moment all tables are locked. So if you want your dump and " "the log flush to happen at the same exact moment you should use " "--lock-all-tables or --master-data with --flush-logs.", &flush_logs, &flush_logs, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"flush-privileges", OPT_ESC, "Emit a FLUSH PRIVILEGES statement " "after dumping the mysql database. 
This option should be used any " "time the dump contains the mysql database and any other database " "that depends on the data in the mysql database for proper restore. ", &flush_privileges, &flush_privileges, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"force", 'f', "Continue even if we get an SQL error.", &opt_force, &opt_force, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"help", '?', "Display this help message and exit.", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, {"hex-blob", OPT_HEXBLOB, "Dump binary strings (BINARY, " "VARBINARY, BLOB) in hexadecimal format.", &opt_hex_blob, &opt_hex_blob, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"host", 'h', "Connect to host.", &current_host, &current_host, 0, GET_STR_ALLOC, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"ignore-error", OPT_MYSQLDUMP_IGNORE_ERROR, "A comma-separated list of " "error numbers to be ignored if encountered during dump.", &opt_ignore_error, &opt_ignore_error, 0, GET_STR_ALLOC, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"ignore-table", OPT_IGNORE_TABLE, "Do not dump the specified table. To specify more than one table to ignore, " "use the directive multiple times, once for each table. Each table must " "be specified with both database and table names, e.g., " "--ignore-table=database.table.", 0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"include-master-host-port", OPT_MYSQLDUMP_INCLUDE_MASTER_HOST_PORT, "Adds 'MASTER_HOST=<host>, MASTER_PORT=<port>' to 'CHANGE MASTER TO..' " "in dump produced with --dump-slave.", &opt_include_master_host_port, &opt_include_master_host_port, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"insert-ignore", OPT_INSERT_IGNORE, "Insert rows with INSERT IGNORE.", &opt_ignore, &opt_ignore, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"lines-terminated-by", OPT_LTB, "Lines in the output file are terminated by the given string.", &lines_terminated, &lines_terminated, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"lock-all-tables", 'x', "Locks all tables across all databases. 
This " "is achieved by taking a global read lock for the duration of the whole " "dump. Automatically turns --single-transaction and --lock-tables off.", &opt_lock_all_tables, &opt_lock_all_tables, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"lock-tables", 'l', "Lock all tables for read.", &lock_tables, &lock_tables, 0, GET_BOOL, NO_ARG, 1, 0, 0, 0, 0, 0}, {"log-error", OPT_ERROR_LOG_FILE, "Append warnings and errors to given file.", &log_error_file, &log_error_file, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"master-data", OPT_MASTER_DATA, "This causes the binary log position and filename to be appended to the " "output. If equal to 1, will print it as a CHANGE MASTER command; if equal" " to 2, that command will be prefixed with a comment symbol. " "This option will turn --lock-all-tables on, unless " "--single-transaction is specified too (in which case a " "global read lock is only taken a short time at the beginning of the dump; " "don't forget to read about --single-transaction below). In all cases, " "any action on logs will happen at the exact moment of the dump. " "Option automatically turns --lock-tables off.", &opt_master_data, &opt_master_data, 0, GET_UINT, OPT_ARG, 0, 0, MYSQL_OPT_MASTER_DATA_COMMENTED_SQL, 0, 0, 0}, {"max_allowed_packet", OPT_MAX_ALLOWED_PACKET, "The maximum packet length to send to or receive from server.", &opt_max_allowed_packet, &opt_max_allowed_packet, 0, GET_ULONG, REQUIRED_ARG, 24*1024*1024, 4096, (longlong) 2L*1024L*1024L*1024L, MALLOC_OVERHEAD, 1024, 0}, {"net_buffer_length", OPT_NET_BUFFER_LENGTH, "The buffer size for TCP/IP and socket communication.", &opt_net_buffer_length, &opt_net_buffer_length, 0, GET_ULONG, REQUIRED_ARG, 1024*1024L-1025, 4096, 16*1024L*1024L, MALLOC_OVERHEAD-1024, 1024, 0}, {"no-autocommit", OPT_AUTOCOMMIT, "Wrap tables with autocommit/commit statements.", &opt_autocommit, &opt_autocommit, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"no-create-db", 'n', "Suppress the CREATE DATABASE ... 
IF EXISTS statement that normally is " "output for each dumped database if --all-databases or --databases is " "given.", &opt_create_db, &opt_create_db, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"no-create-info", 't', "Don't write table creation info.", &opt_no_create_info, &opt_no_create_info, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"no-data", 'd', "No row information.", &opt_no_data, &opt_no_data, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"no-set-names", 'N', "Same as --skip-set-charset.", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, {"opt", OPT_OPTIMIZE, "Same as --add-drop-table, --add-locks, --create-options, --quick, --extended-insert, --lock-tables, --set-charset, and --disable-keys. Enabled by default, disable with --skip-opt.", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, {"order-by-primary", OPT_ORDER_BY_PRIMARY, "Sorts each table's rows by primary key, or first unique key, if such a key exists. Useful when dumping a MyISAM table to be loaded into an InnoDB table, but will make the dump itself take considerably longer.", &opt_order_by_primary, &opt_order_by_primary, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"password", 'p', "Password to use when connecting to server. 
If password is not given it's solicited on the tty.", 0, 0, 0, GET_PASSWORD, OPT_ARG, 0, 0, 0, 0, 0, 0}, #ifdef _WIN32 {"pipe", 'W', "Use named pipes to connect to server.", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, #endif {"port", 'P', "Port number to use for connection.", &opt_mysql_port, &opt_mysql_port, 0, GET_UINT, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"protocol", OPT_MYSQL_PROTOCOL, "The protocol to use for connection (tcp, socket, pipe, memory).", 0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"quick", 'q', "Don't buffer query, dump directly to stdout.", &quick, &quick, 0, GET_BOOL, NO_ARG, 1, 0, 0, 0, 0, 0}, {"quote-names",'Q', "Quote table and column names with backticks (`).", &opt_quoted, &opt_quoted, 0, GET_BOOL, NO_ARG, 1, 0, 0, 0, 0, 0}, {"replace", OPT_MYSQL_REPLACE_INTO, "Use REPLACE INTO instead of INSERT INTO.", &opt_replace_into, &opt_replace_into, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"result-file", 'r', "Direct output to a given file. This option should be used in systems " "(e.g., DOS, Windows) that use carriage-return linefeed pairs (\\r\\n) " "to separate text lines. This option ensures that only a single newline " "is used.", 0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"routines", 'R', "Dump stored routines (functions and procedures).", &opt_routines, &opt_routines, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"set-charset", OPT_SET_CHARSET, "Add 'SET NAMES default_character_set' to the output.", &opt_set_charset, &opt_set_charset, 0, GET_BOOL, NO_ARG, 1, 0, 0, 0, 0, 0}, {"set-gtid-purged", OPT_SET_GTID_PURGED, "Add 'SET @@GLOBAL.GTID_PURGED' to the output. Possible values for " "this option are ON, OFF and AUTO. If ON is used and GTIDs " "are not enabled on the server, an error is generated. If OFF is " "used, this option does nothing. If AUTO is used and GTIDs are enabled " "on the server, 'SET @@GLOBAL.GTID_PURGED' is added to the output. " "If GTIDs are disabled, AUTO does nothing. 
Default is AUTO.", 0, 0, 0, GET_STR, OPT_ARG, 0, 0, 0, 0, 0, 0}, #if defined (_WIN32) && !defined (EMBEDDED_LIBRARY) {"shared-memory-base-name", OPT_SHARED_MEMORY_BASE_NAME, "Base name of shared memory.", &shared_memory_base_name, &shared_memory_base_name, 0, GET_STR_ALLOC, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, #endif /* Note that the combination --single-transaction --master-data will give bullet-proof binlog position only if server >=4.1.3. That's the old "FLUSH TABLES WITH READ LOCK does not block commit" fixed bug. */ {"single-transaction", OPT_TRANSACTION, "Creates a consistent snapshot by dumping all tables in a single " "transaction. Works ONLY for tables stored in storage engines which " "support multiversioning (currently only InnoDB does); the dump is NOT " "guaranteed to be consistent for other storage engines. " "While a --single-transaction dump is in process, to ensure a valid " "dump file (correct table contents and binary log position), no other " "connection should use the following statements: ALTER TABLE, DROP " "TABLE, RENAME TABLE, TRUNCATE TABLE, as consistent snapshot is not " "isolated from them. Option automatically turns off --lock-tables.", &opt_single_transaction, &opt_single_transaction, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"dump-date", OPT_DUMP_DATE, "Put a dump date to the end of the output.", &opt_dump_date, &opt_dump_date, 0, GET_BOOL, NO_ARG, 1, 0, 0, 0, 0, 0}, {"skip-opt", OPT_SKIP_OPTIMIZATION, "Disable --opt. Disables --add-drop-table, --add-locks, --create-options, --quick, --extended-insert, --lock-tables, --set-charset, and --disable-keys.", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, {"socket", 'S', "The socket file to use for connection.", &opt_mysql_unix_port, &opt_mysql_unix_port, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, #include <sslopt-longopts.h> {"tab",'T', "Create tab-separated textfile for each table to given path. (Create .sql " "and .txt files.) 
NOTE: This only works if mysqldump is run on the same " "machine as the mysqld server.", &path, &path, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"tables", OPT_TABLES, "Overrides option --databases (-B).", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, {"triggers", OPT_TRIGGERS, "Dump triggers for each dumped table.", &opt_dump_triggers, &opt_dump_triggers, 0, GET_BOOL, NO_ARG, 1, 0, 0, 0, 0, 0}, {"tz-utc", OPT_TZ_UTC, "SET TIME_ZONE='+00:00' at top of dump to allow dumping of TIMESTAMP data when a server has data in different time zones or data is being moved between servers with different time zones.", &opt_tz_utc, &opt_tz_utc, 0, GET_BOOL, NO_ARG, 1, 0, 0, 0, 0, 0}, {"user", 'u', "User for login if not current user.", &current_user, &current_user, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"verbose", 'v', "Print info about the various stages.", &verbose, &verbose, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"version",'V', "Output version information and exit.", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, {"where", 'w', "Dump only selected records. 
Quotes are mandatory.", &where, &where, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"xml", 'X', "Dump a database as well formed XML.", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, {"plugin_dir", OPT_PLUGIN_DIR, "Directory for client-side plugins.", &opt_plugin_dir, &opt_plugin_dir, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"default_auth", OPT_DEFAULT_AUTH, "Default authentication client-side plugin to use.", &opt_default_auth, &opt_default_auth, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0} }; static const char *load_default_groups[]= { "mysqldump","client",0 }; static void maybe_exit(int error); static void die(int error, const char* reason, ...); static void maybe_die(int error, const char* reason, ...); static void write_header(FILE *sql_file, char *db_name); static void print_value(FILE *file, MYSQL_RES *result, MYSQL_ROW row, const char *prefix,const char *name, int string_value); static int dump_selected_tables(char *db, char **table_names, int tables); static int dump_all_tables_in_db(char *db); static int init_dumping_views(char *); static int init_dumping_tables(char *); static int init_dumping(char *, int init_func(char*)); static int dump_databases(char **); static int dump_all_databases(); static char *quote_name(const char *name, char *buff, my_bool force); char check_if_ignore_table(const char *table_name, char *table_type); static char *primary_key_fields(const char *table_name); static my_bool get_view_structure(char *table, char* db); static my_bool dump_all_views_in_db(char *database); static int dump_all_tablespaces(); static int dump_tablespaces_for_tables(char *db, char **table_names, int tables); static int dump_tablespaces_for_databases(char** databases); static int dump_tablespaces(char* ts_where); static void print_comment(FILE *sql_file, my_bool is_error, const char *format, ...); /* Print the supplied message if in verbose mode SYNOPSIS verbose_msg() fmt format specifier ... 
variable number of parameters */ static void verbose_msg(const char *fmt, ...) { va_list args; DBUG_ENTER("verbose_msg"); if (!verbose) DBUG_VOID_RETURN; va_start(args, fmt); vfprintf(stderr, fmt, args); va_end(args); fflush(stderr); DBUG_VOID_RETURN; } /* exit with message if ferror(file) SYNOPSIS check_io() file - checked file */ void check_io(FILE *file) { if (ferror(file)) die(EX_EOF, "Got errno %d on write", errno); } static void print_version(void) { printf("%s Ver %s Distrib %s, for %s (%s)\n",my_progname,DUMP_VERSION, MYSQL_SERVER_VERSION,SYSTEM_TYPE,MACHINE_TYPE); } /* print_version */ static void short_usage_sub(void) { printf("Usage: %s [OPTIONS] database [tables]\n", my_progname); printf("OR %s [OPTIONS] --databases [OPTIONS] DB1 [DB2 DB3...]\n", my_progname); printf("OR %s [OPTIONS] --all-databases [OPTIONS]\n", my_progname); } static void usage(void) { print_version(); puts(ORACLE_WELCOME_COPYRIGHT_NOTICE("2000")); puts("Dumping structure and contents of MySQL databases and tables."); short_usage_sub(); print_defaults("my",load_default_groups); my_print_help(my_long_options); my_print_variables(my_long_options); } /* usage */ static void short_usage(void) { short_usage_sub(); printf("For more options, use %s --help\n", my_progname); } static void write_header(FILE *sql_file, char *db_name) { if (opt_xml) { fputs("<?xml version=\"1.0\"?>\n", sql_file); /* Schema reference. Allows use of xsi:nil for NULL values and xsi:type to define an element's data type. */ fputs("<mysqldump ", sql_file); fputs("xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\"", sql_file); fputs(">\n", sql_file); check_io(sql_file); } else if (!opt_compact) { print_comment(sql_file, 0, "-- MySQL dump %s Distrib %s, for %s (%s)\n--\n", DUMP_VERSION, MYSQL_SERVER_VERSION, SYSTEM_TYPE, MACHINE_TYPE); print_comment(sql_file, 0, "-- Host: %s Database: %s\n", current_host ? current_host : "localhost", db_name ? 
db_name : ""); print_comment(sql_file, 0, "-- ------------------------------------------------------\n" ); print_comment(sql_file, 0, "-- Server version\t%s\n", mysql_get_server_info(&mysql_connection)); if (opt_set_charset) fprintf(sql_file, "\n/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */;" "\n/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */;" "\n/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */;" "\n/*!40101 SET NAMES %s */;\n",default_charset); if (opt_tz_utc) { fprintf(sql_file, "/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */;\n"); fprintf(sql_file, "/*!40103 SET TIME_ZONE='+00:00' */;\n"); } if (!path) { fprintf(md_result_file,"\ /*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */;\n\ /*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */;\n\ "); } fprintf(sql_file, "/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='%s%s%s' */;\n" "/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */;\n", path?"":"NO_AUTO_VALUE_ON_ZERO",compatible_mode_normal_str[0]==0?"":",", compatible_mode_normal_str); check_io(sql_file); } } /* write_header */ static void write_footer(FILE *sql_file) { if (opt_xml) { fputs("</mysqldump>\n", sql_file); check_io(sql_file); } else if (!opt_compact) { if (opt_tz_utc) fprintf(sql_file,"/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;\n"); fprintf(sql_file,"\n/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;\n"); if (!path) { fprintf(md_result_file,"\ /*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */;\n\ /*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */;\n"); } if (opt_set_charset) fprintf(sql_file, "/*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */;\n" "/*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */;\n" "/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */;\n"); fprintf(sql_file, "/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;\n"); fputs("\n", sql_file); if (opt_dump_date) { char time_str[20]; get_date(time_str, 
GETDATE_DATE_TIME, 0); print_comment(sql_file, 0, "-- Dump completed on %s\n", time_str); } else print_comment(sql_file, 0, "-- Dump completed\n"); check_io(sql_file); } } /* write_footer */ uchar* get_table_key(const char *entry, size_t *length, my_bool not_used __attribute__((unused))) { *length= strlen(entry); return (uchar*) entry; } static my_bool get_one_option(int optid, const struct my_option *opt __attribute__((unused)), char *argument) { switch (optid) { case 'p': if (argument == disabled_my_option) argument= (char*) ""; /* Don't require password */ if (argument) { char *start=argument; my_free(opt_password); opt_password=my_strdup(PSI_NOT_INSTRUMENTED, argument,MYF(MY_FAE)); while (*argument) *argument++= 'x'; /* Destroy argument */ if (*start) start[1]=0; /* Cut length of argument */ tty_password= 0; } else tty_password=1; break; case 'r': if (!(md_result_file= my_fopen(argument, O_WRONLY | FILE_BINARY, MYF(MY_WME)))) exit(1); break; case 'W': #ifdef _WIN32 opt_protocol= MYSQL_PROTOCOL_PIPE; #endif break; case 'N': opt_set_charset= 0; break; case 'T': opt_disable_keys=0; if (strlen(argument) >= FN_REFLEN) { /* This check is made because the some the file functions below have FN_REFLEN sized stack allocated buffers and will cause a crash even if the input destination buffer is large enough to hold the output. */ die(EX_USAGE, "Input filename too long: %s", argument); } break; case '#': DBUG_PUSH(argument ? 
argument : default_dbug_option); debug_check_flag= 1; break; #include <sslopt-case.h> case 'V': print_version(); exit(0); case 'X': opt_xml= 1; extended_insert= opt_drop= opt_lock= opt_disable_keys= opt_autocommit= opt_create_db= 0; break; case 'i': opt_comments_used= 1; break; case 'I': case '?': usage(); exit(0); case (int) OPT_MASTER_DATA: if (!argument) /* work like in old versions */ opt_master_data= MYSQL_OPT_MASTER_DATA_EFFECTIVE_SQL; break; case (int) OPT_MYSQLDUMP_SLAVE_DATA: if (!argument) /* work like in old versions */ opt_slave_data= MYSQL_OPT_SLAVE_DATA_EFFECTIVE_SQL; break; case (int) OPT_OPTIMIZE: extended_insert= opt_drop= opt_lock= quick= create_options= opt_disable_keys= lock_tables= opt_set_charset= 1; break; case (int) OPT_SKIP_OPTIMIZATION: extended_insert= opt_drop= opt_lock= quick= create_options= opt_disable_keys= lock_tables= opt_set_charset= 0; break; case (int) OPT_COMPACT: if (opt_compact) { opt_comments= opt_drop= opt_disable_keys= opt_lock= 0; opt_set_charset= 0; } break; case (int) OPT_TABLES: opt_databases=0; break; case (int) OPT_IGNORE_TABLE: { if (!strchr(argument, '.')) { fprintf(stderr, "Illegal use of option --ignore-table=<database>.<table>\n"); exit(1); } if (my_hash_insert(&ignore_table, (uchar*)my_strdup(PSI_NOT_INSTRUMENTED, argument, MYF(0)))) exit(EX_EOM); break; } case (int) OPT_COMPATIBLE: { char buff[255]; char *end= compatible_mode_normal_str; int i; ulong mode; uint err_len; opt_quoted= 1; opt_set_charset= 0; opt_compatible_mode_str= argument; opt_compatible_mode= find_set(&compatible_mode_typelib, argument, (uint) strlen(argument), &err_ptr, &err_len); if (err_len) { strmake(buff, err_ptr, MY_MIN(sizeof(buff) - 1, err_len)); fprintf(stderr, "Invalid mode to --compatible: %s\n", buff); exit(1); } #if !defined(DBUG_OFF) { uint size_for_sql_mode= 0; const char **ptr; for (ptr= compatible_mode_names; *ptr; ptr++) size_for_sql_mode+= strlen(*ptr); size_for_sql_mode+= sizeof(compatible_mode_names)-1; 
DBUG_ASSERT(sizeof(compatible_mode_normal_str)>=size_for_sql_mode); } #endif mode= opt_compatible_mode; for (i= 0, mode= opt_compatible_mode; mode; mode>>= 1, i++) { if (mode & 1) { end= my_stpcpy(end, compatible_mode_names[i]); end= my_stpcpy(end, ","); } } if (end!=compatible_mode_normal_str) end[-1]= 0; /* Set charset to the default compiled value if it hasn't been reset yet by --default-character-set=xxx. */ if (default_charset == mysql_universal_client_charset) default_charset= (char*) MYSQL_DEFAULT_CHARSET_NAME; break; } case (int) OPT_MYSQL_PROTOCOL: opt_protocol= find_type_or_exit(argument, &sql_protocol_typelib, opt->name); break; case (int) OPT_SET_GTID_PURGED: { opt_set_gtid_purged_mode= find_type_or_exit(argument, &set_gtid_purged_mode_typelib, opt->name)-1; break; } case (int) OPT_MYSQLDUMP_IGNORE_ERROR: /* Store the supplied list of errors into an array. */ if (parse_ignore_error()) exit(EX_EOM); break; } return 0; } static int get_options(int *argc, char ***argv) { int ho_error; MYSQL_PARAMETERS *mysql_params= mysql_get_parameters(); opt_max_allowed_packet= *mysql_params->p_max_allowed_packet; opt_net_buffer_length= *mysql_params->p_net_buffer_length; md_result_file= stdout; my_getopt_use_args_separator= TRUE; if (load_defaults("my",load_default_groups,argc,argv)) return 1; my_getopt_use_args_separator= FALSE; defaults_argv= *argv; if (my_hash_init(&ignore_table, charset_info, 16, 0, 0, (my_hash_get_key) get_table_key, my_free, 0)) return(EX_EOM); /* Don't copy internal log tables */ if (my_hash_insert(&ignore_table, (uchar*) my_strdup(PSI_NOT_INSTRUMENTED, "mysql.apply_status", MYF(MY_WME))) || my_hash_insert(&ignore_table, (uchar*) my_strdup(PSI_NOT_INSTRUMENTED, "mysql.schema", MYF(MY_WME))) || my_hash_insert(&ignore_table, (uchar*) my_strdup(PSI_NOT_INSTRUMENTED, "mysql.general_log", MYF(MY_WME))) || my_hash_insert(&ignore_table, (uchar*) my_strdup(PSI_NOT_INSTRUMENTED, "mysql.slow_log", MYF(MY_WME)))) return(EX_EOM); if ((ho_error= 
handle_options(argc, argv, my_long_options, get_one_option))) return(ho_error); *mysql_params->p_max_allowed_packet= opt_max_allowed_packet; *mysql_params->p_net_buffer_length= opt_net_buffer_length; if (debug_info_flag) my_end_arg= MY_CHECK_ERROR | MY_GIVE_INFO; if (debug_check_flag) my_end_arg= MY_CHECK_ERROR; if (!path && (enclosed || opt_enclosed || escaped || lines_terminated || fields_terminated)) { fprintf(stderr, "%s: You must use option --tab with --fields-...\n", my_progname); return(EX_USAGE); } /* We don't delete master logs if slave data option */ if (opt_slave_data) { opt_lock_all_tables= !opt_single_transaction; opt_master_data= 0; opt_delete_master_logs= 0; } /* Ensure consistency of the set of binlog & locking options */ if (opt_delete_master_logs && !opt_master_data) opt_master_data= MYSQL_OPT_MASTER_DATA_COMMENTED_SQL; if (opt_single_transaction && opt_lock_all_tables) { fprintf(stderr, "%s: You can't use --single-transaction and " "--lock-all-tables at the same time.\n", my_progname); return(EX_USAGE); } if (opt_master_data) { opt_lock_all_tables= !opt_single_transaction; opt_slave_data= 0; } if (opt_single_transaction || opt_lock_all_tables) lock_tables= 0; if (enclosed && opt_enclosed) { fprintf(stderr, "%s: You can't use ..enclosed.. and ..optionally-enclosed.. at the same time.\n", my_progname); return(EX_USAGE); } if ((opt_databases || opt_alldbs) && path) { fprintf(stderr, "%s: --databases or --all-databases can't be used with --tab.\n", my_progname); return(EX_USAGE); } if (strcmp(default_charset, charset_info->csname) && !(charset_info= get_charset_by_csname(default_charset, MY_CS_PRIMARY, MYF(MY_WME)))) exit(1); if ((*argc < 1 && !opt_alldbs) || (*argc > 0 && opt_alldbs)) { short_usage(); return EX_USAGE; } if (tty_password) opt_password=get_tty_password(NullS); return(0); } /* get_options */ /* ** DB_error -- prints mysql error message and exits the program. 
*/ static void DB_error(MYSQL *mysql_arg, const char *when) { DBUG_ENTER("DB_error"); maybe_die(EX_MYSQLERR, "Got error: %d: %s %s", mysql_errno(mysql_arg), mysql_error(mysql_arg), when); DBUG_VOID_RETURN; } /* Prints out an error message and kills the process. SYNOPSIS die() error_num - process return value fmt_reason - a format string for use by my_vsnprintf. ... - variable arguments for above fmt_reason string DESCRIPTION This call prints out the formatted error message to stderr and then terminates the process. */ static void die(int error_num, const char* fmt_reason, ...) { char buffer[1000]; va_list args; va_start(args,fmt_reason); my_vsnprintf(buffer, sizeof(buffer), fmt_reason, args); va_end(args); fprintf(stderr, "%s: %s\n", my_progname, buffer); fflush(stderr); /* force the exit */ opt_force= 0; if (opt_ignore_error) my_free(opt_ignore_error); opt_ignore_error= 0; maybe_exit(error_num); } /* Prints out an error message and maybe kills the process. SYNOPSIS maybe_die() error_num - process return value fmt_reason - a format string for use by my_vsnprintf. ... - variable arguments for above fmt_reason string DESCRIPTION This call prints out the formatted error message to stderr and then terminates the process, unless the --force command line option is used. This call should be used for non-fatal errors (such as database errors) that the code may still be able to continue to the next unit of work. */ static void maybe_die(int error_num, const char* fmt_reason, ...) { char buffer[1000]; va_list args; va_start(args,fmt_reason); my_vsnprintf(buffer, sizeof(buffer), fmt_reason, args); va_end(args); fprintf(stderr, "%s: %s\n", my_progname, buffer); fflush(stderr); maybe_exit(error_num); } /* Sends a query to server, optionally reads result, prints error message if some. 
SYNOPSIS mysql_query_with_error_report() mysql_con connection to use res if non zero, result will be put there with mysql_store_result() query query to send to server RETURN VALUES 0 query sending and (if res!=0) result reading went ok 1 error */ static int mysql_query_with_error_report(MYSQL *mysql_con, MYSQL_RES **res, const char *query) { if (mysql_query(mysql_con, query) || (res && !((*res)= mysql_store_result(mysql_con)))) { maybe_die(EX_MYSQLERR, "Couldn't execute '%s': %s (%d)", query, mysql_error(mysql_con), mysql_errno(mysql_con)); return 1; } return 0; } static int fetch_db_collation(const char *db_name, char *db_cl_name, int db_cl_size) { my_bool err_status= FALSE; char query[QUERY_LENGTH]; MYSQL_RES *db_cl_res; MYSQL_ROW db_cl_row; char quoted_database_buf[NAME_LEN*2+3]; char *qdatabase= quote_name(db_name, quoted_database_buf, 1); my_snprintf(query, sizeof (query), "use %s", qdatabase); if (mysql_query_with_error_report(mysql, NULL, query)) return 1; if (mysql_query_with_error_report(mysql, &db_cl_res, "select @@collation_database")) return 1; do { if (mysql_num_rows(db_cl_res) != 1) { err_status= TRUE; break; } if (!(db_cl_row= mysql_fetch_row(db_cl_res))) { err_status= TRUE; break; } strncpy(db_cl_name, db_cl_row[0], db_cl_size); db_cl_name[db_cl_size - 1]= 0; /* just in case. */ } while (FALSE); mysql_free_result(db_cl_res); return err_status ? 1 : 0; } static char *my_case_str(const char *str, uint str_len, const char *token, uint token_len) { my_match_t match; uint status= my_charset_latin1.coll->instr(&my_charset_latin1, str, str_len, token, token_len, &match, 1); return status ? 
(char *) str + match.end : NULL; } static int switch_db_collation(FILE *sql_file, const char *db_name, const char *delimiter, const char *current_db_cl_name, const char *required_db_cl_name, int *db_cl_altered) { if (strcmp(current_db_cl_name, required_db_cl_name) != 0) { char quoted_db_buf[NAME_LEN * 2 + 3]; char *quoted_db_name= quote_name(db_name, quoted_db_buf, FALSE); CHARSET_INFO *db_cl= get_charset_by_name(required_db_cl_name, MYF(0)); if (!db_cl) return 1; fprintf(sql_file, "ALTER DATABASE %s CHARACTER SET %s COLLATE %s %s\n", (const char *) quoted_db_name, (const char *) db_cl->csname, (const char *) db_cl->name, (const char *) delimiter); *db_cl_altered= 1; return 0; } *db_cl_altered= 0; return 0; } static int restore_db_collation(FILE *sql_file, const char *db_name, const char *delimiter, const char *db_cl_name) { char quoted_db_buf[NAME_LEN * 2 + 3]; char *quoted_db_name= quote_name(db_name, quoted_db_buf, FALSE); CHARSET_INFO *db_cl= get_charset_by_name(db_cl_name, MYF(0)); if (!db_cl) return 1; fprintf(sql_file, "ALTER DATABASE %s CHARACTER SET %s COLLATE %s %s\n", (const char *) quoted_db_name, (const char *) db_cl->csname, (const char *) db_cl->name, (const char *) delimiter); return 0; } static void switch_cs_variables(FILE *sql_file, const char *delimiter, const char *character_set_client, const char *character_set_results, const char *collation_connection) { fprintf(sql_file, "/*!50003 SET @saved_cs_client = @@character_set_client */ %s\n" "/*!50003 SET @saved_cs_results = @@character_set_results */ %s\n" "/*!50003 SET @saved_col_connection = @@collation_connection */ %s\n" "/*!50003 SET character_set_client = %s */ %s\n" "/*!50003 SET character_set_results = %s */ %s\n" "/*!50003 SET collation_connection = %s */ %s\n", (const char *) delimiter, (const char *) delimiter, (const char *) delimiter, (const char *) character_set_client, (const char *) delimiter, (const char *) character_set_results, (const char *) delimiter, (const char *) 
collation_connection, (const char *) delimiter); } static void restore_cs_variables(FILE *sql_file, const char *delimiter) { fprintf(sql_file, "/*!50003 SET character_set_client = @saved_cs_client */ %s\n" "/*!50003 SET character_set_results = @saved_cs_results */ %s\n" "/*!50003 SET collation_connection = @saved_col_connection */ %s\n", (const char *) delimiter, (const char *) delimiter, (const char *) delimiter); } static void switch_sql_mode(FILE *sql_file, const char *delimiter, const char *sql_mode) { fprintf(sql_file, "/*!50003 SET @saved_sql_mode = @@sql_mode */ %s\n" "/*!50003 SET sql_mode = '%s' */ %s\n", (const char *) delimiter, (const char *) sql_mode, (const char *) delimiter); } static void restore_sql_mode(FILE *sql_file, const char *delimiter) { fprintf(sql_file, "/*!50003 SET sql_mode = @saved_sql_mode */ %s\n", (const char *) delimiter); } static void switch_time_zone(FILE *sql_file, const char *delimiter, const char *time_zone) { fprintf(sql_file, "/*!50003 SET @saved_time_zone = @@time_zone */ %s\n" "/*!50003 SET time_zone = '%s' */ %s\n", (const char *) delimiter, (const char *) time_zone, (const char *) delimiter); } static void restore_time_zone(FILE *sql_file, const char *delimiter) { fprintf(sql_file, "/*!50003 SET time_zone = @saved_time_zone */ %s\n", (const char *) delimiter); } /** Switch charset for results to some specified charset. If the server does not support character_set_results variable, nothing can be done here. As for whether something should be done here, future new callers of this function should be aware that the server lacking the facility of switching charsets is treated as success. @note If the server lacks support, then nothing is changed and no error condition is returned. @returns whether there was an error or not */ static int switch_character_set_results(MYSQL *mysql, const char *cs_name) { char query_buffer[QUERY_LENGTH]; size_t query_length; /* Server lacks facility. This is not an error, by arbitrary decision . 
*/ if (!server_supports_switching_charsets) return FALSE; query_length= my_snprintf(query_buffer, sizeof (query_buffer), "SET SESSION character_set_results = '%s'", (const char *) cs_name); return mysql_real_query(mysql, query_buffer, query_length); } /** Rewrite statement, enclosing DEFINER clause in version-specific comment. This function parses any CREATE statement and encloses DEFINER-clause in version-specific comment: input query: CREATE DEFINER=a@b FUNCTION ... rewritten query: CREATE * / / *!50020 DEFINER=a@b * / / *!50003 FUNCTION ... @note This function will go away when WL#3995 is implemented. @param[in] stmt_str CREATE statement string. @param[in] stmt_length Length of the stmt_str. @param[in] definer_version_str Minimal MySQL version number when DEFINER clause is supported in the given statement. @param[in] definer_version_length Length of definer_version_str. @param[in] stmt_version_str Minimal MySQL version number when the given statement is supported. @param[in] stmt_version_length Length of stmt_version_str. @param[in] keyword_str Keyword to look for after CREATE. @param[in] keyword_length Length of keyword_str. @return pointer to the new allocated query string. */ static char *cover_definer_clause(const char *stmt_str, uint stmt_length, const char *definer_version_str, uint definer_version_length, const char *stmt_version_str, uint stmt_version_length, const char *keyword_str, uint keyword_length) { char *definer_begin= my_case_str(stmt_str, stmt_length, C_STRING_WITH_LEN(" DEFINER")); char *definer_end= NULL; char *query_str= NULL; char *query_ptr; if (!definer_begin) return NULL; definer_end= my_case_str(definer_begin, strlen(definer_begin), keyword_str, keyword_length); if (!definer_end) return NULL; /* Allocate memory for new query string: original string from SHOW statement and version-specific comments. 
*/ query_str= alloc_query_str(stmt_length + 23); query_ptr= my_stpncpy(query_str, stmt_str, definer_begin - stmt_str); query_ptr= my_stpncpy(query_ptr, C_STRING_WITH_LEN("*/ /*!")); query_ptr= my_stpncpy(query_ptr, definer_version_str, definer_version_length); query_ptr= my_stpncpy(query_ptr, definer_begin, definer_end - definer_begin); query_ptr= my_stpncpy(query_ptr, C_STRING_WITH_LEN("*/ /*!")); query_ptr= my_stpncpy(query_ptr, stmt_version_str, stmt_version_length); query_ptr= strxmov(query_ptr, definer_end, NullS); return query_str; } /* Open a new .sql file to dump the table or view into SYNOPSIS open_sql_file_for_table name name of the table or view flags flags (as per "man 2 open") RETURN VALUES 0 Failed to open file > 0 Handle of the open file */ static FILE* open_sql_file_for_table(const char* table, int flags) { FILE* res; char filename[FN_REFLEN], tmp_path[FN_REFLEN]; convert_dirname(tmp_path,path,NullS); res= my_fopen(fn_format(filename, table, tmp_path, ".sql", 4), flags, MYF(MY_WME)); return res; } static void free_resources() { if (md_result_file && md_result_file != stdout) my_fclose(md_result_file, MYF(0)); my_free(opt_password); if (my_hash_inited(&ignore_table)) my_hash_free(&ignore_table); if (extended_insert) dynstr_free(&extended_row); if (insert_pat_inited) dynstr_free(&insert_pat); if (defaults_argv) free_defaults(defaults_argv); if (opt_ignore_error) my_free(opt_ignore_error); delete_dynamic(&ignore_error); my_end(my_end_arg); } /** Parse the list of error numbers to be ignored and store into a dynamic array. 
@return Operation status @retval 0 Success @retval >0 Failure */ static int parse_ignore_error() { const char *search= ","; char *token; uint my_err; DBUG_ENTER("parse_ignore_error"); if (my_init_dynamic_array(&ignore_error, sizeof(uint), 12, 12)) goto error; token= strtok(opt_ignore_error, search); while (token != NULL) { my_err= atoi(token); // filter out 0s, if any if (my_err != 0) { if (insert_dynamic(&ignore_error, &my_err)) goto error; } token= strtok(NULL, search); } DBUG_RETURN(0); error: DBUG_RETURN(EX_EOM); } /** Check if the last error should be ignored. @retval 1 yes 0 no */ static my_bool do_ignore_error() { uint i, last_errno, *my_err; my_bool found= 0; DBUG_ENTER("do_ignore_error"); last_errno= mysql_errno(mysql); if (last_errno == 0) goto done; for (i= 0; i < ignore_error.elements; i++) { my_err= dynamic_element(&ignore_error, i, uint *); if (last_errno == *my_err) { found= 1; break; } } done: DBUG_RETURN(found); } static void maybe_exit(int error) { if (!first_error) first_error= error; /* Return if --force is used; else return only if the last error number is in the list of error numbers specified using --ignore-error option. */ if (opt_force || (opt_ignore_error && do_ignore_error())) return; if (mysql) mysql_close(mysql); free_resources(); exit(error); } /* db_connect -- connects to the host and selects DB. */ static int connect_to_db(char *host, char *user,char *passwd) { char buff[20+FN_REFLEN]; DBUG_ENTER("connect_to_db"); verbose_msg("-- Connecting to %s...\n", host ? 
host : "localhost"); mysql_init(&mysql_connection); if (opt_compress) mysql_options(&mysql_connection,MYSQL_OPT_COMPRESS,NullS); #ifdef HAVE_OPENSSL if (opt_use_ssl) { mysql_ssl_set(&mysql_connection, opt_ssl_key, opt_ssl_cert, opt_ssl_ca, opt_ssl_capath, opt_ssl_cipher); mysql_options(&mysql_connection, MYSQL_OPT_SSL_CRL, opt_ssl_crl); mysql_options(&mysql_connection, MYSQL_OPT_SSL_CRLPATH, opt_ssl_crlpath); } mysql_options(&mysql_connection,MYSQL_OPT_SSL_VERIFY_SERVER_CERT, (char*)&opt_ssl_verify_server_cert); #endif if (opt_protocol) mysql_options(&mysql_connection,MYSQL_OPT_PROTOCOL,(char*)&opt_protocol); if (opt_bind_addr) mysql_options(&mysql_connection,MYSQL_OPT_BIND,opt_bind_addr); #if defined (_WIN32) && !defined (EMBEDDED_LIBRARY) if (shared_memory_base_name) mysql_options(&mysql_connection,MYSQL_SHARED_MEMORY_BASE_NAME,shared_memory_base_name); #endif mysql_options(&mysql_connection, MYSQL_SET_CHARSET_NAME, default_charset); if (opt_plugin_dir && *opt_plugin_dir) mysql_options(&mysql_connection, MYSQL_PLUGIN_DIR, opt_plugin_dir); if (opt_default_auth && *opt_default_auth) mysql_options(&mysql_connection, MYSQL_DEFAULT_AUTH, opt_default_auth); mysql_options(&mysql_connection, MYSQL_OPT_CONNECT_ATTR_RESET, 0); mysql_options4(&mysql_connection, MYSQL_OPT_CONNECT_ATTR_ADD, "program_name", "mysqldump"); if (!(mysql= mysql_real_connect(&mysql_connection,host,user,passwd, NULL,opt_mysql_port,opt_mysql_unix_port, 0))) { DB_error(&mysql_connection, "when trying to connect"); DBUG_RETURN(1); } if ((mysql_get_server_version(&mysql_connection) < 40100) || (opt_compatible_mode & 3)) { /* Don't dump SET NAMES with a pre-4.1 server (bug#7997). */ opt_set_charset= 0; /* Don't switch charsets for 4.1 and earlier. (bug#34192). */ server_supports_switching_charsets= FALSE; } /* As we're going to set SQL_MODE, it would be lost on reconnect, so we cannot reconnect. 
*/ mysql->reconnect= 0; my_snprintf(buff, sizeof(buff), "/*!40100 SET @@SQL_MODE='%s' */", compatible_mode_normal_str); if (mysql_query_with_error_report(mysql, 0, buff)) DBUG_RETURN(1); /* set time_zone to UTC to allow dumping date types between servers with different time zone settings */ if (opt_tz_utc) { my_snprintf(buff, sizeof(buff), "/*!40103 SET TIME_ZONE='+00:00' */"); if (mysql_query_with_error_report(mysql, 0, buff)) DBUG_RETURN(1); } DBUG_RETURN(0); } /* connect_to_db */ /* ** dbDisconnect -- disconnects from the host. */ static void dbDisconnect(char *host) { verbose_msg("-- Disconnecting from %s...\n", host ? host : "localhost"); mysql_close(mysql); } /* dbDisconnect */ static void unescape(FILE *file,char *pos,uint length) { char *tmp; DBUG_ENTER("unescape"); if (!(tmp=(char*) my_malloc(PSI_NOT_INSTRUMENTED, length*2+1, MYF(MY_WME)))) die(EX_MYSQLERR, "Couldn't allocate memory"); mysql_real_escape_string(&mysql_connection, tmp, pos, length); fputc('\'', file); fputs(tmp, file); fputc('\'', file); check_io(file); my_free(tmp); DBUG_VOID_RETURN; } /* unescape */ static my_bool test_if_special_chars(const char *str) { #if MYSQL_VERSION_ID >= 32300 for ( ; *str ; str++) if (!my_isvar(charset_info,*str) && *str != '$') return 1; #endif return 0; } /* test_if_special_chars */ /* quote_name(name, buff, force) Quotes char string, taking into account compatible mode Args name Unquoted string containing that which will be quoted buff The buffer that contains the quoted value, also returned force Flag to make it ignore 'test_if_special_chars' Returns buff quoted string */ static char *quote_name(const char *name, char *buff, my_bool force) { char *to= buff; char qtype= (opt_compatible_mode & MASK_ANSI_QUOTES) ? 
'\"' : '`'; if (!force && !opt_quoted && !test_if_special_chars(name)) return (char*) name; *to++= qtype; while (*name) { if (*name == qtype) *to++= qtype; *to++= *name++; } to[0]= qtype; to[1]= 0; return buff; } /* quote_name */ /* Quote a table name so it can be used in "SHOW TABLES LIKE <tabname>" SYNOPSIS quote_for_like() name name of the table buff quoted name of the table DESCRIPTION Quote \, _, ' and % characters Note: Because MySQL uses the C escape syntax in strings (for example, '\n' to represent newline), you must double any '\' that you use in your LIKE strings. For example, to search for '\n', specify it as '\\n'. To search for '\', specify it as '\\\\' (the backslashes are stripped once by the parser and another time when the pattern match is done, leaving a single backslash to be matched). Example: "t\1" => "t\\\\1" */ static char *quote_for_like(const char *name, char *buff) { char *to= buff; *to++= '\''; while (*name) { if (*name == '\\') { *to++='\\'; *to++='\\'; *to++='\\'; } else if (*name == '\'' || *name == '_' || *name == '%') *to++= '\\'; *to++= *name++; } to[0]= '\''; to[1]= 0; return buff; } /** Quote and print a string. @param xml_file - Output file. @param str - String to print. @param len - Its length. @param is_attribute_name - A check for attribute name or value. @description Quote '<' '>' '&' '\"' chars and print a string to the xml_file. */ static void print_quoted_xml(FILE *xml_file, const char *str, ulong len, my_bool is_attribute_name) { const char *end; for (end= str + len; str != end; str++) { switch (*str) { case '<': fputs("&lt;", xml_file); break; case '>': fputs("&gt;", xml_file); break; case '&': fputs("&amp;", xml_file); break; case '\"': fputs("&quot;", xml_file); break; case ' ': /* Attribute names cannot contain spaces. */ if (is_attribute_name) { fputs("_", xml_file); break; } /* fall through */ default: fputc(*str, xml_file); break; } } check_io(xml_file); } /* Print xml tag. Optionally add attribute(s). 
SYNOPSIS print_xml_tag(xml_file, sbeg, send, tag_name, first_attribute_name, ..., attribute_name_n, attribute_value_n, NullS) xml_file - output file sbeg - line beginning line_end - line ending tag_name - XML tag name. first_attribute_name - tag and first attribute first_attribute_value - (Implied) value of first attribute attribute_name_n - attribute n attribute_value_n - value of attribute n DESCRIPTION Print XML tag with any number of attribute="value" pairs to the xml_file. Format is: sbeg<tag_name first_attribute_name="first_attribute_value" ... attribute_name_n="attribute_value_n">send NOTE Additional arguments must be present in attribute/value pairs. The last argument should be the null character pointer. All attribute_value arguments MUST be NULL terminated strings. All attribute_value arguments will be quoted before output. */ static void print_xml_tag(FILE * xml_file, const char* sbeg, const char* line_end, const char* tag_name, const char* first_attribute_name, ...) { va_list arg_list; const char *attribute_name, *attribute_value; fputs(sbeg, xml_file); fputc('<', xml_file); fputs(tag_name, xml_file); va_start(arg_list, first_attribute_name); attribute_name= first_attribute_name; while (attribute_name != NullS) { attribute_value= va_arg(arg_list, char *); DBUG_ASSERT(attribute_value != NullS); fputc(' ', xml_file); fputs(attribute_name, xml_file); fputc('\"', xml_file); print_quoted_xml(xml_file, attribute_value, strlen(attribute_value), 0); fputc('\"', xml_file); attribute_name= va_arg(arg_list, char *); } va_end(arg_list); fputc('>', xml_file); fputs(line_end, xml_file); check_io(xml_file); } /* Print xml tag with for a field that is null SYNOPSIS print_xml_null_tag() xml_file - output file sbeg - line beginning stag_atr - tag and attribute sval - value of attribute line_end - line ending DESCRIPTION Print tag with one attribute to the xml_file. Format is: <stag_atr="sval" xsi:nil="true"/> NOTE sval MUST be a NULL terminated string. 
sval string will be qouted before output. */ static void print_xml_null_tag(FILE * xml_file, const char* sbeg, const char* stag_atr, const char* sval, const char* line_end) { fputs(sbeg, xml_file); fputs("<", xml_file); fputs(stag_atr, xml_file); fputs("\"", xml_file); print_quoted_xml(xml_file, sval, strlen(sval), 0); fputs("\" xsi:nil=\"true\" />", xml_file); fputs(line_end, xml_file); check_io(xml_file); } /** Print xml CDATA section. @param xml_file - output file @param str - string to print @param len - length of the string @note This function also takes care of the presence of '[[>' string in the str. If found, the CDATA section is broken into two CDATA sections, <![CDATA[]]]]> and <![CDATA[>]]. */ static void print_xml_cdata(FILE *xml_file, const char *str, ulong len) { const char *end; fputs("<![CDATA[\n", xml_file); for (end= str + len; str != end; str++) { switch(*str) { case ']': if ((*(str + 1) == ']') && (*(str + 2) =='>')) { fputs("]]]]><![CDATA[>", xml_file); str += 2; continue; } /* fall through */ default: fputc(*str, xml_file); break; } } fputs("\n]]>\n", xml_file); check_io(xml_file); } /* Print xml tag with many attributes. SYNOPSIS print_xml_row() xml_file - output file row_name - xml tag name tableRes - query result row - result row str_create - create statement header string DESCRIPTION Print tag with many attribute to the xml_file. Format is: \t\t<row_name Atr1="Val1" Atr2="Val2"... /> NOTE All atributes and values will be quoted before output. */ static void print_xml_row(FILE *xml_file, const char *row_name, MYSQL_RES *tableRes, MYSQL_ROW *row, const char *str_create) { uint i; char *create_stmt_ptr= NULL; ulong create_stmt_len= 0; MYSQL_FIELD *field; ulong *lengths= mysql_fetch_lengths(tableRes); fprintf(xml_file, "\t\t<%s", row_name); check_io(xml_file); mysql_field_seek(tableRes, 0); for (i= 0; (field= mysql_fetch_field(tableRes)); i++) { if ((*row)[i]) { /* For 'create' statements, dump using CDATA. 
*/ if ((str_create) && (strcmp(str_create, field->name) == 0)) { create_stmt_ptr= (*row)[i]; create_stmt_len= lengths[i]; } else { fputc(' ', xml_file); print_quoted_xml(xml_file, field->name, field->name_length, 1); fputs("=\"", xml_file); print_quoted_xml(xml_file, (*row)[i], lengths[i], 0); fputc('"', xml_file); check_io(xml_file); } } } if (create_stmt_len) { fputs(">\n", xml_file); print_xml_cdata(xml_file, create_stmt_ptr, create_stmt_len); fprintf(xml_file, "\t\t</%s>\n", row_name); } else fputs(" />\n", xml_file); check_io(xml_file); } /** Print xml comments. @param xml_file - output file @param len - length of comment message @param comment_string - comment message @description Print the comment message in the format: "<!-- \n comment string \n -->\n" @note Any occurrence of continuous hyphens will be squeezed to a single hyphen. */ static void print_xml_comment(FILE *xml_file, ulong len, const char *comment_string) { const char* end; fputs("<!-- ", xml_file); for (end= comment_string + len; comment_string != end; comment_string++) { /* The string "--" (double-hyphen) MUST NOT occur within xml comments. */ switch (*comment_string) { case '-': if (*(comment_string + 1) == '-') /* Only one hyphen allowed. */ break; default: fputc(*comment_string, xml_file); break; } } fputs(" -->\n", xml_file); check_io(xml_file); } /* A common printing function for xml and non-xml modes. */ static void print_comment(FILE *sql_file, my_bool is_error, const char *format, ...) { static char comment_buff[COMMENT_LENGTH]; va_list args; /* If its an error message, print it ignoring opt_comments. 
*/ if (!is_error && !opt_comments) return; va_start(args, format); my_vsnprintf(comment_buff, COMMENT_LENGTH, format, args); va_end(args); if (!opt_xml) { fputs(comment_buff, sql_file); check_io(sql_file); return; } print_xml_comment(sql_file, strlen(comment_buff), comment_buff); } /* create_delimiter Generate a new (null-terminated) string that does not exist in query and is therefore suitable for use as a query delimiter. Store this delimiter in delimiter_buff . This is quite simple in that it doesn't even try to parse statements as an interpreter would. It merely returns a string that is not in the query, which is much more than adequate for constructing a delimiter. RETURN ptr to the delimiter on Success NULL on Failure */ static char *create_delimiter(char *query, char *delimiter_buff, int delimiter_max_size) { int proposed_length; char *presence; delimiter_buff[0]= ';'; /* start with one semicolon, and */ for (proposed_length= 2; proposed_length < delimiter_max_size; delimiter_max_size++) { delimiter_buff[proposed_length-1]= ';'; /* add semicolons, until */ delimiter_buff[proposed_length]= '\0'; presence = strstr(query, delimiter_buff); if (presence == NULL) { /* the proposed delimiter is not in the query. */ return delimiter_buff; } } return NULL; /* but if we run out of space, return nothing at all. */ } /* dump_events_for_db -- retrieves list of events for a given db, and prints out the CREATE EVENT statement into the output (the dump). 
RETURN 0 Success 1 Error */ static uint dump_events_for_db(char *db) { char query_buff[QUERY_LENGTH]; char db_name_buff[NAME_LEN*2+3], name_buff[NAME_LEN*2+3]; char *event_name; char delimiter[QUERY_LENGTH]; FILE *sql_file= md_result_file; MYSQL_RES *event_res, *event_list_res; MYSQL_ROW row, event_list_row; char db_cl_name[MY_CS_NAME_SIZE]; int db_cl_altered= FALSE; DBUG_ENTER("dump_events_for_db"); DBUG_PRINT("enter", ("db: '%s'", db)); mysql_real_escape_string(mysql, db_name_buff, db, strlen(db)); /* nice comments */ print_comment(sql_file, 0, "\n--\n-- Dumping events for database '%s'\n--\n", db); /* not using "mysql_query_with_error_report" because we may have not enough privileges to lock mysql.events. */ if (lock_tables) mysql_query(mysql, "LOCK TABLES mysql.event READ"); if (mysql_query_with_error_report(mysql, &event_list_res, "show events")) DBUG_RETURN(0); strcpy(delimiter, ";"); if (mysql_num_rows(event_list_res) > 0) { if (opt_xml) fputs("\t<events>\n", sql_file); else { fprintf(sql_file, "/*!50106 SET @save_time_zone= @@TIME_ZONE */ ;\n"); /* Get database collation. */ if (fetch_db_collation(db_name_buff, db_cl_name, sizeof (db_cl_name))) DBUG_RETURN(1); } if (switch_character_set_results(mysql, "binary")) DBUG_RETURN(1); while ((event_list_row= mysql_fetch_row(event_list_res)) != NULL) { event_name= quote_name(event_list_row[1], name_buff, 0); DBUG_PRINT("info", ("retrieving CREATE EVENT for %s", name_buff)); my_snprintf(query_buff, sizeof(query_buff), "SHOW CREATE EVENT %s", event_name); if (mysql_query_with_error_report(mysql, &event_res, query_buff)) DBUG_RETURN(1); while ((row= mysql_fetch_row(event_res)) != NULL) { if (opt_xml) { print_xml_row(sql_file, "event", event_res, &row, "Create Event"); continue; } /* if the user has EXECUTE privilege he can see event names, but not the event body! 
*/ if (strlen(row[3]) != 0) { char *query_str; if (opt_drop) fprintf(sql_file, "/*!50106 DROP EVENT IF EXISTS %s */%s\n", event_name, delimiter); if (create_delimiter(row[3], delimiter, sizeof(delimiter)) == NULL) { fprintf(stderr, "%s: Warning: Can't create delimiter for event '%s'\n", my_progname, event_name); DBUG_RETURN(1); } fprintf(sql_file, "DELIMITER %s\n", delimiter); if (mysql_num_fields(event_res) >= 7) { if (switch_db_collation(sql_file, db_name_buff, delimiter, db_cl_name, row[6], &db_cl_altered)) { DBUG_RETURN(1); } switch_cs_variables(sql_file, delimiter, row[4], /* character_set_client */ row[4], /* character_set_results */ row[5]); /* collation_connection */ } else { /* mysqldump is being run against the server, that does not provide character set information in SHOW CREATE statements. NOTE: the dump may be incorrect, since character set information is required in order to restore event properly. */ fprintf(sql_file, "--\n" "-- WARNING: old server version. " "The following dump may be incomplete.\n" "--\n"); } switch_sql_mode(sql_file, delimiter, row[1]); switch_time_zone(sql_file, delimiter, row[2]); query_str= cover_definer_clause(row[3], strlen(row[3]), C_STRING_WITH_LEN("50117"), C_STRING_WITH_LEN("50106"), C_STRING_WITH_LEN(" EVENT")); fprintf(sql_file, "/*!50106 %s */ %s\n", (const char *) (query_str != NULL ? 
query_str : row[3]), (const char *) delimiter); restore_time_zone(sql_file, delimiter); restore_sql_mode(sql_file, delimiter); if (mysql_num_fields(event_res) >= 7) { restore_cs_variables(sql_file, delimiter); if (db_cl_altered) { if (restore_db_collation(sql_file, db_name_buff, delimiter, db_cl_name)) DBUG_RETURN(1); } } } } /* end of event printing */ mysql_free_result(event_res); } /* end of list of events */ if (opt_xml) { fputs("\t</events>\n", sql_file); check_io(sql_file); } else { fprintf(sql_file, "DELIMITER ;\n"); fprintf(sql_file, "/*!50106 SET TIME_ZONE= @save_time_zone */ ;\n"); } if (switch_character_set_results(mysql, default_charset)) DBUG_RETURN(1); } mysql_free_result(event_list_res); if (lock_tables) (void) mysql_query_with_error_report(mysql, 0, "UNLOCK TABLES"); DBUG_RETURN(0); } /* Print hex value for blob data. SYNOPSIS print_blob_as_hex() output_file - output file str - string to print len - its length DESCRIPTION Print hex value for blob data. */ static void print_blob_as_hex(FILE *output_file, const char *str, ulong len) { /* sakaik got the idea to to provide blob's in hex notation. */ const char *ptr= str, *end= ptr + len; for (; ptr < end ; ptr++) fprintf(output_file, "%02X", *((uchar *)ptr)); check_io(output_file); } /* dump_routines_for_db -- retrieves list of routines for a given db, and prints out the CREATE PROCEDURE definition into the output (the dump). 
  This function has logic to print the appropriate syntax depending on whether
  this is a procedure or functions

  RETURN
    0  Success
    1  Error
*/

static uint dump_routines_for_db(char *db)
{
  char       query_buff[QUERY_LENGTH];
  const char *routine_type[]= {"FUNCTION", "PROCEDURE"};
  char       db_name_buff[NAME_LEN*2+3], name_buff[NAME_LEN*2+3];
  char       *routine_name;
  int        i;
  FILE       *sql_file= md_result_file;
  MYSQL_RES  *routine_res, *routine_list_res;
  MYSQL_ROW  row, routine_list_row;

  char       db_cl_name[MY_CS_NAME_SIZE];
  int        db_cl_altered= FALSE;

  DBUG_ENTER("dump_routines_for_db");
  DBUG_PRINT("enter", ("db: '%s'", db));

  mysql_real_escape_string(mysql, db_name_buff, db, strlen(db));

  /* nice comments */
  print_comment(sql_file, 0,
                "\n--\n-- Dumping routines for database '%s'\n--\n", db);

  /*
    not using "mysql_query_with_error_report" because we may have not
    enough privileges to lock mysql.proc.
  */
  if (lock_tables)
    mysql_query(mysql, "LOCK TABLES mysql.proc READ");

  /* Get database collation. */

  if (fetch_db_collation(db_name_buff, db_cl_name, sizeof (db_cl_name)))
    DBUG_RETURN(1);

  if (switch_character_set_results(mysql, "binary"))
    DBUG_RETURN(1);

  if (opt_xml)
    fputs("\t<routines>\n", sql_file);

  /* 0, retrieve and dump functions, 1, procedures */
  for (i= 0; i <= 1; i++)
  {
    my_snprintf(query_buff, sizeof(query_buff),
                "SHOW %s STATUS WHERE Db = '%s'",
                routine_type[i], db_name_buff);

    if (mysql_query_with_error_report(mysql, &routine_list_res, query_buff))
      DBUG_RETURN(1);

    if (mysql_num_rows(routine_list_res))
    {
      while ((routine_list_row= mysql_fetch_row(routine_list_res)))
      {
        routine_name= quote_name(routine_list_row[1], name_buff, 0);
        DBUG_PRINT("info", ("retrieving CREATE %s for %s", routine_type[i],
                            name_buff));
        my_snprintf(query_buff, sizeof(query_buff), "SHOW CREATE %s %s",
                    routine_type[i], routine_name);

        if (mysql_query_with_error_report(mysql, &routine_res, query_buff))
          DBUG_RETURN(1);

        while ((row= mysql_fetch_row(routine_res)))
        {
          /*
            if the user has EXECUTE privilege he see routine names, but NOT
            the routine body of other routines that are not the creator of!
          */
          DBUG_PRINT("info",("length of body for %s row[2] '%s' is %d",
                             routine_name, row[2] ? row[2] : "(null)",
                             row[2] ? (int) strlen(row[2]) : 0));
          if (row[2] == NULL)
          {
            /* NULL body: the user lacks the privilege to see it. */
            print_comment(sql_file, 1, "\n-- insufficient privileges to %s\n",
                          query_buff);
            print_comment(sql_file, 1,
                          "-- does %s have permissions on mysql.proc?\n\n",
                          current_user);
            maybe_die(EX_MYSQLERR,"%s has insufficent privileges to %s!",
                      current_user, query_buff);
          }
          else if (strlen(row[2]))
          {
            if (opt_xml)
            {
              if (i)                            // Procedures.
                print_xml_row(sql_file, "routine", routine_res, &row,
                              "Create Procedure");
              else                              // Functions.
                print_xml_row(sql_file, "routine", routine_res, &row,
                              "Create Function");
              continue;
            }
            if (opt_drop)
              fprintf(sql_file, "/*!50003 DROP %s IF EXISTS %s */;\n",
                      routine_type[i], routine_name);

            if (mysql_num_fields(routine_res) >= 6)
            {
              if (switch_db_collation(sql_file, db_name_buff, ";",
                                      db_cl_name, row[5], &db_cl_altered))
              {
                DBUG_RETURN(1);
              }

              switch_cs_variables(sql_file, ";",
                                  row[3],   /* character_set_client */
                                  row[3],   /* character_set_results */
                                  row[4]);  /* collation_connection */
            }
            else
            {
              /*
                mysqldump is being run against the server, that does not
                provide character set information in SHOW CREATE
                statements.

                NOTE: the dump may be incorrect, since character set
                information is required in order to restore stored
                procedure/function properly.
              */

              fprintf(sql_file,
                      "--\n"
                      "-- WARNING: old server version. "
                      "The following dump may be incomplete.\n"
                      "--\n");
            }


            switch_sql_mode(sql_file, ";", row[1]);

            fprintf(sql_file,
                    "DELIMITER ;;\n"
                    "%s ;;\n"
                    "DELIMITER ;\n",
                    (const char *) row[2]);

            restore_sql_mode(sql_file, ";");

            if (mysql_num_fields(routine_res) >= 6)
            {
              restore_cs_variables(sql_file, ";");

              if (db_cl_altered)
              {
                if (restore_db_collation(sql_file, db_name_buff, ";",
                                         db_cl_name))
                  DBUG_RETURN(1);
              }
            }

          }
        } /* end of routine printing */
        mysql_free_result(routine_res);

      } /* end of list of routines */
    }
    mysql_free_result(routine_list_res);
  } /* end of for i (0 .. 1) */

  if (opt_xml)
  {
    fputs("\t</routines>\n", sql_file);
    check_io(sql_file);
  }

  if (switch_character_set_results(mysql, default_charset))
    DBUG_RETURN(1);

  if (lock_tables)
    (void) mysql_query_with_error_report(mysql, 0, "UNLOCK TABLES");
  DBUG_RETURN(0);
}


/* general_log or slow_log tables under mysql database */
static inline my_bool general_log_or_slow_log_tables(const char *db,
                                                     const char *table)
{
  return (!my_strcasecmp(charset_info, db, "mysql")) &&
          (!my_strcasecmp(charset_info, table, "general_log") ||
           !my_strcasecmp(charset_info, table, "slow_log"));
}

/*
  get_table_structure -- retrievs database structure, prints out corresponding
  CREATE statement and fills out insert_pat if the table is the type we will
  be dumping.

  ARGS
    table       - table name
    db          - db name
    table_type  - table type, e.g.
"MyISAM" or "InnoDB", but also "VIEW" ignore_flag - what we must particularly ignore - see IGNORE_ defines above RETURN number of fields in table, 0 if error */ static uint get_table_structure(char *table, char *db, char *table_type, char *ignore_flag) { my_bool init=0, write_data, complete_insert; my_ulonglong num_fields; char *result_table, *opt_quoted_table; const char *insert_option; char name_buff[NAME_LEN+3],table_buff[NAME_LEN*2+3]; char table_buff2[NAME_LEN*2+3], query_buff[QUERY_LENGTH]; const char *show_fields_stmt= "SELECT `COLUMN_NAME` AS `Field`, " "`COLUMN_TYPE` AS `Type`, " "`IS_NULLABLE` AS `Null`, " "`COLUMN_KEY` AS `Key`, " "`COLUMN_DEFAULT` AS `Default`, " "`EXTRA` AS `Extra`, " "`COLUMN_COMMENT` AS `Comment` " "FROM `INFORMATION_SCHEMA`.`COLUMNS` WHERE " "TABLE_SCHEMA = '%s' AND TABLE_NAME = '%s'"; FILE *sql_file= md_result_file; int len; my_bool is_log_table; MYSQL_RES *result; MYSQL_ROW row; DBUG_ENTER("get_table_structure"); DBUG_PRINT("enter", ("db: %s table: %s", db, table)); *ignore_flag= check_if_ignore_table(table, table_type); complete_insert= 0; if ((write_data= !(*ignore_flag & IGNORE_DATA))) { complete_insert= opt_complete_insert; if (!insert_pat_inited) { insert_pat_inited= 1; init_dynamic_string_checked(&insert_pat, "", 1024, 1024); } else dynstr_set_checked(&insert_pat, ""); } insert_option= (opt_ignore ? 
" IGNORE " : ""); verbose_msg("-- Retrieving table structure for table %s...\n", table); len= my_snprintf(query_buff, sizeof(query_buff), "SET SQL_QUOTE_SHOW_CREATE=%d", (opt_quoted || opt_keywords)); if (!create_options) my_stpcpy(query_buff+len, "/*!40102 ,SQL_MODE=concat(@@sql_mode, _utf8 ',NO_KEY_OPTIONS,NO_TABLE_OPTIONS,NO_FIELD_OPTIONS') */"); result_table= quote_name(table, table_buff, 1); opt_quoted_table= quote_name(table, table_buff2, 0); if (opt_order_by_primary) order_by= primary_key_fields(result_table); if (!opt_xml && !mysql_query_with_error_report(mysql, 0, query_buff)) { /* using SHOW CREATE statement */ if (!opt_no_create_info) { /* Make an sql-file, if path was given iow. option -T was given */ char buff[20+FN_REFLEN]; MYSQL_FIELD *field; my_snprintf(buff, sizeof(buff), "show create table %s", result_table); if (switch_character_set_results(mysql, "binary") || mysql_query_with_error_report(mysql, &result, buff) || switch_character_set_results(mysql, default_charset)) DBUG_RETURN(0); if (path) { if (!(sql_file= open_sql_file_for_table(table, O_WRONLY))) DBUG_RETURN(0); write_header(sql_file, db); } if (strcmp (table_type, "VIEW") == 0) /* view */ print_comment(sql_file, 0, "\n--\n-- Temporary table structure for view %s\n--\n\n", result_table); else print_comment(sql_file, 0, "\n--\n-- Table structure for table %s\n--\n\n", result_table); if (opt_drop) { /* Even if the "table" is a view, we do a DROP TABLE here. The view-specific code below fills in the DROP VIEW. We will skip the DROP TABLE for general_log and slow_log, since those stmts will fail, in case we apply dump by enabling logging. 
*/ if (!general_log_or_slow_log_tables(db, table)) fprintf(sql_file, "DROP TABLE IF EXISTS %s;\n", opt_quoted_table); check_io(sql_file); } field= mysql_fetch_field_direct(result, 0); if (strcmp(field->name, "View") == 0) { char *scv_buff= NULL; my_ulonglong n_cols; verbose_msg("-- It's a view, create dummy table for view\n"); /* save "show create" statement for later */ if ((row= mysql_fetch_row(result)) && (scv_buff=row[1])) scv_buff= my_strdup(PSI_NOT_INSTRUMENTED, scv_buff, MYF(0)); mysql_free_result(result); /* Create a table with the same name as the view and with columns of the same name in order to satisfy views that depend on this view. The table will be removed when the actual view is created. The properties of each column, are not preserved in this temporary table, because they are not necessary. This will not be necessary once we can determine dependencies between views and can simply dump them in the appropriate order. */ my_snprintf(query_buff, sizeof(query_buff), "SHOW FIELDS FROM %s", result_table); if (switch_character_set_results(mysql, "binary") || mysql_query_with_error_report(mysql, &result, query_buff) || switch_character_set_results(mysql, default_charset)) { /* View references invalid or privileged table/col/fun (err 1356), so we cannot create a stand-in table. Be defensive and dump a comment with the view's 'show create' statement. (Bug #17371) */ if (mysql_errno(mysql) == ER_VIEW_INVALID) fprintf(sql_file, "\n-- failed on view %s: %s\n\n", result_table, scv_buff ? scv_buff : ""); my_free(scv_buff); DBUG_RETURN(0); } else my_free(scv_buff); n_cols= mysql_num_rows(result); if (0 != n_cols) { /* The actual formula is based on the column names and how the .FRM files are stored and is too volatile to be repeated here. Thus we simply warn the user if the columns exceed a limit we know works most of the time. 
          */
          if (n_cols >= 1000)
            fprintf(stderr,
                    "-- Warning: Creating a stand-in table for view %s may"
                    " fail when replaying the dump file produced because "
                    "of the number of columns exceeding 1000. Exercise "
                    "caution when replaying the produced dump file.\n",
                    table);
          if (opt_drop)
          {
            /*
              We have already dropped any table of the same name above, so
              here we just drop the view.
            */

            fprintf(sql_file, "/*!50001 DROP VIEW IF EXISTS %s*/;\n",
                    opt_quoted_table);
            check_io(sql_file);
          }

          fprintf(sql_file,
                  "SET @saved_cs_client     = @@character_set_client;\n"
                  "SET character_set_client = utf8;\n"
                  "/*!50001 CREATE TABLE %s (\n",
                  result_table);

          /*
            Get first row, following loop will prepend comma - keeps from
            having to know if the row being printed is last to determine if
            there should be a _trailing_ comma.
          */

          row= mysql_fetch_row(result);

          /*
            The actual column type doesn't matter anyway, since the table will
            be dropped at run time.
            We do tinyint to avoid hitting the row size limit.
          */
          fprintf(sql_file, "  %s tinyint NOT NULL",
                  quote_name(row[0], name_buff, 0));

          while((row= mysql_fetch_row(result)))
          {
            /* col name, col type */
            fprintf(sql_file, ",\n  %s tinyint NOT NULL",
                    quote_name(row[0], name_buff, 0));
          }

          /*
            Stand-in tables are always MyISAM tables as the default
            engine might have a column-limit that's lower than the
            number of columns in the view, and MyISAM support is
            guaranteed to be in the server anyway.
          */
          fprintf(sql_file,
                  "\n) ENGINE=MyISAM */;\n"
                  "SET character_set_client = @saved_cs_client;\n");

          check_io(sql_file);
        }

        mysql_free_result(result);

        if (path)
          my_fclose(sql_file, MYF(MY_WME));

        seen_views= 1;
        DBUG_RETURN(0);
      }

      row= mysql_fetch_row(result);

      is_log_table= general_log_or_slow_log_tables(db, table);
      if (is_log_table)
        row[1]+= 13; /* strlen("CREATE TABLE ")= 13 */
      if (opt_compatible_mode & 3)
      {
        fprintf(sql_file,
                is_log_table ? "CREATE TABLE IF NOT EXISTS %s;\n" : "%s;\n",
                row[1]);
      }
      else
      {
        fprintf(sql_file,
                "/*!40101 SET @saved_cs_client     = @@character_set_client */;\n"
                "/*!40101 SET character_set_client = utf8 */;\n"
                "%s%s;\n"
                "/*!40101 SET character_set_client = @saved_cs_client */;\n",
                is_log_table ? "CREATE TABLE IF NOT EXISTS " : "",
                row[1]);
      }

      check_io(sql_file);
      mysql_free_result(result);
    }
    my_snprintf(query_buff, sizeof(query_buff), "show fields from %s",
                result_table);
    if (mysql_query_with_error_report(mysql, &result, query_buff))
    {
      if (path)
        my_fclose(sql_file, MYF(MY_WME));
      DBUG_RETURN(0);
    }

    /*
      If write_data is true, then we build up insert statements for
      the table's data. Note: in subsequent lines of code, this test
      will have to be performed each time we are appending to
      insert_pat.
    */
    if (write_data)
    {
      if (opt_replace_into)
        dynstr_append_checked(&insert_pat, "REPLACE ");
      else
        dynstr_append_checked(&insert_pat, "INSERT ");
      dynstr_append_checked(&insert_pat, insert_option);
      dynstr_append_checked(&insert_pat, "INTO ");
      dynstr_append_checked(&insert_pat, opt_quoted_table);
      if (complete_insert)
      {
        dynstr_append_checked(&insert_pat, " (");
      }
      else
      {
        dynstr_append_checked(&insert_pat, " VALUES ");
        if (!extended_insert)
          dynstr_append_checked(&insert_pat, "(");
      }
    }

    while ((row= mysql_fetch_row(result)))
    {
      if (complete_insert)
      {
        if (init)
        {
          dynstr_append_checked(&insert_pat, ", ");
        }
        init=1;
        dynstr_append_checked(&insert_pat,
                              quote_name(row[SHOW_FIELDNAME], name_buff, 0));
      }
    }
    num_fields= mysql_num_rows(result);
    mysql_free_result(result);
  }
  else
  {
    verbose_msg("%s: Warning: Can't set SQL_QUOTE_SHOW_CREATE option (%s)\n",
                my_progname, mysql_error(mysql));

    my_snprintf(query_buff, sizeof(query_buff), show_fields_stmt, db, table);

    if (mysql_query_with_error_report(mysql, &result, query_buff))
      DBUG_RETURN(0);

    /* Make an sql-file, if path was given iow. option -T was given */
    if (!opt_no_create_info)
    {
      if (path)
      {
        if (!(sql_file= open_sql_file_for_table(table, O_WRONLY)))
          DBUG_RETURN(0);
        write_header(sql_file, db);
      }

      print_comment(sql_file, 0,
                    "\n--\n-- Table structure for table %s\n--\n\n",
                    result_table);
      if (opt_drop)
        fprintf(sql_file, "DROP TABLE IF EXISTS %s;\n", result_table);
      if (!opt_xml)
        fprintf(sql_file, "CREATE TABLE %s (\n", result_table);
      else
        print_xml_tag(sql_file, "\t", "\n", "table_structure", "name=", table,
                NullS);
      check_io(sql_file);
    }

    if (write_data)
    {
      if (opt_replace_into)
        dynstr_append_checked(&insert_pat, "REPLACE ");
      else
        dynstr_append_checked(&insert_pat, "INSERT ");
      dynstr_append_checked(&insert_pat, insert_option);
      dynstr_append_checked(&insert_pat, "INTO ");
      dynstr_append_checked(&insert_pat, result_table);
      if (complete_insert)
        dynstr_append_checked(&insert_pat, " (");
      else
      {
        dynstr_append_checked(&insert_pat, " VALUES ");
        if (!extended_insert)
          dynstr_append_checked(&insert_pat, "(");
      }
    }

    while ((row= mysql_fetch_row(result)))
    {
      ulong *lengths= mysql_fetch_lengths(result);
      if (init)
      {
        if (!opt_xml && !opt_no_create_info)
        {
          fputs(",\n",sql_file);
          check_io(sql_file);
        }
        if (complete_insert)
          dynstr_append_checked(&insert_pat, ", ");
      }
      init=1;
      if (complete_insert)
        dynstr_append_checked(&insert_pat,
                              quote_name(row[SHOW_FIELDNAME], name_buff, 0));
      if (!opt_no_create_info)
      {
        if (opt_xml)
        {
          print_xml_row(sql_file, "field", result, &row, NullS);
          continue;
        }

        if (opt_keywords)
          fprintf(sql_file, "  %s.%s %s", result_table,
                  quote_name(row[SHOW_FIELDNAME],name_buff, 0),
                  row[SHOW_TYPE]);
        else
          fprintf(sql_file, "  %s %s", quote_name(row[SHOW_FIELDNAME],
                                                  name_buff, 0),
                  row[SHOW_TYPE]);
        if (row[SHOW_DEFAULT])
        {
          fputs(" DEFAULT ", sql_file);
          unescape(sql_file, row[SHOW_DEFAULT], lengths[SHOW_DEFAULT]);
        }
        if (!row[SHOW_NULL][0])
          fputs(" NOT NULL", sql_file);
        if (row[SHOW_EXTRA][0])
          fprintf(sql_file, " %s",row[SHOW_EXTRA]);
        check_io(sql_file);
      }
    }
    num_fields= mysql_num_rows(result);
    mysql_free_result(result);

    if (!opt_no_create_info)
    {
      /* Make an sql-file, if path was given iow. option -T was given */
      char buff[20+FN_REFLEN];
      uint keynr,primary_key;
      my_snprintf(buff, sizeof(buff), "show keys from %s", result_table);
      if (mysql_query_with_error_report(mysql, &result, buff))
      {
        if (mysql_errno(mysql) == ER_WRONG_OBJECT)
        {
          /* it is VIEW */
          fputs("\t\t<options Comment=\"view\" />\n", sql_file);
          goto continue_xml;
        }
        fprintf(stderr, "%s: Can't get keys for table %s (%s)\n",
                my_progname, result_table, mysql_error(mysql));
        if (path)
          my_fclose(sql_file, MYF(MY_WME));
        DBUG_RETURN(0);
      }

      /* Find first which key is primary key */
      keynr=0;
      primary_key=INT_MAX;
      while ((row= mysql_fetch_row(result)))
      {
        if (atoi(row[3]) == 1)
        {
          keynr++;
          if (!strcmp(row[2],"PRIMARY"))
          {
            primary_key=keynr;
            break;
          }
        }
      }
      mysql_data_seek(result,0);
      keynr=0;
      while ((row= mysql_fetch_row(result)))
      {
        if (opt_xml)
        {
          print_xml_row(sql_file, "key", result, &row, NullS);
          continue;
        }

        if (atoi(row[3]) == 1)
        {
          if (keynr++)
            putc(')', sql_file);
          if (atoi(row[1]))       /* Test if duplicate key */
            /* Duplicate allowed */
            fprintf(sql_file, ",\n  KEY %s (",quote_name(row[2],name_buff,0));
          else if (keynr == primary_key)
            fputs(",\n  PRIMARY KEY (",sql_file); /* First UNIQUE is primary */
          else
            fprintf(sql_file, ",\n  UNIQUE %s (",quote_name(row[2],name_buff,
                                                            0));
        }
        else
          putc(',', sql_file);
        fputs(quote_name(row[4], name_buff, 0), sql_file);
        if (row[7])
          fprintf(sql_file, " (%s)",row[7]);      /* Sub key */
        check_io(sql_file);
      }
      mysql_free_result(result);
      if (!opt_xml)
      {
        if (keynr)
          putc(')', sql_file);
        fputs("\n)",sql_file);
        check_io(sql_file);
      }

      /* Get MySQL specific create options */
      if (create_options)
      {
        char show_name_buff[NAME_LEN*2+2+24];

        /* Check memory for quote_for_like() */
        my_snprintf(buff, sizeof(buff), "show table status like %s",
                    quote_for_like(table, show_name_buff));

        if (mysql_query_with_error_report(mysql, &result, buff))
        {
          if (mysql_errno(mysql) != ER_PARSE_ERROR)
          {                                     /* If old MySQL version */
            verbose_msg("-- Warning: Couldn't get status information for " \
                        "table %s (%s)\n", result_table,mysql_error(mysql));
          }
        }
        else if (!(row= mysql_fetch_row(result)))
        {
          fprintf(stderr,
                  "Error: Couldn't read status information for table %s (%s)\n",
                  result_table,mysql_error(mysql));
        }
        else
        {
          if (opt_xml)
            print_xml_row(sql_file, "options", result, &row, NullS);
          else
          {
            fputs("/*!",sql_file);
            print_value(sql_file,result,row,"engine=","Engine",0);
            print_value(sql_file,result,row,"","Create_options",0);
            print_value(sql_file,result,row,"comment=","Comment",1);
            fputs(" */",sql_file);
            check_io(sql_file);
          }
        }
        mysql_free_result(result);              /* Is always safe to free */
      }
continue_xml:
      if (!opt_xml)
        fputs(";\n", sql_file);
      else
        fputs("\t</table_structure>\n", sql_file);
      check_io(sql_file);
    }
  }
  if (complete_insert)
  {
    dynstr_append_checked(&insert_pat, ") VALUES ");
    if (!extended_insert)
      dynstr_append_checked(&insert_pat, "(");
  }
  if (sql_file != md_result_file)
  {
    fputs("\n", sql_file);
    write_footer(sql_file);
    my_fclose(sql_file, MYF(MY_WME));
  }
  DBUG_RETURN((uint) num_fields);
} /* get_table_structure */

/*
  Emit a trigger definition using only the SHOW TRIGGERS row, for servers
  too old to support SHOW CREATE TRIGGER.  NOTE(review): the dump may be
  incomplete in this mode (see warning emitted below).
*/
static void dump_trigger_old(FILE *sql_file, MYSQL_RES *show_triggers_rs,
                             MYSQL_ROW *show_trigger_row,
                             const char *table_name)
{
  char quoted_table_name_buf[NAME_LEN * 2 + 3];
  char *quoted_table_name= quote_name(table_name, quoted_table_name_buf, 1);

  char name_buff[NAME_LEN * 4 + 3];
  const char *xml_msg= "\nWarning! mysqldump being run against old server "
                       "that does not\nsupport 'SHOW CREATE TRIGGERS' "
                       "statement. Skipping..\n";

  DBUG_ENTER("dump_trigger_old");

  if (opt_xml)
  {
    print_xml_comment(sql_file, strlen(xml_msg), xml_msg);
    check_io(sql_file);
    DBUG_VOID_RETURN;
  }

  fprintf(sql_file,
          "--\n"
          "-- WARNING: old server version. "
          "The following dump may be incomplete.\n"
          "--\n");

  if (opt_compact)
    fprintf(sql_file, "/*!50003 SET @OLD_SQL_MODE=@@SQL_MODE*/;\n");

  if (opt_drop_trigger)
    fprintf(sql_file, "/*!50032 DROP TRIGGER IF EXISTS %s */;\n",
            (*show_trigger_row)[0]);

  fprintf(sql_file,
          "DELIMITER ;;\n"
          "/*!50003 SET SESSION SQL_MODE=\"%s\" */;;\n"
          "/*!50003 CREATE */ ",
          (*show_trigger_row)[6]);

  if (mysql_num_fields(show_triggers_rs) > 7)
  {
    /*
      mysqldump can be run against the server, that does not support
      definer in triggers (there is no DEFINER column in SHOW TRIGGERS
      output). So, we should check if we have this column before
      accessing it.
    */

    size_t user_name_len;
    char user_name_str[USERNAME_LENGTH + 1];
    char quoted_user_name_str[USERNAME_LENGTH * 2 + 3];
    size_t host_name_len;
    char host_name_str[HOSTNAME_LENGTH + 1];
    char quoted_host_name_str[HOSTNAME_LENGTH * 2 + 3];

    parse_user((*show_trigger_row)[7],
               strlen((*show_trigger_row)[7]),
               user_name_str, &user_name_len,
               host_name_str, &host_name_len);

    fprintf(sql_file,
            "/*!50017 DEFINER=%s@%s */ ",
            quote_name(user_name_str, quoted_user_name_str, FALSE),
            quote_name(host_name_str, quoted_host_name_str, FALSE));
  }

  fprintf(sql_file,
          "/*!50003 TRIGGER %s %s %s ON %s FOR EACH ROW%s%s */;;\n"
          "DELIMITER ;\n",
          quote_name((*show_trigger_row)[0], name_buff, 0), /* Trigger */
          (*show_trigger_row)[4], /* Timing */
          (*show_trigger_row)[1], /* Event */
          quoted_table_name,
          (strchr(" \t\n\r", *((*show_trigger_row)[3]))) ? "" : " ",
          (*show_trigger_row)[3] /* Statement */);

  if (opt_compact)
    fprintf(sql_file, "/*!50003 SET SESSION SQL_MODE=@OLD_SQL_MODE */;\n");

  DBUG_VOID_RETURN;
}

/*
  Emit trigger definitions from a SHOW CREATE TRIGGER result set,
  restoring collation/charset/sql_mode context around each statement.
*/
static int dump_trigger(FILE *sql_file, MYSQL_RES *show_create_trigger_rs,
                        const char *db_name,
                        const char *db_cl_name)
{
  MYSQL_ROW  row;
  char       *query_str;
  int        db_cl_altered= FALSE;

  DBUG_ENTER("dump_trigger");

  while ((row= mysql_fetch_row(show_create_trigger_rs)))
  {
    if (opt_xml)
    {
      print_xml_row(sql_file, "trigger", show_create_trigger_rs, &row,
                    "SQL Original Statement");
      check_io(sql_file);
      continue;
    }

    query_str= cover_definer_clause(row[2], strlen(row[2]),
                                    C_STRING_WITH_LEN("50017"),
                                    C_STRING_WITH_LEN("50003"),
                                    C_STRING_WITH_LEN(" TRIGGER"));
    if (switch_db_collation(sql_file, db_name, ";",
                            db_cl_name, row[5], &db_cl_altered))
      DBUG_RETURN(TRUE);

    switch_cs_variables(sql_file, ";",
                        row[3],   /* character_set_client */
                        row[3],   /* character_set_results */
                        row[4]);  /* collation_connection */

    switch_sql_mode(sql_file, ";", row[1]);

    if (opt_drop_trigger)
      fprintf(sql_file, "/*!50032 DROP TRIGGER IF EXISTS %s */;\n",
              row[0]);

    fprintf(sql_file,
            "DELIMITER ;;\n"
            "/*!50003 %s */;;\n"
            "DELIMITER ;\n",
            (const char *) (query_str != NULL ? query_str : row[2]));

    restore_sql_mode(sql_file, ";");
    restore_cs_variables(sql_file, ";");

    if (db_cl_altered)
    {
      if (restore_db_collation(sql_file, db_name, ";", db_cl_name))
        DBUG_RETURN(TRUE);
    }

    /* cover_definer_clause() result is heap-allocated (or NULL). */
    my_free(query_str);
  }

  DBUG_RETURN(FALSE);
}

/**
  Dump the triggers for a given table.

  This should be called after the tables have been dumped in case a trigger
  depends on the existence of a table.

  @param[in] table_name
  @param[in] db_name

  @return Error status.
    @retval TRUE error has occurred.
    @retval FALSE operation succeed.
*/

static int dump_triggers_for_table(char *table_name, char *db_name)
{
  char       name_buff[NAME_LEN*4+3];
  char       query_buff[QUERY_LENGTH];
  uint       old_opt_compatible_mode= opt_compatible_mode;
  MYSQL_RES  *show_triggers_rs;
  MYSQL_ROW  row;
  FILE      *sql_file= md_result_file;

  char       db_cl_name[MY_CS_NAME_SIZE];
  int        ret= TRUE;

  DBUG_ENTER("dump_triggers_for_table");
  DBUG_PRINT("enter", ("db: %s, table_name: %s", db_name, table_name));

  if (path && !(sql_file= open_sql_file_for_table(table_name,
                                                  O_WRONLY | O_APPEND)))
    DBUG_RETURN(1);

  /* Do not use ANSI_QUOTES on triggers in dump */
  opt_compatible_mode&= ~MASK_ANSI_QUOTES;

  /* Get database collation. */

  if (switch_character_set_results(mysql, "binary"))
    goto done;

  if (fetch_db_collation(db_name, db_cl_name, sizeof (db_cl_name)))
    goto done;

  /* Get list of triggers. */

  my_snprintf(query_buff, sizeof(query_buff),
              "SHOW TRIGGERS LIKE %s",
              quote_for_like(table_name, name_buff));

  if (mysql_query_with_error_report(mysql, &show_triggers_rs, query_buff))
    goto done;

  /* Dump triggers. */

  if (! mysql_num_rows(show_triggers_rs))
    goto skip;

  if (opt_xml)
    print_xml_tag(sql_file, "\t", "\n", "triggers", "name=",
                  table_name, NullS);

  while ((row= mysql_fetch_row(show_triggers_rs)))
  {

    my_snprintf(query_buff, sizeof (query_buff),
                "SHOW CREATE TRIGGER %s",
                quote_name(row[0], name_buff, TRUE));

    if (mysql_query(mysql, query_buff))
    {
      /*
        mysqldump is being run against old server, that does not support
        SHOW CREATE TRIGGER statement. We should use SHOW TRIGGERS output.

        NOTE: the dump may be incorrect, as old SHOW TRIGGERS does not
        provide all the necessary information to restore trigger properly.
      */

      dump_trigger_old(sql_file, show_triggers_rs, &row, table_name);
    }
    else
    {
      MYSQL_RES *show_create_trigger_rs= mysql_store_result(mysql);

      /*
        NOTE(review): when dump_trigger() fails here we jump to done
        without freeing show_create_trigger_rs — confirm whether this
        leak on the error path is acceptable.
      */
      if (!show_create_trigger_rs ||
          dump_trigger(sql_file, show_create_trigger_rs, db_name,
                       db_cl_name))
        goto done;

      mysql_free_result(show_create_trigger_rs);
    }
  }

  if (opt_xml)
  {
    fputs("\t</triggers>\n", sql_file);
    check_io(sql_file);
  }

skip:
  mysql_free_result(show_triggers_rs);

  if (switch_character_set_results(mysql, default_charset))
    goto done;

  /* make sure to set back opt_compatible mode to original value */
  opt_compatible_mode=old_opt_compatible_mode;

  ret= FALSE;

done:
  if (path)
    my_fclose(sql_file, MYF(0));

  DBUG_RETURN(ret);
}

/*
  Append a LOAD DATA style option ("TERMINATED BY ...", etc.) to str;
  a NULL option_value means the option was not given and nothing is added.
*/
static void add_load_option(DYNAMIC_STRING *str, const char *option,
                            const char *option_value)
{
  if (!option_value)
  {
    /* Null value means we don't add this option. */
    return;
  }

  dynstr_append_checked(str, option);

  if (strncmp(option_value, "0x", sizeof("0x")-1) == 0)
  {
    /* It's a hex constant, don't escape */
    dynstr_append_checked(str, option_value);
  }
  else
  {
    /* char constant; escape */
    field_escape(str, option_value);
  }
}

/*
  Allow the user to specify field terminator strings like:
  "'", "\", "\\" (escaped backslash), "\t" (tab), "\n" (newline)
  This is done by doubling ' and add a end -\ if needed to avoid
  syntax errors from the SQL parser.
*/

static void field_escape(DYNAMIC_STRING* in, const char *from)
{
  uint end_backslashes= 0;

  dynstr_append_checked(in, "'");

  while (*from)
  {
    dynstr_append_mem_checked(in, from, 1);

    if (*from == '\\')
      end_backslashes^=1;    /* find odd number of backslashes */
    else
    {
      if (*from == '\'' && !end_backslashes)
      {
        /* We want a duplicate of "'" for MySQL */
        dynstr_append_checked(in, "\'");
      }
      end_backslashes=0;
    }
    from++;
  }
  /* Add missing backslashes if user has specified odd number of backs.*/
  if (end_backslashes)
    dynstr_append_checked(in, "\\");

  dynstr_append_checked(in, "'");
}


/* Allocate size bytes for a query buffer; dies on allocation failure. */
static char *alloc_query_str(ulong size)
{
  char *query;

  if (!(query= (char*) my_malloc(PSI_NOT_INSTRUMENTED,
                                 size, MYF(MY_WME))))
    die(EX_MYSQLERR, "Couldn't allocate a query string.");

  return query;
}


/*

 SYNOPSIS
  dump_table()

  dump_table saves database contents as a series of INSERT statements.

  ARGS
   table - table name
   db    - db name

   RETURNS
    void
*/


static void dump_table(char *table, char *db)
{
  char ignore_flag;
  char buf[200], table_buff[NAME_LEN+3];
  DYNAMIC_STRING query_string;
  char table_type[NAME_LEN];
  char *result_table, table_buff2[NAME_LEN*2+3], *opt_quoted_table;
  int error= 0;
  ulong         rownr, row_break, total_length, init_length;
  uint num_fields;
  MYSQL_RES     *res;
  MYSQL_FIELD   *field;
  MYSQL_ROW     row;
  DBUG_ENTER("dump_table");

  /*
    Make sure you get the create table info before the following check for
    --no-data flag below. Otherwise, the create table info won't be printed.
  */
  num_fields= get_table_structure(table, db, table_type, &ignore_flag);

  /*
    The "table" could be a view.  If so, we don't do anything here.
*/
  if (strcmp(table_type, "VIEW") == 0)
    DBUG_VOID_RETURN;

  /* Check --no-data flag */
  if (opt_no_data)
  {
    verbose_msg("-- Skipping dump data for table '%s', --no-data was used\n",
                table);
    DBUG_VOID_RETURN;
  }

  DBUG_PRINT("info", ("ignore_flag: %x num_fields: %d", (int) ignore_flag,
                      num_fields));
  /*
    If the table type is a merge table or any type that has to be
    _completely_ ignored and no data dumped
  */
  if (ignore_flag & IGNORE_DATA)
  {
    verbose_msg("-- Warning: Skipping data for table '%s' because " \
                "it's of type %s\n", table, table_type);
    DBUG_VOID_RETURN;
  }
  /* Check that there are any fields in the table */
  if (num_fields == 0)
  {
    verbose_msg("-- Skipping dump data for table '%s', it has no fields\n",
                table);
    DBUG_VOID_RETURN;
  }

  result_table= quote_name(table,table_buff, 1);
  opt_quoted_table= quote_name(table, table_buff2, 0);

  verbose_msg("-- Sending SELECT query...\n");

  init_dynamic_string_checked(&query_string, "", 1024, 1024);

  if (path)
  {
    /* --tab mode: write table data to <path>/<table>.txt via INTO OUTFILE */
    char filename[FN_REFLEN], tmp_path[FN_REFLEN];

    /*
      Convert the path to native os format
      and resolve to the full filepath.
    */
    convert_dirname(tmp_path,path,NullS);
    my_load_path(tmp_path, tmp_path, NULL);
    fn_format(filename, table, tmp_path, ".txt", MYF(MY_UNPACK_FILENAME));

    /* Must delete the file that 'INTO OUTFILE' will write to */
    my_delete(filename, MYF(0));

    /* convert to a unix path name to stick into the query */
    to_unix_path(filename);

    /* now build the query string */

    dynstr_append_checked(&query_string, "SELECT /*!40001 SQL_NO_CACHE */ * INTO OUTFILE '");
    dynstr_append_checked(&query_string, filename);
    dynstr_append_checked(&query_string, "'");

    dynstr_append_checked(&query_string, " /*!50138 CHARACTER SET ");
    dynstr_append_checked(&query_string,
                          default_charset == mysql_universal_client_charset ?
                          my_charset_bin.name : /* backward compatibility */
                          default_charset);
    dynstr_append_checked(&query_string, " */");

    if (fields_terminated || enclosed || opt_enclosed || escaped)
      dynstr_append_checked(&query_string, " FIELDS");

    add_load_option(&query_string, " TERMINATED BY ", fields_terminated);
    add_load_option(&query_string, " ENCLOSED BY ", enclosed);
    add_load_option(&query_string, " OPTIONALLY ENCLOSED BY ", opt_enclosed);
    add_load_option(&query_string, " ESCAPED BY ", escaped);
    add_load_option(&query_string, " LINES TERMINATED BY ", lines_terminated);

    dynstr_append_checked(&query_string, " FROM ");
    dynstr_append_checked(&query_string, result_table);

    if (where)
    {
      dynstr_append_checked(&query_string, " WHERE ");
      dynstr_append_checked(&query_string, where);
    }

    if (order_by)
    {
      dynstr_append_checked(&query_string, " ORDER BY ");
      dynstr_append_checked(&query_string, order_by);
    }

    if (mysql_real_query(mysql, query_string.str, query_string.length))
    {
      DB_error(mysql, "when executing 'SELECT INTO OUTFILE'");
      dynstr_free(&query_string);
      DBUG_VOID_RETURN;
    }
  }
  else
  {
    /* Normal mode: SELECT rows and emit INSERT statements ourselves */
    print_comment(md_result_file, 0,
                  "\n--\n-- Dumping data for table %s\n--\n", result_table);

    dynstr_append_checked(&query_string, "SELECT /*!40001 SQL_NO_CACHE */ * FROM ");
    dynstr_append_checked(&query_string, result_table);

    if (where)
    {
      print_comment(md_result_file, 0, "-- WHERE: %s\n", where);
      dynstr_append_checked(&query_string, " WHERE ");
      dynstr_append_checked(&query_string, where);
    }
    if (order_by)
    {
      print_comment(md_result_file, 0, "-- ORDER BY: %s\n", order_by);
      dynstr_append_checked(&query_string, " ORDER BY ");
      dynstr_append_checked(&query_string, order_by);
    }

    if (!opt_xml && !opt_compact)
    {
      fputs("\n", md_result_file);
      check_io(md_result_file);
    }
    if (mysql_query_with_error_report(mysql, 0, query_string.str))
    {
      DB_error(mysql, "when retrieving data from server");
      goto err;
    }
    /* --quick streams rows instead of buffering the full result */
    if (quick)
      res=mysql_use_result(mysql);
    else
      res=mysql_store_result(mysql);
    if (!res)
    {
      DB_error(mysql, "when retrieving data from server");
      goto err;
    }

    verbose_msg("-- Retrieving rows...\n");
    if (mysql_num_fields(res) != num_fields)
    {
      fprintf(stderr,"%s: Error in field count for table: %s ! Aborting.\n",
              my_progname, result_table);
      error= EX_CONSCHECK;
      goto err;
    }

    if (opt_lock)
    {
      fprintf(md_result_file,"LOCK TABLES %s WRITE;\n", opt_quoted_table);
      check_io(md_result_file);
    }
    /* Moved disable keys to after lock per bug 15977 */
    if (opt_disable_keys)
    {
      fprintf(md_result_file, "/*!40000 ALTER TABLE %s DISABLE KEYS */;\n",
              opt_quoted_table);
      check_io(md_result_file);
    }

    total_length= opt_net_buffer_length;                /* Force row break */
    row_break=0;
    rownr=0;
    init_length=(uint) insert_pat.length+4;
    if (opt_xml)
      print_xml_tag(md_result_file, "\t", "\n", "table_data", "name=", table,
                    NullS);
    if (opt_autocommit)
    {
      fprintf(md_result_file, "set autocommit=0;\n");
      check_io(md_result_file);
    }

    while ((row= mysql_fetch_row(res)))
    {
      uint i;
      ulong *lengths= mysql_fetch_lengths(res);
      rownr++;
      if (!extended_insert && !opt_xml)
      {
        fputs(insert_pat.str,md_result_file);
        check_io(md_result_file);
      }
      mysql_field_seek(res,0);

      if (opt_xml)
      {
        fputs("\t<row>\n", md_result_file);
        check_io(md_result_file);
      }

      for (i= 0; i < mysql_num_fields(res); i++)
      {
        int is_blob;
        ulong length= lengths[i];

        if (!(field= mysql_fetch_field(res)))
          die(EX_CONSCHECK,
              "Not enough fields from table %s! Aborting.\n",
              result_table);

        /*
          63 is my_charset_bin. If charsetnr is not 63,
          we have not a BLOB but a TEXT column.
          we'll dump in hex only BLOB columns.
        */
        is_blob= (opt_hex_blob && field->charsetnr == 63 &&
                  (field->type == MYSQL_TYPE_BIT ||
                   field->type == MYSQL_TYPE_STRING ||
                   field->type == MYSQL_TYPE_VAR_STRING ||
                   field->type == MYSQL_TYPE_VARCHAR ||
                   field->type == MYSQL_TYPE_BLOB ||
                   field->type == MYSQL_TYPE_LONG_BLOB ||
                   field->type == MYSQL_TYPE_MEDIUM_BLOB ||
                   field->type == MYSQL_TYPE_TINY_BLOB)) ?
                 1 : 0;
        if (extended_insert && !opt_xml)
        {
          /* Build one "(v1,v2,...)" tuple in extended_row */
          if (i == 0)
            dynstr_set_checked(&extended_row,"(");
          else
            dynstr_append_checked(&extended_row,",");

          if (row[i])
          {
            if (length)
            {
              if (!(field->flags & NUM_FLAG))
              {
                /*
                  "length * 2 + 2" is OK for both HEX and non-HEX modes:
                  - In HEX mode we need exactly 2 bytes per character
                  plus 2 bytes for '0x' prefix.
                  - In non-HEX mode we need up to 2 bytes per character,
                  plus 2 bytes for leading and trailing '\'' characters.
                  Also we need to reserve 1 byte for terminating '\0'.
                */
                dynstr_realloc_checked(&extended_row,length * 2 + 2 + 1);
                if (opt_hex_blob && is_blob)
                {
                  dynstr_append_checked(&extended_row, "0x");
                  extended_row.length+= mysql_hex_string(extended_row.str +
                                                         extended_row.length,
                                                         row[i], length);
                  DBUG_ASSERT(extended_row.length+1 <= extended_row.max_length);
                  /* mysql_hex_string() already terminated string by '\0' */
                  DBUG_ASSERT(extended_row.str[extended_row.length] == '\0');
                }
                else
                {
                  dynstr_append_checked(&extended_row,"'");
                  extended_row.length +=
                  mysql_real_escape_string(&mysql_connection,
                                           &extended_row.str[extended_row.length],
                                           row[i],length);
                  extended_row.str[extended_row.length]='\0';
                  dynstr_append_checked(&extended_row,"'");
                }
              }
              else
              {
                /* change any strings ("inf", "-inf", "nan") into NULL */
                char *ptr= row[i];
                if (my_isalpha(charset_info, *ptr) || (*ptr == '-' &&
                    my_isalpha(charset_info, ptr[1])))
                  dynstr_append_checked(&extended_row, "NULL");
                else
                {
                  if (field->type == MYSQL_TYPE_DECIMAL)
                  {
                    /* add " signs around */
                    dynstr_append_checked(&extended_row, "'");
                    dynstr_append_checked(&extended_row, ptr);
                    dynstr_append_checked(&extended_row, "'");
                  }
                  else
                    dynstr_append_checked(&extended_row, ptr);
                }
              }
            }
            else
              dynstr_append_checked(&extended_row,"''");
          }
          else
            dynstr_append_checked(&extended_row,"NULL");
        }
        else
        {
          /* Non-extended path: write each value directly (SQL or XML) */
          if (i && !opt_xml)
          {
            fputc(',', md_result_file);
            check_io(md_result_file);
          }
          if (row[i])
          {
            if (!(field->flags & NUM_FLAG))
            {
              if (opt_xml)
              {
                if (opt_hex_blob && is_blob && length)
                {
                  /* Define xsi:type="xs:hexBinary" for hex encoded data */
                  print_xml_tag(md_result_file, "\t\t", "", "field", "name=",
                                field->name, "xsi:type=", "xs:hexBinary", NullS);
                  print_blob_as_hex(md_result_file, row[i], length);
                }
                else
                {
                  print_xml_tag(md_result_file, "\t\t", "", "field", "name=",
                                field->name, NullS);
                  print_quoted_xml(md_result_file, row[i], length, 0);
                }
                fputs("</field>\n", md_result_file);
              }
              else if (opt_hex_blob && is_blob && length)
              {
                fputs("0x", md_result_file);
                print_blob_as_hex(md_result_file, row[i], length);
              }
              else
                unescape(md_result_file, row[i], length);
            }
            else
            {
              /* change any strings ("inf", "-inf", "nan") into NULL */
              char *ptr= row[i];
              if (opt_xml)
              {
                print_xml_tag(md_result_file, "\t\t", "", "field", "name=",
                              field->name, NullS);
                fputs(!my_isalpha(charset_info, *ptr) ? ptr: "NULL",
                      md_result_file);
                fputs("</field>\n", md_result_file);
              }
              else if (my_isalpha(charset_info, *ptr) ||
                       (*ptr == '-' && my_isalpha(charset_info, ptr[1])))
                fputs("NULL", md_result_file);
              else if (field->type == MYSQL_TYPE_DECIMAL)
              {
                /* add " signs around */
                fputc('\'', md_result_file);
                fputs(ptr, md_result_file);
                fputc('\'', md_result_file);
              }
              else
                fputs(ptr, md_result_file);
            }
          }
          else
          {
            /* The field value is NULL */
            if (!opt_xml)
              fputs("NULL", md_result_file);
            else
              print_xml_null_tag(md_result_file, "\t\t", "field name=",
                                 field->name, "\n");
          }
          check_io(md_result_file);
        }
      }

      if (opt_xml)
      {
        fputs("\t</row>\n", md_result_file);
        check_io(md_result_file);
      }

      if (extended_insert)
      {
        /* Flush the tuple, starting a new INSERT when the net buffer fills */
        ulong row_length;
        dynstr_append_checked(&extended_row,")");
        row_length= 2 + extended_row.length;
        if (total_length + row_length < opt_net_buffer_length)
        {
          total_length+= row_length;
          fputc(',',md_result_file);            /* Always row break */
          fputs(extended_row.str,md_result_file);
        }
        else
        {
          if (row_break)
            fputs(";\n", md_result_file);
          row_break=1;                          /* This is first row */

          fputs(insert_pat.str,md_result_file);
          fputs(extended_row.str,md_result_file);
          total_length= row_length+init_length;
        }
        check_io(md_result_file);
      }
      else if (!opt_xml)
      {
        fputs(");\n", md_result_file);
        check_io(md_result_file);
      }
    }

    /* XML - close table tag and supress regular output */
    if (opt_xml)
      fputs("\t</table_data>\n", md_result_file);
    else if (extended_insert && row_break)
      fputs(";\n", md_result_file);             /* If not empty table */
    fflush(md_result_file);
    check_io(md_result_file);
    if (mysql_errno(mysql))
    {
      my_snprintf(buf, sizeof(buf),
                  "%s: Error %d: %s when dumping table %s at row: %ld\n",
                  my_progname,
                  mysql_errno(mysql),
                  mysql_error(mysql),
                  result_table,
                  rownr);
      fputs(buf,stderr);
      error= EX_CONSCHECK;
      goto err;
    }

    /* Moved enable keys to before unlock per bug 15977 */
    if (opt_disable_keys)
    {
      fprintf(md_result_file,"/*!40000 ALTER TABLE %s ENABLE KEYS */;\n",
              opt_quoted_table);
      check_io(md_result_file);
    }
    if (opt_lock)
    {
      fputs("UNLOCK TABLES;\n", md_result_file);
      check_io(md_result_file);
    }
    if (opt_autocommit)
    {
      fprintf(md_result_file, "commit;\n");
      check_io(md_result_file);
    }
    mysql_free_result(res);
  }
  dynstr_free(&query_string);
  DBUG_VOID_RETURN;

err:
  dynstr_free(&query_string);
  maybe_exit(error);
  DBUG_VOID_RETURN;
} /* dump_table */


/*
  Iterate over the current database's table names.
  First call runs SHOW TABLES (mysql_list_tables) and caches the result in
  a static; each call returns the next name, NULL at end.  'reset' rewinds
  the cached result instead of freeing it.
*/
static char *getTableName(int reset)
{
  static MYSQL_RES *res= NULL;
  MYSQL_ROW    row;

  if (!res)
  {
    if (!(res= mysql_list_tables(mysql,NullS)))
      return(NULL);
  }
  if ((row= mysql_fetch_row(res)))
    return((char*) row[0]);

  if (reset)
    mysql_data_seek(res,0);      /* We want to read again */
  else
  {
    mysql_free_result(res);
    res= NULL;
  }
  return(NULL);
} /* getTableName */


/*
  dump all logfile groups and tablespaces
*/
static int dump_all_tablespaces()
{
  return dump_tablespaces(NULL);
}

/*
  Dump only the tablespaces referenced by the given tables of 'db',
  by building an I_S.PARTITIONS filter clause and delegating to
  dump_tablespaces().
*/
static int dump_tablespaces_for_tables(char *db, char **table_names, int tables)
{
  DYNAMIC_STRING where;
  int r;
  int i;
  char name_buff[NAME_LEN*2+3];

  mysql_real_escape_string(mysql, name_buff, db, strlen(db));

  init_dynamic_string_checked(&where, " AND TABLESPACE_NAME IN ("
                      "SELECT DISTINCT TABLESPACE_NAME FROM"
                      " INFORMATION_SCHEMA.PARTITIONS"
                      " WHERE"
                      " TABLE_SCHEMA='", 256, 1024);
  dynstr_append_checked(&where, name_buff);
  dynstr_append_checked(&where, "' AND TABLE_NAME IN (");

  for (i=0 ; i<tables ; i++)
  {
    mysql_real_escape_string(mysql, name_buff,
                             table_names[i], strlen(table_names[i]));

    dynstr_append_checked(&where, "'");
    dynstr_append_checked(&where, name_buff);
    dynstr_append_checked(&where, "',");
  }
  dynstr_trunc(&where, 1);                /* drop the trailing comma */
  dynstr_append_checked(&where,"))");

  DBUG_PRINT("info",("Dump TS for Tables where: %s",where.str));
  r= dump_tablespaces(where.str);
  dynstr_free(&where);
  return r;
}

/*
  Dump only the tablespaces referenced by any table in the given
  NULL-terminated list of database names.
*/
static int dump_tablespaces_for_databases(char** databases)
{
  DYNAMIC_STRING where;
  int r;
  int i;

  init_dynamic_string_checked(&where, " AND TABLESPACE_NAME IN ("
                      "SELECT DISTINCT TABLESPACE_NAME FROM"
                      " INFORMATION_SCHEMA.PARTITIONS"
                      " WHERE"
                      " TABLE_SCHEMA IN (", 256, 1024);

  for (i=0 ; databases[i]!=NULL ; i++)
  {
    char db_name_buff[NAME_LEN*2+3];
    mysql_real_escape_string(mysql, db_name_buff,
                             databases[i], strlen(databases[i]));
    dynstr_append_checked(&where, "'");
    dynstr_append_checked(&where, db_name_buff);
    dynstr_append_checked(&where, "',");
  }
  dynstr_trunc(&where, 1);                /* drop the trailing comma */
  dynstr_append_checked(&where,"))");

  DBUG_PRINT("info",("Dump TS for DBs where: %s",where.str));
  r= dump_tablespaces(where.str);
  dynstr_free(&where);
  return r;
}

/*
  Emit CREATE/ALTER LOGFILE GROUP and CREATE/ALTER TABLESPACE statements
  from INFORMATION_SCHEMA.FILES, optionally restricted by 'ts_where'
  (an extra " AND ..." clause, or NULL for all).
*/
static int dump_tablespaces(char* ts_where)
{
  MYSQL_ROW row;
  MYSQL_RES *tableres;
  char buf[FN_REFLEN];
  DYNAMIC_STRING sqlbuf;
  int first= 0;
  /*
    The following are used for parsing the EXTRA field
  */
  char extra_format[]= "UNDO_BUFFER_SIZE=";
  char *ubs;
  char *endsemi;
  DBUG_ENTER("dump_tablespaces");

  init_dynamic_string_checked(&sqlbuf,
                      "SELECT LOGFILE_GROUP_NAME,"
                      " FILE_NAME,"
                      " TOTAL_EXTENTS,"
                      " INITIAL_SIZE,"
                      " ENGINE,"
                      " EXTRA"
                      " FROM INFORMATION_SCHEMA.FILES"
                      " WHERE FILE_TYPE = 'UNDO LOG'"
                      " AND FILE_NAME IS NOT NULL",
                      256, 1024);
  if(ts_where)
  {
    dynstr_append_checked(&sqlbuf,
                  " AND LOGFILE_GROUP_NAME IN ("
                  "SELECT DISTINCT LOGFILE_GROUP_NAME"
                  " FROM INFORMATION_SCHEMA.FILES"
                  " WHERE FILE_TYPE = 'DATAFILE'"
                  );
    dynstr_append_checked(&sqlbuf, ts_where);
    dynstr_append_checked(&sqlbuf, ")");
  }
  dynstr_append_checked(&sqlbuf,
                " GROUP BY LOGFILE_GROUP_NAME, FILE_NAME"
                ", ENGINE"
                " ORDER BY LOGFILE_GROUP_NAME");

  if (mysql_query(mysql, sqlbuf.str) ||
      !(tableres = mysql_store_result(mysql)))
  {
    dynstr_free(&sqlbuf);
    /* Servers without I_S.FILES (pre-5.1) are not an error */
    if (mysql_errno(mysql) == ER_BAD_TABLE_ERROR ||
        mysql_errno(mysql) == ER_BAD_DB_ERROR ||
        mysql_errno(mysql) == ER_UNKNOWN_TABLE)
    {
      fprintf(md_result_file,
              "\n--\n-- Not dumping tablespaces as no INFORMATION_SCHEMA.FILES"
              " table on this server\n--\n");
      check_io(md_result_file);
      DBUG_RETURN(0);
    }

    my_printf_error(0, "Error: '%s' when trying to dump tablespaces",
                    MYF(0), mysql_error(mysql));
    DBUG_RETURN(1);
  }

  /* 'buf' remembers the previous group name so rows for the same group
     become ALTER instead of CREATE */
  buf[0]= 0;
  while ((row= mysql_fetch_row(tableres)))
  {
    if (strcmp(buf, row[0]) != 0)
      first= 1;
    if (first)
    {
      print_comment(md_result_file, 0, "\n--\n-- Logfile group: %s\n--\n",
                    row[0]);

      fprintf(md_result_file, "\nCREATE");
    }
    else
    {
      fprintf(md_result_file, "\nALTER");
    }
    fprintf(md_result_file,
            " LOGFILE GROUP %s\n"
            "  ADD UNDOFILE '%s'\n",
            row[0],
            row[1]);
    if (first)
    {
      /* Parse UNDO_BUFFER_SIZE=<n>[;...] out of the EXTRA column */
      ubs= strstr(row[5],extra_format);
      if(!ubs)
        break;
      ubs+= strlen(extra_format);
      endsemi= strstr(ubs,";");
      if(endsemi)
        endsemi[0]= '\0';
      fprintf(md_result_file,
              "  UNDO_BUFFER_SIZE %s\n",
              ubs);
    }
    fprintf(md_result_file,
            "  INITIAL_SIZE %s\n"
            "  ENGINE=%s;\n",
            row[3],
            row[4]);
    check_io(md_result_file);
    if (first)
    {
      first= 0;
      strxmov(buf, row[0], NullS);
    }
  }
  dynstr_free(&sqlbuf);
  mysql_free_result(tableres);
  init_dynamic_string_checked(&sqlbuf,
                      "SELECT DISTINCT TABLESPACE_NAME,"
                      " FILE_NAME,"
                      " LOGFILE_GROUP_NAME,"
                      " EXTENT_SIZE,"
                      " INITIAL_SIZE,"
                      " ENGINE"
                      " FROM INFORMATION_SCHEMA.FILES"
                      " WHERE FILE_TYPE = 'DATAFILE'",
                      256, 1024);

  if(ts_where)
    dynstr_append_checked(&sqlbuf, ts_where);

  dynstr_append_checked(&sqlbuf, " ORDER BY TABLESPACE_NAME, LOGFILE_GROUP_NAME");

  if (mysql_query_with_error_report(mysql, &tableres, sqlbuf.str))
  {
    dynstr_free(&sqlbuf);
    DBUG_RETURN(1);
  }

  buf[0]= 0;
  while ((row= mysql_fetch_row(tableres)))
  {
    if (strcmp(buf, row[0]) != 0)
      first= 1;
    if (first)
    {
      print_comment(md_result_file, 0, "\n--\n-- Tablespace: %s\n--\n",
                    row[0]);
      fprintf(md_result_file, "\nCREATE");
    }
    else
    {
      fprintf(md_result_file, "\nALTER");
    }
    fprintf(md_result_file,
            " TABLESPACE %s\n"
            "  ADD DATAFILE '%s'\n",
            row[0],
            row[1]);
    if (first)
    {
      fprintf(md_result_file,
              "  USE LOGFILE GROUP %s\n"
              "  EXTENT_SIZE %s\n",
              row[2],
              row[3]);
    }
    fprintf(md_result_file,
            "  INITIAL_SIZE %s\n"
            "  ENGINE=%s;\n",
            row[4],
            row[5]);
    check_io(md_result_file);
    if (first)
    {
      first= 0;
      strxmov(buf, row[0], NullS);
    }
  }

  mysql_free_result(tableres);
  dynstr_free(&sqlbuf);
  DBUG_RETURN(0);
}

/*
  Return 1 if 'dbname' is the NDB "ndbinfo" database on a server that has
  ndbinfo support (detected once via SHOW VARIABLES LIKE 'ndbinfo_version'
  and cached in statics).
*/
static int
is_ndbinfo(MYSQL* mysql, const char* dbname)
{
  static int checked_ndbinfo= 0;
  static int have_ndbinfo= 0;

  if (!checked_ndbinfo)
  {
    MYSQL_RES *res;
    MYSQL_ROW row;
    char buf[32], query[64];

    my_snprintf(query, sizeof(query),
                "SHOW VARIABLES LIKE %s",
                quote_for_like("ndbinfo_version", buf));

    checked_ndbinfo= 1;

    if (mysql_query_with_error_report(mysql, &res, query))
      return 0;

    if (!(row= mysql_fetch_row(res)))
    {
      mysql_free_result(res);
      return 0;
    }

    have_ndbinfo= 1;
    mysql_free_result(res);
  }

  if (!have_ndbinfo)
    return 0;

  if (my_strcasecmp(&my_charset_latin1, dbname, "ndbinfo") == 0)
    return 1;

  return 0;
}

/*
  Dump every database on the server except INFORMATION_SCHEMA,
  PERFORMANCE_SCHEMA and ndbinfo; views are dumped in a second pass
  once all tables exist.
*/
static int dump_all_databases()
{
  MYSQL_ROW row;
  MYSQL_RES *tableres;
  int result=0;

  if (mysql_query_with_error_report(mysql, &tableres, "SHOW DATABASES"))
    return 1;
  while ((row= mysql_fetch_row(tableres)))
  {
    if (mysql_get_server_version(mysql) >= FIRST_INFORMATION_SCHEMA_VERSION &&
        !my_strcasecmp(&my_charset_latin1, row[0], INFORMATION_SCHEMA_DB_NAME))
      continue;

    if (mysql_get_server_version(mysql) >= FIRST_PERFORMANCE_SCHEMA_VERSION &&
        !my_strcasecmp(&my_charset_latin1, row[0], PERFORMANCE_SCHEMA_DB_NAME))
      continue;

    if (is_ndbinfo(mysql, row[0]))
      continue;

    if (dump_all_tables_in_db(row[0]))
      result=1;
  }
  if (seen_views)
  {
    /* Second pass: dump view definitions */
    if (mysql_query(mysql, "SHOW DATABASES") ||
        !(tableres= mysql_store_result(mysql)))
    {
      my_printf_error(0, "Error: Couldn't execute 'SHOW DATABASES': %s",
                      MYF(0), mysql_error(mysql));
      return 1;
    }
    while ((row=
            mysql_fetch_row(tableres)))
    {
      if (mysql_get_server_version(mysql) >= FIRST_INFORMATION_SCHEMA_VERSION &&
          !my_strcasecmp(&my_charset_latin1, row[0], INFORMATION_SCHEMA_DB_NAME))
        continue;

      if (mysql_get_server_version(mysql) >= FIRST_PERFORMANCE_SCHEMA_VERSION &&
          !my_strcasecmp(&my_charset_latin1, row[0], PERFORMANCE_SCHEMA_DB_NAME))
        continue;

      if (is_ndbinfo(mysql, row[0]))
        continue;

      if (dump_all_views_in_db(row[0]))
        result=1;
    }
  }
  return result;
}
/* dump_all_databases */


/*
  Dump the explicitly listed databases (tables first, then a view pass).
  Returns 0 on success, 1 if any database failed.
*/
static int dump_databases(char **db_names)
{
  int result=0;
  char **db;
  DBUG_ENTER("dump_databases");

  for (db= db_names ; *db ; db++)
  {
    if (dump_all_tables_in_db(*db))
      result=1;
  }
  if (!result && seen_views)
  {
    for (db= db_names ; *db ; db++)
    {
      if (dump_all_views_in_db(*db))
        result=1;
    }
  }
  DBUG_RETURN(result);
} /* dump_databases */


/*
View Specific database initalization.

SYNOPSIS
  init_dumping_views
  qdatabase      quoted name of the database

RETURN VALUES
  0        Success.
  1        Failure.
*/
int init_dumping_views(char *qdatabase __attribute__((unused)))
{
    return 0;
} /* init_dumping_views */


/*
Table Specific database initalization.

SYNOPSIS
  init_dumping_tables
  qdatabase      quoted name of the database

RETURN VALUES
  0        Success.
  1        Failure.
*/
int init_dumping_tables(char *qdatabase)
{
  DBUG_ENTER("init_dumping_tables");

  if (!opt_create_db)
  {
    char qbuf[256];
    MYSQL_ROW row;
    MYSQL_RES *dbinfo;

    my_snprintf(qbuf, sizeof(qbuf),
                "SHOW CREATE DATABASE IF NOT EXISTS %s", qdatabase);

    if (mysql_query(mysql, qbuf) || !(dbinfo = mysql_store_result(mysql)))
    {
      /* Old server version, dump generic CREATE DATABASE */
      if (opt_drop_database)
        fprintf(md_result_file,
                "\n/*!40000 DROP DATABASE IF EXISTS %s*/;\n",
                qdatabase);
      fprintf(md_result_file,
              "\nCREATE DATABASE /*!32312 IF NOT EXISTS*/ %s;\n",
              qdatabase);
    }
    else
    {
      if (opt_drop_database)
        fprintf(md_result_file,
                "\n/*!40000 DROP DATABASE IF EXISTS %s*/;\n",
                qdatabase);
      row = mysql_fetch_row(dbinfo);
      if (row[1])
      {
        fprintf(md_result_file,"\n%s;\n",row[1]);
      }
      mysql_free_result(dbinfo);
    }
  }
  DBUG_RETURN(0);
} /* init_dumping_tables */


/*
  Common per-database setup: skip ndbinfo, select the database, and (when
  writing one SQL stream) emit the CREATE DATABASE / USE preamble via the
  view- or table-specific 'init_func'.  Returns 0 on success, 1 on failure.
*/
static int init_dumping(char *database, int init_func(char*))
{
  if (is_ndbinfo(mysql, database))
  {
    verbose_msg("-- Skipping dump of ndbinfo database\n");
    return 0;
  }

  if (mysql_select_db(mysql, database))
  {
    DB_error(mysql, "when selecting the database");
    return 1;                   /* If --force */
  }

  if (!path && !opt_xml)
  {
    if (opt_databases || opt_alldbs)
    {
      /*
        length of table name * 2 (if name contains quotes), 2 quotes and 0
      */
      char quoted_database_buf[NAME_LEN*2+3];
      char *qdatabase= quote_name(database,quoted_database_buf,opt_quoted);

      print_comment(md_result_file, 0,
                    "\n--\n-- Current Database: %s\n--\n", qdatabase);

      /* Call the view or table specific function */
      init_func(qdatabase);

      fprintf(md_result_file,"\nUSE %s;\n", qdatabase);
      check_io(md_result_file);
    }
  }
  if (extended_insert)
    init_dynamic_string_checked(&extended_row, "", 1024, 1024);
  return 0;
} /* init_dumping */


/* Return 1 if we should copy the table */
my_bool include_table(const uchar *hash_key, size_t len)
{
  return !
         my_hash_search(&ignore_table, hash_key, len);
}


/*
  Dump structure and data of every included table in 'database', plus its
  triggers, events and routines as requested by options.  'hash_key' holds
  "db.tablename" for the --ignore-table lookup.  Returns 0 (errors are
  reported and may exit via maybe_exit unless --force).
*/
static int dump_all_tables_in_db(char *database)
{
  char *table;
  uint numrows;
  char table_buff[NAME_LEN*2+3];
  char hash_key[2*NAME_LEN+2];  /* "db.tablename" */
  char *afterdot;
  my_bool general_log_table_exists= 0, slow_log_table_exists=0;
  int using_mysql_db= !my_strcasecmp(charset_info, database, "mysql");
  DBUG_ENTER("dump_all_tables_in_db");

  afterdot= my_stpcpy(hash_key, database);
  *afterdot++= '.';

  if (init_dumping(database, init_dumping_tables))
    DBUG_RETURN(1);
  if (opt_xml)
    print_xml_tag(md_result_file, "", "\n", "database", "name=", database, NullS);

  if (lock_tables)
  {
    /* Build one LOCK TABLES ... READ statement for all included tables */
    DYNAMIC_STRING query;
    init_dynamic_string_checked(&query, "LOCK TABLES ", 256, 1024);
    for (numrows= 0 ; (table= getTableName(1)) ; )
    {
      char *end= my_stpcpy(afterdot, table);
      if (include_table((uchar*) hash_key,end - hash_key))
      {
        numrows++;
        dynstr_append_checked(&query, quote_name(table, table_buff, 1));
        dynstr_append_checked(&query, " READ /*!32311 LOCAL */,");
      }
    }
    if (numrows && mysql_real_query(mysql, query.str, query.length-1))
      DB_error(mysql, "when using LOCK TABLES");
            /* We shall continue here, if --force was given */
    dynstr_free(&query);
  }
  if (flush_logs)
  {
    if (mysql_refresh(mysql, REFRESH_LOG))
      DB_error(mysql, "when doing refresh");
           /* We shall continue here, if --force was given */
    else
      verbose_msg("-- dump_all_tables_in_db : logs flushed successfully!\n");
  }
  while ((table= getTableName(0)))
  {
    char *end= my_stpcpy(afterdot, table);
    if (include_table((uchar*) hash_key, end - hash_key))
    {
      dump_table(table,database);
      my_free(order_by);
      order_by= 0;
      if (opt_dump_triggers && mysql_get_server_version(mysql) >= 50009)
      {
        if (dump_triggers_for_table(table, database))
        {
          /* dump_triggers_for_table will send an error to stderr */
          if (path)
            my_fclose(md_result_file, MYF(MY_WME));
          maybe_exit(EX_MYSQLERR);
        }
      }
    }
    else
    {
      /*
        If general_log and slow_log exists in the 'mysql' database,
        we should dump the table structure. But we cannot
        call get_table_structure() here as 'LOCK TABLES' query got executed
        above on the session and that 'LOCK TABLES' query does not contain
        'general_log' and 'slow_log' tables. (you cannot acquire lock on
        log tables). Hence mark the existence of these log tables here and
        after 'UNLOCK TABLES' query is executed on the session, get the
        table structure from server and dump it in the file.
      */
      if (using_mysql_db)
      {
        if (!my_strcasecmp(charset_info, table, "general_log"))
          general_log_table_exists= 1;
        else if (!my_strcasecmp(charset_info, table, "slow_log"))
          slow_log_table_exists= 1;
      }
    }
  }
  if (opt_events && mysql_get_server_version(mysql) >= 50106)
  {
    DBUG_PRINT("info", ("Dumping events for database %s", database));
    dump_events_for_db(database);
  }
  if (opt_routines && mysql_get_server_version(mysql) >= 50009)
  {
    DBUG_PRINT("info", ("Dumping routines for database %s", database));
    dump_routines_for_db(database);
  }
  if (opt_xml)
  {
    fputs("</database>\n", md_result_file);
    check_io(md_result_file);
  }
  if (lock_tables)
    (void) mysql_query_with_error_report(mysql, 0, "UNLOCK TABLES");
  if (using_mysql_db)
  {
    /* Deferred structure-only dump of the log tables (see comment above) */
    char table_type[NAME_LEN];
    char ignore_flag;
    if (general_log_table_exists)
    {
      if (!get_table_structure((char *) "general_log",
                               database, table_type, &ignore_flag) )
        verbose_msg("-- Warning: get_table_structure() failed with some internal "
                    "error for 'general_log' table\n");
    }
    if (slow_log_table_exists)
    {
      if (!get_table_structure((char *) "slow_log",
                               database, table_type, &ignore_flag) )
        verbose_msg("-- Warning: get_table_structure() failed with some internal "
                    "error for 'slow_log' table\n");
    }
  }
  if (flush_privileges && using_mysql_db)
  {
    fprintf(md_result_file,"\n--\n-- Flush Grant Tables \n--\n");
    fprintf(md_result_file,"\n/*! FLUSH PRIVILEGES */;\n");
  }
  DBUG_RETURN(0);
} /* dump_all_tables_in_db */


/*
   dump structure of views of database

   SYNOPSIS
     dump_all_views_in_db()
     database  database name

  RETURN
    0 OK
    1 ERROR
*/
static my_bool dump_all_views_in_db(char *database)
{
  char *table;
  uint numrows;
  char table_buff[NAME_LEN*2+3];
  char hash_key[2*NAME_LEN+2];  /* "db.tablename" */
  char *afterdot;

  afterdot= my_stpcpy(hash_key, database);
  *afterdot++= '.';

  if (init_dumping(database, init_dumping_views))
    return 1;
  if (opt_xml)
    print_xml_tag(md_result_file, "", "\n", "database", "name=", database, NullS);
  if (lock_tables)
  {
    DYNAMIC_STRING query;
    init_dynamic_string_checked(&query, "LOCK TABLES ", 256, 1024);
    for (numrows= 0 ; (table= getTableName(1)); )
    {
      char *end= my_stpcpy(afterdot, table);
      if (include_table((uchar*) hash_key,end - hash_key))
      {
        numrows++;
        dynstr_append_checked(&query, quote_name(table, table_buff, 1));
        dynstr_append_checked(&query, " READ /*!32311 LOCAL */,");
      }
    }
    if (numrows && mysql_real_query(mysql, query.str, query.length-1))
      DB_error(mysql, "when using LOCK TABLES");
            /* We shall continue here, if --force was given */
    dynstr_free(&query);
  }
  if (flush_logs)
  {
    if (mysql_refresh(mysql, REFRESH_LOG))
      DB_error(mysql, "when doing refresh");
           /* We shall continue here, if --force was given */
    else
      verbose_msg("-- dump_all_views_in_db : logs flushed successfully!\n");
  }
  while ((table= getTableName(0)))
  {
    char *end= my_stpcpy(afterdot, table);
    if (include_table((uchar*) hash_key, end - hash_key))
      get_view_structure(table, database);
  }
  if (opt_xml)
  {
    fputs("</database>\n", md_result_file);
    check_io(md_result_file);
  }
  if (lock_tables)
    (void) mysql_query_with_error_report(mysql, 0, "UNLOCK TABLES");
  return 0;
} /* dump_all_tables_in_db */


/*
  get_actual_table_name -- executes a SHOW TABLES LIKE '%s' to get the actual
  table name from the server for the table name given on the command line.
  we do this because the table name given on the command line may be a
  different case (e.g.
  T1 vs t1)

  RETURN
    pointer to the table name
    0 if error
*/
static char *get_actual_table_name(const char *old_table_name, MEM_ROOT *root)
{
  char *name= 0;
  MYSQL_RES  *table_res;
  MYSQL_ROW  row;
  char query[50 + 2*NAME_LEN];
  char show_name_buff[FN_REFLEN];
  DBUG_ENTER("get_actual_table_name");

  /* Check memory for quote_for_like() */
  DBUG_ASSERT(2*sizeof(old_table_name) < sizeof(show_name_buff));
  my_snprintf(query, sizeof(query), "SHOW TABLES LIKE %s",
              quote_for_like(old_table_name, show_name_buff));

  if (mysql_query_with_error_report(mysql, 0, query))
    DBUG_RETURN(NullS);

  if ((table_res= mysql_store_result(mysql)))
  {
    my_ulonglong num_rows= mysql_num_rows(table_res);
    if (num_rows > 0)
    {
      ulong *lengths;
      /*
        Return first row
        TODO: Return all matching rows
      */
      row= mysql_fetch_row(table_res);
      lengths= mysql_fetch_lengths(table_res);
      name= strmake_root(root, row[0], lengths[0]);
    }
    mysql_free_result(table_res);
  }
  DBUG_PRINT("exit", ("new_table_name: %s", name));
  DBUG_RETURN(name);
}


/*
  Dump only the tables (and views) named on the command line for database
  'db'.  Resolves each name's actual case via get_actual_table_name(),
  optionally locks/flushes, then dumps tables, triggers, views, events and
  routines.
*/
static int dump_selected_tables(char *db, char **table_names, int tables)
{
  char table_buff[NAME_LEN*2+3];
  DYNAMIC_STRING lock_tables_query;
  MEM_ROOT root;
  char **dump_tables, **pos, **end;
  DBUG_ENTER("dump_selected_tables");

  if (init_dumping(db, init_dumping_tables))
    DBUG_RETURN(1);

  init_alloc_root(PSI_NOT_INSTRUMENTED, &root, 8192, 0);
  if (!(dump_tables= pos= (char**) alloc_root(&root, tables * sizeof(char *))))
     die(EX_EOM, "alloc_root failure.");

  init_dynamic_string_checked(&lock_tables_query, "LOCK TABLES ", 256, 1024);
  for (; tables > 0 ; tables-- , table_names++)
  {
    /* the table name passed on commandline may be wrong case */
    if ((*pos= get_actual_table_name(*table_names, &root)))
    {
      /* Add found table name to lock_tables_query */
      if (lock_tables)
      {
        dynstr_append_checked(&lock_tables_query, quote_name(*pos, table_buff, 1));
        dynstr_append_checked(&lock_tables_query, " READ /*!32311 LOCAL */,");
      }
      pos++;
    }
    else
    {
      if (!opt_force)
      {
        dynstr_free(&lock_tables_query);
        free_root(&root, MYF(0));
      }
      maybe_die(EX_ILLEGAL_TABLE, "Couldn't find table: \"%s\"", *table_names);
      /* We shall countinue here, if --force was given */
    }
  }
  end= pos;

  /* Can't LOCK TABLES in I_S / P_S, so don't try. */
  if (lock_tables &&
      !(mysql_get_server_version(mysql) >= FIRST_INFORMATION_SCHEMA_VERSION &&
        !my_strcasecmp(&my_charset_latin1, db, INFORMATION_SCHEMA_DB_NAME)) &&
      !(mysql_get_server_version(mysql) >= FIRST_PERFORMANCE_SCHEMA_VERSION &&
        !my_strcasecmp(&my_charset_latin1, db, PERFORMANCE_SCHEMA_DB_NAME)))
  {
    if (mysql_real_query(mysql, lock_tables_query.str,
                         lock_tables_query.length-1))
    {
      if (!opt_force)
      {
        dynstr_free(&lock_tables_query);
        free_root(&root, MYF(0));
      }
      DB_error(mysql, "when doing LOCK TABLES");
       /* We shall countinue here, if --force was given */
    }
  }
  dynstr_free(&lock_tables_query);
  if (flush_logs)
  {
    if (mysql_refresh(mysql, REFRESH_LOG))
    {
      if (!opt_force)
        free_root(&root, MYF(0));
      DB_error(mysql, "when doing refresh");
    }
     /* We shall countinue here, if --force was given */
    else
      verbose_msg("-- dump_selected_tables : logs flushed successfully!\n");
  }
  if (opt_xml)
    print_xml_tag(md_result_file, "", "\n", "database", "name=", db, NullS);

  /* Dump each selected table */
  for (pos= dump_tables; pos < end; pos++)
  {
    DBUG_PRINT("info",("Dumping table %s", *pos));
    dump_table(*pos, db);
    if (opt_dump_triggers &&
        mysql_get_server_version(mysql) >= 50009)
    {
      if (dump_triggers_for_table(*pos, db))
      {
        /* dump_triggers_for_table will send an error to stderr */
        if (path)
          my_fclose(md_result_file, MYF(MY_WME));
        maybe_exit(EX_MYSQLERR);
      }
    }
  }

  /* Dump each selected view */
  if (seen_views)
  {
    for (pos= dump_tables; pos < end; pos++)
      get_view_structure(*pos, db);
  }
  if (opt_events && mysql_get_server_version(mysql) >= 50106)
  {
    DBUG_PRINT("info", ("Dumping events for database %s", db));
    dump_events_for_db(db);
  }
  /* obtain dump of routines (procs/functions) */
  if (opt_routines && mysql_get_server_version(mysql) >= 50009)
  {
    DBUG_PRINT("info", ("Dumping routines for database %s", db));
    dump_routines_for_db(db);
  }
  free_root(&root, MYF(0));
  my_free(order_by);
order_by= 0; if (opt_xml) { fputs("</database>\n", md_result_file); check_io(md_result_file); } if (lock_tables) (void) mysql_query_with_error_report(mysql, 0, "UNLOCK TABLES"); DBUG_RETURN(0); } /* dump_selected_tables */ static int do_show_master_status(MYSQL *mysql_con) { MYSQL_ROW row; MYSQL_RES *master; const char *comment_prefix= (opt_master_data == MYSQL_OPT_MASTER_DATA_COMMENTED_SQL) ? "-- " : ""; if (mysql_query_with_error_report(mysql_con, &master, "SHOW MASTER STATUS")) { return 1; } else { row= mysql_fetch_row(master); if (row && row[0] && row[1]) { /* SHOW MASTER STATUS reports file and position */ print_comment(md_result_file, 0, "\n--\n-- Position to start replication or point-in-time " "recovery from\n--\n\n"); fprintf(md_result_file, "%sCHANGE MASTER TO MASTER_LOG_FILE='%s', MASTER_LOG_POS=%s;\n", comment_prefix, row[0], row[1]); check_io(md_result_file); } else if (!opt_force) { /* SHOW MASTER STATUS reports nothing and --force is not enabled */ my_printf_error(0, "Error: Binlogging on server not active", MYF(0)); mysql_free_result(master); maybe_exit(EX_MYSQLERR); return 1; } mysql_free_result(master); } return 0; } static int do_stop_slave_sql(MYSQL *mysql_con) { MYSQL_RES *slave; /* We need to check if the slave sql is running in the first place */ if (mysql_query_with_error_report(mysql_con, &slave, "SHOW SLAVE STATUS")) return(1); else { MYSQL_ROW row= mysql_fetch_row(slave); if (row && row[11]) { /* if SLAVE SQL is not running, we don't stop it */ if (!strcmp(row[11],"No")) { mysql_free_result(slave); /* Silently assume that they don't have the slave running */ return(0); } } } mysql_free_result(slave); /* now, stop slave if running */ if (mysql_query_with_error_report(mysql_con, 0, "STOP SLAVE SQL_THREAD")) return(1); return(0); } static int add_stop_slave(void) { if (opt_comments) fprintf(md_result_file, "\n--\n-- stop slave statement to make a recovery dump)\n--\n\n"); fprintf(md_result_file, "STOP SLAVE;\n"); return(0); } static int 
add_slave_statements(void)
{
  /* Emit a START SLAVE statement at the very end of the dump. */
  if (opt_comments)
    fprintf(md_result_file,
            "\n--\n-- start slave statement to make a recovery dump)\n--\n\n");
  fprintf(md_result_file, "START SLAVE;\n");
  return(0);
}

/*
  Print the coordinates of this slave's master as a CHANGE MASTER
  statement, for --dump-slave.

  SYNOPSIS
    do_show_slave_status()
    mysql_con        connection to the server

  RETURN
    0                success
    1                SHOW SLAVE STATUS failed
*/

static int do_show_slave_status(MYSQL *mysql_con)
{
  MYSQL_RES *slave= NULL;
  /* With --dump-slave=2 the CHANGE MASTER statement is written commented out */
  const char *comment_prefix=
    (opt_slave_data == MYSQL_OPT_SLAVE_DATA_COMMENTED_SQL) ? "-- " : "";
  if (mysql_query_with_error_report(mysql_con, &slave, "SHOW SLAVE STATUS"))
  {
    if (!opt_force)
    {
      /* SHOW SLAVE STATUS reports nothing and --force is not enabled */
      my_printf_error(0, "Error: Slave not set up", MYF(0));
    }
    mysql_free_result(slave);
    return 1;
  }
  else
  {
    MYSQL_ROW row= mysql_fetch_row(slave);
    /* NOTE(review): columns 9 and 21 are presumably Master_Log_File and
       Exec_Master_Log_Pos -- confirm against SHOW SLAVE STATUS layout */
    if (row && row[9] && row[21])
    {
      /* SHOW MASTER STATUS reports file and position */
      if (opt_comments)
        fprintf(md_result_file,
                "\n--\n-- Position to start replication or point-in-time "
                "recovery from (the master of this slave)\n--\n\n");

      fprintf(md_result_file, "%sCHANGE MASTER TO ", comment_prefix);

      if (opt_include_master_host_port)
      {
        if (row[1])
          fprintf(md_result_file, "MASTER_HOST='%s', ", row[1]);
        if (row[3])
          fprintf(md_result_file, "MASTER_PORT=%s, ", row[3]);
      }
      fprintf(md_result_file,
              "MASTER_LOG_FILE='%s', MASTER_LOG_POS=%s;\n", row[9], row[21]);

      check_io(md_result_file);
    }
    mysql_free_result(slave);
  }
  return 0;
}

/*
  Restart the slave SQL thread, but only if it is currently stopped.

  SYNOPSIS
    do_start_slave_sql()
    mysql_con        connection to the server

  RETURN
    0                thread started, or it was already running
    1                a query failed
*/

static int do_start_slave_sql(MYSQL *mysql_con)
{
  MYSQL_RES *slave;
  /* We need to check if the slave sql is stopped in the first place */
  if (mysql_query_with_error_report(mysql_con, &slave, "SHOW SLAVE STATUS"))
    return(1);
  else
  {
    MYSQL_ROW row= mysql_fetch_row(slave);
    if (row && row[11])
    {
      /* if SLAVE SQL is not running, we don't start it */
      if (!strcmp(row[11],"Yes"))
      {
        mysql_free_result(slave);
        /* Silently assume that they don't have the slave running */
        return(0);
      }
    }
  }
  mysql_free_result(slave);

  /* now, start slave if stopped */
  if (mysql_query_with_error_report(mysql_con, 0, "START SLAVE"))
  {
    my_printf_error(0, "Error: Unable to start slave", MYF(0));
    return 1;
  }
  return(0);
}

static int
do_flush_tables_read_lock(MYSQL *mysql_con) { /* We do first a FLUSH TABLES. If a long update is running, the FLUSH TABLES will wait but will not stall the whole mysqld, and when the long update is done the FLUSH TABLES WITH READ LOCK will start and succeed quickly. So, FLUSH TABLES is to lower the probability of a stage where both mysqldump and most client connections are stalled. Of course, if a second long update starts between the two FLUSHes, we have that bad stall. */ return ( mysql_query_with_error_report(mysql_con, 0, ((opt_master_data != 0) ? "FLUSH /*!40101 LOCAL */ TABLES" : "FLUSH TABLES")) || mysql_query_with_error_report(mysql_con, 0, "FLUSH TABLES WITH READ LOCK") ); } static int do_unlock_tables(MYSQL *mysql_con) { return mysql_query_with_error_report(mysql_con, 0, "UNLOCK TABLES"); } static int get_bin_log_name(MYSQL *mysql_con, char* buff_log_name, uint buff_len) { MYSQL_RES *res; MYSQL_ROW row; if (mysql_query(mysql_con, "SHOW MASTER STATUS") || !(res= mysql_store_result(mysql))) return 1; if (!(row= mysql_fetch_row(res))) { mysql_free_result(res); return 1; } /* Only one row is returned, and the first column is the name of the active log. */ strmake(buff_log_name, row[0], buff_len - 1); mysql_free_result(res); return 0; } static int purge_bin_logs_to(MYSQL *mysql_con, char* log_name) { DYNAMIC_STRING str; int err; init_dynamic_string_checked(&str, "PURGE BINARY LOGS TO '", 1024, 1024); dynstr_append_checked(&str, log_name); dynstr_append_checked(&str, "'"); err = mysql_query_with_error_report(mysql_con, 0, str.str); dynstr_free(&str); return err; } static int start_transaction(MYSQL *mysql_con) { verbose_msg("-- Starting transaction...\n"); /* We use BEGIN for old servers. --single-transaction --master-data will fail on old servers, but that's ok as it was already silently broken (it didn't do a consistent read, so better tell people frankly, with the error). 
We want the first consistent read to be used for all tables to dump so we need the REPEATABLE READ level (not anything lower, for example READ COMMITTED would give one new consistent read per dumped table). */ if ((mysql_get_server_version(mysql_con) < 40100) && opt_master_data) { fprintf(stderr, "-- %s: the combination of --single-transaction and " "--master-data requires a MySQL server version of at least 4.1 " "(current server's version is %s). %s\n", opt_force ? "Warning" : "Error", mysql_con->server_version ? mysql_con->server_version : "unknown", opt_force ? "Continuing due to --force, backup may not be " "consistent across all tables!" : "Aborting."); if (!opt_force) exit(EX_MYSQLERR); } return (mysql_query_with_error_report(mysql_con, 0, "SET SESSION TRANSACTION ISOLATION " "LEVEL REPEATABLE READ") || mysql_query_with_error_report(mysql_con, 0, "START TRANSACTION " "/*!40100 WITH CONSISTENT SNAPSHOT */")); } static ulong find_set(TYPELIB *lib, const char *x, uint length, char **err_pos, uint *err_len) { const char *end= x + length; ulong found= 0; uint find; char buff[255]; *err_pos= 0; /* No error yet */ while (end > x && my_isspace(charset_info, end[-1])) end--; *err_len= 0; if (x != end) { const char *start= x; for (;;) { const char *pos= start; uint var_len; for (; pos != end && *pos != ','; pos++) ; var_len= (uint) (pos - start); strmake(buff, start, MY_MIN(sizeof(buff) - 1, var_len)); find= find_type(buff, lib, FIND_TYPE_BASIC); if (!find) { *err_pos= (char*) start; *err_len= var_len; } else found|= ((longlong) 1 << (find - 1)); if (pos == end) break; start= pos + 1; } } return found; } /* Print a value with a prefix on file */ static void print_value(FILE *file, MYSQL_RES *result, MYSQL_ROW row, const char *prefix, const char *name, int string_value) { MYSQL_FIELD *field; mysql_field_seek(result, 0); for ( ; (field= mysql_fetch_field(result)) ; row++) { if (!strcmp(field->name,name)) { if (row[0] && row[0][0] && strcmp(row[0],"0")) /* Skip default */ 
{ fputc(' ',file); fputs(prefix, file); if (string_value) unescape(file,row[0],(uint) strlen(row[0])); else fputs(row[0], file); check_io(file); return; } } } return; /* This shouldn't happen */ } /* print_value */ /* SYNOPSIS Check if the table is one of the table types that should be ignored: MRG_ISAM, MRG_MYISAM. If the table should be altogether ignored, it returns a TRUE, FALSE if it should not be ignored. ARGS check_if_ignore_table() table_name Table name to check table_type Type of table GLOBAL VARIABLES mysql MySQL connection verbose Write warning messages RETURN char (bit value) See IGNORE_ values at top */ char check_if_ignore_table(const char *table_name, char *table_type) { char result= IGNORE_NONE; char buff[FN_REFLEN+80], show_name_buff[FN_REFLEN]; MYSQL_RES *res= NULL; MYSQL_ROW row; DBUG_ENTER("check_if_ignore_table"); /* Check memory for quote_for_like() */ DBUG_ASSERT(2*sizeof(table_name) < sizeof(show_name_buff)); my_snprintf(buff, sizeof(buff), "show table status like %s", quote_for_like(table_name, show_name_buff)); if (mysql_query_with_error_report(mysql, &res, buff)) { if (mysql_errno(mysql) != ER_PARSE_ERROR) { /* If old MySQL version */ verbose_msg("-- Warning: Couldn't get status information for " "table %s (%s)\n", table_name, mysql_error(mysql)); DBUG_RETURN(result); /* assume table is ok */ } } if (!(row= mysql_fetch_row(res))) { fprintf(stderr, "Error: Couldn't read status information for table %s (%s)\n", table_name, mysql_error(mysql)); mysql_free_result(res); DBUG_RETURN(result); /* assume table is ok */ } if (!(row[1])) strmake(table_type, "VIEW", NAME_LEN-1); else { strmake(table_type, row[1], NAME_LEN-1); /* If these two types, we want to skip dumping the table. 
*/ if (!opt_no_data && (!my_strcasecmp(&my_charset_latin1, table_type, "MRG_MyISAM") || !strcmp(table_type,"MRG_ISAM") || !strcmp(table_type,"FEDERATED"))) result= IGNORE_DATA; } mysql_free_result(res); DBUG_RETURN(result); } /* Get string of comma-separated primary key field names SYNOPSIS char *primary_key_fields(const char *table_name) RETURNS pointer to allocated buffer (must be freed by caller) table_name quoted table name DESCRIPTION Use SHOW KEYS FROM table_name, allocate a buffer to hold the field names, and then build that string and return the pointer to that buffer. Returns NULL if there is no PRIMARY or UNIQUE key on the table, or if there is some failure. It is better to continue to dump the table unsorted, rather than exit without dumping the data. */ static char *primary_key_fields(const char *table_name) { MYSQL_RES *res= NULL; MYSQL_ROW row; /* SHOW KEYS FROM + table name * 2 (escaped) + 2 quotes + \0 */ char show_keys_buff[15 + NAME_LEN * 2 + 3]; uint result_length= 0; char *result= 0; char buff[NAME_LEN * 2 + 3]; char *quoted_field; my_snprintf(show_keys_buff, sizeof(show_keys_buff), "SHOW KEYS FROM %s", table_name); if (mysql_query(mysql, show_keys_buff) || !(res= mysql_store_result(mysql))) { fprintf(stderr, "Warning: Couldn't read keys from table %s;" " records are NOT sorted (%s)\n", table_name, mysql_error(mysql)); /* Don't exit, because it's better to print out unsorted records */ goto cleanup; } /* * Figure out the length of the ORDER BY clause result. * Note that SHOW KEYS is ordered: a PRIMARY key is always the first * row, and UNIQUE keys come before others. So we only need to check * the first key, not all keys. 
*/ if ((row= mysql_fetch_row(res)) && atoi(row[1]) == 0) { /* Key is unique */ do { quoted_field= quote_name(row[4], buff, 0); result_length+= strlen(quoted_field) + 1; /* + 1 for ',' or \0 */ } while ((row= mysql_fetch_row(res)) && atoi(row[3]) > 1); } /* Build the ORDER BY clause result */ if (result_length) { char *end; /* result (terminating \0 is already in result_length) */ result= my_malloc(PSI_NOT_INSTRUMENTED, result_length + 10, MYF(MY_WME)); if (!result) { fprintf(stderr, "Error: Not enough memory to store ORDER BY clause\n"); goto cleanup; } mysql_data_seek(res, 0); row= mysql_fetch_row(res); quoted_field= quote_name(row[4], buff, 0); end= my_stpcpy(result, quoted_field); while ((row= mysql_fetch_row(res)) && atoi(row[3]) > 1) { quoted_field= quote_name(row[4], buff, 0); end= strxmov(end, ",", quoted_field, NullS); } } cleanup: if (res) mysql_free_result(res); return result; } /* Replace a substring SYNOPSIS replace ds_str The string to search and perform the replace in search_str The string to search for search_len Length of the string to search for replace_str The string to replace with replace_len Length of the string to replace with RETURN 0 String replaced 1 Could not find search_str in str */ static int replace(DYNAMIC_STRING *ds_str, const char *search_str, ulong search_len, const char *replace_str, ulong replace_len) { DYNAMIC_STRING ds_tmp; const char *start= strstr(ds_str->str, search_str); if (!start) return 1; init_dynamic_string_checked(&ds_tmp, "", ds_str->length + replace_len, 256); dynstr_append_mem_checked(&ds_tmp, ds_str->str, start - ds_str->str); dynstr_append_mem_checked(&ds_tmp, replace_str, replace_len); dynstr_append_checked(&ds_tmp, start + search_len); dynstr_set_checked(ds_str, ds_tmp.str); dynstr_free(&ds_tmp); return 0; } /** This function sets the session binlog in the dump file. When --set-gtid-purged is used, this function is called to disable the session binlog and at the end of the dump, to restore the session binlog. 
@note: md_result_file should have been opened, before this function is called. @param[in] flag If FALSE, disable binlog. If TRUE and binlog disabled previously, restore the session binlog. */ static void set_session_binlog(my_bool flag) { static my_bool is_binlog_disabled= FALSE; if (!flag && !is_binlog_disabled) { fprintf(md_result_file, "SET @MYSQLDUMP_TEMP_LOG_BIN = @@SESSION.SQL_LOG_BIN;\n"); fprintf(md_result_file, "SET @@SESSION.SQL_LOG_BIN= 0;\n"); is_binlog_disabled= 1; } else if (flag && is_binlog_disabled) { fprintf(md_result_file, "SET @@SESSION.SQL_LOG_BIN = @MYSQLDUMP_TEMP_LOG_BIN;\n"); is_binlog_disabled= 0; } } /** This function gets the GTID_EXECUTED sets from the server and assigns those sets to GTID_PURGED in the dump file. @param[in] mysql_con connection to the server @retval FALSE succesfully printed GTID_PURGED sets in the dump file. @retval TRUE failed. */ static my_bool add_set_gtid_purged(MYSQL *mysql_con) { MYSQL_RES *gtid_purged_res; MYSQL_ROW gtid_set; ulong num_sets, idx; /* query to get the GTID_EXECUTED */ if (mysql_query_with_error_report(mysql_con, &gtid_purged_res, "SELECT @@GLOBAL.GTID_EXECUTED")) return TRUE; /* Proceed only if gtid_purged_res is non empty */ if ((num_sets= mysql_num_rows(gtid_purged_res)) > 0) { if (opt_comments) fprintf(md_result_file, "\n--\n-- GTID state at the beginning of the backup \n--\n\n"); fprintf(md_result_file,"SET @@GLOBAL.GTID_PURGED='"); /* formatting is not required, even for multiple gtid sets */ for (idx= 0; idx< num_sets-1; idx++) { gtid_set= mysql_fetch_row(gtid_purged_res); fprintf(md_result_file,"%s,", (char*)gtid_set[0]); } /* for the last set */ gtid_set= mysql_fetch_row(gtid_purged_res); /* close the SET expression */ fprintf(md_result_file,"%s';\n", (char*)gtid_set[0]); } return FALSE; /*success */ } /** This function processes the opt_set_gtid_purged option. This function also calls set_session_binlog() function before setting the SET @@GLOBAL.GTID_PURGED in the output. 
@param[in] mysql_con the connection to the server @retval FALSE successful according to the value of opt_set_gtid_purged. @retval TRUE fail. */ static my_bool process_set_gtid_purged(MYSQL* mysql_con) { MYSQL_RES *gtid_mode_res; MYSQL_ROW gtid_mode_row; char *gtid_mode_val= 0; char buf[32], query[64]; if (opt_set_gtid_purged_mode == SET_GTID_PURGED_OFF) return FALSE; /* nothing to be done */ /* Check if the server has the knowledge of GTIDs(pre mysql-5.6) or if the gtid_mode is ON or OFF. */ my_snprintf(query, sizeof(query), "SHOW VARIABLES LIKE %s", quote_for_like("gtid_mode", buf)); if (mysql_query_with_error_report(mysql_con, &gtid_mode_res, query)) return TRUE; gtid_mode_row = mysql_fetch_row(gtid_mode_res); /* gtid_mode_row is NULL for pre 5.6 versions. For versions >= 5.6, get the gtid_mode value from the second column. */ gtid_mode_val = gtid_mode_row ? (char*)gtid_mode_row[1] : NULL; if (gtid_mode_val && strcmp(gtid_mode_val, "OFF")) { /* For any gtid_mode !=OFF and irrespective of --set-gtid-purged being AUTO or ON, add GTID_PURGED in the output. */ if (opt_databases || !opt_alldbs || !opt_dump_triggers || !opt_routines || !opt_events) { fprintf(stderr,"Warning: A partial dump from a server that has GTIDs will " "by default include the GTIDs of all transactions, even " "those that changed suppressed parts of the database. If " "you don't want to restore GTIDs, pass " "--set-gtid-purged=OFF. To make a complete dump, pass " "--all-databases --triggers --routines --events. 
\n"); } set_session_binlog(FALSE); if (add_set_gtid_purged(mysql_con)) return TRUE; } else /* gtid_mode is off */ { if (opt_set_gtid_purged_mode == SET_GTID_PURGED_ON) { fprintf(stderr, "Error: Server has GTIDs disabled.\n"); return TRUE; } } return FALSE; } /* Getting VIEW structure SYNOPSIS get_view_structure() table view name db db name RETURN 0 OK 1 ERROR */ static my_bool get_view_structure(char *table, char* db) { MYSQL_RES *table_res; MYSQL_ROW row; MYSQL_FIELD *field; char *result_table, *opt_quoted_table; char table_buff[NAME_LEN*2+3]; char table_buff2[NAME_LEN*2+3]; char query[QUERY_LENGTH]; FILE *sql_file= md_result_file; DBUG_ENTER("get_view_structure"); if (opt_no_create_info) /* Don't write table creation info */ DBUG_RETURN(0); verbose_msg("-- Retrieving view structure for table %s...\n", table); result_table= quote_name(table, table_buff, 1); opt_quoted_table= quote_name(table, table_buff2, 0); if (switch_character_set_results(mysql, "binary")) DBUG_RETURN(1); my_snprintf(query, sizeof(query), "SHOW CREATE TABLE %s", result_table); if (mysql_query_with_error_report(mysql, &table_res, query)) { switch_character_set_results(mysql, default_charset); DBUG_RETURN(0); } /* Check if this is a view */ field= mysql_fetch_field_direct(table_res, 0); if (strcmp(field->name, "View") != 0) { switch_character_set_results(mysql, default_charset); verbose_msg("-- It's base table, skipped\n"); DBUG_RETURN(0); } /* If requested, open separate .sql file for this view */ if (path) { if (!(sql_file= open_sql_file_for_table(table, O_WRONLY))) DBUG_RETURN(1); write_header(sql_file, db); } print_comment(sql_file, 0, "\n--\n-- Final view structure for view %s\n--\n\n", result_table); /* Table might not exist if this view was dumped with --tab. 
*/ fprintf(sql_file, "/*!50001 DROP TABLE IF EXISTS %s*/;\n", opt_quoted_table); if (opt_drop) { fprintf(sql_file, "/*!50001 DROP VIEW IF EXISTS %s*/;\n", opt_quoted_table); check_io(sql_file); } my_snprintf(query, sizeof(query), "SELECT CHECK_OPTION, DEFINER, SECURITY_TYPE, " " CHARACTER_SET_CLIENT, COLLATION_CONNECTION " "FROM information_schema.views " "WHERE table_name=\"%s\" AND table_schema=\"%s\"", table, db); if (mysql_query(mysql, query)) { /* Use the raw output from SHOW CREATE TABLE if information_schema query fails. */ row= mysql_fetch_row(table_res); fprintf(sql_file, "/*!50001 %s */;\n", row[1]); check_io(sql_file); mysql_free_result(table_res); } else { char *ptr; ulong *lengths; char search_buf[256], replace_buf[256]; ulong search_len, replace_len; DYNAMIC_STRING ds_view; /* Save the result of SHOW CREATE TABLE in ds_view */ row= mysql_fetch_row(table_res); lengths= mysql_fetch_lengths(table_res); init_dynamic_string_checked(&ds_view, row[1], lengths[1] + 1, 1024); mysql_free_result(table_res); /* Get the result from "select ... 
information_schema" */ if (!(table_res= mysql_store_result(mysql)) || !(row= mysql_fetch_row(table_res))) { if (table_res) mysql_free_result(table_res); dynstr_free(&ds_view); DB_error(mysql, "when trying to save the result of SHOW CREATE TABLE in ds_view."); DBUG_RETURN(1); } lengths= mysql_fetch_lengths(table_res); /* "WITH %s CHECK OPTION" is available from 5.0.2 Surround it with !50002 comments */ if (strcmp(row[0], "NONE")) { ptr= search_buf; search_len= (ulong)(strxmov(ptr, "WITH ", row[0], " CHECK OPTION", NullS) - ptr); ptr= replace_buf; replace_len=(ulong)(strxmov(ptr, "*/\n/*!50002 WITH ", row[0], " CHECK OPTION", NullS) - ptr); replace(&ds_view, search_buf, search_len, replace_buf, replace_len); } /* "DEFINER=%s SQL SECURITY %s" is available from 5.0.13 Surround it with !50013 comments */ { size_t user_name_len; char user_name_str[USERNAME_LENGTH + 1]; char quoted_user_name_str[USERNAME_LENGTH * 2 + 3]; size_t host_name_len; char host_name_str[HOSTNAME_LENGTH + 1]; char quoted_host_name_str[HOSTNAME_LENGTH * 2 + 3]; parse_user(row[1], lengths[1], user_name_str, &user_name_len, host_name_str, &host_name_len); ptr= search_buf; search_len= (ulong)(strxmov(ptr, "DEFINER=", quote_name(user_name_str, quoted_user_name_str, FALSE), "@", quote_name(host_name_str, quoted_host_name_str, FALSE), " SQL SECURITY ", row[2], NullS) - ptr); ptr= replace_buf; replace_len= (ulong)(strxmov(ptr, "*/\n/*!50013 DEFINER=", quote_name(user_name_str, quoted_user_name_str, FALSE), "@", quote_name(host_name_str, quoted_host_name_str, FALSE), " SQL SECURITY ", row[2], " */\n/*!50001", NullS) - ptr); replace(&ds_view, search_buf, search_len, replace_buf, replace_len); } /* Dump view structure to file */ fprintf(sql_file, "/*!50001 SET @saved_cs_client = @@character_set_client */;\n" "/*!50001 SET @saved_cs_results = @@character_set_results */;\n" "/*!50001 SET @saved_col_connection = @@collation_connection */;\n" "/*!50001 SET character_set_client = %s */;\n" "/*!50001 SET 
character_set_results = %s */;\n" "/*!50001 SET collation_connection = %s */;\n" "/*!50001 %s */;\n" "/*!50001 SET character_set_client = @saved_cs_client */;\n" "/*!50001 SET character_set_results = @saved_cs_results */;\n" "/*!50001 SET collation_connection = @saved_col_connection */;\n", (const char *) row[3], (const char *) row[3], (const char *) row[4], (const char *) ds_view.str); check_io(sql_file); mysql_free_result(table_res); dynstr_free(&ds_view); } if (switch_character_set_results(mysql, default_charset)) DBUG_RETURN(1); /* If a separate .sql file was opened, close it now */ if (sql_file != md_result_file) { fputs("\n", sql_file); write_footer(sql_file); my_fclose(sql_file, MYF(MY_WME)); } DBUG_RETURN(0); } /* The following functions are wrappers for the dynamic string functions and if they fail, the wrappers will terminate the current process. */ #define DYNAMIC_STR_ERROR_MSG "Couldn't perform DYNAMIC_STRING operation" static void init_dynamic_string_checked(DYNAMIC_STRING *str, const char *init_str, uint init_alloc, uint alloc_increment) { if (init_dynamic_string(str, init_str, init_alloc, alloc_increment)) die(EX_MYSQLERR, DYNAMIC_STR_ERROR_MSG); } static void dynstr_append_checked(DYNAMIC_STRING* dest, const char* src) { if (dynstr_append(dest, src)) die(EX_MYSQLERR, DYNAMIC_STR_ERROR_MSG); } static void dynstr_set_checked(DYNAMIC_STRING *str, const char *init_str) { if (dynstr_set(str, init_str)) die(EX_MYSQLERR, DYNAMIC_STR_ERROR_MSG); } static void dynstr_append_mem_checked(DYNAMIC_STRING *str, const char *append, uint length) { if (dynstr_append_mem(str, append, length)) die(EX_MYSQLERR, DYNAMIC_STR_ERROR_MSG); } static void dynstr_realloc_checked(DYNAMIC_STRING *str, ulong additional_size) { if (dynstr_realloc(str, additional_size)) die(EX_MYSQLERR, DYNAMIC_STR_ERROR_MSG); } int main(int argc, char **argv) { char bin_log_name[FN_REFLEN]; int exit_code; MY_INIT("mysqldump"); compatible_mode_normal_str[0]= 0; default_charset= (char 
*)mysql_universal_client_charset; memset(&ignore_table, 0, sizeof(ignore_table)); exit_code= get_options(&argc, &argv); if (exit_code) { free_resources(); exit(exit_code); } /* Disable comments in xml mode if 'comments' option is not explicitly used. */ if (opt_xml && !opt_comments_used) opt_comments= 0; if (log_error_file) { if(!(stderror_file= freopen(log_error_file, "a+", stderr))) { free_resources(); exit(EX_MYSQLERR); } } if (connect_to_db(current_host, current_user, opt_password)) { free_resources(); exit(EX_MYSQLERR); } if (!path) write_header(md_result_file, *argv); if (opt_slave_data && do_stop_slave_sql(mysql)) goto err; if ((opt_lock_all_tables || opt_master_data || (opt_single_transaction && flush_logs)) && do_flush_tables_read_lock(mysql)) goto err; /* Flush logs before starting transaction since this causes implicit commit starting mysql-5.5. */ if (opt_lock_all_tables || opt_master_data || (opt_single_transaction && flush_logs) || opt_delete_master_logs) { if (flush_logs || opt_delete_master_logs) { if (mysql_refresh(mysql, REFRESH_LOG)) goto err; verbose_msg("-- main : logs flushed successfully!\n"); } /* Not anymore! That would not be sensible. */ flush_logs= 0; } if (opt_delete_master_logs) { if (get_bin_log_name(mysql, bin_log_name, sizeof(bin_log_name))) goto err; } if (opt_single_transaction && start_transaction(mysql)) goto err; /* Add 'STOP SLAVE to beginning of dump */ if (opt_slave_apply && add_stop_slave()) goto err; /* Process opt_set_gtid_purged and add SET @@GLOBAL.GTID_PURGED if required. */ if (process_set_gtid_purged(mysql)) goto err; if (opt_master_data && do_show_master_status(mysql)) goto err; if (opt_slave_data && do_show_slave_status(mysql)) goto err; if (opt_single_transaction && do_unlock_tables(mysql)) /* unlock but no commit! 
*/ goto err; if (opt_alltspcs) dump_all_tablespaces(); if (opt_alldbs) { if (!opt_alltspcs && !opt_notspcs) dump_all_tablespaces(); dump_all_databases(); } else if (argc > 1 && !opt_databases) { /* Only one database and selected table(s) */ if (!opt_alltspcs && !opt_notspcs) dump_tablespaces_for_tables(*argv, (argv + 1), (argc -1)); dump_selected_tables(*argv, (argv + 1), (argc - 1)); } else { /* One or more databases, all tables */ if (!opt_alltspcs && !opt_notspcs) dump_tablespaces_for_databases(argv); dump_databases(argv); } /* if --dump-slave , start the slave sql thread */ if (opt_slave_data && do_start_slave_sql(mysql)) goto err; /* if --set-gtid-purged, restore binlog at the end of the session if required. */ set_session_binlog(TRUE); /* add 'START SLAVE' to end of dump */ if (opt_slave_apply && add_slave_statements()) goto err; /* ensure dumped data flushed */ if (md_result_file && fflush(md_result_file)) { if (!first_error) first_error= EX_MYSQLERR; goto err; } /* everything successful, purge the old logs files */ if (opt_delete_master_logs && purge_bin_logs_to(mysql, bin_log_name)) goto err; #if defined (_WIN32) && !defined (EMBEDDED_LIBRARY) my_free(shared_memory_base_name); #endif /* No reason to explicitely COMMIT the transaction, neither to explicitely UNLOCK TABLES: these will be automatically be done by the server when we disconnect now. Saves some code here, some network trips, adds nothing to server. */ err: dbDisconnect(current_host); if (!path) write_footer(md_result_file); free_resources(); if (stderror_file) fclose(stderror_file); return(first_error); } /* main */
./CrossVul/dataset_final_sorted/CWE-284/c/bad_1571_6
crossvul-cpp_data_good_1571_2
/* Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, 51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA */ #include "client_priv.h" #include "my_default.h" #include <sslopt-vars.h> #include "../scripts/mysql_fix_privilege_tables_sql.c" #include <welcome_copyright_notice.h> /* ORACLE_WELCOME_COPYRIGHT_NOTICE */ #define VER "1.1" #ifdef HAVE_SYS_WAIT_H #include <sys/wait.h> #endif #ifndef WEXITSTATUS # ifdef _WIN32 # define WEXITSTATUS(stat_val) (stat_val) # else # define WEXITSTATUS(stat_val) ((unsigned)(stat_val) >> 8) # endif #endif static char mysql_path[FN_REFLEN]; static char mysqlcheck_path[FN_REFLEN]; static my_bool opt_force, opt_verbose, debug_info_flag, debug_check_flag, opt_systables_only, opt_version_check; static uint my_end_arg= 0; static char *opt_user= (char*)"root"; static DYNAMIC_STRING ds_args; static DYNAMIC_STRING conn_args; static char *opt_password= 0; static char *opt_plugin_dir= 0, *opt_default_auth= 0; static my_bool tty_password= 0; static char opt_tmpdir[FN_REFLEN] = ""; #ifndef DBUG_OFF static char *default_dbug_option= (char*) "d:t:O,/tmp/mysql_upgrade.trace"; #endif static char **defaults_argv; static my_bool not_used; /* Can't use GET_BOOL without a value pointer */ static my_bool opt_write_binlog; static struct my_option my_long_options[]= { {"help", '?', "Display this help message and exit.", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, 
{"character-sets-dir", OPT_CHARSETS_DIR, "Directory for character set files.", 0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"compress", OPT_COMPRESS, "Use compression in server/client protocol.", &not_used, &not_used, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, #ifdef DBUG_OFF {"debug", '#', "This is a non-debug version. Catch this and exit.", 0, 0, 0, GET_DISABLED, OPT_ARG, 0, 0, 0, 0, 0, 0}, #else {"debug", '#', "Output debug log.", &default_dbug_option, &default_dbug_option, 0, GET_STR, OPT_ARG, 0, 0, 0, 0, 0, 0}, #endif {"debug-check", OPT_DEBUG_CHECK, "Check memory and open file usage at exit.", &debug_check_flag, &debug_check_flag, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"debug-info", 'T', "Print some debug info at exit.", &debug_info_flag, &debug_info_flag, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"default-character-set", OPT_DEFAULT_CHARSET, "Set the default character set.", 0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"default_auth", OPT_DEFAULT_AUTH, "Default authentication client-side plugin to use.", &opt_default_auth, &opt_default_auth, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"force", 'f', "Force execution of mysqlcheck even if mysql_upgrade " "has already been executed for the current version of MySQL.", &opt_force, &opt_force, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"host",'h', "Connect to host.", 0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"password", 'p', "Password to use when connecting to server. 
If password is not given," " it's solicited on the tty.", &opt_password,&opt_password, 0, GET_PASSWORD, OPT_ARG, 0, 0, 0, 0, 0, 0}, #ifdef _WIN32 {"pipe", 'W', "Use named pipes to connect to server.", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, #endif {"plugin_dir", OPT_PLUGIN_DIR, "Directory for client-side plugins.", &opt_plugin_dir, &opt_plugin_dir, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"port", 'P', "Port number to use for connection or 0 for default to, in " "order of preference, my.cnf, $MYSQL_TCP_PORT, " #if MYSQL_PORT_DEFAULT == 0 "/etc/services, " #endif "built-in default (" STRINGIFY_ARG(MYSQL_PORT) ").", 0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"protocol", OPT_MYSQL_PROTOCOL, "The protocol to use for connection (tcp, socket, pipe, memory).", 0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, #if defined (_WIN32) && !defined (EMBEDDED_LIBRARY) {"shared-memory-base-name", OPT_SHARED_MEMORY_BASE_NAME, "Base name of shared memory.", 0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, #endif {"version-check", 'k', "Run this program only if its \'server version\' " "matches the version of the server to which it's connecting, (enabled by " "default); use --skip-version-check to avoid this check. 
Note: the \'server " "version\' of the program is the version of the MySQL server with which it " "was built/distributed.", &opt_version_check, &opt_version_check, 0, GET_BOOL, NO_ARG, 1, 0, 0, 0, 0, 0}, {"socket", 'S', "The socket file to use for connection.", 0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, #include <sslopt-longopts.h> {"tmpdir", 't', "Directory for temporary files.", 0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"upgrade-system-tables", 's', "Only upgrade the system tables " "do not try to upgrade the data.", &opt_systables_only, &opt_systables_only, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"user", 'u', "User for login if not current user.", &opt_user, &opt_user, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"verbose", 'v', "Display more output about the process.", &opt_verbose, &opt_verbose, 0, GET_BOOL, NO_ARG, 1, 0, 0, 0, 0, 0}, {"write-binlog", OPT_WRITE_BINLOG, "All commands including mysqlcheck are binlogged. Disabled by default; " "use when commands should be sent to replication slaves.", &opt_write_binlog, &opt_write_binlog, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0} }; static void free_used_memory(void) { /* Free memory allocated by 'load_defaults' */ free_defaults(defaults_argv); dynstr_free(&ds_args); dynstr_free(&conn_args); } static void die(const char *fmt, ...) { va_list args; DBUG_ENTER("die"); /* Print the error message */ va_start(args, fmt); if (fmt) { fprintf(stderr, "FATAL ERROR: "); vfprintf(stderr, fmt, args); fprintf(stderr, "\n"); fflush(stderr); } va_end(args); DBUG_LEAVE; free_used_memory(); my_end(my_end_arg); exit(1); } static void verbose(const char *fmt, ...) 
{ va_list args; if (!opt_verbose) return; /* Print the verbose message */ va_start(args, fmt); if (fmt) { vfprintf(stdout, fmt, args); fprintf(stdout, "\n"); fflush(stdout); } va_end(args); } /* Add one option - passed to mysql_upgrade on command line or by defaults file(my.cnf) - to a dynamic string, in this way we pass the same arguments on to mysql and mysql_check */ static void add_one_option(DYNAMIC_STRING* ds, const struct my_option *opt, const char* argument) { const char* eq= NullS; const char* arg= NullS; if (opt->arg_type != NO_ARG) { eq= "="; switch (opt->var_type & GET_TYPE_MASK) { case GET_STR: case GET_PASSWORD: arg= argument; break; case GET_BOOL: arg= (*(my_bool *)opt->value) ? "1" : "0"; break; default: die("internal error at %s: %d",__FILE__, __LINE__); } } dynstr_append_os_quoted(ds, "--", opt->name, eq, arg, NullS); dynstr_append(ds, " "); } static my_bool get_one_option(int optid, const struct my_option *opt, char *argument) { my_bool add_option= TRUE; switch (optid) { case '?': printf("%s Ver %s Distrib %s, for %s (%s)\n", my_progname, VER, MYSQL_SERVER_VERSION, SYSTEM_TYPE, MACHINE_TYPE); puts(ORACLE_WELCOME_COPYRIGHT_NOTICE("2000")); puts("MySQL utility for upgrading databases to new MySQL versions.\n"); my_print_help(my_long_options); exit(0); break; case '#': DBUG_PUSH(argument ? 
argument : default_dbug_option); add_option= FALSE; debug_check_flag= 1; break; case 'p': if (argument == disabled_my_option) argument= (char*) ""; /* Don't require password */ tty_password= 1; add_option= FALSE; if (argument) { /* Add password to ds_args before overwriting the arg with x's */ add_one_option(&ds_args, opt, argument); while (*argument) *argument++= 'x'; /* Destroy argument */ tty_password= 0; } break; case 't': my_stpnmov(opt_tmpdir, argument, sizeof(opt_tmpdir)); add_option= FALSE; break; case 'k': /* --version-check */ case 'v': /* --verbose */ case 'f': /* --force */ case 's': /* --upgrade-system-tables */ case OPT_WRITE_BINLOG: /* --write-binlog */ add_option= FALSE; break; #include <sslopt-case.h> case 'h': /* --host */ case 'W': /* --pipe */ case 'P': /* --port */ case 'S': /* --socket */ case OPT_MYSQL_PROTOCOL: /* --protocol */ case OPT_SHARED_MEMORY_BASE_NAME: /* --shared-memory-base-name */ case OPT_PLUGIN_DIR: /* --plugin-dir */ case OPT_DEFAULT_AUTH: /* --default-auth */ add_one_option(&conn_args, opt, argument); break; } if (add_option) { /* This is an option that is accpted by mysql_upgrade just so it can be passed on to "mysql" and "mysqlcheck" Save it in the ds_args string */ add_one_option(&ds_args, opt, argument); } return 0; } /** Run a command using the shell, storing its output in the supplied dynamic string. */ static int run_command(char* cmd, DYNAMIC_STRING *ds_res) { char buf[512]= {0}; FILE *res_file; int error; if (! ds_res) { fflush(stdout); fflush(stderr); } if (!(res_file= popen(cmd, "r"))) die("popen(\"%s\", \"r\") failed", cmd); while (fgets(buf, sizeof(buf), res_file)) { DBUG_PRINT("info", ("buf: %s", buf)); if(ds_res) { /* Save the output of this command in the supplied string */ dynstr_append(ds_res, buf); } else { /* Print it directly on screen */ fprintf(stdout, "%s", buf); } } if (! 
ds_res) { fflush(stdout); fflush(stderr); } error= pclose(res_file); return WEXITSTATUS(error); } static int run_tool(char *tool_path, DYNAMIC_STRING *ds_res, ...) { int ret; const char* arg; va_list args; DYNAMIC_STRING ds_cmdline; DBUG_ENTER("run_tool"); DBUG_PRINT("enter", ("tool_path: %s", tool_path)); if (init_dynamic_string(&ds_cmdline, IF_WIN("\"", ""), FN_REFLEN, FN_REFLEN)) die("Out of memory"); dynstr_append_os_quoted(&ds_cmdline, tool_path, NullS); dynstr_append(&ds_cmdline, " "); va_start(args, ds_res); while ((arg= va_arg(args, char *))) { /* Options should be os quoted */ if (strncmp(arg, "--", 2) == 0) dynstr_append_os_quoted(&ds_cmdline, arg, NullS); else dynstr_append(&ds_cmdline, arg); dynstr_append(&ds_cmdline, " "); } va_end(args); #ifdef _WIN32 dynstr_append(&ds_cmdline, "\""); #endif DBUG_PRINT("info", ("Running: %s", ds_cmdline.str)); ret= run_command(ds_cmdline.str, ds_res); DBUG_PRINT("exit", ("ret: %d", ret)); dynstr_free(&ds_cmdline); DBUG_RETURN(ret); } /** Look for the filename of given tool, with the presumption that it is in the same directory as mysql_upgrade and that the same executable-searching mechanism will be used when we run our sub-shells with popen() later. */ static void find_tool(char *tool_executable_name, const char *tool_name, const char *self_name) { char *last_fn_libchar; DYNAMIC_STRING ds_tmp; DBUG_ENTER("find_tool"); DBUG_PRINT("enter", ("progname: %s", my_progname)); if (init_dynamic_string(&ds_tmp, "", 32, 32)) die("Out of memory"); last_fn_libchar= strrchr(self_name, FN_LIBCHAR); if (last_fn_libchar == NULL) { /* mysql_upgrade was found by the shell searching the path. A sibling next to us should be found the same way. */ strncpy(tool_executable_name, tool_name, FN_REFLEN); } else { int len; /* mysql_upgrade was run absolutely or relatively. We can find a sibling by replacing our name after the LIBCHAR with the new tool name. 
*/ /* When running in a not yet installed build and using libtool, the program(mysql_upgrade) will be in .libs/ and executed through a libtool wrapper in order to use the dynamic libraries from this build. The same must be done for the tools(mysql and mysqlcheck). Thus if path ends in .libs/, step up one directory and execute the tools from there */ if (((last_fn_libchar - 6) >= self_name) && (strncmp(last_fn_libchar - 5, ".libs", 5) == 0) && (*(last_fn_libchar - 6) == FN_LIBCHAR)) { DBUG_PRINT("info", ("Chopping off \".libs\" from end of path")); last_fn_libchar -= 6; } len= last_fn_libchar - self_name; my_snprintf(tool_executable_name, FN_REFLEN, "%.*s%c%s", len, self_name, FN_LIBCHAR, tool_name); } verbose("Looking for '%s' as: %s", tool_name, tool_executable_name); /* Make sure it can be executed */ if (run_tool(tool_executable_name, &ds_tmp, /* Get output from command, discard*/ "--help", "2>&1", IF_WIN("> NUL", "> /dev/null"), NULL)) die("Can't execute '%s'", tool_executable_name); dynstr_free(&ds_tmp); DBUG_VOID_RETURN; } /* Run query using "mysql" */ static int run_query(const char *query, DYNAMIC_STRING *ds_res, my_bool force) { int ret; File fd; char query_file_path[FN_REFLEN]; const uchar sql_log_bin[]= "SET SQL_LOG_BIN=0;"; DBUG_ENTER("run_query"); DBUG_PRINT("enter", ("query: %s", query)); if ((fd= create_temp_file(query_file_path, opt_tmpdir[0] ? opt_tmpdir : NULL, "sql", O_CREAT | O_SHARE | O_RDWR, MYF(MY_WME))) < 0) die("Failed to create temporary file for defaults"); /* Master and slave should be upgraded separately. All statements executed by mysql_upgrade will not be binlogged. 'SET SQL_LOG_BIN=0' is executed before any other statements. 
*/ if (!opt_write_binlog) { if (my_write(fd, sql_log_bin, sizeof(sql_log_bin)-1, MYF(MY_FNABP | MY_WME))) { my_close(fd, MYF(0)); my_delete(query_file_path, MYF(0)); die("Failed to write to '%s'", query_file_path); } } if (my_write(fd, (uchar*) query, strlen(query), MYF(MY_FNABP | MY_WME))) { my_close(fd, MYF(0)); my_delete(query_file_path, MYF(0)); die("Failed to write to '%s'", query_file_path); } ret= run_tool(mysql_path, ds_res, "--no-defaults", ds_args.str, "--database=mysql", "--batch", /* Turns off pager etc. */ force ? "--force": "--skip-force", ds_res ? "--silent": "", "<", query_file_path, "2>&1", NULL); my_close(fd, MYF(0)); my_delete(query_file_path, MYF(0)); DBUG_RETURN(ret); } /* Extract the value returned from result of "show variable like ..." */ static int extract_variable_from_show(DYNAMIC_STRING* ds, char* value) { char *value_start, *value_end; size_t len; /* The query returns "datadir\t<datadir>\n", skip past the tab */ if ((value_start= strchr(ds->str, '\t')) == NULL) return 1; /* Unexpected result */ value_start++; /* Don't copy the ending newline */ if ((value_end= strchr(value_start, '\n')) == NULL) return 1; /* Unexpected result */ len= (size_t) MY_MIN(FN_REFLEN, value_end-value_start); strncpy(value, value_start, len); value[len]= '\0'; return 0; } static int get_upgrade_info_file_name(char* name) { DYNAMIC_STRING ds_datadir; DBUG_ENTER("get_upgrade_info_file_name"); if (init_dynamic_string(&ds_datadir, NULL, 32, 32)) die("Out of memory"); if (run_query("show variables like 'datadir'", &ds_datadir, FALSE) || extract_variable_from_show(&ds_datadir, name)) { dynstr_free(&ds_datadir); DBUG_RETURN(1); /* Query failed */ } dynstr_free(&ds_datadir); fn_format(name, "mysql_upgrade_info", name, "", MYF(0)); DBUG_PRINT("exit", ("name: %s", name)); DBUG_RETURN(0); } /* Read the content of mysql_upgrade_info file and compare the version number form file against version number wich mysql_upgrade was compiled for NOTE This is an optimization to avoid 
  running mysql_upgrade when it's already been performed for the
  particular version of MySQL.

  In case the MySQL server can't return the upgrade info file
  it's always better to report that the upgrade hasn't been performed.
*/

static int upgrade_already_done(void)
{
  FILE *in;
  char upgrade_info_file[FN_REFLEN]= {0};
  /* Sized to hold the stored version string plus a terminator */
  char buf[sizeof(MYSQL_SERVER_VERSION)+1];
  char *res;

  if (get_upgrade_info_file_name(upgrade_info_file))
    return 0; /* Could not get filename => not sure */

  if (!(in= my_fopen(upgrade_info_file, O_RDONLY, MYF(0))))
    return 0; /* Could not open file => not sure */

  /*
    Read from file, don't care if it fails since it
    will be detected by the strncmp
  */
  memset(buf, 0, sizeof(buf));
  res= fgets(buf, sizeof(buf), in);

  my_fclose(in, MYF(0));

  if (!res)
    return 0; /* Could not read from file => not sure */

  /* Upgrade already done only if stored version matches this build */
  return (strncmp(res, MYSQL_SERVER_VERSION,
                  sizeof(MYSQL_SERVER_VERSION)-1)==0);
}


/*
  Write mysql_upgrade_info file in servers data dir
  indicating that upgrade has been done for this version

  NOTE
  This might very well fail but since it's just an optimization
  to run mysql_upgrade only when necessary the error can be
  ignored.
*/

static void create_mysql_upgrade_info_file(void)
{
  FILE *out;
  char upgrade_info_file[FN_REFLEN]= {0};

  if (get_upgrade_info_file_name(upgrade_info_file))
    return; /* Could not get filename => skip */

  if (!(out= my_fopen(upgrade_info_file, O_TRUNC | O_WRONLY, MYF(0))))
  {
    fprintf(stderr,
            "Could not create the upgrade info file '%s' in "
            "the MySQL Servers datadir, errno: %d\n",
            upgrade_info_file, errno);
    return;
  }

  /* Write new version to file */
  fputs(MYSQL_SERVER_VERSION, out);
  my_fclose(out, MYF(0));

  /*
    Check if the upgrade info file was properly created/updated
    by re-reading it. It's not a fatal error -> just print a
    message if it fails.
  */
  if (!upgrade_already_done())
    fprintf(stderr,
            "Could not write to the upgrade info file '%s' in "
            "the MySQL Servers datadir, errno: %d\n",
            upgrade_info_file, errno);
  return;
}


/*
  Print connection-related arguments.
*/ static void print_conn_args(const char *tool_name) { if (conn_args.str[0]) verbose("Running '%s' with connection arguments: %s", tool_name, conn_args.str); else verbose("Running '%s with default connection arguments", tool_name); } /* Check and upgrade(if neccessary) all tables in the server using "mysqlcheck --check-upgrade .." */ static int run_mysqlcheck_upgrade(void) { print_conn_args("mysqlcheck"); return run_tool(mysqlcheck_path, NULL, /* Send output from mysqlcheck directly to screen */ "--no-defaults", ds_args.str, "--check-upgrade", "--all-databases", "--skip-database=mysql", "--auto-repair", opt_write_binlog ? "--write-binlog" : "--skip-write-binlog", NULL); } static int run_mysqlcheck_fixnames(void) { print_conn_args("mysqlcheck"); return run_tool(mysqlcheck_path, NULL, /* Send output from mysqlcheck directly to screen */ "--no-defaults", ds_args.str, "--all-databases", "--skip-database=mysql", "--fix-db-names", "--fix-table-names", opt_write_binlog ? "--write-binlog" : "--skip-write-binlog", NULL); } /** performs the same operation as mysqlcheck_upgrade, but on the mysql db */ static int run_mysqlcheck_mysql_db_upgrade(void) { print_conn_args("mysqlcheck"); return run_tool(mysqlcheck_path, NULL, /* Send output from mysqlcheck directly to screen */ "--no-defaults", ds_args.str, "--check-upgrade", "--databases", "--auto-repair", opt_write_binlog ? "--write-binlog" : "--skip-write-binlog", "mysql", NULL); } /** performs the same operation as mysqlcheck_upgrade, but on the mysql db */ static int run_mysqlcheck_mysql_db_fixnames(void) { print_conn_args("mysqlcheck"); return run_tool(mysqlcheck_path, NULL, /* Send output from mysqlcheck directly to screen */ "--no-defaults", ds_args.str, "--databases", "--fix-db-names", "--fix-table-names", opt_write_binlog ? 
"--write-binlog" : "--skip-write-binlog", "mysql", NULL); } static const char *expected_errors[]= { "ERROR 1060", /* Duplicate column name */ "ERROR 1061", /* Duplicate key name */ "ERROR 1054", /* Unknown column */ 0 }; static my_bool is_expected_error(const char* line) { const char** error= expected_errors; while (*error) { /* Check if lines starting with ERROR are in the list of expected errors */ if (strncmp(line, "ERROR", 5) != 0 || strncmp(line, *error, strlen(*error)) == 0) return 1; /* Found expected error */ error++; } return 0; } static char* get_line(char* line) { while (*line && *line != '\n') line++; if (*line) line++; return line; } /* Print the current line to stderr */ static void print_line(char* line) { while (*line && *line != '\n') { fputc(*line, stderr); line++; } fputc('\n', stderr); } /* Update all system tables in MySQL Server to current version using "mysql" to execute all the SQL commands compiled into the mysql_fix_privilege_tables array */ static int run_sql_fix_privilege_tables(void) { int found_real_errors= 0; const char **query_ptr; DYNAMIC_STRING ds_script; DYNAMIC_STRING ds_result; DBUG_ENTER("run_sql_fix_privilege_tables"); if (init_dynamic_string(&ds_script, "", 65536, 1024)) die("Out of memory"); if (init_dynamic_string(&ds_result, "", 512, 512)) die("Out of memory"); verbose("Running 'mysql_fix_privilege_tables'..."); /* Individual queries can not be executed independently by invoking a forked mysql client, because the script uses session variables and prepared statements. 
*/ for ( query_ptr= &mysql_fix_privilege_tables[0]; *query_ptr != NULL; query_ptr++ ) { dynstr_append(&ds_script, *query_ptr); } run_query(ds_script.str, &ds_result, /* Collect result */ TRUE); { /* Scan each line of the result for real errors and ignore the expected one(s) like "Duplicate column name", "Unknown column" and "Duplicate key name" since they just indicate the system tables are already up to date */ char *line= ds_result.str; do { if (!is_expected_error(line)) { /* Something unexpected failed, dump error line to screen */ found_real_errors++; print_line(line); } else { char *c; /* We process the output of the child process here. Basically, if a line contains a warning, we'll print it, otherwise, we won't. The first branch handles new-style tools that print their name, then the severity in brackets, the second branch handles old-style tools that just print a severity. */ if ((c= strstr(line, ": ")) && (c < strchr(line, ' ')) && (strncmp(c + 2, "[Warning] ", 10) == 0)) print_line(line); else if ((strncmp(line, "WARNING", 7) == 0) || (strncmp(line, "Warning", 7) == 0)) print_line(line); } } while ((line= get_line(line)) && *line); } dynstr_free(&ds_result); dynstr_free(&ds_script); DBUG_RETURN(found_real_errors); } static const char *load_default_groups[]= { "client", /* Read settings how to connect to server */ "mysql_upgrade", /* Read special settings for mysql_upgrade*/ 0 }; /* Convert the specified version string into the numeric format. */ static ulong STDCALL calc_server_version(char *some_version) { uint major, minor, version; char *point= some_version, *end_point; major= (uint) strtoul(point, &end_point, 10); point=end_point+1; minor= (uint) strtoul(point, &end_point, 10); point=end_point+1; version= (uint) strtoul(point, &end_point, 10); return (ulong) major * 10000L + (ulong)(minor * 100 + version); } /** Check if the server version matches with the server version mysql_upgrade was compiled with. 
@return 0 match successful 1 failed */ static int check_version_match(void) { DYNAMIC_STRING ds_version; char version_str[NAME_CHAR_LEN + 1]; if (init_dynamic_string(&ds_version, NULL, NAME_CHAR_LEN, NAME_CHAR_LEN)) die("Out of memory"); if (run_query("show variables like 'version'", &ds_version, FALSE) || extract_variable_from_show(&ds_version, version_str)) { dynstr_free(&ds_version); return 1; /* Query failed */ } dynstr_free(&ds_version); if (calc_server_version((char *) version_str) != MYSQL_VERSION_ID) { fprintf(stderr, "Error: Server version (%s) does not match with the " "version of\nthe server (%s) with which this program was built/" "distributed. You can\nuse --skip-version-check to skip this " "check.\n", version_str, MYSQL_SERVER_VERSION); return 1; } else return 0; } int main(int argc, char **argv) { char self_name[FN_REFLEN]; MY_INIT(argv[0]); #if _WIN32 if (GetModuleFileName(NULL, self_name, FN_REFLEN) == 0) #endif { strncpy(self_name, argv[0], FN_REFLEN); } if (init_dynamic_string(&ds_args, "", 512, 256) || init_dynamic_string(&conn_args, "", 512, 256)) die("Out of memory"); my_getopt_use_args_separator= TRUE; if (load_defaults("my", load_default_groups, &argc, &argv)) die(NULL); my_getopt_use_args_separator= FALSE; defaults_argv= argv; /* Must be freed by 'free_defaults' */ if (handle_options(&argc, &argv, my_long_options, get_one_option)) die(NULL); if (debug_info_flag) my_end_arg= MY_CHECK_ERROR | MY_GIVE_INFO; if (debug_check_flag) my_end_arg= MY_CHECK_ERROR; if (tty_password) { opt_password= get_tty_password(NullS); /* add password to defaults file */ dynstr_append_os_quoted(&ds_args, "--password=", opt_password, NullS); dynstr_append(&ds_args, " "); } /* add user to defaults file */ dynstr_append_os_quoted(&ds_args, "--user=", opt_user, NullS); dynstr_append(&ds_args, " "); /* Find mysql */ find_tool(mysql_path, IF_WIN("mysql.exe", "mysql"), self_name); if (!opt_systables_only) { /* Find mysqlcheck */ find_tool(mysqlcheck_path, 
IF_WIN("mysqlcheck.exe", "mysqlcheck"), self_name); } else { printf("The --upgrade-system-tables option was used, databases won't be touched.\n"); } /* Read the mysql_upgrade_info file to check if mysql_upgrade already has been run for this installation of MySQL */ if (!opt_force && upgrade_already_done()) { printf("This installation of MySQL is already upgraded to %s, " "use --force if you still need to run mysql_upgrade\n", MYSQL_SERVER_VERSION); die(NULL); } if (opt_version_check && check_version_match()) die("Upgrade failed"); /* Run "mysqlcheck" and "mysql_fix_privilege_tables.sql" First run mysqlcheck on the system database. Then do the upgrade. And then run mysqlcheck on all tables. */ if ((!opt_systables_only && (run_mysqlcheck_mysql_db_fixnames() || run_mysqlcheck_mysql_db_upgrade())) || run_sql_fix_privilege_tables() || (!opt_systables_only && (run_mysqlcheck_fixnames() || run_mysqlcheck_upgrade()))) { /* The upgrade failed to complete in some way or another, significant error message should have been printed to the screen */ die("Upgrade failed" ); } verbose("OK"); /* Create a file indicating upgrade has been performed */ create_mysql_upgrade_info_file(); free_used_memory(); my_end(my_end_arg); exit(0); }
./CrossVul/dataset_final_sorted/CWE-284/c/good_1571_2
crossvul-cpp_data_good_880_2
/*
 * Copyright (C) 2014-2019 Firejail Authors
 *
 * This file is part of firejail project
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301 USA.
 */
#include "firejail.h"
#include <sys/mount.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <dirent.h>

/* Flag: set once the tmpfs has been mounted on RUN_MNT_DIR */
static int tmpfs_mounted = 0;

// build /run/firejail directory
// Creates the runtime directory hierarchy (paths come from the RUN_*
// macros in firejail.h) if the directories do not already exist.
void preproc_build_firejail_dir(void) {
	struct stat s;

	// CentOS 6 doesn't have /run directory
	if (stat(RUN_FIREJAIL_BASEDIR, &s)) {
		create_empty_dir_as_root(RUN_FIREJAIL_BASEDIR, 0755);
	}

	if (stat(RUN_FIREJAIL_DIR, &s)) {
		create_empty_dir_as_root(RUN_FIREJAIL_DIR, 0755);
	}
	if (stat(RUN_FIREJAIL_NETWORK_DIR, &s)) {
		create_empty_dir_as_root(RUN_FIREJAIL_NETWORK_DIR, 0755);
	}
	if (stat(RUN_FIREJAIL_BANDWIDTH_DIR, &s)) {
		create_empty_dir_as_root(RUN_FIREJAIL_BANDWIDTH_DIR, 0755);
	}
	if (stat(RUN_FIREJAIL_NAME_DIR, &s)) {
		create_empty_dir_as_root(RUN_FIREJAIL_NAME_DIR, 0755);
	}
	if (stat(RUN_FIREJAIL_PROFILE_DIR, &s)) {
		create_empty_dir_as_root(RUN_FIREJAIL_PROFILE_DIR, 0755);
	}
	if (stat(RUN_FIREJAIL_X11_DIR, &s)) {
		create_empty_dir_as_root(RUN_FIREJAIL_X11_DIR, 0755);
	}
	if (stat(RUN_FIREJAIL_APPIMAGE_DIR, &s)) {
		create_empty_dir_as_root(RUN_FIREJAIL_APPIMAGE_DIR, 0755);
	}
	if (stat(RUN_FIREJAIL_LIB_DIR, &s)) {
		create_empty_dir_as_root(RUN_FIREJAIL_LIB_DIR, 0755);
	}
	if (stat(RUN_MNT_DIR, &s)) {
		create_empty_dir_as_root(RUN_MNT_DIR, 0755);
	}
	// read-only placeholder file/dir, owner-read only
	create_empty_file_as_root(RUN_RO_FILE, S_IRUSR);
	create_empty_dir_as_root(RUN_RO_DIR, S_IRUSR);
}

// build /run/firejail/mnt directory
// Mounts a fresh tmpfs on RUN_MNT_DIR (only once per process) and
// populates it with the seccomp filter files when HAVE_SECCOMP is set.
void preproc_mount_mnt_dir(void) {
	// mount tmpfs on top of /run/firejail/mnt
	if (!tmpfs_mounted) {
		if (arg_debug)
			printf("Mounting tmpfs on %s directory\n", RUN_MNT_DIR);
		if (mount("tmpfs", RUN_MNT_DIR, "tmpfs", MS_NOSUID | MS_STRICTATIME, "mode=755,gid=0") < 0)
			errExit("mounting /run/firejail/mnt");
		tmpfs_mounted = 1;
		fs_logger2("tmpfs", RUN_MNT_DIR);

#ifdef HAVE_SECCOMP
		create_empty_dir_as_root(RUN_SECCOMP_DIR, 0755);

		if (arg_seccomp_block_secondary)
			copy_file(PATH_SECCOMP_BLOCK_SECONDARY, RUN_SECCOMP_BLOCK_SECONDARY, getuid(), getgid(), 0644); // root needed
		else {
			//copy default seccomp files
			copy_file(PATH_SECCOMP_32, RUN_SECCOMP_32, getuid(), getgid(), 0644); // root needed
		}
		if (arg_allow_debuggers)
			copy_file(PATH_SECCOMP_DEFAULT_DEBUG, RUN_SECCOMP_CFG, getuid(), getgid(), 0644); // root needed
		else
			copy_file(PATH_SECCOMP_DEFAULT, RUN_SECCOMP_CFG, getuid(), getgid(), 0644); // root needed
		if (arg_memory_deny_write_execute)
			copy_file(PATH_SECCOMP_MDWX, RUN_SECCOMP_MDWX, getuid(), getgid(), 0644); // root needed

		// as root, create empty RUN_SECCOMP_PROTOCOL and RUN_SECCOMP_POSTEXEC files
		create_empty_file_as_root(RUN_SECCOMP_PROTOCOL, 0644);
		if (set_perms(RUN_SECCOMP_PROTOCOL, getuid(), getgid(), 0644))
			errExit("set_perms");
		create_empty_file_as_root(RUN_SECCOMP_POSTEXEC, 0644);
		if (set_perms(RUN_SECCOMP_POSTEXEC, getuid(), getgid(), 0644))
			errExit("set_perms");
#endif
	}
}

// Remove leftover per-pid files from one runtime directory.
// pidarr[pid] == 1 marks a live process (filled in by preproc_clean_run);
// entries whose numeric name maps to a dead pid are deleted.
static void clean_dir(const char *name, int *pidarr, int start_pid, int max_pids) {
	DIR *dir;
	if (!(dir = opendir(name))) {
		fwarning("cannot clean %s directory\n", name);
		return; // we live to fight another day!
	}

	// clean leftover files
	struct dirent *entry;
	char *end;
	while ((entry = readdir(dir)) != NULL) {
		pid_t pid = strtol(entry->d_name, &end, 10);
		// keep the index inside pidarr bounds
		pid %= max_pids;
		// skip entries whose name is not purely numeric
		if (end == entry->d_name || *end)
			continue;
		if (pid < start_pid)
			continue;
		if (pidarr[pid] == 0)
			delete_run_files(pid);
	}
	closedir(dir);
}

// clean run directory
// Scans /proc for live pids, then removes stale per-pid files from the
// profile and name runtime directories.
void preproc_clean_run(void) {
	int max_pids=32769;
	int start_pid = 100;
	// extract real max_pids
	FILE *fp = fopen("/proc/sys/kernel/pid_max", "r");
	if (fp) {
		int val;
		if (fscanf(fp, "%d", &val) == 1) {
			if (val > 4194304)	// this is the max value supported on 64 bit Linux kernels
				val = 4194304;
			if (val >= max_pids)
				max_pids = val + 1;
		}
		fclose(fp);
	}
	int *pidarr = malloc(max_pids * sizeof(int));
	if (!pidarr)
		errExit("malloc");

	memset(pidarr, 0, max_pids * sizeof(int));

	// open /proc directory
	DIR *dir;
	if (!(dir = opendir("/proc"))) {
		// sleep 2 seconds and try again
		sleep(2);
		if (!(dir = opendir("/proc"))) {
			fprintf(stderr, "Error: cannot open /proc directory\n");
			exit(1);
		}
	}

	// read /proc and populate pidarr with all active processes
	struct dirent *entry;
	char *end;
	while ((entry = readdir(dir)) != NULL) {
		pid_t pid = strtol(entry->d_name, &end, 10);
		// keep the index inside pidarr bounds
		pid %= max_pids;
		// skip non-numeric /proc entries
		if (end == entry->d_name || *end)
			continue;
		if (pid < start_pid)
			continue;
		pidarr[pid] = 1;
	}
	closedir(dir);

	// clean profile and name directories
	clean_dir(RUN_FIREJAIL_PROFILE_DIR, pidarr, start_pid, max_pids);
	clean_dir(RUN_FIREJAIL_NAME_DIR, pidarr, start_pid, max_pids);

	free(pidarr);
}
./CrossVul/dataset_final_sorted/CWE-284/c/good_880_2
crossvul-cpp_data_good_4786_1
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % QQQ U U AAA N N TTTTT U U M M % % Q Q U U A A NN N T U U MM MM % % Q Q U U AAAAA N N N T U U M M M % % Q QQ U U A A N NN T U U M M % % QQQQ UUU A A N N T UUU M M % % % % IIIII M M PPPP OOO RRRR TTTTT % % I MM MM P P O O R R T % % I M M M PPPP O O RRRR T % % I M M P O O R R T % % IIIII M M P OOO R R T % % % % MagickCore Methods to Import Quantum Pixels % % % % Software Design % % Cristy % % October 1998 % % % % % % Copyright 1999-2016 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % http://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/property.h" #include "MagickCore/blob.h" #include "MagickCore/blob-private.h" #include "MagickCore/color-private.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/cache.h" #include "MagickCore/constitute.h" #include "MagickCore/delegate.h" #include "MagickCore/geometry.h" #include "MagickCore/list.h" #include "MagickCore/magick.h" #include "MagickCore/memory_.h" #include "MagickCore/monitor.h" #include "MagickCore/option.h" #include "MagickCore/pixel.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/pixel-private.h" #include "MagickCore/quantum.h" #include "MagickCore/quantum-private.h" #include "MagickCore/resource_.h" #include "MagickCore/semaphore.h" #include "MagickCore/statistic.h" #include "MagickCore/stream.h" #include "MagickCore/string_.h" #include "MagickCore/utility.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I m p o r t Q u a n t u m P i x e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ImportQuantumPixels() transfers one or more pixel components from a user % supplied buffer into the image pixel cache of an image. The pixels are % expected in network byte order. It returns MagickTrue if the pixels are % successfully transferred, otherwise MagickFalse. % % The format of the ImportQuantumPixels method is: % % size_t ImportQuantumPixels(const Image *image,CacheView *image_view, % QuantumInfo *quantum_info,const QuantumType quantum_type, % const unsigned char *magick_restrict pixels,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o image_view: the image cache view. % % o quantum_info: the quantum info. % % o quantum_type: Declare which pixel components to transfer (red, green, % blue, opacity, RGB, or RGBA). % % o pixels: The pixel components are transferred from this buffer. 
% % o exception: return any errors or warnings in this structure. % */ static inline Quantum PushColormapIndex(const Image *image,const size_t index, MagickBooleanType *range_exception) { if (index < image->colors) return((Quantum) index); *range_exception=MagickTrue; return((Quantum) 0); } static inline const unsigned char *PushDoublePixel(QuantumInfo *quantum_info, const unsigned char *magick_restrict pixels,double *pixel) { double *p; unsigned char quantum[8]; if (quantum_info->endian == LSBEndian) { quantum[0]=(*pixels++); quantum[1]=(*pixels++); quantum[2]=(*pixels++); quantum[3]=(*pixels++); quantum[4]=(*pixels++); quantum[5]=(*pixels++); quantum[6]=(*pixels++); quantum[7]=(*pixels++); p=(double *) quantum; *pixel=(*p); *pixel-=quantum_info->minimum; *pixel*=quantum_info->scale; return(pixels); } quantum[7]=(*pixels++); quantum[6]=(*pixels++); quantum[5]=(*pixels++); quantum[4]=(*pixels++); quantum[3]=(*pixels++); quantum[2]=(*pixels++); quantum[1]=(*pixels++); quantum[0]=(*pixels++); p=(double *) quantum; *pixel=(*p); *pixel-=quantum_info->minimum; *pixel*=quantum_info->scale; return(pixels); } static inline const unsigned char *PushFloatPixel(QuantumInfo *quantum_info, const unsigned char *magick_restrict pixels,float *pixel) { float *p; unsigned char quantum[4]; if (quantum_info->endian == LSBEndian) { quantum[0]=(*pixels++); quantum[1]=(*pixels++); quantum[2]=(*pixels++); quantum[3]=(*pixels++); p=(float *) quantum; *pixel=(*p); *pixel-=quantum_info->minimum; *pixel*=quantum_info->scale; return(pixels); } quantum[3]=(*pixels++); quantum[2]=(*pixels++); quantum[1]=(*pixels++); quantum[0]=(*pixels++); p=(float *) quantum; *pixel=(*p); *pixel-=quantum_info->minimum; *pixel*=quantum_info->scale; return(pixels); } static inline const unsigned char *PushQuantumPixel(QuantumInfo *quantum_info, const unsigned char *magick_restrict pixels,unsigned int *quantum) { register ssize_t i; register size_t quantum_bits; *quantum=(QuantumAny) 0; for (i=(ssize_t) 
quantum_info->depth; i > 0L; ) { if (quantum_info->state.bits == 0UL) { quantum_info->state.pixel=(*pixels++); quantum_info->state.bits=8UL; } quantum_bits=(size_t) i; if (quantum_bits > quantum_info->state.bits) quantum_bits=quantum_info->state.bits; i-=(ssize_t) quantum_bits; quantum_info->state.bits-=quantum_bits; *quantum=(unsigned int) ((*quantum << quantum_bits) | ((quantum_info->state.pixel >> quantum_info->state.bits) &~ ((~0UL) << quantum_bits))); } return(pixels); } static inline const unsigned char *PushQuantumLongPixel( QuantumInfo *quantum_info,const unsigned char *magick_restrict pixels, unsigned int *quantum) { register ssize_t i; register size_t quantum_bits; *quantum=0UL; for (i=(ssize_t) quantum_info->depth; i > 0; ) { if (quantum_info->state.bits == 0) { pixels=PushLongPixel(quantum_info->endian,pixels, &quantum_info->state.pixel); quantum_info->state.bits=32U; } quantum_bits=(size_t) i; if (quantum_bits > quantum_info->state.bits) quantum_bits=quantum_info->state.bits; *quantum|=(((quantum_info->state.pixel >> (32U-quantum_info->state.bits)) & quantum_info->state.mask[quantum_bits]) << (quantum_info->depth-i)); i-=(ssize_t) quantum_bits; quantum_info->state.bits-=quantum_bits; } return(pixels); } static void ImportAlphaQuantum(const Image *image,QuantumInfo *quantum_info, const MagickSizeType number_pixels,const unsigned char *magick_restrict p, Quantum *magick_restrict q,ExceptionInfo *exception) { QuantumAny range; register ssize_t x; unsigned int pixel; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); switch (quantum_info->depth) { case 8: { unsigned char pixel; for (x=0; x < (ssize_t) number_pixels; x++) { p=PushCharPixel(p,&pixel); SetPixelAlpha(image,ScaleCharToQuantum(pixel),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } case 16: { unsigned short pixel; if (quantum_info->format == FloatingPointQuantumFormat) { for (x=0; x < (ssize_t) number_pixels; x++) { 
p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelAlpha(image,ClampToQuantum(QuantumRange* HalfToSinglePrecision(pixel)),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } for (x=0; x < (ssize_t) number_pixels; x++) { p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelAlpha(image,ScaleShortToQuantum(pixel),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } case 32: { unsigned int pixel; if (quantum_info->format == FloatingPointQuantumFormat) { float pixel; for (x=0; x < (ssize_t) number_pixels; x++) { p=PushFloatPixel(quantum_info,p,&pixel); SetPixelAlpha(image,ClampToQuantum(pixel),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } for (x=0; x < (ssize_t) number_pixels; x++) { p=PushLongPixel(quantum_info->endian,p,&pixel); SetPixelAlpha(image,ScaleLongToQuantum(pixel),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } case 64: { if (quantum_info->format == FloatingPointQuantumFormat) { double pixel; for (x=0; x < (ssize_t) number_pixels; x++) { p=PushDoublePixel(quantum_info,p,&pixel); SetPixelAlpha(image,ClampToQuantum(pixel),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } } default: { range=GetQuantumRange(quantum_info->depth); for (x=0; x < (ssize_t) number_pixels; x++) { p=PushQuantumPixel(quantum_info,p,&pixel); SetPixelAlpha(image,ScaleAnyToQuantum(pixel,range),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } } } static void ImportBGRQuantum(const Image *image,QuantumInfo *quantum_info, const MagickSizeType number_pixels,const unsigned char *magick_restrict p, Quantum *magick_restrict q,ExceptionInfo *exception) { QuantumAny range; register ssize_t x; ssize_t bit; unsigned int pixel; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); switch (quantum_info->depth) { case 8: { unsigned char pixel; for (x=0; x < (ssize_t) number_pixels; x++) { p=PushCharPixel(p,&pixel); SetPixelBlue(image,ScaleCharToQuantum(pixel),q); 
p=PushCharPixel(p,&pixel); SetPixelGreen(image,ScaleCharToQuantum(pixel),q); p=PushCharPixel(p,&pixel); SetPixelRed(image,ScaleCharToQuantum(pixel),q); SetPixelAlpha(image,OpaqueAlpha,q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } case 10: { range=GetQuantumRange(quantum_info->depth); if (quantum_info->pack == MagickFalse) { for (x=0; x < (ssize_t) number_pixels; x++) { p=PushLongPixel(quantum_info->endian,p,&pixel); SetPixelRed(image,ScaleAnyToQuantum((pixel >> 22) & 0x3ff,range),q); SetPixelGreen(image,ScaleAnyToQuantum((pixel >> 12) & 0x3ff,range), q); SetPixelBlue(image,ScaleAnyToQuantum((pixel >> 2) & 0x3ff,range),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } if (quantum_info->quantum == 32U) { for (x=0; x < (ssize_t) number_pixels; x++) { p=PushQuantumLongPixel(quantum_info,p,&pixel); SetPixelBlue(image,ScaleAnyToQuantum(pixel,range),q); p=PushQuantumLongPixel(quantum_info,p,&pixel); SetPixelGreen(image,ScaleAnyToQuantum(pixel,range),q); p=PushQuantumLongPixel(quantum_info,p,&pixel); SetPixelRed(image,ScaleAnyToQuantum(pixel,range),q); q+=GetPixelChannels(image); } break; } for (x=0; x < (ssize_t) number_pixels; x++) { p=PushQuantumPixel(quantum_info,p,&pixel); SetPixelBlue(image,ScaleAnyToQuantum(pixel,range),q); p=PushQuantumPixel(quantum_info,p,&pixel); SetPixelGreen(image,ScaleAnyToQuantum(pixel,range),q); p=PushQuantumPixel(quantum_info,p,&pixel); SetPixelRed(image,ScaleAnyToQuantum(pixel,range),q); q+=GetPixelChannels(image); } break; } case 12: { range=GetQuantumRange(quantum_info->depth); if (quantum_info->pack == MagickFalse) { unsigned short pixel; for (x=0; x < (ssize_t) (3*number_pixels-1); x+=2) { p=PushShortPixel(quantum_info->endian,p,&pixel); switch (x % 3) { default: case 0: { SetPixelRed(image,ScaleAnyToQuantum((QuantumAny) (pixel >> 4), range),q); break; } case 1: { SetPixelGreen(image,ScaleAnyToQuantum((QuantumAny) (pixel >> 4), range),q); break; } case 2: { 
SetPixelBlue(image,ScaleAnyToQuantum((QuantumAny) (pixel >> 4), range),q); q+=GetPixelChannels(image); break; } } p=PushShortPixel(quantum_info->endian,p,&pixel); switch ((x+1) % 3) { default: case 0: { SetPixelRed(image,ScaleAnyToQuantum((QuantumAny) (pixel >> 4), range),q); break; } case 1: { SetPixelGreen(image,ScaleAnyToQuantum((QuantumAny) (pixel >> 4), range),q); break; } case 2: { SetPixelBlue(image,ScaleAnyToQuantum((QuantumAny) (pixel >> 4), range),q); q+=GetPixelChannels(image); break; } } p+=quantum_info->pad; } for (bit=0; bit < (ssize_t) (3*number_pixels % 2); bit++) { p=PushShortPixel(quantum_info->endian,p,&pixel); switch ((x+bit) % 3) { default: case 0: { SetPixelRed(image,ScaleAnyToQuantum((QuantumAny) (pixel >> 4), range),q); break; } case 1: { SetPixelGreen(image,ScaleAnyToQuantum((QuantumAny) (pixel >> 4), range),q); break; } case 2: { SetPixelBlue(image,ScaleAnyToQuantum((QuantumAny) (pixel >> 4), range),q); q+=GetPixelChannels(image); break; } } p+=quantum_info->pad; } if (bit != 0) p++; break; } if (quantum_info->quantum == 32U) { for (x=0; x < (ssize_t) number_pixels; x++) { p=PushQuantumLongPixel(quantum_info,p,&pixel); SetPixelBlue(image,ScaleAnyToQuantum(pixel,range),q); p=PushQuantumLongPixel(quantum_info,p,&pixel); SetPixelGreen(image,ScaleAnyToQuantum(pixel,range),q); p=PushQuantumLongPixel(quantum_info,p,&pixel); SetPixelRed(image,ScaleAnyToQuantum(pixel,range),q); q+=GetPixelChannels(image); } break; } for (x=0; x < (ssize_t) number_pixels; x++) { p=PushQuantumPixel(quantum_info,p,&pixel); SetPixelBlue(image,ScaleAnyToQuantum(pixel,range),q); p=PushQuantumPixel(quantum_info,p,&pixel); SetPixelGreen(image,ScaleAnyToQuantum(pixel,range),q); p=PushQuantumPixel(quantum_info,p,&pixel); SetPixelRed(image,ScaleAnyToQuantum(pixel,range),q); q+=GetPixelChannels(image); } break; } case 16: { unsigned short pixel; if (quantum_info->format == FloatingPointQuantumFormat) { for (x=0; x < (ssize_t) number_pixels; x++) { 
p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelRed(image,ClampToQuantum(QuantumRange* HalfToSinglePrecision(pixel)),q); p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelGreen(image,ClampToQuantum(QuantumRange* HalfToSinglePrecision(pixel)),q); p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelBlue(image,ClampToQuantum(QuantumRange* HalfToSinglePrecision(pixel)),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } for (x=0; x < (ssize_t) number_pixels; x++) { p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelBlue(image,ScaleShortToQuantum(pixel),q); p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelGreen(image,ScaleShortToQuantum(pixel),q); p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelRed(image,ScaleShortToQuantum(pixel),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } case 32: { unsigned int pixel; if (quantum_info->format == FloatingPointQuantumFormat) { float pixel; for (x=0; x < (ssize_t) number_pixels; x++) { p=PushFloatPixel(quantum_info,p,&pixel); SetPixelRed(image,ClampToQuantum(pixel),q); p=PushFloatPixel(quantum_info,p,&pixel); SetPixelGreen(image,ClampToQuantum(pixel),q); p=PushFloatPixel(quantum_info,p,&pixel); SetPixelBlue(image,ClampToQuantum(pixel),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } for (x=0; x < (ssize_t) number_pixels; x++) { p=PushLongPixel(quantum_info->endian,p,&pixel); SetPixelBlue(image,ScaleLongToQuantum(pixel),q); p=PushLongPixel(quantum_info->endian,p,&pixel); SetPixelGreen(image,ScaleLongToQuantum(pixel),q); p=PushLongPixel(quantum_info->endian,p,&pixel); SetPixelRed(image,ScaleLongToQuantum(pixel),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } case 64: { if (quantum_info->format == FloatingPointQuantumFormat) { double pixel; for (x=0; x < (ssize_t) number_pixels; x++) { p=PushDoublePixel(quantum_info,p,&pixel); SetPixelRed(image,ClampToQuantum(pixel),q); p=PushDoublePixel(quantum_info,p,&pixel); 
SetPixelGreen(image,ClampToQuantum(pixel),q); p=PushDoublePixel(quantum_info,p,&pixel); SetPixelBlue(image,ClampToQuantum(pixel),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } } default: { range=GetQuantumRange(quantum_info->depth); for (x=0; x < (ssize_t) number_pixels; x++) { p=PushQuantumPixel(quantum_info,p,&pixel); SetPixelBlue(image,ScaleAnyToQuantum(pixel,range),q); p=PushQuantumPixel(quantum_info,p,&pixel); SetPixelGreen(image,ScaleAnyToQuantum(pixel,range),q); p=PushQuantumPixel(quantum_info,p,&pixel); SetPixelRed(image,ScaleAnyToQuantum(pixel,range),q); q+=GetPixelChannels(image); } break; } } } static void ImportBGRAQuantum(const Image *image,QuantumInfo *quantum_info, const MagickSizeType number_pixels,const unsigned char *magick_restrict p, Quantum *magick_restrict q,ExceptionInfo *exception) { QuantumAny range; register ssize_t x; unsigned int pixel; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); switch (quantum_info->depth) { case 8: { unsigned char pixel; for (x=0; x < (ssize_t) number_pixels; x++) { p=PushCharPixel(p,&pixel); SetPixelBlue(image,ScaleCharToQuantum(pixel),q); p=PushCharPixel(p,&pixel); SetPixelGreen(image,ScaleCharToQuantum(pixel),q); p=PushCharPixel(p,&pixel); SetPixelRed(image,ScaleCharToQuantum(pixel),q); p=PushCharPixel(p,&pixel); SetPixelAlpha(image,ScaleCharToQuantum(pixel),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } case 10: { pixel=0; if (quantum_info->pack == MagickFalse) { register ssize_t i; size_t quantum; ssize_t n; n=0; quantum=0; for (x=0; x < (ssize_t) number_pixels; x++) { for (i=0; i < 4; i++) { switch (n % 3) { case 0: { p=PushLongPixel(quantum_info->endian,p,&pixel); quantum=(size_t) (ScaleShortToQuantum((unsigned short) (((pixel >> 22) & 0x3ff) << 6))); break; } case 1: { quantum=(size_t) (ScaleShortToQuantum((unsigned short) (((pixel >> 12) & 0x3ff) << 6))); break; } case 2: { quantum=(size_t) (ScaleShortToQuantum((unsigned short) (((pixel 
>> 2) & 0x3ff) << 6))); break; } } switch (i) { case 0: SetPixelRed(image,(Quantum) quantum,q); break; case 1: SetPixelGreen(image,(Quantum) quantum,q); break; case 2: SetPixelBlue(image,(Quantum) quantum,q); break; case 3: SetPixelAlpha(image,(Quantum) quantum,q); break; } n++; } p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } for (x=0; x < (ssize_t) number_pixels; x++) { p=PushQuantumPixel(quantum_info,p,&pixel); SetPixelRed(image,ScaleShortToQuantum((unsigned short) (pixel << 6)),q); p=PushQuantumPixel(quantum_info,p,&pixel); SetPixelGreen(image,ScaleShortToQuantum((unsigned short) (pixel << 6)), q); p=PushQuantumPixel(quantum_info,p,&pixel); SetPixelBlue(image,ScaleShortToQuantum((unsigned short) (pixel << 6)), q); p=PushQuantumPixel(quantum_info,p,&pixel); SetPixelAlpha(image,ScaleShortToQuantum((unsigned short) (pixel << 6)), q); q+=GetPixelChannels(image); } break; } case 16: { unsigned short pixel; if (quantum_info->format == FloatingPointQuantumFormat) { for (x=0; x < (ssize_t) number_pixels; x++) { p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelRed(image,ClampToQuantum(QuantumRange* HalfToSinglePrecision(pixel)),q); p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelGreen(image,ClampToQuantum(QuantumRange* HalfToSinglePrecision(pixel)),q); p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelBlue(image,ClampToQuantum(QuantumRange* HalfToSinglePrecision(pixel)),q); p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelAlpha(image,ClampToQuantum(QuantumRange* HalfToSinglePrecision(pixel)),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } for (x=0; x < (ssize_t) number_pixels; x++) { p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelBlue(image,ScaleShortToQuantum(pixel),q); p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelGreen(image,ScaleShortToQuantum(pixel),q); p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelRed(image,ScaleShortToQuantum(pixel),q); 
p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelAlpha(image,ScaleShortToQuantum(pixel),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } case 32: { unsigned int pixel; if (quantum_info->format == FloatingPointQuantumFormat) { float pixel; for (x=0; x < (ssize_t) number_pixels; x++) { p=PushFloatPixel(quantum_info,p,&pixel); SetPixelRed(image,ClampToQuantum(pixel),q); p=PushFloatPixel(quantum_info,p,&pixel); SetPixelGreen(image,ClampToQuantum(pixel),q); p=PushFloatPixel(quantum_info,p,&pixel); SetPixelBlue(image,ClampToQuantum(pixel),q); p=PushFloatPixel(quantum_info,p,&pixel); SetPixelAlpha(image,ClampToQuantum(pixel),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } for (x=0; x < (ssize_t) number_pixels; x++) { p=PushLongPixel(quantum_info->endian,p,&pixel); SetPixelBlue(image,ScaleLongToQuantum(pixel),q); p=PushLongPixel(quantum_info->endian,p,&pixel); SetPixelGreen(image,ScaleLongToQuantum(pixel),q); p=PushLongPixel(quantum_info->endian,p,&pixel); SetPixelRed(image,ScaleLongToQuantum(pixel),q); p=PushLongPixel(quantum_info->endian,p,&pixel); SetPixelAlpha(image,ScaleLongToQuantum(pixel),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } case 64: { if (quantum_info->format == FloatingPointQuantumFormat) { double pixel; for (x=0; x < (ssize_t) number_pixels; x++) { p=PushDoublePixel(quantum_info,p,&pixel); SetPixelRed(image,ClampToQuantum(pixel),q); p=PushDoublePixel(quantum_info,p,&pixel); SetPixelGreen(image,ClampToQuantum(pixel),q); p=PushDoublePixel(quantum_info,p,&pixel); SetPixelBlue(image,ClampToQuantum(pixel),q); p=PushDoublePixel(quantum_info,p,&pixel); SetPixelAlpha(image,ClampToQuantum(pixel),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } } default: { range=GetQuantumRange(quantum_info->depth); for (x=0; x < (ssize_t) number_pixels; x++) { p=PushQuantumPixel(quantum_info,p,&pixel); SetPixelBlue(image,ScaleAnyToQuantum(pixel,range),q); p=PushQuantumPixel(quantum_info,p,&pixel); 
SetPixelGreen(image,ScaleAnyToQuantum(pixel,range),q); p=PushQuantumPixel(quantum_info,p,&pixel); SetPixelRed(image,ScaleAnyToQuantum(pixel,range),q); p=PushQuantumPixel(quantum_info,p,&pixel); SetPixelAlpha(image,ScaleAnyToQuantum(pixel,range),q); q+=GetPixelChannels(image); } break; } } } static void ImportBGROQuantum(const Image *image,QuantumInfo *quantum_info, const MagickSizeType number_pixels,const unsigned char *magick_restrict p, Quantum *magick_restrict q,ExceptionInfo *exception) { QuantumAny range; register ssize_t x; unsigned int pixel; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); switch (quantum_info->depth) { case 8: { unsigned char pixel; for (x=0; x < (ssize_t) number_pixels; x++) { p=PushCharPixel(p,&pixel); SetPixelBlue(image,ScaleCharToQuantum(pixel),q); p=PushCharPixel(p,&pixel); SetPixelGreen(image,ScaleCharToQuantum(pixel),q); p=PushCharPixel(p,&pixel); SetPixelRed(image,ScaleCharToQuantum(pixel),q); p=PushCharPixel(p,&pixel); SetPixelOpacity(image,ScaleCharToQuantum(pixel),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } case 10: { pixel=0; if (quantum_info->pack == MagickFalse) { register ssize_t i; size_t quantum; ssize_t n; n=0; quantum=0; for (x=0; x < (ssize_t) number_pixels; x++) { for (i=0; i < 4; i++) { switch (n % 3) { case 0: { p=PushLongPixel(quantum_info->endian,p,&pixel); quantum=(size_t) (ScaleShortToQuantum((unsigned short) (((pixel >> 22) & 0x3ff) << 6))); break; } case 1: { quantum=(size_t) (ScaleShortToQuantum((unsigned short) (((pixel >> 12) & 0x3ff) << 6))); break; } case 2: { quantum=(size_t) (ScaleShortToQuantum((unsigned short) (((pixel >> 2) & 0x3ff) << 6))); break; } } switch (i) { case 0: SetPixelRed(image,(Quantum) quantum,q); break; case 1: SetPixelGreen(image,(Quantum) quantum,q); break; case 2: SetPixelBlue(image,(Quantum) quantum,q); break; case 3: SetPixelOpacity(image,(Quantum) quantum,q); break; } n++; } p+=quantum_info->pad; q+=GetPixelChannels(image); } 
break; } for (x=0; x < (ssize_t) number_pixels; x++) { p=PushQuantumPixel(quantum_info,p,&pixel); SetPixelRed(image,ScaleShortToQuantum((unsigned short) (pixel << 6)),q); p=PushQuantumPixel(quantum_info,p,&pixel); SetPixelGreen(image,ScaleShortToQuantum((unsigned short) (pixel << 6)), q); p=PushQuantumPixel(quantum_info,p,&pixel); SetPixelBlue(image,ScaleShortToQuantum((unsigned short) (pixel << 6)), q); p=PushQuantumPixel(quantum_info,p,&pixel); SetPixelOpacity(image,ScaleShortToQuantum((unsigned short) (pixel << 6)), q); q+=GetPixelChannels(image); } break; } case 16: { unsigned short pixel; if (quantum_info->format == FloatingPointQuantumFormat) { for (x=0; x < (ssize_t) number_pixels; x++) { p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelRed(image,ClampToQuantum(QuantumRange* HalfToSinglePrecision(pixel)),q); p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelGreen(image,ClampToQuantum(QuantumRange* HalfToSinglePrecision(pixel)),q); p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelBlue(image,ClampToQuantum(QuantumRange* HalfToSinglePrecision(pixel)),q); p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelOpacity(image,ClampToQuantum(QuantumRange* HalfToSinglePrecision(pixel)),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } for (x=0; x < (ssize_t) number_pixels; x++) { p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelBlue(image,ScaleShortToQuantum(pixel),q); p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelGreen(image,ScaleShortToQuantum(pixel),q); p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelRed(image,ScaleShortToQuantum(pixel),q); p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelOpacity(image,ScaleShortToQuantum(pixel),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } case 32: { unsigned int pixel; if (quantum_info->format == FloatingPointQuantumFormat) { float pixel; for (x=0; x < (ssize_t) number_pixels; x++) { p=PushFloatPixel(quantum_info,p,&pixel); 
SetPixelRed(image,ClampToQuantum(pixel),q); p=PushFloatPixel(quantum_info,p,&pixel); SetPixelGreen(image,ClampToQuantum(pixel),q); p=PushFloatPixel(quantum_info,p,&pixel); SetPixelBlue(image,ClampToQuantum(pixel),q); p=PushFloatPixel(quantum_info,p,&pixel); SetPixelOpacity(image,ClampToQuantum(pixel),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } for (x=0; x < (ssize_t) number_pixels; x++) { p=PushLongPixel(quantum_info->endian,p,&pixel); SetPixelBlue(image,ScaleLongToQuantum(pixel),q); p=PushLongPixel(quantum_info->endian,p,&pixel); SetPixelGreen(image,ScaleLongToQuantum(pixel),q); p=PushLongPixel(quantum_info->endian,p,&pixel); SetPixelRed(image,ScaleLongToQuantum(pixel),q); p=PushLongPixel(quantum_info->endian,p,&pixel); SetPixelOpacity(image,ScaleLongToQuantum(pixel),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } case 64: { if (quantum_info->format == FloatingPointQuantumFormat) { double pixel; for (x=0; x < (ssize_t) number_pixels; x++) { p=PushDoublePixel(quantum_info,p,&pixel); SetPixelRed(image,ClampToQuantum(pixel),q); p=PushDoublePixel(quantum_info,p,&pixel); SetPixelGreen(image,ClampToQuantum(pixel),q); p=PushDoublePixel(quantum_info,p,&pixel); SetPixelBlue(image,ClampToQuantum(pixel),q); p=PushDoublePixel(quantum_info,p,&pixel); SetPixelOpacity(image,ClampToQuantum(pixel),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } } default: { range=GetQuantumRange(quantum_info->depth); for (x=0; x < (ssize_t) number_pixels; x++) { p=PushQuantumPixel(quantum_info,p,&pixel); SetPixelBlue(image,ScaleAnyToQuantum(pixel,range),q); p=PushQuantumPixel(quantum_info,p,&pixel); SetPixelGreen(image,ScaleAnyToQuantum(pixel,range),q); p=PushQuantumPixel(quantum_info,p,&pixel); SetPixelRed(image,ScaleAnyToQuantum(pixel,range),q); p=PushQuantumPixel(quantum_info,p,&pixel); SetPixelOpacity(image,ScaleAnyToQuantum(pixel,range),q); q+=GetPixelChannels(image); } break; } } } static void ImportBlackQuantum(const Image 
*image,QuantumInfo *quantum_info, const MagickSizeType number_pixels,const unsigned char *magick_restrict p, Quantum *magick_restrict q,ExceptionInfo *exception) { QuantumAny range; register ssize_t x; unsigned int pixel; if (image->colorspace != CMYKColorspace) { (void) ThrowMagickException(exception,GetMagickModule(),ImageError, "ColorSeparatedImageRequired","`%s'",image->filename); return; } switch (quantum_info->depth) { case 8: { unsigned char pixel; for (x=0; x < (ssize_t) number_pixels; x++) { p=PushCharPixel(p,&pixel); SetPixelBlack(image,ScaleCharToQuantum(pixel),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } case 16: { unsigned short pixel; if (quantum_info->format == FloatingPointQuantumFormat) { for (x=0; x < (ssize_t) number_pixels; x++) { p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelBlack(image,ClampToQuantum(QuantumRange* HalfToSinglePrecision(pixel)),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } for (x=0; x < (ssize_t) number_pixels; x++) { p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelBlack(image,ScaleShortToQuantum(pixel),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } case 32: { unsigned int pixel; if (quantum_info->format == FloatingPointQuantumFormat) { float pixel; for (x=0; x < (ssize_t) number_pixels; x++) { p=PushFloatPixel(quantum_info,p,&pixel); SetPixelBlack(image,ClampToQuantum(pixel),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } for (x=0; x < (ssize_t) number_pixels; x++) { p=PushLongPixel(quantum_info->endian,p,&pixel); SetPixelBlack(image,ScaleLongToQuantum(pixel),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } case 64: { if (quantum_info->format == FloatingPointQuantumFormat) { double pixel; for (x=0; x < (ssize_t) number_pixels; x++) { p=PushDoublePixel(quantum_info,p,&pixel); SetPixelBlack(image,ClampToQuantum(pixel),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } } default: { 
range=GetQuantumRange(quantum_info->depth); for (x=0; x < (ssize_t) number_pixels; x++) { p=PushQuantumPixel(quantum_info,p,&pixel); SetPixelBlack(image,ScaleAnyToQuantum(pixel,range),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } } } static void ImportBlueQuantum(const Image *image,QuantumInfo *quantum_info, const MagickSizeType number_pixels,const unsigned char *magick_restrict p, Quantum *magick_restrict q,ExceptionInfo *exception) { QuantumAny range; register ssize_t x; unsigned int pixel; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); switch (quantum_info->depth) { case 8: { unsigned char pixel; for (x=0; x < (ssize_t) number_pixels; x++) { p=PushCharPixel(p,&pixel); SetPixelBlue(image,ScaleCharToQuantum(pixel),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } case 16: { unsigned short pixel; if (quantum_info->format == FloatingPointQuantumFormat) { for (x=0; x < (ssize_t) number_pixels; x++) { p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelBlue(image,ClampToQuantum(QuantumRange* HalfToSinglePrecision(pixel)),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } for (x=0; x < (ssize_t) number_pixels; x++) { p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelBlue(image,ScaleShortToQuantum(pixel),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } case 32: { unsigned int pixel; if (quantum_info->format == FloatingPointQuantumFormat) { float pixel; for (x=0; x < (ssize_t) number_pixels; x++) { p=PushFloatPixel(quantum_info,p,&pixel); SetPixelBlue(image,ClampToQuantum(pixel),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } for (x=0; x < (ssize_t) number_pixels; x++) { p=PushLongPixel(quantum_info->endian,p,&pixel); SetPixelBlue(image,ScaleLongToQuantum(pixel),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } case 64: { if (quantum_info->format == FloatingPointQuantumFormat) { double pixel; for (x=0; x < (ssize_t) number_pixels; 
x++) { p=PushDoublePixel(quantum_info,p,&pixel); SetPixelBlue(image,ClampToQuantum(pixel),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } } default: { range=GetQuantumRange(quantum_info->depth); for (x=0; x < (ssize_t) number_pixels; x++) { p=PushQuantumPixel(quantum_info,p,&pixel); SetPixelBlue(image,ScaleAnyToQuantum(pixel,range),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } } } static void ImportCbYCrYQuantum(const Image *image,QuantumInfo *quantum_info, const MagickSizeType number_pixels,const unsigned char *magick_restrict p, Quantum *magick_restrict q,ExceptionInfo *exception) { QuantumAny range; register ssize_t x; unsigned int pixel; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); switch (quantum_info->depth) { case 10: { Quantum cbcr[4]; pixel=0; if (quantum_info->pack == MagickFalse) { register ssize_t i; size_t quantum; ssize_t n; n=0; quantum=0; for (x=0; x < (ssize_t) number_pixels; x+=4) { for (i=0; i < 4; i++) { switch (n % 3) { case 0: { p=PushLongPixel(quantum_info->endian,p,&pixel); quantum=(size_t) (ScaleShortToQuantum((unsigned short) (((pixel >> 22) & 0x3ff) << 6))); break; } case 1: { quantum=(size_t) (ScaleShortToQuantum((unsigned short) (((pixel >> 12) & 0x3ff) << 6))); break; } case 2: { quantum=(size_t) (ScaleShortToQuantum((unsigned short) (((pixel >> 2) & 0x3ff) << 6))); break; } } cbcr[i]=(Quantum) (quantum); n++; } p+=quantum_info->pad; SetPixelRed(image,cbcr[1],q); SetPixelGreen(image,cbcr[0],q); SetPixelBlue(image,cbcr[2],q); q+=GetPixelChannels(image); SetPixelRed(image,cbcr[3],q); SetPixelGreen(image,cbcr[0],q); SetPixelBlue(image,cbcr[2],q); q+=GetPixelChannels(image); } break; } } default: { range=GetQuantumRange(quantum_info->depth); for (x=0; x < (ssize_t) number_pixels; x++) { p=PushQuantumPixel(quantum_info,p,&pixel); SetPixelRed(image,ScaleAnyToQuantum(pixel,range),q); p=PushQuantumPixel(quantum_info,p,&pixel); 
SetPixelGreen(image,ScaleAnyToQuantum(pixel,range),q); q+=GetPixelChannels(image); } break; } } } static void ImportCMYKQuantum(const Image *image,QuantumInfo *quantum_info, const MagickSizeType number_pixels,const unsigned char *magick_restrict p, Quantum *magick_restrict q,ExceptionInfo *exception) { QuantumAny range; register ssize_t x; unsigned int pixel; if (image->colorspace != CMYKColorspace) { (void) ThrowMagickException(exception,GetMagickModule(),ImageError, "ColorSeparatedImageRequired","`%s'",image->filename); return; } switch (quantum_info->depth) { case 8: { unsigned char pixel; for (x=0; x < (ssize_t) number_pixels; x++) { p=PushCharPixel(p,&pixel); SetPixelRed(image,ScaleCharToQuantum(pixel),q); p=PushCharPixel(p,&pixel); SetPixelGreen(image,ScaleCharToQuantum(pixel),q); p=PushCharPixel(p,&pixel); SetPixelBlue(image,ScaleCharToQuantum(pixel),q); p=PushCharPixel(p,&pixel); SetPixelBlack(image,ScaleCharToQuantum(pixel),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } case 16: { unsigned short pixel; if (quantum_info->format == FloatingPointQuantumFormat) { for (x=0; x < (ssize_t) number_pixels; x++) { p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelRed(image,ClampToQuantum(QuantumRange* HalfToSinglePrecision(pixel)),q); p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelGreen(image,ClampToQuantum(QuantumRange* HalfToSinglePrecision(pixel)),q); p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelBlue(image,ClampToQuantum(QuantumRange* HalfToSinglePrecision(pixel)),q); p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelBlack(image,ClampToQuantum(QuantumRange* HalfToSinglePrecision(pixel)),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } for (x=0; x < (ssize_t) number_pixels; x++) { p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelRed(image,ScaleShortToQuantum(pixel),q); p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelGreen(image,ScaleShortToQuantum(pixel),q); 
p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelBlue(image,ScaleShortToQuantum(pixel),q); p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelBlack(image,ScaleShortToQuantum(pixel),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } case 32: { unsigned int pixel; if (quantum_info->format == FloatingPointQuantumFormat) { float pixel; for (x=0; x < (ssize_t) number_pixels; x++) { p=PushFloatPixel(quantum_info,p,&pixel); SetPixelRed(image,ClampToQuantum(pixel),q); p=PushFloatPixel(quantum_info,p,&pixel); SetPixelGreen(image,ClampToQuantum(pixel),q); p=PushFloatPixel(quantum_info,p,&pixel); SetPixelBlue(image,ClampToQuantum(pixel),q); p=PushFloatPixel(quantum_info,p,&pixel); SetPixelBlack(image,ClampToQuantum(pixel),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } for (x=0; x < (ssize_t) number_pixels; x++) { p=PushLongPixel(quantum_info->endian,p,&pixel); SetPixelRed(image,ScaleLongToQuantum(pixel),q); p=PushLongPixel(quantum_info->endian,p,&pixel); SetPixelGreen(image,ScaleLongToQuantum(pixel),q); p=PushLongPixel(quantum_info->endian,p,&pixel); SetPixelBlue(image,ScaleLongToQuantum(pixel),q); p=PushLongPixel(quantum_info->endian,p,&pixel); SetPixelBlack(image,ScaleLongToQuantum(pixel),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } case 64: { if (quantum_info->format == FloatingPointQuantumFormat) { double pixel; for (x=0; x < (ssize_t) number_pixels; x++) { p=PushDoublePixel(quantum_info,p,&pixel); SetPixelRed(image,ClampToQuantum(pixel),q); p=PushDoublePixel(quantum_info,p,&pixel); SetPixelGreen(image,ClampToQuantum(pixel),q); p=PushDoublePixel(quantum_info,p,&pixel); SetPixelBlue(image,ClampToQuantum(pixel),q); p=PushDoublePixel(quantum_info,p,&pixel); SetPixelBlack(image,ClampToQuantum(pixel),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } } default: { range=GetQuantumRange(quantum_info->depth); for (x=0; x < (ssize_t) number_pixels; x++) { p=PushQuantumPixel(quantum_info,p,&pixel); 
SetPixelRed(image,ScaleAnyToQuantum(pixel,range),q); p=PushQuantumPixel(quantum_info,p,&pixel); SetPixelGreen(image,ScaleAnyToQuantum(pixel,range),q); p=PushQuantumPixel(quantum_info,p,&pixel); SetPixelBlue(image,ScaleAnyToQuantum(pixel,range),q); p=PushQuantumPixel(quantum_info,p,&pixel); SetPixelBlack(image,ScaleAnyToQuantum(pixel,range),q); q+=GetPixelChannels(image); } break; } } } static void ImportCMYKAQuantum(const Image *image,QuantumInfo *quantum_info, const MagickSizeType number_pixels,const unsigned char *magick_restrict p, Quantum *magick_restrict q,ExceptionInfo *exception) { QuantumAny range; register ssize_t x; unsigned int pixel; if (image->colorspace != CMYKColorspace) { (void) ThrowMagickException(exception,GetMagickModule(),ImageError, "ColorSeparatedImageRequired","`%s'",image->filename); return; } switch (quantum_info->depth) { case 8: { unsigned char pixel; for (x=0; x < (ssize_t) number_pixels; x++) { p=PushCharPixel(p,&pixel); SetPixelRed(image,ScaleCharToQuantum(pixel),q); p=PushCharPixel(p,&pixel); SetPixelGreen(image,ScaleCharToQuantum(pixel),q); p=PushCharPixel(p,&pixel); SetPixelBlue(image,ScaleCharToQuantum(pixel),q); p=PushCharPixel(p,&pixel); SetPixelBlack(image,ScaleCharToQuantum(pixel),q); p=PushCharPixel(p,&pixel); SetPixelAlpha(image,ScaleCharToQuantum(pixel),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } case 16: { unsigned short pixel; if (quantum_info->format == FloatingPointQuantumFormat) { for (x=0; x < (ssize_t) number_pixels; x++) { p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelRed(image,ClampToQuantum(QuantumRange* HalfToSinglePrecision(pixel)),q); p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelGreen(image,ClampToQuantum(QuantumRange* HalfToSinglePrecision(pixel)),q); p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelBlue(image,ClampToQuantum(QuantumRange* HalfToSinglePrecision(pixel)),q); p=PushShortPixel(quantum_info->endian,p,&pixel); 
SetPixelBlack(image,ClampToQuantum(QuantumRange* HalfToSinglePrecision(pixel)),q); p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelAlpha(image,ClampToQuantum(QuantumRange* HalfToSinglePrecision(pixel)),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } for (x=0; x < (ssize_t) number_pixels; x++) { p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelRed(image,ScaleShortToQuantum(pixel),q); p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelGreen(image,ScaleShortToQuantum(pixel),q); p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelBlue(image,ScaleShortToQuantum(pixel),q); p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelBlack(image,ScaleShortToQuantum(pixel),q); p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelAlpha(image,ScaleShortToQuantum(pixel),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } case 32: { unsigned int pixel; if (quantum_info->format == FloatingPointQuantumFormat) { float pixel; for (x=0; x < (ssize_t) number_pixels; x++) { p=PushFloatPixel(quantum_info,p,&pixel); SetPixelRed(image,ClampToQuantum(pixel),q); p=PushFloatPixel(quantum_info,p,&pixel); SetPixelGreen(image,ClampToQuantum(pixel),q); p=PushFloatPixel(quantum_info,p,&pixel); SetPixelBlue(image,ClampToQuantum(pixel),q); p=PushFloatPixel(quantum_info,p,&pixel); SetPixelBlack(image,ClampToQuantum(pixel),q); p=PushFloatPixel(quantum_info,p,&pixel); SetPixelAlpha(image,ClampToQuantum(pixel),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } for (x=0; x < (ssize_t) number_pixels; x++) { p=PushLongPixel(quantum_info->endian,p,&pixel); SetPixelRed(image,ScaleLongToQuantum(pixel),q); p=PushLongPixel(quantum_info->endian,p,&pixel); SetPixelGreen(image,ScaleLongToQuantum(pixel),q); p=PushLongPixel(quantum_info->endian,p,&pixel); SetPixelBlue(image,ScaleLongToQuantum(pixel),q); p=PushLongPixel(quantum_info->endian,p,&pixel); SetPixelBlack(image,ScaleLongToQuantum(pixel),q); 
p=PushLongPixel(quantum_info->endian,p,&pixel); SetPixelAlpha(image,ScaleLongToQuantum(pixel),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } case 64: { if (quantum_info->format == FloatingPointQuantumFormat) { double pixel; for (x=0; x < (ssize_t) number_pixels; x++) { p=PushDoublePixel(quantum_info,p,&pixel); SetPixelRed(image,ClampToQuantum(pixel),q); p=PushDoublePixel(quantum_info,p,&pixel); SetPixelGreen(image,ClampToQuantum(pixel),q); p=PushDoublePixel(quantum_info,p,&pixel); SetPixelBlue(image,ClampToQuantum(pixel),q); p=PushDoublePixel(quantum_info,p,&pixel); SetPixelBlack(image,ClampToQuantum(pixel),q); p=PushDoublePixel(quantum_info,p,&pixel); SetPixelAlpha(image,ClampToQuantum(pixel),q); p=PushDoublePixel(quantum_info,p,&pixel); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } } default: { range=GetQuantumRange(quantum_info->depth); for (x=0; x < (ssize_t) number_pixels; x++) { p=PushQuantumPixel(quantum_info,p,&pixel); SetPixelRed(image,ScaleAnyToQuantum(pixel,range),q); p=PushQuantumPixel(quantum_info,p,&pixel); SetPixelGreen(image,ScaleAnyToQuantum(pixel,range),q); p=PushQuantumPixel(quantum_info,p,&pixel); SetPixelBlue(image,ScaleAnyToQuantum(pixel,range),q); p=PushQuantumPixel(quantum_info,p,&pixel); SetPixelBlack(image,ScaleAnyToQuantum(pixel,range),q); p=PushQuantumPixel(quantum_info,p,&pixel); SetPixelAlpha(image,ScaleAnyToQuantum(pixel,range),q); q+=GetPixelChannels(image); } break; } } } static void ImportCMYKOQuantum(const Image *image,QuantumInfo *quantum_info, const MagickSizeType number_pixels,const unsigned char *magick_restrict p, Quantum *magick_restrict q,ExceptionInfo *exception) { QuantumAny range; register ssize_t x; unsigned int pixel; if (image->colorspace != CMYKColorspace) { (void) ThrowMagickException(exception,GetMagickModule(),ImageError, "ColorSeparatedImageRequired","`%s'",image->filename); return; } switch (quantum_info->depth) { case 8: { unsigned char pixel; for (x=0; x < (ssize_t) 
/* Continuation of ImportCMYKOQuantum: 8/16/32/64-bit and generic paths. */
number_pixels; x++) { p=PushCharPixel(p,&pixel); SetPixelRed(image,ScaleCharToQuantum(pixel),q); p=PushCharPixel(p,&pixel); SetPixelGreen(image,ScaleCharToQuantum(pixel),q); p=PushCharPixel(p,&pixel); SetPixelBlue(image,ScaleCharToQuantum(pixel),q); p=PushCharPixel(p,&pixel); SetPixelBlack(image,ScaleCharToQuantum(pixel),q); p=PushCharPixel(p,&pixel); SetPixelOpacity(image,ScaleCharToQuantum(pixel),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } case 16: { unsigned short pixel; if (quantum_info->format == FloatingPointQuantumFormat) { for (x=0; x < (ssize_t) number_pixels; x++) { p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelRed(image,ClampToQuantum(QuantumRange* HalfToSinglePrecision(pixel)),q); p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelGreen(image,ClampToQuantum(QuantumRange* HalfToSinglePrecision(pixel)),q); p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelBlue(image,ClampToQuantum(QuantumRange* HalfToSinglePrecision(pixel)),q); p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelBlack(image,ClampToQuantum(QuantumRange* HalfToSinglePrecision(pixel)),q); p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelOpacity(image,ClampToQuantum(QuantumRange* HalfToSinglePrecision(pixel)),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } for (x=0; x < (ssize_t) number_pixels; x++) { p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelRed(image,ScaleShortToQuantum(pixel),q); p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelGreen(image,ScaleShortToQuantum(pixel),q); p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelBlue(image,ScaleShortToQuantum(pixel),q); p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelBlack(image,ScaleShortToQuantum(pixel),q); p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelOpacity(image,ScaleShortToQuantum(pixel),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } case 32: { unsigned int pixel; if (quantum_info->format ==
FloatingPointQuantumFormat) { float pixel; for (x=0; x < (ssize_t) number_pixels; x++) { p=PushFloatPixel(quantum_info,p,&pixel); SetPixelRed(image,ClampToQuantum(pixel),q); p=PushFloatPixel(quantum_info,p,&pixel); SetPixelGreen(image,ClampToQuantum(pixel),q); p=PushFloatPixel(quantum_info,p,&pixel); SetPixelBlue(image,ClampToQuantum(pixel),q); p=PushFloatPixel(quantum_info,p,&pixel); SetPixelBlack(image,ClampToQuantum(pixel),q); p=PushFloatPixel(quantum_info,p,&pixel); SetPixelOpacity(image,ClampToQuantum(pixel),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } for (x=0; x < (ssize_t) number_pixels; x++) { p=PushLongPixel(quantum_info->endian,p,&pixel); SetPixelRed(image,ScaleLongToQuantum(pixel),q); p=PushLongPixel(quantum_info->endian,p,&pixel); SetPixelGreen(image,ScaleLongToQuantum(pixel),q); p=PushLongPixel(quantum_info->endian,p,&pixel); SetPixelBlue(image,ScaleLongToQuantum(pixel),q); p=PushLongPixel(quantum_info->endian,p,&pixel); SetPixelBlack(image,ScaleLongToQuantum(pixel),q); p=PushLongPixel(quantum_info->endian,p,&pixel); SetPixelOpacity(image,ScaleLongToQuantum(pixel),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } case 64: { if (quantum_info->format == FloatingPointQuantumFormat) { double pixel; for (x=0; x < (ssize_t) number_pixels; x++) { p=PushDoublePixel(quantum_info,p,&pixel); SetPixelRed(image,ClampToQuantum(pixel),q); p=PushDoublePixel(quantum_info,p,&pixel); SetPixelGreen(image,ClampToQuantum(pixel),q); p=PushDoublePixel(quantum_info,p,&pixel); SetPixelBlue(image,ClampToQuantum(pixel),q); p=PushDoublePixel(quantum_info,p,&pixel); SetPixelBlack(image,ClampToQuantum(pixel),q); p=PushDoublePixel(quantum_info,p,&pixel); SetPixelOpacity(image,ClampToQuantum(pixel),q);
/* NOTE(review): extra PushDoublePixel after the opacity sample discards one
   double per pixel -- confirm intended (mirrors the CMYKA path above). */
p=PushDoublePixel(quantum_info,p,&pixel); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } }
/* Non-float 64-bit input falls through to this generic any-depth path. */
default: { range=GetQuantumRange(quantum_info->depth); for (x=0; x < (ssize_t) number_pixels; x++) { p=PushQuantumPixel(quantum_info,p,&pixel);
SetPixelRed(image,ScaleAnyToQuantum(pixel,range),q); p=PushQuantumPixel(quantum_info,p,&pixel); SetPixelGreen(image,ScaleAnyToQuantum(pixel,range),q); p=PushQuantumPixel(quantum_info,p,&pixel); SetPixelBlue(image,ScaleAnyToQuantum(pixel,range),q); p=PushQuantumPixel(quantum_info,p,&pixel); SetPixelBlack(image,ScaleAnyToQuantum(pixel,range),q); p=PushQuantumPixel(quantum_info,p,&pixel); SetPixelOpacity(image,ScaleAnyToQuantum(pixel,range),q); q+=GetPixelChannels(image); } break; } } }
/*
  ImportGrayQuantum(): decode gray (intensity) samples of the configured
  depth from raw stream `p` into the pixel cache `q`.  Depth 1 honors
  quantum_info->min_is_white by swapping the black/white mapping.
*/
static void ImportGrayQuantum(const Image *image,QuantumInfo *quantum_info, const MagickSizeType number_pixels,const unsigned char *magick_restrict p, Quantum *magick_restrict q,ExceptionInfo *exception) { QuantumAny range; register ssize_t x; ssize_t bit; unsigned int pixel; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); pixel=0; switch (quantum_info->depth) { case 1: { register Quantum black, white; black=0; white=QuantumRange; if (quantum_info->min_is_white != MagickFalse) { black=QuantumRange; white=0; } for (x=0; x < ((ssize_t) number_pixels-7); x+=8) { for (bit=0; bit < 8; bit++) { SetPixelGray(image,((*p) & (1 << (7-bit))) == 0 ? black : white,q); q+=GetPixelChannels(image); } p++; } for (bit=0; bit < (ssize_t) (number_pixels % 8); bit++) { SetPixelGray(image,((*p) & (0x01 << (7-bit))) == 0 ?
black : white,q); q+=GetPixelChannels(image); } if (bit != 0) p++; break; } case 4: { register unsigned char pixel; range=GetQuantumRange(quantum_info->depth); for (x=0; x < ((ssize_t) number_pixels-1); x+=2) { pixel=(unsigned char) ((*p >> 4) & 0xf); SetPixelGray(image,ScaleAnyToQuantum(pixel,range),q); q+=GetPixelChannels(image); pixel=(unsigned char) ((*p) & 0xf); SetPixelGray(image,ScaleAnyToQuantum(pixel,range),q); p++; q+=GetPixelChannels(image); } for (bit=0; bit < (ssize_t) (number_pixels % 2); bit++) { pixel=(unsigned char) (*p++ >> 4); SetPixelGray(image,ScaleAnyToQuantum(pixel,range),q); q+=GetPixelChannels(image); } break; } case 8: { unsigned char pixel; if (quantum_info->min_is_white != MagickFalse) { for (x=0; x < (ssize_t) number_pixels; x++) { p=PushCharPixel(p,&pixel); SetPixelGray(image,ScaleCharToQuantum(pixel),q); SetPixelAlpha(image,OpaqueAlpha,q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } for (x=0; x < (ssize_t) number_pixels; x++) { p=PushCharPixel(p,&pixel); SetPixelGray(image,ScaleCharToQuantum(pixel),q); SetPixelAlpha(image,OpaqueAlpha,q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } case 10: { range=GetQuantumRange(quantum_info->depth); if (quantum_info->pack == MagickFalse) { if (image->endian == LSBEndian) { for (x=0; x < (ssize_t) (number_pixels-2); x+=3) { p=PushLongPixel(quantum_info->endian,p,&pixel); SetPixelGray(image,ScaleAnyToQuantum((pixel >> 22) & 0x3ff, range),q); q+=GetPixelChannels(image); SetPixelGray(image,ScaleAnyToQuantum((pixel >> 12) & 0x3ff, range),q); q+=GetPixelChannels(image); SetPixelGray(image,ScaleAnyToQuantum((pixel >> 2) & 0x3ff, range),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } p=PushLongPixel(quantum_info->endian,p,&pixel); if (x++ < (ssize_t) (number_pixels-1)) { SetPixelGray(image,ScaleAnyToQuantum((pixel >> 22) & 0x3ff, range),q); q+=GetPixelChannels(image); } if (x++ < (ssize_t) number_pixels) { SetPixelGray(image,ScaleAnyToQuantum((pixel >> 12) & 0x3ff, 
range),q); q+=GetPixelChannels(image); } break; } for (x=0; x < (ssize_t) (number_pixels-2); x+=3) { p=PushLongPixel(quantum_info->endian,p,&pixel); SetPixelGray(image,ScaleAnyToQuantum((pixel >> 2) & 0x3ff,range), q); q+=GetPixelChannels(image); SetPixelGray(image,ScaleAnyToQuantum((pixel >> 12) & 0x3ff,range), q); q+=GetPixelChannels(image); SetPixelGray(image,ScaleAnyToQuantum((pixel >> 22) & 0x3ff,range), q); p+=quantum_info->pad; q+=GetPixelChannels(image); } p=PushLongPixel(quantum_info->endian,p,&pixel); if (x++ < (ssize_t) (number_pixels-1)) { SetPixelGray(image,ScaleAnyToQuantum((pixel >> 2) & 0x3ff, range),q); q+=GetPixelChannels(image); } if (x++ < (ssize_t) number_pixels) { SetPixelGray(image,ScaleAnyToQuantum((pixel >> 12) & 0x3ff, range),q); q+=GetPixelChannels(image); } break; } for (x=0; x < (ssize_t) number_pixels; x++) { p=PushQuantumPixel(quantum_info,p,&pixel); SetPixelGray(image,ScaleAnyToQuantum(pixel,range),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } case 12: { range=GetQuantumRange(quantum_info->depth); if (quantum_info->pack == MagickFalse) { unsigned short pixel; for (x=0; x < (ssize_t) (number_pixels-1); x+=2) { p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelGray(image,ScaleAnyToQuantum((QuantumAny) (pixel >> 4), range),q); q+=GetPixelChannels(image); p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelGray(image,ScaleAnyToQuantum((QuantumAny) (pixel >> 4), range),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } for (bit=0; bit < (ssize_t) (number_pixels % 2); bit++) { p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelGray(image,ScaleAnyToQuantum((QuantumAny) (pixel >> 4), range),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } if (bit != 0) p++; break; } for (x=0; x < (ssize_t) number_pixels; x++) { p=PushQuantumPixel(quantum_info,p,&pixel); SetPixelGray(image,ScaleAnyToQuantum(pixel,range),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } case 16: { unsigned short 
pixel; if (quantum_info->min_is_white != MagickFalse) { for (x=0; x < (ssize_t) number_pixels; x++) { p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelGray(image,ScaleShortToQuantum(pixel),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } if (quantum_info->format == FloatingPointQuantumFormat) { for (x=0; x < (ssize_t) number_pixels; x++) { p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelGray(image,ClampToQuantum(QuantumRange* HalfToSinglePrecision(pixel)),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } for (x=0; x < (ssize_t) number_pixels; x++) { p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelGray(image,ScaleShortToQuantum(pixel),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } case 32: { unsigned int pixel; if (quantum_info->format == FloatingPointQuantumFormat) { float pixel; for (x=0; x < (ssize_t) number_pixels; x++) { p=PushFloatPixel(quantum_info,p,&pixel); SetPixelGray(image,ClampToQuantum(pixel),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } for (x=0; x < (ssize_t) number_pixels; x++) { p=PushLongPixel(quantum_info->endian,p,&pixel); SetPixelGray(image,ScaleLongToQuantum(pixel),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } case 64: { if (quantum_info->format == FloatingPointQuantumFormat) { double pixel; for (x=0; x < (ssize_t) number_pixels; x++) { p=PushDoublePixel(quantum_info,p,&pixel); SetPixelGray(image,ClampToQuantum(pixel),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } } default: { range=GetQuantumRange(quantum_info->depth); for (x=0; x < (ssize_t) number_pixels; x++) { p=PushQuantumPixel(quantum_info,p,&pixel); SetPixelGray(image,ScaleAnyToQuantum(pixel,range),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } } } static void ImportGrayAlphaQuantum(const Image *image,QuantumInfo *quantum_info, const MagickSizeType number_pixels,const unsigned char *magick_restrict p, Quantum *magick_restrict 
/* Body of ImportGrayAlphaQuantum (signature opens on the previous lines):
   per-depth decoders for packed 1-bit and 4-bit pairs, then byte/short/long/
   float/double pairs, then the generic any-depth fallback. */
q,ExceptionInfo *exception) { QuantumAny range; register ssize_t x; ssize_t bit; unsigned int pixel; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); switch (quantum_info->depth) { case 1: { register unsigned char pixel; bit=0; for (x=((ssize_t) number_pixels-3); x > 0; x-=4) { for (bit=0; bit < 8; bit+=2) { pixel=(unsigned char) (((*p) & (1 << (7-bit))) != 0 ? 0x00 : 0x01); SetPixelGray(image,(Quantum) (pixel == 0 ? 0 : QuantumRange),q); SetPixelAlpha(image,((*p) & (1UL << (unsigned char) (6-bit))) == 0 ? TransparentAlpha : OpaqueAlpha,q); q+=GetPixelChannels(image); } p++; } if ((number_pixels % 4) != 0) for (bit=3; bit >= (ssize_t) (4-(number_pixels % 4)); bit-=2) { pixel=(unsigned char) (((*p) & (1 << (7-bit))) != 0 ? 0x00 : 0x01); SetPixelGray(image,(Quantum) (pixel != 0 ? 0 : QuantumRange),q); SetPixelAlpha(image,((*p) & (1UL << (unsigned char) (6-bit))) == 0 ? TransparentAlpha : OpaqueAlpha,q); q+=GetPixelChannels(image); } if (bit != 0) p++; break; } case 4: { register unsigned char pixel; range=GetQuantumRange(quantum_info->depth); for (x=0; x < (ssize_t) number_pixels; x++) { pixel=(unsigned char) ((*p >> 4) & 0xf); SetPixelGray(image,ScaleAnyToQuantum(pixel,range),q); pixel=(unsigned char) ((*p) & 0xf); SetPixelAlpha(image,ScaleAnyToQuantum(pixel,range),q); p++; q+=GetPixelChannels(image); } break; } case 8: { unsigned char pixel; for (x=0; x < (ssize_t) number_pixels; x++) { p=PushCharPixel(p,&pixel); SetPixelGray(image,ScaleCharToQuantum(pixel),q); p=PushCharPixel(p,&pixel); SetPixelAlpha(image,ScaleCharToQuantum(pixel),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } case 10: { range=GetQuantumRange(quantum_info->depth); for (x=0; x < (ssize_t) number_pixels; x++) { p=PushQuantumPixel(quantum_info,p,&pixel); SetPixelGray(image,ScaleAnyToQuantum(pixel,range),q); p=PushQuantumPixel(quantum_info,p,&pixel); SetPixelAlpha(image,ScaleAnyToQuantum(pixel,range),q); p+=quantum_info->pad;
q+=GetPixelChannels(image); } break; } case 12: { range=GetQuantumRange(quantum_info->depth); for (x=0; x < (ssize_t) number_pixels; x++) { p=PushQuantumPixel(quantum_info,p,&pixel); SetPixelGray(image,ScaleAnyToQuantum(pixel,range),q); p=PushQuantumPixel(quantum_info,p,&pixel); SetPixelAlpha(image,ScaleAnyToQuantum(pixel,range),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } case 16: { unsigned short pixel; if (quantum_info->format == FloatingPointQuantumFormat) { for (x=0; x < (ssize_t) number_pixels; x++) { p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelGray(image,ClampToQuantum(QuantumRange* HalfToSinglePrecision(pixel)),q); p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelAlpha(image,ClampToQuantum(QuantumRange* HalfToSinglePrecision(pixel)),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } for (x=0; x < (ssize_t) number_pixels; x++) { p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelGray(image,ScaleShortToQuantum(pixel),q); p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelAlpha(image,ScaleShortToQuantum(pixel),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } case 32: { unsigned int pixel; if (quantum_info->format == FloatingPointQuantumFormat) { float pixel; for (x=0; x < (ssize_t) number_pixels; x++) { p=PushFloatPixel(quantum_info,p,&pixel); SetPixelGray(image,ClampToQuantum(pixel),q); p=PushFloatPixel(quantum_info,p,&pixel); SetPixelAlpha(image,ClampToQuantum(pixel),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } for (x=0; x < (ssize_t) number_pixels; x++) { p=PushLongPixel(quantum_info->endian,p,&pixel); SetPixelGray(image,ScaleLongToQuantum(pixel),q); p=PushLongPixel(quantum_info->endian,p,&pixel); SetPixelAlpha(image,ScaleLongToQuantum(pixel),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } case 64: { if (quantum_info->format == FloatingPointQuantumFormat) { double pixel; for (x=0; x < (ssize_t) number_pixels; x++) {
p=PushDoublePixel(quantum_info,p,&pixel); SetPixelGray(image,ClampToQuantum(pixel),q); p=PushDoublePixel(quantum_info,p,&pixel); SetPixelAlpha(image,ClampToQuantum(pixel),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } }
/* Non-float 64-bit input falls through to this generic any-depth path. */
default: { range=GetQuantumRange(quantum_info->depth); for (x=0; x < (ssize_t) number_pixels; x++) { p=PushQuantumPixel(quantum_info,p,&pixel); SetPixelGray(image,ScaleAnyToQuantum(pixel,range),q); p=PushQuantumPixel(quantum_info,p,&pixel); SetPixelAlpha(image,ScaleAnyToQuantum(pixel,range),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } } }
/*
  ImportGreenQuantum(): decode a single green sample per pixel of the
  configured depth from raw stream `p` into the pixel cache `q`.
*/
static void ImportGreenQuantum(const Image *image,QuantumInfo *quantum_info, const MagickSizeType number_pixels,const unsigned char *magick_restrict p, Quantum *magick_restrict q,ExceptionInfo *exception) { QuantumAny range; register ssize_t x; unsigned int pixel; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); switch (quantum_info->depth) { case 8: { unsigned char pixel; for (x=0; x < (ssize_t) number_pixels; x++) { p=PushCharPixel(p,&pixel); SetPixelGreen(image,ScaleCharToQuantum(pixel),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } case 16: { unsigned short pixel; if (quantum_info->format == FloatingPointQuantumFormat) { for (x=0; x < (ssize_t) number_pixels; x++) { p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelGreen(image,ClampToQuantum(QuantumRange* HalfToSinglePrecision(pixel)),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } for (x=0; x < (ssize_t) number_pixels; x++) { p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelGreen(image,ScaleShortToQuantum(pixel),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } case 32: { unsigned int pixel; if (quantum_info->format == FloatingPointQuantumFormat) { float pixel; for (x=0; x < (ssize_t) number_pixels; x++) { p=PushFloatPixel(quantum_info,p,&pixel); SetPixelGreen(image,ClampToQuantum(pixel),q); p+=quantum_info->pad;
/* Continuation of ImportGreenQuantum: 32-bit long, 64-bit and generic paths. */
q+=GetPixelChannels(image); } break; } for (x=0; x < (ssize_t) number_pixels; x++) { p=PushLongPixel(quantum_info->endian,p,&pixel); SetPixelGreen(image,ScaleLongToQuantum(pixel),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } case 64: { if (quantum_info->format == FloatingPointQuantumFormat) { double pixel; for (x=0; x < (ssize_t) number_pixels; x++) { p=PushDoublePixel(quantum_info,p,&pixel); SetPixelGreen(image,ClampToQuantum(pixel),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } }
/* Non-float 64-bit input falls through to this generic any-depth path. */
default: { range=GetQuantumRange(quantum_info->depth); for (x=0; x < (ssize_t) number_pixels; x++) { p=PushQuantumPixel(quantum_info,p,&pixel); SetPixelGreen(image,ScaleAnyToQuantum(pixel,range),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } } }
/*
  ImportIndexQuantum(): decode colormap indexes from raw stream `p`, validate
  each via PushColormapIndex (records out-of-range hits in range_exception),
  and store both the index and its colormap RGB expansion into `q`.  Requires
  a PseudoClass image; otherwise raises ImageError and returns.
*/
static void ImportIndexQuantum(const Image *image,QuantumInfo *quantum_info, const MagickSizeType number_pixels,const unsigned char *magick_restrict p, Quantum *magick_restrict q,ExceptionInfo *exception) { MagickBooleanType range_exception; register ssize_t x; ssize_t bit; unsigned int pixel; if (image->storage_class != PseudoClass) { (void) ThrowMagickException(exception,GetMagickModule(),ImageError, "ColormappedImageRequired","`%s'",image->filename); return; } range_exception=MagickFalse; switch (quantum_info->depth) { case 1: { register unsigned char pixel; for (x=0; x < ((ssize_t) number_pixels-7); x+=8) { for (bit=0; bit < 8; bit++) { if (quantum_info->min_is_white == MagickFalse) pixel=(unsigned char) (((*p) & (1 << (7-bit))) == 0 ? 0x00 : 0x01); else pixel=(unsigned char) (((*p) & (1 << (7-bit))) != 0 ? 0x00 : 0x01); SetPixelIndex(image,PushColormapIndex(image,pixel,&range_exception), q); SetPixelViaPixelInfo(image,image->colormap+(ssize_t) GetPixelIndex(image,q),q); q+=GetPixelChannels(image); } p++; } for (bit=0; bit < (ssize_t) (number_pixels % 8); bit++) { if (quantum_info->min_is_white == MagickFalse) pixel=(unsigned char) (((*p) & (1 << (7-bit))) == 0 ?
0x00 : 0x01); else pixel=(unsigned char) (((*p) & (1 << (7-bit))) != 0 ? 0x00 : 0x01); SetPixelIndex(image,PushColormapIndex(image,pixel,&range_exception),q); SetPixelViaPixelInfo(image,image->colormap+(ssize_t) GetPixelIndex(image,q),q); q+=GetPixelChannels(image); } break; } case 4: { register unsigned char pixel; for (x=0; x < ((ssize_t) number_pixels-1); x+=2) { pixel=(unsigned char) ((*p >> 4) & 0xf); SetPixelIndex(image,PushColormapIndex(image,pixel,&range_exception),q); SetPixelViaPixelInfo(image,image->colormap+(ssize_t) GetPixelIndex(image,q),q); q+=GetPixelChannels(image); pixel=(unsigned char) ((*p) & 0xf); SetPixelIndex(image,PushColormapIndex(image,pixel,&range_exception),q); SetPixelViaPixelInfo(image,image->colormap+(ssize_t) GetPixelIndex(image,q),q); p++; q+=GetPixelChannels(image); } for (bit=0; bit < (ssize_t) (number_pixels % 2); bit++) { pixel=(unsigned char) ((*p++ >> 4) & 0xf); SetPixelIndex(image,PushColormapIndex(image,pixel,&range_exception),q); SetPixelViaPixelInfo(image,image->colormap+(ssize_t) GetPixelIndex(image,q),q); q+=GetPixelChannels(image); } break; } case 8: { unsigned char pixel; for (x=0; x < (ssize_t) number_pixels; x++) { p=PushCharPixel(p,&pixel); SetPixelIndex(image,PushColormapIndex(image,pixel,&range_exception),q); SetPixelViaPixelInfo(image,image->colormap+(ssize_t) GetPixelIndex(image,q),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } case 16: { unsigned short pixel; if (quantum_info->format == FloatingPointQuantumFormat) { for (x=0; x < (ssize_t) number_pixels; x++) { p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelIndex(image,PushColormapIndex(image,ClampToQuantum( (double) QuantumRange*HalfToSinglePrecision(pixel)), &range_exception),q); SetPixelViaPixelInfo(image,image->colormap+(ssize_t) GetPixelIndex(image,q),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } for (x=0; x < (ssize_t) number_pixels; x++) { p=PushShortPixel(quantum_info->endian,p,&pixel);
SetPixelIndex(image,PushColormapIndex(image,pixel,&range_exception),q); SetPixelViaPixelInfo(image,image->colormap+(ssize_t) GetPixelIndex(image,q),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } case 32: { unsigned int pixel; if (quantum_info->format == FloatingPointQuantumFormat) { float pixel; for (x=0; x < (ssize_t) number_pixels; x++) { p=PushFloatPixel(quantum_info,p,&pixel); SetPixelIndex(image,PushColormapIndex(image,ClampToQuantum(pixel), &range_exception),q); SetPixelViaPixelInfo(image,image->colormap+(ssize_t) GetPixelIndex(image,q),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } for (x=0; x < (ssize_t) number_pixels; x++) { p=PushLongPixel(quantum_info->endian,p,&pixel); SetPixelIndex(image,PushColormapIndex(image,pixel,&range_exception),q); SetPixelViaPixelInfo(image,image->colormap+(ssize_t) GetPixelIndex(image,q),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } case 64: { if (quantum_info->format == FloatingPointQuantumFormat) { double pixel; for (x=0; x < (ssize_t) number_pixels; x++) { p=PushDoublePixel(quantum_info,p,&pixel); SetPixelIndex(image,PushColormapIndex(image,ClampToQuantum(pixel), &range_exception),q); SetPixelViaPixelInfo(image,image->colormap+(ssize_t) GetPixelIndex(image,q),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } }
/* Non-float 64-bit input falls through to this generic any-depth path. */
default: { for (x=0; x < (ssize_t) number_pixels; x++) { p=PushQuantumPixel(quantum_info,p,&pixel); SetPixelIndex(image,PushColormapIndex(image,pixel,&range_exception),q); SetPixelViaPixelInfo(image,image->colormap+(ssize_t) GetPixelIndex(image,q),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } } if (range_exception != MagickFalse) (void) ThrowMagickException(exception,GetMagickModule(),CorruptImageError, "InvalidColormapIndex","`%s'",image->filename); }
/*
  ImportIndexAlphaQuantum(): decode interleaved colormap-index,alpha pairs;
  indexes are validated via PushColormapIndex as in ImportIndexQuantum.
*/
static void ImportIndexAlphaQuantum(const Image *image, QuantumInfo *quantum_info,const MagickSizeType number_pixels, const unsigned char *magick_restrict p,Quantum *magick_restrict
/* Body of ImportIndexAlphaQuantum (signature opens on the previous lines).
   Requires a PseudoClass image; otherwise raises ImageError and returns. */
q, ExceptionInfo *exception) { MagickBooleanType range_exception; QuantumAny range; register ssize_t x; ssize_t bit; unsigned int pixel; if (image->storage_class != PseudoClass) { (void) ThrowMagickException(exception,GetMagickModule(),ImageError, "ColormappedImageRequired","`%s'",image->filename); return; } range_exception=MagickFalse; switch (quantum_info->depth) { case 1: {
/* NOTE(review): unlike the 1-bit loops elsewhere in this file, this loop
   never advances p after each byte's 4 index/alpha pairs inside the main
   loop body's closing brace placement -- the `p++` sits outside the inner
   loop in sibling functions; verify input advance here against them. */
register unsigned char pixel; for (x=((ssize_t) number_pixels-3); x > 0; x-=4) { for (bit=0; bit < 8; bit+=2) { if (quantum_info->min_is_white == MagickFalse) pixel=(unsigned char) (((*p) & (1 << (7-bit))) == 0 ? 0x00 : 0x01); else pixel=(unsigned char) (((*p) & (1 << (7-bit))) != 0 ? 0x00 : 0x01); SetPixelGray(image,(Quantum) (pixel == 0 ? 0 : QuantumRange),q); SetPixelAlpha(image,((*p) & (1UL << (unsigned char) (6-bit))) == 0 ? TransparentAlpha : OpaqueAlpha,q); SetPixelIndex(image,(Quantum) (pixel == 0 ? 0 : 1),q); q+=GetPixelChannels(image); } } if ((number_pixels % 4) != 0) for (bit=3; bit >= (ssize_t) (4-(number_pixels % 4)); bit-=2) { if (quantum_info->min_is_white == MagickFalse) pixel=(unsigned char) (((*p) & (1 << (7-bit))) == 0 ? 0x00 : 0x01); else pixel=(unsigned char) (((*p) & (1 << (7-bit))) != 0 ? 0x00 : 0x01); SetPixelIndex(image,(Quantum) (pixel == 0 ? 0 : 1),q); SetPixelGray(image,(Quantum) (pixel == 0 ? 0 : QuantumRange),q); SetPixelAlpha(image,((*p) & (1UL << (unsigned char) (6-bit))) == 0 ?
TransparentAlpha : OpaqueAlpha,q); q+=GetPixelChannels(image); } break; } case 4: { register unsigned char pixel; range=GetQuantumRange(quantum_info->depth); for (x=0; x < (ssize_t) number_pixels; x++) { pixel=(unsigned char) ((*p >> 4) & 0xf); SetPixelIndex(image,PushColormapIndex(image,pixel,&range_exception),q); SetPixelViaPixelInfo(image,image->colormap+(ssize_t) GetPixelIndex(image,q),q); pixel=(unsigned char) ((*p) & 0xf); SetPixelAlpha(image,ScaleAnyToQuantum(pixel,range),q); p++; q+=GetPixelChannels(image); } break; } case 8: { unsigned char pixel; for (x=0; x < (ssize_t) number_pixels; x++) { p=PushCharPixel(p,&pixel); SetPixelIndex(image,PushColormapIndex(image,pixel,&range_exception),q); SetPixelViaPixelInfo(image,image->colormap+(ssize_t) GetPixelIndex(image,q),q); p=PushCharPixel(p,&pixel); SetPixelAlpha(image,ScaleCharToQuantum(pixel),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } case 16: { unsigned short pixel; if (quantum_info->format == FloatingPointQuantumFormat) { for (x=0; x < (ssize_t) number_pixels; x++) { p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelIndex(image,PushColormapIndex(image,ClampToQuantum( (double) QuantumRange*HalfToSinglePrecision(pixel)), &range_exception),q); SetPixelViaPixelInfo(image,image->colormap+(ssize_t) GetPixelIndex(image,q),q); p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelAlpha(image,ClampToQuantum(QuantumRange* HalfToSinglePrecision(pixel)),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } for (x=0; x < (ssize_t) number_pixels; x++) { p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelIndex(image,PushColormapIndex(image,pixel,&range_exception),q); SetPixelViaPixelInfo(image,image->colormap+(ssize_t) GetPixelIndex(image,q),q); p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelAlpha(image,ScaleShortToQuantum(pixel),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } case 32: { unsigned int pixel; if (quantum_info->format ==
FloatingPointQuantumFormat) { float pixel; for (x=0; x < (ssize_t) number_pixels; x++) { p=PushFloatPixel(quantum_info,p,&pixel); SetPixelIndex(image,PushColormapIndex(image, ClampToQuantum(pixel),&range_exception),q); SetPixelViaPixelInfo(image,image->colormap+(ssize_t) GetPixelIndex(image,q),q); p=PushFloatPixel(quantum_info,p,&pixel); SetPixelAlpha(image,ClampToQuantum(pixel),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } for (x=0; x < (ssize_t) number_pixels; x++) { p=PushLongPixel(quantum_info->endian,p,&pixel); SetPixelIndex(image,PushColormapIndex(image,pixel,&range_exception),q); SetPixelViaPixelInfo(image,image->colormap+(ssize_t) GetPixelIndex(image,q),q); p=PushLongPixel(quantum_info->endian,p,&pixel); SetPixelAlpha(image,ScaleLongToQuantum(pixel),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } case 64: { if (quantum_info->format == FloatingPointQuantumFormat) { double pixel; for (x=0; x < (ssize_t) number_pixels; x++) { p=PushDoublePixel(quantum_info,p,&pixel); SetPixelIndex(image,PushColormapIndex(image,ClampToQuantum(pixel), &range_exception),q); SetPixelViaPixelInfo(image,image->colormap+(ssize_t) GetPixelIndex(image,q),q); p=PushDoublePixel(quantum_info,p,&pixel); SetPixelAlpha(image,ClampToQuantum(pixel),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } }
/* Non-float 64-bit input falls through to this generic any-depth path. */
default: { range=GetQuantumRange(quantum_info->depth); for (x=0; x < (ssize_t) number_pixels; x++) { p=PushQuantumPixel(quantum_info,p,&pixel); SetPixelIndex(image,PushColormapIndex(image,pixel,&range_exception),q); SetPixelViaPixelInfo(image,image->colormap+(ssize_t) GetPixelIndex(image,q),q); p=PushQuantumPixel(quantum_info,p,&pixel); SetPixelAlpha(image,ScaleAnyToQuantum(pixel,range),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } } if (range_exception != MagickFalse) (void) ThrowMagickException(exception,GetMagickModule(),CorruptImageError, "InvalidColormapIndex","`%s'",image->filename); }
/*
  ImportOpacityQuantum(): decode a single opacity sample per pixel of the
  configured depth from raw stream `p` into the pixel cache `q`.
*/
static void ImportOpacityQuantum(const
/* Body of ImportOpacityQuantum (signature opens on the previous line). */
Image *image,QuantumInfo *quantum_info, const MagickSizeType number_pixels,const unsigned char *magick_restrict p, Quantum *magick_restrict q,ExceptionInfo *exception) { QuantumAny range; register ssize_t x; unsigned int pixel; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); switch (quantum_info->depth) { case 8: { unsigned char pixel; for (x=0; x < (ssize_t) number_pixels; x++) { p=PushCharPixel(p,&pixel); SetPixelOpacity(image,ScaleCharToQuantum(pixel),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } case 16: { unsigned short pixel; if (quantum_info->format == FloatingPointQuantumFormat) { for (x=0; x < (ssize_t) number_pixels; x++) { p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelOpacity(image,ClampToQuantum(QuantumRange* HalfToSinglePrecision(pixel)),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } for (x=0; x < (ssize_t) number_pixels; x++) { p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelOpacity(image,ScaleShortToQuantum(pixel),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } case 32: { unsigned int pixel; if (quantum_info->format == FloatingPointQuantumFormat) { float pixel; for (x=0; x < (ssize_t) number_pixels; x++) { p=PushFloatPixel(quantum_info,p,&pixel); SetPixelOpacity(image,ClampToQuantum(pixel),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } for (x=0; x < (ssize_t) number_pixels; x++) { p=PushLongPixel(quantum_info->endian,p,&pixel); SetPixelOpacity(image,ScaleLongToQuantum(pixel),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } case 64: { if (quantum_info->format == FloatingPointQuantumFormat) { double pixel; for (x=0; x < (ssize_t) number_pixels; x++) { p=PushDoublePixel(quantum_info,p,&pixel); SetPixelOpacity(image,ClampToQuantum(pixel),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } }
/* Non-float 64-bit input falls through to this generic any-depth path. */
default: { range=GetQuantumRange(quantum_info->depth); for (x=0; x < (ssize_t) number_pixels; x++) {
p=PushQuantumPixel(quantum_info,p,&pixel); SetPixelOpacity(image,ScaleAnyToQuantum(pixel,range),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } } }
/*
  ImportRedQuantum(): decode a single red sample per pixel of the configured
  depth from raw stream `p` into the pixel cache `q`.
*/
static void ImportRedQuantum(const Image *image,QuantumInfo *quantum_info, const MagickSizeType number_pixels,const unsigned char *magick_restrict p, Quantum *magick_restrict q,ExceptionInfo *exception) { QuantumAny range; register ssize_t x; unsigned int pixel; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); switch (quantum_info->depth) { case 8: { unsigned char pixel; for (x=0; x < (ssize_t) number_pixels; x++) { p=PushCharPixel(p,&pixel); SetPixelRed(image,ScaleCharToQuantum(pixel),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } case 16: { unsigned short pixel; if (quantum_info->format == FloatingPointQuantumFormat) { for (x=0; x < (ssize_t) number_pixels; x++) { p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelRed(image,ClampToQuantum(QuantumRange* HalfToSinglePrecision(pixel)),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } for (x=0; x < (ssize_t) number_pixels; x++) { p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelRed(image,ScaleShortToQuantum(pixel),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } case 32: { unsigned int pixel; if (quantum_info->format == FloatingPointQuantumFormat) { float pixel; for (x=0; x < (ssize_t) number_pixels; x++) { p=PushFloatPixel(quantum_info,p,&pixel); SetPixelRed(image,ClampToQuantum(pixel),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } for (x=0; x < (ssize_t) number_pixels; x++) { p=PushLongPixel(quantum_info->endian,p,&pixel); SetPixelRed(image,ScaleLongToQuantum(pixel),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } case 64: { if (quantum_info->format == FloatingPointQuantumFormat) { double pixel; for (x=0; x < (ssize_t) number_pixels; x++) { p=PushDoublePixel(quantum_info,p,&pixel); SetPixelRed(image,ClampToQuantum(pixel),q);
p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } }
/* Non-float 64-bit input falls through to this generic any-depth path. */
default: { range=GetQuantumRange(quantum_info->depth); for (x=0; x < (ssize_t) number_pixels; x++) { p=PushQuantumPixel(quantum_info,p,&pixel); SetPixelRed(image,ScaleAnyToQuantum(pixel,range),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } } }
/*
  ImportRGBQuantum(): decode interleaved R,G,B triples (alpha forced opaque
  at depth 8); the function continues beyond this chunk.
*/
static void ImportRGBQuantum(const Image *image,QuantumInfo *quantum_info, const MagickSizeType number_pixels,const unsigned char *magick_restrict p, Quantum *magick_restrict q,ExceptionInfo *exception) { QuantumAny range; register ssize_t x; ssize_t bit; unsigned int pixel; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); switch (quantum_info->depth) { case 8: { unsigned char pixel; for (x=0; x < (ssize_t) number_pixels; x++) { p=PushCharPixel(p,&pixel); SetPixelRed(image,ScaleCharToQuantum(pixel),q); p=PushCharPixel(p,&pixel); SetPixelGreen(image,ScaleCharToQuantum(pixel),q); p=PushCharPixel(p,&pixel); SetPixelBlue(image,ScaleCharToQuantum(pixel),q); SetPixelAlpha(image,OpaqueAlpha,q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } case 10: { range=GetQuantumRange(quantum_info->depth); if (quantum_info->pack == MagickFalse) { for (x=0; x < (ssize_t) number_pixels; x++) { p=PushLongPixel(quantum_info->endian,p,&pixel); SetPixelRed(image,ScaleAnyToQuantum((pixel >> 22) & 0x3ff,range),q); SetPixelGreen(image,ScaleAnyToQuantum((pixel >> 12) & 0x3ff,range), q); SetPixelBlue(image,ScaleAnyToQuantum((pixel >> 2) & 0x3ff,range),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } if (quantum_info->quantum == 32U) { for (x=0; x < (ssize_t) number_pixels; x++) { p=PushQuantumLongPixel(quantum_info,p,&pixel); SetPixelRed(image,ScaleAnyToQuantum(pixel,range),q); p=PushQuantumLongPixel(quantum_info,p,&pixel); SetPixelGreen(image,ScaleAnyToQuantum(pixel,range),q); p=PushQuantumLongPixel(quantum_info,p,&pixel); SetPixelBlue(image,ScaleAnyToQuantum(pixel,range),q); q+=GetPixelChannels(image); } break;
} for (x=0; x < (ssize_t) number_pixels; x++) { p=PushQuantumPixel(quantum_info,p,&pixel); SetPixelRed(image,ScaleAnyToQuantum(pixel,range),q); p=PushQuantumPixel(quantum_info,p,&pixel); SetPixelGreen(image,ScaleAnyToQuantum(pixel,range),q); p=PushQuantumPixel(quantum_info,p,&pixel); SetPixelBlue(image,ScaleAnyToQuantum(pixel,range),q); q+=GetPixelChannels(image); } break; } case 12: { range=GetQuantumRange(quantum_info->depth); if (quantum_info->pack == MagickFalse) { unsigned short pixel; for (x=0; x < (ssize_t) (3*number_pixels-1); x+=2) { p=PushShortPixel(quantum_info->endian,p,&pixel); switch (x % 3) { default: case 0: { SetPixelRed(image,ScaleAnyToQuantum((QuantumAny) (pixel >> 4), range),q); break; } case 1: { SetPixelGreen(image,ScaleAnyToQuantum((QuantumAny) (pixel >> 4), range),q); break; } case 2: { SetPixelBlue(image,ScaleAnyToQuantum((QuantumAny) (pixel >> 4), range),q); q+=GetPixelChannels(image); break; } } p=PushShortPixel(quantum_info->endian,p,&pixel); switch ((x+1) % 3) { default: case 0: { SetPixelRed(image,ScaleAnyToQuantum((QuantumAny) (pixel >> 4), range),q); break; } case 1: { SetPixelGreen(image,ScaleAnyToQuantum((QuantumAny) (pixel >> 4), range),q); break; } case 2: { SetPixelBlue(image,ScaleAnyToQuantum((QuantumAny) (pixel >> 4), range),q); q+=GetPixelChannels(image); break; } } p+=quantum_info->pad; } for (bit=0; bit < (ssize_t) (3*number_pixels % 2); bit++) { p=PushShortPixel(quantum_info->endian,p,&pixel); switch ((x+bit) % 3) { default: case 0: { SetPixelRed(image,ScaleAnyToQuantum((QuantumAny) (pixel >> 4), range),q); break; } case 1: { SetPixelGreen(image,ScaleAnyToQuantum((QuantumAny) (pixel >> 4), range),q); break; } case 2: { SetPixelBlue(image,ScaleAnyToQuantum((QuantumAny) (pixel >> 4), range),q); q+=GetPixelChannels(image); break; } } p+=quantum_info->pad; } if (bit != 0) p++; break; } if (quantum_info->quantum == 32U) { for (x=0; x < (ssize_t) number_pixels; x++) { p=PushQuantumLongPixel(quantum_info,p,&pixel); 
SetPixelRed(image,ScaleAnyToQuantum(pixel,range),q); p=PushQuantumLongPixel(quantum_info,p,&pixel); SetPixelGreen(image,ScaleAnyToQuantum(pixel,range),q); p=PushQuantumLongPixel(quantum_info,p,&pixel); SetPixelBlue(image,ScaleAnyToQuantum(pixel,range),q); q+=GetPixelChannels(image); } break; } for (x=0; x < (ssize_t) number_pixels; x++) { p=PushQuantumPixel(quantum_info,p,&pixel); SetPixelRed(image,ScaleAnyToQuantum(pixel,range),q); p=PushQuantumPixel(quantum_info,p,&pixel); SetPixelGreen(image,ScaleAnyToQuantum(pixel,range),q); p=PushQuantumPixel(quantum_info,p,&pixel); SetPixelBlue(image,ScaleAnyToQuantum(pixel,range),q); q+=GetPixelChannels(image); } break; } case 16: { unsigned short pixel; if (quantum_info->format == FloatingPointQuantumFormat) { for (x=0; x < (ssize_t) number_pixels; x++) { p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelRed(image,ClampToQuantum(QuantumRange* HalfToSinglePrecision(pixel)),q); p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelGreen(image,ClampToQuantum(QuantumRange* HalfToSinglePrecision(pixel)),q); p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelBlue(image,ClampToQuantum(QuantumRange* HalfToSinglePrecision(pixel)),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } for (x=0; x < (ssize_t) number_pixels; x++) { p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelRed(image,ScaleShortToQuantum(pixel),q); p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelGreen(image,ScaleShortToQuantum(pixel),q); p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelBlue(image,ScaleShortToQuantum(pixel),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } case 32: { unsigned int pixel; if (quantum_info->format == FloatingPointQuantumFormat) { float pixel; for (x=0; x < (ssize_t) number_pixels; x++) { p=PushFloatPixel(quantum_info,p,&pixel); SetPixelRed(image,ClampToQuantum(pixel),q); p=PushFloatPixel(quantum_info,p,&pixel); SetPixelGreen(image,ClampToQuantum(pixel),q); 
p=PushFloatPixel(quantum_info,p,&pixel); SetPixelBlue(image,ClampToQuantum(pixel),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } for (x=0; x < (ssize_t) number_pixels; x++) { p=PushLongPixel(quantum_info->endian,p,&pixel); SetPixelRed(image,ScaleLongToQuantum(pixel),q); p=PushLongPixel(quantum_info->endian,p,&pixel); SetPixelGreen(image,ScaleLongToQuantum(pixel),q); p=PushLongPixel(quantum_info->endian,p,&pixel); SetPixelBlue(image,ScaleLongToQuantum(pixel),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } case 64: { if (quantum_info->format == FloatingPointQuantumFormat) { double pixel; for (x=0; x < (ssize_t) number_pixels; x++) { p=PushDoublePixel(quantum_info,p,&pixel); SetPixelRed(image,ClampToQuantum(pixel),q); p=PushDoublePixel(quantum_info,p,&pixel); SetPixelGreen(image,ClampToQuantum(pixel),q); p=PushDoublePixel(quantum_info,p,&pixel); SetPixelBlue(image,ClampToQuantum(pixel),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } } default: { range=GetQuantumRange(quantum_info->depth); for (x=0; x < (ssize_t) number_pixels; x++) { p=PushQuantumPixel(quantum_info,p,&pixel); SetPixelRed(image,ScaleAnyToQuantum(pixel,range),q); p=PushQuantumPixel(quantum_info,p,&pixel); SetPixelGreen(image,ScaleAnyToQuantum(pixel,range),q); p=PushQuantumPixel(quantum_info,p,&pixel); SetPixelBlue(image,ScaleAnyToQuantum(pixel,range),q); q+=GetPixelChannels(image); } break; } } } static void ImportRGBAQuantum(const Image *image,QuantumInfo *quantum_info, const MagickSizeType number_pixels,const unsigned char *magick_restrict p, Quantum *magick_restrict q,ExceptionInfo *exception) { QuantumAny range; register ssize_t x; unsigned int pixel; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); switch (quantum_info->depth) { case 8: { unsigned char pixel; for (x=0; x < (ssize_t) number_pixels; x++) { p=PushCharPixel(p,&pixel); SetPixelRed(image,ScaleCharToQuantum(pixel),q); p=PushCharPixel(p,&pixel); 
SetPixelGreen(image,ScaleCharToQuantum(pixel),q); p=PushCharPixel(p,&pixel); SetPixelBlue(image,ScaleCharToQuantum(pixel),q); p=PushCharPixel(p,&pixel); SetPixelAlpha(image,ScaleCharToQuantum(pixel),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } case 10: { pixel=0; if (quantum_info->pack == MagickFalse) { register ssize_t i; size_t quantum; ssize_t n; n=0; quantum=0; for (x=0; x < (ssize_t) number_pixels; x++) { for (i=0; i < 4; i++) { switch (n % 3) { case 0: { p=PushLongPixel(quantum_info->endian,p,&pixel); quantum=(size_t) (ScaleShortToQuantum((unsigned short) (((pixel >> 22) & 0x3ff) << 6))); break; } case 1: { quantum=(size_t) (ScaleShortToQuantum((unsigned short) (((pixel >> 12) & 0x3ff) << 6))); break; } case 2: { quantum=(size_t) (ScaleShortToQuantum((unsigned short) (((pixel >> 2) & 0x3ff) << 6))); break; } } switch (i) { case 0: SetPixelRed(image,(Quantum) quantum,q); break; case 1: SetPixelGreen(image,(Quantum) quantum,q); break; case 2: SetPixelBlue(image,(Quantum) quantum,q); break; case 3: SetPixelAlpha(image,(Quantum) quantum,q); break; } n++; } p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } for (x=0; x < (ssize_t) number_pixels; x++) { p=PushQuantumPixel(quantum_info,p,&pixel); SetPixelRed(image,ScaleShortToQuantum((unsigned short) (pixel << 6)),q); p=PushQuantumPixel(quantum_info,p,&pixel); SetPixelGreen(image,ScaleShortToQuantum((unsigned short) (pixel << 6)), q); p=PushQuantumPixel(quantum_info,p,&pixel); SetPixelBlue(image,ScaleShortToQuantum((unsigned short) (pixel << 6)), q); p=PushQuantumPixel(quantum_info,p,&pixel); SetPixelAlpha(image,ScaleShortToQuantum((unsigned short) (pixel << 6)), q); q+=GetPixelChannels(image); } break; } case 16: { unsigned short pixel; if (quantum_info->format == FloatingPointQuantumFormat) { for (x=0; x < (ssize_t) number_pixels; x++) { p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelRed(image,ClampToQuantum(QuantumRange* HalfToSinglePrecision(pixel)),q); 
p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelGreen(image,ClampToQuantum(QuantumRange* HalfToSinglePrecision(pixel)),q); p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelBlue(image,ClampToQuantum(QuantumRange* HalfToSinglePrecision(pixel)),q); p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelAlpha(image,ClampToQuantum(QuantumRange* HalfToSinglePrecision(pixel)),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } for (x=0; x < (ssize_t) number_pixels; x++) { p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelRed(image,ScaleShortToQuantum(pixel),q); p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelGreen(image,ScaleShortToQuantum(pixel),q); p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelBlue(image,ScaleShortToQuantum(pixel),q); p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelAlpha(image,ScaleShortToQuantum(pixel),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } case 32: { unsigned int pixel; if (quantum_info->format == FloatingPointQuantumFormat) { float pixel; for (x=0; x < (ssize_t) number_pixels; x++) { p=PushFloatPixel(quantum_info,p,&pixel); SetPixelRed(image,ClampToQuantum(pixel),q); p=PushFloatPixel(quantum_info,p,&pixel); SetPixelGreen(image,ClampToQuantum(pixel),q); p=PushFloatPixel(quantum_info,p,&pixel); SetPixelBlue(image,ClampToQuantum(pixel),q); p=PushFloatPixel(quantum_info,p,&pixel); SetPixelAlpha(image,ClampToQuantum(pixel),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } for (x=0; x < (ssize_t) number_pixels; x++) { p=PushLongPixel(quantum_info->endian,p,&pixel); SetPixelRed(image,ScaleLongToQuantum(pixel),q); p=PushLongPixel(quantum_info->endian,p,&pixel); SetPixelGreen(image,ScaleLongToQuantum(pixel),q); p=PushLongPixel(quantum_info->endian,p,&pixel); SetPixelBlue(image,ScaleLongToQuantum(pixel),q); p=PushLongPixel(quantum_info->endian,p,&pixel); SetPixelAlpha(image,ScaleLongToQuantum(pixel),q); p+=quantum_info->pad; q+=GetPixelChannels(image); 
} break; } case 64: { if (quantum_info->format == FloatingPointQuantumFormat) { double pixel; for (x=0; x < (ssize_t) number_pixels; x++) { p=PushDoublePixel(quantum_info,p,&pixel); SetPixelRed(image,ClampToQuantum(pixel),q); p=PushDoublePixel(quantum_info,p,&pixel); SetPixelGreen(image,ClampToQuantum(pixel),q); p=PushDoublePixel(quantum_info,p,&pixel); SetPixelBlue(image,ClampToQuantum(pixel),q); p=PushDoublePixel(quantum_info,p,&pixel); SetPixelAlpha(image,ClampToQuantum(pixel),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } } default: { range=GetQuantumRange(quantum_info->depth); for (x=0; x < (ssize_t) number_pixels; x++) { p=PushQuantumPixel(quantum_info,p,&pixel); SetPixelRed(image,ScaleAnyToQuantum(pixel,range),q); p=PushQuantumPixel(quantum_info,p,&pixel); SetPixelGreen(image,ScaleAnyToQuantum(pixel,range),q); p=PushQuantumPixel(quantum_info,p,&pixel); SetPixelBlue(image,ScaleAnyToQuantum(pixel,range),q); p=PushQuantumPixel(quantum_info,p,&pixel); SetPixelAlpha(image,ScaleAnyToQuantum(pixel,range),q); q+=GetPixelChannels(image); } break; } } } static void ImportRGBOQuantum(const Image *image,QuantumInfo *quantum_info, const MagickSizeType number_pixels,const unsigned char *magick_restrict p, Quantum *magick_restrict q,ExceptionInfo *exception) { QuantumAny range; register ssize_t x; unsigned int pixel; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); switch (quantum_info->depth) { case 8: { unsigned char pixel; for (x=0; x < (ssize_t) number_pixels; x++) { p=PushCharPixel(p,&pixel); SetPixelRed(image,ScaleCharToQuantum(pixel),q); p=PushCharPixel(p,&pixel); SetPixelGreen(image,ScaleCharToQuantum(pixel),q); p=PushCharPixel(p,&pixel); SetPixelBlue(image,ScaleCharToQuantum(pixel),q); p=PushCharPixel(p,&pixel); SetPixelOpacity(image,ScaleCharToQuantum(pixel),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } case 10: { pixel=0; if (quantum_info->pack == MagickFalse) { register ssize_t i; size_t 
quantum; ssize_t n; n=0; quantum=0; for (x=0; x < (ssize_t) number_pixels; x++) { for (i=0; i < 4; i++) { switch (n % 3) { case 0: { p=PushLongPixel(quantum_info->endian,p,&pixel); quantum=(size_t) (ScaleShortToQuantum((unsigned short) (((pixel >> 22) & 0x3ff) << 6))); break; } case 1: { quantum=(size_t) (ScaleShortToQuantum((unsigned short) (((pixel >> 12) & 0x3ff) << 6))); break; } case 2: { quantum=(size_t) (ScaleShortToQuantum((unsigned short) (((pixel >> 2) & 0x3ff) << 6))); break; } } switch (i) { case 0: SetPixelRed(image,(Quantum) quantum,q); break; case 1: SetPixelGreen(image,(Quantum) quantum,q); break; case 2: SetPixelBlue(image,(Quantum) quantum,q); break; case 3: SetPixelOpacity(image,(Quantum) quantum,q); break; } n++; } p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } for (x=0; x < (ssize_t) number_pixels; x++) { p=PushQuantumPixel(quantum_info,p,&pixel); SetPixelRed(image,ScaleShortToQuantum((unsigned short) (pixel << 6)),q); p=PushQuantumPixel(quantum_info,p,&pixel); SetPixelGreen(image,ScaleShortToQuantum((unsigned short) (pixel << 6)), q); p=PushQuantumPixel(quantum_info,p,&pixel); SetPixelBlue(image,ScaleShortToQuantum((unsigned short) (pixel << 6)), q); p=PushQuantumPixel(quantum_info,p,&pixel); SetPixelOpacity(image,ScaleShortToQuantum((unsigned short) (pixel << 6)), q); q+=GetPixelChannels(image); } break; } case 16: { unsigned short pixel; if (quantum_info->format == FloatingPointQuantumFormat) { for (x=0; x < (ssize_t) number_pixels; x++) { p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelRed(image,ClampToQuantum(QuantumRange* HalfToSinglePrecision(pixel)),q); p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelGreen(image,ClampToQuantum(QuantumRange* HalfToSinglePrecision(pixel)),q); p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelBlue(image,ClampToQuantum(QuantumRange* HalfToSinglePrecision(pixel)),q); p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelOpacity(image,ClampToQuantum(QuantumRange* 
HalfToSinglePrecision(pixel)),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } for (x=0; x < (ssize_t) number_pixels; x++) { p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelRed(image,ScaleShortToQuantum(pixel),q); p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelGreen(image,ScaleShortToQuantum(pixel),q); p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelBlue(image,ScaleShortToQuantum(pixel),q); p=PushShortPixel(quantum_info->endian,p,&pixel); SetPixelOpacity(image,ScaleShortToQuantum(pixel),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } case 32: { unsigned int pixel; if (quantum_info->format == FloatingPointQuantumFormat) { float pixel; for (x=0; x < (ssize_t) number_pixels; x++) { p=PushFloatPixel(quantum_info,p,&pixel); SetPixelRed(image,ClampToQuantum(pixel),q); p=PushFloatPixel(quantum_info,p,&pixel); SetPixelGreen(image,ClampToQuantum(pixel),q); p=PushFloatPixel(quantum_info,p,&pixel); SetPixelBlue(image,ClampToQuantum(pixel),q); p=PushFloatPixel(quantum_info,p,&pixel); SetPixelOpacity(image,ClampToQuantum(pixel),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } for (x=0; x < (ssize_t) number_pixels; x++) { p=PushLongPixel(quantum_info->endian,p,&pixel); SetPixelRed(image,ScaleLongToQuantum(pixel),q); p=PushLongPixel(quantum_info->endian,p,&pixel); SetPixelGreen(image,ScaleLongToQuantum(pixel),q); p=PushLongPixel(quantum_info->endian,p,&pixel); SetPixelBlue(image,ScaleLongToQuantum(pixel),q); p=PushLongPixel(quantum_info->endian,p,&pixel); SetPixelOpacity(image,ScaleLongToQuantum(pixel),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } case 64: { if (quantum_info->format == FloatingPointQuantumFormat) { double pixel; for (x=0; x < (ssize_t) number_pixels; x++) { p=PushDoublePixel(quantum_info,p,&pixel); SetPixelRed(image,ClampToQuantum(pixel),q); p=PushDoublePixel(quantum_info,p,&pixel); SetPixelGreen(image,ClampToQuantum(pixel),q); p=PushDoublePixel(quantum_info,p,&pixel); 
SetPixelBlue(image,ClampToQuantum(pixel),q); p=PushDoublePixel(quantum_info,p,&pixel); SetPixelOpacity(image,ClampToQuantum(pixel),q); p+=quantum_info->pad; q+=GetPixelChannels(image); } break; } } default: { range=GetQuantumRange(quantum_info->depth); for (x=0; x < (ssize_t) number_pixels; x++) { p=PushQuantumPixel(quantum_info,p,&pixel); SetPixelRed(image,ScaleAnyToQuantum(pixel,range),q); p=PushQuantumPixel(quantum_info,p,&pixel); SetPixelGreen(image,ScaleAnyToQuantum(pixel,range),q); p=PushQuantumPixel(quantum_info,p,&pixel); SetPixelBlue(image,ScaleAnyToQuantum(pixel,range),q); p=PushQuantumPixel(quantum_info,p,&pixel); SetPixelOpacity(image,ScaleAnyToQuantum(pixel,range),q); q+=GetPixelChannels(image); } break; } } } MagickExport size_t ImportQuantumPixels(const Image *image, CacheView *image_view,QuantumInfo *quantum_info, const QuantumType quantum_type,const unsigned char *magick_restrict pixels, ExceptionInfo *exception) { MagickSizeType number_pixels; register const unsigned char *magick_restrict p; register ssize_t x; register Quantum *magick_restrict q; size_t extent; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(quantum_info != (QuantumInfo *) NULL); assert(quantum_info->signature == MagickCoreSignature); if (pixels == (const unsigned char *) NULL) pixels=(const unsigned char *) GetQuantumPixels(quantum_info); x=0; p=pixels; if (image_view == (CacheView *) NULL) { number_pixels=GetImageExtent(image); q=GetAuthenticPixelQueue(image); } else { number_pixels=GetCacheViewExtent(image_view); q=GetCacheViewAuthenticPixelQueue(image_view); } ResetQuantumState(quantum_info); extent=GetQuantumExtent(image,quantum_info,quantum_type); switch (quantum_type) { case AlphaQuantum: { ImportAlphaQuantum(image,quantum_info,number_pixels,p,q,exception); break; } case BGRQuantum: { 
ImportBGRQuantum(image,quantum_info,number_pixels,p,q,exception); break; } case BGRAQuantum: { ImportBGRAQuantum(image,quantum_info,number_pixels,p,q,exception); break; } case BGROQuantum: { ImportBGROQuantum(image,quantum_info,number_pixels,p,q,exception); break; } case BlackQuantum: { ImportBlackQuantum(image,quantum_info,number_pixels,p,q,exception); break; } case BlueQuantum: case YellowQuantum: { ImportBlueQuantum(image,quantum_info,number_pixels,p,q,exception); break; } case CMYKQuantum: { ImportCMYKQuantum(image,quantum_info,number_pixels,p,q,exception); break; } case CMYKAQuantum: { ImportCMYKAQuantum(image,quantum_info,number_pixels,p,q,exception); break; } case CMYKOQuantum: { ImportCMYKOQuantum(image,quantum_info,number_pixels,p,q,exception); break; } case CbYCrYQuantum: { ImportCbYCrYQuantum(image,quantum_info,number_pixels,p,q,exception); break; } case GrayQuantum: { ImportGrayQuantum(image,quantum_info,number_pixels,p,q,exception); break; } case GrayAlphaQuantum: { ImportGrayAlphaQuantum(image,quantum_info,number_pixels,p,q,exception); break; } case GreenQuantum: case MagentaQuantum: { ImportGreenQuantum(image,quantum_info,number_pixels,p,q,exception); break; } case IndexQuantum: { ImportIndexQuantum(image,quantum_info,number_pixels,p,q,exception); break; } case IndexAlphaQuantum: { ImportIndexAlphaQuantum(image,quantum_info,number_pixels,p,q,exception); break; } case OpacityQuantum: { ImportOpacityQuantum(image,quantum_info,number_pixels,p,q,exception); break; } case RedQuantum: case CyanQuantum: { ImportRedQuantum(image,quantum_info,number_pixels,p,q,exception); break; } case RGBQuantum: case CbYCrQuantum: { ImportRGBQuantum(image,quantum_info,number_pixels,p,q,exception); break; } case RGBAQuantum: case CbYCrAQuantum: { ImportRGBAQuantum(image,quantum_info,number_pixels,p,q,exception); break; } case RGBOQuantum: { ImportRGBOQuantum(image,quantum_info,number_pixels,p,q,exception); break; } default: break; } if ((quantum_type == CbYCrQuantum) || 
(quantum_type == CbYCrAQuantum)) { Quantum quantum; register Quantum *magick_restrict q; q=GetAuthenticPixelQueue(image); if (image_view != (CacheView *) NULL) q=GetCacheViewAuthenticPixelQueue(image_view); for (x=0; x < (ssize_t) number_pixels; x++) { quantum=GetPixelRed(image,q); SetPixelRed(image,GetPixelGreen(image,q),q); SetPixelGreen(image,quantum,q); q+=GetPixelChannels(image); } } if (quantum_info->alpha_type == DisassociatedQuantumAlpha) { double gamma, Sa; register Quantum *magick_restrict q; /* Disassociate alpha. */ q=GetAuthenticPixelQueue(image); if (image_view != (CacheView *) NULL) q=GetCacheViewAuthenticPixelQueue(image_view); for (x=0; x < (ssize_t) number_pixels; x++) { register ssize_t i; if (GetPixelReadMask(image,q) == 0) { q+=GetPixelChannels(image); continue; } Sa=QuantumScale*GetPixelAlpha(image,q); gamma=PerceptibleReciprocal(Sa); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel=GetPixelChannelChannel(image,i); PixelTrait traits=GetPixelChannelTraits(image,channel); if ((channel == AlphaPixelChannel) || ((traits & UpdatePixelTrait) == 0)) continue; q[i]=ClampToQuantum(gamma*q[i]); } q+=GetPixelChannels(image); } } return(extent); }
./CrossVul/dataset_final_sorted/CWE-284/c/good_4786_1
crossvul-cpp_data_bad_4896_2
/*
 *  Common NFSv4 ACL handling code.
 *
 *  Copyright (c) 2002, 2003 The Regents of the University of Michigan.
 *  All rights reserved.
 *
 *  Marius Aamodt Eriksen <marius@umich.edu>
 *  Jeff Sedlak <jsedlak@umich.edu>
 *  J. Bruce Fields <bfields@umich.edu>
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *  3. Neither the name of the University nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 *  DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 *  FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 *  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 *  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 *  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 *  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/posix_acl.h>

#include "nfsfh.h"
#include "nfsd.h"
#include "acl.h"
#include "vfs.h"

/* internal flags passed through the translation helpers below */
#define NFS4_ACL_TYPE_DEFAULT	0x01
#define NFS4_ACL_DIR		0x02
#define NFS4_ACL_OWNER		0x04

/* mode bit translations: */
#define NFS4_READ_MODE (NFS4_ACE_READ_DATA)
#define NFS4_WRITE_MODE (NFS4_ACE_WRITE_DATA | NFS4_ACE_APPEND_DATA)
#define NFS4_EXECUTE_MODE NFS4_ACE_EXECUTE
#define NFS4_ANYONE_MODE (NFS4_ACE_READ_ATTRIBUTES | NFS4_ACE_READ_ACL | NFS4_ACE_SYNCHRONIZE)
#define NFS4_OWNER_MODE (NFS4_ACE_WRITE_ATTRIBUTES | NFS4_ACE_WRITE_ACL)

/* flags used to simulate posix default ACLs */
#define NFS4_INHERITANCE_FLAGS (NFS4_ACE_FILE_INHERIT_ACE \
		| NFS4_ACE_DIRECTORY_INHERIT_ACE)

#define NFS4_SUPPORTED_FLAGS (NFS4_INHERITANCE_FLAGS \
		| NFS4_ACE_INHERIT_ONLY_ACE \
		| NFS4_ACE_IDENTIFIER_GROUP)

/*
 * Translate a POSIX rwx permission set into an NFSv4 ALLOW access mask.
 * All entities get the "anyone" bits; the file owner additionally gets
 * write-attributes/write-acl, and write on a directory implies
 * delete-child.
 */
static u32
mask_from_posix(unsigned short perm, unsigned int flags)
{
	int mask = NFS4_ANYONE_MODE;

	if (flags & NFS4_ACL_OWNER)
		mask |= NFS4_OWNER_MODE;
	if (perm & ACL_READ)
		mask |= NFS4_READ_MODE;
	if (perm & ACL_WRITE)
		mask |= NFS4_WRITE_MODE;
	if ((perm & ACL_WRITE) && (flags & NFS4_ACL_DIR))
		mask |= NFS4_ACE_DELETE_CHILD;
	if (perm & ACL_EXECUTE)
		mask |= NFS4_EXECUTE_MODE;
	return mask;
}

/*
 * Translate a POSIX rwx permission set into an NFSv4 DENY access mask;
 * unlike mask_from_posix() this never includes the "anyone"/owner bits,
 * which must not be denied.
 */
static u32
deny_mask_from_posix(unsigned short perm, u32 flags)
{
	u32 mask = 0;

	if (perm & ACL_READ)
		mask |= NFS4_READ_MODE;
	if (perm & ACL_WRITE)
		mask |= NFS4_WRITE_MODE;
	if ((perm & ACL_WRITE) && (flags & NFS4_ACL_DIR))
		mask |= NFS4_ACE_DELETE_CHILD;
	if (perm & ACL_EXECUTE)
		mask |= NFS4_EXECUTE_MODE;
	return mask;
}

/* XXX: modify functions to return NFS errors; they're only ever
 * used by nfs code, after all.... */

/* We only map from NFSv4 to POSIX ACLs when setting ACLs, when we err on the
 * side of being more restrictive, so the mode bit mapping below is
 * pessimistic.  An optimistic version would be needed to handle DENY's,
 * but we expect to coalesce all ALLOWs and DENYs before mapping to mode
 * bits. */

/*
 * Map an NFSv4 access mask back to POSIX rwx bits; each POSIX bit is set
 * only if *all* the NFSv4 bits it implies are present (pessimistic, see
 * above).
 */
static void
low_mode_from_nfs4(u32 perm, unsigned short *mode, unsigned int flags)
{
	u32 write_mode = NFS4_WRITE_MODE;

	if (flags & NFS4_ACL_DIR)
		write_mode |= NFS4_ACE_DELETE_CHILD;
	*mode = 0;
	if ((perm & NFS4_READ_MODE) == NFS4_READ_MODE)
		*mode |= ACL_READ;
	if ((perm & write_mode) == write_mode)
		*mode |= ACL_WRITE;
	if ((perm & NFS4_EXECUTE_MODE) == NFS4_EXECUTE_MODE)
		*mode |= ACL_EXECUTE;
}

static short ace2type(struct nfs4_ace *);
static void _posix_to_nfsv4_one(struct posix_acl *, struct nfs4_acl *,
				unsigned int);

/*
 * Build the NFSv4 ACL for dentry from its POSIX access ACL (falling back
 * to one synthesized from the mode bits) plus, for directories, its POSIX
 * default ACL.  On success *acl points to a freshly kmalloc'd nfs4_acl
 * sized for the worst case of one (deny, allow) ACE pair per POSIX entry.
 */
int
nfsd4_get_nfs4_acl(struct svc_rqst *rqstp, struct dentry *dentry,
		struct nfs4_acl **acl)
{
	struct inode *inode = d_inode(dentry);
	int error = 0;
	struct posix_acl *pacl = NULL, *dpacl = NULL;
	unsigned int flags = 0;
	int size = 0;

	pacl = get_acl(inode, ACL_TYPE_ACCESS);
	if (!pacl)
		pacl = posix_acl_from_mode(inode->i_mode, GFP_KERNEL);

	if (IS_ERR(pacl))
		return PTR_ERR(pacl);

	/* allocate for worst case: one (deny, allow) pair each: */
	size += 2 * pacl->a_count;

	if (S_ISDIR(inode->i_mode)) {
		flags = NFS4_ACL_DIR;
		dpacl = get_acl(inode, ACL_TYPE_DEFAULT);
		if (IS_ERR(dpacl)) {
			error = PTR_ERR(dpacl);
			goto rel_pacl;
		}

		if (dpacl)
			size += 2 * dpacl->a_count;
	}

	*acl = kmalloc(nfs4_acl_bytes(size), GFP_KERNEL);
	if (*acl == NULL) {
		error = -ENOMEM;
		goto out;
	}
	(*acl)->naces = 0;

	_posix_to_nfsv4_one(pacl, *acl, flags & ~NFS4_ACL_TYPE_DEFAULT);

	if (dpacl)
		_posix_to_nfsv4_one(dpacl, *acl, flags | NFS4_ACL_TYPE_DEFAULT);

 out:
	posix_acl_release(dpacl);
 rel_pacl:
	posix_acl_release(pacl);
	return error;
}

/* Aggregated view of one POSIX ACL, used to compute deny masks below. */
struct posix_acl_summary {
	unsigned short owner;
	unsigned short users;	/* union of all named-user perms */
	unsigned short group;
	unsigned short groups;	/* union of all named-group perms */
	unsigned short other;
	unsigned short mask;
};

/*
 * Fill *pas from acl.  Named user/group permissions are OR'd together and
 * then reduced to effective permissions by ANDing with the mask entry.
 */
static void
summarize_posix_acl(struct posix_acl *acl, struct posix_acl_summary *pas)
{
	struct posix_acl_entry *pa, *pe;

	/*
	 * Only pas.users and pas.groups need initialization; previous
	 * posix_acl_valid() calls ensure that the other fields will be
	 * initialized in the following loop.  But, just to placate gcc:
	 */
	memset(pas, 0, sizeof(*pas));
	pas->mask = 07;

	pe = acl->a_entries + acl->a_count;

	FOREACH_ACL_ENTRY(pa, acl, pe) {
		switch (pa->e_tag) {
		case ACL_USER_OBJ:
			pas->owner = pa->e_perm;
			break;
		case ACL_GROUP_OBJ:
			pas->group = pa->e_perm;
			break;
		case ACL_USER:
			pas->users |= pa->e_perm;
			break;
		case ACL_GROUP:
			pas->groups |= pa->e_perm;
			break;
		case ACL_OTHER:
			pas->other = pa->e_perm;
			break;
		case ACL_MASK:
			pas->mask = pa->e_perm;
			break;
		}
	}
	/* We'll only care about effective permissions: */
	pas->users &= pas->mask;
	pas->group &= pas->mask;
	pas->groups &= pas->mask;
}

/*
 * Append the NFSv4 ACE translation of pacl to acl, emitting at most one
 * (deny, allow) pair per POSIX entry — the sizing contract relied on by
 * nfsd4_get_nfs4_acl() above.  With NFS4_ACL_TYPE_DEFAULT set in flags the
 * ACEs are marked inherit-only to model a POSIX default ACL.
 * We assume the acl has been verified with posix_acl_valid.
 */
static void
_posix_to_nfsv4_one(struct posix_acl *pacl, struct nfs4_acl *acl,
		unsigned int flags)
{
	struct posix_acl_entry *pa, *group_owner_entry;
	struct nfs4_ace *ace;
	struct posix_acl_summary pas;
	unsigned short deny;
	int eflag = ((flags & NFS4_ACL_TYPE_DEFAULT) ?
		NFS4_INHERITANCE_FLAGS | NFS4_ACE_INHERIT_ONLY_ACE : 0);

	BUG_ON(pacl->a_count < 3);
	summarize_posix_acl(pacl, &pas);

	pa = pacl->a_entries;
	ace = acl->aces + acl->naces;

	/* We could deny everything not granted by the owner: */
	deny = ~pas.owner;
	/*
	 * but it is equivalent (and simpler) to deny only what is not
	 * granted by later entries:
	 */
	deny &= pas.users | pas.group | pas.groups | pas.other;
	if (deny) {
		ace->type = NFS4_ACE_ACCESS_DENIED_ACE_TYPE;
		ace->flag = eflag;
		ace->access_mask = deny_mask_from_posix(deny, flags);
		ace->whotype = NFS4_ACL_WHO_OWNER;
		ace++;
		acl->naces++;
	}

	ace->type = NFS4_ACE_ACCESS_ALLOWED_ACE_TYPE;
	ace->flag = eflag;
	ace->access_mask = mask_from_posix(pa->e_perm, flags | NFS4_ACL_OWNER);
	ace->whotype = NFS4_ACL_WHO_OWNER;
	ace++;
	acl->naces++;
	pa++;

	/* named users: deny what later entries would otherwise grant,
	 * then allow the user's effective permissions */
	while (pa->e_tag == ACL_USER) {
		deny = ~(pa->e_perm & pas.mask);
		deny &= pas.groups | pas.group | pas.other;
		if (deny) {
			ace->type = NFS4_ACE_ACCESS_DENIED_ACE_TYPE;
			ace->flag = eflag;
			ace->access_mask = deny_mask_from_posix(deny, flags);
			ace->whotype = NFS4_ACL_WHO_NAMED;
			ace->who_uid = pa->e_uid;
			ace++;
			acl->naces++;
		}
		ace->type = NFS4_ACE_ACCESS_ALLOWED_ACE_TYPE;
		ace->flag = eflag;
		ace->access_mask = mask_from_posix(pa->e_perm & pas.mask,
						   flags);
		ace->whotype = NFS4_ACL_WHO_NAMED;
		ace->who_uid = pa->e_uid;
		ace++;
		acl->naces++;
		pa++;
	}

	/* In the case of groups, we apply allow ACEs first, then deny ACEs,
	 * since a user can be in more than one group.  */

	/* allow ACEs */

	group_owner_entry = pa;

	ace->type = NFS4_ACE_ACCESS_ALLOWED_ACE_TYPE;
	ace->flag = eflag;
	ace->access_mask = mask_from_posix(pas.group, flags);
	ace->whotype = NFS4_ACL_WHO_GROUP;
	ace++;
	acl->naces++;
	pa++;

	while (pa->e_tag == ACL_GROUP) {
		ace->type = NFS4_ACE_ACCESS_ALLOWED_ACE_TYPE;
		ace->flag = eflag | NFS4_ACE_IDENTIFIER_GROUP;
		ace->access_mask = mask_from_posix(pa->e_perm & pas.mask,
						   flags);
		ace->whotype = NFS4_ACL_WHO_NAMED;
		ace->who_gid = pa->e_gid;
		ace++;
		acl->naces++;
		pa++;
	}

	/* deny ACEs */

	/* rewind to the group-owner entry for the second (deny) pass */
	pa = group_owner_entry;

	deny = ~pas.group & pas.other;
	if (deny) {
		ace->type = NFS4_ACE_ACCESS_DENIED_ACE_TYPE;
		ace->flag = eflag;
		ace->access_mask = deny_mask_from_posix(deny, flags);
		ace->whotype = NFS4_ACL_WHO_GROUP;
		ace++;
		acl->naces++;
	}
	pa++;

	while (pa->e_tag == ACL_GROUP) {
		deny = ~(pa->e_perm & pas.mask);
		deny &= pas.other;
		if (deny) {
			ace->type = NFS4_ACE_ACCESS_DENIED_ACE_TYPE;
			ace->flag = eflag | NFS4_ACE_IDENTIFIER_GROUP;
			ace->access_mask = deny_mask_from_posix(deny, flags);
			ace->whotype = NFS4_ACL_WHO_NAMED;
			ace->who_gid = pa->e_gid;
			ace++;
			acl->naces++;
		}
		pa++;
	}

	/* skip the mask entry, if present, and emit EVERYONE@ from OTHER */
	if (pa->e_tag == ACL_MASK)
		pa++;
	ace->type = NFS4_ACE_ACCESS_ALLOWED_ACE_TYPE;
	ace->flag = eflag;
	ace->access_mask = mask_from_posix(pa->e_perm, flags);
	ace->whotype = NFS4_ACL_WHO_EVERYONE;
	acl->naces++;
}

/*
 * Ordering predicate for sort_pacl_range(): order by tag first, then by
 * uid/gid for named entries.
 */
static bool
pace_gt(struct posix_acl_entry *pace1, struct posix_acl_entry *pace2)
{
	if (pace1->e_tag != pace2->e_tag)
		return pace1->e_tag > pace2->e_tag;
	if (pace1->e_tag == ACL_USER)
		return uid_gt(pace1->e_uid, pace2->e_uid);
	if (pace1->e_tag == ACL_GROUP)
		return gid_gt(pace1->e_gid, pace2->e_gid);
	return false;
}

/* Sort entries [start, end] of pacl in place (end is inclusive). */
static void
sort_pacl_range(struct posix_acl *pacl, int start, int end) {
	int sorted = 0, i;

	/* We just do a bubble sort; easy to do in place, and we're not
	 * expecting acl's to be long enough to justify anything more. */
	while (!sorted) {
		sorted = 1;
		for (i = start; i < end; i++) {
			if (pace_gt(&pacl->a_entries[i],
				    &pacl->a_entries[i+1])) {
				sorted = 0;
				swap(pacl->a_entries[i],
				     pacl->a_entries[i + 1]);
			}
		}
	}
}

/*
 * Sort the named-user and named-group runs of pacl so that it satisfies
 * posix_acl_valid()'s ordering requirement.
 */
static void
sort_pacl(struct posix_acl *pacl)
{
	/* posix_acl_valid requires that users and groups be in order
	 * by uid/gid. */
	int i, j;

	/* no users or groups */
	if (!pacl || pacl->a_count <= 4)
		return;

	i = 1;
	while (pacl->a_entries[i].e_tag == ACL_USER)
		i++;
	sort_pacl_range(pacl, 1, i-1);

	BUG_ON(pacl->a_entries[i].e_tag != ACL_GROUP_OBJ);
	j = ++i;
	while (pacl->a_entries[j].e_tag == ACL_GROUP)
		j++;
	sort_pacl_range(pacl, i, j-1);
	return;
}

/*
 * While processing the NFSv4 ACE, this maintains bitmasks representing
 * which permission bits have been allowed and which denied to a given
 * entity:
 */
struct posix_ace_state {
	u32 allow;
	u32 deny;
};

struct posix_user_ace_state {
	union {
		kuid_t uid;
		kgid_t gid;
	};
	struct posix_ace_state perms;
};

struct posix_ace_state_array {
	int n;
	struct posix_user_ace_state aces[];
};

/*
 * While processing the NFSv4 ACE, this maintains the partial permissions
 * calculated so far:
 */

struct posix_acl_state {
	int empty;
	struct posix_ace_state owner;
	struct posix_ace_state group;
	struct posix_ace_state other;
	struct posix_ace_state everyone;
	struct posix_ace_state mask; /* Deny unused in this case */
	struct posix_ace_state_array *users;
	struct posix_ace_state_array *groups;
};

/*
 * Zero *state and allocate its named-user and named-group arrays, each
 * sized for cnt entries.  Returns 0 or -ENOMEM (with nothing leaked).
 * NOTE(review): the `alloc` size arithmetic is done in plain int —
 * presumably cnt is bounded by the decoded ACE count; confirm callers
 * cap it before trusting untrusted input here.
 */
static int
init_state(struct posix_acl_state *state, int cnt)
{
	int alloc;

	memset(state, 0, sizeof(struct posix_acl_state));
	state->empty = 1;
	/*
	 * In the worst case, each individual acl could be for a distinct
	 * named user or group, but we don't know which, so we allocate
	 * enough space for either:
	 */
	alloc = sizeof(struct posix_ace_state_array)
		+ cnt*sizeof(struct posix_user_ace_state);
	state->users = kzalloc(alloc, GFP_KERNEL);
	if (!state->users)
		return -ENOMEM;
	state->groups = kzalloc(alloc, GFP_KERNEL);
	if (!state->groups) {
		kfree(state->users);
		return -ENOMEM;
	}
	return 0;
}

/* Release the arrays allocated by init_state(). */
static void
free_state(struct posix_acl_state *state) {
	kfree(state->users);
	kfree(state->groups);
}

/* Fold an entity's allowed bits into the computed ACL_MASK entry. */
static inline void add_to_mask(struct posix_acl_state *state, struct posix_ace_state *astate)
{
	state->mask.allow |= astate->allow;
}

static struct posix_acl *
posix_state_to_acl(struct posix_acl_state *state, unsigned int flags)
{
	struct posix_acl_entry *pace;
	struct posix_acl *pacl;
	int nace;
	int i;

	/*
	 * ACLs with no ACEs are treated differently in the inheritable
	 * and effective cases: when there are no inheritable ACEs,
	 * calls ->set_acl with a NULL ACL structure.
	 */
	if (state->empty && (flags & NFS4_ACL_TYPE_DEFAULT))
		return NULL;

	/*
	 * When there are no effective ACEs, the following will end
	 * up setting a 3-element effective posix ACL with all
	 * permissions zero.
*/ if (!state->users->n && !state->groups->n) nace = 3; else /* Note we also include a MASK ACE in this case: */ nace = 4 + state->users->n + state->groups->n; pacl = posix_acl_alloc(nace, GFP_KERNEL); if (!pacl) return ERR_PTR(-ENOMEM); pace = pacl->a_entries; pace->e_tag = ACL_USER_OBJ; low_mode_from_nfs4(state->owner.allow, &pace->e_perm, flags); for (i=0; i < state->users->n; i++) { pace++; pace->e_tag = ACL_USER; low_mode_from_nfs4(state->users->aces[i].perms.allow, &pace->e_perm, flags); pace->e_uid = state->users->aces[i].uid; add_to_mask(state, &state->users->aces[i].perms); } pace++; pace->e_tag = ACL_GROUP_OBJ; low_mode_from_nfs4(state->group.allow, &pace->e_perm, flags); add_to_mask(state, &state->group); for (i=0; i < state->groups->n; i++) { pace++; pace->e_tag = ACL_GROUP; low_mode_from_nfs4(state->groups->aces[i].perms.allow, &pace->e_perm, flags); pace->e_gid = state->groups->aces[i].gid; add_to_mask(state, &state->groups->aces[i].perms); } if (state->users->n || state->groups->n) { pace++; pace->e_tag = ACL_MASK; low_mode_from_nfs4(state->mask.allow, &pace->e_perm, flags); } pace++; pace->e_tag = ACL_OTHER; low_mode_from_nfs4(state->other.allow, &pace->e_perm, flags); return pacl; } static inline void allow_bits(struct posix_ace_state *astate, u32 mask) { /* Allow all bits in the mask not already denied: */ astate->allow |= mask & ~astate->deny; } static inline void deny_bits(struct posix_ace_state *astate, u32 mask) { /* Deny all bits in the mask not already allowed: */ astate->deny |= mask & ~astate->allow; } static int find_uid(struct posix_acl_state *state, kuid_t uid) { struct posix_ace_state_array *a = state->users; int i; for (i = 0; i < a->n; i++) if (uid_eq(a->aces[i].uid, uid)) return i; /* Not found: */ a->n++; a->aces[i].uid = uid; a->aces[i].perms.allow = state->everyone.allow; a->aces[i].perms.deny = state->everyone.deny; return i; } static int find_gid(struct posix_acl_state *state, kgid_t gid) { struct posix_ace_state_array *a = 
state->groups; int i; for (i = 0; i < a->n; i++) if (gid_eq(a->aces[i].gid, gid)) return i; /* Not found: */ a->n++; a->aces[i].gid = gid; a->aces[i].perms.allow = state->everyone.allow; a->aces[i].perms.deny = state->everyone.deny; return i; } static void deny_bits_array(struct posix_ace_state_array *a, u32 mask) { int i; for (i=0; i < a->n; i++) deny_bits(&a->aces[i].perms, mask); } static void allow_bits_array(struct posix_ace_state_array *a, u32 mask) { int i; for (i=0; i < a->n; i++) allow_bits(&a->aces[i].perms, mask); } static void process_one_v4_ace(struct posix_acl_state *state, struct nfs4_ace *ace) { u32 mask = ace->access_mask; int i; state->empty = 0; switch (ace2type(ace)) { case ACL_USER_OBJ: if (ace->type == NFS4_ACE_ACCESS_ALLOWED_ACE_TYPE) { allow_bits(&state->owner, mask); } else { deny_bits(&state->owner, mask); } break; case ACL_USER: i = find_uid(state, ace->who_uid); if (ace->type == NFS4_ACE_ACCESS_ALLOWED_ACE_TYPE) { allow_bits(&state->users->aces[i].perms, mask); } else { deny_bits(&state->users->aces[i].perms, mask); mask = state->users->aces[i].perms.deny; deny_bits(&state->owner, mask); } break; case ACL_GROUP_OBJ: if (ace->type == NFS4_ACE_ACCESS_ALLOWED_ACE_TYPE) { allow_bits(&state->group, mask); } else { deny_bits(&state->group, mask); mask = state->group.deny; deny_bits(&state->owner, mask); deny_bits(&state->everyone, mask); deny_bits_array(state->users, mask); deny_bits_array(state->groups, mask); } break; case ACL_GROUP: i = find_gid(state, ace->who_gid); if (ace->type == NFS4_ACE_ACCESS_ALLOWED_ACE_TYPE) { allow_bits(&state->groups->aces[i].perms, mask); } else { deny_bits(&state->groups->aces[i].perms, mask); mask = state->groups->aces[i].perms.deny; deny_bits(&state->owner, mask); deny_bits(&state->group, mask); deny_bits(&state->everyone, mask); deny_bits_array(state->users, mask); deny_bits_array(state->groups, mask); } break; case ACL_OTHER: if (ace->type == NFS4_ACE_ACCESS_ALLOWED_ACE_TYPE) { allow_bits(&state->owner, 
mask); allow_bits(&state->group, mask); allow_bits(&state->other, mask); allow_bits(&state->everyone, mask); allow_bits_array(state->users, mask); allow_bits_array(state->groups, mask); } else { deny_bits(&state->owner, mask); deny_bits(&state->group, mask); deny_bits(&state->other, mask); deny_bits(&state->everyone, mask); deny_bits_array(state->users, mask); deny_bits_array(state->groups, mask); } } } static int nfs4_acl_nfsv4_to_posix(struct nfs4_acl *acl, struct posix_acl **pacl, struct posix_acl **dpacl, unsigned int flags) { struct posix_acl_state effective_acl_state, default_acl_state; struct nfs4_ace *ace; int ret; ret = init_state(&effective_acl_state, acl->naces); if (ret) return ret; ret = init_state(&default_acl_state, acl->naces); if (ret) goto out_estate; ret = -EINVAL; for (ace = acl->aces; ace < acl->aces + acl->naces; ace++) { if (ace->type != NFS4_ACE_ACCESS_ALLOWED_ACE_TYPE && ace->type != NFS4_ACE_ACCESS_DENIED_ACE_TYPE) goto out_dstate; if (ace->flag & ~NFS4_SUPPORTED_FLAGS) goto out_dstate; if ((ace->flag & NFS4_INHERITANCE_FLAGS) == 0) { process_one_v4_ace(&effective_acl_state, ace); continue; } if (!(flags & NFS4_ACL_DIR)) goto out_dstate; /* * Note that when only one of FILE_INHERIT or DIRECTORY_INHERIT * is set, we're effectively turning on the other. That's OK, * according to rfc 3530. 
*/ process_one_v4_ace(&default_acl_state, ace); if (!(ace->flag & NFS4_ACE_INHERIT_ONLY_ACE)) process_one_v4_ace(&effective_acl_state, ace); } *pacl = posix_state_to_acl(&effective_acl_state, flags); if (IS_ERR(*pacl)) { ret = PTR_ERR(*pacl); *pacl = NULL; goto out_dstate; } *dpacl = posix_state_to_acl(&default_acl_state, flags | NFS4_ACL_TYPE_DEFAULT); if (IS_ERR(*dpacl)) { ret = PTR_ERR(*dpacl); *dpacl = NULL; posix_acl_release(*pacl); *pacl = NULL; goto out_dstate; } sort_pacl(*pacl); sort_pacl(*dpacl); ret = 0; out_dstate: free_state(&default_acl_state); out_estate: free_state(&effective_acl_state); return ret; } __be32 nfsd4_set_nfs4_acl(struct svc_rqst *rqstp, struct svc_fh *fhp, struct nfs4_acl *acl) { __be32 error; int host_error; struct dentry *dentry; struct inode *inode; struct posix_acl *pacl = NULL, *dpacl = NULL; unsigned int flags = 0; /* Get inode */ error = fh_verify(rqstp, fhp, 0, NFSD_MAY_SATTR); if (error) return error; dentry = fhp->fh_dentry; inode = d_inode(dentry); if (!inode->i_op->set_acl || !IS_POSIXACL(inode)) return nfserr_attrnotsupp; if (S_ISDIR(inode->i_mode)) flags = NFS4_ACL_DIR; host_error = nfs4_acl_nfsv4_to_posix(acl, &pacl, &dpacl, flags); if (host_error == -EINVAL) return nfserr_attrnotsupp; if (host_error < 0) goto out_nfserr; host_error = inode->i_op->set_acl(inode, pacl, ACL_TYPE_ACCESS); if (host_error < 0) goto out_release; if (S_ISDIR(inode->i_mode)) { host_error = inode->i_op->set_acl(inode, dpacl, ACL_TYPE_DEFAULT); } out_release: posix_acl_release(pacl); posix_acl_release(dpacl); out_nfserr: if (host_error == -EOPNOTSUPP) return nfserr_attrnotsupp; else return nfserrno(host_error); } static short ace2type(struct nfs4_ace *ace) { switch (ace->whotype) { case NFS4_ACL_WHO_NAMED: return (ace->flag & NFS4_ACE_IDENTIFIER_GROUP ? 
ACL_GROUP : ACL_USER); case NFS4_ACL_WHO_OWNER: return ACL_USER_OBJ; case NFS4_ACL_WHO_GROUP: return ACL_GROUP_OBJ; case NFS4_ACL_WHO_EVERYONE: return ACL_OTHER; } BUG(); return -1; } /* * return the size of the struct nfs4_acl required to represent an acl * with @entries entries. */ int nfs4_acl_bytes(int entries) { return sizeof(struct nfs4_acl) + entries * sizeof(struct nfs4_ace); } static struct { char *string; int stringlen; int type; } s2t_map[] = { { .string = "OWNER@", .stringlen = sizeof("OWNER@") - 1, .type = NFS4_ACL_WHO_OWNER, }, { .string = "GROUP@", .stringlen = sizeof("GROUP@") - 1, .type = NFS4_ACL_WHO_GROUP, }, { .string = "EVERYONE@", .stringlen = sizeof("EVERYONE@") - 1, .type = NFS4_ACL_WHO_EVERYONE, }, }; int nfs4_acl_get_whotype(char *p, u32 len) { int i; for (i = 0; i < ARRAY_SIZE(s2t_map); i++) { if (s2t_map[i].stringlen == len && 0 == memcmp(s2t_map[i].string, p, len)) return s2t_map[i].type; } return NFS4_ACL_WHO_NAMED; } __be32 nfs4_acl_write_who(struct xdr_stream *xdr, int who) { __be32 *p; int i; for (i = 0; i < ARRAY_SIZE(s2t_map); i++) { if (s2t_map[i].type != who) continue; p = xdr_reserve_space(xdr, s2t_map[i].stringlen + 4); if (!p) return nfserr_resource; p = xdr_encode_opaque(p, s2t_map[i].string, s2t_map[i].stringlen); return 0; } WARN_ON_ONCE(1); return nfserr_serverfault; }
./CrossVul/dataset_final_sorted/CWE-284/c/bad_4896_2
crossvul-cpp_data_bad_5075_1
/* Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* _ _ * _ __ ___ ___ __| | ___ ___| | mod_ssl * | '_ ` _ \ / _ \ / _` | / __/ __| | Apache Interface to OpenSSL * | | | | | | (_) | (_| | \__ \__ \ | * |_| |_| |_|\___/ \__,_|___|___/___/_| * |_____| * ssl_engine_kernel.c * The SSL engine kernel */ /* ``It took me fifteen years to discover I had no talent for programming, but I couldn't give it up because by that time I was too famous.'' -- Unknown */ #include "ssl_private.h" #include "mod_ssl.h" #include "util_md5.h" #include "scoreboard.h" static void ssl_configure_env(request_rec *r, SSLConnRec *sslconn); #ifdef HAVE_TLSEXT static int ssl_find_vhost(void *servername, conn_rec *c, server_rec *s); #endif #define SWITCH_STATUS_LINE "HTTP/1.1 101 Switching Protocols" #define UPGRADE_HEADER "Upgrade: TLS/1.0, HTTP/1.1" #define CONNECTION_HEADER "Connection: Upgrade" /* Perform an upgrade-to-TLS for the given request, per RFC 2817. 
 */
/*
 * Perform an RFC 2817 upgrade-to-TLS on @r's connection: send the
 * "101 Switching Protocols" interim response, then initialize mod_ssl
 * state on the connection and run the server-side TLS handshake.
 * Returns APR_SUCCESS, or an error status if the 101 response could not
 * be sent / the handshake did not complete (APR_ECONNABORTED).
 */
static apr_status_t upgrade_connection(request_rec *r)
{
    struct conn_rec *conn = r->connection;
    apr_bucket_brigade *bb;
    SSLConnRec *sslconn;
    apr_status_t rv;
    SSL *ssl;

    ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, r, APLOGNO(02028)
                  "upgrading connection to TLS");

    bb = apr_brigade_create(r->pool, conn->bucket_alloc);

    /* Write the 101 interim response and flush it to the client before
     * any TLS bytes go on the wire. */
    rv = ap_fputs(conn->output_filters, bb, SWITCH_STATUS_LINE CRLF
                  UPGRADE_HEADER CRLF CONNECTION_HEADER CRLF CRLF);
    if (rv == APR_SUCCESS) {
        APR_BRIGADE_INSERT_TAIL(bb,
                                apr_bucket_flush_create(conn->bucket_alloc));
        rv = ap_pass_brigade(conn->output_filters, bb);
    }

    if (rv) {
        ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(02029)
                      "failed to send 101 interim response for connection "
                      "upgrade");
        return rv;
    }

    ssl_init_ssl_connection(conn, r);

    sslconn = myConnConfig(conn);
    ssl = sslconn->ssl;

    /* Perform initial SSL handshake. */
    SSL_set_accept_state(ssl);
    SSL_do_handshake(ssl);

    if (!SSL_is_init_finished(ssl)) {
        ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(02030)
                      "TLS upgrade handshake failed");
        ssl_log_ssl_error(SSLLOG_MARK, APLOG_ERR, r->server);

        return APR_ECONNABORTED;
    }

    return APR_SUCCESS;
}

/* Perform a speculative (and non-blocking) read from the connection
 * filters for the given request, to determine whether there is any
 * pending data to read.  Return non-zero if there is, else zero.
 */
static int has_buffered_data(request_rec *r)
{
    apr_bucket_brigade *bb;
    apr_off_t len;
    apr_status_t rv;
    int result;

    bb = apr_brigade_create(r->pool, r->connection->bucket_alloc);

    rv = ap_get_brigade(r->connection->input_filters, bb, AP_MODE_SPECULATIVE,
                        APR_NONBLOCK_READ, 1);
    result = rv == APR_SUCCESS
        && apr_brigade_length(bb, 1, &len) == APR_SUCCESS
        && len > 0;

    apr_brigade_destroy(bb);

    return result;
}

/*
 * Return non-zero iff the two string arrays contain the same set of
 * strings (order-insensitive; compares by value, not pointer).
 */
static int ap_array_same_str_set(apr_array_header_t *s1,
                                 apr_array_header_t *s2)
{
    int i;
    const char *c;

    if (s1 == s2) {
        return 1;
    }
    else if (!s1 || !s2 || (s1->nelts != s2->nelts)) {
        return 0;
    }

    for (i = 0; i < s1->nelts; i++) {
        c = APR_ARRAY_IDX(s1, i, const char *);
        if (!c || !ap_array_str_contains(s2, c)) {
            return 0;
        }
    }
    return 1;
}

/*
 * Return non-zero iff two server PK configurations use the same CA name
 * paths/files and the same certificate and key file sets — i.e. a
 * request could be served under either without renegotiation.
 */
static int ssl_pk_server_compatible(modssl_pk_server_t *pks1,
                                    modssl_pk_server_t *pks2)
{
    if (!pks1 || !pks2) {
        return 0;
    }
    /* both have the same certificates? */
    if ((pks1->ca_name_path != pks2->ca_name_path)
        && (!pks1->ca_name_path || !pks2->ca_name_path
            || strcmp(pks1->ca_name_path, pks2->ca_name_path))) {
        return 0;
    }
    if ((pks1->ca_name_file != pks2->ca_name_file)
        && (!pks1->ca_name_file || !pks2->ca_name_file
            || strcmp(pks1->ca_name_file, pks2->ca_name_file))) {
        return 0;
    }
    if (!ap_array_same_str_set(pks1->cert_files, pks2->cert_files)
        || !ap_array_same_str_set(pks1->key_files, pks2->key_files)) {
        return 0;
    }
    return 1;
}

/*
 * Return non-zero iff two auth contexts agree on client-verification
 * settings, CA path/file and cipher suite.
 */
static int ssl_auth_compatible(modssl_auth_ctx_t *a1,
                               modssl_auth_ctx_t *a2)
{
    if (!a1 || !a2) {
        return 0;
    }
    /* both have the same verification */
    if ((a1->verify_depth != a2->verify_depth)
        || (a1->verify_mode != a2->verify_mode)) {
        return 0;
    }
    /* both have the same ca path/file */
    if ((a1->ca_cert_path != a2->ca_cert_path)
        && (!a1->ca_cert_path || !a2->ca_cert_path
            || strcmp(a1->ca_cert_path, a2->ca_cert_path))) {
        return 0;
    }
    if ((a1->ca_cert_file != a2->ca_cert_file)
        && (!a1->ca_cert_file || !a2->ca_cert_file
            || strcmp(a1->ca_cert_file, a2->ca_cert_file))) {
        return 0;
    }
    /* both have the same ca cipher suite string */
    if ((a1->cipher_suite != a2->cipher_suite)
        && (!a1->cipher_suite || !a2->cipher_suite
            || strcmp(a1->cipher_suite, a2->cipher_suite))) {
        return 0;
    }
    return 1;
}

/*
 * Return non-zero iff two modssl contexts are interchangeable: same
 * protocol, compatible auth settings and compatible server PK setup.
 */
static int ssl_ctx_compatible(modssl_ctx_t *ctx1,
                              modssl_ctx_t *ctx2)
{
    if (!ctx1 || !ctx2
        || (ctx1->protocol != ctx2->protocol)
        || !ssl_auth_compatible(&ctx1->auth, &ctx2->auth)
        || !ssl_pk_server_compatible(ctx1->pks, ctx2->pks)) {
        return 0;
    }
    return 1;
}

/*
 * Return non-zero iff the SSL configurations of two virtual hosts are
 * compatible, i.e. a connection handshaken against s1 may serve
 * requests addressed to s2 without renegotiation.
 */
static int ssl_server_compatible(server_rec *s1, server_rec *s2)
{
    SSLSrvConfigRec *sc1 = s1? mySrvConfig(s1) : NULL;
    SSLSrvConfigRec *sc2 = s2? mySrvConfig(s2) : NULL;

    /* both use the same TLS protocol? */
    if (!sc1 || !sc2
        || !ssl_ctx_compatible(sc1->server, sc2->server)) {
        return 0;
    }

    return 1;
}

/*
 *  Post Read Request Handler
 */
int ssl_hook_ReadReq(request_rec *r)
{
    SSLSrvConfigRec *sc = mySrvConfig(r->server);
    SSLConnRec *sslconn;
    const char *upgrade;
#ifdef HAVE_TLSEXT
    const char *servername;
#endif
    SSL *ssl;

    /* Perform TLS upgrade here if "SSLEngine optional" is configured,
     * SSL is not already set up for this connection, and the client
     * has sent a suitable Upgrade header. */
    if (sc->enabled == SSL_ENABLED_OPTIONAL && !myConnConfig(r->connection)
        && (upgrade = apr_table_get(r->headers_in, "Upgrade")) != NULL
        && ap_find_token(r->pool, upgrade, "TLS/1.0")) {
        if (upgrade_connection(r)) {
            return AP_FILTER_ERROR;
        }
    }

    /* If we are on a slave connection, we do not expect to have an SSLConnRec,
     * but our master connection might. */
    sslconn = myConnConfig(r->connection);
    if (!(sslconn && sslconn->ssl) && r->connection->master) {
        sslconn = myConnConfig(r->connection->master);
    }

    /* If "SSLEngine optional" is configured, this is not an SSL
     * connection, and this isn't a subrequest, send an Upgrade
     * response header.  Note this must happen before map_to_storage
     * and OPTIONS * request processing is completed.
     */
    if (sc->enabled == SSL_ENABLED_OPTIONAL && !(sslconn && sslconn->ssl)
        && !r->main) {
        apr_table_setn(r->headers_out, "Upgrade", "TLS/1.0, HTTP/1.1");
        apr_table_mergen(r->headers_out, "Connection", "upgrade");
    }

    if (!sslconn) {
        return DECLINED;
    }

    if (sslconn->non_ssl_request == NON_SSL_SET_ERROR_MSG) {
        apr_table_setn(r->notes, "error-notes",
                       "Reason: You're speaking plain HTTP to an SSL-enabled "
                       "server port.<br />\n Instead use the HTTPS scheme to "
                       "access this URL, please.<br />\n");

        /* Now that we have caught this error, forget it. we are done
         * with using SSL on this request.
         */
        sslconn->non_ssl_request = NON_SSL_OK;

        return HTTP_BAD_REQUEST;
    }

    /*
     * Get the SSL connection structure and perform the
     * delayed interlinking from SSL back to request_rec
     */
    ssl = sslconn->ssl;
    if (!ssl) {
        return DECLINED;
    }

#ifdef HAVE_TLSEXT
    /*
     * Perform SNI checks only on the initial request. In particular,
     * if these checks detect a problem, the checks shouldn't return an
     * error again when processing an ErrorDocument redirect for the
     * original problem.
     */
    if (r->proxyreq != PROXYREQ_PROXY && ap_is_initial_req(r)) {
        server_rec *handshakeserver = sslconn->server;
        SSLSrvConfigRec *hssc = mySrvConfig(handshakeserver);

        if ((servername = SSL_get_servername(ssl, TLSEXT_NAMETYPE_host_name))) {
            /*
             * The SNI extension supplied a hostname. So don't accept requests
             * with either no hostname or a hostname that selected a different
             * virtual host than the one used for the handshake, causing
             * different SSL parameters to be applied, such as SSLProtocol,
             * SSLCACertificateFile/Path and SSLCADNRequestFile/Path which
             * cannot be renegotiated (SSLCA* due to current limitations in
             * OpenSSL, see:
             * http://mail-archives.apache.org/mod_mbox/httpd-dev/200806.mbox/%3C48592955.2090303@velox.ch%3E
             * and
             * http://mail-archives.apache.org/mod_mbox/httpd-dev/201312.mbox/%3CCAKQ1sVNpOrdiBm-UPw1hEdSN7YQXRRjeaT-MCWbW_7mN%3DuFiOw%40mail.gmail.com%3E
             * )
             */
            if (!r->hostname) {
                ap_log_error(APLOG_MARK, APLOG_ERR, 0, r->server, APLOGNO(02031)
                             "Hostname %s provided via SNI, but no hostname"
                             " provided in HTTP request", servername);
                return HTTP_BAD_REQUEST;
            }
            if (r->server != handshakeserver
                && !ssl_server_compatible(sslconn->server, r->server)) {
                /*
                 * The request does not select the virtual host that was
                 * selected by the SNI and its SSL parameters are different
                 */
                ap_log_error(APLOG_MARK, APLOG_ERR, 0, r->server, APLOGNO(02032)
                             "Hostname %s provided via SNI and hostname %s provided"
                             " via HTTP have no compatible SSL setup",
                             servername, r->hostname);
                return HTTP_MISDIRECTED_REQUEST;
            }
        }
        else if (((sc->strict_sni_vhost_check == SSL_ENABLED_TRUE)
                 || hssc->strict_sni_vhost_check == SSL_ENABLED_TRUE)
                 && r->connection->vhost_lookup_data) {
            /*
             * We are using a name based configuration here, but no hostname was
             * provided via SNI. Don't allow that if are requested to do strict
             * checking. Check whether this strict checking was set up either in the
             * server config we used for handshaking or in our current server.
             * This should avoid insecure configuration by accident.
             */
            ap_log_error(APLOG_MARK, APLOG_ERR, 0, r->server, APLOGNO(02033)
                         "No hostname was provided via SNI for a name based"
                         " virtual host");
            apr_table_setn(r->notes, "error-notes",
                           "Reason: The client software did not provide a "
                           "hostname using Server Name Indication (SNI), "
                           "which is required to access this server.<br />\n");
            return HTTP_FORBIDDEN;
        }
    }
#endif
    modssl_set_app_data2(ssl, r);

    /*
     * Log information about incoming HTTPS requests
     */
    /* NOTE(review): guarded by APLOGrinfo() but logged at APLOG_DEBUG —
     * looks inconsistent with the guard; confirm intended level. */
    if (APLOGrinfo(r) && ap_is_initial_req(r)) {
        ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(02034)
                      "%s HTTPS request received for child %ld (server %s)",
                      (r->connection->keepalives <= 0 ?
                       "Initial (No.1)" :
                       apr_psprintf(r->pool, "Subsequent (No.%d)",
                                    r->connection->keepalives+1)),
                      r->connection->id,
                      ssl_util_vhostid(r->pool, r->server));
    }

    /* SetEnvIf ssl-*-shutdown flags can only be per-server,
     * so they won't change across keepalive requests
     */
    if (sslconn->shutdown_type == SSL_SHUTDOWN_TYPE_UNSET) {
        ssl_configure_env(r, sslconn);
    }

    return DECLINED;
}

/*
 *  Move SetEnvIf information from request_rec to conn_rec/BUFF
 *  to allow the close connection handler to use them.
 */
static void ssl_configure_env(request_rec *r, SSLConnRec *sslconn)
{
    int i;
    const apr_array_header_t *arr = apr_table_elts(r->subprocess_env);
    const apr_table_entry_t *elts = (const apr_table_entry_t *)arr->elts;

    sslconn->shutdown_type = SSL_SHUTDOWN_TYPE_STANDARD;

    for (i = 0; i < arr->nelts; i++) {
        const char *key = elts[i].key;

        switch (*key) {
          case 's':
            /* being case-sensitive here.
             * and not checking for the -shutdown since these are the only
             * SetEnvIf "flags" we support
             */
            if (!strncmp(key+1, "sl-", 3)) {
                key += 4;
                if (!strncmp(key, "unclean", 7)) {
                    sslconn->shutdown_type = SSL_SHUTDOWN_TYPE_UNCLEAN;
                }
                else if (!strncmp(key, "accurate", 8)) {
                    sslconn->shutdown_type = SSL_SHUTDOWN_TYPE_ACCURATE;
                }
                return; /* should only ever be one ssl-*-shutdown */
            }
            break;
        }
    }
}

/*
 *  Access Handler
 */
int ssl_hook_Access(request_rec *r)
{
    SSLDirConfigRec *dc         = myDirConfig(r);
    SSLSrvConfigRec *sc         = mySrvConfig(r->server);
    SSLConnRec *sslconn         = myConnConfig(r->connection);
    SSL *ssl                    = sslconn ? sslconn->ssl : NULL;
    server_rec *handshakeserver = sslconn ? sslconn->server : NULL;
    SSLSrvConfigRec *hssc       = handshakeserver? mySrvConfig(handshakeserver) : NULL;
    SSL_CTX *ctx = NULL;
    apr_array_header_t *requires;
    ssl_require_t *ssl_requires;
    int ok, i;
    BOOL renegotiate = FALSE, renegotiate_quick = FALSE;
    X509 *cert;
    X509 *peercert;
    X509_STORE *cert_store = NULL;
    X509_STORE_CTX *cert_store_ctx;
    STACK_OF(SSL_CIPHER) *cipher_list_old = NULL, *cipher_list = NULL;
    const SSL_CIPHER *cipher = NULL;
    int depth, verify_old, verify, n, is_slave = 0;
    const char *ncipher_suite;

    /* On a slave connection, we do not expect to have an SSLConnRec, but
     * our master connection might have one. */
    if (!(sslconn && ssl) && r->connection->master) {
        sslconn = myConnConfig(r->connection->master);
        ssl = sslconn ? sslconn->ssl : NULL;
        handshakeserver = sslconn ? sslconn->server : NULL;
        hssc = handshakeserver? mySrvConfig(handshakeserver) : NULL;
        is_slave = 1;
    }

    if (ssl) {
        /*
         * We should have handshaken here (on handshakeserver),
         * otherwise we are being redirected (ErrorDocument) from
         * a renegotiation failure below. The access is still
         * forbidden in the latter case, let ap_die() handle
         * this recursive (same) error.
*/ if (!SSL_is_init_finished(ssl)) { return HTTP_FORBIDDEN; } ctx = SSL_get_SSL_CTX(ssl); } /* * Support for SSLRequireSSL directive */ if (dc->bSSLRequired && !ssl) { if ((sc->enabled == SSL_ENABLED_OPTIONAL) && !is_slave) { /* This vhost was configured for optional SSL, just tell the * client that we need to upgrade. */ apr_table_setn(r->err_headers_out, "Upgrade", "TLS/1.0, HTTP/1.1"); apr_table_setn(r->err_headers_out, "Connection", "Upgrade"); return HTTP_UPGRADE_REQUIRED; } ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(02219) "access to %s failed, reason: %s", r->filename, "SSL connection required"); /* remember forbidden access for strict require option */ apr_table_setn(r->notes, "ssl-access-forbidden", "1"); return HTTP_FORBIDDEN; } /* * Check to see whether SSL is in use; if it's not, then no * further access control checks are relevant. (the test for * sc->enabled is probably strictly unnecessary) */ if (sc->enabled == SSL_ENABLED_FALSE || !ssl) { return DECLINED; } #ifdef HAVE_SRP /* * Support for per-directory reconfigured SSL connection parameters * * We do not force any renegotiation if the user is already authenticated * via SRP. * */ if (SSL_get_srp_username(ssl)) { return DECLINED; } #endif /* * Support for per-directory reconfigured SSL connection parameters. * * This is implemented by forcing an SSL renegotiation with the * reconfigured parameter suite. But Apache's internal API processing * makes our life very hard here, because when internal sub-requests occur * we nevertheless should avoid multiple unnecessary SSL handshakes (they * require extra network I/O and especially time to perform). * * But the optimization for filtering out the unnecessary handshakes isn't * obvious and trivial. Especially because while Apache is in its * sub-request processing the client could force additional handshakes, * too. And these take place perhaps without our notice. 
So the only * possibility is to explicitly _ask_ OpenSSL whether the renegotiation * has to be performed or not. It has to performed when some parameters * which were previously known (by us) are not those we've now * reconfigured (as known by OpenSSL) or (in optimized way) at least when * the reconfigured parameter suite is stronger (more restrictions) than * the currently active one. */ /* * Override of SSLCipherSuite * * We provide two options here: * * o The paranoid and default approach where we force a renegotiation when * the cipher suite changed in _any_ way (which is straight-forward but * often forces renegotiations too often and is perhaps not what the * user actually wanted). * * o The optimized and still secure way where we force a renegotiation * only if the currently active cipher is no longer contained in the * reconfigured/new cipher suite. Any other changes are not important * because it's the servers choice to select a cipher from the ones the * client supports. So as long as the current cipher is still in the new * cipher suite we're happy. Because we can assume we would have * selected it again even when other (better) ciphers exists now in the * new cipher suite. This approach is fine because the user explicitly * has to enable this via ``SSLOptions +OptRenegotiate''. So we do no * implicit optimizations. */ ncipher_suite = (dc->szCipherSuite? dc->szCipherSuite : (r->server != handshakeserver)? sc->server->auth.cipher_suite : NULL); if (ncipher_suite && (!sslconn->cipher_suite || strcmp(ncipher_suite, sslconn->cipher_suite))) { /* remember old state */ if (dc->nOptions & SSL_OPT_OPTRENEGOTIATE) { cipher = SSL_get_current_cipher(ssl); } else { cipher_list_old = (STACK_OF(SSL_CIPHER) *)SSL_get_ciphers(ssl); if (cipher_list_old) { cipher_list_old = sk_SSL_CIPHER_dup(cipher_list_old); } } /* configure new state */ if (is_slave) { /* TODO: this categorically fails changed cipher suite settings * on slave connections. 
                   We could do better by
                 * - create a new SSL* from our SSL_CTX and set cipher suite there,
                 *   and retrieve ciphers, free afterwards
                 * Modifying the SSL on a slave connection is no good.
                 */
                apr_table_setn(r->notes, "ssl-renegotiate-forbidden",
                               "cipher-suite");
                return HTTP_FORBIDDEN;
            }

            /* Apply the per-directory cipher suite to the live SSL*. */
            if (!SSL_set_cipher_list(ssl, ncipher_suite)) {
                ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r, APLOGNO(02253)
                              "Unable to reconfigure (per-directory) "
                              "permitted SSL ciphers");
                ssl_log_ssl_error(SSLLOG_MARK, APLOG_ERR, r->server);

                if (cipher_list_old) {
                    sk_SSL_CIPHER_free(cipher_list_old);
                }

                return HTTP_FORBIDDEN;
            }

            /* determine whether a renegotiation has to be forced */
            cipher_list = (STACK_OF(SSL_CIPHER) *)SSL_get_ciphers(ssl);

            if (dc->nOptions & SSL_OPT_OPTRENEGOTIATE) {
                /* optimized way: only check whether the cipher negotiated in
                 * the handshake is still permitted by the new list */
                if ((!cipher && cipher_list) ||
                    (cipher && !cipher_list))
                {
                    renegotiate = TRUE;
                }
                else if (cipher && cipher_list &&
                         (sk_SSL_CIPHER_find(cipher_list, cipher) < 0))
                {
                    renegotiate = TRUE;
                }
            }
            else {
                /* paranoid way: force renegotiation unless old and new cipher
                 * lists are set-equal (compare both directions) */
                if ((!cipher_list_old && cipher_list) ||
                    (cipher_list_old && !cipher_list))
                {
                    renegotiate = TRUE;
                }
                else if (cipher_list_old && cipher_list) {
                    for (n = 0;
                         !renegotiate && (n < sk_SSL_CIPHER_num(cipher_list));
                         n++)
                    {
                        const SSL_CIPHER *value = sk_SSL_CIPHER_value(cipher_list, n);

                        if (sk_SSL_CIPHER_find(cipher_list_old, value) < 0) {
                            renegotiate = TRUE;
                        }
                    }

                    for (n = 0;
                         !renegotiate && (n < sk_SSL_CIPHER_num(cipher_list_old));
                         n++)
                    {
                        const SSL_CIPHER *value = sk_SSL_CIPHER_value(cipher_list_old, n);

                        if (sk_SSL_CIPHER_find(cipher_list, value) < 0) {
                            renegotiate = TRUE;
                        }
                    }
                }
            }

            /* cleanup */
            if (cipher_list_old) {
                sk_SSL_CIPHER_free(cipher_list_old);
            }

            if (renegotiate) {
                if (is_slave) {
                    /* The request causes renegotiation on a slave connection.
                     * This is not allowed since we might have concurrent requests
                     * on this connection.
                     */
                    apr_table_setn(r->notes, "ssl-renegotiate-forbidden",
                                   "cipher-suite");
                    return HTTP_FORBIDDEN;
                }
#ifdef SSL_OP_CIPHER_SERVER_PREFERENCE
                if (sc->cipher_server_pref == TRUE) {
                    SSL_set_options(ssl, SSL_OP_CIPHER_SERVER_PREFERENCE);
                }
#endif
                /* tracing */
                ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(02220)
                              "Reconfigured cipher suite will force renegotiation");
            }
        }

        /*
         * override of SSLVerifyClient
         *
         * We force a renegotiation if the reconfigured/new verify type is
         * stronger than the currently active verify type.
         *
         * The order is: none << optional_no_ca << optional << require
         *
         * Additionally the following optimization is possible here: When the
         * currently active verify type is "none" but a client certificate is
         * already known/present, it's enough to manually force a client
         * verification but at least skip the I/O-intensive renegotiation
         * handshake.
         */
        if ((dc->nVerifyClient != SSL_CVERIFY_UNSET) ||
            (sc->server->auth.verify_mode != SSL_CVERIFY_UNSET))
        {
            /* remember old state */
            verify_old = SSL_get_verify_mode(ssl);

            /* configure new state */
            verify = SSL_VERIFY_NONE;

            if ((dc->nVerifyClient == SSL_CVERIFY_REQUIRE) ||
                (sc->server->auth.verify_mode == SSL_CVERIFY_REQUIRE))
            {
                verify |= SSL_VERIFY_PEER_STRICT;
            }

            if ((dc->nVerifyClient == SSL_CVERIFY_OPTIONAL) ||
                (dc->nVerifyClient == SSL_CVERIFY_OPTIONAL_NO_CA) ||
                (sc->server->auth.verify_mode == SSL_CVERIFY_OPTIONAL) ||
                (sc->server->auth.verify_mode == SSL_CVERIFY_OPTIONAL_NO_CA))
            {
                verify |= SSL_VERIFY_PEER;
            }

            /* TODO: this seems premature since we do not know if there
             *       are any changes required.
             */
            SSL_set_verify(ssl, verify, ssl_callback_SSLVerify);
            SSL_set_verify_result(ssl, X509_V_OK);

            /* determine whether we've to force a renegotiation */
            if (!renegotiate && verify != verify_old) {
                if (((verify_old == SSL_VERIFY_NONE) &&
                     (verify != SSL_VERIFY_NONE)) ||

                    (!(verify_old & SSL_VERIFY_PEER) &&
                     (verify & SSL_VERIFY_PEER)) ||

                    (!(verify_old & SSL_VERIFY_FAIL_IF_NO_PEER_CERT) &&
                     (verify & SSL_VERIFY_FAIL_IF_NO_PEER_CERT)))
                {
                    renegotiate = TRUE;

                    if (is_slave) {
                        /* The request causes renegotiation on a slave connection.
                         * This is not allowed since we might have concurrent requests
                         * on this connection.
                         */
                        apr_table_setn(r->notes, "ssl-renegotiate-forbidden",
                                       "verify-client");
                        return HTTP_FORBIDDEN;
                    }

                    /* optimization: if the old mode was "none" but a client
                     * cert is already present, skip the full handshake and
                     * only re-verify the cached certificate chain below */
                    if ((dc->nOptions & SSL_OPT_OPTRENEGOTIATE) &&
                        (verify_old == SSL_VERIFY_NONE) &&
                        ((peercert = SSL_get_peer_certificate(ssl)) != NULL))
                    {
                        renegotiate_quick = TRUE;
                        X509_free(peercert);
                    }

                    ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(02255)
                                  "Changed client verification type will force "
                                  "%srenegotiation",
                                  renegotiate_quick ? "quick " : "");
                }
                else if (verify != SSL_VERIFY_NONE) {
                    /*
                     * override of SSLVerifyDepth
                     *
                     * The depth checks are handled by us manually inside the
                     * verify callback function and not by OpenSSL internally
                     * (and our function is aware of both the per-server and
                     * per-directory contexts). So we cannot ask OpenSSL about
                     * the currently verify depth. Instead we remember it in our
                     * SSLConnRec attached to the SSL* of OpenSSL. We've to force
                     * the renegotiation if the reconfigured/new verify depth is
                     * less than the currently active/remembered verify depth
                     * (because this means more restriction on the certificate
                     * chain).
                     */
                    n = (sslconn->verify_depth != UNSET) ?
                        sslconn->verify_depth :
                        hssc->server->auth.verify_depth;

                    /* determine the new depth */
                    sslconn->verify_depth = (dc->nVerifyDepth != UNSET) ?
                                            dc->nVerifyDepth :
                                            sc->server->auth.verify_depth;
                    if (sslconn->verify_depth < n) {
                        renegotiate = TRUE;
                        ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(02254)
                                      "Reduced client verification depth will "
                                      "force renegotiation");
                    }
                }
            }

            /* If we're handling a request for a vhost other than the default one,
             * then we need to make sure that client authentication is properly
             * enforced. For clients supplying an SNI extension, the peer
             * certificate verification has happened in the handshake already
             * (and r->server == handshakeserver). For non-SNI requests,
             * an additional check is needed here. If client authentication
             * is configured as mandatory, then we can only proceed if the
             * CA list doesn't have to be changed (OpenSSL doesn't provide
             * an option to change the list for an existing session).
             */
            if ((r->server != handshakeserver)
                && renegotiate
                && ((verify & SSL_VERIFY_PEER) ||
                    (verify & SSL_VERIFY_FAIL_IF_NO_PEER_CERT))) {
#define MODSSL_CFG_CA_NE(f, sc1, sc2) \
    (sc1->server->auth.f && \
     (!sc2->server->auth.f || \
      strNE(sc1->server->auth.f, sc2->server->auth.f)))

                if (MODSSL_CFG_CA_NE(ca_cert_file, sc, hssc) ||
                    MODSSL_CFG_CA_NE(ca_cert_path, sc, hssc)) {
                    if (verify & SSL_VERIFY_FAIL_IF_NO_PEER_CERT) {
                        ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, r, APLOGNO(02256)
                                      "Non-default virtual host with SSLVerify set to "
                                      "'require' and VirtualHost-specific CA certificate "
                                      "list is only available to clients with TLS server "
                                      "name indication (SNI) support");
                        SSL_set_verify(ssl, verify_old, NULL);
                        return HTTP_FORBIDDEN;
                    }
                    else
                        /* let it pass, possibly with an "incorrect" peer cert,
                         * so make sure the SSL_CLIENT_VERIFY environment variable
                         * will indicate partial success only, later on.
                         */
                        sslconn->verify_info = "GENEROUS";
                }
            }
        }

        /* If a renegotiation is now required for this location, and the
         * request includes a message body (and the client has not
         * requested a "100 Continue" response), then the client will be
         * streaming the request body over the wire already.  In that
         * case, it is not possible to stop and perform a new SSL
         * handshake immediately; once the SSL library moves to the
         * "accept" state, it will reject the SSL packets which the client
         * is sending for the request body.
         *
         * To allow authentication to complete in this auth hook, the
         * solution used here is to fill a (bounded) buffer with the
         * request body, and then to reinject that request body later.
         */
        if (renegotiate && !renegotiate_quick
            && (apr_table_get(r->headers_in, "transfer-encoding")
                || (apr_table_get(r->headers_in, "content-length")
                    && strcmp(apr_table_get(r->headers_in, "content-length"), "0")))
            && !r->expecting_100) {
            int rv;
            apr_size_t rsize;

            rsize = dc->nRenegBufferSize == UNSET ? DEFAULT_RENEG_BUFFER_SIZE :
                                                    dc->nRenegBufferSize;
            if (rsize > 0) {
                /* Fill the I/O buffer with the request body if possible. */
                rv = ssl_io_buffer_fill(r, rsize);
            }
            else {
                /* If the reneg buffer size is set to zero, just fail. */
                rv = HTTP_REQUEST_ENTITY_TOO_LARGE;
            }

            if (rv) {
                ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(02257)
                              "could not buffer message body to allow "
                              "SSL renegotiation to proceed");
                return rv;
            }
        }

        /*
         * now do the renegotiation if anything was actually reconfigured
         */
        if (renegotiate) {
            /*
             * Now we force the SSL renegotiation by sending the Hello Request
             * message to the client. Here we have to do a workaround: Actually
             * OpenSSL returns immediately after sending the Hello Request (the
             * intent AFAIK is because the SSL/TLS protocol says it's not a must
             * that the client replies to a Hello Request). But because we insist
             * on a reply (anything else is an error for us) we have to go to the
             * ACCEPT state manually. Using SSL_set_accept_state() doesn't work
             * here because it resets too much of the connection. So we set the
             * state explicitly and continue the handshake manually.
             */
            ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, r, APLOGNO(02221)
                          "Requesting connection re-negotiation");

            if (renegotiate_quick) {
                STACK_OF(X509) *cert_stack;

                /* perform just a manual re-verification of the peer */
                ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(02258)
                              "Performing quick renegotiation: "
                              "just re-verifying the peer");

                cert_stack = (STACK_OF(X509) *)SSL_get_peer_cert_chain(ssl);

                cert = SSL_get_peer_certificate(ssl);

                if (!cert_stack && cert) {
                    /* client cert is in the session cache, but there is
                     * no chain, since ssl3_get_client_certificate()
                     * sk_X509_shift-ed the peer cert out of the chain.
                     * we put it back here for the purpose of quick_renegotiation.
                     */
                    cert_stack = sk_X509_new_null();
                    sk_X509_push(cert_stack, cert);
                }

                if (!cert_stack || (sk_X509_num(cert_stack) == 0)) {
                    ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(02222)
                                  "Cannot find peer certificate chain");

                    return HTTP_FORBIDDEN;
                }

                if (!(cert_store ||
                      (cert_store = SSL_CTX_get_cert_store(ctx))))
                {
                    ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(02223)
                                  "Cannot find certificate storage");

                    return HTTP_FORBIDDEN;
                }

                if (!cert) {
                    cert = sk_X509_value(cert_stack, 0);
                }

                /* Verify the cached chain manually against the store instead
                 * of doing a full I/O handshake. */
                cert_store_ctx = X509_STORE_CTX_new();
                X509_STORE_CTX_init(cert_store_ctx, cert_store, cert, cert_stack);
                depth = SSL_get_verify_depth(ssl);

                if (depth >= 0) {
                    X509_STORE_CTX_set_depth(cert_store_ctx, depth);
                }

                X509_STORE_CTX_set_ex_data(cert_store_ctx,
                                           SSL_get_ex_data_X509_STORE_CTX_idx(),
                                           (char *)ssl);

                if (!X509_verify_cert(cert_store_ctx)) {
                    ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(02224)
                                  "Re-negotiation verification step failed");
                    ssl_log_ssl_error(SSLLOG_MARK, APLOG_ERR, r->server);
                }

                SSL_set_verify_result(ssl, X509_STORE_CTX_get_error(cert_store_ctx));
                X509_STORE_CTX_cleanup(cert_store_ctx);
                X509_STORE_CTX_free(cert_store_ctx);

                if (cert_stack != SSL_get_peer_cert_chain(ssl)) {
                    /* we created this ourselves, so free it */
                    sk_X509_pop_free(cert_stack, X509_free);
                }
            }
            else {
                char peekbuf[1];
                const char *reneg_support;
                request_rec *id = r->main ? r->main : r;

                /* Additional mitigation for CVE-2009-3555: At this point,
                 * before renegotiating, an (entire) request has been read
                 * from the connection. An attacker may have sent further
                 * data to "prefix" any subsequent request by the victim's
                 * client after the renegotiation; this data may already
                 * have been read and buffered. Forcing a connection
                 * closure after the response ensures such data will be
                 * discarded. Legitimately pipelined HTTP requests will be
                 * retried anyway with this approach.
                 */
                if (has_buffered_data(r)) {
                    ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(02259)
                                  "insecure SSL re-negotiation required, but "
                                  "a pipelined request is present; keepalive "
                                  "disabled");
                    r->connection->keepalive = AP_CONN_CLOSE;
                }

#if defined(SSL_get_secure_renegotiation_support)
                reneg_support = SSL_get_secure_renegotiation_support(ssl) ?
                                "client does" : "client does not";
#else
                reneg_support = "server does not";
#endif
                /* Perform a full renegotiation. */
                ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(02260)
                              "Performing full renegotiation: complete handshake "
                              "protocol (%s support secure renegotiation)",
                              reneg_support);

                SSL_set_session_id_context(ssl,
                                           (unsigned char *)&id,
                                           sizeof(id));

                /* Toggle the renegotiation state to allow the new
                 * handshake to proceed. */
                sslconn->reneg_state = RENEG_ALLOW;

                SSL_renegotiate(ssl);
                SSL_do_handshake(ssl);

                if (!SSL_is_init_finished(ssl)) {
                    ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(02225)
                                  "Re-negotiation request failed");
                    ssl_log_ssl_error(SSLLOG_MARK, APLOG_ERR, r->server);

                    r->connection->keepalive = AP_CONN_CLOSE;
                    return HTTP_FORBIDDEN;
                }

                ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, r, APLOGNO(02226)
                              "Awaiting re-negotiation handshake");

                /* XXX: Should replace setting state with SSL_renegotiate(ssl);
                 * However, this causes failures in perl-framework currently,
                 * perhaps pre-test if we have already negotiated?
                 */
                /* Need to trigger renegotiation handshake by reading.
                 * Peeking 0 bytes actually works.
                 * See: http://marc.info/?t=145493359200002&r=1&w=2
                 */
                SSL_peek(ssl, peekbuf, 0);

                /* Handshake driven (or failed): close the renegotiation
                 * window opened via RENEG_ALLOW above. */
                sslconn->reneg_state = RENEG_REJECT;

                if (!SSL_is_init_finished(ssl)) {
                    ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(02261)
                                  "Re-negotiation handshake failed");
                    ssl_log_ssl_error(SSLLOG_MARK, APLOG_ERR, r->server);

                    r->connection->keepalive = AP_CONN_CLOSE;
                    return HTTP_FORBIDDEN;
                }

                /* Full renegotiation successful, we now have handshaken with
                 * this server's parameters.
                 */
                sslconn->server = r->server;
            }

            /*
             * Remember the peer certificate's DN
             */
            if ((cert = SSL_get_peer_certificate(ssl))) {
                if (sslconn->client_cert) {
                    X509_free(sslconn->client_cert);
                }
                sslconn->client_cert = cert;
                sslconn->client_dn = NULL;
            }

            /*
             * Finally check for acceptable renegotiation results
             */
            if ((dc->nVerifyClient != SSL_CVERIFY_NONE) ||
                (sc->server->auth.verify_mode != SSL_CVERIFY_NONE)) {
                BOOL do_verify = ((dc->nVerifyClient == SSL_CVERIFY_REQUIRE) ||
                                  (sc->server->auth.verify_mode ==
                                   SSL_CVERIFY_REQUIRE));

                if (do_verify && (SSL_get_verify_result(ssl) != X509_V_OK)) {
                    ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(02262)
                                  "Re-negotiation handshake failed: "
                                  "Client verification failed");

                    return HTTP_FORBIDDEN;
                }

                if (do_verify) {
                    if ((peercert = SSL_get_peer_certificate(ssl)) == NULL) {
                        ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(02263)
                                      "Re-negotiation handshake failed: "
                                      "Client certificate missing");

                        return HTTP_FORBIDDEN;
                    }

                    X509_free(peercert);
                }
            }

            /*
             * Also check that SSLCipherSuite has been enforced as expected.
             */
            if (cipher_list) {
                cipher = SSL_get_current_cipher(ssl);
                if (sk_SSL_CIPHER_find(cipher_list, cipher) < 0) {
                    ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(02264)
                                  "SSL cipher suite not renegotiated: "
                                  "access to %s denied using cipher %s",
                                  r->filename,
                                  SSL_CIPHER_get_name(cipher));
                    return HTTP_FORBIDDEN;
                }
            }

            /* remember any new cipher suite used in renegotiation */
            if (ncipher_suite) {
                sslconn->cipher_suite = ncipher_suite;
            }
        }

        /* If we're trying to have the user name set from a client
         * certificate then we need to set it here. This should be safe as
         * the user name probably isn't important from an auth checking point
         * of view as the certificate supplied acts in that capacity.
         * However, if FakeAuth is being used then this isn't the case so
         * we need to postpone setting the username until later.
         */
        if ((dc->nOptions & SSL_OPT_FAKEBASICAUTH) == 0 && dc->szUserName) {
            char *val = ssl_var_lookup(r->pool, r->server, r->connection,
                                       r, (char *)dc->szUserName);
            if (val && val[0])
                r->user = val;
            else
                ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r, APLOGNO(02227)
                              "Failed to set r->user to '%s'", dc->szUserName);
        }

        /*
         * Check SSLRequire boolean expressions
         */
        requires = dc->aRequirement;
        ssl_requires = (ssl_require_t *)requires->elts;

        for (i = 0; i < requires->nelts; i++) {
            ssl_require_t *req = &ssl_requires[i];
            const char *errstring;
            ok = ap_expr_exec(r, req->mpExpr, &errstring);

            if (ok < 0) {
                ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(02265)
                              "access to %s failed, reason: Failed to execute "
                              "SSL requirement expression: %s",
                              r->filename, errstring);

                /* remember forbidden access for strict require option */
                apr_table_setn(r->notes, "ssl-access-forbidden", "1");

                return HTTP_FORBIDDEN;
            }

            if (ok != 1) {
                ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, r, APLOGNO(02266)
                              "Access to %s denied for %s "
                              "(requirement expression not fulfilled)",
                              r->filename, r->useragent_ip);

                ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, r, APLOGNO(02228)
                              "Failed expression: %s", req->cpExpr);

                ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(02229)
                              "access to %s failed, reason: %s", r->filename,
                              "SSL requirement expression not fulfilled");

                /* remember forbidden access for strict require option */
                apr_table_setn(r->notes, "ssl-access-forbidden", "1");

                return HTTP_FORBIDDEN;
            }
        }

        /*
         * Else access is granted from our point of view (except vendor
         * handlers override). But we have to return DECLINED here instead
         * of OK, because mod_auth and other modules still might want to
         * deny access.
         */
        return DECLINED;
    }

/*
 *  Authentication Handler:
 *  Fake a Basic authentication from the X509 client certificate.
 *
 *  This must be run fairly early on to prevent a real authentication from
 *  occurring, in particular it must be run before anything else that
 *  authenticates a user.  This means that the Module statement for this
 *  module should be LAST in the Configuration file.
 */
int ssl_hook_UserCheck(request_rec *r)
{
    SSLConnRec *sslconn = myConnConfig(r->connection);
    SSLSrvConfigRec *sc = mySrvConfig(r->server);
    SSLDirConfigRec *dc = myDirConfig(r);
    char *user;
    const char *auth_line, *username, *password;

    /*
     * Additionally forbid access (again)
     * when strict require option is used.
     */
    if ((dc->nOptions & SSL_OPT_STRICTREQUIRE) &&
        (apr_table_get(r->notes, "ssl-access-forbidden")))
    {
        return HTTP_FORBIDDEN;
    }

    /*
     * We decline when we are in a subrequest. The Authorization header
     * would already be present if it was added in the main request.
     */
    if (!ap_is_initial_req(r)) {
        return DECLINED;
    }

    /*
     * Make sure the user is not able to fake the client certificate
     * based authentication by just entering an X.509 Subject DN
     * ("/XX=YYY/XX=YYY/..") as the username and "password" as the
     * password.
     */
    if ((auth_line = apr_table_get(r->headers_in, "Authorization"))) {
        if (strcEQ(ap_getword(r->pool, &auth_line, ' '), "Basic")) {
            while ((*auth_line == ' ') || (*auth_line == '\t')) {
                auth_line++;
            }

            auth_line = ap_pbase64decode(r->pool, auth_line);
            username = ap_getword_nulls(r->pool, &auth_line, ':');
            password = auth_line;

            /* A client-supplied header whose username is DN-shaped and whose
             * password equals the fixed fake password would impersonate the
             * header this hook synthesizes below -- reject it outright. */
            if ((username[0] == '/') && strEQ(password, "password")) {
                ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(02035)
                              "Encountered FakeBasicAuth spoof: %s", username);
                return HTTP_FORBIDDEN;
            }
        }
    }

    /*
     * We decline operation in various situations...
     * - SSLOptions +FakeBasicAuth not configured
     * - r->user already authenticated
     * - ssl not enabled
     * - client did not present a certificate
     */
    if (!((sc->enabled == SSL_ENABLED_TRUE || sc->enabled == SSL_ENABLED_OPTIONAL)
          && sslconn && sslconn->ssl && sslconn->client_cert) ||
        !(dc->nOptions & SSL_OPT_FAKEBASICAUTH) || r->user)
    {
        return DECLINED;
    }

    /* lazily compute and cache the client DN on the connection */
    if (!sslconn->client_dn) {
        X509_NAME *name = X509_get_subject_name(sslconn->client_cert);
        char *cp = X509_NAME_oneline(name, NULL, 0);
        sslconn->client_dn = apr_pstrdup(r->connection->pool, cp);
        OPENSSL_free(cp);
    }

    /* use SSLUserName if defined, otherwise use the full client DN */
    if (dc->szUserName) {
        user = ssl_var_lookup(r->pool, r->server, r->connection,
                              r, (char *)dc->szUserName);
        if (!user || !user[0]) {
            ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r, APLOGNO(02434)
                          "Failed to set FakeBasicAuth username to '%s', did not exist in certificate",
                          dc->szUserName);
            return DECLINED;
        }
    }
    else {
        user = (char *)sslconn->client_dn;
    }

    /*
     * Fake a password - which one would be immaterial, as, it seems, an empty
     * password in the users file would match ALL incoming passwords, if only
     * we were using the standard crypt library routine. Unfortunately, OpenSSL
     * "fixes" a "bug" in crypt and thus prevents blank passwords from
     * working.  (IMHO what they really fix is a bug in the users of the code
     * - failing to program correctly for shadow passwords).  We need,
     * therefore, to provide a password. This password can be matched by
     * adding the string "xxj31ZMTZzkVA" as the password in the user file.
     * This is just the crypted variant of the word "password" ;-)
     */
    auth_line = apr_pstrcat(r->pool, "Basic ",
                            ap_pbase64encode(r->pool,
                                             apr_pstrcat(r->pool, user,
                                                         ":password", NULL)),
                            NULL);
    apr_table_setn(r->headers_in, "Authorization", auth_line);

    ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, r, APLOGNO(02036)
                  "Faking HTTP Basic Auth header: \"Authorization: %s\"",
                  auth_line);

    return DECLINED;
}

/* authorization phase: re-assert an SSLRequire denial recorded by
 * ssl_hook_Access when SSLOptions +StrictRequire is in effect. */
int ssl_hook_Auth(request_rec *r)
{
    SSLDirConfigRec *dc = myDirConfig(r);

    /*
     * Additionally forbid access (again)
     * when strict require option is used.
     */
    if ((dc->nOptions & SSL_OPT_STRICTREQUIRE) &&
        (apr_table_get(r->notes, "ssl-access-forbidden")))
    {
        return HTTP_FORBIDDEN;
    }

    return DECLINED;
}

/*
 *   Fixup Handler
 */

/* Environment variables exported when SSLOptions +StdEnvVars is set;
 * each is resolved through ssl_var_lookup() below. */
static const char *const ssl_hook_Fixup_vars[] = {
    "SSL_VERSION_INTERFACE",
    "SSL_VERSION_LIBRARY",
    "SSL_PROTOCOL",
    "SSL_SECURE_RENEG",
    "SSL_COMPRESS_METHOD",
    "SSL_CIPHER",
    "SSL_CIPHER_EXPORT",
    "SSL_CIPHER_USEKEYSIZE",
    "SSL_CIPHER_ALGKEYSIZE",
    "SSL_CLIENT_VERIFY",
    "SSL_CLIENT_M_VERSION",
    "SSL_CLIENT_M_SERIAL",
    "SSL_CLIENT_V_START",
    "SSL_CLIENT_V_END",
    "SSL_CLIENT_V_REMAIN",
    "SSL_CLIENT_S_DN",
    "SSL_CLIENT_I_DN",
    "SSL_CLIENT_A_KEY",
    "SSL_CLIENT_A_SIG",
    "SSL_CLIENT_CERT_RFC4523_CEA",
    "SSL_SERVER_M_VERSION",
    "SSL_SERVER_M_SERIAL",
    "SSL_SERVER_V_START",
    "SSL_SERVER_V_END",
    "SSL_SERVER_S_DN",
    "SSL_SERVER_I_DN",
    "SSL_SERVER_A_KEY",
    "SSL_SERVER_A_SIG",
    "SSL_SESSION_ID",
    "SSL_SESSION_RESUMED",
#ifdef HAVE_SRP
    "SSL_SRP_USER",
    "SSL_SRP_USERINFO",
#endif
    NULL
};

int ssl_hook_Fixup(request_rec *r)
{
    SSLConnRec *sslconn = myConnConfig(r->connection);
    SSLSrvConfigRec *sc = mySrvConfig(r->server);
    SSLDirConfigRec *dc = myDirConfig(r);
    apr_table_t *env = r->subprocess_env;
    char *var, *val = "";
#ifdef HAVE_TLSEXT
    const char *servername;
#endif
    STACK_OF(X509) *peer_certs;
    SSL *ssl;
    int i;

    /* slave (HTTP/2 stream) connections carry no SSL* of their own;
     * fall back to the master connection's config */
    if (!(sslconn && sslconn->ssl) && r->connection->master) {
        sslconn = myConnConfig(r->connection->master);
    }

    /*
     * Check to see if SSL is on
     */
    if (!(((sc->enabled == SSL_ENABLED_TRUE) || (sc->enabled == SSL_ENABLED_OPTIONAL))
          && sslconn && (ssl = sslconn->ssl))) {
        return DECLINED;
    }

    /*
     * Annotate the SSI/CGI environment with standard SSL information
     */
    /* the always present HTTPS (=HTTP over SSL) flag! */
    apr_table_setn(env, "HTTPS", "on");

#ifdef HAVE_TLSEXT
    /* add content of SNI TLS extension (if supplied with ClientHello) */
    if ((servername = SSL_get_servername(ssl, TLSEXT_NAMETYPE_host_name))) {
        apr_table_set(env, "SSL_TLS_SNI", servername);
    }
#endif

    /* standard SSL environment variables */
    if (dc->nOptions & SSL_OPT_STDENVVARS) {
        modssl_var_extract_dns(env, ssl, r->pool);
        modssl_var_extract_san_entries(env, ssl, r->pool);

        for (i = 0; ssl_hook_Fixup_vars[i]; i++) {
            var = (char *)ssl_hook_Fixup_vars[i];
            val = ssl_var_lookup(r->pool, r->server, r->connection, r, var);
            if (!strIsEmpty(val)) {
                apr_table_setn(env, var, val);
            }
        }
    }

    /*
     * On-demand bloat up the SSI/CGI environment with certificate data
     */
    if (dc->nOptions & SSL_OPT_EXPORTCERTDATA) {
        val = ssl_var_lookup(r->pool, r->server, r->connection,
                             r, "SSL_SERVER_CERT");

        apr_table_setn(env, "SSL_SERVER_CERT", val);

        val = ssl_var_lookup(r->pool, r->server, r->connection,
                             r, "SSL_CLIENT_CERT");

        apr_table_setn(env, "SSL_CLIENT_CERT", val);

        if ((peer_certs = (STACK_OF(X509) *)SSL_get_peer_cert_chain(ssl))) {
            for (i = 0; i < sk_X509_num(peer_certs); i++) {
                var = apr_psprintf(r->pool, "SSL_CLIENT_CERT_CHAIN_%d", i);

                val = ssl_var_lookup(r->pool, r->server, r->connection,
                                     r, var);

                if (val) {
                    apr_table_setn(env, var, val);
                }
            }
        }
    }

#ifdef SSL_get_secure_renegotiation_support
    apr_table_setn(r->notes, "ssl-secure-reneg",
                   SSL_get_secure_renegotiation_support(ssl) ? "1" : "0");
#endif

    return DECLINED;
}

/*  _________________________________________________________________
**
**  Authz providers for use with mod_authz_core
**  _________________________________________________________________
*/

/* 'Require ssl': granted iff the connection actually runs over SSL/TLS. */
static authz_status ssl_authz_require_ssl_check(request_rec *r,
                                                const char *require_line,
                                                const void *parsed)
{
    SSLConnRec *sslconn = myConnConfig(r->connection);
    SSL *ssl = sslconn ? sslconn->ssl : NULL;

    if (ssl)
        return AUTHZ_GRANTED;
    else
        return AUTHZ_DENIED;
}

static const char *ssl_authz_require_ssl_parse(cmd_parms *cmd,
                                               const char *require_line,
                                               const void **parsed)
{
    if (require_line && require_line[0])
        return "'Require ssl' does not take arguments";

    return NULL;
}

const authz_provider ssl_authz_provider_require_ssl =
{
    &ssl_authz_require_ssl_check,
    &ssl_authz_require_ssl_parse,
};

/* 'Require ssl-verify-client': granted iff a peer certificate was
 * presented and verified without error. */
static authz_status ssl_authz_verify_client_check(request_rec *r,
                                                  const char *require_line,
                                                  const void *parsed)
{
    SSLConnRec *sslconn = myConnConfig(r->connection);
    SSL *ssl = sslconn ?
               sslconn->ssl : NULL;

    if (!ssl)
        return AUTHZ_DENIED;

    if (sslconn->verify_error == NULL &&
        sslconn->verify_info == NULL &&
        SSL_get_verify_result(ssl) == X509_V_OK)
    {
        X509 *xs = SSL_get_peer_certificate(ssl);

        if (xs) {
            X509_free(xs);
            return AUTHZ_GRANTED;
        }
        else {
            /* xs is NULL on this path; X509_free() tolerates NULL */
            X509_free(xs);
        }
    }

    return AUTHZ_DENIED;
}

static const char *ssl_authz_verify_client_parse(cmd_parms *cmd,
                                                 const char *require_line,
                                                 const void **parsed)
{
    if (require_line && require_line[0])
        return "'Require ssl-verify-client' does not take arguments";

    return NULL;
}

const authz_provider ssl_authz_provider_verify_client =
{
    &ssl_authz_verify_client_check,
    &ssl_authz_verify_client_parse,
};

/*  _________________________________________________________________
**
**  OpenSSL Callback Functions
**  _________________________________________________________________
*/

/*
 * Hand out standard DH parameters, based on the authentication strength
 */
DH *ssl_callback_TmpDH(SSL *ssl, int export, int keylen)
{
    conn_rec *c = (conn_rec *)SSL_get_app_data(ssl);
    EVP_PKEY *pkey;
    int type;

#ifdef SSL_CERT_SET_SERVER
    /*
     * When multiple certs/keys are configured for the SSL_CTX: make sure
     * that we get the private key which is indeed used for the current
     * SSL connection (available in OpenSSL 1.0.2 or later only)
     */
    SSL_set_current_cert(ssl, SSL_CERT_SET_SERVER);
#endif
    pkey = SSL_get_privatekey(ssl);

#if OPENSSL_VERSION_NUMBER < 0x10100000L
    type = pkey ? EVP_PKEY_type(pkey->type) : EVP_PKEY_NONE;
#else
    type = pkey ? EVP_PKEY_base_id(pkey) : EVP_PKEY_NONE;
#endif

    /*
     * OpenSSL will call us with either keylen == 512 or keylen == 1024
     * (see the definition of SSL_EXPORT_PKEYLENGTH in ssl_locl.h).
     * Adjust the DH parameter length according to the size of the
     * RSA/DSA private key used for the current connection, and always
     * use at least 1024-bit parameters.
     * Note: This may cause interoperability issues with implementations
     * which limit their DH support to 1024 bit - e.g. Java 7 and earlier.
     * In this case, SSLCertificateFile can be used to specify fixed
     * 1024-bit DH parameters (with the effect that OpenSSL skips this
     * callback).
     */
    if ((type == EVP_PKEY_RSA) || (type == EVP_PKEY_DSA)) {
        keylen = EVP_PKEY_bits(pkey);
    }

    ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, c,
                  "handing out built-in DH parameters for %d-bit authenticated connection", keylen);

    return modssl_get_dh_params(keylen);
}

/*
 * This OpenSSL callback function is called when OpenSSL
 * does client authentication and verifies the certificate chain.
 */
int ssl_callback_SSLVerify(int ok, X509_STORE_CTX *ctx)
{
    /* Get Apache context back through OpenSSL context */
    SSL *ssl = X509_STORE_CTX_get_ex_data(ctx,
                                          SSL_get_ex_data_X509_STORE_CTX_idx());
    conn_rec *conn      = (conn_rec *)SSL_get_app_data(ssl);
    /* r is NULL during the initial handshake; only set for per-request
     * (renegotiation) verification */
    request_rec *r      = (request_rec *)modssl_get_app_data2(ssl);
    server_rec *s       = r ? r->server : mySrvFromConn(conn);

    SSLSrvConfigRec *sc = mySrvConfig(s);
    SSLConnRec *sslconn = myConnConfig(conn);
    SSLDirConfigRec *dc = r ? myDirConfig(r) : sslconn->dc;
    modssl_ctx_t *mctx  = myCtxConfig(sslconn, sc);
    int crl_check_mode  = mctx->crl_check_mask & ~SSL_CRLCHECK_FLAGS;

    /* Get verify ingredients */
    int errnum   = X509_STORE_CTX_get_error(ctx);
    int errdepth = X509_STORE_CTX_get_error_depth(ctx);
    int depth, verify;

    /*
     * Log verification information
     */
    ssl_log_cxerror(SSLLOG_MARK, APLOG_DEBUG, 0, conn,
                    X509_STORE_CTX_get_current_cert(ctx), APLOGNO(02275)
                    "Certificate Verification, depth %d, "
                    "CRL checking mode: %s (%x)", errdepth,
                    crl_check_mode == SSL_CRLCHECK_CHAIN ? "chain" :
                    crl_check_mode == SSL_CRLCHECK_LEAF ? "leaf" : "none",
                    mctx->crl_check_mask);

    /*
     * Check for optionally acceptable non-verifiable issuer situation
     */
    if (dc && (dc->nVerifyClient != SSL_CVERIFY_UNSET)) {
        verify = dc->nVerifyClient;
    }
    else {
        verify = mctx->auth.verify_mode;
    }

    if (verify == SSL_CVERIFY_NONE) {
        /*
         * SSLProxyVerify is either not configured or set to "none".
         * (this callback doesn't happen in the server context if SSLVerify
         *  is not configured or set to "none")
         */
        return TRUE;
    }

    if (ssl_verify_error_is_optional(errnum) &&
        (verify == SSL_CVERIFY_OPTIONAL_NO_CA))
    {
        ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, conn, APLOGNO(02037)
                      "Certificate Verification: Verifiable Issuer is "
                      "configured as optional, therefore we're accepting "
                      "the certificate");

        sslconn->verify_info = "GENEROUS";
        ok = TRUE;
    }

    /*
     * Expired certificates vs. "expired" CRLs: by default, OpenSSL
     * turns X509_V_ERR_CRL_HAS_EXPIRED into a "certificate_expired(45)"
     * SSL alert, but that's not really the message we should convey to the
     * peer (at the very least, it's confusing, and in many cases, it's also
     * inaccurate, as the certificate itself may very well not have expired
     * yet). We set the X509_STORE_CTX error to something which OpenSSL's
     * s3_both.c:ssl_verify_alarm_type() maps to SSL_AD_CERTIFICATE_UNKNOWN,
     * i.e. the peer will receive a "certificate_unknown(46)" alert.
     * We do not touch errnum, though, so that later on we will still log
     * the "real" error, as returned by OpenSSL.
     */
    if (!ok && errnum == X509_V_ERR_CRL_HAS_EXPIRED) {
        X509_STORE_CTX_set_error(ctx, -1);
    }

    if (!ok && errnum == X509_V_ERR_UNABLE_TO_GET_CRL
            && (mctx->crl_check_mask & SSL_CRLCHECK_NO_CRL_FOR_CERT_OK)) {
        ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, conn,
                      "Certificate Verification: Temporary error (%d): %s: "
                      "optional therefore we're accepting the certificate",
                      errnum, X509_verify_cert_error_string(errnum));
        X509_STORE_CTX_set_error(ctx, X509_V_OK);
        errnum = X509_V_OK;
        ok = TRUE;
    }

#ifndef OPENSSL_NO_OCSP
    /*
     * Perform OCSP-based revocation checks
     */
    if (ok && sc->server->ocsp_enabled == TRUE) {
        /* If there was an optional verification error, it's not
         * possible to perform OCSP validation since the issuer may be
         * missing/untrusted.  Fail in that case.
         */
        if (ssl_verify_error_is_optional(errnum)) {
            X509_STORE_CTX_set_error(ctx, X509_V_ERR_APPLICATION_VERIFICATION);
            errnum = X509_V_ERR_APPLICATION_VERIFICATION;
            ap_log_cerror(APLOG_MARK, APLOG_ERR, 0, conn, APLOGNO(02038)
                          "cannot perform OCSP validation for cert "
                          "if issuer has not been verified "
                          "(optional_no_ca configured)");
            ok = FALSE;
        }
        else {
            ok = modssl_verify_ocsp(ctx, sc, s, conn, conn->pool);
            if (!ok) {
                errnum = X509_STORE_CTX_get_error(ctx);
            }
        }
    }
#endif

    /*
     * If we already know it's not ok, log the real reason
     */
    if (!ok) {
        if (APLOGcinfo(conn)) {
            ssl_log_cxerror(SSLLOG_MARK, APLOG_INFO, 0, conn,
                            X509_STORE_CTX_get_current_cert(ctx), APLOGNO(02276)
                            "Certificate Verification: Error (%d): %s",
                            errnum, X509_verify_cert_error_string(errnum));
        } else {
            ap_log_cerror(APLOG_MARK, APLOG_ERR, 0, conn, APLOGNO(02039)
                          "Certificate Verification: Error (%d): %s",
                          errnum, X509_verify_cert_error_string(errnum));
        }

        /* drop any cached client cert/DN; keep the error string for the
         * SSL_CLIENT_VERIFY environment variable and authz checks */
        if (sslconn->client_cert) {
            X509_free(sslconn->client_cert);
            sslconn->client_cert = NULL;
        }
        sslconn->client_dn = NULL;
        sslconn->verify_error = X509_verify_cert_error_string(errnum);
    }

    /*
     * Finally check the depth of the certificate verification
     */
    if (dc && (dc->nVerifyDepth != UNSET)) {
        depth = dc->nVerifyDepth;
    }
    else {
        depth = mctx->auth.verify_depth;
    }

    if (errdepth > depth) {
        ap_log_cerror(APLOG_MARK, APLOG_ERR, 0, conn, APLOGNO(02040)
                      "Certificate Verification: Certificate Chain too long "
                      "(chain has %d certificates, but maximum allowed are "
                      "only %d)",
                      errdepth, depth);

        errnum = X509_V_ERR_CERT_CHAIN_TOO_LONG;
        sslconn->verify_error = X509_verify_cert_error_string(errnum);

        ok = FALSE;
    }

    /*
     * And finally signal OpenSSL the (perhaps changed) state
     */
    return ok;
}

#define SSLPROXY_CERT_CB_LOG_FMT \
   "Proxy client certificate callback: (%s) "

/* Debug-log one candidate client cert considered by the proxy cert
 * selection callback below. */
static void modssl_proxy_info_log(conn_rec *c,
                                  X509_INFO *info,
                                  const char *msg)
{
    ssl_log_cxerror(SSLLOG_MARK, APLOG_DEBUG, 0, c, info->x509,
                    APLOGNO(02277) SSLPROXY_CERT_CB_LOG_FMT "%s, sending",
(mySrvConfigFromConn(c))->vhost_id, msg); } /* * caller will decrement the cert and key reference * so we need to increment here to prevent them from * being freed. */ #if OPENSSL_VERSION_NUMBER < 0x10100000L #define modssl_set_cert_info(info, cert, pkey) \ *cert = info->x509; \ CRYPTO_add(&(*cert)->references, +1, CRYPTO_LOCK_X509); \ *pkey = info->x_pkey->dec_pkey; \ CRYPTO_add(&(*pkey)->references, +1, CRYPTO_LOCK_X509_PKEY) #else #define modssl_set_cert_info(info, cert, pkey) \ *cert = info->x509; \ X509_up_ref(*cert); \ *pkey = info->x_pkey->dec_pkey; \ EVP_PKEY_up_ref(*pkey); #endif int ssl_callback_proxy_cert(SSL *ssl, X509 **x509, EVP_PKEY **pkey) { conn_rec *c = (conn_rec *)SSL_get_app_data(ssl); server_rec *s = mySrvFromConn(c); SSLSrvConfigRec *sc = mySrvConfig(s); SSLDirConfigRec *dc = myDirConfigFromConn(c); X509_NAME *ca_name, *issuer, *ca_issuer; X509_INFO *info; X509 *ca_cert; STACK_OF(X509_NAME) *ca_list; STACK_OF(X509_INFO) *certs; STACK_OF(X509) *ca_certs; STACK_OF(X509) **ca_cert_chains; int i, j, k; ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(02267) SSLPROXY_CERT_CB_LOG_FMT "entered", sc->vhost_id); certs = (dc && dc->proxy) ? dc->proxy->pkp->certs : NULL; if (!certs || (sk_X509_INFO_num(certs) <= 0)) { ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s, APLOGNO(02268) SSLPROXY_CERT_CB_LOG_FMT "downstream server wanted client certificate " "but none are configured", sc->vhost_id); return FALSE; } ca_list = SSL_get_client_CA_list(ssl); if (!ca_list || (sk_X509_NAME_num(ca_list) <= 0)) { /* * downstream server didn't send us a list of acceptable CA certs, * so we send the first client cert in the list. 
*/ info = sk_X509_INFO_value(certs, 0); modssl_proxy_info_log(c, info, APLOGNO(02278) "no acceptable CA list"); modssl_set_cert_info(info, x509, pkey); return TRUE; } ca_cert_chains = dc->proxy->pkp->ca_certs; for (i = 0; i < sk_X509_NAME_num(ca_list); i++) { ca_name = sk_X509_NAME_value(ca_list, i); for (j = 0; j < sk_X509_INFO_num(certs); j++) { info = sk_X509_INFO_value(certs, j); issuer = X509_get_issuer_name(info->x509); /* Search certs (by issuer name) one by one*/ if (X509_NAME_cmp(issuer, ca_name) == 0) { modssl_proxy_info_log(c, info, APLOGNO(02279) "found acceptable cert"); modssl_set_cert_info(info, x509, pkey); return TRUE; } if (ca_cert_chains) { /* * Failed to find direct issuer - search intermediates * (by issuer name), if provided. */ ca_certs = ca_cert_chains[j]; for (k = 0; k < sk_X509_num(ca_certs); k++) { ca_cert = sk_X509_value(ca_certs, k); ca_issuer = X509_get_issuer_name(ca_cert); if(X509_NAME_cmp(ca_issuer, ca_name) == 0 ) { modssl_proxy_info_log(c, info, APLOGNO(02280) "found acceptable cert by intermediate CA"); modssl_set_cert_info(info, x509, pkey); return TRUE; } } /* end loop through chained certs */ } } /* end loop through available certs */ } ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(02269) SSLPROXY_CERT_CB_LOG_FMT "no client certificate found!?", sc->vhost_id); return FALSE; } static void ssl_session_log(server_rec *s, const char *request, IDCONST unsigned char *id, unsigned int idlen, const char *status, const char *result, long timeout) { char buf[MODSSL_SESSION_ID_STRING_LEN]; char timeout_str[56] = {'\0'}; if (!APLOGdebug(s)) { return; } if (timeout) { apr_snprintf(timeout_str, sizeof(timeout_str), "timeout=%lds ", timeout); } ap_log_error(APLOG_MARK, APLOG_TRACE2, 0, s, "Inter-Process Session Cache: " "request=%s status=%s id=%s %s(session %s)", request, status, modssl_SSL_SESSION_id2sz(id, idlen, buf, sizeof(buf)), timeout_str, result); } /* * This callback function is executed by OpenSSL whenever a new SSL_SESSION 
 is
 * added to the internal OpenSSL session cache. We use this hook to spread the
 * SSL_SESSION also to the inter-process disk-cache to make share it with our
 * other Apache pre-forked server processes.
 */
int ssl_callback_NewSessionCacheEntry(SSL *ssl, SSL_SESSION *session)
{
    /* Get Apache context back through OpenSSL context */
    conn_rec *conn = (conn_rec *)SSL_get_app_data(ssl);
    server_rec *s = mySrvFromConn(conn);
    SSLSrvConfigRec *sc = mySrvConfig(s);
    long timeout = sc->session_cache_timeout;
    BOOL rc;
    IDCONST unsigned char *id;
    unsigned int idlen;

    /*
     * Set the timeout also for the internal OpenSSL cache, because this way
     * our inter-process cache is consulted only when it's really necessary.
     */
    SSL_set_timeout(session, timeout);

    /*
     * Store the SSL_SESSION in the inter-process cache with the
     * same expire time, so it expires automatically there, too.
     */
#ifdef OPENSSL_NO_SSL_INTERN
    /* Opaque-struct builds: fetch the id via the accessor. */
    id = (unsigned char *)SSL_SESSION_get_id(session, &idlen);
#else
    id = session->session_id;
    idlen = session->session_id_length;
#endif
    /* Expiry = session creation time + configured cache timeout. */
    rc = ssl_scache_store(s, id, idlen,
                          apr_time_from_sec(SSL_SESSION_get_time(session)
                                            + timeout),
                          session, conn->pool);

    ssl_session_log(s, "SET", id, idlen,
                    rc == TRUE ? "OK" : "BAD",
                    "caching", timeout);

    /*
     * return 0 which means to OpenSSL that the session is still
     * valid and was not freed by us with SSL_SESSION_free().
     */
    return 0;
}

/*
 * This callback function is executed by OpenSSL whenever a
 * SSL_SESSION is looked up in the internal OpenSSL cache and it
 * was not found. We use this to lookup the SSL_SESSION in the
 * inter-process disk-cache where it was perhaps stored by one
 * of our other Apache pre-forked server processes.
 */
SSL_SESSION *ssl_callback_GetSessionCacheEntry(SSL *ssl,
                                               IDCONST unsigned char *id,
                                               int idlen, int *do_copy)
{
    /* Get Apache context back through OpenSSL context */
    conn_rec *conn = (conn_rec *)SSL_get_app_data(ssl);
    server_rec *s = mySrvFromConn(conn);
    SSL_SESSION *session;

    /*
     * Try to retrieve the SSL_SESSION from the inter-process cache
     */
    session = ssl_scache_retrieve(s, id, idlen, conn->pool);

    ssl_session_log(s, "GET", id, idlen,
                    session ? "FOUND" : "MISSED",
                    session ? "reuse" : "renewal", 0);

    /*
     * Return NULL or the retrieved SSL_SESSION. But indicate (by
     * setting do_copy to 0) that the reference count on the
     * SSL_SESSION should not be incremented by the SSL library,
     * because we will no longer hold a reference to it ourself.
     */
    *do_copy = 0;

    return session;
}

/*
 * This callback function is executed by OpenSSL whenever a
 * SSL_SESSION is removed from the internal OpenSSL cache.
 * We use this to remove the SSL_SESSION in the inter-process
 * disk-cache, too.
 */
void ssl_callback_DelSessionCacheEntry(SSL_CTX *ctx,
                                       SSL_SESSION *session)
{
    server_rec *s;
    SSLSrvConfigRec *sc;
    IDCONST unsigned char *id;
    unsigned int idlen;

    /*
     * Get Apache context back through OpenSSL context
     */
    if (!(s = (server_rec *)SSL_CTX_get_app_data(ctx))) {
        return; /* on server shutdown Apache is already gone */
    }

    sc = mySrvConfig(s);

    /*
     * Remove the SSL_SESSION from the inter-process cache
     */
#ifdef OPENSSL_NO_SSL_INTERN
    id = (unsigned char *)SSL_SESSION_get_id(session, &idlen);
#else
    id = session->session_id;
    idlen = session->session_id_length;
#endif

    /* TODO: Do we need a temp pool here, or are we always shutting down? */
    ssl_scache_remove(s, id, idlen, sc->mc->pPool);

    ssl_session_log(s, "REM", id, idlen,
                    "OK", "dead", 0);

    return;
}

/*
 * Dump debugginfo trace to the log file.
 */
static void log_tracing_state(const SSL *ssl, conn_rec *c,
                              server_rec *s, int where, int rc)
{
    /*
     * create the various trace messages
     */
    if (where & SSL_CB_HANDSHAKE_START) {
        ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, c,
                      "%s: Handshake: start", MODSSL_LIBRARY_NAME);
    }
    else if (where & SSL_CB_HANDSHAKE_DONE) {
        ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, c,
                      "%s: Handshake: done", MODSSL_LIBRARY_NAME);
    }
    else if (where & SSL_CB_LOOP) {
        ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, c,
                      "%s: Loop: %s",
                      MODSSL_LIBRARY_NAME, SSL_state_string_long(ssl));
    }
    else if (where & SSL_CB_READ) {
        ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, c,
                      "%s: Read: %s",
                      MODSSL_LIBRARY_NAME, SSL_state_string_long(ssl));
    }
    else if (where & SSL_CB_WRITE) {
        ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, c,
                      "%s: Write: %s",
                      MODSSL_LIBRARY_NAME, SSL_state_string_long(ssl));
    }
    else if (where & SSL_CB_ALERT) {
        /* Alerts can accompany either a read or a write; rc packs the
         * alert type and description for the string helpers below. */
        char *str = (where & SSL_CB_READ) ? "read" : "write";
        ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, c,
                      "%s: Alert: %s:%s:%s",
                      MODSSL_LIBRARY_NAME, str,
                      SSL_alert_type_string_long(rc),
                      SSL_alert_desc_string_long(rc));
    }
    else if (where & SSL_CB_EXIT) {
        if (rc == 0) {
            ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, c,
                          "%s: Exit: failed in %s",
                          MODSSL_LIBRARY_NAME, SSL_state_string_long(ssl));
        }
        else if (rc < 0) {
            ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, c,
                          "%s: Exit: error in %s",
                          MODSSL_LIBRARY_NAME, SSL_state_string_long(ssl));
        }
    }

    /*
     * Because SSL renegotiations can happen at any time (not only after
     * SSL_accept()), the best way to log the current connection details is
     * right after a finished handshake.
     */
    if (where & SSL_CB_HANDSHAKE_DONE) {
        ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, c, APLOGNO(02041)
                      "Protocol: %s, Cipher: %s (%s/%s bits)",
                      ssl_var_lookup(NULL, s, c, NULL, "SSL_PROTOCOL"),
                      ssl_var_lookup(NULL, s, c, NULL, "SSL_CIPHER"),
                      ssl_var_lookup(NULL, s, c, NULL, "SSL_CIPHER_USEKEYSIZE"),
                      ssl_var_lookup(NULL, s, c, NULL, "SSL_CIPHER_ALGKEYSIZE"));
    }
}

/*
 * This callback function is executed while OpenSSL processes the SSL
 * handshake and does SSL record layer stuff. It's used to trap
 * client-initiated renegotiations, and for dumping everything to the
 * log.
 */
void ssl_callback_Info(const SSL *ssl, int where, int rc)
{
    conn_rec *c;
    server_rec *s;
    SSLConnRec *scr;

    /* Retrieve the conn_rec and the associated SSLConnRec. */
    if ((c = (conn_rec *)SSL_get_app_data((SSL *)ssl)) == NULL) {
        return;
    }

    if ((scr = myConnConfig(c)) == NULL) {
        return;
    }

    /* If the reneg state is to reject renegotiations, check the SSL
     * state machine and move to ABORT if a Client Hello is being
     * read. */
    if (!scr->is_proxy &&
        (where & SSL_CB_HANDSHAKE_START) &&
        scr->reneg_state == RENEG_REJECT) {
        scr->reneg_state = RENEG_ABORT;
        ap_log_cerror(APLOG_MARK, APLOG_ERR, 0, c, APLOGNO(02042)
                      "rejecting client initiated renegotiation");
    }
    /* If the first handshake is complete, change state to reject any
     * subsequent client-initiated renegotiation. */
    else if ((where & SSL_CB_HANDSHAKE_DONE) && scr->reneg_state == RENEG_INIT) {
        scr->reneg_state = RENEG_REJECT;
    }

    s = mySrvFromConn(c);
    if (s && APLOGdebug(s)) {
        log_tracing_state(ssl, c, s, where, rc);
    }
}

#ifdef HAVE_TLSEXT
/*
 * This function sets the virtual host from an extended
 * client hello with a server name indication extension ("SNI", cf. RFC 6066).
 */
static apr_status_t init_vhost(conn_rec *c, SSL *ssl)
{
    const char *servername;

    if (c) {
        SSLConnRec *sslcon = myConnConfig(c);

        if (sslcon->server != c->base_server) {
            /* already found the vhost */
            return APR_SUCCESS;
        }

        servername = SSL_get_servername(ssl, TLSEXT_NAMETYPE_host_name);
        if (servername) {
            if (ap_vhost_iterate_given_conn(c, ssl_find_vhost,
                                            (void *)servername)) {
                ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, c, APLOGNO(02043)
                              "SSL virtual host for servername %s found",
                              servername);

                return APR_SUCCESS;
            }
            else {
                ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, c, APLOGNO(02044)
                              "No matching SSL virtual host for servername "
                              "%s found (using default/first virtual host)",
                              servername);
                /*
                 * RFC 6066 section 3 says "It is NOT RECOMMENDED to send
                 * a warning-level unrecognized_name(112) alert, because
                 * the client's behavior in response to warning-level alerts
                 * is unpredictable."
                 *
                 * To maintain backwards compatibility in mod_ssl, we
                 * no longer send any alert (neither warning- nor fatal-level),
                 * i.e. we take the second action suggested in RFC 6066:
                 * "If the server understood the ClientHello extension but
                 * does not recognize the server name, the server SHOULD take
                 * one of two actions: either abort the handshake by sending
                 * a fatal-level unrecognized_name(112) alert or continue
                 * the handshake."
                 */
            }
        }
        else {
            ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, c, APLOGNO(02645)
                          "Server name not provided via TLS extension "
                          "(using default/first virtual host)");
        }
    }

    return APR_NOTFOUND;
}

/*
 * This callback function is executed when OpenSSL encounters an extended
 * client hello with a server name indication extension ("SNI", cf. RFC 6066).
 */
int ssl_callback_ServerNameIndication(SSL *ssl, int *al, modssl_ctx_t *mctx)
{
    conn_rec *c = (conn_rec *)SSL_get_app_data(ssl);
    apr_status_t status = init_vhost(c, ssl);

    return (status == APR_SUCCESS)?
        SSL_TLSEXT_ERR_OK : SSL_TLSEXT_ERR_NOACK;
}

/*
 * Find a (name-based) SSL virtual host where either the ServerName
 * or one of the ServerAliases matches the supplied name (to be used
 * with ap_vhost_iterate_given_conn())
 */
static int ssl_find_vhost(void *servername, conn_rec *c, server_rec *s)
{
    SSLSrvConfigRec *sc;
    SSL *ssl;
    BOOL found;
    SSLConnRec *sslcon;

    found = ssl_util_vhost_matches(servername, s);

    /* set SSL_CTX (if matched) */
    sslcon = myConnConfig(c);
    if (found && (ssl = sslcon->ssl) &&
        (sc = mySrvConfig(s))) {
        SSL_CTX *ctx = SSL_set_SSL_CTX(ssl, sc->server->ssl_ctx);
        /*
         * SSL_set_SSL_CTX() only deals with the server cert,
         * so we need to duplicate a few additional settings
         * from the ctx by hand
         */
        SSL_set_options(ssl, SSL_CTX_get_options(ctx));
        if ((SSL_get_verify_mode(ssl) == SSL_VERIFY_NONE) ||
            (SSL_num_renegotiations(ssl) == 0)) {
            /*
             * Only initialize the verification settings from the ctx
             * if they are not yet set, or if we're called when a new
             * SSL connection is set up (num_renegotiations == 0).
             * Otherwise, we would possibly reset a per-directory
             * configuration which was put into effect by ssl_hook_Access.
             */
            SSL_set_verify(ssl, SSL_CTX_get_verify_mode(ctx),
                           SSL_CTX_get_verify_callback(ctx));
        }

        /*
         * Adjust the session id context. ssl_init_ssl_connection()
         * always picks the configuration of the first vhost when
         * calling SSL_new(), but we want to tie the session to the
         * vhost we have just switched to. Again, we have to make sure
         * that we're not overwriting a session id context which was
         * possibly set in ssl_hook_Access(), before triggering
         * a renegotiation.
         */
        if (SSL_num_renegotiations(ssl) == 0) {
            /* Session id context = hex MD5 of the vhost id. */
            unsigned char *sid_ctx =
                (unsigned char *)ap_md5_binary(c->pool,
                                               (unsigned char *)sc->vhost_id,
                                               sc->vhost_id_len);
            SSL_set_session_id_context(ssl, sid_ctx, APR_MD5_DIGESTSIZE*2);
        }

        /*
         * Save the found server into our SSLConnRec for later
         * retrieval
         */
        sslcon->server = s;
        sslcon->cipher_suite = sc->server->auth.cipher_suite;

        ap_update_child_status_from_server(c->sbh, SERVER_BUSY_READ, c, s);
        /*
         * There is one special filter callback, which is set
         * very early depending on the base_server's log level.
         * If this is not the first vhost we're now selecting
         * (and the first vhost doesn't use APLOG_TRACE4), then
         * we need to set that callback here.
         */
        if (APLOGtrace4(s)) {
            BIO *rbio = SSL_get_rbio(ssl),
                *wbio = SSL_get_wbio(ssl);
            BIO_set_callback(rbio, ssl_io_data_cb);
            BIO_set_callback_arg(rbio, (void *)ssl);
            if (wbio && wbio != rbio) {
                BIO_set_callback(wbio, ssl_io_data_cb);
                BIO_set_callback_arg(wbio, (void *)ssl);
            }
        }

        return 1;
    }

    return 0;
}
#endif /* HAVE_TLSEXT */

#ifdef HAVE_TLS_SESSION_TICKETS
/*
 * This callback function is executed when OpenSSL needs a key for encrypting/
 * decrypting a TLS session ticket (RFC 5077) and a ticket key file has been
 * configured through SSLSessionTicketKeyFile.
*/ int ssl_callback_SessionTicket(SSL *ssl, unsigned char *keyname, unsigned char *iv, EVP_CIPHER_CTX *cipher_ctx, HMAC_CTX *hctx, int mode) { conn_rec *c = (conn_rec *)SSL_get_app_data(ssl); server_rec *s = mySrvFromConn(c); SSLSrvConfigRec *sc = mySrvConfig(s); SSLConnRec *sslconn = myConnConfig(c); modssl_ctx_t *mctx = myCtxConfig(sslconn, sc); modssl_ticket_key_t *ticket_key = mctx->ticket_key; if (mode == 1) { /* * OpenSSL is asking for a key for encrypting a ticket, * see s3_srvr.c:ssl3_send_newsession_ticket() */ if (ticket_key == NULL) { /* should never happen, but better safe than sorry */ return -1; } memcpy(keyname, ticket_key->key_name, 16); RAND_bytes(iv, EVP_MAX_IV_LENGTH); EVP_EncryptInit_ex(cipher_ctx, EVP_aes_128_cbc(), NULL, ticket_key->aes_key, iv); HMAC_Init_ex(hctx, ticket_key->hmac_secret, 16, tlsext_tick_md(), NULL); ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, c, APLOGNO(02289) "TLS session ticket key for %s successfully set, " "creating new session ticket", sc->vhost_id); return 1; } else if (mode == 0) { /* * OpenSSL is asking for the decryption key, * see t1_lib.c:tls_decrypt_ticket() */ /* check key name */ if (ticket_key == NULL || memcmp(keyname, ticket_key->key_name, 16)) { return 0; } EVP_DecryptInit_ex(cipher_ctx, EVP_aes_128_cbc(), NULL, ticket_key->aes_key, iv); HMAC_Init_ex(hctx, ticket_key->hmac_secret, 16, tlsext_tick_md(), NULL); ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, c, APLOGNO(02290) "TLS session ticket key for %s successfully set, " "decrypting existing session ticket", sc->vhost_id); return 1; } /* OpenSSL is not expected to call us with modes other than 1 or 0 */ return -1; } #endif /* HAVE_TLS_SESSION_TICKETS */ #ifdef HAVE_TLS_ALPN /* * This callback function is executed when the TLS Application-Layer * Protocol Negotiation Extension (ALPN, RFC 7301) is triggered by the Client * Hello, giving a list of desired protocol names (in descending preference) * to the server. 
 * The callback has to select a protocol name or return an error if none of
 * the clients preferences is supported.
 * The selected protocol does not have to be on the client list, according
 * to RFC 7301, so no checks are performed.
 * The client protocol list is serialized as length byte followed by ASCII
 * characters (not null-terminated), followed by the next protocol name.
 */
int ssl_callback_alpn_select(SSL *ssl,
                             const unsigned char **out, unsigned char *outlen,
                             const unsigned char *in, unsigned int inlen,
                             void *arg)
{
    conn_rec *c = (conn_rec*)SSL_get_app_data(ssl);
    SSLConnRec *sslconn = myConnConfig(c);
    apr_array_header_t *client_protos;
    const char *proposed;
    size_t len;
    /* NOTE(review): signed i is compared against unsigned inlen below; the
     * usual arithmetic conversions make this safe for the value range here,
     * but an unsigned index would be cleaner -- confirm before changing. */
    int i;

    /* If the connection object is not available,
     * then there's nothing for us to do. */
    if (c == NULL) {
        return SSL_TLSEXT_ERR_OK;
    }

    if (inlen == 0) {
        /* someone tries to trick us? */
        ap_log_cerror(APLOG_MARK, APLOG_ERR, 0, c, APLOGNO(02837)
                      "ALPN client protocol list empty");
        return SSL_TLSEXT_ERR_ALERT_FATAL;
    }

    /* Unpack the length-prefixed wire list into an array of pool strings. */
    client_protos = apr_array_make(c->pool, 0, sizeof(char *));
    for (i = 0; i < inlen; /**/) {
        unsigned int plen = in[i++];
        if (plen + i > inlen) {
            /* someone tries to trick us? */
            ap_log_cerror(APLOG_MARK, APLOG_ERR, 0, c, APLOGNO(02838)
                          "ALPN protocol identifier too long");
            return SSL_TLSEXT_ERR_ALERT_FATAL;
        }
        APR_ARRAY_PUSH(client_protos, char *) =
            apr_pstrndup(c->pool, (const char *)in+i, plen);
        i += plen;
    }

    /* The order the callbacks are invoked from TLS extensions is, unfortunately
     * not defined and older openssl versions do call ALPN selection before
     * they callback the SNI. We need to make sure that we know which vhost
     * we are dealing with so we respect the correct protocols.
     */
    init_vhost(c, ssl);

    proposed = ap_select_protocol(c, NULL, sslconn->server, client_protos);
    if (!proposed) {
        /* fall back to the connection's current protocol */
        proposed = ap_get_protocol(c);
    }

    len = strlen(proposed);
    if (len > 255) {
        /* ALPN protocol names are limited to one length byte on the wire. */
        ap_log_cerror(APLOG_MARK, APLOG_ERR, 0, c, APLOGNO(02840)
                      "ALPN negotiated protocol name too long");
        return SSL_TLSEXT_ERR_ALERT_FATAL;
    }
    *out = (const unsigned char *)proposed;
    *outlen = (unsigned char)len;

    if (strcmp(proposed, ap_get_protocol(c))) {
        apr_status_t status;

        status = ap_switch_protocol(c, NULL, sslconn->server, proposed);
        if (status != APR_SUCCESS) {
            ap_log_cerror(APLOG_MARK, APLOG_ERR, status, c,
                          APLOGNO(02908) "protocol switch to '%s' failed",
                          proposed);
            return SSL_TLSEXT_ERR_ALERT_FATAL;
        }
    }

    return SSL_TLSEXT_ERR_OK;
}
#endif /* HAVE_TLS_ALPN */

#ifdef HAVE_SRP
/*
 * SRP (RFC 5054) server parameter callback: look up the negotiated SRP
 * username in the configured verifier base and install its parameters,
 * or abort the handshake with a fatal alert if the user is unknown.
 */
int ssl_callback_SRPServerParams(SSL *ssl, int *ad, void *arg)
{
    modssl_ctx_t *mctx = (modssl_ctx_t *)arg;
    char *username = SSL_get_srp_username(ssl);
    SRP_user_pwd *u;

    if (username == NULL
#if OPENSSL_VERSION_NUMBER < 0x10100000L
        /* pre-1.1.0: returned record is owned by the vbase, no free needed */
        || (u = SRP_VBASE_get_by_user(mctx->srp_vbase, username)) == NULL) {
#else
        /* 1.1.0+: returned record is a copy and must be freed by us */
        || (u = SRP_VBASE_get1_by_user(mctx->srp_vbase, username)) == NULL) {
#endif
        *ad = SSL_AD_UNKNOWN_PSK_IDENTITY;
        return SSL3_AL_FATAL;
    }

    if (SSL_set_srp_server_param(ssl, u->N, u->g, u->s, u->v, u->info) < 0) {
#if OPENSSL_VERSION_NUMBER >= 0x10100000L
        SRP_user_pwd_free(u);
#endif
        *ad = SSL_AD_INTERNAL_ERROR;
        return SSL3_AL_FATAL;
    }

    /* reset all other options */
#if OPENSSL_VERSION_NUMBER >= 0x10100000L
    SRP_user_pwd_free(u);
#endif
    SSL_set_verify(ssl, SSL_VERIFY_NONE, ssl_callback_SSLVerify);

    return SSL_ERROR_NONE;
}
#endif /* HAVE_SRP */
./CrossVul/dataset_final_sorted/CWE-284/c/bad_5075_1
crossvul-cpp_data_bad_4813_0
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % IIIIIIIIII PPPPPPPP LL % % II PP PP LL % % II PP PP LL % % II PP PP LL % % II PPPPPPPP LL % % II PP LL % % II PP LL % % IIIIIIIIII PP LLLLLLLL % % % % % % % % Read/Write Scanalytics IPLab Image Format % % Sean Burke % % 2008.05.07 % % v 0.9 % % % % Copyright 1999-2017 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % http://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. */ #include "magick/studio.h" #include "magick/blob.h" #include "magick/blob-private.h" #include "magick/cache.h" #include "magick/colorspace.h" #include "magick/colorspace-private.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/image.h" #include "magick/image-private.h" #include "magick/list.h" #include "magick/magick.h" #include "magick/memory_.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/option.h" #include "magick/pixel-accessor.h" #include "magick/property.h" #include "magick/quantum-private.h" #include "magick/static.h" #include "magick/string_.h" #include "magick/module.h" /* Typedef declarations. 
 */

/*
  Typedef declarations.
*/
/* On-disk/in-memory header of an IPLab image: geometry, plane counts and
   the sample storage type ("byteType") which selects depth + quantum
   format in the reader. */
typedef struct _IPLInfo
{
  unsigned int
    tag,
    size,
    time,
    z,
    width,
    height,
    colors,
    depth,
    byteType;
} IPLInfo;

static MagickBooleanType
  WriteIPLImage(const ImageInfo *,Image *);

/* Disabled debugging helper, kept for reference:
static void increase (void *pixel, int byteType){
  switch(byteType){
    case 0:(*((unsigned char *) pixel))++; break;
    case 1:(*((signed int *) pixel))++; break;
    case 2:(*((unsigned int *) pixel))++; break;
    case 3:(*((signed long *) pixel))++; break;
    default:(*((unsigned int *) pixel))++; break;
  }
}
*/

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   I s I P L                                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IsIPL() returns MagickTrue if the image format type, identified by the
%  magick string, is IPL.
%
%  The format of the IsIPL method is:
%
%      MagickBooleanType IsIPL(const unsigned char *magick,const size_t length)
%
%  A description of each parameter follows:
%
%    o magick: compare image format pattern against these bytes.
%
%    o length: Specifies the length of the magick string.
%
*/
static MagickBooleanType IsIPL(const unsigned char *magick,const size_t length)
{
  if (length < 4)
    return(MagickFalse);
  /* The IPLab signature is the literal "data" tag. */
  if (LocaleNCompare((const char *) magick,"data",4) == 0)
    return(MagickTrue);
  return(MagickFalse);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   R e a d I P L I m a g e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ReadIPLImage() reads a Scanalytics IPLab image file and returns it.  It
%  allocates the memory necessary for the new Image structure and returns a
%  pointer to the new image.
%
%  According to the IPLab spec, the data is blocked out in five dimensions:
%  { t, z, c, y, x }.  When we return the image, the latter three are folded
%  into the standard "Image" structure.  The "scenes" (image_info->scene)
%  correspond to the order: { {t0,z0}, {t0, z1}, ..., {t1,z0}, {t1,z1}...
} % The number of scenes is t*z. % % The format of the ReadIPLImage method is: % % Image *ReadIPLImage(const ImageInfo *image_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o image_info: The image info. % % o exception: return any errors or warnings in this structure. % */ static void SetHeaderFromIPL(Image *image, IPLInfo *ipl){ image->columns = ipl->width; image->rows = ipl->height; image->depth = ipl->depth; image->x_resolution = 1; image->y_resolution = 1; } static Image *ReadIPLImage(const ImageInfo *image_info,ExceptionInfo *exception) { /* Declare variables. */ Image *image; MagickBooleanType status; register PixelPacket *q; unsigned char magick[12], *pixels; ssize_t count; ssize_t y; size_t t_count=0; size_t length; IPLInfo ipl_info; QuantumFormatType quantum_format; QuantumInfo *quantum_info; QuantumType quantum_type; /* Open Image */ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickSignature); if ( image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent, GetMagickModule(), "%s", image_info->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); image=AcquireImage(image_info); status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception); if (status == MagickFalse) { image=DestroyImageList(image); return((Image *) NULL); } /* Read IPL image */ /* Determine endianness If we get back "iiii", we have LSB,"mmmm", MSB */ count=ReadBlob(image,4,magick); (void) count; if((LocaleNCompare((char *) magick,"iiii",4) == 0)) image->endian=LSBEndian; else{ if((LocaleNCompare((char *) magick,"mmmm",4) == 0)) image->endian=MSBEndian; else{ ThrowReaderException(CorruptImageError, "ImproperImageHeader"); } } /* Skip o'er the next 8 bytes (garbage) */ count=ReadBlob(image, 8, magick); /* Excellent, now we read the header unimpeded. 
*/ count=ReadBlob(image,4,magick); if((LocaleNCompare((char *) magick,"data",4) != 0)) ThrowReaderException(CorruptImageError, "ImproperImageHeader"); ipl_info.size=ReadBlobLong(image); ipl_info.width=ReadBlobLong(image); ipl_info.height=ReadBlobLong(image); if((ipl_info.width == 0UL) || (ipl_info.height == 0UL)) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); ipl_info.colors=ReadBlobLong(image); if(ipl_info.colors == 3){ SetImageColorspace(image,sRGBColorspace);} else { image->colorspace = GRAYColorspace; } ipl_info.z=ReadBlobLong(image); ipl_info.time=ReadBlobLong(image); ipl_info.byteType=ReadBlobLong(image); /* Initialize Quantum Info */ switch (ipl_info.byteType) { case 0: ipl_info.depth=8; quantum_format = UnsignedQuantumFormat; break; case 1: ipl_info.depth=16; quantum_format = SignedQuantumFormat; break; case 2: ipl_info.depth=16; quantum_format = UnsignedQuantumFormat; break; case 3: ipl_info.depth=32; quantum_format = SignedQuantumFormat; break; case 4: ipl_info.depth=32; quantum_format = FloatingPointQuantumFormat; break; case 5: ipl_info.depth=8; quantum_format = UnsignedQuantumFormat; break; case 6: ipl_info.depth=16; quantum_format = UnsignedQuantumFormat; break; case 10: ipl_info.depth=64; quantum_format = FloatingPointQuantumFormat; break; default: ipl_info.depth=16; quantum_format = UnsignedQuantumFormat; break; } /* Set number of scenes of image */ SetHeaderFromIPL(image, &ipl_info); /* Thats all we need if we are pinging. 
*/ if (image_info->ping != MagickFalse) { (void) CloseBlob(image); return(GetFirstImageInList(image)); } length=image->columns; quantum_type=GetQuantumType(image,exception); do { SetHeaderFromIPL(image, &ipl_info); if ((image_info->ping != MagickFalse) && (image_info->number_scenes != 0)) if (image->scene >= (image_info->scene+image_info->number_scenes-1)) break; status=SetImageExtent(image,image->columns,image->rows); if (status == MagickFalse) { InheritException(exception,&image->exception); return(DestroyImageList(image)); } /* printf("Length: %.20g, Memory size: %.20g\n", (double) length,(double) image->depth); */ quantum_info=AcquireQuantumInfo(image_info,image); if (quantum_info == (QuantumInfo *) NULL) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); status=SetQuantumFormat(image,quantum_info,quantum_format); if (status == MagickFalse) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); pixels=GetQuantumPixels(quantum_info); if(image->columns != ipl_info.width){ /* printf("Columns not set correctly! 
Wanted: %.20g, got: %.20g\n", (double) ipl_info.width, (double) image->columns); */ } /* Covert IPL binary to pixel packets */ if(ipl_info.colors == 1){ for(y = 0; y < (ssize_t) image->rows; y++){ (void) ReadBlob(image, length*image->depth/8, pixels); q=QueueAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) break; (void) ImportQuantumPixels(image,(CacheView *) NULL,quantum_info, GrayQuantum,pixels,exception); if (SyncAuthenticPixels(image,exception) == MagickFalse) break; } } else{ for(y = 0; y < (ssize_t) image->rows; y++){ (void) ReadBlob(image, length*image->depth/8, pixels); q=QueueAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) break; (void) ImportQuantumPixels(image,(CacheView *) NULL,quantum_info, RedQuantum,pixels,exception); if (SyncAuthenticPixels(image,exception) == MagickFalse) break; } for(y = 0; y < (ssize_t) image->rows; y++){ (void) ReadBlob(image, length*image->depth/8, pixels); q=QueueAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) break; (void) ImportQuantumPixels(image,(CacheView *) NULL,quantum_info, GreenQuantum,pixels,exception); if (SyncAuthenticPixels(image,exception) == MagickFalse) break; } for(y = 0; y < (ssize_t) image->rows; y++){ (void) ReadBlob(image, length*image->depth/8, pixels); q=QueueAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) break; (void) ImportQuantumPixels(image,(CacheView *) NULL,quantum_info, BlueQuantum,pixels,exception); if (SyncAuthenticPixels(image,exception) == MagickFalse) break; } } SetQuantumImageType(image,quantum_type); t_count++; quantum_info = DestroyQuantumInfo(quantum_info); if (EOFBlob(image) != MagickFalse) { ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile", image->filename); break; } if(t_count < ipl_info.z * ipl_info.time){ /* Proceed to next image. 
*/ AcquireNextImage(image_info, image); if (GetNextImageInList(image) == (Image *) NULL) { image=DestroyImageList(image); return((Image *) NULL); } image=SyncNextImageInList(image); status=SetImageProgress(image,LoadImagesTag,TellBlob(image), GetBlobSize(image)); if (status == MagickFalse) break; } } while (t_count < ipl_info.z*ipl_info.time); CloseBlob(image); return(GetFirstImageInList(image)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e g i s t e r I P L I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RegisterIPLImage() add attributes for the Scanalytics IPL image format to the % list of supported formats. % % */ ModuleExport size_t RegisterIPLImage(void) { MagickInfo *entry; entry=SetMagickInfo("IPL"); entry->decoder=(DecodeImageHandler *) ReadIPLImage; entry->encoder=(EncodeImageHandler *) WriteIPLImage; entry->magick=(IsImageFormatHandler *) IsIPL; entry->adjoin=MagickTrue; entry->description=ConstantString("IPL Image Sequence"); entry->module=ConstantString("IPL"); entry->endian_support=MagickTrue; (void) RegisterMagickInfo(entry); return(MagickImageCoderSignature); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % U n r e g i s t e r I P L I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % UnregisterIPLImage() removes format registrations made by the % IPL module from the list of supported formats. 
%
%  The format of the UnregisterIPLImage method is:
%
%      UnregisterIPLImage(void)
%
*/
ModuleExport void UnregisterIPLImage(void)
{
  (void) UnregisterMagickInfo("IPL");
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   W r i t e I P L I m a g e                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  WriteIPLImage() writes an image to a file in Scanalytics IPLabimage format.
%
%  The format of the WriteIPLImage method is:
%
%      MagickBooleanType WriteIPLImage(const ImageInfo *image_info,Image *image)
%
%  A description of each parameter follows.
%
%    o image_info: The image info.
%
%    o image:  The image.
%
*/
static MagickBooleanType WriteIPLImage(const ImageInfo *image_info,Image *image)
{
  ExceptionInfo
    *exception;

  IPLInfo
    ipl_info;

  MagickBooleanType
    status;

  MagickOffsetType
    scene;

  register const PixelPacket
    *p;

  QuantumInfo
    *quantum_info;

  ssize_t
    y;

  unsigned char
    *pixels;

  /*
    Open output image file.
  */
  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickSignature);
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  status=OpenBlob(image_info,image,WriteBinaryBlobMode,&image->exception);
  if (status == MagickFalse)
    return(status);
  scene=0;
  quantum_info=AcquireQuantumInfo(image_info, image);
  /* Guard against allocation failure before dereferencing quantum_info. */
  if (quantum_info == (QuantumInfo *) NULL)
    ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed");
  if ((quantum_info->format == UndefinedQuantumFormat) &&
      (IsHighDynamicRangeImage(image,&image->exception) != MagickFalse))
    SetQuantumFormat(image,quantum_info,FloatingPointQuantumFormat);
  /* Map depth + quantum format onto the IPLab byteType code. */
  switch(quantum_info->depth){
  case 8:
    ipl_info.byteType = 0;
    break;
  case 16:
    if(quantum_info->format == SignedQuantumFormat){
      ipl_info.byteType = 2;
    }
    else{
      ipl_info.byteType = 1;
    }
    break;
  case 32:
    if(quantum_info->format == FloatingPointQuantumFormat){
      ipl_info.byteType = 3;
    }
    else{
      ipl_info.byteType = 4;
    }
    break;
  case 64:
    ipl_info.byteType = 10;
    break;
  default:
    ipl_info.byteType = 2;
    break;
  }
  ipl_info.z = (unsigned int) GetImageListLength(image);
  /* There is no current method for detecting whether we have T or Z stacks */
  ipl_info.time = 1;
  ipl_info.width = (unsigned int) image->columns;
  ipl_info.height = (unsigned int) image->rows;
  (void) TransformImageColorspace(image,sRGBColorspace);
  if(IssRGBCompatibleColorspace(image->colorspace) != MagickFalse) { ipl_info.colors = 3; }
  else{ ipl_info.colors = 1; }
  ipl_info.size = (unsigned int) (28 +
    ((image->depth)/8)*ipl_info.height*ipl_info.width*ipl_info.colors*ipl_info.z);

  /* Ok!  Calculations are done.  Lets write this puppy down! */

  /*
    Write IPL header.
  */
  /* Shockingly (maybe not if you have used IPLab), IPLab itself CANNOT read
     MSBEndian files!   The reader above can, but they cannot.  For
     compatability reasons, I will leave the code in here, but it is all but
     useless if you want to use IPLab. */
  if(image_info->endian == MSBEndian)
    (void) WriteBlob(image, 4, (const unsigned char *) "mmmm");
  else{
    image->endian = LSBEndian;
    (void) WriteBlob(image, 4, (const unsigned char *) "iiii");
  }
  (void) WriteBlobLong(image, 4);
  (void) WriteBlob(image, 4, (const unsigned char *) "100f");
  (void) WriteBlob(image, 4, (const unsigned char *) "data");
  (void) WriteBlobLong(image, ipl_info.size);
  (void) WriteBlobLong(image, ipl_info.width);
  (void) WriteBlobLong(image, ipl_info.height);
  (void) WriteBlobLong(image, ipl_info.colors);
  if(image_info->adjoin == MagickFalse)
    (void) WriteBlobLong(image, 1);
  else
    (void) WriteBlobLong(image, ipl_info.z);
  (void) WriteBlobLong(image, ipl_info.time);
  (void) WriteBlobLong(image, ipl_info.byteType);

  exception=(&image->exception);
  do
    {
      /*
        Convert MIFF to IPL raster pixels.
      */
      pixels=GetQuantumPixels(quantum_info);
      if(ipl_info.colors == 1){
        /* Grayscale frame */
        for(y = 0; y < (ssize_t) ipl_info.height; y++){
          p=GetAuthenticPixels(image,0,y,image->columns,1,exception);
          if (p == (PixelPacket *) NULL)
            break;
          (void) ExportQuantumPixels(image,(const CacheView *) NULL,
            quantum_info, GrayQuantum, pixels,&image->exception);
          (void) WriteBlob(image, image->columns*image->depth/8, pixels);
        }
      }
      if(ipl_info.colors == 3){
        /* Red frame */
        for(y = 0; y < (ssize_t) ipl_info.height; y++){
          p=GetAuthenticPixels(image,0,y,image->columns,1,exception);
          if (p == (PixelPacket *) NULL)
            break;
          (void) ExportQuantumPixels(image,(const CacheView *) NULL,
            quantum_info, RedQuantum, pixels,&image->exception);
          (void) WriteBlob(image, image->columns*image->depth/8, pixels);
        }

        /* Green frame */
        for(y = 0; y < (ssize_t) ipl_info.height; y++){
          p=GetVirtualPixels(image,0,y,image->columns,1,&image->exception);
          if (p == (PixelPacket *) NULL)
            break;
          (void) ExportQuantumPixels(image,(const CacheView *) NULL,
            quantum_info, GreenQuantum, pixels,&image->exception);
          (void) WriteBlob(image, image->columns*image->depth/8, pixels);
        }

        /* Blue frame */
        for(y = 0; y < (ssize_t) ipl_info.height; y++){
          p=GetVirtualPixels(image,0,y,image->columns,1,&image->exception);
          if (p == (PixelPacket *) NULL)
            break;
          (void) ExportQuantumPixels(image,(const CacheView *) NULL,
            quantum_info, BlueQuantum, pixels,&image->exception);
          (void) WriteBlob(image, image->columns*image->depth/8, pixels);
          if (image->previous == (Image *) NULL)
            {
              status=SetImageProgress(image,SaveImageTag,(MagickOffsetType) y,
                image->rows);
              if (status == MagickFalse)
                break;
            }
        }
      }
      if (GetNextImageInList(image) == (Image *) NULL)
        break;
      image=SyncNextImageInList(image);
      status=SetImageProgress(image,SaveImagesTag,scene++,
        GetImageListLength(image));
      if (status == MagickFalse)
        break;
    }while (image_info->adjoin != MagickFalse);
  /* Destroy the quantum info only after the last image: destroying it
     inside the loop (as before) left pixels/quantum_info dangling and was
     a use-after-free on multi-image (adjoin) writes. */
  quantum_info=DestroyQuantumInfo(quantum_info);

  (void) WriteBlob(image, 4, (const unsigned char *) "fini");
  (void) WriteBlobLong(image, 0);

  CloseBlob(image);
  return(MagickTrue);
}
./CrossVul/dataset_final_sorted/CWE-284/c/bad_4813_0
crossvul-cpp_data_bad_5017_0
/*
 * libndp.c - Neighbour discovery library
 * Copyright (C) 2013-2015 Jiri Pirko <jiri@resnulli.us>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <ctype.h>
#include <sys/socket.h>
#include <sys/select.h>
#include <netinet/in.h>
#include <netinet/icmp6.h>
#include <arpa/inet.h>
#include <net/ethernet.h>
#include <assert.h>
#include <ndp.h>

#include "ndp_private.h"
#include "list.h"

/**
 * SECTION: logging
 * @short_description: libndp logging facility
 */

/* Entry point used by the dbg()/info()/err() macros: forwards a printf-style
 * message to the context's currently installed log callback. */
void ndp_log(struct ndp *ndp, int priority,
	     const char *file, int line, const char *fn,
	     const char *format, ...)
{
	va_list args;

	va_start(args, format);
	ndp->log_fn(ndp, priority, file, line, fn, format, args);
	va_end(args);
}

/* Default log sink: writes "libndp: <fn>: <message>" to stderr.  Note it
 * ignores priority/file/line — only the function name is printed. */
static void log_stderr(struct ndp *ndp, int priority,
		       const char *file, int line, const char *fn,
		       const char *format, va_list args)
{
	fprintf(stderr, "libndp: %s: ", fn);
	vfprintf(stderr, format, args);
	fprintf(stderr, "\n");
}

/* Parse a priority string: either a decimal number or one of the symbolic
 * names "err", "info", "debug" (prefix match).  Unknown input yields 0. */
static int log_priority(const char *priority)
{
	char *endptr;
	int prio;

	prio = strtol(priority, &endptr, 10);
	if (endptr[0] == '\0' || isspace(endptr[0]))
		return prio;
	if (strncmp(priority, "err", 3) == 0)
		return LOG_ERR;
	if (strncmp(priority, "info", 4) == 0)
		return LOG_INFO;
	if (strncmp(priority, "debug", 5) == 0)
		return LOG_DEBUG;
	return 0;
}

/**
 * ndp_set_log_fn:
 * @ndp: libndp library context
 * @log_fn: function to be called for logging messages
 *
 * The built-in logging writes to stderr. It can be
 * overridden by a custom function, to plug log messages
 * into the user's logging functionality.
 **/
NDP_EXPORT
void ndp_set_log_fn(struct ndp *ndp,
		    void (*log_fn)(struct ndp *ndp, int priority,
				   const char *file, int line, const char *fn,
				   const char *format, va_list args))
{
	ndp->log_fn = log_fn;
	dbg(ndp, "Custom logging function %p registered.", log_fn);
}

/**
 * ndp_get_log_priority:
 * @ndp: libndp library context
 *
 * Returns: the current logging priority.
 **/
NDP_EXPORT
int ndp_get_log_priority(struct ndp *ndp)
{
	return ndp->log_priority;
}

/**
 * ndp_set_log_priority:
 * @ndp: libndp library context
 * @priority: the new logging priority
 *
 * Set the current logging priority. The value controls which messages
 * are logged.
 **/
NDP_EXPORT
void ndp_set_log_priority(struct ndp *ndp, int priority)
{
	ndp->log_priority = priority;
}

/**
 * SECTION: helpers
 * @short_description: various internal helper functions
 */

#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
#define BUG_ON(expr) { if (expr) assert(0); }

/* calloc wrapper: allocate zero-initialized memory. */
static void *myzalloc(size_t size)
{
	return calloc(1, size);
}

/* recvmsg wrapper for the ICMPv6 raw socket.  On success fills in the
 * sender address, the receiving interface index and updates *buflen to the
 * number of bytes received.  Returns 0 or -errno. */
static int myrecvfrom6(int sockfd, void *buf, size_t *buflen, int flags,
		       struct in6_addr *addr, uint32_t *ifindex)
{
	struct sockaddr_in6 sin6;
	unsigned char cbuf[CMSG_SPACE(sizeof(struct in6_pktinfo))];
	struct iovec iovec;
	struct msghdr msghdr;
	struct cmsghdr *cmsghdr;
	ssize_t len;

	iovec.iov_len = *buflen;
	iovec.iov_base = buf;
	memset(&msghdr, 0, sizeof(msghdr));
	msghdr.msg_name = &sin6;
	msghdr.msg_namelen = sizeof(sin6);
	msghdr.msg_iov = &iovec;
	msghdr.msg_iovlen = 1;
	msghdr.msg_control = cbuf;
	msghdr.msg_controllen = sizeof(cbuf);

	len = recvmsg(sockfd, &msghdr, flags);
	if (len == -1)
		return -errno;
	*buflen = len;

	/* Set ifindex to scope_id now. But since scope_id gets not
	 * set by kernel for linklocal addresses, use pktinfo to obtain that
	 * value right after.
	 */
	*ifindex = sin6.sin6_scope_id;
	for (cmsghdr = CMSG_FIRSTHDR(&msghdr); cmsghdr;
	     cmsghdr = CMSG_NXTHDR(&msghdr, cmsghdr)) {
		if (cmsghdr->cmsg_level == IPPROTO_IPV6 &&
		    cmsghdr->cmsg_type == IPV6_PKTINFO &&
		    cmsghdr->cmsg_len == CMSG_LEN(sizeof(struct in6_pktinfo))) {
			struct in6_pktinfo *pktinfo;

			pktinfo = (struct in6_pktinfo *) CMSG_DATA(cmsghdr);
			*ifindex = pktinfo->ipi6_ifindex;
		}
	}
	*addr = sin6.sin6_addr;

	return 0;
}

/* sendto wrapper: send buf to the given IPv6 address/interface, retrying
 * on EINTR.  Returns 0 or -errno. */
static int mysendto6(int sockfd, void *buf, size_t buflen, int flags,
		     struct in6_addr *addr, uint32_t ifindex)
{
	struct sockaddr_in6 sin6;
	ssize_t ret;

	memset(&sin6, 0, sizeof(sin6));
	memcpy(&sin6.sin6_addr, addr, sizeof(sin6.sin6_addr));
	sin6.sin6_scope_id = ifindex;
resend:
	ret = sendto(sockfd, buf, buflen, flags, &sin6, sizeof(sin6));
	if (ret == -1) {
		switch(errno) {
		case EINTR:
			goto resend;
		default:
			return -errno;
		}
	}
	return 0;
}

/* Format an IPv6 address for logging.  NOTE: returns a pointer to a static
 * buffer — not thread safe, result valid until the next call. */
static const char *str_in6_addr(struct in6_addr *addr)
{
	static char buf[INET6_ADDRSTRLEN];

	return inet_ntop(AF_INET6, addr, buf, sizeof(buf));
}

/**
 * SECTION: NDP implementation
 * @short_description: functions that actually implements NDP
 */

/* Open the raw ICMPv6 socket used for all NDP traffic and enable pktinfo
 * reception plus hop limit 255 (required by RFC 4861 for ND packets). */
static int ndp_sock_open(struct ndp *ndp)
{
	int sock;
//	struct icmp6_filter flt;
	int ret;
	int err;
	int val;

	sock = socket(PF_INET6, SOCK_RAW, IPPROTO_ICMPV6);
	if (sock == -1) {
		err(ndp, "Failed to create ICMP6 socket.");
		return -errno;
	}

	val = 1;
	ret = setsockopt(sock, IPPROTO_IPV6, IPV6_RECVPKTINFO,
			 &val, sizeof(val));
	if (ret == -1) {
		err(ndp, "Failed to setsockopt IPV6_RECVPKTINFO.");
		err = -errno;
		goto close_sock;
	}

	val = 255;
	ret = setsockopt(sock, IPPROTO_IPV6, IPV6_MULTICAST_HOPS,
			 &val, sizeof(val));
	if (ret == -1) {
		err(ndp, "Failed to setsockopt IPV6_MULTICAST_HOPS.");
		err = -errno;
		goto close_sock;
	}

	ndp->sock = sock;
	return 0;
close_sock:
	close(sock);
	return err;
}

static void ndp_sock_close(struct ndp *ndp)
{
	close(ndp->sock);
}

/* Per-type views of a message payload.  The single pointer member MUST be
 * first in each struct so ndp_msg_init() can alias them through
 * nd_msg.generic.dataptr. */
struct ndp_msggeneric {
	void *dataptr; /* must be first */
};

struct ndp_msgrs {
	struct nd_router_solicit *rs; /* must be first */
};

struct ndp_msgra {
	struct nd_router_advert *ra; /* must be first */
};

struct ndp_msgns {
	struct nd_neighbor_solicit *ns; /* must be first */
};

struct ndp_msgna {
	struct nd_neighbor_advert *na; /* must be first */
};

struct ndp_msgr {
	struct nd_redirect *r; /* must be first */
};

/* One NDP message: raw wire buffer plus parsed bookkeeping. */
struct ndp_msg {
#define NDP_MSG_BUFLEN 1500
	unsigned char		buf[NDP_MSG_BUFLEN];
	size_t			len;
	struct in6_addr		addrto;
	uint32_t		ifindex;
	struct icmp6_hdr	*icmp6_hdr;
	unsigned char		*opts_start; /* pointer to buf at the place
						where opts start */
	union {
		struct ndp_msggeneric	generic;
		struct ndp_msgrs	rs;
		struct ndp_msgra	ra;
		struct ndp_msgns	ns;
		struct ndp_msgna	na;
		struct ndp_msgr		r;
	} nd_msg;
};

/* Static description of each NDP message type: abbreviation for logging,
 * ICMPv6 type byte, fixed header size, optional destination fix-up. */
struct ndp_msg_type_info {
#define NDP_STRABBR_SIZE 4
	char strabbr[NDP_STRABBR_SIZE];
	uint8_t raw_type;
	size_t raw_struct_size;
	void (*addrto_adjust)(struct in6_addr *addr);
};

/* If the destination is unspecified (::), replace it with the all-nodes
 * link-local multicast address ff02::1. */
static void ndp_msg_addrto_adjust_all_nodes(struct in6_addr *addr)
{
	struct in6_addr any = IN6ADDR_ANY_INIT;

	if (memcmp(addr, &any, sizeof(any)))
		return;
	addr->s6_addr32[0] = htonl(0xFF020000);
	addr->s6_addr32[1] = 0;
	addr->s6_addr32[2] = 0;
	addr->s6_addr32[3] = htonl(0x1);
}

/* If the destination is unspecified (::), replace it with the all-routers
 * link-local multicast address ff02::2. */
static void ndp_msg_addrto_adjust_all_routers(struct in6_addr *addr)
{
	struct in6_addr any = IN6ADDR_ANY_INIT;

	if (memcmp(addr, &any, sizeof(any)))
		return;
	addr->s6_addr32[0] = htonl(0xFF020000);
	addr->s6_addr32[1] = 0;
	addr->s6_addr32[2] = 0;
	addr->s6_addr32[3] = htonl(0x2);
}

static struct ndp_msg_type_info ndp_msg_type_info_list[] =
{
	[NDP_MSG_RS] = {
		.strabbr = "RS",
		.raw_type = ND_ROUTER_SOLICIT,
		.raw_struct_size = sizeof(struct nd_router_solicit),
		.addrto_adjust = ndp_msg_addrto_adjust_all_routers,
	},
	[NDP_MSG_RA] = {
		.strabbr = "RA",
		.raw_type = ND_ROUTER_ADVERT,
		.raw_struct_size = sizeof(struct nd_router_advert),
	},
	[NDP_MSG_NS] = {
		.strabbr = "NS",
		.raw_type = ND_NEIGHBOR_SOLICIT,
		.raw_struct_size = sizeof(struct nd_neighbor_solicit),
		.addrto_adjust = ndp_msg_addrto_adjust_all_nodes,
	},
	[NDP_MSG_NA] = {
		.strabbr = "NA",
		.raw_type = ND_NEIGHBOR_ADVERT,
		.raw_struct_size = sizeof(struct nd_neighbor_advert),
	},
	[NDP_MSG_R] = {
		.strabbr = "R",
		.raw_type = ND_REDIRECT,
		.raw_struct_size = sizeof(struct nd_redirect),
	},
};

#define NDP_MSG_TYPE_LIST_SIZE ARRAY_SIZE(ndp_msg_type_info_list)

struct ndp_msg_type_info *ndp_msg_type_info(enum ndp_msg_type msg_type)
{
	return &ndp_msg_type_info_list[msg_type];
}

/* Map an ICMPv6 type byte back to the library's enum.  Returns 0 on
 * success, -ENOENT if the byte is not a known NDP message type. */
static int ndp_msg_type_by_raw_type(enum ndp_msg_type *p_msg_type,
				    uint8_t raw_type)
{
	int i;

	for (i = 0; i < NDP_MSG_TYPE_LIST_SIZE; i++) {
		if (ndp_msg_type_info(i)->raw_type == raw_type) {
			*p_msg_type = i;
			return 0;
		}
	}
	return -ENOENT;
}

/* Sanity check: the received payload must be at least as large as the
 * fixed header of its message type. */
static bool ndp_msg_check_valid(struct ndp_msg *msg)
{
	size_t len = ndp_msg_payload_len(msg);
	enum ndp_msg_type msg_type = ndp_msg_type(msg);

	if (len < ndp_msg_type_info(msg_type)->raw_struct_size)
		return false;
	return true;
}

static struct ndp_msg *ndp_msg_alloc(void)
{
	struct ndp_msg *msg;

	msg = myzalloc(sizeof(*msg));
	if (!msg)
		return NULL;
	msg->icmp6_hdr = (struct icmp6_hdr *) msg->buf;
	return msg;
}

static void ndp_msg_type_set(struct ndp_msg *msg, enum ndp_msg_type msg_type);

static void ndp_msg_init(struct ndp_msg *msg, enum ndp_msg_type msg_type)
{
	size_t raw_struct_size = ndp_msg_type_info(msg_type)->raw_struct_size;

	ndp_msg_type_set(msg, msg_type);
	msg->len = raw_struct_size;
	msg->opts_start = msg->buf + raw_struct_size;

	/* Set-up "first pointers" in all ndp_msgrs, ndp_msgra, ndp_msgns,
	 * ndp_msgna, ndp_msgr structures.
	 */
	msg->nd_msg.generic.dataptr = ndp_msg_payload(msg);
}

/**
 * ndp_msg_new:
 * @p_msg: pointer where new message structure address will be stored
 * @msg_type: message type
 *
 * Allocate new message structure of a specified type and initialize it.
 *
 * Returns: zero on success or negative number in case of an error.
**/ NDP_EXPORT int ndp_msg_new(struct ndp_msg **p_msg, enum ndp_msg_type msg_type) { struct ndp_msg *msg; if (msg_type == NDP_MSG_ALL) return -EINVAL; msg = ndp_msg_alloc(); if (!msg) return -ENOMEM; ndp_msg_init(msg, msg_type); *p_msg = msg; return 0; } /** * ndp_msg_destroy: * * Destroy message structure. **/ NDP_EXPORT void ndp_msg_destroy(struct ndp_msg *msg) { free(msg); } /** * ndp_msg_payload: * @msg: message structure * * Get raw Neighbour discovery packet data. * * Returns: pointer to raw data. **/ NDP_EXPORT void *ndp_msg_payload(struct ndp_msg *msg) { return msg->buf; } /** * ndp_msg_payload_maxlen: * @msg: message structure * * Get raw Neighbour discovery packet data maximum length. * * Returns: length in bytes. **/ NDP_EXPORT size_t ndp_msg_payload_maxlen(struct ndp_msg *msg) { return sizeof(msg->buf); } /** * ndp_msg_payload_len: * @msg: message structure * * Get raw Neighbour discovery packet data length. * * Returns: length in bytes. **/ NDP_EXPORT size_t ndp_msg_payload_len(struct ndp_msg *msg) { return msg->len; } /** * ndp_msg_payload_len_set: * @msg: message structure * * Set raw Neighbour discovery packet data length. **/ NDP_EXPORT void ndp_msg_payload_len_set(struct ndp_msg *msg, size_t len) { if (len > sizeof(msg->buf)) len = sizeof(msg->buf); msg->len = len; } /** * ndp_msg_payload_opts: * @msg: message structure * * Get raw Neighbour discovery packet options part data. * * Returns: pointer to raw data. **/ NDP_EXPORT void *ndp_msg_payload_opts(struct ndp_msg *msg) { return msg->opts_start; } static void *ndp_msg_payload_opts_offset(struct ndp_msg *msg, int offset) { unsigned char *ptr = ndp_msg_payload_opts(msg); return ptr + offset; } /** * ndp_msg_payload_opts_len: * @msg: message structure * * Get raw Neighbour discovery packet options part data length. * * Returns: length in bytes. 
**/ NDP_EXPORT size_t ndp_msg_payload_opts_len(struct ndp_msg *msg) { return msg->len - (msg->opts_start - msg->buf); } /** * ndp_msgrs: * @msg: message structure * * Get RS message structure by passed @msg. * * Returns: RS message structure or NULL in case the message is not of type RS. **/ NDP_EXPORT struct ndp_msgrs *ndp_msgrs(struct ndp_msg *msg) { if (ndp_msg_type(msg) != NDP_MSG_RS) return NULL; return &msg->nd_msg.rs; } /** * ndp_msgra: * @msg: message structure * * Get RA message structure by passed @msg. * * Returns: RA message structure or NULL in case the message is not of type RA. **/ NDP_EXPORT struct ndp_msgra *ndp_msgra(struct ndp_msg *msg) { if (ndp_msg_type(msg) != NDP_MSG_RA) return NULL; return &msg->nd_msg.ra; } /** * ndp_msgns: * @msg: message structure * * Get NS message structure by passed @msg. * * Returns: NS message structure or NULL in case the message is not of type NS. **/ NDP_EXPORT struct ndp_msgns *ndp_msgns(struct ndp_msg *msg) { if (ndp_msg_type(msg) != NDP_MSG_NS) return NULL; return &msg->nd_msg.ns; } /** * ndp_msgna: * @msg: message structure * * Get NA message structure by passed @msg. * * Returns: NA message structure or NULL in case the message is not of type NA. **/ NDP_EXPORT struct ndp_msgna *ndp_msgna(struct ndp_msg *msg) { if (ndp_msg_type(msg) != NDP_MSG_NA) return NULL; return &msg->nd_msg.na; } /** * ndp_msgr: * @msg: message structure * * Get R message structure by passed @msg. * * Returns: R message structure or NULL in case the message is not of type R. **/ NDP_EXPORT struct ndp_msgr *ndp_msgr(struct ndp_msg *msg) { if (ndp_msg_type(msg) != NDP_MSG_R) return NULL; return &msg->nd_msg.r; } /** * ndp_msg_type: * @msg: message structure * * Get type of message. 
* * Returns: Message type **/ NDP_EXPORT enum ndp_msg_type ndp_msg_type(struct ndp_msg *msg) { enum ndp_msg_type msg_type; int err; err = ndp_msg_type_by_raw_type(&msg_type, msg->icmp6_hdr->icmp6_type); /* Type should be always set correctly (ensured by ndp_msg_init) */ BUG_ON(err); return msg_type; } static void ndp_msg_type_set(struct ndp_msg *msg, enum ndp_msg_type msg_type) { msg->icmp6_hdr->icmp6_type = ndp_msg_type_info(msg_type)->raw_type; } /** * ndp_msg_addrto: * @msg: message structure * * Get "to address" of message. * * Returns: pointer to address. **/ NDP_EXPORT struct in6_addr *ndp_msg_addrto(struct ndp_msg *msg) { return &msg->addrto; } /** * ndp_msg_ifindex: * @msg: message structure * * Get interface index of message. * * Returns: Interface index **/ NDP_EXPORT uint32_t ndp_msg_ifindex(struct ndp_msg *msg) { return msg->ifindex; } /** * ndp_msg_ifindex_set: * @msg: message structure * * Set raw interface index of message. **/ NDP_EXPORT void ndp_msg_ifindex_set(struct ndp_msg *msg, uint32_t ifindex) { msg->ifindex = ifindex; } /** * ndp_msg_send: * @ndp: libndp library context * @msg: message structure * * Send message. * * Returns: zero on success or negative number in case of an error. **/ NDP_EXPORT int ndp_msg_send(struct ndp *ndp, struct ndp_msg *msg) { return ndp_msg_send_with_flags(ndp, msg, ND_OPT_NORMAL); } /** * ndp_msg_send_with_flags: * @ndp: libndp library context * @msg: message structure * @flags: option flags within message type * * Send message. * * Returns: zero on success or negative number in case of an error. 
**/ NDP_EXPORT int ndp_msg_send_with_flags(struct ndp *ndp, struct ndp_msg *msg, uint8_t flags) { enum ndp_msg_type msg_type = ndp_msg_type(msg); if (ndp_msg_type_info(msg_type)->addrto_adjust) ndp_msg_type_info(msg_type)->addrto_adjust(&msg->addrto); switch (msg_type) { case NDP_MSG_NA: if (flags & ND_OPT_NA_UNSOL) { ndp_msgna_flag_override_set((struct ndp_msgna*)&msg->nd_msg, true); ndp_msgna_flag_solicited_set((struct ndp_msgna*)&msg->nd_msg, false); ndp_msg_addrto_adjust_all_nodes(&msg->addrto); } else { ndp_msgna_flag_solicited_set((struct ndp_msgna*)&msg->nd_msg, true); } break; default: break; } return mysendto6(ndp->sock, msg->buf, msg->len, 0, &msg->addrto, msg->ifindex); } /** * SECTION: msgra getters/setters * @short_description: Getters and setters for RA message */ /** * ndp_msgra_curhoplimit: * @msgra: RA message structure * * Get RA curhoplimit. * * Returns: curhoplimit. **/ NDP_EXPORT uint8_t ndp_msgra_curhoplimit(struct ndp_msgra *msgra) { return msgra->ra->nd_ra_curhoplimit; } /** * ndp_msgra_curhoplimit_set: * @msgra: RA message structure * * Set RA curhoplimit. **/ NDP_EXPORT void ndp_msgra_curhoplimit_set(struct ndp_msgra *msgra, uint8_t curhoplimit) { msgra->ra->nd_ra_curhoplimit = curhoplimit; } /** * ndp_msgra_flag_managed: * @msgra: RA message structure * * Get RA managed flag. * * Returns: managed flag. **/ NDP_EXPORT bool ndp_msgra_flag_managed(struct ndp_msgra *msgra) { return msgra->ra->nd_ra_flags_reserved & ND_RA_FLAG_MANAGED; } /** * ndp_msgra_flag_managed_set: * @msgra: RA message structure * * Set RA managed flag. **/ NDP_EXPORT void ndp_msgra_flag_managed_set(struct ndp_msgra *msgra, bool flag_managed) { if (flag_managed) msgra->ra->nd_ra_flags_reserved |= ND_RA_FLAG_MANAGED; else msgra->ra->nd_ra_flags_reserved &= ~ND_RA_FLAG_MANAGED; } /** * ndp_msgra_flag_other: * @msgra: RA message structure * * Get RA other flag. * * Returns: other flag. 
**/ NDP_EXPORT bool ndp_msgra_flag_other(struct ndp_msgra *msgra) { return msgra->ra->nd_ra_flags_reserved & ND_RA_FLAG_OTHER; } /** * ndp_msgra_flag_other_set: * @msgra: RA message structure * * Set RA other flag. **/ NDP_EXPORT void ndp_msgra_flag_other_set(struct ndp_msgra *msgra, bool flag_other) { if (flag_other) msgra->ra->nd_ra_flags_reserved |= ND_RA_FLAG_OTHER; else msgra->ra->nd_ra_flags_reserved &= ~ND_RA_FLAG_OTHER; } /** * ndp_msgra_flag_home_agent: * @msgra: RA message structure * * Get RA home_agent flag. * * Returns: home_agent flag. **/ NDP_EXPORT bool ndp_msgra_flag_home_agent(struct ndp_msgra *msgra) { return msgra->ra->nd_ra_flags_reserved & ND_RA_FLAG_HOME_AGENT; } /** * ndp_msgra_flag_home_agent_set: * @msgra: RA message structure * * Set RA home_agent flag. **/ NDP_EXPORT void ndp_msgra_flag_home_agent_set(struct ndp_msgra *msgra, bool flag_home_agent) { if (flag_home_agent) msgra->ra->nd_ra_flags_reserved |= ND_RA_FLAG_HOME_AGENT; else msgra->ra->nd_ra_flags_reserved &= ~ND_RA_FLAG_HOME_AGENT; } /** * ndp_msgra_route_preference: * @msgra: RA message structure * * Get route preference. * * Returns: route preference. **/ NDP_EXPORT enum ndp_route_preference ndp_msgra_route_preference(struct ndp_msgra *msgra) { uint8_t prf = (msgra->ra->nd_ra_flags_reserved >> 3) & 3; /* rfc4191 says: * If the Router Lifetime is zero, the preference value MUST be set to * (00) by the sender and MUST be ignored by the receiver. * If the Reserved (10) value is received, the receiver MUST treat the * value as if it were (00). */ if (prf == 2 || !ndp_msgra_router_lifetime(msgra)) prf = 0; return prf; } /** * ndp_msgra_route_preference_set: * @msgra: RA message structure * @pref: preference * * Set route preference. 
**/ NDP_EXPORT void ndp_msgra_route_preference_set(struct ndp_msgra *msgra, enum ndp_route_preference pref) { msgra->ra->nd_ra_flags_reserved &= ~(3 << 3); msgra->ra->nd_ra_flags_reserved |= (pref << 3); } /** * ndp_msgra_router_lifetime: * @msgra: RA message structure * * Get RA router lifetime. * * Returns: router lifetime in seconds. **/ NDP_EXPORT uint16_t ndp_msgra_router_lifetime(struct ndp_msgra *msgra) { return ntohs(msgra->ra->nd_ra_router_lifetime); } /** * ndp_msgra_router_lifetime_set: * @msgra: RA message structure * * Set RA router lifetime. **/ NDP_EXPORT void ndp_msgra_router_lifetime_set(struct ndp_msgra *msgra, uint16_t router_lifetime) { msgra->ra->nd_ra_router_lifetime = htons(router_lifetime); } /** * ndp_msgra_reachable_time: * @msgra: RA message structure * * Get RA reachable time. * * Returns: reachable time in milliseconds. **/ NDP_EXPORT uint32_t ndp_msgra_reachable_time(struct ndp_msgra *msgra) { return ntohl(msgra->ra->nd_ra_reachable); } /** * ndp_msgra_reachable_time_set: * @msgra: RA message structure * * Set RA reachable time. **/ NDP_EXPORT void ndp_msgra_reachable_time_set(struct ndp_msgra *msgra, uint32_t reachable_time) { msgra->ra->nd_ra_reachable = htonl(reachable_time); } /** * ndp_msgra_retransmit_time: * @msgra: RA message structure * * Get RA retransmit time. * * Returns: retransmit time in milliseconds. **/ NDP_EXPORT uint32_t ndp_msgra_retransmit_time(struct ndp_msgra *msgra) { return ntohl(msgra->ra->nd_ra_retransmit); } /** * ndp_msgra_retransmit_time_set: * @msgra: RA message structure * * Set RA retransmit time. **/ NDP_EXPORT void ndp_msgra_retransmit_time_set(struct ndp_msgra *msgra, uint32_t retransmit_time) { msgra->ra->nd_ra_retransmit = htonl(retransmit_time); } /** * SECTION: msgna getters/setters * @short_description: Getters and setters for NA message */ /** * ndp_msgna_flag_router: * @msgna: NA message structure * * Get NA router flag. * * Returns: router flag. 
**/ NDP_EXPORT bool ndp_msgna_flag_router(struct ndp_msgna *msgna) { return msgna->na->nd_na_flags_reserved & ND_NA_FLAG_ROUTER; } /** * ndp_msgna_flag_router_set: * @msgna: NA message structure * * Set NA router flag. **/ NDP_EXPORT void ndp_msgna_flag_router_set(struct ndp_msgna *msgna, bool flag_router) { if (flag_router) msgna->na->nd_na_flags_reserved |= ND_NA_FLAG_ROUTER; else msgna->na->nd_na_flags_reserved &= ~ND_NA_FLAG_ROUTER; } /** * ndp_msgna_flag_solicited: * @msgna: NA message structure * * Get NA solicited flag. * * Returns: solicited flag. **/ NDP_EXPORT bool ndp_msgna_flag_solicited(struct ndp_msgna *msgna) { return msgna->na->nd_na_flags_reserved & ND_NA_FLAG_SOLICITED; } /** * ndp_msgna_flag_solicited_set: * @msgna: NA message structure * * Set NA managed flag. **/ NDP_EXPORT void ndp_msgna_flag_solicited_set(struct ndp_msgna *msgna, bool flag_solicited) { if (flag_solicited) msgna->na->nd_na_flags_reserved |= ND_NA_FLAG_SOLICITED; else msgna->na->nd_na_flags_reserved &= ~ND_NA_FLAG_SOLICITED; } /** * ndp_msgna_flag_override: * @msgna: NA message structure * * Get NA override flag. * * Returns: override flag. **/ NDP_EXPORT bool ndp_msgna_flag_override(struct ndp_msgna *msgna) { return msgna->na->nd_na_flags_reserved & ND_NA_FLAG_OVERRIDE; } /** * ndp_msgna_flag_override_set: * @msgra: NA message structure * * Set NA override flag. 
*/ NDP_EXPORT void ndp_msgna_flag_override_set(struct ndp_msgna *msgna, bool flag_override) { if (flag_override) msgna->na->nd_na_flags_reserved |= ND_NA_FLAG_OVERRIDE; else msgna->na->nd_na_flags_reserved &= ~ND_NA_FLAG_OVERRIDE; } /** * SECTION: msg_opt infrastructure * @short_description: Infrastructure for options */ struct ndp_msg_opt_type_info { uint8_t raw_type; size_t raw_struct_size; bool (*check_valid)(void *opt_data); }; static bool ndp_msg_opt_route_check_valid(void *opt_data) { struct __nd_opt_route_info *ri = opt_data; /* rfc4191 says: * If the Reserved (10) value is received, the Route Information Option * MUST be ignored. */ if (((ri->nd_opt_ri_prf_reserved >> 3) & 3) == 2) return false; return true; } static struct ndp_msg_opt_type_info ndp_msg_opt_type_info_list[] = { [NDP_MSG_OPT_SLLADDR] = { .raw_type = ND_OPT_SOURCE_LINKADDR, }, [NDP_MSG_OPT_TLLADDR] = { .raw_type = ND_OPT_TARGET_LINKADDR, }, [NDP_MSG_OPT_PREFIX] = { .raw_type = ND_OPT_PREFIX_INFORMATION, .raw_struct_size = sizeof(struct nd_opt_prefix_info), }, [NDP_MSG_OPT_REDIR] = { .raw_type = ND_OPT_REDIRECTED_HEADER, }, [NDP_MSG_OPT_MTU] = { .raw_type = ND_OPT_MTU, .raw_struct_size = sizeof(struct nd_opt_mtu), }, [NDP_MSG_OPT_ROUTE] = { .raw_type = __ND_OPT_ROUTE_INFO, .raw_struct_size = sizeof(struct __nd_opt_route_info), .check_valid = ndp_msg_opt_route_check_valid, }, [NDP_MSG_OPT_RDNSS] = { .raw_type = __ND_OPT_RDNSS, .raw_struct_size = sizeof(struct __nd_opt_rdnss), }, [NDP_MSG_OPT_DNSSL] = { .raw_type = __ND_OPT_DNSSL, .raw_struct_size = sizeof(struct __nd_opt_dnssl), }, }; #define NDP_MSG_OPT_TYPE_LIST_SIZE ARRAY_SIZE(ndp_msg_opt_type_info_list) struct ndp_msg_opt_type_info *ndp_msg_opt_type_info(enum ndp_msg_opt_type msg_opt_type) { return &ndp_msg_opt_type_info_list[msg_opt_type]; } struct ndp_msg_opt_type_info *ndp_msg_opt_type_info_by_raw_type(uint8_t raw_type) { struct ndp_msg_opt_type_info *info; int i; for (i = 0; i < NDP_MSG_OPT_TYPE_LIST_SIZE; i++) { info = 
&ndp_msg_opt_type_info_list[i]; if (info->raw_type == raw_type) return info; } return NULL; } /** * ndp_msg_next_opt_offset: * @msg: message structure * @offset: option payload offset * @opt_type: option type * * Find next offset of option of given type. If offset is -1, start from * beginning, otherwise start from the given offset. * This funstion is internally used by ndp_msg_opt_for_each_offset() macro. * * Returns: offset in opt payload of found opt of -1 in case it was not found. **/ NDP_EXPORT int ndp_msg_next_opt_offset(struct ndp_msg *msg, int offset, enum ndp_msg_opt_type opt_type) { unsigned char *opts_start = ndp_msg_payload_opts(msg); unsigned char *ptr = opts_start; size_t len = ndp_msg_payload_opts_len(msg); uint8_t opt_raw_type = ndp_msg_opt_type_info(opt_type)->raw_type; bool ignore = true; if (offset == -1) { offset = 0; ignore = false; } ptr += offset; len -= offset; while (len > 0) { uint8_t cur_opt_raw_type = ptr[0]; unsigned int cur_opt_len = ptr[1] << 3; /* convert to bytes */ if (!cur_opt_len || len < cur_opt_len) break; if (cur_opt_raw_type == opt_raw_type && !ignore) return ptr - opts_start; ptr += cur_opt_len; len -= cur_opt_len; ignore = false; } return -1; } #define __INVALID_OPT_TYPE_MAGIC 0xff /* * Check for validity of options and mark by magic opt type in case it is not * so ndp_msg_next_opt_offset() will ignore it. 
*/ static bool ndp_msg_check_opts(struct ndp_msg *msg) { unsigned char *ptr = ndp_msg_payload_opts(msg); size_t len = ndp_msg_payload_opts_len(msg); struct ndp_msg_opt_type_info *info; while (len > 0) { uint8_t cur_opt_raw_type = ptr[0]; unsigned int cur_opt_len = ptr[1] << 3; /* convert to bytes */ if (!cur_opt_len) return false; if (len < cur_opt_len) break; info = ndp_msg_opt_type_info_by_raw_type(cur_opt_raw_type); if (info) { if (cur_opt_len < info->raw_struct_size || (info->check_valid && !info->check_valid(ptr))) ptr[0] = __INVALID_OPT_TYPE_MAGIC; } ptr += cur_opt_len; len -= cur_opt_len; } return true; } /** * SECTION: msg_opt getters/setters * @short_description: Getters and setters for options */ /** * ndp_msg_opt_slladdr: * @msg: message structure * @offset: in-message offset * * Get source linkaddr. * User should use this function only inside ndp_msg_opt_for_each_offset() * macro loop. * * Returns: pointer to source linkaddr. **/ NDP_EXPORT unsigned char *ndp_msg_opt_slladdr(struct ndp_msg *msg, int offset) { unsigned char *opt_data = ndp_msg_payload_opts_offset(msg, offset); return &opt_data[2]; } /** * ndp_msg_opt_slladdr_len: * @msg: message structure * @offset: in-message offset * * Get source linkaddr length. * User should use this function only inside ndp_msg_opt_for_each_offset() * macro loop. * * Returns: source linkaddr length. **/ NDP_EXPORT size_t ndp_msg_opt_slladdr_len(struct ndp_msg *msg, int offset) { return ETH_ALEN; } /** * ndp_msg_opt_tlladdr: * @msg: message structure * @offset: in-message offset * * Get target linkaddr. * User should use this function only inside ndp_msg_opt_for_each_offset() * macro loop. * * Returns: pointer to target linkaddr. 
**/ NDP_EXPORT unsigned char *ndp_msg_opt_tlladdr(struct ndp_msg *msg, int offset) { unsigned char *opt_data = ndp_msg_payload_opts_offset(msg, offset); return &opt_data[2]; } /** * ndp_msg_opt_tlladdr_len: * @msg: message structure * @offset: in-message offset * * Get target linkaddr length. * User should use this function only inside ndp_msg_opt_for_each_offset() * macro loop. * * Returns: target linkaddr length. **/ NDP_EXPORT size_t ndp_msg_opt_tlladdr_len(struct ndp_msg *msg, int offset) { return ETH_ALEN; } /** * ndp_msg_opt_prefix: * @msg: message structure * @offset: in-message offset * * Get prefix addr. * User should use this function only inside ndp_msg_opt_for_each_offset() * macro loop. * * Returns: pointer to address. **/ NDP_EXPORT struct in6_addr *ndp_msg_opt_prefix(struct ndp_msg *msg, int offset) { struct nd_opt_prefix_info *pi = ndp_msg_payload_opts_offset(msg, offset); return &pi->nd_opt_pi_prefix; } /** * ndp_msg_opt_prefix_len: * @msg: message structure * @offset: in-message offset * * Get prefix length. * User should use this function only inside ndp_msg_opt_for_each_offset() * macro loop. * * Returns: length of prefix. **/ NDP_EXPORT uint8_t ndp_msg_opt_prefix_len(struct ndp_msg *msg, int offset) { struct nd_opt_prefix_info *pi = ndp_msg_payload_opts_offset(msg, offset); return pi->nd_opt_pi_prefix_len; } /** * ndp_msg_opt_prefix_valid_time: * @msg: message structure * @offset: in-message offset * * Get prefix valid time. * User should use this function only inside ndp_msg_opt_for_each_offset() * macro loop. * * Returns: valid time in seconds, (uint32_t) -1 means infinity. **/ NDP_EXPORT uint32_t ndp_msg_opt_prefix_valid_time(struct ndp_msg *msg, int offset) { struct nd_opt_prefix_info *pi = ndp_msg_payload_opts_offset(msg, offset); return ntohl(pi->nd_opt_pi_valid_time); } /** * ndp_msg_opt_prefix_preferred_time: * @msg: message structure * @offset: in-message offset * * Get prefix preferred time. 
* User should use this function only inside ndp_msg_opt_for_each_offset() * macro loop. * * Returns: preferred time in seconds, (uint32_t) -1 means infinity. **/ NDP_EXPORT uint32_t ndp_msg_opt_prefix_preferred_time(struct ndp_msg *msg, int offset) { struct nd_opt_prefix_info *pi = ndp_msg_payload_opts_offset(msg, offset); return ntohl(pi->nd_opt_pi_preferred_time); } /** * ndp_msg_opt_prefix_flag_on_link: * @msg: message structure * @offset: in-message offset * * Get on-link flag. * User should use this function only inside ndp_msg_opt_for_each_offset() * macro loop. * * Returns: on-link flag. **/ NDP_EXPORT bool ndp_msg_opt_prefix_flag_on_link(struct ndp_msg *msg, int offset) { struct nd_opt_prefix_info *pi = ndp_msg_payload_opts_offset(msg, offset); return pi->nd_opt_pi_flags_reserved & ND_OPT_PI_FLAG_ONLINK; } /** * ndp_msg_opt_prefix_flag_auto_addr_conf: * @msg: message structure * @offset: in-message offset * * Get autonomous address-configuration flag. * User should use this function only inside ndp_msg_opt_for_each_offset() * macro loop. * * Returns: autonomous address-configuration flag. **/ NDP_EXPORT bool ndp_msg_opt_prefix_flag_auto_addr_conf(struct ndp_msg *msg, int offset) { struct nd_opt_prefix_info *pi = ndp_msg_payload_opts_offset(msg, offset); return pi->nd_opt_pi_flags_reserved & ND_OPT_PI_FLAG_AUTO; } /** * ndp_msg_opt_prefix_flag_router_addr: * @msg: message structure * @offset: in-message offset * * Get router address flag. * User should use this function only inside ndp_msg_opt_for_each_offset() * macro loop. * * Returns: router address flag. **/ NDP_EXPORT bool ndp_msg_opt_prefix_flag_router_addr(struct ndp_msg *msg, int offset) { struct nd_opt_prefix_info *pi = ndp_msg_payload_opts_offset(msg, offset); return pi->nd_opt_pi_flags_reserved & ND_OPT_PI_FLAG_RADDR; } /** * ndp_msg_opt_mtu: * @msg: message structure * @offset: in-message offset * * Get MTU. User should check if mtu option is present before calling this. * * Returns: MTU. 
**/ NDP_EXPORT uint32_t ndp_msg_opt_mtu(struct ndp_msg *msg, int offset) { struct nd_opt_mtu *mtu = ndp_msg_payload_opts_offset(msg, offset); return ntohl(mtu->nd_opt_mtu_mtu); } /** * ndp_msg_opt_route_prefix: * @msg: message structure * @offset: in-message offset * * Get route prefix addr. * User should use this function only inside ndp_msg_opt_for_each_offset() * macro loop. * * Returns: address. **/ NDP_EXPORT struct in6_addr *ndp_msg_opt_route_prefix(struct ndp_msg *msg, int offset) { static struct in6_addr prefix; struct __nd_opt_route_info *ri = ndp_msg_payload_opts_offset(msg, offset); memset(&prefix, 0, sizeof(prefix)); memcpy(&prefix, &ri->nd_opt_ri_prefix, (ri->nd_opt_ri_len - 1) << 3); return &prefix; } /** * ndp_msg_opt_route_prefix_len: * @msg: message structure * @offset: in-message offset * * Get route prefix length. * User should use this function only inside ndp_msg_opt_for_each_offset() * macro loop. * * Returns: length of route prefix. **/ NDP_EXPORT uint8_t ndp_msg_opt_route_prefix_len(struct ndp_msg *msg, int offset) { struct __nd_opt_route_info *ri = ndp_msg_payload_opts_offset(msg, offset); return ri->nd_opt_ri_prefix_len; } /** * ndp_msg_opt_route_lifetime: * @msg: message structure * @offset: in-message offset * * Get route lifetime. * User should use this function only inside ndp_msg_opt_for_each_offset() * macro loop. * * Returns: route lifetime in seconds, (uint32_t) -1 means infinity. **/ NDP_EXPORT uint32_t ndp_msg_opt_route_lifetime(struct ndp_msg *msg, int offset) { struct __nd_opt_route_info *ri = ndp_msg_payload_opts_offset(msg, offset); return ntohl(ri->nd_opt_ri_lifetime); } /** * ndp_msg_opt_route_preference: * @msg: message structure * @offset: in-message offset * * Get route preference. * User should use this function only inside ndp_msg_opt_for_each_offset() * macro loop. * * Returns: route preference. 
**/ NDP_EXPORT enum ndp_route_preference ndp_msg_opt_route_preference(struct ndp_msg *msg, int offset) { struct __nd_opt_route_info *ri = ndp_msg_payload_opts_offset(msg, offset); return (ri->nd_opt_ri_prf_reserved >> 3) & 3; } /** * ndp_msg_opt_rdnss_lifetime: * @msg: message structure * @offset: in-message offset * * Get Recursive DNS Server lifetime. * User should use this function only inside ndp_msg_opt_for_each_offset() * macro loop. * * Returns: route lifetime in seconds, (uint32_t) -1 means infinity. **/ NDP_EXPORT uint32_t ndp_msg_opt_rdnss_lifetime(struct ndp_msg *msg, int offset) { struct __nd_opt_rdnss *rdnss = ndp_msg_payload_opts_offset(msg, offset); return ntohl(rdnss->nd_opt_rdnss_lifetime); } /** * ndp_msg_opt_rdnss_addr: * @msg: message structure * @offset: in-message offset * @addr_index: address index * * Get Recursive DNS Server address. * User should use this function only inside ndp_msg_opt_for_each_offset() * macro loop. * * Returns: address. **/ NDP_EXPORT struct in6_addr *ndp_msg_opt_rdnss_addr(struct ndp_msg *msg, int offset, int addr_index) { static struct in6_addr addr; struct __nd_opt_rdnss *rdnss = ndp_msg_payload_opts_offset(msg, offset); size_t len = rdnss->nd_opt_rdnss_len << 3; /* convert to bytes */ len -= in_struct_offset(struct __nd_opt_rdnss, nd_opt_rdnss_addresses); if ((addr_index + 1) * sizeof(addr) > len) return NULL; memcpy(&addr, &rdnss->nd_opt_rdnss_addresses[addr_index * sizeof(addr)], sizeof(addr)); return &addr; } /** * ndp_msg_opt_dnssl_lifetime: * @msg: message structure * @offset: in-message offset * * Get DNS Search List lifetime. * User should use this function only inside ndp_msg_opt_for_each_offset() * macro loop. * * Returns: route lifetime in seconds, (uint32_t) -1 means infinity. 
**/
NDP_EXPORT
uint32_t ndp_msg_opt_dnssl_lifetime(struct ndp_msg *msg, int offset)
{
	struct __nd_opt_dnssl *dnssl = ndp_msg_payload_opts_offset(msg, offset);

	return ntohl(dnssl->nd_opt_dnssl_lifetime);
}

/**
 * ndp_msg_opt_dnssl_domain:
 * @msg: message structure
 * @offset: in-message offset
 * @domain_index: domain index
 *
 * Get DNS Search List domain.
 * User should use this function only inside ndp_msg_opt_for_each_offset()
 * macro loop.
 *
 * Returns: address.
 **/
NDP_EXPORT
char *ndp_msg_opt_dnssl_domain(struct ndp_msg *msg, int offset,
			       int domain_index)
{
	int i;
	/* Decoded domain names are assembled into this static buffer, so
	 * the returned pointer is overwritten on the next call.
	 */
	static char buf[256];
	struct __nd_opt_dnssl *dnssl = ndp_msg_payload_opts_offset(msg, offset);
	size_t len = dnssl->nd_opt_dnssl_len << 3; /* convert to bytes */
	char *ptr;

	len -= in_struct_offset(struct __nd_opt_dnssl, nd_opt_dnssl_domains);
	ptr = dnssl->nd_opt_dnssl_domains;
	i = 0;
	/* Outer loop: one DNS wire-format name per iteration.  Inner loop:
	 * one length-prefixed label per iteration; a zero label terminates
	 * the name.  Labels are joined with '.' while decoding.
	 */
	while (len > 0) {
		size_t buf_len = 0;

		while (len > 0) {
			uint8_t dom_len = *ptr;

			ptr++;
			len--;
			if (!dom_len)
				break;
			/* Bail out on truncated labels or names that would
			 * not fit into buf (incl. the joining '.').
			 */
			if (dom_len > len)
				return NULL;
			if (buf_len + dom_len + 1 > sizeof(buf))
				return NULL;

			memcpy(buf + buf_len, ptr, dom_len);
			buf[buf_len + dom_len] = '.';

			ptr += dom_len;
			len -= dom_len;
			buf_len += dom_len + 1;
		}
		if (!buf_len)
			break;
		buf[buf_len - 1] = '\0'; /* overwrite final '.' */
		if (i++ == domain_index)
			return buf;
	}
	return NULL;
}

static int ndp_call_handlers(struct ndp *ndp, struct ndp_msg *msg);

/* Receive one ICMPv6 packet from the raw socket, validate it as an ND
 * message and dispatch it to the registered handlers.  Malformed packets
 * are dropped silently (err is reset to 0) so a single bad packet does not
 * abort the event loop.
 */
static int ndp_sock_recv(struct ndp *ndp)
{
	struct ndp_msg *msg;
	enum ndp_msg_type msg_type;
	size_t len;
	int err;

	msg = ndp_msg_alloc();
	if (!msg)
		return -ENOMEM;

	len = ndp_msg_payload_maxlen(msg);
	err = myrecvfrom6(ndp->sock, msg->buf, &len, 0,
			  &msg->addrto, &msg->ifindex);
	if (err) {
		err(ndp, "Failed to receive message");
		goto free_msg;
	}
	dbg(ndp, "rcvd from: %s, ifindex: %u",
		 str_in6_addr(&msg->addrto), msg->ifindex);

	/* Must contain at least a full ICMPv6 header. */
	if (len < sizeof(*msg->icmp6_hdr)) {
		warn(ndp, "rcvd icmp6 packet too short (%luB)", len);
		err = 0;
		goto free_msg;
	}
	err = ndp_msg_type_by_raw_type(&msg_type, msg->icmp6_hdr->icmp6_type);
	if (err) {
		/* Unknown ICMPv6 type: not an error, just ignore it. */
		err = 0;
		goto free_msg;
	}
	ndp_msg_init(msg, msg_type);
	ndp_msg_payload_len_set(msg, len);

	if (!ndp_msg_check_valid(msg)) {
		warn(ndp, "rcvd invalid ND message");
		err = 0;
		goto free_msg;
	}

	dbg(ndp, "rcvd %s, len: %zuB",
		 ndp_msg_type_info(msg_type)->strabbr, len);

	if (!ndp_msg_check_opts(msg)) {
		err = 0;
		goto free_msg;
	}

	err = ndp_call_handlers(ndp, msg);;

free_msg:
	ndp_msg_destroy(msg);
	return err;
}

/**
 * SECTION: msgrcv handler
 * @short_description: msgrcv handler and related stuff
 */

/* One registered receive callback; matched on the (func, msg_type,
 * ifindex, priv) quadruple.
 */
struct ndp_msgrcv_handler_item {
	struct list_item			list;
	ndp_msgrcv_handler_func_t		func;
	enum ndp_msg_type			msg_type;
	uint32_t				ifindex;
	void *					priv;
};

/* Linear search for an already-registered handler with identical
 * parameters; returns NULL when none exists.
 */
static struct ndp_msgrcv_handler_item *
ndp_find_msgrcv_handler_item(struct ndp *ndp,
			     ndp_msgrcv_handler_func_t func,
			     enum ndp_msg_type msg_type, uint32_t ifindex,
			     void *priv)
{
	struct ndp_msgrcv_handler_item *handler_item;

	list_for_each_node_entry(handler_item, &ndp->msgrcv_handler_list, list)
		if (handler_item->func == func &&
		    handler_item->msg_type == msg_type &&
		    handler_item->ifindex == ifindex &&
		    handler_item->priv == priv)
			return handler_item;
	return NULL;
}

/* Invoke every handler whose msg_type/ifindex filter matches @msg; the
 * first handler returning nonzero aborts the dispatch and its error is
 * propagated to the caller.
 */
static int ndp_call_handlers(struct ndp *ndp, struct ndp_msg *msg)
{
	struct ndp_msgrcv_handler_item *handler_item;
	int err;

	list_for_each_node_entry(handler_item, &ndp->msgrcv_handler_list, list) {
		if (handler_item->msg_type != NDP_MSG_ALL &&
		    handler_item->msg_type != ndp_msg_type(msg))
			continue;
		if (handler_item->ifindex &&
		    handler_item->ifindex != msg->ifindex)
			continue;
		err = handler_item->func(ndp, msg, handler_item->priv);
		if (err)
			return err;
	}
	return 0;
}

/**
 * ndp_msgrcv_handler_register:
 * @ndp: libndp library context
 * @func: handler function for received messages
 * @msg_type: message type to match
 * @ifindex: interface index to match
 * @priv: func private data
 *
 * Registers custom @func handler which is going to be called when
 * specified @msg_type is received. If one wants the function to be
 * called for all message types, pass NDP_MSG_ALL,
 * Note that @ifindex can be set to filter only messages received on
 * specified interface. For @func to be called for messages received on
 * all interfaces, just set 0.
 *
 * Returns: zero on success or negative number in case of an error
 * (-EEXIST for a duplicate registration, -EINVAL for a NULL @func,
 * -ENOMEM on allocation failure).
 **/
NDP_EXPORT
int ndp_msgrcv_handler_register(struct ndp *ndp, ndp_msgrcv_handler_func_t func,
				enum ndp_msg_type msg_type, uint32_t ifindex,
				void *priv)
{
	struct ndp_msgrcv_handler_item *handler_item;

	if (ndp_find_msgrcv_handler_item(ndp, func, msg_type, ifindex, priv))
		return -EEXIST;
	if (!func)
		return -EINVAL;
	handler_item = malloc(sizeof(*handler_item));
	if (!handler_item)
		return -ENOMEM;
	handler_item->func = func;
	handler_item->msg_type = msg_type;
	handler_item->ifindex = ifindex;
	handler_item->priv = priv;
	list_add_tail(&ndp->msgrcv_handler_list, &handler_item->list);
	return 0;
}

/**
 * ndp_msgrcv_handler_unregister:
 * @ndp: libndp library context
 * @func: handler function for received messages
 * @msg_type: message type to match
 * @ifindex: interface index to match
 * @priv: func private data
 *
 * Unregisters custom @func handler. Silently does nothing when no
 * matching registration exists.
 *
 **/
NDP_EXPORT
void ndp_msgrcv_handler_unregister(struct ndp *ndp, ndp_msgrcv_handler_func_t func,
				   enum ndp_msg_type msg_type, uint32_t ifindex,
				   void *priv)
{
	struct ndp_msgrcv_handler_item *handler_item;

	handler_item = ndp_find_msgrcv_handler_item(ndp, func, msg_type,
						    ifindex, priv);
	if (!handler_item)
		return;
	list_del(&handler_item->list);
	free(handler_item);
}

/**
 * SECTION: event fd
 * @short_description: event filedescriptor related stuff
 */

/**
 * ndp_get_eventfd:
 * @ndp: libndp library context
 *
 * Get eventfd filedescriptor (the raw ICMPv6 socket; poll it for
 * readability and then call ndp_call_eventfd_handler()).
 *
 * Returns: fd.
 **/
NDP_EXPORT
int ndp_get_eventfd(struct ndp *ndp)
{
	return ndp->sock;
}

/**
 * ndp_call_eventfd_handler:
 * @ndp: libndp library context
 *
 * Call eventfd handler. Processes exactly one pending packet.
 *
 * Returns: zero on success or negative number in case of an error.
 **/
NDP_EXPORT
int ndp_call_eventfd_handler(struct ndp *ndp)
{
	return ndp_sock_recv(ndp);
}

/**
 * ndp_callall_eventfd_handler:
 * @ndp: libndp library context
 *
 * Call all pending events on eventfd handler. Drains the socket with a
 * zero-timeout select() loop and returns once no data is left.
 *
 * Returns: zero on success or negative number in case of an error.
 **/
NDP_EXPORT
int ndp_callall_eventfd_handler(struct ndp *ndp)
{
	fd_set rfds;
	int fdmax;
	struct timeval tv;
	int fd = ndp_get_eventfd(ndp);
	int ret;
	int err;

	/* Zeroed timeval makes select() a non-blocking readiness probe. */
	memset(&tv, 0, sizeof(tv));
	FD_ZERO(&rfds);
	FD_SET(fd, &rfds);
	fdmax = fd + 1;
	while (true) {
		ret = select(fdmax, &rfds, NULL, NULL, &tv);
		if (ret == -1)
			return -errno;
		if (!FD_ISSET(fd, &rfds))
			return 0;
		err = ndp_call_eventfd_handler(ndp);
		if (err)
			return err;
	}
}

/**
 * SECTION: Exported context functions
 * @short_description: Core context functions exported to user
 */

/**
 * ndp_open:
 * @p_ndp: pointer where new libndp library context address will be stored
 *
 * Allocates and initializes library context, opens raw socket.
 *
 * Returns: zero on success or negative number in case of an error.
**/
NDP_EXPORT
int ndp_open(struct ndp **p_ndp)
{
	struct ndp *ndp;
	const char *env;
	int err;

	/* Zero-initialized context; logging defaults to stderr at LOG_ERR. */
	ndp = myzalloc(sizeof(*ndp));
	if (!ndp)
		return -ENOMEM;
	ndp->log_fn = log_stderr;
	ndp->log_priority = LOG_ERR;
	/* environment overwrites config */
	env = getenv("NDP_LOG");
	if (env != NULL)
		ndp_set_log_priority(ndp, log_priority(env));

	dbg(ndp, "ndp context %p created.", ndp);
	dbg(ndp, "log_priority=%d", ndp->log_priority);

	list_init(&ndp->msgrcv_handler_list);
	err = ndp_sock_open(ndp);
	if (err)
		goto free_ndp;

	/* Ownership of the context passes to the caller; release it with
	 * ndp_close().
	 */
	*p_ndp = ndp;
	return 0;
free_ndp:
	free(ndp);
	return err;
}

/**
 * ndp_close:
 * @ndp: libndp library context
 *
 * Do library context cleanup. Closes the raw socket and frees the context.
 * NOTE(review): still-registered msgrcv handler items are not freed here;
 * callers appear to be expected to unregister them first -- confirm
 * intended.
 **/
NDP_EXPORT
void ndp_close(struct ndp *ndp)
{
	ndp_sock_close(ndp);
	free(ndp);
}
./CrossVul/dataset_final_sorted/CWE-284/c/bad_5017_0
crossvul-cpp_data_bad_5198_0
/* * linux/fs/open.c * * Copyright (C) 1991, 1992 Linus Torvalds */ #include <linux/string.h> #include <linux/mm.h> #include <linux/file.h> #include <linux/fdtable.h> #include <linux/fsnotify.h> #include <linux/module.h> #include <linux/tty.h> #include <linux/namei.h> #include <linux/backing-dev.h> #include <linux/capability.h> #include <linux/securebits.h> #include <linux/security.h> #include <linux/mount.h> #include <linux/fcntl.h> #include <linux/slab.h> #include <asm/uaccess.h> #include <linux/fs.h> #include <linux/personality.h> #include <linux/pagemap.h> #include <linux/syscalls.h> #include <linux/rcupdate.h> #include <linux/audit.h> #include <linux/falloc.h> #include <linux/fs_struct.h> #include <linux/ima.h> #include <linux/dnotify.h> #include <linux/compat.h> #include "internal.h" int do_truncate(struct dentry *dentry, loff_t length, unsigned int time_attrs, struct file *filp) { int ret; struct iattr newattrs; /* Not pretty: "inode->i_size" shouldn't really be signed. But it is. 
*/ if (length < 0) return -EINVAL; newattrs.ia_size = length; newattrs.ia_valid = ATTR_SIZE | time_attrs; if (filp) { newattrs.ia_file = filp; newattrs.ia_valid |= ATTR_FILE; } /* Remove suid, sgid, and file capabilities on truncate too */ ret = dentry_needs_remove_privs(dentry); if (ret < 0) return ret; if (ret) newattrs.ia_valid |= ret | ATTR_FORCE; inode_lock(dentry->d_inode); /* Note any delegations or leases have already been broken: */ ret = notify_change(dentry, &newattrs, NULL); inode_unlock(dentry->d_inode); return ret; } long vfs_truncate(struct path *path, loff_t length) { struct inode *inode; long error; inode = path->dentry->d_inode; /* For directories it's -EISDIR, for other non-regulars - -EINVAL */ if (S_ISDIR(inode->i_mode)) return -EISDIR; if (!S_ISREG(inode->i_mode)) return -EINVAL; error = mnt_want_write(path->mnt); if (error) goto out; error = inode_permission(inode, MAY_WRITE); if (error) goto mnt_drop_write_and_out; error = -EPERM; if (IS_APPEND(inode)) goto mnt_drop_write_and_out; error = get_write_access(inode); if (error) goto mnt_drop_write_and_out; /* * Make sure that there are no leases. get_write_access() protects * against the truncate racing with a lease-granting setlease(). */ error = break_lease(inode, O_WRONLY); if (error) goto put_write_and_out; error = locks_verify_truncate(inode, NULL, length); if (!error) error = security_path_truncate(path); if (!error) error = do_truncate(path->dentry, length, 0, NULL); put_write_and_out: put_write_access(inode); mnt_drop_write_and_out: mnt_drop_write(path->mnt); out: return error; } EXPORT_SYMBOL_GPL(vfs_truncate); static long do_sys_truncate(const char __user *pathname, loff_t length) { unsigned int lookup_flags = LOOKUP_FOLLOW; struct path path; int error; if (length < 0) /* sorry, but loff_t says... 
*/ return -EINVAL; retry: error = user_path_at(AT_FDCWD, pathname, lookup_flags, &path); if (!error) { error = vfs_truncate(&path, length); path_put(&path); } if (retry_estale(error, lookup_flags)) { lookup_flags |= LOOKUP_REVAL; goto retry; } return error; } SYSCALL_DEFINE2(truncate, const char __user *, path, long, length) { return do_sys_truncate(path, length); } #ifdef CONFIG_COMPAT COMPAT_SYSCALL_DEFINE2(truncate, const char __user *, path, compat_off_t, length) { return do_sys_truncate(path, length); } #endif static long do_sys_ftruncate(unsigned int fd, loff_t length, int small) { struct inode *inode; struct dentry *dentry; struct fd f; int error; error = -EINVAL; if (length < 0) goto out; error = -EBADF; f = fdget(fd); if (!f.file) goto out; /* explicitly opened as large or we are on 64-bit box */ if (f.file->f_flags & O_LARGEFILE) small = 0; dentry = f.file->f_path.dentry; inode = dentry->d_inode; error = -EINVAL; if (!S_ISREG(inode->i_mode) || !(f.file->f_mode & FMODE_WRITE)) goto out_putf; error = -EINVAL; /* Cannot ftruncate over 2^31 bytes without large file support */ if (small && length > MAX_NON_LFS) goto out_putf; error = -EPERM; if (IS_APPEND(inode)) goto out_putf; sb_start_write(inode->i_sb); error = locks_verify_truncate(inode, f.file, length); if (!error) error = security_path_truncate(&f.file->f_path); if (!error) error = do_truncate(dentry, length, ATTR_MTIME|ATTR_CTIME, f.file); sb_end_write(inode->i_sb); out_putf: fdput(f); out: return error; } SYSCALL_DEFINE2(ftruncate, unsigned int, fd, unsigned long, length) { return do_sys_ftruncate(fd, length, 1); } #ifdef CONFIG_COMPAT COMPAT_SYSCALL_DEFINE2(ftruncate, unsigned int, fd, compat_ulong_t, length) { return do_sys_ftruncate(fd, length, 1); } #endif /* LFS versions of truncate are only needed on 32 bit machines */ #if BITS_PER_LONG == 32 SYSCALL_DEFINE2(truncate64, const char __user *, path, loff_t, length) { return do_sys_truncate(path, length); } SYSCALL_DEFINE2(ftruncate64, unsigned int, 
fd, loff_t, length) { return do_sys_ftruncate(fd, length, 0); } #endif /* BITS_PER_LONG == 32 */ int vfs_fallocate(struct file *file, int mode, loff_t offset, loff_t len) { struct inode *inode = file_inode(file); long ret; if (offset < 0 || len <= 0) return -EINVAL; /* Return error if mode is not supported */ if (mode & ~FALLOC_FL_SUPPORTED_MASK) return -EOPNOTSUPP; /* Punch hole and zero range are mutually exclusive */ if ((mode & (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_ZERO_RANGE)) == (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_ZERO_RANGE)) return -EOPNOTSUPP; /* Punch hole must have keep size set */ if ((mode & FALLOC_FL_PUNCH_HOLE) && !(mode & FALLOC_FL_KEEP_SIZE)) return -EOPNOTSUPP; /* Collapse range should only be used exclusively. */ if ((mode & FALLOC_FL_COLLAPSE_RANGE) && (mode & ~FALLOC_FL_COLLAPSE_RANGE)) return -EINVAL; /* Insert range should only be used exclusively. */ if ((mode & FALLOC_FL_INSERT_RANGE) && (mode & ~FALLOC_FL_INSERT_RANGE)) return -EINVAL; if (!(file->f_mode & FMODE_WRITE)) return -EBADF; /* * We can only allow pure fallocate on append only files */ if ((mode & ~FALLOC_FL_KEEP_SIZE) && IS_APPEND(inode)) return -EPERM; if (IS_IMMUTABLE(inode)) return -EPERM; /* * We cannot allow any fallocate operation on an active swapfile */ if (IS_SWAPFILE(inode)) return -ETXTBSY; /* * Revalidate the write permissions, in case security policy has * changed since the files were opened. */ ret = security_file_permission(file, MAY_WRITE); if (ret) return ret; if (S_ISFIFO(inode->i_mode)) return -ESPIPE; /* * Let individual file system decide if it supports preallocation * for directories or not. */ if (!S_ISREG(inode->i_mode) && !S_ISDIR(inode->i_mode)) return -ENODEV; /* Check for wrap through zero too */ if (((offset + len) > inode->i_sb->s_maxbytes) || ((offset + len) < 0)) return -EFBIG; if (!file->f_op->fallocate) return -EOPNOTSUPP; sb_start_write(inode->i_sb); ret = file->f_op->fallocate(file, mode, offset, len); /* * Create inotify and fanotify events. 
* * To keep the logic simple always create events if fallocate succeeds. * This implies that events are even created if the file size remains * unchanged, e.g. when using flag FALLOC_FL_KEEP_SIZE. */ if (ret == 0) fsnotify_modify(file); sb_end_write(inode->i_sb); return ret; } EXPORT_SYMBOL_GPL(vfs_fallocate); SYSCALL_DEFINE4(fallocate, int, fd, int, mode, loff_t, offset, loff_t, len) { struct fd f = fdget(fd); int error = -EBADF; if (f.file) { error = vfs_fallocate(f.file, mode, offset, len); fdput(f); } return error; } /* * access() needs to use the real uid/gid, not the effective uid/gid. * We do this by temporarily clearing all FS-related capabilities and * switching the fsuid/fsgid around to the real ones. */ SYSCALL_DEFINE3(faccessat, int, dfd, const char __user *, filename, int, mode) { const struct cred *old_cred; struct cred *override_cred; struct path path; struct inode *inode; int res; unsigned int lookup_flags = LOOKUP_FOLLOW; if (mode & ~S_IRWXO) /* where's F_OK, X_OK, W_OK, R_OK? */ return -EINVAL; override_cred = prepare_creds(); if (!override_cred) return -ENOMEM; override_cred->fsuid = override_cred->uid; override_cred->fsgid = override_cred->gid; if (!issecure(SECURE_NO_SETUID_FIXUP)) { /* Clear the capabilities if we switch to a non-root user */ kuid_t root_uid = make_kuid(override_cred->user_ns, 0); if (!uid_eq(override_cred->uid, root_uid)) cap_clear(override_cred->cap_effective); else override_cred->cap_effective = override_cred->cap_permitted; } old_cred = override_creds(override_cred); retry: res = user_path_at(dfd, filename, lookup_flags, &path); if (res) goto out; inode = d_backing_inode(path.dentry); if ((mode & MAY_EXEC) && S_ISREG(inode->i_mode)) { /* * MAY_EXEC on regular files is denied if the fs is mounted * with the "noexec" flag. 
*/ res = -EACCES; if (path_noexec(&path)) goto out_path_release; } res = inode_permission(inode, mode | MAY_ACCESS); /* SuS v2 requires we report a read only fs too */ if (res || !(mode & S_IWOTH) || special_file(inode->i_mode)) goto out_path_release; /* * This is a rare case where using __mnt_is_readonly() * is OK without a mnt_want/drop_write() pair. Since * no actual write to the fs is performed here, we do * not need to telegraph to that to anyone. * * By doing this, we accept that this access is * inherently racy and know that the fs may change * state before we even see this result. */ if (__mnt_is_readonly(path.mnt)) res = -EROFS; out_path_release: path_put(&path); if (retry_estale(res, lookup_flags)) { lookup_flags |= LOOKUP_REVAL; goto retry; } out: revert_creds(old_cred); put_cred(override_cred); return res; } SYSCALL_DEFINE2(access, const char __user *, filename, int, mode) { return sys_faccessat(AT_FDCWD, filename, mode); } SYSCALL_DEFINE1(chdir, const char __user *, filename) { struct path path; int error; unsigned int lookup_flags = LOOKUP_FOLLOW | LOOKUP_DIRECTORY; retry: error = user_path_at(AT_FDCWD, filename, lookup_flags, &path); if (error) goto out; error = inode_permission(path.dentry->d_inode, MAY_EXEC | MAY_CHDIR); if (error) goto dput_and_out; set_fs_pwd(current->fs, &path); dput_and_out: path_put(&path); if (retry_estale(error, lookup_flags)) { lookup_flags |= LOOKUP_REVAL; goto retry; } out: return error; } SYSCALL_DEFINE1(fchdir, unsigned int, fd) { struct fd f = fdget_raw(fd); struct inode *inode; int error = -EBADF; error = -EBADF; if (!f.file) goto out; inode = file_inode(f.file); error = -ENOTDIR; if (!S_ISDIR(inode->i_mode)) goto out_putf; error = inode_permission(inode, MAY_EXEC | MAY_CHDIR); if (!error) set_fs_pwd(current->fs, &f.file->f_path); out_putf: fdput(f); out: return error; } SYSCALL_DEFINE1(chroot, const char __user *, filename) { struct path path; int error; unsigned int lookup_flags = LOOKUP_FOLLOW | LOOKUP_DIRECTORY; 
retry: error = user_path_at(AT_FDCWD, filename, lookup_flags, &path); if (error) goto out; error = inode_permission(path.dentry->d_inode, MAY_EXEC | MAY_CHDIR); if (error) goto dput_and_out; error = -EPERM; if (!ns_capable(current_user_ns(), CAP_SYS_CHROOT)) goto dput_and_out; error = security_path_chroot(&path); if (error) goto dput_and_out; set_fs_root(current->fs, &path); error = 0; dput_and_out: path_put(&path); if (retry_estale(error, lookup_flags)) { lookup_flags |= LOOKUP_REVAL; goto retry; } out: return error; } static int chmod_common(struct path *path, umode_t mode) { struct inode *inode = path->dentry->d_inode; struct inode *delegated_inode = NULL; struct iattr newattrs; int error; error = mnt_want_write(path->mnt); if (error) return error; retry_deleg: inode_lock(inode); error = security_path_chmod(path, mode); if (error) goto out_unlock; newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO); newattrs.ia_valid = ATTR_MODE | ATTR_CTIME; error = notify_change(path->dentry, &newattrs, &delegated_inode); out_unlock: inode_unlock(inode); if (delegated_inode) { error = break_deleg_wait(&delegated_inode); if (!error) goto retry_deleg; } mnt_drop_write(path->mnt); return error; } SYSCALL_DEFINE2(fchmod, unsigned int, fd, umode_t, mode) { struct fd f = fdget(fd); int err = -EBADF; if (f.file) { audit_file(f.file); err = chmod_common(&f.file->f_path, mode); fdput(f); } return err; } SYSCALL_DEFINE3(fchmodat, int, dfd, const char __user *, filename, umode_t, mode) { struct path path; int error; unsigned int lookup_flags = LOOKUP_FOLLOW; retry: error = user_path_at(dfd, filename, lookup_flags, &path); if (!error) { error = chmod_common(&path, mode); path_put(&path); if (retry_estale(error, lookup_flags)) { lookup_flags |= LOOKUP_REVAL; goto retry; } } return error; } SYSCALL_DEFINE2(chmod, const char __user *, filename, umode_t, mode) { return sys_fchmodat(AT_FDCWD, filename, mode); } static int chown_common(struct path *path, uid_t user, gid_t 
group) { struct inode *inode = path->dentry->d_inode; struct inode *delegated_inode = NULL; int error; struct iattr newattrs; kuid_t uid; kgid_t gid; uid = make_kuid(current_user_ns(), user); gid = make_kgid(current_user_ns(), group); retry_deleg: newattrs.ia_valid = ATTR_CTIME; if (user != (uid_t) -1) { if (!uid_valid(uid)) return -EINVAL; newattrs.ia_valid |= ATTR_UID; newattrs.ia_uid = uid; } if (group != (gid_t) -1) { if (!gid_valid(gid)) return -EINVAL; newattrs.ia_valid |= ATTR_GID; newattrs.ia_gid = gid; } if (!S_ISDIR(inode->i_mode)) newattrs.ia_valid |= ATTR_KILL_SUID | ATTR_KILL_SGID | ATTR_KILL_PRIV; inode_lock(inode); error = security_path_chown(path, uid, gid); if (!error) error = notify_change(path->dentry, &newattrs, &delegated_inode); inode_unlock(inode); if (delegated_inode) { error = break_deleg_wait(&delegated_inode); if (!error) goto retry_deleg; } return error; } SYSCALL_DEFINE5(fchownat, int, dfd, const char __user *, filename, uid_t, user, gid_t, group, int, flag) { struct path path; int error = -EINVAL; int lookup_flags; if ((flag & ~(AT_SYMLINK_NOFOLLOW | AT_EMPTY_PATH)) != 0) goto out; lookup_flags = (flag & AT_SYMLINK_NOFOLLOW) ? 
0 : LOOKUP_FOLLOW; if (flag & AT_EMPTY_PATH) lookup_flags |= LOOKUP_EMPTY; retry: error = user_path_at(dfd, filename, lookup_flags, &path); if (error) goto out; error = mnt_want_write(path.mnt); if (error) goto out_release; error = chown_common(&path, user, group); mnt_drop_write(path.mnt); out_release: path_put(&path); if (retry_estale(error, lookup_flags)) { lookup_flags |= LOOKUP_REVAL; goto retry; } out: return error; } SYSCALL_DEFINE3(chown, const char __user *, filename, uid_t, user, gid_t, group) { return sys_fchownat(AT_FDCWD, filename, user, group, 0); } SYSCALL_DEFINE3(lchown, const char __user *, filename, uid_t, user, gid_t, group) { return sys_fchownat(AT_FDCWD, filename, user, group, AT_SYMLINK_NOFOLLOW); } SYSCALL_DEFINE3(fchown, unsigned int, fd, uid_t, user, gid_t, group) { struct fd f = fdget(fd); int error = -EBADF; if (!f.file) goto out; error = mnt_want_write_file(f.file); if (error) goto out_fput; audit_file(f.file); error = chown_common(&f.file->f_path, user, group); mnt_drop_write_file(f.file); out_fput: fdput(f); out: return error; } int open_check_o_direct(struct file *f) { /* NB: we're sure to have correct a_ops only after f_op->open */ if (f->f_flags & O_DIRECT) { if (!f->f_mapping->a_ops || !f->f_mapping->a_ops->direct_IO) return -EINVAL; } return 0; } static int do_dentry_open(struct file *f, struct inode *inode, int (*open)(struct inode *, struct file *), const struct cred *cred) { static const struct file_operations empty_fops = {}; int error; f->f_mode = OPEN_FMODE(f->f_flags) | FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE; path_get(&f->f_path); f->f_inode = inode; f->f_mapping = inode->i_mapping; if (unlikely(f->f_flags & O_PATH)) { f->f_mode = FMODE_PATH; f->f_op = &empty_fops; return 0; } if (f->f_mode & FMODE_WRITE && !special_file(inode->i_mode)) { error = get_write_access(inode); if (unlikely(error)) goto cleanup_file; error = __mnt_want_write(f->f_path.mnt); if (unlikely(error)) { put_write_access(inode); goto cleanup_file; } 
f->f_mode |= FMODE_WRITER; } /* POSIX.1-2008/SUSv4 Section XSI 2.9.7 */ if (S_ISREG(inode->i_mode)) f->f_mode |= FMODE_ATOMIC_POS; f->f_op = fops_get(inode->i_fop); if (unlikely(WARN_ON(!f->f_op))) { error = -ENODEV; goto cleanup_all; } error = security_file_open(f, cred); if (error) goto cleanup_all; error = break_lease(inode, f->f_flags); if (error) goto cleanup_all; if (!open) open = f->f_op->open; if (open) { error = open(inode, f); if (error) goto cleanup_all; } if ((f->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ) i_readcount_inc(inode); if ((f->f_mode & FMODE_READ) && likely(f->f_op->read || f->f_op->read_iter)) f->f_mode |= FMODE_CAN_READ; if ((f->f_mode & FMODE_WRITE) && likely(f->f_op->write || f->f_op->write_iter)) f->f_mode |= FMODE_CAN_WRITE; f->f_flags &= ~(O_CREAT | O_EXCL | O_NOCTTY | O_TRUNC); file_ra_state_init(&f->f_ra, f->f_mapping->host->i_mapping); return 0; cleanup_all: fops_put(f->f_op); if (f->f_mode & FMODE_WRITER) { put_write_access(inode); __mnt_drop_write(f->f_path.mnt); } cleanup_file: path_put(&f->f_path); f->f_path.mnt = NULL; f->f_path.dentry = NULL; f->f_inode = NULL; return error; } /** * finish_open - finish opening a file * @file: file pointer * @dentry: pointer to dentry * @open: open callback * @opened: state of open * * This can be used to finish opening a file passed to i_op->atomic_open(). * * If the open callback is set to NULL, then the standard f_op->open() * filesystem callback is substituted. * * NB: the dentry reference is _not_ consumed. If, for example, the dentry is * the return value of d_splice_alias(), then the caller needs to perform dput() * on it after finish_open(). * * On successful return @file is a fully instantiated open file. After this, if * an error occurs in ->atomic_open(), it needs to clean up with fput(). * * Returns zero on success or -errno if the open failed. 
*/

/*
 * finish_open - finish ->atomic_open() by actually opening the file
 *
 * Binds @dentry to @file and opens it via do_dentry_open() with the
 * current task's credentials.  On success FILE_OPENED is set in
 * *opened so the caller knows fput() is now responsible for cleanup.
 */
int finish_open(struct file *file, struct dentry *dentry,
		int (*open)(struct inode *, struct file *),
		int *opened)
{
	int error;
	BUG_ON(*opened & FILE_OPENED); /* once it's opened, it's opened */

	file->f_path.dentry = dentry;
	error = do_dentry_open(file, d_backing_inode(dentry), open,
			       current_cred());
	if (!error)
		*opened |= FILE_OPENED;

	return error;
}
EXPORT_SYMBOL(finish_open);

/**
 * finish_no_open - finish ->atomic_open() without opening the file
 *
 * @file: file pointer
 * @dentry: dentry or NULL (as returned from ->lookup())
 *
 * This can be used to set the result of a successful lookup in ->atomic_open().
 *
 * NB: unlike finish_open() this function does consume the dentry reference and
 * the caller need not dput() it.
 *
 * Returns "1" which must be the return value of ->atomic_open() after having
 * called this function.
 */
int finish_no_open(struct file *file, struct dentry *dentry)
{
	file->f_path.dentry = dentry;
	return 1;
}
EXPORT_SYMBOL(finish_no_open);

/* Render the path of @filp into @buf; thin wrapper around d_path(). */
char *file_path(struct file *filp, char *buf, int buflen)
{
	return d_path(&filp->f_path, buf, buflen);
}
EXPORT_SYMBOL(file_path);

/**
 * vfs_open - open the file at the given path
 * @path: path to open
 * @file: newly allocated file with f_flag initialized
 * @cred: credentials to use
 */
int vfs_open(const struct path *path, struct file *file,
	     const struct cred *cred)
{
	struct dentry *dentry = path->dentry;
	struct inode *inode = dentry->d_inode;

	file->f_path = *path;
	/*
	 * Filesystems with a ->d_select_inode hook (e.g. overlay-style
	 * dentries) may substitute a different inode based on the open
	 * flags before the real open happens.
	 */
	if (dentry->d_flags & DCACHE_OP_SELECT_INODE) {
		inode = dentry->d_op->d_select_inode(dentry, file->f_flags);
		if (IS_ERR(inode))
			return PTR_ERR(inode);
	}

	return do_dentry_open(file, inode, NULL, cred);
}

/*
 * dentry_open - allocate a struct file and open @path with it
 *
 * Returns the new file or an ERR_PTR().  Note the asymmetric cleanup:
 * before vfs_open() succeeds the half-built file is released with
 * put_filp(); afterwards fput() must be used.
 */
struct file *dentry_open(const struct path *path, int flags,
			 const struct cred *cred)
{
	int error;
	struct file *f;

	validate_creds(cred);

	/* We must always pass in a valid mount pointer. */
	BUG_ON(!path->mnt);

	f = get_empty_filp();
	if (!IS_ERR(f)) {
		f->f_flags = flags;
		error = vfs_open(path, f, cred);
		if (!error) {
			/* from now on we need fput() to dispose of f */
			error = open_check_o_direct(f);
			if (error) {
				fput(f);
				f = ERR_PTR(error);
			}
		} else {
			put_filp(f);
			f = ERR_PTR(error);
		}
	}
	return f;
}
EXPORT_SYMBOL(dentry_open);

/*
 * build_open_flags - translate open(2) flags/mode into a struct open_flags
 *
 * Computes the access mode, lookup intent and lookup flags used by the
 * path-walking machinery.  Returns 0 or -EINVAL for inconsistent
 * O_TMPFILE usage.
 */
static inline int build_open_flags(int flags, umode_t mode, struct open_flags *op)
{
	int lookup_flags = 0;
	int acc_mode = ACC_MODE(flags);

	if (flags & (O_CREAT | __O_TMPFILE))
		op->mode = (mode & S_IALLUGO) | S_IFREG;
	else
		op->mode = 0;

	/* Must never be set by userspace */
	flags &= ~FMODE_NONOTIFY & ~O_CLOEXEC;

	/*
	 * O_SYNC is implemented as __O_SYNC|O_DSYNC.  As many places only
	 * check for O_DSYNC if the need any syncing at all we enforce it's
	 * always set instead of having to deal with possibly weird behaviour
	 * for malicious applications setting only __O_SYNC.
	 */
	if (flags & __O_SYNC)
		flags |= O_DSYNC;

	if (flags & __O_TMPFILE) {
		if ((flags & O_TMPFILE_MASK) != O_TMPFILE)
			return -EINVAL;
		if (!(acc_mode & MAY_WRITE))
			return -EINVAL;
	} else if (flags & O_PATH) {
		/*
		 * If we have O_PATH in the open flag. Then we
		 * cannot have anything other than the below set of flags
		 */
		flags &= O_DIRECTORY | O_NOFOLLOW | O_PATH;
		acc_mode = 0;
	}

	op->open_flag = flags;

	/* O_TRUNC implies we need access checks for write permissions */
	if (flags & O_TRUNC)
		acc_mode |= MAY_WRITE;

	/* Allow the LSM permission hook to distinguish append
	   access from general write access. */
	if (flags & O_APPEND)
		acc_mode |= MAY_APPEND;

	op->acc_mode = acc_mode;

	/* O_PATH lookups carry no open intent at all. */
	op->intent = flags & O_PATH ? 0 : LOOKUP_OPEN;

	if (flags & O_CREAT) {
		op->intent |= LOOKUP_CREATE;
		if (flags & O_EXCL)
			op->intent |= LOOKUP_EXCL;
	}

	if (flags & O_DIRECTORY)
		lookup_flags |= LOOKUP_DIRECTORY;
	if (!(flags & O_NOFOLLOW))
		lookup_flags |= LOOKUP_FOLLOW;
	op->lookup_flags = lookup_flags;
	return 0;
}

/**
 * file_open_name - open file and return file pointer
 *
 * @name: struct filename containing path to open
 * @flags: open flags as per the open(2) second argument
 * @mode: mode for the new file if O_CREAT is set, else ignored
 *
 * This is the helper to open a file from kernelspace if you really
 * have to.  But in generally you should not do this, so please move
 * along, nothing to see here..
 */
struct file *file_open_name(struct filename *name, int flags, umode_t mode)
{
	struct open_flags op;
	int err = build_open_flags(flags, mode, &op);
	return err ? ERR_PTR(err) : do_filp_open(AT_FDCWD, name, &op);
}

/**
 * filp_open - open file and return file pointer
 *
 * @filename: path to open
 * @flags: open flags as per the open(2) second argument
 * @mode: mode for the new file if O_CREAT is set, else ignored
 *
 * This is the helper to open a file from kernelspace if you really
 * have to.  But in generally you should not do this, so please move
 * along, nothing to see here..
 */
struct file *filp_open(const char *filename, int flags, umode_t mode)
{
	/* Copy the kernel-space path into a struct filename, then defer
	   to file_open_name(); the filename is released either way. */
	struct filename *name = getname_kernel(filename);
	struct file *file = ERR_CAST(name);

	if (!IS_ERR(name)) {
		file = file_open_name(name, flags, mode);
		putname(name);
	}
	return file;
}
EXPORT_SYMBOL(filp_open);

/*
 * file_open_root - open @filename relative to an explicit root
 * @dentry/@mnt instead of the task's cwd/root.  Used by callers such
 * as the init code that need a private root for the lookup.
 */
struct file *file_open_root(struct dentry *dentry, struct vfsmount *mnt,
			    const char *filename, int flags, umode_t mode)
{
	struct open_flags op;
	int err = build_open_flags(flags, mode, &op);
	if (err)
		return ERR_PTR(err);
	return do_file_open_root(dentry, mnt, filename, &op);
}
EXPORT_SYMBOL(file_open_root);

/*
 * do_sys_open - common implementation of the open()/openat() syscalls
 *
 * Copies the user path, reserves a descriptor, opens the file and
 * publishes it with fd_install().  Returns the new fd or a negative
 * errno.
 */
long do_sys_open(int dfd, const char __user *filename, int flags, umode_t mode)
{
	struct open_flags op;
	int fd = build_open_flags(flags, mode, &op);
	struct filename *tmp;

	if (fd)
		return fd;

	tmp = getname(filename);
	if (IS_ERR(tmp))
		return PTR_ERR(tmp);

	fd = get_unused_fd_flags(flags);
	if (fd >= 0) {
		struct file *f = do_filp_open(dfd, tmp, &op);
		if (IS_ERR(f)) {
			put_unused_fd(fd);
			fd = PTR_ERR(f);
		} else {
			fsnotify_open(f);
			fd_install(fd, f);
		}
	}
	putname(tmp);
	return fd;
}

SYSCALL_DEFINE3(open, const char __user *, filename, int, flags, umode_t, mode)
{
	if (force_o_largefile())
		flags |= O_LARGEFILE;

	return do_sys_open(AT_FDCWD, filename, flags, mode);
}

SYSCALL_DEFINE4(openat, int, dfd, const char __user *, filename, int, flags,
		umode_t, mode)
{
	if (force_o_largefile())
		flags |= O_LARGEFILE;

	return do_sys_open(dfd, filename, flags, mode);
}

#ifndef __alpha__

/*
 * For backward compatibility?  Maybe this should be moved
 * into arch/i386 instead?
 */
SYSCALL_DEFINE2(creat, const char __user *, pathname, umode_t, mode)
{
	return sys_open(pathname, O_CREAT | O_WRONLY | O_TRUNC, mode);
}

#endif

/*
 * "id" is the POSIX thread ID. We use the
 * files pointer for this..
 */
int filp_close(struct file *filp, fl_owner_t id)
{
	int retval = 0;

	if (!file_count(filp)) {
		printk(KERN_ERR "VFS: Close: file count is 0\n");
		return 0;
	}

	if (filp->f_op->flush)
		retval = filp->f_op->flush(filp, id);

	/* O_PATH descriptors carry no dnotify state or POSIX locks. */
	if (likely(!(filp->f_mode & FMODE_PATH))) {
		dnotify_flush(filp, id);
		locks_remove_posix(filp, id);
	}
	fput(filp);
	return retval;
}
EXPORT_SYMBOL(filp_close);

/*
 * Careful here! We test whether the file pointer is NULL before
 * releasing the fd. This ensures that one clone task can't release
 * an fd while another clone is opening it.
 */
SYSCALL_DEFINE1(close, unsigned int, fd)
{
	int retval = __close_fd(current->files, fd);

	/* can't restart close syscall because file table entry was cleared */
	if (unlikely(retval == -ERESTARTSYS ||
		     retval == -ERESTARTNOINTR ||
		     retval == -ERESTARTNOHAND ||
		     retval == -ERESTART_RESTARTBLOCK))
		retval = -EINTR;

	return retval;
}
EXPORT_SYMBOL(sys_close);

/*
 * This routine simulates a hangup on the tty, to arrange that users
 * are given clean terminals at login time.
 */
SYSCALL_DEFINE0(vhangup)
{
	if (capable(CAP_SYS_TTY_CONFIG)) {
		tty_vhangup_self();
		return 0;
	}
	return -EPERM;
}

/*
 * Called when an inode is about to be open.
 * We use this to disallow opening large files on 32bit systems if
 * the caller didn't specify O_LARGEFILE.  On 64bit systems we force
 * on this flag in sys_open.
 */
int generic_file_open(struct inode * inode, struct file * filp)
{
	if (!(filp->f_flags & O_LARGEFILE) && i_size_read(inode) > MAX_NON_LFS)
		return -EOVERFLOW;
	return 0;
}
EXPORT_SYMBOL(generic_file_open);

/*
 * This is used by subsystems that don't want seekable
 * file descriptors. The function is not supposed to ever fail, the only
 * reason it returns an 'int' and not 'void' is so that it can be plugged
 * directly into file_operations structure.
 */
int nonseekable_open(struct inode *inode, struct file *filp)
{
	filp->f_mode &= ~(FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE);
	return 0;
}
EXPORT_SYMBOL(nonseekable_open);
./CrossVul/dataset_final_sorted/CWE-284/c/bad_5198_0
crossvul-cpp_data_bad_4896_1
/*
 * Process version 3 NFSACL requests.
 *
 * Copyright (C) 2002-2003 Andreas Gruenbacher <agruen@suse.de>
 */

#include "nfsd.h"
/* FIXME: nfsacl.h is a broken header */
#include <linux/nfsacl.h>
#include <linux/gfp.h>
#include "cache.h"
#include "xdr3.h"
#include "vfs.h"

/* Record the NFS status in the response and return it to the dispatcher. */
#define RETURN_STATUS(st)	{ resp->status = (st); return (st); }

/*
 * NULL call.
 */
static __be32
nfsd3_proc_null(struct svc_rqst *rqstp, void *argp, void *resp)
{
	return nfs_ok;
}

/*
 * Get the Access and/or Default ACL of a file.
 *
 * The client's mask selects which ACLs (access and/or default) are
 * returned; any bit outside NFS_ACL_MASK is rejected with
 * nfserr_inval.  The ACLs placed in the response are released later
 * by nfs3svc_release_getacl().
 */
static __be32 nfsd3_proc_getacl(struct svc_rqst * rqstp,
		struct nfsd3_getaclargs *argp, struct nfsd3_getaclres *resp)
{
	struct posix_acl *acl;
	struct inode *inode;
	svc_fh *fh;
	__be32 nfserr = 0;

	fh = fh_copy(&resp->fh, &argp->fh);
	nfserr = fh_verify(rqstp, &resp->fh, 0, NFSD_MAY_NOP);
	if (nfserr)
		RETURN_STATUS(nfserr);

	inode = d_inode(fh->fh_dentry);

	if (argp->mask & ~NFS_ACL_MASK)
		RETURN_STATUS(nfserr_inval);
	resp->mask = argp->mask;

	if (resp->mask & (NFS_ACL|NFS_ACLCNT)) {
		acl = get_acl(inode, ACL_TYPE_ACCESS);
		if (acl == NULL) {
			/* Solaris returns the inode's minimum ACL. */
			acl = posix_acl_from_mode(inode->i_mode, GFP_KERNEL);
		}
		if (IS_ERR(acl)) {
			nfserr = nfserrno(PTR_ERR(acl));
			goto fail;
		}
		resp->acl_access = acl;
	}
	if (resp->mask & (NFS_DFACL|NFS_DFACLCNT)) {
		/* Check how Solaris handles requests for the Default ACL
		   of a non-directory! */
		acl = get_acl(inode, ACL_TYPE_DEFAULT);
		if (IS_ERR(acl)) {
			nfserr = nfserrno(PTR_ERR(acl));
			goto fail;
		}
		resp->acl_default = acl;
	}

	/* resp->acl_{access,default} are released in
	   nfs3svc_release_getacl. */
	RETURN_STATUS(0);

fail:
	posix_acl_release(resp->acl_access);
	posix_acl_release(resp->acl_default);
	RETURN_STATUS(nfserr);
}

/*
 * Set the Access and/or Default ACL of a file.
 *
 * Requires SATTR permission on the file handle and a filesystem that
 * supports POSIX ACLs (->set_acl).  Write access to the export is
 * taken around the two ->set_acl calls.
 */
static __be32 nfsd3_proc_setacl(struct svc_rqst * rqstp,
		struct nfsd3_setaclargs *argp,
		struct nfsd3_attrstat *resp)
{
	struct inode *inode;
	svc_fh *fh;
	__be32 nfserr = 0;
	int error;

	fh = fh_copy(&resp->fh, &argp->fh);
	nfserr = fh_verify(rqstp, &resp->fh, 0, NFSD_MAY_SATTR);
	if (nfserr)
		goto out;

	inode = d_inode(fh->fh_dentry);
	if (!IS_POSIXACL(inode) || !inode->i_op->set_acl) {
		error = -EOPNOTSUPP;
		goto out_errno;
	}

	error = fh_want_write(fh);
	if (error)
		goto out_errno;

	error = inode->i_op->set_acl(inode, argp->acl_access, ACL_TYPE_ACCESS);
	if (error)
		goto out_drop_write;
	error = inode->i_op->set_acl(inode, argp->acl_default,
				     ACL_TYPE_DEFAULT);

out_drop_write:
	fh_drop_write(fh);
out_errno:
	/* error == 0 on the success path; nfserrno(0) maps to nfs_ok. */
	nfserr = nfserrno(error);
out:
	/* argp->acl_{access,default} may have been allocated in
	   nfs3svc_decode_setaclargs. */
	posix_acl_release(argp->acl_access);
	posix_acl_release(argp->acl_default);
	RETURN_STATUS(nfserr);
}

/*
 * XDR decode functions
 */
static int nfs3svc_decode_getaclargs(struct svc_rqst *rqstp, __be32 *p,
		struct nfsd3_getaclargs *args)
{
	p = nfs3svc_decode_fh(p, &args->fh);
	if (!p)
		return 0;
	args->mask = ntohl(*p); p++;

	return xdr_argsize_check(rqstp, p);
}

/*
 * Decode a SETACL request: file handle, mask, then the inline ACLs.
 * The mask is validated against NFS_ACL_MASK before any ACL is
 * decoded so malformed masks are rejected early.
 */
static int nfs3svc_decode_setaclargs(struct svc_rqst *rqstp, __be32 *p,
		struct nfsd3_setaclargs *args)
{
	struct kvec *head = rqstp->rq_arg.head;
	unsigned int base;
	int n;

	p = nfs3svc_decode_fh(p, &args->fh);
	if (!p)
		return 0;
	args->mask = ntohl(*p++);
	if (args->mask & ~NFS_ACL_MASK ||
	    !xdr_argsize_check(rqstp, p))
		return 0;

	base = (char *)p - (char *)head->iov_base;
	n = nfsacl_decode(&rqstp->rq_arg, base, NULL,
			  (args->mask & NFS_ACL) ?
			  &args->acl_access : NULL);
	if (n > 0)
		n = nfsacl_decode(&rqstp->rq_arg, base + n, NULL,
				  (args->mask & NFS_DFACL) ?
				  &args->acl_default : NULL);
	return (n > 0);
}

/*
 * XDR encode functions
 */

/* GETACL */
static int nfs3svc_encode_getaclres(struct svc_rqst *rqstp, __be32 *p,
		struct nfsd3_getaclres *resp)
{
	struct dentry *dentry = resp->fh.fh_dentry;

	p = nfs3svc_encode_post_op_attr(rqstp, p, &resp->fh);
	if (resp->status == 0 && dentry && d_really_is_positive(dentry)) {
		struct inode *inode = d_inode(dentry);
		struct kvec *head = rqstp->rq_res.head;
		unsigned int base;
		int n;
		int w;

		*p++ = htonl(resp->mask);
		if (!xdr_ressize_check(rqstp, p))
			return 0;
		base = (char *)p - (char *)head->iov_base;

		/*
		 * Reserve enough response pages to hold the encoded
		 * ACLs before nfsacl_encode() writes into them.
		 */
		rqstp->rq_res.page_len = w = nfsacl_size(
			(resp->mask & NFS_ACL)   ? resp->acl_access  : NULL,
			(resp->mask & NFS_DFACL) ? resp->acl_default : NULL);
		while (w > 0) {
			if (!*(rqstp->rq_next_page++))
				return 0;
			w -= PAGE_SIZE;
		}

		n = nfsacl_encode(&rqstp->rq_res, base, inode,
				  resp->acl_access,
				  resp->mask & NFS_ACL, 0);
		if (n > 0)
			n = nfsacl_encode(&rqstp->rq_res, base + n, inode,
					  resp->acl_default,
					  resp->mask & NFS_DFACL,
					  NFS_ACL_DEFAULT);
		if (n <= 0)
			return 0;
	} else
		if (!xdr_ressize_check(rqstp, p))
			return 0;

	return 1;
}

/* SETACL */
static int nfs3svc_encode_setaclres(struct svc_rqst *rqstp, __be32 *p,
		struct nfsd3_attrstat *resp)
{
	p = nfs3svc_encode_post_op_attr(rqstp, p, &resp->fh);

	return xdr_ressize_check(rqstp, p);
}

/*
 * XDR release functions
 */
static int nfs3svc_release_getacl(struct svc_rqst *rqstp, __be32 *p,
		struct nfsd3_getaclres *resp)
{
	fh_put(&resp->fh);
	posix_acl_release(resp->acl_access);
	posix_acl_release(resp->acl_default);
	return 1;
}

#define nfs3svc_decode_voidargs		NULL
#define nfs3svc_release_void		NULL
#define nfsd3_setaclres			nfsd3_attrstat
#define nfsd3_voidres			nfsd3_voidargs
struct nfsd3_voidargs { int dummy; };

/* Build one svc_procedure table entry from the naming conventions above. */
#define PROC(name, argt, rest, relt, cache, respsize)	\
 { (svc_procfunc) nfsd3_proc_##name,		\
   (kxdrproc_t) nfs3svc_decode_##argt##args,	\
   (kxdrproc_t) nfs3svc_encode_##rest##res,	\
   (kxdrproc_t) nfs3svc_release_##relt,		\
   sizeof(struct nfsd3_##argt##args),		\
   sizeof(struct nfsd3_##rest##res),		\
   0,						\
   cache,					\
   respsize,					\
 }

/* Response size estimates, in XDR words. */
#define ST 1		/* status*/
#define AT 21		/* attributes */
#define pAT (1+AT)	/* post attributes - conditional */
#define ACL (1+NFS_ACL_MAX_ENTRIES*3)  /* Access Control List */

static struct svc_procedure		nfsd_acl_procedures3[] = {
  PROC(null,	void,		void,		void,	  RC_NOCACHE, ST),
  PROC(getacl,	getacl,		getacl,		getacl,	  RC_NOCACHE, ST+1+2*(1+ACL)),
  PROC(setacl,	setacl,		setacl,		fhandle,  RC_NOCACHE, ST+pAT),
};

struct svc_version	nfsd_acl_version3 = {
		.vs_vers	= 3,
		.vs_nproc	= 3,
		.vs_proc	= nfsd_acl_procedures3,
		.vs_dispatch	= nfsd_dispatch,
		.vs_xdrsize	= NFS3_SVC_XDRSIZE,
		.vs_hidden	= 0,
};
./CrossVul/dataset_final_sorted/CWE-284/c/bad_4896_1
crossvul-cpp_data_good_5093_0
/*
 * socket.c
 *
 * Copyright (C) 2012 Martin Szulecki <m.szulecki@libimobiledevice.org>
 * Copyright (C) 2012 Nikias Bassen <nikias@gmx.li>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include <stdio.h>
#include <stddef.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <errno.h>
#include <sys/time.h>
#include <sys/stat.h>
#ifdef WIN32
#include <winsock2.h>
#include <windows.h>
static int wsa_init = 0;	/* one-shot WSAStartup() guard */
#else
#include <sys/socket.h>
#include <sys/un.h>
#include <netinet/in.h>
#include <netdb.h>
#include <arpa/inet.h>
#endif
#include "socket.h"

/* Default receive timeout in milliseconds. */
#define RECV_TIMEOUT 20000

static int verbose = 0;

/* Set the module-wide stderr logging verbosity (higher = chattier). */
void socket_set_verbose(int level)
{
	verbose = level;
}

#ifndef WIN32
/*
 * Create a listening UNIX-domain stream socket bound to @filename.
 * Any stale socket file is removed first.  Returns the listening fd
 * or -1 on error (message on stderr via perror).
 */
int socket_create_unix(const char *filename)
{
	struct sockaddr_un name;
	int sock;
	size_t size;
#ifdef SO_NOSIGPIPE
	int yes = 1;
#endif

	// remove if still present
	unlink(filename);

	/* Create the socket. */
	sock = socket(PF_LOCAL, SOCK_STREAM, 0);
	if (sock < 0) {
		perror("socket");
		return -1;
	}

#ifdef SO_NOSIGPIPE
	if (setsockopt(sock, SOL_SOCKET, SO_NOSIGPIPE, (void*)&yes, sizeof(int)) == -1) {
		perror("setsockopt()");
		socket_close(sock);
		return -1;
	}
#endif

	/* Bind a name to the socket. */
	name.sun_family = AF_LOCAL;
	strncpy(name.sun_path, filename, sizeof(name.sun_path));
	name.sun_path[sizeof(name.sun_path) - 1] = '\0';

	/* The size of the address is
	   the offset of the start of the filename,
	   plus its length,
	   plus one for the terminating null byte.
	   Alternatively you can just do:
	   size = SUN_LEN (&name);
	*/
	size = (offsetof(struct sockaddr_un, sun_path)
		+ strlen(name.sun_path) + 1);

	if (bind(sock, (struct sockaddr *) &name, size) < 0) {
		perror("bind");
		socket_close(sock);
		return -1;
	}

	if (listen(sock, 10) < 0) {
		perror("listen");
		socket_close(sock);
		return -1;
	}

	return sock;
}

/*
 * Connect to the UNIX-domain socket at @filename.  The path must
 * already exist and actually be a socket (checked with stat/S_ISSOCK
 * before connecting).  Returns the connected fd or -1 on error.
 */
int socket_connect_unix(const char *filename)
{
	struct sockaddr_un name;
	int sfd = -1;
	size_t size;
	struct stat fst;
#ifdef SO_NOSIGPIPE
	int yes = 1;
#endif

	// check if socket file exists...
	if (stat(filename, &fst) != 0) {
		if (verbose >= 2)
			fprintf(stderr, "%s: stat '%s': %s\n", __func__,
					filename, strerror(errno));
		return -1;
	}
	// ... and if it is a unix domain socket
	if (!S_ISSOCK(fst.st_mode)) {
		if (verbose >= 2)
			fprintf(stderr, "%s: File '%s' is not a socket!\n",
					__func__, filename);
		return -1;
	}
	// make a new socket
	if ((sfd = socket(PF_LOCAL, SOCK_STREAM, 0)) < 0) {
		if (verbose >= 2)
			fprintf(stderr, "%s: socket: %s\n", __func__,
					strerror(errno));
		return -1;
	}
#ifdef SO_NOSIGPIPE
	if (setsockopt(sfd, SOL_SOCKET, SO_NOSIGPIPE, (void*)&yes, sizeof(int)) == -1) {
		perror("setsockopt()");
		socket_close(sfd);
		return -1;
	}
#endif
	// and connect to 'filename'
	name.sun_family = AF_LOCAL;
	strncpy(name.sun_path, filename, sizeof(name.sun_path));
	name.sun_path[sizeof(name.sun_path) - 1] = 0;

	size = (offsetof(struct sockaddr_un, sun_path)
		+ strlen(name.sun_path) + 1);

	if (connect(sfd, (struct sockaddr *) &name, size) < 0) {
		socket_close(sfd);
		if (verbose >= 2)
			fprintf(stderr, "%s: connect: %s\n", __func__,
					strerror(errno));
		return -1;
	}

	return sfd;
}
#endif

/*
 * Create a TCP socket listening on 127.0.0.1:@port (loopback only, so
 * it is not reachable from the network).  Returns the listening fd or
 * -1 on error.
 */
int socket_create(uint16_t port)
{
	int sfd = -1;
	int yes = 1;
#ifdef WIN32
	WSADATA wsa_data;
	if (!wsa_init) {
		if (WSAStartup(MAKEWORD(2,2), &wsa_data) != ERROR_SUCCESS) {
			fprintf(stderr, "WSAStartup failed!\n");
			ExitProcess(-1);
		}
		wsa_init = 1;
	}
#endif
	struct sockaddr_in saddr;

	if (0 > (sfd = socket(PF_INET, SOCK_STREAM, IPPROTO_TCP))) {
		perror("socket()");
		return -1;
	}

	if (setsockopt(sfd, SOL_SOCKET, SO_REUSEADDR, (void*)&yes, sizeof(int)) == -1) {
		perror("setsockopt()");
		socket_close(sfd);
		return -1;
	}

#ifdef SO_NOSIGPIPE
	if (setsockopt(sfd, SOL_SOCKET, SO_NOSIGPIPE, (void*)&yes, sizeof(int)) == -1) {
		perror("setsockopt()");
		socket_close(sfd);
		return -1;
	}
#endif

	memset((void *) &saddr, 0, sizeof(saddr));
	saddr.sin_family = AF_INET;
	saddr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
	saddr.sin_port = htons(port);

	if (0 > bind(sfd, (struct sockaddr *) &saddr, sizeof(saddr))) {
		perror("bind()");
		socket_close(sfd);
		return -1;
	}

	if (listen(sfd, 1) == -1) {
		perror("listen()");
		socket_close(sfd);
		return -1;
	}

	return sfd;
}

/*
 * Connect a TCP socket to @addr:@port.  The host name is resolved
 * with gethostbyname().  Returns the connected fd, -1 on setup or
 * resolution failure, or -2 when connect() itself fails.
 */
int socket_connect(const char *addr, uint16_t port)
{
	int sfd = -1;
	int yes = 1;
	struct hostent *hp;
	struct sockaddr_in saddr;
#ifdef WIN32
	WSADATA wsa_data;
	if (!wsa_init) {
		if (WSAStartup(MAKEWORD(2,2), &wsa_data) != ERROR_SUCCESS) {
			fprintf(stderr, "WSAStartup failed!\n");
			ExitProcess(-1);
		}
		wsa_init = 1;
	}
#endif

	if (!addr) {
		errno = EINVAL;
		return -1;
	}

	if ((hp = gethostbyname(addr)) == NULL) {
		if (verbose >= 2)
			fprintf(stderr, "%s: unknown host '%s'\n", __func__,
					addr);
		return -1;
	}

	if (!hp->h_addr) {
		if (verbose >= 2)
			fprintf(stderr, "%s: gethostbyname returned NULL address!\n",
					__func__);
		return -1;
	}

	if (0 > (sfd = socket(PF_INET, SOCK_STREAM, IPPROTO_TCP))) {
		perror("socket()");
		return -1;
	}

	if (setsockopt(sfd, SOL_SOCKET, SO_REUSEADDR, (void*)&yes, sizeof(int)) == -1) {
		perror("setsockopt()");
		socket_close(sfd);
		return -1;
	}

#ifdef SO_NOSIGPIPE
	if (setsockopt(sfd, SOL_SOCKET, SO_NOSIGPIPE, (void*)&yes, sizeof(int)) == -1) {
		perror("setsockopt()");
		socket_close(sfd);
		return -1;
	}
#endif

	memset((void *) &saddr, 0, sizeof(saddr));
	saddr.sin_family = AF_INET;
	/* use the first resolved IPv4 address */
	saddr.sin_addr.s_addr = *(uint32_t *) hp->h_addr;
	saddr.sin_port = htons(port);

	if (connect(sfd, (struct sockaddr *) &saddr, sizeof(saddr)) < 0) {
		perror("connect");
		socket_close(sfd);
		return -2;
	}

	return sfd;
}

/*
 * Wait up to @timeout milliseconds for @fd to become readable,
 * writable or exceptional (per @fdm), retrying on EINTR.  A timeout
 * of 0 blocks indefinitely.  Returns select()'s result: >0 ready,
 * 0 timed out, -1 error.
 */
int socket_check_fd(int fd, fd_mode fdm, unsigned int timeout)
{
	fd_set fds;
	int sret;
	int eagain;
	struct timeval to;
	struct timeval *pto;

	if (fd < 0) {
		if (verbose >= 2)
			fprintf(stderr, "ERROR: invalid fd in check_fd %d\n", fd);
		return -1;
	}

	FD_ZERO(&fds);
	FD_SET(fd, &fds);

	if (timeout > 0) {
		to.tv_sec = (time_t) (timeout / 1000);
		to.tv_usec = (time_t) ((timeout - (to.tv_sec * 1000)) * 1000);
		pto = &to;
	} else {
		pto = NULL;	/* block forever */
	}

	sret = -1;
	do {
		eagain = 0;
		switch (fdm) {
		case FDM_READ:
			sret = select(fd + 1, &fds, NULL, NULL, pto);
			break;
		case FDM_WRITE:
			sret = select(fd + 1, NULL, &fds, NULL, pto);
			break;
		case FDM_EXCEPT:
			sret = select(fd + 1, NULL, NULL, &fds, pto);
			break;
		default:
			return -1;
		}

		if (sret < 0) {
			switch (errno) {
			case EINTR:
				// interrupt signal in select
				if (verbose >= 2)
					fprintf(stderr, "%s: EINTR\n", __func__);
				eagain = 1;
				break;
			case EAGAIN:
				if (verbose >= 2)
					fprintf(stderr, "%s: EAGAIN\n", __func__);
				break;
			default:
				if (verbose >= 2)
					fprintf(stderr, "%s: select failed: %s\n",
							__func__, strerror(errno));
				return -1;
			}
		}
	} while (eagain);

	return sret;
}

/*
 * Accept one connection on listening socket @fd.  @port is only used
 * to pre-fill the (otherwise output-only) address struct.  Returns
 * the accepted fd or accept()'s error result.
 */
int socket_accept(int fd, uint16_t port)
{
#ifdef WIN32
	int addr_len;
#else
	socklen_t addr_len;
#endif
	int result;
	struct sockaddr_in addr;

	memset(&addr, 0, sizeof(addr));
	addr.sin_family = AF_INET;
	addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
	addr.sin_port = htons(port);

	addr_len = sizeof(addr);
	result = accept(fd, (struct sockaddr*)&addr, &addr_len);

	return result;
}

int socket_shutdown(int fd, int how)
{
	return shutdown(fd, how);
}

/* Close a socket portably (closesocket() on Windows, close() elsewhere). */
int socket_close(int fd)
{
#ifdef WIN32
	return closesocket(fd);
#else
	return close(fd);
#endif
}

int socket_receive(int fd, void *data, size_t length)
{
	return socket_receive_timeout(fd, data, length, 0, RECV_TIMEOUT);
}

int socket_peek(int fd, void *data, size_t length)
{
	return socket_receive_timeout(fd, data, length, MSG_PEEK, RECV_TIMEOUT);
}

/*
 * Receive up to @length bytes with a millisecond @timeout.  Returns
 * the number of bytes read, 0/-1 straight from socket_check_fd() on
 * timeout/poll error, -EAGAIN when the peer closed the connection
 * (recv() returned 0 while data was reported ready), or -errno on a
 * recv() failure.
 */
int socket_receive_timeout(int fd, void *data, size_t length, int flags,
					unsigned int timeout)
{
	int res;
	int result;

	// check if data is available
	res = socket_check_fd(fd, FDM_READ, timeout);
	if (res <= 0) {
		return res;
	}
	// if we get here, there _is_ data available
	result = recv(fd, data, length, flags);
	if (res > 0 && result == 0) {
		// but this is an error condition
		if (verbose >= 3)
			fprintf(stderr, "%s: fd=%d recv returned 0\n",
					__func__, fd);
		return -EAGAIN;
	}
	if (result < 0) {
		return -errno;
	}
	return result;
}

/* Send @length bytes, suppressing SIGPIPE where MSG_NOSIGNAL exists. */
int socket_send(int fd, void *data, size_t length)
{
	int flags = 0;
#ifdef MSG_NOSIGNAL
	flags |= MSG_NOSIGNAL;
#endif
	return send(fd, data, length, flags);
}
./CrossVul/dataset_final_sorted/CWE-284/c/good_5093_0
crossvul-cpp_data_bad_4786_0
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M M EEEEE M M OOO RRRR Y Y % % MM MM E MM MM O O R R Y Y % % M M M EEE M M M O O RRRR Y % % M M E M M O O R R Y % % M M EEEEE M M OOO R R Y % % % % % % MagickCore Memory Allocation Methods % % % % Software Design % % Cristy % % July 1998 % % % % % % Copyright 1999-2016 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % http://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % Segregate our memory requirements from any program that calls our API. This % should help reduce the risk of others changing our program state or causing % memory corruption. % % Our custom memory allocation manager implements a best-fit allocation policy % using segregated free lists. It uses a linear distribution of size classes % for lower sizes and a power of two distribution of size classes at higher % sizes. It is based on the paper, "Fast Memory Allocation using Lazy Fits." % written by Yoo C. Chung. % % By default, ANSI memory methods are called (e.g. malloc). Use the % custom memory allocator by defining MAGICKCORE_ZERO_CONFIGURATION_SUPPORT % to allocate memory with private anonymous mapping rather than from the % heap. % */ /* Include declarations. 
*/
#include "MagickCore/studio.h"
#include "MagickCore/blob.h"
#include "MagickCore/blob-private.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/memory_.h"
#include "MagickCore/memory-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/semaphore.h"
#include "MagickCore/string_.h"
#include "MagickCore/utility-private.h"

/*
  Define declarations.

  A heap block is bracketed by a size_t header (size plus flag bits)
  and, while free, a size_t footer holding the same size so the
  previous block can be found when coalescing.  Free blocks also store
  next/previous list pointers in their first two words.
*/
#define BlockFooter(block,size) \
  ((size_t *) ((char *) (block)+(size)-2*sizeof(size_t)))
#define BlockHeader(block)  ((size_t *) (block)-1)
#define BlockSize  4096
#define BlockThreshold  1024
#define MaxBlockExponent  16
#define MaxBlocks  ((BlockThreshold/(4*sizeof(size_t)))+MaxBlockExponent+1)
#define MaxSegments  1024
#define MemoryGuard  ((0xdeadbeef << 31)+0xdeafdeed)
#define NextBlock(block)  ((char *) (block)+SizeOfBlock(block))
#define NextBlockInList(block)  (*(void **) (block))
#define PreviousBlock(block)  ((char *) (block)-(*((size_t *) (block)-2)))
#define PreviousBlockBit  0x01
#define PreviousBlockInList(block)  (*((void **) (block)+1))
#define SegmentSize  (2*1024*1024)
#define SizeMask  (~0x01)
#define SizeOfBlock(block)  (*BlockHeader(block) & SizeMask)

/*
  Typedef declarations.
*/
typedef enum
{
  UndefinedVirtualMemory,
  AlignedVirtualMemory,     /* heap, cache-line aligned */
  MapVirtualMemory,         /* anonymous or file-backed mmap */
  UnalignedVirtualMemory    /* plain AcquireMagickMemory() heap */
} VirtualMemoryType;

/* One contiguous region handed to the custom allocator's heap. */
typedef struct _DataSegmentInfo
{
  void
    *allocation,
    *bound;

  MagickBooleanType
    mapped;

  size_t
    length;

  struct _DataSegmentInfo
    *previous,
    *next;
} DataSegmentInfo;

/* Pluggable malloc/realloc/free triple (see memory_methods below). */
typedef struct _MagickMemoryMethods
{
  AcquireMemoryHandler
    acquire_memory_handler;

  ResizeMemoryHandler
    resize_memory_handler;

  DestroyMemoryHandler
    destroy_memory_handler;
} MagickMemoryMethods;

struct _MemoryInfo
{
  char
    filename[MagickPathExtent];  /* temp file for file-backed mappings */

  VirtualMemoryType
    type;

  size_t
    length;

  void
    *blob;

  size_t
    signature;
};

/* Segregated free lists plus the segment bookkeeping behind them. */
typedef struct _MemoryPool
{
  size_t
    allocation;

  void
    *blocks[MaxBlocks+1];

  size_t
    number_segments;

  DataSegmentInfo
    *segments[MaxSegments],
    segment_pool[MaxSegments];
} MemoryPool;

/*
  Global declarations.
*/
#if defined _MSC_VER
/* Thin wrappers: MSVC cannot cast CRT malloc/realloc/free directly
   into the handler typedefs, so go through these shims. */
static void* MSCMalloc(size_t size)
{
  return malloc(size);
}

static void* MSCRealloc(void* ptr, size_t size)
{
  return realloc(ptr, size);
}

static void MSCFree(void* ptr)
{
  free(ptr);
}
#endif

static MagickMemoryMethods
  memory_methods =
  {
#if defined _MSC_VER
    (AcquireMemoryHandler) MSCMalloc,
    (ResizeMemoryHandler) MSCRealloc,
    (DestroyMemoryHandler) MSCFree
#else
    (AcquireMemoryHandler) malloc,
    (ResizeMemoryHandler) realloc,
    (DestroyMemoryHandler) free
#endif
  };

#if defined(MAGICKCORE_ZERO_CONFIGURATION_SUPPORT)
static MemoryPool
  memory_pool;

static SemaphoreInfo
  *memory_semaphore = (SemaphoreInfo *) NULL;

static volatile DataSegmentInfo
  *free_segments = (DataSegmentInfo *) NULL;

/*
  Forward declarations.
*/
static MagickBooleanType
  ExpandHeap(size_t);
#endif

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   A c q u i r e A l i g n e d M e m o r y                                   %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AcquireAlignedMemory() returns a pointer to a block of memory at least size
%  bytes whose address is a multiple of 16*sizeof(void *).
%
%  The format of the AcquireAlignedMemory method is:
%
%      void *AcquireAlignedMemory(const size_t count,const size_t quantum)
%
%  A description of each parameter follows:
%
%    o count: the number of quantum elements to allocate.
%
%    o quantum: the number of bytes in each quantum.
%
*/

/*
  Return MagickTrue (and set errno=ENOMEM) when count*quantum would
  overflow size_t or count is zero; callers use this to reject the
  request before allocating.
*/
static MagickBooleanType CheckMemoryOverflow(const size_t count,
  const size_t quantum)
{
  size_t
    size;

  size=count*quantum;
  if ((count == 0) || (quantum != (size/count)))
    {
      errno=ENOMEM;
      return(MagickTrue);
    }
  return(MagickFalse);
}

MagickExport void *AcquireAlignedMemory(const size_t count,const size_t quantum)
{
#define AlignedExtent(size,alignment) \
  (((size)+((alignment)-1)) & ~((alignment)-1))

  size_t
    alignment,
    extent,
    size;

  void
    *memory;

  if (CheckMemoryOverflow(count,quantum) != MagickFalse)
    return((void *) NULL);
  memory=NULL;
  alignment=CACHE_LINE_SIZE;
  size=count*quantum;
  extent=AlignedExtent(size,alignment);
  /* extent < size catches overflow from rounding size up to alignment */
  if ((size == 0) || (alignment < sizeof(void *)) || (extent < size))
    return((void *) NULL);
#if defined(MAGICKCORE_HAVE_POSIX_MEMALIGN)
  if (posix_memalign(&memory,alignment,extent) != 0)
    memory=NULL;
#elif defined(MAGICKCORE_HAVE__ALIGNED_MALLOC)
  memory=_aligned_malloc(extent,alignment);
#else
  {
    void
      *p;

    /*
      Portable fallback: over-allocate, align the returned pointer by
      hand, and stash the raw malloc() pointer one word before it so
      RelinquishAlignedMemory can free it.
    */
    extent=(size+alignment-1)+sizeof(void *);
    if (extent > size)
      {
        p=malloc(extent);
        if (p != NULL)
          {
            memory=(void *) AlignedExtent((size_t) p+sizeof(void *),alignment);
            *((void **) memory-1)=p;
          }
      }
  }
#endif
  return(memory);
}

#if defined(MAGICKCORE_ZERO_CONFIGURATION_SUPPORT)
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
+   A c q u i r e B l o c k                                                   %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AcquireBlock() returns a pointer to a block of memory at least size bytes
%  suitably aligned for any use.
%
%  The format of the AcquireBlock method is:
%
%      void *AcquireBlock(const size_t size)
%
%  A description of each parameter follows:
%
%    o size: the size of the memory in bytes to allocate.
%
*/

/*
  Map a (rounded) request size to its segregated free-list index:
  linear size classes below BlockThreshold, power-of-two classes
  above, capped at MaxBlocks-1.
*/
static inline size_t AllocationPolicy(size_t size)
{
  register size_t
    blocksize;

  /*
    The linear distribution.
  */
  assert(size != 0);
  assert(size % (4*sizeof(size_t)) == 0);
  if (size <= BlockThreshold)
    return(size/(4*sizeof(size_t)));
  /*
    Check for the largest block size.
  */
  if (size > (size_t) (BlockThreshold*(1L << (MaxBlockExponent-1L))))
    return(MaxBlocks-1L);
  /*
    Otherwise use a power of two distribution.
  */
  blocksize=BlockThreshold/(4*sizeof(size_t));
  for ( ; size > BlockThreshold; size/=2)
    blocksize++;
  assert(blocksize > (BlockThreshold/(4*sizeof(size_t))));
  assert(blocksize < (MaxBlocks-1L));
  return(blocksize);
}

/*
  Insert a free block into list i, keeping the list sorted by
  ascending block size so AcquireBlock finds a best fit first.
  Caller must hold the memory semaphore.
*/
static inline void InsertFreeBlock(void *block,const size_t i)
{
  register void
    *next,
    *previous;

  size_t
    size;

  size=SizeOfBlock(block);
  previous=(void *) NULL;
  next=memory_pool.blocks[i];
  while ((next != (void *) NULL) && (SizeOfBlock(next) < size))
  {
    previous=next;
    next=NextBlockInList(next);
  }
  PreviousBlockInList(block)=previous;
  NextBlockInList(block)=next;
  if (previous != (void *) NULL)
    NextBlockInList(previous)=block;
  else
    memory_pool.blocks[i]=block;
  if (next != (void *) NULL)
    PreviousBlockInList(next)=block;
}

/*
  Unlink a block from free list i.  Caller must hold the memory
  semaphore.
*/
static inline void RemoveFreeBlock(void *block,const size_t i)
{
  register void
    *next,
    *previous;

  next=NextBlockInList(block);
  previous=PreviousBlockInList(block);
  if (previous == (void *) NULL)
    memory_pool.blocks[i]=next;
  else
    NextBlockInList(previous)=next;
  if (next != (void *) NULL)
    PreviousBlockInList(next)=previous;
}

static void *AcquireBlock(size_t size)
{
  register size_t
    i;

  register void
    *block;

  /*
    Find free block: round the request up to include the header word
    and the allocator's alignment granule, then scan this size class
    and, failing that, larger classes.
  */
  size=(size_t) (size+sizeof(size_t)+6*sizeof(size_t)-1) & -(4U*sizeof(size_t));
  i=AllocationPolicy(size);
  block=memory_pool.blocks[i];
  while ((block != (void *) NULL) && (SizeOfBlock(block) < size))
    block=NextBlockInList(block);
  if (block == (void *) NULL)
    {
      /*
        NOTE(review): blocks[MaxBlocks] is the (void *)(-1) sentinel
        initialized in AcquireMagickMemory, so this scan terminates at
        i == MaxBlocks; still, blocks[i] is read *before* the
        i >= MaxBlocks bounds check rejects it — confirm the sentinel
        invariant always holds before trusting this ordering.
      */
      i++;
      while (memory_pool.blocks[i] == (void *) NULL)
        i++;
      block=memory_pool.blocks[i];
      if (i >= MaxBlocks)
        return((void *) NULL);
    }
  assert((*BlockHeader(NextBlock(block)) & PreviousBlockBit) == 0);
  assert(SizeOfBlock(block) >= size);
  RemoveFreeBlock(block,AllocationPolicy(SizeOfBlock(block)));
  if (SizeOfBlock(block) > size)
    {
      size_t
        blocksize;

      void
        *next;

      /*
        Split block: return the leading portion and put the remainder
        back on the appropriate free list.
      */
      next=(char *) block+size;
      blocksize=SizeOfBlock(block)-size;
      *BlockHeader(next)=blocksize;
      *BlockFooter(next,blocksize)=blocksize;
      InsertFreeBlock(next,AllocationPolicy(blocksize));
      *BlockHeader(block)=size | (*BlockHeader(block) & ~SizeMask);
    }
  assert(size == SizeOfBlock(block));
  *BlockHeader(NextBlock(block))|=PreviousBlockBit;
  memory_pool.allocation+=size;
  return(block);
}
#endif

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   A c q u i r e M a g i c k M e m o r y                                     %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AcquireMagickMemory() returns a pointer to a block of memory at least size
%  bytes suitably aligned for any use.
%
%  The format of the AcquireMagickMemory method is:
%
%      void *AcquireMagickMemory(const size_t size)
%
%  A description of each parameter follows:
%
%    o size: the size of the memory in bytes to allocate.
%
*/
MagickExport void *AcquireMagickMemory(const size_t size)
{
  register void
    *memory;

#if !defined(MAGICKCORE_ZERO_CONFIGURATION_SUPPORT)
  /* default build: delegate straight to the installed malloc handler;
     a zero-byte request is promoted to one byte */
  memory=memory_methods.acquire_memory_handler(size == 0 ? 1UL : size);
#else
  if (memory_semaphore == (SemaphoreInfo *) NULL)
    ActivateSemaphoreInfo(&memory_semaphore);
  if (free_segments == (DataSegmentInfo *) NULL)
    {
      /*
        Lazy one-time pool initialization, double-checked under the
        memory semaphore.
      */
      LockSemaphoreInfo(memory_semaphore);
      if (free_segments == (DataSegmentInfo *) NULL)
        {
          register ssize_t
            i;

          assert(2*sizeof(size_t) > (size_t) (~SizeMask));
          (void) ResetMagickMemory(&memory_pool,0,sizeof(memory_pool));
          memory_pool.allocation=SegmentSize;
          /* sentinel terminating AcquireBlock's free-list scan */
          memory_pool.blocks[MaxBlocks]=(void *) (-1);
          for (i=0; i < MaxSegments; i++)
          {
            if (i != 0)
              memory_pool.segment_pool[i].previous=
                (&memory_pool.segment_pool[i-1]);
            if (i != (MaxSegments-1))
              memory_pool.segment_pool[i].next=(&memory_pool.segment_pool[i+1]);
          }
          free_segments=(&memory_pool.segment_pool[0]);
        }
      UnlockSemaphoreInfo(memory_semaphore);
    }
  LockSemaphoreInfo(memory_semaphore);
  memory=AcquireBlock(size == 0 ? 1UL : size);
  if (memory == (void *) NULL)
    {
      /* no fit anywhere: grow the heap and retry once */
      if (ExpandHeap(size == 0 ? 1UL : size) != MagickFalse)
        memory=AcquireBlock(size == 0 ? 1UL : size);
    }
  UnlockSemaphoreInfo(memory_semaphore);
#endif
  return(memory);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   A c q u i r e Q u a n t u m M e m o r y                                   %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AcquireQuantumMemory() returns a pointer to a block of memory at least
%  count * quantum bytes suitably aligned for any use.
%
%  The format of the AcquireQuantumMemory method is:
%
%      void *AcquireQuantumMemory(const size_t count,const size_t quantum)
%
%  A description of each parameter follows:
%
%    o count: the number of quantum elements to allocate.
%
%    o quantum: the number of bytes in each quantum.
%
*/
MagickExport void *AcquireQuantumMemory(const size_t count,const size_t quantum)
{
  size_t
    extent;

  /*
    Reject products that would overflow size_t; otherwise a short allocation
    would silently be returned for a huge logical request.
  */
  if (CheckMemoryOverflow(count,quantum) != MagickFalse)
    return((void *) NULL);
  extent=count*quantum;
  return(AcquireMagickMemory(extent));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   A c q u i r e V i r t u a l M e m o r y                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AcquireVirtualMemory() allocates a pointer to a block of memory at least
%  size bytes suitably aligned for any use.  The backing store is chosen in
%  policy order -- heap, anonymous mapping, file-backed mapping -- and every
%  choice is subject to the corresponding resource limit.  If no
%  policy-approved backing store can satisfy the request, NULL is returned.
%
%  The format of the AcquireVirtualMemory method is:
%
%      MemoryInfo *AcquireVirtualMemory(const size_t count,const size_t quantum)
%
%  A description of each parameter follows:
%
%    o count: the number of quantum elements to allocate.
%
%    o quantum: the number of bytes in each quantum.
%
*/
MagickExport MemoryInfo *AcquireVirtualMemory(const size_t count,
  const size_t quantum)
{
  MemoryInfo
    *memory_info;

  size_t
    extent;

  if (CheckMemoryOverflow(count,quantum) != MagickFalse)
    return((MemoryInfo *) NULL);
  memory_info=(MemoryInfo *) MagickAssumeAligned(AcquireAlignedMemory(1,
    sizeof(*memory_info)));
  if (memory_info == (MemoryInfo *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) ResetMagickMemory(memory_info,0,sizeof(*memory_info));
  extent=count*quantum;
  memory_info->length=extent;
  memory_info->signature=MagickCoreSignature;
  if (AcquireMagickResource(MemoryResource,extent) != MagickFalse)
    {
      memory_info->blob=AcquireAlignedMemory(1,extent);
      if (memory_info->blob != NULL)
        {
          memory_info->type=AlignedVirtualMemory;
          return(memory_info);
        }
      /*
        Heap allocation failed; release the memory resource before trying the
        next backing store.  (Previously the resource was relinquished even
        when it had never been acquired, corrupting resource accounting.)
      */
      RelinquishMagickResource(MemoryResource,extent);
    }
  if (AcquireMagickResource(MapResource,extent) != MagickFalse)
    {
      /*
        Heap memory failed, try anonymous memory mapping.
      */
      memory_info->blob=MapBlob(-1,IOMode,0,extent);
      if (memory_info->blob != NULL)
        {
          memory_info->type=MapVirtualMemory;
          return(memory_info);
        }
      if (AcquireMagickResource(DiskResource,extent) != MagickFalse)
        {
          int
            file;

          /*
            Anonymous memory mapping failed, try file-backed memory mapping.
          */
          file=AcquireUniqueFileResource(memory_info->filename);
          if (file != -1)
            {
              /*
                Extend the temporary file to the requested length so the
                mapping is fully backed, then map it.
              */
              if ((lseek(file,extent-1,SEEK_SET) == (extent-1)) &&
                  (write(file,"",1) == 1))
                {
                  memory_info->blob=MapBlob(file,IOMode,0,extent);
                  if (memory_info->blob != NULL)
                    {
                      (void) close(file);
                      memory_info->type=MapVirtualMemory;
                      return(memory_info);
                    }
                }
              /*
                File-backed memory mapping failed, delete the temporary file.
              */
              (void) close(file);
              (void) RelinquishUniqueFileResource(memory_info->filename);
              *memory_info->filename = '\0';
            }
          /* Only release the disk resource we actually acquired above. */
          RelinquishMagickResource(DiskResource,extent);
        }
      RelinquishMagickResource(MapResource,extent);
    }
  /*
    Every policy-approved backing store failed.  Do NOT fall back to an
    unchecked heap allocation here: that would let a request exceed the
    configured memory resource limits (access-control bypass, CWE-284).
  */
  memory_info=RelinquishVirtualMemory(memory_info);
  return(memory_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   C o p y M a g i c k M e m o r y                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  CopyMagickMemory() copies size bytes from memory area source to the
%  destination.  Copying between objects that overlap will take place
%  correctly.  It returns destination.
%
%  The format of the CopyMagickMemory method is:
%
%      void *CopyMagickMemory(void *destination,const void *source,
%        const size_t size)
%
%  A description of each parameter follows:
%
%    o destination: the destination.
%
%    o source: the source.
%
%    o size: the size of the memory in bytes to allocate.
% */ MagickExport void *CopyMagickMemory(void *destination,const void *source, const size_t size) { register const unsigned char *p; register unsigned char *q; assert(destination != (void *) NULL); assert(source != (const void *) NULL); p=(const unsigned char *) source; q=(unsigned char *) destination; if (((q+size) < p) || (q > (p+size))) switch (size) { default: return(memcpy(destination,source,size)); case 8: *q++=(*p++); case 7: *q++=(*p++); case 6: *q++=(*p++); case 5: *q++=(*p++); case 4: *q++=(*p++); case 3: *q++=(*p++); case 2: *q++=(*p++); case 1: *q++=(*p++); case 0: return(destination); } return(memmove(destination,source,size)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D e s t r o y M a g i c k M e m o r y % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DestroyMagickMemory() deallocates memory associated with the memory manager. % % The format of the DestroyMagickMemory method is: % % DestroyMagickMemory(void) % */ MagickExport void DestroyMagickMemory(void) { #if defined(MAGICKCORE_ZERO_CONFIGURATION_SUPPORT) register ssize_t i; if (memory_semaphore == (SemaphoreInfo *) NULL) ActivateSemaphoreInfo(&memory_semaphore); LockSemaphoreInfo(memory_semaphore); for (i=0; i < (ssize_t) memory_pool.number_segments; i++) if (memory_pool.segments[i]->mapped == MagickFalse) memory_methods.destroy_memory_handler( memory_pool.segments[i]->allocation); else (void) UnmapBlob(memory_pool.segments[i]->allocation, memory_pool.segments[i]->length); free_segments=(DataSegmentInfo *) NULL; (void) ResetMagickMemory(&memory_pool,0,sizeof(memory_pool)); UnlockSemaphoreInfo(memory_semaphore); RelinquishSemaphoreInfo(&memory_semaphore); #endif } #if defined(MAGICKCORE_ZERO_CONFIGURATION_SUPPORT) /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + E x p a n d H e a p % % % % % % % 
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ExpandHeap() get more memory from the system. It returns MagickTrue on % success otherwise MagickFalse. % % The format of the ExpandHeap method is: % % MagickBooleanType ExpandHeap(size_t size) % % A description of each parameter follows: % % o size: the size of the memory in bytes we require. % */ static MagickBooleanType ExpandHeap(size_t size) { DataSegmentInfo *segment_info; MagickBooleanType mapped; register ssize_t i; register void *block; size_t blocksize; void *segment; blocksize=((size+12*sizeof(size_t))+SegmentSize-1) & -SegmentSize; assert(memory_pool.number_segments < MaxSegments); segment=MapBlob(-1,IOMode,0,blocksize); mapped=segment != (void *) NULL ? MagickTrue : MagickFalse; if (segment == (void *) NULL) segment=(void *) memory_methods.acquire_memory_handler(blocksize); if (segment == (void *) NULL) return(MagickFalse); segment_info=(DataSegmentInfo *) free_segments; free_segments=segment_info->next; segment_info->mapped=mapped; segment_info->length=blocksize; segment_info->allocation=segment; segment_info->bound=(char *) segment+blocksize; i=(ssize_t) memory_pool.number_segments-1; for ( ; (i >= 0) && (memory_pool.segments[i]->allocation > segment); i--) memory_pool.segments[i+1]=memory_pool.segments[i]; memory_pool.segments[i+1]=segment_info; memory_pool.number_segments++; size=blocksize-12*sizeof(size_t); block=(char *) segment_info->allocation+4*sizeof(size_t); *BlockHeader(block)=size | PreviousBlockBit; *BlockFooter(block,size)=size; InsertFreeBlock(block,AllocationPolicy(size)); block=NextBlock(block); assert(block < segment_info->bound); *BlockHeader(block)=2*sizeof(size_t); *BlockHeader(NextBlock(block))=PreviousBlockBit; return(MagickTrue); } #endif /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t M a g i c k M e m o r y M e t h o d s % % % % % % % 
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetMagickMemoryMethods() gets the methods to acquire, resize, and destroy % memory. % % The format of the GetMagickMemoryMethods() method is: % % void GetMagickMemoryMethods(AcquireMemoryHandler *acquire_memory_handler, % ResizeMemoryHandler *resize_memory_handler, % DestroyMemoryHandler *destroy_memory_handler) % % A description of each parameter follows: % % o acquire_memory_handler: method to acquire memory (e.g. malloc). % % o resize_memory_handler: method to resize memory (e.g. realloc). % % o destroy_memory_handler: method to destroy memory (e.g. free). % */ MagickExport void GetMagickMemoryMethods( AcquireMemoryHandler *acquire_memory_handler, ResizeMemoryHandler *resize_memory_handler, DestroyMemoryHandler *destroy_memory_handler) { assert(acquire_memory_handler != (AcquireMemoryHandler *) NULL); assert(resize_memory_handler != (ResizeMemoryHandler *) NULL); assert(destroy_memory_handler != (DestroyMemoryHandler *) NULL); *acquire_memory_handler=memory_methods.acquire_memory_handler; *resize_memory_handler=memory_methods.resize_memory_handler; *destroy_memory_handler=memory_methods.destroy_memory_handler; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t V i r t u a l M e m o r y B l o b % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetVirtualMemoryBlob() returns the virtual memory blob associated with the % specified MemoryInfo structure. % % The format of the GetVirtualMemoryBlob method is: % % void *GetVirtualMemoryBlob(const MemoryInfo *memory_info) % % A description of each parameter follows: % % o memory_info: The MemoryInfo structure. 
*/ MagickExport void *GetVirtualMemoryBlob(const MemoryInfo *memory_info) { assert(memory_info != (const MemoryInfo *) NULL); assert(memory_info->signature == MagickCoreSignature); return(memory_info->blob); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e l i n q u i s h A l i g n e d M e m o r y % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RelinquishAlignedMemory() frees memory acquired with AcquireAlignedMemory() % or reuse. % % The format of the RelinquishAlignedMemory method is: % % void *RelinquishAlignedMemory(void *memory) % % A description of each parameter follows: % % o memory: A pointer to a block of memory to free for reuse. % */ MagickExport void *RelinquishAlignedMemory(void *memory) { if (memory == (void *) NULL) return((void *) NULL); #if defined(MAGICKCORE_HAVE_POSIX_MEMALIGN) free(memory); #elif defined(MAGICKCORE_HAVE__ALIGNED_MALLOC) _aligned_free(memory); #else free(*((void **) memory-1)); #endif return(NULL); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e l i n q u i s h M a g i c k M e m o r y % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RelinquishMagickMemory() frees memory acquired with AcquireMagickMemory() % or AcquireQuantumMemory() for reuse. % % The format of the RelinquishMagickMemory method is: % % void *RelinquishMagickMemory(void *memory) % % A description of each parameter follows: % % o memory: A pointer to a block of memory to free for reuse. 
% */ MagickExport void *RelinquishMagickMemory(void *memory) { if (memory == (void *) NULL) return((void *) NULL); #if !defined(MAGICKCORE_ZERO_CONFIGURATION_SUPPORT) memory_methods.destroy_memory_handler(memory); #else LockSemaphoreInfo(memory_semaphore); assert((SizeOfBlock(memory) % (4*sizeof(size_t))) == 0); assert((*BlockHeader(NextBlock(memory)) & PreviousBlockBit) != 0); if ((*BlockHeader(memory) & PreviousBlockBit) == 0) { void *previous; /* Coalesce with previous adjacent block. */ previous=PreviousBlock(memory); RemoveFreeBlock(previous,AllocationPolicy(SizeOfBlock(previous))); *BlockHeader(previous)=(SizeOfBlock(previous)+SizeOfBlock(memory)) | (*BlockHeader(previous) & ~SizeMask); memory=previous; } if ((*BlockHeader(NextBlock(NextBlock(memory))) & PreviousBlockBit) == 0) { void *next; /* Coalesce with next adjacent block. */ next=NextBlock(memory); RemoveFreeBlock(next,AllocationPolicy(SizeOfBlock(next))); *BlockHeader(memory)=(SizeOfBlock(memory)+SizeOfBlock(next)) | (*BlockHeader(memory) & ~SizeMask); } *BlockFooter(memory,SizeOfBlock(memory))=SizeOfBlock(memory); *BlockHeader(NextBlock(memory))&=(~PreviousBlockBit); InsertFreeBlock(memory,AllocationPolicy(SizeOfBlock(memory))); UnlockSemaphoreInfo(memory_semaphore); #endif return((void *) NULL); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e l i n q u i s h V i r t u a l M e m o r y % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RelinquishVirtualMemory() frees memory acquired with AcquireVirtualMemory(). % % The format of the RelinquishVirtualMemory method is: % % MemoryInfo *RelinquishVirtualMemory(MemoryInfo *memory_info) % % A description of each parameter follows: % % o memory_info: A pointer to a block of memory to free for reuse. 
% */ MagickExport MemoryInfo *RelinquishVirtualMemory(MemoryInfo *memory_info) { assert(memory_info != (MemoryInfo *) NULL); assert(memory_info->signature == MagickCoreSignature); if (memory_info->blob != (void *) NULL) switch (memory_info->type) { case AlignedVirtualMemory: { memory_info->blob=RelinquishAlignedMemory(memory_info->blob); RelinquishMagickResource(MemoryResource,memory_info->length); break; } case MapVirtualMemory: { (void) UnmapBlob(memory_info->blob,memory_info->length); memory_info->blob=NULL; RelinquishMagickResource(MapResource,memory_info->length); if (*memory_info->filename != '\0') { (void) RelinquishUniqueFileResource(memory_info->filename); RelinquishMagickResource(DiskResource,memory_info->length); } break; } case UnalignedVirtualMemory: default: { memory_info->blob=RelinquishMagickMemory(memory_info->blob); break; } } memory_info->signature=(~MagickCoreSignature); memory_info=(MemoryInfo *) RelinquishAlignedMemory(memory_info); return(memory_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e s e t M a g i c k M e m o r y % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ResetMagickMemory() fills the first size bytes of the memory area pointed to % by memory with the constant byte c. % % The format of the ResetMagickMemory method is: % % void *ResetMagickMemory(void *memory,int byte,const size_t size) % % A description of each parameter follows: % % o memory: a pointer to a memory allocation. % % o byte: set the memory to this value. % % o size: size of the memory to reset. 
%
*/
MagickExport void *ResetMagickMemory(void *memory,int byte,const size_t size)
{
  assert(memory != (void *) NULL);
  return(memset(memory,byte,size));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   R e s i z e M a g i c k M e m o r y                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ResizeMagickMemory() changes the size of the memory and returns a pointer to
%  the (possibly moved) block.  The contents will be unchanged up to the
%  lesser of the new and old sizes.
%
%  The format of the ResizeMagickMemory method is:
%
%      void *ResizeMagickMemory(void *memory,const size_t size)
%
%  A description of each parameter follows:
%
%    o memory: A pointer to a memory allocation.
%
%    o size: the new size of the allocated memory.
%
*/
#if defined(MAGICKCORE_ZERO_CONFIGURATION_SUPPORT)
static inline void *ResizeBlock(void *block,size_t size)
{
  register void
    *memory;

  if (block == (void *) NULL)
    return(AcquireBlock(size));
  memory=AcquireBlock(size);
  if (memory == (void *) NULL)
    return((void *) NULL);
  /* Copy the payload only; the trailing sizeof(size_t) of the old block is
     allocator bookkeeping.  The caller releases the old block. */
  if (size <= (SizeOfBlock(block)-sizeof(size_t)))
    (void) memcpy(memory,block,size);
  else
    (void) memcpy(memory,block,SizeOfBlock(block)-sizeof(size_t));
  memory_pool.allocation+=size;
  return(memory);
}
#endif

MagickExport void *ResizeMagickMemory(void *memory,const size_t size)
{
  register void
    *block;

  if (memory == (void *) NULL)
    return(AcquireMagickMemory(size));
#if !defined(MAGICKCORE_ZERO_CONFIGURATION_SUPPORT)
  /* realloc() semantics: on failure the original block is released. */
  block=memory_methods.resize_memory_handler(memory,size == 0 ? 1UL : size);
  if (block == (void *) NULL)
    memory=RelinquishMagickMemory(memory);
#else
  LockSemaphoreInfo(memory_semaphore);
  block=ResizeBlock(memory,size == 0 ? 1UL : size);
  if (block == (void *) NULL)
    {
      /* No room for the new block: grow the heap and retry once. */
      if (ExpandHeap(size == 0 ? 1UL : size) == MagickFalse)
        {
          UnlockSemaphoreInfo(memory_semaphore);
          memory=RelinquishMagickMemory(memory);
          ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
        }
      block=ResizeBlock(memory,size == 0 ? 1UL : size);
      assert(block != (void *) NULL);
    }
  UnlockSemaphoreInfo(memory_semaphore);
  /* ResizeBlock() copies into a fresh block; free the original here. */
  memory=RelinquishMagickMemory(memory);
#endif
  return(block);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   R e s i z e Q u a n t u m M e m o r y                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ResizeQuantumMemory() changes the size of the memory and returns a pointer
%  to the (possibly moved) block.  The contents will be unchanged up to the
%  lesser of the new and old sizes.
%
%  The format of the ResizeQuantumMemory method is:
%
%      void *ResizeQuantumMemory(void *memory,const size_t count,
%        const size_t quantum)
%
%  A description of each parameter follows:
%
%    o memory: A pointer to a memory allocation.
%
%    o count: the number of quantum elements to allocate.
%
%    o quantum: the number of bytes in each quantum.
%
*/
MagickExport void *ResizeQuantumMemory(void *memory,const size_t count,
  const size_t quantum)
{
  size_t
    extent;

  /* On count*quantum overflow the original allocation is released and NULL
     returned, mirroring ResizeMagickMemory()'s failure semantics. */
  if (CheckMemoryOverflow(count,quantum) != MagickFalse)
    {
      memory=RelinquishMagickMemory(memory);
      return((void *) NULL);
    }
  extent=count*quantum;
  return(ResizeMagickMemory(memory,extent));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t M a g i c k M e m o r y M e t h o d s                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetMagickMemoryMethods() sets the methods to acquire, resize, and destroy
%  memory.  Your custom memory methods must be set prior to the
%  MagickCoreGenesis() method.
% % The format of the SetMagickMemoryMethods() method is: % % SetMagickMemoryMethods(AcquireMemoryHandler acquire_memory_handler, % ResizeMemoryHandler resize_memory_handler, % DestroyMemoryHandler destroy_memory_handler) % % A description of each parameter follows: % % o acquire_memory_handler: method to acquire memory (e.g. malloc). % % o resize_memory_handler: method to resize memory (e.g. realloc). % % o destroy_memory_handler: method to destroy memory (e.g. free). % */ MagickExport void SetMagickMemoryMethods( AcquireMemoryHandler acquire_memory_handler, ResizeMemoryHandler resize_memory_handler, DestroyMemoryHandler destroy_memory_handler) { /* Set memory methods. */ if (acquire_memory_handler != (AcquireMemoryHandler) NULL) memory_methods.acquire_memory_handler=acquire_memory_handler; if (resize_memory_handler != (ResizeMemoryHandler) NULL) memory_methods.resize_memory_handler=resize_memory_handler; if (destroy_memory_handler != (DestroyMemoryHandler) NULL) memory_methods.destroy_memory_handler=destroy_memory_handler; }
./CrossVul/dataset_final_sorted/CWE-284/c/bad_4786_0
crossvul-cpp_data_good_2374_0
/* -*- mode: c; c-file-style: "bsd"; indent-tabs-mode: t -*- */
/*
 * Copyright 1993 OpenVision Technologies, Inc., All Rights Reserved.
 *
 */

#include <k5-int.h>
#include <gssrpc/rpc.h>
#include <gssapi/gssapi_krb5.h> /* for gss_nt_krb5_name */
#include <syslog.h>
#include <kadm5/kadm_rpc.h>
#include <krb5.h>
#include <kadm5/admin.h>
#include <adm_proto.h>
#include "misc.h"
#include "kadm5/server_internal.h"

extern void *global_server_handle;

static int check_rpcsec_auth(struct svc_req *);

/*
 * Function: kadm_1
 *
 * Purpose: RPC proccessing procedure.
 *	    originally generated from rpcgen
 *
 * Arguments:
 *	rqstp		    (input) rpc request structure
 *	transp		    (input) rpc transport structure
 *			    (input/output)
 *	<return value>
 *
 * Requires:
 * Effects:
 * Modifies:
 */
void kadm_1(rqstp, transp)
   struct svc_req *rqstp;
   register SVCXPRT *transp;
{
     union {
	  cprinc_arg create_principal_2_arg;
	  dprinc_arg delete_principal_2_arg;
	  mprinc_arg modify_principal_2_arg;
	  rprinc_arg rename_principal_2_arg;
	  gprinc_arg get_principal_2_arg;
	  chpass_arg chpass_principal_2_arg;
	  chrand_arg chrand_principal_2_arg;
	  cpol_arg create_policy_2_arg;
	  dpol_arg delete_policy_2_arg;
	  mpol_arg modify_policy_2_arg;
	  gpol_arg get_policy_2_arg;
	  setkey_arg setkey_principal_2_arg;
	  setv4key_arg setv4key_principal_2_arg;
	  cprinc3_arg create_principal3_2_arg;
	  chpass3_arg chpass_principal3_2_arg;
	  chrand3_arg chrand_principal3_2_arg;
	  setkey3_arg setkey_principal3_2_arg;
     } argument;
     char *result;
     bool_t (*xdr_argument)(), (*xdr_result)();
     char *(*local)();

     /*
      * Reject any request that is not authenticated with AUTH_GSSAPI or a
      * verified RPCSEC_GSS credential BEFORE decoding arguments or
      * dispatching -- this gate protects every kadmin operation below.
      */
     if (rqstp->rq_cred.oa_flavor != AUTH_GSSAPI &&
	 !check_rpcsec_auth(rqstp)) {
	  krb5_klog_syslog(LOG_ERR, "Authentication attempt failed: %s, "
			   "RPC authentication flavor %d",
			   client_addr(rqstp->rq_xprt),
			   rqstp->rq_cred.oa_flavor);
	  svcerr_weakauth(transp);
	  return;
     }

     /* Map the procedure number onto its XDR codecs and local handler. */
     switch (rqstp->rq_proc) {
     case NULLPROC:
	  (void) svc_sendreply(transp, xdr_void, (char *)NULL);
	  return;

     case CREATE_PRINCIPAL:
	  xdr_argument = xdr_cprinc_arg;
	  xdr_result = xdr_generic_ret;
	  local = (char *(*)()) create_principal_2_svc;
	  break;

     case DELETE_PRINCIPAL:
	  xdr_argument = xdr_dprinc_arg;
	  xdr_result = xdr_generic_ret;
	  local = (char *(*)()) delete_principal_2_svc;
	  break;

     case MODIFY_PRINCIPAL:
	  xdr_argument = xdr_mprinc_arg;
	  xdr_result = xdr_generic_ret;
	  local = (char *(*)()) modify_principal_2_svc;
	  break;

     case RENAME_PRINCIPAL:
	  xdr_argument = xdr_rprinc_arg;
	  xdr_result = xdr_generic_ret;
	  local = (char *(*)()) rename_principal_2_svc;
	  break;

     case GET_PRINCIPAL:
	  xdr_argument = xdr_gprinc_arg;
	  xdr_result = xdr_gprinc_ret;
	  local = (char *(*)()) get_principal_2_svc;
	  break;

     case GET_PRINCS:
	  xdr_argument = xdr_gprincs_arg;
	  xdr_result = xdr_gprincs_ret;
	  local = (char *(*)()) get_princs_2_svc;
	  break;

     case CHPASS_PRINCIPAL:
	  xdr_argument = xdr_chpass_arg;
	  xdr_result = xdr_generic_ret;
	  local = (char *(*)()) chpass_principal_2_svc;
	  break;

     case SETV4KEY_PRINCIPAL:
	  xdr_argument = xdr_setv4key_arg;
	  xdr_result = xdr_generic_ret;
	  local = (char *(*)()) setv4key_principal_2_svc;
	  break;

     case SETKEY_PRINCIPAL:
	  xdr_argument = xdr_setkey_arg;
	  xdr_result = xdr_generic_ret;
	  local = (char *(*)()) setkey_principal_2_svc;
	  break;

     case CHRAND_PRINCIPAL:
	  xdr_argument = xdr_chrand_arg;
	  xdr_result = xdr_chrand_ret;
	  local = (char *(*)()) chrand_principal_2_svc;
	  break;

     case CREATE_POLICY:
	  xdr_argument = xdr_cpol_arg;
	  xdr_result = xdr_generic_ret;
	  local = (char *(*)()) create_policy_2_svc;
	  break;

     case DELETE_POLICY:
	  xdr_argument = xdr_dpol_arg;
	  xdr_result = xdr_generic_ret;
	  local = (char *(*)()) delete_policy_2_svc;
	  break;

     case MODIFY_POLICY:
	  xdr_argument = xdr_mpol_arg;
	  xdr_result = xdr_generic_ret;
	  local = (char *(*)()) modify_policy_2_svc;
	  break;

     case GET_POLICY:
	  xdr_argument = xdr_gpol_arg;
	  xdr_result = xdr_gpol_ret;
	  local = (char *(*)()) get_policy_2_svc;
	  break;

     case GET_POLS:
	  xdr_argument = xdr_gpols_arg;
	  xdr_result = xdr_gpols_ret;
	  local = (char *(*)()) get_pols_2_svc;
	  break;

     case GET_PRIVS:
	  xdr_argument = xdr_u_int32;
	  xdr_result = xdr_getprivs_ret;
	  local = (char *(*)()) get_privs_2_svc;
	  break;

     case INIT:
	  xdr_argument = xdr_u_int32;
	  xdr_result = xdr_generic_ret;
	  local = (char *(*)()) init_2_svc;
	  break;

     case CREATE_PRINCIPAL3:
	  xdr_argument = xdr_cprinc3_arg;
	  xdr_result = xdr_generic_ret;
	  local = (char *(*)()) create_principal3_2_svc;
	  break;

     case CHPASS_PRINCIPAL3:
	  xdr_argument = xdr_chpass3_arg;
	  xdr_result = xdr_generic_ret;
	  local = (char *(*)()) chpass_principal3_2_svc;
	  break;

     case CHRAND_PRINCIPAL3:
	  xdr_argument = xdr_chrand3_arg;
	  xdr_result = xdr_chrand_ret;
	  local = (char *(*)()) chrand_principal3_2_svc;
	  break;

     case SETKEY_PRINCIPAL3:
	  xdr_argument = xdr_setkey3_arg;
	  xdr_result = xdr_generic_ret;
	  local = (char *(*)()) setkey_principal3_2_svc;
	  break;

     case PURGEKEYS:
	  xdr_argument = xdr_purgekeys_arg;
	  xdr_result = xdr_generic_ret;
	  local = (char *(*)()) purgekeys_2_svc;
	  break;

     case GET_STRINGS:
	  xdr_argument = xdr_gstrings_arg;
	  xdr_result = xdr_gstrings_ret;
	  local = (char *(*)()) get_strings_2_svc;
	  break;

     case SET_STRING:
	  xdr_argument = xdr_sstring_arg;
	  xdr_result = xdr_generic_ret;
	  local = (char *(*)()) set_string_2_svc;
	  break;

     default:
	  krb5_klog_syslog(LOG_ERR, "Invalid KADM5 procedure number: %s, %d",
			   client_addr(rqstp->rq_xprt), rqstp->rq_proc);
	  svcerr_noproc(transp);
	  return;
     }
     memset(&argument, 0, sizeof(argument));
     if (!svc_getargs(transp, xdr_argument, &argument)) {
	  svcerr_decode(transp);
	  return;
     }
     result = (*local)(&argument, rqstp);
     if (result != NULL && !svc_sendreply(transp, xdr_result, result)) {
	  krb5_klog_syslog(LOG_ERR, "WARNING! Unable to send function results, "
			   "continuing.");
	  svcerr_systemerr(transp);
     }
     if (!svc_freeargs(transp, xdr_argument, &argument)) {
	  krb5_klog_syslog(LOG_ERR, "WARNING! Unable to free arguments, "
			   "continuing.");
     }
     return;
}

/*
 * Verify an RPCSEC_GSS credential: the client name must be the two-component
 * service principal kadmin/<something> in the local realm, and must not be
 * kadmin/history.  Returns nonzero on success.
 */
static int
check_rpcsec_auth(struct svc_req *rqstp)
{
     gss_ctx_id_t ctx;
     krb5_context kctx;
     OM_uint32 maj_stat, min_stat;
     gss_name_t name;
     krb5_principal princ;
     int ret, success;
     krb5_data *c1, *c2, *realm;
     gss_buffer_desc gss_str;
     kadm5_server_handle_t handle;
     size_t slen;
     char *sdots;

     success = 0;
     handle = (kadm5_server_handle_t)global_server_handle;

     if (rqstp->rq_cred.oa_flavor != RPCSEC_GSS)
	  return 0;

     ctx = rqstp->rq_svccred;

     maj_stat = gss_inquire_context(&min_stat, ctx, NULL, &name,
				    NULL, NULL, NULL, NULL, NULL);
     if (maj_stat != GSS_S_COMPLETE) {
	  krb5_klog_syslog(LOG_ERR, _("check_rpcsec_auth: failed "
				      "inquire_context, stat=%u"), maj_stat);
	  log_badauth(maj_stat, min_stat, rqstp->rq_xprt, NULL);
	  goto fail_name;
     }

     kctx = handle->context;
     ret = gss_to_krb5_name_1(rqstp, kctx, name, &princ, &gss_str);
     if (ret == 0)
	  goto fail_name;

     slen = gss_str.length;
     trunc_name(&slen, &sdots);
     /*
      * Since we accept with GSS_C_NO_NAME, the client can authenticate
      * against the entire kdb.  Therefore, ensure that the service
      * name is something reasonable.
      */
     if (krb5_princ_size(kctx, princ) != 2)
	  goto fail_princ;

     c1 = krb5_princ_component(kctx, princ, 0);
     c2 = krb5_princ_component(kctx, princ, 1);
     realm = krb5_princ_realm(kctx, princ);
     /* Require kadmin/<x>@<local realm>, and explicitly reject the
	kadmin/history principal whose key must never authenticate clients. */
     success = data_eq_string(*realm, handle->params.realm) &&
	  data_eq_string(*c1, "kadmin") &&
	  !data_eq_string(*c2, "history");

fail_princ:
     if (!success) {
	  krb5_klog_syslog(LOG_ERR, _("bad service principal %.*s%s"),
			   (int) slen, (char *) gss_str.value, sdots);
     }
     gss_release_buffer(&min_stat, &gss_str);
     krb5_free_principal(kctx, princ);
fail_name:
     gss_release_name(&min_stat, &name);
     return success;
}

/*
 * Convert a GSS name to a krb5 principal; also hands back the display-name
 * buffer (caller releases it with gss_release_buffer on success).  Returns
 * nonzero on success, zero on failure.
 */
int
gss_to_krb5_name_1(struct svc_req *rqstp, krb5_context ctx, gss_name_t gss_name,
		   krb5_principal *princ, gss_buffer_t gss_str)
{
     OM_uint32 status, minor_stat;
     gss_OID gss_type;
     char *str;
     int success;

     status = gss_display_name(&minor_stat, gss_name, gss_str, &gss_type);
     /* Only accept names of the krb5 name type. */
     if ((status != GSS_S_COMPLETE) || (gss_type != gss_nt_krb5_name)) {
	  krb5_klog_syslog(LOG_ERR, _("gss_to_krb5_name: failed display_name "
				      "status %d"), status);
	  log_badauth(status, minor_stat, rqstp->rq_xprt, NULL);
	  return 0;
     }
     /* The GSS buffer is not NUL-terminated; build a terminated copy. */
     str = malloc(gss_str->length +1);
     if (str == NULL)
	  return 0;
     *str = '\0';
     strncat(str, gss_str->value, gss_str->length);
     success = (krb5_parse_name(ctx, str, princ) == 0);
     free(str);
     return success;
}
./CrossVul/dataset_final_sorted/CWE-284/c/good_2374_0
crossvul-cpp_data_good_3852_0
/* * NETLINK Kernel-user communication protocol. * * Authors: Alan Cox <alan@lxorguk.ukuu.org.uk> * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * Tue Jun 26 14:36:48 MEST 2001 Herbert "herp" Rosmanith * added netlink_proto_exit * Tue Jan 22 18:32:44 BRST 2002 Arnaldo C. de Melo <acme@conectiva.com.br> * use nlk_sk, as sk->protinfo is on a diet 8) * Fri Jul 22 19:51:12 MEST 2005 Harald Welte <laforge@gnumonks.org> * - inc module use count of module that owns * the kernel socket in case userspace opens * socket of same protocol * - remove all module support, since netlink is * mandatory if CONFIG_NET=y these days */ #include <linux/module.h> #include <linux/capability.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/signal.h> #include <linux/sched.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/stat.h> #include <linux/socket.h> #include <linux/un.h> #include <linux/fcntl.h> #include <linux/termios.h> #include <linux/sockios.h> #include <linux/net.h> #include <linux/fs.h> #include <linux/slab.h> #include <asm/uaccess.h> #include <linux/skbuff.h> #include <linux/netdevice.h> #include <linux/rtnetlink.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/notifier.h> #include <linux/security.h> #include <linux/jhash.h> #include <linux/jiffies.h> #include <linux/random.h> #include <linux/bitops.h> #include <linux/mm.h> #include <linux/types.h> #include <linux/audit.h> #include <linux/mutex.h> #include <net/net_namespace.h> #include <net/sock.h> #include <net/scm.h> #include <net/netlink.h> #define NLGRPSZ(x) (ALIGN(x, sizeof(unsigned long) * 8) / 8) #define NLGRPLONGS(x) (NLGRPSZ(x)/sizeof(unsigned long)) struct netlink_sock { /* struct sock has to be the first member 
of netlink_sock */ struct sock sk; u32 pid; u32 dst_pid; u32 dst_group; u32 flags; u32 subscriptions; u32 ngroups; unsigned long *groups; unsigned long state; wait_queue_head_t wait; struct netlink_callback *cb; struct mutex *cb_mutex; struct mutex cb_def_mutex; void (*netlink_rcv)(struct sk_buff *skb); void (*netlink_bind)(int group); struct module *module; }; struct listeners { struct rcu_head rcu; unsigned long masks[0]; }; #define NETLINK_KERNEL_SOCKET 0x1 #define NETLINK_RECV_PKTINFO 0x2 #define NETLINK_BROADCAST_SEND_ERROR 0x4 #define NETLINK_RECV_NO_ENOBUFS 0x8 static inline struct netlink_sock *nlk_sk(struct sock *sk) { return container_of(sk, struct netlink_sock, sk); } static inline int netlink_is_kernel(struct sock *sk) { return nlk_sk(sk)->flags & NETLINK_KERNEL_SOCKET; } struct nl_pid_hash { struct hlist_head *table; unsigned long rehash_time; unsigned int mask; unsigned int shift; unsigned int entries; unsigned int max_shift; u32 rnd; }; struct netlink_table { struct nl_pid_hash hash; struct hlist_head mc_list; struct listeners __rcu *listeners; unsigned int nl_nonroot; unsigned int groups; struct mutex *cb_mutex; struct module *module; void (*bind)(int group); int registered; }; static struct netlink_table *nl_table; static DECLARE_WAIT_QUEUE_HEAD(nl_table_wait); static int netlink_dump(struct sock *sk); static DEFINE_RWLOCK(nl_table_lock); static atomic_t nl_table_users = ATOMIC_INIT(0); static ATOMIC_NOTIFIER_HEAD(netlink_chain); static inline u32 netlink_group_mask(u32 group) { return group ? 
1 << (group - 1) : 0; } static inline struct hlist_head *nl_pid_hashfn(struct nl_pid_hash *hash, u32 pid) { return &hash->table[jhash_1word(pid, hash->rnd) & hash->mask]; } static void netlink_destroy_callback(struct netlink_callback *cb) { kfree_skb(cb->skb); kfree(cb); } static void netlink_consume_callback(struct netlink_callback *cb) { consume_skb(cb->skb); kfree(cb); } static void netlink_sock_destruct(struct sock *sk) { struct netlink_sock *nlk = nlk_sk(sk); if (nlk->cb) { if (nlk->cb->done) nlk->cb->done(nlk->cb); netlink_destroy_callback(nlk->cb); } skb_queue_purge(&sk->sk_receive_queue); if (!sock_flag(sk, SOCK_DEAD)) { printk(KERN_ERR "Freeing alive netlink socket %p\n", sk); return; } WARN_ON(atomic_read(&sk->sk_rmem_alloc)); WARN_ON(atomic_read(&sk->sk_wmem_alloc)); WARN_ON(nlk_sk(sk)->groups); } /* This lock without WQ_FLAG_EXCLUSIVE is good on UP and it is _very_ bad on * SMP. Look, when several writers sleep and reader wakes them up, all but one * immediately hit write lock and grab all the cpus. Exclusive sleep solves * this, _but_ remember, it adds useless work on UP machines. 
 */
/* Acquire exclusive (writer) access to nl_table, sleeping until all
 * netlink_lock_table() readers have drained. */
void netlink_table_grab(void)
	__acquires(nl_table_lock)
{
	might_sleep();

	write_lock_irq(&nl_table_lock);

	if (atomic_read(&nl_table_users)) {
		DECLARE_WAITQUEUE(wait, current);

		/* Exclusive wait: wake only one writer at a time (see the
		 * comment above). */
		add_wait_queue_exclusive(&nl_table_wait, &wait);
		for (;;) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			if (atomic_read(&nl_table_users) == 0)
				break;
			/* Drop the lock while sleeping so readers can exit. */
			write_unlock_irq(&nl_table_lock);
			schedule();
			write_lock_irq(&nl_table_lock);
		}

		__set_current_state(TASK_RUNNING);
		remove_wait_queue(&nl_table_wait, &wait);
	}
}

/* Release writer access and wake anyone waiting on the table. */
void netlink_table_ungrab(void)
	__releases(nl_table_lock)
{
	write_unlock_irq(&nl_table_lock);
	wake_up(&nl_table_wait);
}

/* Sleepable reader entry: bump the user count under the rwlock so a
 * concurrent grab cannot miss us. */
static inline void
netlink_lock_table(void)
{
	/* read_lock() synchronizes us to netlink_table_grab */

	read_lock(&nl_table_lock);
	atomic_inc(&nl_table_users);
	read_unlock(&nl_table_lock);
}

/* Reader exit: last reader out wakes a sleeping writer. */
static inline void
netlink_unlock_table(void)
{
	if (atomic_dec_and_test(&nl_table_users))
		wake_up(&nl_table_wait);
}

/* Find the socket bound to (net, protocol, pid); returns it with an
 * extra reference held, or NULL. */
static struct sock *netlink_lookup(struct net *net, int protocol, u32 pid)
{
	struct nl_pid_hash *hash = &nl_table[protocol].hash;
	struct hlist_head *head;
	struct sock *sk;
	struct hlist_node *node;

	read_lock(&nl_table_lock);
	head = nl_pid_hashfn(hash, pid);
	sk_for_each(sk, node, head) {
		if (net_eq(sock_net(sk), net) && (nlk_sk(sk)->pid == pid)) {
			sock_hold(sk);
			goto found;
		}
	}
	sk = NULL;
found:
	read_unlock(&nl_table_lock);
	return sk;
}

/* Allocate a zeroed bucket array; small tables via kzalloc, large ones
 * straight from the page allocator. */
static struct hlist_head *nl_pid_hash_zalloc(size_t size)
{
	if (size <= PAGE_SIZE)
		return kzalloc(size, GFP_ATOMIC);
	else
		return (struct hlist_head *)
			__get_free_pages(GFP_ATOMIC | __GFP_ZERO,
					 get_order(size));
}

/* Counterpart of nl_pid_hash_zalloc(). */
static void nl_pid_hash_free(struct hlist_head *table, size_t size)
{
	if (size <= PAGE_SIZE)
		kfree(table);
	else
		free_pages((unsigned long)table, get_order(size));
}

/* Rebuild the pid hash, optionally doubling it (@grow); reseeds rnd so
 * bucket distribution changes. Returns 1 if a new table was installed.
 * Caller holds the grabbed table lock. */
static int nl_pid_hash_rehash(struct nl_pid_hash *hash, int grow)
{
	unsigned int omask, mask, shift;
	size_t osize, size;
	struct hlist_head *otable, *table;
	int i;

	omask = mask = hash->mask;
	osize = size = (mask + 1) * sizeof(*table);
	shift = hash->shift;

	if (grow) {
		if (++shift > hash->max_shift)
			return 0;
		mask = mask * 2 + 1;
		size *= 2;
	}

	table = nl_pid_hash_zalloc(size);
	if (!table)
		return 0;

	otable = hash->table;
	hash->table = table;
	hash->mask = mask;
	hash->shift = shift;
	get_random_bytes(&hash->rnd, sizeof(hash->rnd));

	/* Move every socket into its bucket under the new seed/mask. */
	for (i = 0; i <= omask; i++) {
		struct sock *sk;
		struct hlist_node *node, *tmp;

		sk_for_each_safe(sk, node, tmp, &otable[i])
			__sk_add_node(sk, nl_pid_hashfn(hash, nlk_sk(sk)->pid));
	}

	nl_pid_hash_free(otable, osize);
	hash->rehash_time = jiffies + 10 * 60 * HZ;
	return 1;
}

/* Grow when average chain length exceeds 1; otherwise, if the probed
 * chain (@len) is unusually long, reseed at most every 10 minutes to
 * defeat pathological bucket collisions. */
static inline int nl_pid_hash_dilute(struct nl_pid_hash *hash, int len)
{
	int avg = hash->entries >> hash->shift;

	if (unlikely(avg > 1) && nl_pid_hash_rehash(hash, 1))
		return 1;

	if (unlikely(len > avg) && time_after(jiffies, hash->rehash_time)) {
		nl_pid_hash_rehash(hash, 0);
		return 1;
	}

	return 0;
}

static const struct proto_ops netlink_ops;

/* Recompute the per-protocol listeners bitmap as the union of all bound
 * sockets' group masks. Caller holds the grabbed table lock. */
static void
netlink_update_listeners(struct sock *sk)
{
	struct netlink_table *tbl = &nl_table[sk->sk_protocol];
	struct hlist_node *node;
	unsigned long mask;
	unsigned int i;

	for (i = 0; i < NLGRPLONGS(tbl->groups); i++) {
		mask = 0;
		sk_for_each_bound(sk, node, &tbl->mc_list) {
			if (i < NLGRPLONGS(nlk_sk(sk)->ngroups))
				mask |= nlk_sk(sk)->groups[i];
		}
		tbl->listeners->masks[i] = mask;
	}
	/* this function is only called with the netlink table "grabbed", which
	 * makes sure updates are visible before bind or setsockopt return.
	 */
}

/* Bind @sk to @pid in its protocol's hash. Returns -EADDRINUSE if the
 * pid is taken, -EBUSY if @sk is already bound. Serialized by grabbing
 * the table lock. */
static int netlink_insert(struct sock *sk, struct net *net, u32 pid)
{
	struct nl_pid_hash *hash = &nl_table[sk->sk_protocol].hash;
	struct hlist_head *head;
	int err = -EADDRINUSE;
	struct sock *osk;
	struct hlist_node *node;
	int len;

	netlink_table_grab();
	head = nl_pid_hashfn(hash, pid);
	len = 0;
	sk_for_each(osk, node, head) {
		if (net_eq(sock_net(osk), net) && (nlk_sk(osk)->pid == pid))
			break;
		len++;
	}
	if (node)
		goto err;	/* pid already bound in this net */

	err = -EBUSY;
	if (nlk_sk(sk)->pid)
		goto err;

	err = -ENOMEM;
	if (BITS_PER_LONG > 32 && unlikely(hash->entries >= UINT_MAX))
		goto err;

	/* Possibly rehash; if so, recompute the bucket for the new table. */
	if (len && nl_pid_hash_dilute(hash, len))
		head = nl_pid_hashfn(hash, pid);
	hash->entries++;
	nlk_sk(sk)->pid = pid;
	sk_add_node(sk, head);
	err = 0;

err:
	netlink_table_ungrab();
	return err;
}

/* Unhash @sk and drop it from the multicast bound list. */
static void netlink_remove(struct sock *sk)
{
	netlink_table_grab();
	if (sk_del_node_init(sk))
		nl_table[sk->sk_protocol].hash.entries--;
	if (nlk_sk(sk)->subscriptions)
		__sk_del_bind_node(sk);
	netlink_table_ungrab();
}

static struct proto netlink_proto = {
	.name	  = "NETLINK",
	.owner	  = THIS_MODULE,
	.obj_size = sizeof(struct netlink_sock),
};

/* Allocate and minimally initialize a netlink sock for @sock; used by
 * both user socket creation and netlink_kernel_create(). */
static int __netlink_create(struct net *net, struct socket *sock,
			    struct mutex *cb_mutex, int protocol)
{
	struct sock *sk;
	struct netlink_sock *nlk;

	sock->ops = &netlink_ops;

	sk = sk_alloc(net, PF_NETLINK, GFP_KERNEL, &netlink_proto);
	if (!sk)
		return -ENOMEM;

	sock_init_data(sock, sk);

	nlk = nlk_sk(sk);
	if (cb_mutex) {
		nlk->cb_mutex = cb_mutex;
	} else {
		/* Fall back to the embedded per-socket mutex. */
		nlk->cb_mutex = &nlk->cb_def_mutex;
		mutex_init(nlk->cb_mutex);
	}
	init_waitqueue_head(&nlk->wait);

	sk->sk_destruct = netlink_sock_destruct;
	sk->sk_protocol = protocol;
	return 0;
}

/* socket(2) handler for PF_NETLINK: validate type/protocol, auto-load
 * the protocol module if needed, and pin it for the socket's lifetime. */
static int netlink_create(struct net *net, struct socket *sock, int protocol,
			  int kern)
{
	struct module *module = NULL;
	struct mutex *cb_mutex;
	struct netlink_sock *nlk;
	void (*bind)(int group);
	int err = 0;

	sock->state = SS_UNCONNECTED;

	if (sock->type != SOCK_RAW && sock->type != SOCK_DGRAM)
		return -ESOCKTNOSUPPORT;

	if (protocol < 0 || protocol >= MAX_LINKS)
		return -EPROTONOSUPPORT;

	netlink_lock_table();
#ifdef CONFIG_MODULES
	if (!nl_table[protocol].registered) {
		/* Must drop the table lock around request_module(). */
		netlink_unlock_table();
		request_module("net-pf-%d-proto-%d", PF_NETLINK, protocol);
		netlink_lock_table();
	}
#endif
	if (nl_table[protocol].registered &&
	    try_module_get(nl_table[protocol].module))
		module = nl_table[protocol].module;
	else
		err = -EPROTONOSUPPORT;
	cb_mutex = nl_table[protocol].cb_mutex;
	bind = nl_table[protocol].bind;
	netlink_unlock_table();

	if (err < 0)
		goto out;

	err = __netlink_create(net, sock, cb_mutex, protocol);
	if (err < 0)
		goto out_module;

	local_bh_disable();
	sock_prot_inuse_add(net, &netlink_proto, 1);
	local_bh_enable();

	nlk = nlk_sk(sock->sk);
	nlk->module = module;
	nlk->netlink_bind = bind;
out:
	return err;

out_module:
	module_put(module);
	goto out;
}

/* close(2) handler: unlink the socket, notify listeners of the released
 * pid, and drop protocol/module references. */
static int netlink_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk;

	if (!sk)
		return 0;

	netlink_remove(sk);
	sock_orphan(sk);
	nlk = nlk_sk(sk);

	/*
	 * OK. Socket is unlinked, any packets that arrive now
	 * will be purged.
*/ sock->sk = NULL; wake_up_interruptible_all(&nlk->wait); skb_queue_purge(&sk->sk_write_queue); if (nlk->pid) { struct netlink_notify n = { .net = sock_net(sk), .protocol = sk->sk_protocol, .pid = nlk->pid, }; atomic_notifier_call_chain(&netlink_chain, NETLINK_URELEASE, &n); } module_put(nlk->module); netlink_table_grab(); if (netlink_is_kernel(sk)) { BUG_ON(nl_table[sk->sk_protocol].registered == 0); if (--nl_table[sk->sk_protocol].registered == 0) { kfree(nl_table[sk->sk_protocol].listeners); nl_table[sk->sk_protocol].module = NULL; nl_table[sk->sk_protocol].registered = 0; } } else if (nlk->subscriptions) { netlink_update_listeners(sk); } netlink_table_ungrab(); kfree(nlk->groups); nlk->groups = NULL; local_bh_disable(); sock_prot_inuse_add(sock_net(sk), &netlink_proto, -1); local_bh_enable(); sock_put(sk); return 0; } static int netlink_autobind(struct socket *sock) { struct sock *sk = sock->sk; struct net *net = sock_net(sk); struct nl_pid_hash *hash = &nl_table[sk->sk_protocol].hash; struct hlist_head *head; struct sock *osk; struct hlist_node *node; s32 pid = task_tgid_vnr(current); int err; static s32 rover = -4097; retry: cond_resched(); netlink_table_grab(); head = nl_pid_hashfn(hash, pid); sk_for_each(osk, node, head) { if (!net_eq(sock_net(osk), net)) continue; if (nlk_sk(osk)->pid == pid) { /* Bind collision, search negative pid values. */ pid = rover--; if (rover > -4097) rover = -4097; netlink_table_ungrab(); goto retry; } } netlink_table_ungrab(); err = netlink_insert(sk, net, pid); if (err == -EADDRINUSE) goto retry; /* If 2 threads race to autobind, that is fine. 
*/ if (err == -EBUSY) err = 0; return err; } static inline int netlink_capable(const struct socket *sock, unsigned int flag) { return (nl_table[sock->sk->sk_protocol].nl_nonroot & flag) || capable(CAP_NET_ADMIN); } static void netlink_update_subscriptions(struct sock *sk, unsigned int subscriptions) { struct netlink_sock *nlk = nlk_sk(sk); if (nlk->subscriptions && !subscriptions) __sk_del_bind_node(sk); else if (!nlk->subscriptions && subscriptions) sk_add_bind_node(sk, &nl_table[sk->sk_protocol].mc_list); nlk->subscriptions = subscriptions; } static int netlink_realloc_groups(struct sock *sk) { struct netlink_sock *nlk = nlk_sk(sk); unsigned int groups; unsigned long *new_groups; int err = 0; netlink_table_grab(); groups = nl_table[sk->sk_protocol].groups; if (!nl_table[sk->sk_protocol].registered) { err = -ENOENT; goto out_unlock; } if (nlk->ngroups >= groups) goto out_unlock; new_groups = krealloc(nlk->groups, NLGRPSZ(groups), GFP_ATOMIC); if (new_groups == NULL) { err = -ENOMEM; goto out_unlock; } memset((char *)new_groups + NLGRPSZ(nlk->ngroups), 0, NLGRPSZ(groups) - NLGRPSZ(nlk->ngroups)); nlk->groups = new_groups; nlk->ngroups = groups; out_unlock: netlink_table_ungrab(); return err; } static int netlink_bind(struct socket *sock, struct sockaddr *addr, int addr_len) { struct sock *sk = sock->sk; struct net *net = sock_net(sk); struct netlink_sock *nlk = nlk_sk(sk); struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr; int err; if (nladdr->nl_family != AF_NETLINK) return -EINVAL; /* Only superuser is allowed to listen multicasts */ if (nladdr->nl_groups) { if (!netlink_capable(sock, NL_NONROOT_RECV)) return -EPERM; err = netlink_realloc_groups(sk); if (err) return err; } if (nlk->pid) { if (nladdr->nl_pid != nlk->pid) return -EINVAL; } else { err = nladdr->nl_pid ? 
netlink_insert(sk, net, nladdr->nl_pid) : netlink_autobind(sock); if (err) return err; } if (!nladdr->nl_groups && (nlk->groups == NULL || !(u32)nlk->groups[0])) return 0; netlink_table_grab(); netlink_update_subscriptions(sk, nlk->subscriptions + hweight32(nladdr->nl_groups) - hweight32(nlk->groups[0])); nlk->groups[0] = (nlk->groups[0] & ~0xffffffffUL) | nladdr->nl_groups; netlink_update_listeners(sk); netlink_table_ungrab(); if (nlk->netlink_bind && nlk->groups[0]) { int i; for (i=0; i<nlk->ngroups; i++) { if (test_bit(i, nlk->groups)) nlk->netlink_bind(i); } } return 0; } static int netlink_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags) { int err = 0; struct sock *sk = sock->sk; struct netlink_sock *nlk = nlk_sk(sk); struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr; if (alen < sizeof(addr->sa_family)) return -EINVAL; if (addr->sa_family == AF_UNSPEC) { sk->sk_state = NETLINK_UNCONNECTED; nlk->dst_pid = 0; nlk->dst_group = 0; return 0; } if (addr->sa_family != AF_NETLINK) return -EINVAL; /* Only superuser is allowed to send multicasts */ if (nladdr->nl_groups && !netlink_capable(sock, NL_NONROOT_SEND)) return -EPERM; if (!nlk->pid) err = netlink_autobind(sock); if (err == 0) { sk->sk_state = NETLINK_CONNECTED; nlk->dst_pid = nladdr->nl_pid; nlk->dst_group = ffs(nladdr->nl_groups); } return err; } static int netlink_getname(struct socket *sock, struct sockaddr *addr, int *addr_len, int peer) { struct sock *sk = sock->sk; struct netlink_sock *nlk = nlk_sk(sk); DECLARE_SOCKADDR(struct sockaddr_nl *, nladdr, addr); nladdr->nl_family = AF_NETLINK; nladdr->nl_pad = 0; *addr_len = sizeof(*nladdr); if (peer) { nladdr->nl_pid = nlk->dst_pid; nladdr->nl_groups = netlink_group_mask(nlk->dst_group); } else { nladdr->nl_pid = nlk->pid; nladdr->nl_groups = nlk->groups ? 
nlk->groups[0] : 0; } return 0; } static void netlink_overrun(struct sock *sk) { struct netlink_sock *nlk = nlk_sk(sk); if (!(nlk->flags & NETLINK_RECV_NO_ENOBUFS)) { if (!test_and_set_bit(0, &nlk_sk(sk)->state)) { sk->sk_err = ENOBUFS; sk->sk_error_report(sk); } } atomic_inc(&sk->sk_drops); } static struct sock *netlink_getsockbypid(struct sock *ssk, u32 pid) { struct sock *sock; struct netlink_sock *nlk; sock = netlink_lookup(sock_net(ssk), ssk->sk_protocol, pid); if (!sock) return ERR_PTR(-ECONNREFUSED); /* Don't bother queuing skb if kernel socket has no input function */ nlk = nlk_sk(sock); if (sock->sk_state == NETLINK_CONNECTED && nlk->dst_pid != nlk_sk(ssk)->pid) { sock_put(sock); return ERR_PTR(-ECONNREFUSED); } return sock; } struct sock *netlink_getsockbyfilp(struct file *filp) { struct inode *inode = filp->f_path.dentry->d_inode; struct sock *sock; if (!S_ISSOCK(inode->i_mode)) return ERR_PTR(-ENOTSOCK); sock = SOCKET_I(inode)->sk; if (sock->sk_family != AF_NETLINK) return ERR_PTR(-EINVAL); sock_hold(sock); return sock; } /* * Attach a skb to a netlink socket. * The caller must hold a reference to the destination socket. On error, the * reference is dropped. The skb is not send to the destination, just all * all error checks are performed and memory in the queue is reserved. * Return values: * < 0: error. skb freed, reference to sock dropped. * 0: continue * 1: repeat lookup - reference dropped while waiting for socket memory. 
*/ int netlink_attachskb(struct sock *sk, struct sk_buff *skb, long *timeo, struct sock *ssk) { struct netlink_sock *nlk; nlk = nlk_sk(sk); if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf || test_bit(0, &nlk->state)) { DECLARE_WAITQUEUE(wait, current); if (!*timeo) { if (!ssk || netlink_is_kernel(ssk)) netlink_overrun(sk); sock_put(sk); kfree_skb(skb); return -EAGAIN; } __set_current_state(TASK_INTERRUPTIBLE); add_wait_queue(&nlk->wait, &wait); if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf || test_bit(0, &nlk->state)) && !sock_flag(sk, SOCK_DEAD)) *timeo = schedule_timeout(*timeo); __set_current_state(TASK_RUNNING); remove_wait_queue(&nlk->wait, &wait); sock_put(sk); if (signal_pending(current)) { kfree_skb(skb); return sock_intr_errno(*timeo); } return 1; } skb_set_owner_r(skb, sk); return 0; } static int __netlink_sendskb(struct sock *sk, struct sk_buff *skb) { int len = skb->len; skb_queue_tail(&sk->sk_receive_queue, skb); sk->sk_data_ready(sk, len); return len; } int netlink_sendskb(struct sock *sk, struct sk_buff *skb) { int len = __netlink_sendskb(sk, skb); sock_put(sk); return len; } void netlink_detachskb(struct sock *sk, struct sk_buff *skb) { kfree_skb(skb); sock_put(sk); } static struct sk_buff *netlink_trim(struct sk_buff *skb, gfp_t allocation) { int delta; skb_orphan(skb); delta = skb->end - skb->tail; if (delta * 2 < skb->truesize) return skb; if (skb_shared(skb)) { struct sk_buff *nskb = skb_clone(skb, allocation); if (!nskb) return skb; consume_skb(skb); skb = nskb; } if (!pskb_expand_head(skb, 0, -delta, allocation)) skb->truesize -= delta; return skb; } static void netlink_rcv_wake(struct sock *sk) { struct netlink_sock *nlk = nlk_sk(sk); if (skb_queue_empty(&sk->sk_receive_queue)) clear_bit(0, &nlk->state); if (!test_bit(0, &nlk->state)) wake_up_interruptible(&nlk->wait); } static int netlink_unicast_kernel(struct sock *sk, struct sk_buff *skb) { int ret; struct netlink_sock *nlk = nlk_sk(sk); ret = -ECONNREFUSED; if (nlk->netlink_rcv 
!= NULL) { ret = skb->len; skb_set_owner_r(skb, sk); nlk->netlink_rcv(skb); consume_skb(skb); } else { kfree_skb(skb); } sock_put(sk); return ret; } int netlink_unicast(struct sock *ssk, struct sk_buff *skb, u32 pid, int nonblock) { struct sock *sk; int err; long timeo; skb = netlink_trim(skb, gfp_any()); timeo = sock_sndtimeo(ssk, nonblock); retry: sk = netlink_getsockbypid(ssk, pid); if (IS_ERR(sk)) { kfree_skb(skb); return PTR_ERR(sk); } if (netlink_is_kernel(sk)) return netlink_unicast_kernel(sk, skb); if (sk_filter(sk, skb)) { err = skb->len; kfree_skb(skb); sock_put(sk); return err; } err = netlink_attachskb(sk, skb, &timeo, ssk); if (err == 1) goto retry; if (err) return err; return netlink_sendskb(sk, skb); } EXPORT_SYMBOL(netlink_unicast); int netlink_has_listeners(struct sock *sk, unsigned int group) { int res = 0; struct listeners *listeners; BUG_ON(!netlink_is_kernel(sk)); rcu_read_lock(); listeners = rcu_dereference(nl_table[sk->sk_protocol].listeners); if (group - 1 < nl_table[sk->sk_protocol].groups) res = test_bit(group - 1, listeners->masks); rcu_read_unlock(); return res; } EXPORT_SYMBOL_GPL(netlink_has_listeners); static int netlink_broadcast_deliver(struct sock *sk, struct sk_buff *skb) { struct netlink_sock *nlk = nlk_sk(sk); if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf && !test_bit(0, &nlk->state)) { skb_set_owner_r(skb, sk); __netlink_sendskb(sk, skb); return atomic_read(&sk->sk_rmem_alloc) > (sk->sk_rcvbuf >> 1); } return -1; } struct netlink_broadcast_data { struct sock *exclude_sk; struct net *net; u32 pid; u32 group; int failure; int delivery_failure; int congested; int delivered; gfp_t allocation; struct sk_buff *skb, *skb2; int (*tx_filter)(struct sock *dsk, struct sk_buff *skb, void *data); void *tx_data; }; static int do_one_broadcast(struct sock *sk, struct netlink_broadcast_data *p) { struct netlink_sock *nlk = nlk_sk(sk); int val; if (p->exclude_sk == sk) goto out; if (nlk->pid == p->pid || p->group - 1 >= nlk->ngroups || 
	    !test_bit(p->group - 1, nlk->groups))
		goto out;	/* not subscribed to this group */

	if (!net_eq(sock_net(sk), p->net))
		goto out;

	if (p->failure) {
		/* An earlier clone failed: everyone gets an overrun. */
		netlink_overrun(sk);
		goto out;
	}

	sock_hold(sk);
	if (p->skb2 == NULL) {
		/* Lazily make the copy we hand to receivers. */
		if (skb_shared(p->skb)) {
			p->skb2 = skb_clone(p->skb, p->allocation);
		} else {
			p->skb2 = skb_get(p->skb);
			/*
			 * skb ownership may have been set when
			 * delivered to a previous socket.
			 */
			skb_orphan(p->skb2);
		}
	}
	if (p->skb2 == NULL) {
		netlink_overrun(sk);
		/* Clone failed. Notify ALL listeners. */
		p->failure = 1;
		if (nlk->flags & NETLINK_BROADCAST_SEND_ERROR)
			p->delivery_failure = 1;
	} else if (p->tx_filter && p->tx_filter(sk, p->skb2, p->tx_data)) {
		kfree_skb(p->skb2);
		p->skb2 = NULL;
	} else if (sk_filter(sk, p->skb2)) {
		kfree_skb(p->skb2);
		p->skb2 = NULL;
	} else if ((val = netlink_broadcast_deliver(sk, p->skb2)) < 0) {
		netlink_overrun(sk);
		if (nlk->flags & NETLINK_BROADCAST_SEND_ERROR)
			p->delivery_failure = 1;
	} else {
		/* Delivered: skb2 is now owned by the receive queue. */
		p->congested |= val;
		p->delivered = 1;
		p->skb2 = NULL;
	}
	sock_put(sk);

out:
	return 0;
}

/* Broadcast @skb to every member of @group on @ssk's protocol, skipping
 * @pid, with an optional per-destination @filter. Consumes @skb. Returns
 * 0 on any delivery, -ESRCH if nobody listened, -ENOBUFS on reported
 * delivery failure. */
int netlink_broadcast_filtered(struct sock *ssk, struct sk_buff *skb, u32 pid,
	u32 group, gfp_t allocation,
	int (*filter)(struct sock *dsk, struct sk_buff *skb, void *data),
	void *filter_data)
{
	struct net *net = sock_net(ssk);
	struct netlink_broadcast_data info;
	struct hlist_node *node;
	struct sock *sk;

	skb = netlink_trim(skb, allocation);

	info.exclude_sk = ssk;
	info.net = net;
	info.pid = pid;
	info.group = group;
	info.failure = 0;
	info.delivery_failure = 0;
	info.congested = 0;
	info.delivered = 0;
	info.allocation = allocation;
	info.skb = skb;
	info.skb2 = NULL;
	info.tx_filter = filter;
	info.tx_data = filter_data;

	/* While we sleep in clone, do not allow to change socket list */

	netlink_lock_table();

	sk_for_each_bound(sk, node, &nl_table[ssk->sk_protocol].mc_list)
		do_one_broadcast(sk, &info);

	consume_skb(skb);

	netlink_unlock_table();

	if (info.delivery_failure) {
		kfree_skb(info.skb2);
		return -ENOBUFS;
	}
	consume_skb(info.skb2);

	if (info.delivered) {
		/* Be nice under memory pressure if some queue filled up. */
		if (info.congested && (allocation & __GFP_WAIT))
			yield();
		return 0;
	}
	return -ESRCH;
}
EXPORT_SYMBOL(netlink_broadcast_filtered);

/* Unfiltered convenience wrapper around netlink_broadcast_filtered(). */
int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, u32 pid,
		      u32 group, gfp_t allocation)
{
	return netlink_broadcast_filtered(ssk, skb, pid, group, allocation,
		NULL, NULL);
}
EXPORT_SYMBOL(netlink_broadcast);

/* Shared state for one netlink_set_err() invocation. */
struct netlink_set_err_data {
	struct sock *exclude_sk;
	u32 pid;
	u32 group;
	int code;	/* positive errno to report */
};

/* Report the error in @p to one candidate socket @sk; returns 1 when
 * the socket suppressed it via NETLINK_RECV_NO_ENOBUFS. */
static int do_one_set_err(struct sock *sk, struct netlink_set_err_data *p)
{
	struct netlink_sock *nlk = nlk_sk(sk);
	int ret = 0;

	if (sk == p->exclude_sk)
		goto out;

	if (!net_eq(sock_net(sk), sock_net(p->exclude_sk)))
		goto out;

	if (nlk->pid == p->pid || p->group - 1 >= nlk->ngroups ||
	    !test_bit(p->group - 1, nlk->groups))
		goto out;

	if (p->code == ENOBUFS && nlk->flags & NETLINK_RECV_NO_ENOBUFS) {
		ret = 1;
		goto out;
	}

	sk->sk_err = p->code;
	sk->sk_error_report(sk);
out:
	return ret;
}

/**
 * netlink_set_err - report error to broadcast listeners
 * @ssk: the kernel netlink socket, as returned by netlink_kernel_create()
 * @pid: the PID of a process that we want to skip (if any)
 * @groups: the broadcast group that will notice the error
 * @code: error code, must be negative (as usual in kernelspace)
 *
 * This function returns the number of broadcast listeners that have set the
 * NETLINK_RECV_NO_ENOBUFS socket option.
 */
int netlink_set_err(struct sock *ssk, u32 pid, u32 group, int code)
{
	struct netlink_set_err_data info;
	struct hlist_node *node;
	struct sock *sk;
	int ret = 0;

	info.exclude_sk = ssk;
	info.pid = pid;
	info.group = group;
	/* sk->sk_err wants a positive error value */
	info.code = -code;

	read_lock(&nl_table_lock);

	sk_for_each_bound(sk, node, &nl_table[ssk->sk_protocol].mc_list)
		ret += do_one_set_err(sk, &info);

	read_unlock(&nl_table_lock);
	return ret;
}
EXPORT_SYMBOL(netlink_set_err);

/* must be called with netlink table grabbed */
/* Join (@is_new != 0) or leave a single multicast @group (1-based) and
 * refresh the derived subscription/listener state. */
static void netlink_update_socket_mc(struct netlink_sock *nlk,
				     unsigned int group,
				     int is_new)
{
	int old, new = !!is_new, subscriptions;

	old = test_bit(group - 1, nlk->groups);
	subscriptions = nlk->subscriptions - old + new;
	if (new)
		__set_bit(group - 1, nlk->groups);
	else
		__clear_bit(group - 1, nlk->groups);
	netlink_update_subscriptions(&nlk->sk, subscriptions);
	netlink_update_listeners(&nlk->sk);
}

/* setsockopt(2) handler for SOL_NETLINK options. */
static int netlink_setsockopt(struct socket *sock, int level, int optname,
			      char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	unsigned int val = 0;
	int err;

	if (level != SOL_NETLINK)
		return -ENOPROTOOPT;

	/* Short optlen leaves val at 0 (treated as "disable"). */
	if (optlen >= sizeof(int) &&
	    get_user(val, (unsigned int __user *)optval))
		return -EFAULT;

	switch (optname) {
	case NETLINK_PKTINFO:
		if (val)
			nlk->flags |= NETLINK_RECV_PKTINFO;
		else
			nlk->flags &= ~NETLINK_RECV_PKTINFO;
		err = 0;
		break;
	case NETLINK_ADD_MEMBERSHIP:
	case NETLINK_DROP_MEMBERSHIP: {
		if (!netlink_capable(sock, NL_NONROOT_RECV))
			return -EPERM;
		err = netlink_realloc_groups(sk);
		if (err)
			return err;
		if (!val || val - 1 >= nlk->ngroups)
			return -EINVAL;
		netlink_table_grab();
		netlink_update_socket_mc(nlk, val,
					 optname == NETLINK_ADD_MEMBERSHIP);
		netlink_table_ungrab();

		if (nlk->netlink_bind)
			nlk->netlink_bind(val);

		err = 0;
		break;
	}
	case NETLINK_BROADCAST_ERROR:
		if (val)
			nlk->flags |= NETLINK_BROADCAST_SEND_ERROR;
		else
			nlk->flags &= ~NETLINK_BROADCAST_SEND_ERROR;
		err = 0;
		break;
	case NETLINK_NO_ENOBUFS:
		if (val) {
			nlk->flags |= NETLINK_RECV_NO_ENOBUFS;
			/* Also clear any pending congestion and release
			 * blocked senders. */
			clear_bit(0, &nlk->state);
			wake_up_interruptible(&nlk->wait);
		} else {
			nlk->flags &= ~NETLINK_RECV_NO_ENOBUFS;
		}
		err = 0;
		break;
	default:
		err = -ENOPROTOOPT;
	}
	return err;
}

/* getsockopt(2) handler for SOL_NETLINK options. */
static int netlink_getsockopt(struct socket *sock, int level, int optname,
			      char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	int len, val, err;

	if (level != SOL_NETLINK)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;
	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case NETLINK_PKTINFO:
		if (len < sizeof(int))
			return -EINVAL;
		len = sizeof(int);
		val = nlk->flags & NETLINK_RECV_PKTINFO ? 1 : 0;
		if (put_user(len, optlen) ||
		    put_user(val, optval))
			return -EFAULT;
		err = 0;
		break;
	case NETLINK_BROADCAST_ERROR:
		if (len < sizeof(int))
			return -EINVAL;
		len = sizeof(int);
		val = nlk->flags & NETLINK_BROADCAST_SEND_ERROR ? 1 : 0;
		if (put_user(len, optlen) ||
		    put_user(val, optval))
			return -EFAULT;
		err = 0;
		break;
	case NETLINK_NO_ENOBUFS:
		if (len < sizeof(int))
			return -EINVAL;
		len = sizeof(int);
		val = nlk->flags & NETLINK_RECV_NO_ENOBUFS ?
1 : 0; if (put_user(len, optlen) || put_user(val, optval)) return -EFAULT; err = 0; break; default: err = -ENOPROTOOPT; } return err; } static void netlink_cmsg_recv_pktinfo(struct msghdr *msg, struct sk_buff *skb) { struct nl_pktinfo info; info.group = NETLINK_CB(skb).dst_group; put_cmsg(msg, SOL_NETLINK, NETLINK_PKTINFO, sizeof(info), &info); } static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock, struct msghdr *msg, size_t len) { struct sock_iocb *siocb = kiocb_to_siocb(kiocb); struct sock *sk = sock->sk; struct netlink_sock *nlk = nlk_sk(sk); struct sockaddr_nl *addr = msg->msg_name; u32 dst_pid; u32 dst_group; struct sk_buff *skb; int err; struct scm_cookie scm; if (msg->msg_flags&MSG_OOB) return -EOPNOTSUPP; if (NULL == siocb->scm) siocb->scm = &scm; err = scm_send(sock, msg, siocb->scm, true); if (err < 0) return err; if (msg->msg_namelen) { err = -EINVAL; if (addr->nl_family != AF_NETLINK) goto out; dst_pid = addr->nl_pid; dst_group = ffs(addr->nl_groups); err = -EPERM; if ((dst_group || dst_pid) && !netlink_capable(sock, NL_NONROOT_SEND)) goto out; } else { dst_pid = nlk->dst_pid; dst_group = nlk->dst_group; } if (!nlk->pid) { err = netlink_autobind(sock); if (err) goto out; } err = -EMSGSIZE; if (len > sk->sk_sndbuf - 32) goto out; err = -ENOBUFS; skb = alloc_skb(len, GFP_KERNEL); if (skb == NULL) goto out; NETLINK_CB(skb).pid = nlk->pid; NETLINK_CB(skb).dst_group = dst_group; memcpy(NETLINK_CREDS(skb), &siocb->scm->creds, sizeof(struct ucred)); err = -EFAULT; if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) { kfree_skb(skb); goto out; } err = security_netlink_send(sk, skb); if (err) { kfree_skb(skb); goto out; } if (dst_group) { atomic_inc(&skb->users); netlink_broadcast(sk, skb, dst_pid, dst_group, GFP_KERNEL); } err = netlink_unicast(sk, skb, dst_pid, msg->msg_flags&MSG_DONTWAIT); out: scm_destroy(siocb->scm); return err; } static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock, struct msghdr *msg, size_t len, int 
			   flags)
/* recvmsg(2) handler: dequeue one datagram, copy it out, fill in the
 * source address/credentials, and pump any pending dump. */
{
	struct sock_iocb *siocb = kiocb_to_siocb(kiocb);
	struct scm_cookie scm;
	struct sock *sk = sock->sk;
	struct netlink_sock *nlk = nlk_sk(sk);
	int noblock = flags&MSG_DONTWAIT;
	size_t copied;
	struct sk_buff *skb, *data_skb;
	int err, ret;

	if (flags&MSG_OOB)
		return -EOPNOTSUPP;

	copied = 0;

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (skb == NULL)
		goto out;

	data_skb = skb;

#ifdef CONFIG_COMPAT_NETLINK_MESSAGES
	if (unlikely(skb_shinfo(skb)->frag_list)) {
		/*
		 * If this skb has a frag_list, then here that means that we
		 * will have to use the frag_list skb's data for compat tasks
		 * and the regular skb's data for normal (non-compat) tasks.
		 *
		 * If we need to send the compat skb, assign it to the
		 * 'data_skb' variable so that it will be used below for data
		 * copying. We keep 'skb' for everything else, including
		 * freeing both later.
		 */
		if (flags & MSG_CMSG_COMPAT)
			data_skb = skb_shinfo(skb)->frag_list;
	}
#endif

	msg->msg_namelen = 0;

	copied = data_skb->len;
	if (len < copied) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}

	skb_reset_transport_header(data_skb);
	err = skb_copy_datagram_iovec(data_skb, 0, msg->msg_iov, copied);

	if (msg->msg_name) {
		struct sockaddr_nl *addr = (struct sockaddr_nl *)msg->msg_name;
		addr->nl_family = AF_NETLINK;
		addr->nl_pad    = 0;
		addr->nl_pid	= NETLINK_CB(skb).pid;
		addr->nl_groups	= netlink_group_mask(NETLINK_CB(skb).dst_group);
		msg->msg_namelen = sizeof(*addr);
	}

	if (nlk->flags & NETLINK_RECV_PKTINFO)
		netlink_cmsg_recv_pktinfo(msg, skb);

	if (NULL == siocb->scm) {
		memset(&scm, 0, sizeof(scm));
		siocb->scm = &scm;
	}
	siocb->scm->creds = *NETLINK_CREDS(skb);
	if (flags & MSG_TRUNC)
		copied = data_skb->len;	/* report the full length */

	skb_free_datagram(sk, skb);

	/* Room freed up: let an in-progress dump produce more messages. */
	if (nlk->cb && atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2) {
		ret = netlink_dump(sk);
		if (ret) {
			sk->sk_err = ret;
			sk->sk_error_report(sk);
		}
	}

	scm_recv(sock, msg, siocb->scm, flags);
out:
	netlink_rcv_wake(sk);
	return err ? : copied;
}

/* Kernel sockets are drained via netlink_rcv, never read directly. */
static void netlink_data_ready(struct sock *sk, int len)
{
	BUG();
}

/*
 *	We export these functions to other modules. They provide a
 *	complete set of kernel non-blocking support for message
 *	queueing.
 */

/* Create the in-kernel socket for protocol @unit, registering the
 * protocol's table entry (groups, listeners, input callback) on first
 * use. Returns the sock or NULL. */
struct sock *
netlink_kernel_create(struct net *net, int unit,
		      struct module *module,
		      struct netlink_kernel_cfg *cfg)
{
	struct socket *sock;
	struct sock *sk;
	struct netlink_sock *nlk;
	struct listeners *listeners = NULL;
	struct mutex *cb_mutex = cfg ? cfg->cb_mutex : NULL;
	unsigned int groups;

	BUG_ON(!nl_table);

	if (unit < 0 || unit >= MAX_LINKS)
		return NULL;

	if (sock_create_lite(PF_NETLINK, SOCK_DGRAM, unit, &sock))
		return NULL;

	/*
	 * We have to just have a reference on the net from sk, but don't
	 * get_net it. Besides, we cannot get and then put the net here.
	 * So we create one inside init_net and the move it to net.
	 */

	if (__netlink_create(&init_net, sock, cb_mutex, unit) < 0)
		goto out_sock_release_nosk;

	sk = sock->sk;
	sk_change_net(sk, net);

	if (!cfg || cfg->groups < 32)
		groups = 32;	/* at least 32 groups per protocol */
	else
		groups = cfg->groups;

	listeners = kzalloc(sizeof(*listeners) + NLGRPSZ(groups), GFP_KERNEL);
	if (!listeners)
		goto out_sock_release;

	sk->sk_data_ready = netlink_data_ready;
	if (cfg && cfg->input)
		nlk_sk(sk)->netlink_rcv = cfg->input;

	if (netlink_insert(sk, net, 0))
		goto out_sock_release;

	nlk = nlk_sk(sk);
	nlk->flags |= NETLINK_KERNEL_SOCKET;

	netlink_table_grab();
	if (!nl_table[unit].registered) {
		/* First kernel socket for this protocol: publish its
		 * configuration. */
		nl_table[unit].groups = groups;
		rcu_assign_pointer(nl_table[unit].listeners, listeners);
		nl_table[unit].cb_mutex = cb_mutex;
		nl_table[unit].module = module;
		nl_table[unit].bind = cfg ? cfg->bind : NULL;
		nl_table[unit].registered = 1;
	} else {
		kfree(listeners);
		nl_table[unit].registered++;
	}
	netlink_table_ungrab();
	return sk;

out_sock_release:
	kfree(listeners);
	netlink_kernel_release(sk);
	return NULL;

out_sock_release_nosk:
	sock_release(sock);
	return NULL;
}
EXPORT_SYMBOL(netlink_kernel_create);

/* Destroy a socket made by netlink_kernel_create(). */
void
netlink_kernel_release(struct sock *sk)
{
	sk_release_kernel(sk);
}
EXPORT_SYMBOL(netlink_kernel_release);

/* Grow a protocol's group count, RCU-replacing the listeners bitmap.
 * Caller holds the grabbed table lock. */
int __netlink_change_ngroups(struct sock *sk, unsigned int groups)
{
	struct listeners *new, *old;
	struct netlink_table *tbl = &nl_table[sk->sk_protocol];

	if (groups < 32)
		groups = 32;

	if (NLGRPSZ(tbl->groups) < NLGRPSZ(groups)) {
		new = kzalloc(sizeof(*new) + NLGRPSZ(groups), GFP_ATOMIC);
		if (!new)
			return -ENOMEM;
		old = rcu_dereference_protected(tbl->listeners, 1);
		memcpy(new->masks, old->masks, NLGRPSZ(tbl->groups));
		rcu_assign_pointer(tbl->listeners, new);

		kfree_rcu(old, rcu);
	}
	tbl->groups = groups;

	return 0;
}

/**
 * netlink_change_ngroups - change number of multicast groups
 *
 * This changes the number of multicast groups that are available
 * on a certain netlink family. Note that it is not possible to
 * change the number of groups to below 32. Also note that it does
 * not implicitly call netlink_clear_multicast_users() when the
 * number of groups is reduced.
 *
 * @sk: The kernel netlink socket, as returned by netlink_kernel_create().
 * @groups: The new number of groups.
 */
int netlink_change_ngroups(struct sock *sk, unsigned int groups)
{
	int err;

	netlink_table_grab();
	err = __netlink_change_ngroups(sk, groups);
	netlink_table_ungrab();

	return err;
}

/* Drop every socket out of multicast @group. Caller holds the grabbed
 * table lock. */
void __netlink_clear_multicast_users(struct sock *ksk, unsigned int group)
{
	struct sock *sk;
	struct hlist_node *node;
	struct netlink_table *tbl = &nl_table[ksk->sk_protocol];

	sk_for_each_bound(sk, node, &tbl->mc_list)
		netlink_update_socket_mc(nlk_sk(sk), group, 0);
}

/**
 * netlink_clear_multicast_users - kick off multicast listeners
 *
 * This function removes all listeners from the given group.
 * @ksk: The kernel netlink socket, as returned by
 *      netlink_kernel_create().
 * @group: The multicast group to clear.
 */
void netlink_clear_multicast_users(struct sock *ksk, unsigned int group)
{
    /* Hold the table write lock so the bound-socket list is stable while
     * each member's subscription bit for @group is cleared. */
    netlink_table_grab();
    __netlink_clear_multicast_users(ksk, group);
    netlink_table_ungrab();
}

/* Record which NL_NONROOT_* rights unprivileged users get on @protocol.
 * NOTE(review): no capability check here — callers are trusted kernel
 * code configuring their own protocol slot. */
void netlink_set_nonroot(int protocol, unsigned int flags)
{
    if ((unsigned int)protocol < MAX_LINKS)
        nl_table[protocol].nl_nonroot = flags;
}
EXPORT_SYMBOL(netlink_set_nonroot);

/* Reserve and fill in a netlink message header (plus @len payload bytes)
 * at the tail of @skb.  The alignment padding after the payload is zeroed
 * so uninitialized kernel memory is never exposed to user space. */
struct nlmsghdr *
__nlmsg_put(struct sk_buff *skb, u32 pid, u32 seq, int type, int len, int flags)
{
    struct nlmsghdr *nlh;
    int size = NLMSG_LENGTH(len);

    nlh = (struct nlmsghdr*)skb_put(skb, NLMSG_ALIGN(size));
    nlh->nlmsg_type = type;
    nlh->nlmsg_len = size;
    nlh->nlmsg_flags = flags;
    nlh->nlmsg_pid = pid;
    nlh->nlmsg_seq = seq;
    /* Zero the tail padding (skipped when the compiler can prove the
     * aligned size equals the requested size). */
    if (!__builtin_constant_p(size) || NLMSG_ALIGN(size) - size != 0)
        memset(NLMSG_DATA(nlh) + len, 0, NLMSG_ALIGN(size) - size);
    return nlh;
}
EXPORT_SYMBOL(__nlmsg_put);

/*
 * It looks a bit ugly.
 * It would be better to create kernel thread.
 */

/* Run one round of an in-progress dump: allocate an skb, let the
 * registered ->dump() callback fill it, and either queue the partial
 * chunk (len > 0, callback stays armed) or append the final NLMSG_DONE
 * record and tear the callback down.  Returns 0 on progress, -errno on
 * failure. */
static int netlink_dump(struct sock *sk)
{
    struct netlink_sock *nlk = nlk_sk(sk);
    struct netlink_callback *cb;
    struct sk_buff *skb = NULL;
    struct nlmsghdr *nlh;
    int len, err = -ENOBUFS;
    int alloc_size;

    mutex_lock(nlk->cb_mutex);

    cb = nlk->cb;
    if (cb == NULL) {
        err = -EINVAL;
        goto errout_skb;
    }

    alloc_size = max_t(int, cb->min_dump_alloc, NLMSG_GOODSIZE);

    skb = sock_rmalloc(sk, alloc_size, 0, GFP_KERNEL);
    if (!skb)
        goto errout_skb;

    len = cb->dump(skb, cb);

    if (len > 0) {
        /* More data to come: deliver this chunk (unless a socket filter
         * drops it) and leave the callback installed. */
        mutex_unlock(nlk->cb_mutex);

        if (sk_filter(sk, skb))
            kfree_skb(skb);
        else
            __netlink_sendskb(sk, skb);
        return 0;
    }

    /* Dump finished: append NLMSG_DONE carrying the final return value. */
    nlh = nlmsg_put_answer(skb, cb, NLMSG_DONE, sizeof(len), NLM_F_MULTI);
    if (!nlh)
        goto errout_skb;

    nl_dump_check_consistent(cb, nlh);

    memcpy(nlmsg_data(nlh), &len, sizeof(len));

    if (sk_filter(sk, skb))
        kfree_skb(skb);
    else
        __netlink_sendskb(sk, skb);

    if (cb->done)
        cb->done(cb);
    nlk->cb = NULL;
    mutex_unlock(nlk->cb_mutex);

    netlink_consume_callback(cb);
    return 0;

errout_skb:
    mutex_unlock(nlk->cb_mutex);
    kfree_skb(skb);
    return err;
}

/* Install a dump callback described by @control on the kernel socket for
 * @ssk's protocol, then run the first netlink_dump() round.  Only one
 * dump may be active per socket at a time (-EBUSY otherwise).  On
 * success returns -EINTR so the caller knows not to send an ACK. */
int netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
                       const struct nlmsghdr *nlh,
                       struct netlink_dump_control *control)
{
    struct netlink_callback *cb;
    struct sock *sk;
    struct netlink_sock *nlk;
    int ret;

    cb = kzalloc(sizeof(*cb), GFP_KERNEL);
    if (cb == NULL)
        return -ENOBUFS;

    cb->dump = control->dump;
    cb->done = control->done;
    cb->nlh = nlh;
    cb->data = control->data;
    cb->min_dump_alloc = control->min_dump_alloc;
    atomic_inc(&skb->users);    /* the callback keeps a reference to @skb */
    cb->skb = skb;

    sk = netlink_lookup(sock_net(ssk), ssk->sk_protocol, NETLINK_CB(skb).pid);
    if (sk == NULL) {
        netlink_destroy_callback(cb);
        return -ECONNREFUSED;
    }
    nlk = nlk_sk(sk);

    /* A dump is in progress... */
    mutex_lock(nlk->cb_mutex);
    if (nlk->cb) {
        mutex_unlock(nlk->cb_mutex);
        netlink_destroy_callback(cb);
        sock_put(sk);
        return -EBUSY;
    }
    nlk->cb = cb;
    mutex_unlock(nlk->cb_mutex);

    ret = netlink_dump(sk);

    sock_put(sk);

    if (ret)
        return ret;

    /* We successfully started a dump, by returning -EINTR we
     * signal not to send ACK even if it was requested.
     */
    return -EINTR;
}
EXPORT_SYMBOL(netlink_dump_start);

/* Send an NLMSG_ERROR reply for @nlh back to its sender; @err == 0 is a
 * plain ACK.  Error replies carry the full original request, ACKs only
 * its header.  If the reply cannot be allocated, ENOBUFS is raised on
 * the sender's socket instead. */
void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err)
{
    struct sk_buff *skb;
    struct nlmsghdr *rep;
    struct nlmsgerr *errmsg;
    size_t payload = sizeof(*errmsg);

    /* error messages get the original request appended */
    if (err)
        payload += nlmsg_len(nlh);

    skb = nlmsg_new(payload, GFP_KERNEL);
    if (!skb) {
        struct sock *sk;

        sk = netlink_lookup(sock_net(in_skb->sk),
                            in_skb->sk->sk_protocol,
                            NETLINK_CB(in_skb).pid);
        if (sk) {
            sk->sk_err = ENOBUFS;
            sk->sk_error_report(sk);
            sock_put(sk);
        }
        return;
    }

    rep = __nlmsg_put(skb, NETLINK_CB(in_skb).pid, nlh->nlmsg_seq,
                      NLMSG_ERROR, payload, 0);
    errmsg = nlmsg_data(rep);
    errmsg->error = err;
    memcpy(&errmsg->msg, nlh, err ? nlh->nlmsg_len : sizeof(*nlh));
    netlink_unicast(in_skb->sk, skb, NETLINK_CB(in_skb).pid, MSG_DONTWAIT);
}
EXPORT_SYMBOL(netlink_ack);

/* Walk every complete netlink message in @skb, feed kernel-bound
 * requests to @cb, and acknowledge each message when NLM_F_ACK was set
 * or the handler failed.  -EINTR from @cb suppresses the ACK (a dump
 * was started and will reply on its own). */
int netlink_rcv_skb(struct sk_buff *skb, int (*cb)(struct sk_buff *,
                                                   struct nlmsghdr *))
{
    struct nlmsghdr *nlh;
    int err;

    while (skb->len >= nlmsg_total_size(0)) {
        int msglen;

        nlh = nlmsg_hdr(skb);
        err = 0;

        /* Malformed or truncated header: stop parsing silently. */
        if (nlh->nlmsg_len < NLMSG_HDRLEN || skb->len < nlh->nlmsg_len)
            return 0;

        /* Only requests are handled by the kernel */
        if (!(nlh->nlmsg_flags & NLM_F_REQUEST))
            goto ack;

        /* Skip control messages */
        if (nlh->nlmsg_type < NLMSG_MIN_TYPE)
            goto ack;

        err = cb(skb, nlh);
        if (err == -EINTR)
            goto skip;

ack:
        if (nlh->nlmsg_flags & NLM_F_ACK || err)
            netlink_ack(skb, nlh, err);

skip:
        msglen = NLMSG_ALIGN(nlh->nlmsg_len);
        if (msglen > skb->len)
            msglen = skb->len;
        skb_pull(skb, msglen);
    }

    return 0;
}
EXPORT_SYMBOL(netlink_rcv_skb);

/**
 * nlmsg_notify - send a notification netlink message
 * @sk: netlink socket to use
 * @skb: notification message
 * @pid: destination netlink pid for reports or 0
 * @group: destination multicast group or 0
 * @report: 1 to report back, 0 to disable
 * @flags: allocation flags
 */
int nlmsg_notify(struct sock *sk, struct sk_buff *skb, u32 pid,
                 unsigned int group, int report, gfp_t flags)
{
    int err = 0;

    if (group) {
        int exclude_pid = 0;

        if (report) {
            /* @skb is also unicast below; keep an extra reference. */
            atomic_inc(&skb->users);
            exclude_pid = pid;
        }

        /* errors reported via destination sk->sk_err, but propagate
         * delivery errors if NETLINK_BROADCAST_ERROR flag is set */
        err = nlmsg_multicast(sk, skb, exclude_pid, group, flags);
    }

    if (report) {
        int err2;

        err2 = nlmsg_unicast(sk, skb, pid);
        /* A multicast error wins unless it was only "no listeners". */
        if (!err || err == -ESRCH)
            err = err2;
    }

    return err;
}
EXPORT_SYMBOL(nlmsg_notify);

#ifdef CONFIG_PROC_FS
/* Iterator cursor for /proc/net/netlink. */
struct nl_seq_iter {
    struct seq_net_private p;
    int link;       /* protocol table index currently being walked */
    int hash_idx;   /* bucket within that table's pid hash */
};

/* Locate the @pos'th socket (in table/bucket/chain order) belonging to
 * this seq file's network namespace, updating the cursor on success. */
static struct sock *netlink_seq_socket_idx(struct seq_file *seq, loff_t pos)
{
    struct nl_seq_iter *iter = seq->private;
    int i, j;
    struct sock *s;
    struct hlist_node *node;
    loff_t off = 0;

    for (i = 0; i < MAX_LINKS; i++) {
        struct nl_pid_hash *hash = &nl_table[i].hash;

        for (j = 0; j <= hash->mask; j++) {
            sk_for_each(s, node, &hash->table[j]) {
                if (sock_net(s) != seq_file_net(seq))
                    continue;
                if (off == pos) {
                    iter->link = i;
                    iter->hash_idx = j;
                    return s;
                }
                ++off;
            }
        }
    }
    return NULL;
}

static void *netlink_seq_start(struct seq_file *seq, loff_t *pos)
    __acquires(nl_table_lock)
{
    read_lock(&nl_table_lock);
    return *pos ? netlink_seq_socket_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}

/* Advance to the next in-namespace socket: first along the current hash
 * chain, then through the remaining buckets and protocol tables. */
static void *netlink_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
    struct sock *s;
    struct nl_seq_iter *iter;
    int i, j;

    ++*pos;

    if (v == SEQ_START_TOKEN)
        return netlink_seq_socket_idx(seq, 0);

    iter = seq->private;

    s = v;
    do {
        s = sk_next(s);
    } while (s && sock_net(s) != seq_file_net(seq));
    if (s)
        return s;

    i = iter->link;
    j = iter->hash_idx + 1;

    do {
        struct nl_pid_hash *hash = &nl_table[i].hash;

        for (; j <= hash->mask; j++) {
            s = sk_head(&hash->table[j]);
            while (s && sock_net(s) != seq_file_net(seq))
                s = sk_next(s);
            if (s) {
                iter->link = i;
                iter->hash_idx = j;
                return s;
            }
        }

        j = 0;
    } while (++i < MAX_LINKS);

    return NULL;
}

static void netlink_seq_stop(struct seq_file *seq, void *v)
    __releases(nl_table_lock)
{
    read_unlock(&nl_table_lock);
}

/* Emit one /proc/net/netlink row, or the column header for the first
 * call (SEQ_START_TOKEN). */
static int netlink_seq_show(struct seq_file *seq, void *v)
{
    if (v == SEQ_START_TOKEN) {
        seq_puts(seq,
                 "sk Eth Pid Groups "
                 "Rmem Wmem Dump Locks Drops Inode\n");
    } else {
        struct sock *s = v;
        struct netlink_sock *nlk = nlk_sk(s);

        seq_printf(seq, "%pK %-3d %-6d %08x %-8d %-8d %pK %-8d %-8d %-8lu\n",
                   s,
                   s->sk_protocol,
                   nlk->pid,
                   nlk->groups ? (u32)nlk->groups[0] : 0,
                   sk_rmem_alloc_get(s),
                   sk_wmem_alloc_get(s),
                   nlk->cb,
                   atomic_read(&s->sk_refcnt),
                   atomic_read(&s->sk_drops),
                   sock_i_ino(s)
            );

    }
    return 0;
}

static const struct seq_operations netlink_seq_ops = {
    .start  = netlink_seq_start,
    .next   = netlink_seq_next,
    .stop   = netlink_seq_stop,
    .show   = netlink_seq_show,
};

static int netlink_seq_open(struct inode *inode, struct file *file)
{
    return seq_open_net(inode, file, &netlink_seq_ops,
                        sizeof(struct nl_seq_iter));
}

static const struct file_operations netlink_seq_fops = {
    .owner   = THIS_MODULE,
    .open    = netlink_seq_open,
    .read    = seq_read,
    .llseek  = seq_lseek,
    .release = seq_release_net,
};

#endif

int netlink_register_notifier(struct notifier_block *nb)
{
    return atomic_notifier_chain_register(&netlink_chain, nb);
}
EXPORT_SYMBOL(netlink_register_notifier);

int netlink_unregister_notifier(struct notifier_block *nb)
{
    return atomic_notifier_chain_unregister(&netlink_chain, nb);
}
EXPORT_SYMBOL(netlink_unregister_notifier);

/* Socket-layer operations for PF_NETLINK sockets. */
static const struct proto_ops netlink_ops = {
    .family =     PF_NETLINK,
    .owner =      THIS_MODULE,
    .release =    netlink_release,
    .bind =       netlink_bind,
    .connect =    netlink_connect,
    .socketpair = sock_no_socketpair,
    .accept =     sock_no_accept,
    .getname =    netlink_getname,
    .poll =       datagram_poll,
    .ioctl =      sock_no_ioctl,
    .listen =     sock_no_listen,
    .shutdown =   sock_no_shutdown,
    .setsockopt = netlink_setsockopt,
    .getsockopt = netlink_getsockopt,
    .sendmsg =    netlink_sendmsg,
    .recvmsg =    netlink_recvmsg,
    .mmap =       sock_no_mmap,
    .sendpage =   sock_no_sendpage,
};

static const struct net_proto_family netlink_family_ops = {
    .family = PF_NETLINK,
    .create = netlink_create,
    .owner  = THIS_MODULE,  /* for consistency 8) */
};

/* Per-namespace setup: create the /proc/net/netlink entry. */
static int __net_init netlink_net_init(struct net *net)
{
#ifdef CONFIG_PROC_FS
    if (!proc_net_fops_create(net, "netlink", 0, &netlink_seq_fops))
        return -ENOMEM;
#endif
    return 0;
}

static void __net_exit netlink_net_exit(struct net *net)
{
#ifdef CONFIG_PROC_FS
    proc_net_remove(net, "netlink");
#endif
}

/* Pre-register the NETLINK_USERSOCK table so user-to-user sockets get 32
 * multicast groups and NL_NONROOT_SEND rights without a kernel socket
 * ever being created for that protocol. */
static void __init netlink_add_usersock_entry(void)
{
    struct listeners *listeners;
    int groups = 32;

    listeners = kzalloc(sizeof(*listeners) + NLGRPSZ(groups), GFP_KERNEL);
    if (!listeners)
        panic("netlink_add_usersock_entry: Cannot allocate listeners\n");

    netlink_table_grab();

    nl_table[NETLINK_USERSOCK].groups = groups;
    rcu_assign_pointer(nl_table[NETLINK_USERSOCK].listeners, listeners);
    nl_table[NETLINK_USERSOCK].module = THIS_MODULE;
    nl_table[NETLINK_USERSOCK].registered = 1;
    nl_table[NETLINK_USERSOCK].nl_nonroot = NL_NONROOT_SEND;

    netlink_table_ungrab();
}

static struct pernet_operations __net_initdata netlink_net_ops = {
    .init = netlink_net_init,
    .exit = netlink_net_exit,
};

/* Boot-time initialization: register the netlink proto and family,
 * size and allocate the per-protocol pid hash tables (scaled to RAM),
 * and bring up rtnetlink early. */
static int __init netlink_proto_init(void)
{
    struct sk_buff *dummy_skb;
    int i;
    unsigned long limit;
    unsigned int order;
    int err = proto_register(&netlink_proto, 0);

    if (err != 0)
        goto out;

    BUILD_BUG_ON(sizeof(struct netlink_skb_parms) > sizeof(dummy_skb->cb));

    nl_table = kcalloc(MAX_LINKS, sizeof(*nl_table), GFP_KERNEL);
    if (!nl_table)
        goto panic;

    /* Derive the maximum hash order from available memory. */
    if (totalram_pages >= (128 * 1024))
        limit = totalram_pages >> (21 - PAGE_SHIFT);
    else
        limit = totalram_pages >> (23 - PAGE_SHIFT);

    order = get_bitmask_order(limit) - 1 + PAGE_SHIFT;
    limit = (1UL << order) / sizeof(struct hlist_head);
    order = get_bitmask_order(min(limit, (unsigned long)UINT_MAX)) - 1;

    for (i = 0; i < MAX_LINKS; i++) {
        struct nl_pid_hash *hash = &nl_table[i].hash;

        /* Each table starts with a single bucket and grows on demand. */
        hash->table = nl_pid_hash_zalloc(1 * sizeof(*hash->table));
        if (!hash->table) {
            while (i-- > 0)
                nl_pid_hash_free(nl_table[i].hash.table,
                                 1 * sizeof(*hash->table));
            kfree(nl_table);
            goto panic;
        }
        hash->max_shift = order;
        hash->shift = 0;
        hash->mask = 0;
        hash->rehash_time = jiffies;
    }

    netlink_add_usersock_entry();

    sock_register(&netlink_family_ops);
    register_pernet_subsys(&netlink_net_ops);
    /* The netlink device handler may be needed early. */
    rtnetlink_init();
out:
    return err;
panic:
    panic("netlink_init: Cannot allocate nl_table\n");
}

core_initcall(netlink_proto_init);
./CrossVul/dataset_final_sorted/CWE-284/c/good_3852_0
crossvul-cpp_data_good_1571_5
/*
   Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; version 2 of the License.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/

/* mysqlcheck client: CHECK/REPAIR/ANALYZE/OPTIMIZE tables over the wire. */

#define CHECK_VERSION "2.5.1"

#include "client_priv.h"
#include "my_default.h"
#include <m_ctype.h>
#include <mysql_version.h>
#include <mysqld_error.h>
#include <sslopt-vars.h>
#include <welcome_copyright_notice.h> /* ORACLE_WELCOME_COPYRIGHT_NOTICE */

/* Exit codes */

#define EX_USAGE 1
#define EX_MYSQLERR 2

/* ALTER instead of repair. */
#define MAX_ALTER_STR_SIZE 128 * 1024
#define KEY_PARTITIONING_CHANGED_STR "KEY () partitioning changed"

/* Global connection handle and the socket actually used for queries. */
static MYSQL mysql_connection, *sock = 0;
/* Boolean command-line switches (set by handle_options/get_one_option). */
static my_bool opt_alldbs = 0, opt_check_only_changed = 0, opt_extended = 0,
               opt_compress = 0, opt_databases = 0, opt_fast = 0,
               opt_medium_check = 0, opt_quick = 0, opt_all_in_1 = 0,
               opt_silent = 0, opt_auto_repair = 0, ignore_errors = 0,
               tty_password= 0, opt_frm= 0, debug_info_flag= 0,
               debug_check_flag= 0, opt_fix_table_names= 0,
               opt_fix_db_names= 0, opt_upgrade= 0, opt_write_binlog= 1;
static uint verbose = 0, opt_mysql_port=0;
static int my_end_arg;
static char * opt_mysql_unix_port = 0;
static char *opt_password = 0, *current_user = 0,
            *default_charset= 0, *current_host= 0;
static char *opt_plugin_dir= 0, *opt_default_auth= 0;
static int first_error = 0;
static char *opt_skip_database;
/* Tables flagged during checking, queued for post-pass REPAIR / rebuild /
 * explicit ALTER statements respectively. */
DYNAMIC_ARRAY tables4repair, tables4rebuild, alter_table_cmds;
#if defined (_WIN32) && !defined (EMBEDDED_LIBRARY)
static char *shared_memory_base_name=0;
#endif
static uint opt_protocol=0;
static char *opt_bind_addr = NULL;

/* Which SQL operation this invocation performs (set from options or the
 * program name: mysqlrepair/mysqlanalyze/mysqloptimize). */
enum operations { DO_CHECK=1, DO_REPAIR, DO_ANALYZE, DO_OPTIMIZE, DO_UPGRADE };

/* Command-line option table consumed by handle_options(). */
static struct my_option my_long_options[] =
{
  {"all-databases", 'A',
   "Check all the databases. This is the same as --databases with all databases selected.",
   &opt_alldbs, &opt_alldbs, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
  {"analyze", 'a', "Analyze given tables.", 0, 0, 0, GET_NO_ARG, NO_ARG,
   0, 0, 0, 0, 0, 0},
  {"all-in-1", '1',
   "Instead of issuing one query for each table, use one query per database, naming all tables in the database in a comma-separated list.",
   &opt_all_in_1, &opt_all_in_1, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
  {"auto-repair", OPT_AUTO_REPAIR,
   "If a checked table is corrupted, automatically fix it. Repairing will be done after all tables have been checked, if corrupted ones were found.",
   &opt_auto_repair, &opt_auto_repair, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
  {"bind-address", 0, "IP address to bind to.",
   (uchar**) &opt_bind_addr, (uchar**) &opt_bind_addr, 0,
   GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
  {"character-sets-dir", OPT_CHARSETS_DIR,
   "Directory for character set files.", &charsets_dir,
   &charsets_dir, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
  {"check", 'c', "Check table for errors.", 0, 0, 0, GET_NO_ARG, NO_ARG,
   0, 0, 0, 0, 0, 0},
  {"check-only-changed", 'C',
   "Check only tables that have changed since last check or haven't been closed properly.",
   0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0},
  {"check-upgrade", 'g',
   "Check tables for version-dependent changes. May be used with --auto-repair to correct tables requiring version-dependent updates.",
   0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0},
  {"compress", OPT_COMPRESS, "Use compression in server/client protocol.",
   &opt_compress, &opt_compress, 0, GET_BOOL, NO_ARG, 0, 0, 0,
   0, 0, 0},
  {"databases", 'B',
   "Check several databases. Note the difference in usage; in this case no tables are given. All name arguments are regarded as database names.",
   &opt_databases, &opt_databases, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
#ifdef DBUG_OFF
  {"debug", '#', "This is a non-debug version. Catch this and exit.",
   0, 0, 0, GET_DISABLED, OPT_ARG, 0, 0, 0, 0, 0, 0},
#else
  {"debug", '#', "Output debug log. Often this is 'd:t:o,filename'.",
   0, 0, 0, GET_STR, OPT_ARG, 0, 0, 0, 0, 0, 0},
#endif
  {"debug-check", OPT_DEBUG_CHECK, "Check memory and open file usage at exit.",
   &debug_check_flag, &debug_check_flag, 0,
   GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
  {"debug-info", OPT_DEBUG_INFO, "Print some debug info at exit.",
   &debug_info_flag, &debug_info_flag, 0,
   GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
  {"default-character-set", OPT_DEFAULT_CHARSET,
   "Set the default character set.", &default_charset,
   &default_charset, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
  {"default_auth", OPT_DEFAULT_AUTH,
   "Default authentication client-side plugin to use.",
   &opt_default_auth, &opt_default_auth, 0,
   GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
  {"fast",'F', "Check only tables that haven't been closed properly.",
   &opt_fast, &opt_fast, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
  {"fix-db-names", OPT_FIX_DB_NAMES, "Fix database names.",
   &opt_fix_db_names, &opt_fix_db_names, 0,
   GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
  {"fix-table-names", OPT_FIX_TABLE_NAMES, "Fix table names.",
   &opt_fix_table_names, &opt_fix_table_names, 0,
   GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
  {"force", 'f', "Continue even if we get an SQL error.",
   &ignore_errors, &ignore_errors, 0, GET_BOOL, NO_ARG, 0, 0,
   0, 0, 0, 0},
  {"extended", 'e',
   "If you are using this option with CHECK TABLE, it will ensure that the table is 100 percent consistent, but will take a long time. If you are using this option with REPAIR TABLE, it will force using old slow repair with keycache method, instead of much faster repair by sorting.",
   &opt_extended, &opt_extended, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
  {"help", '?', "Display this help message and exit.", 0, 0, 0, GET_NO_ARG,
   NO_ARG, 0, 0, 0, 0, 0, 0},
  {"host",'h', "Connect to host.", &current_host, &current_host,
   0, GET_STR_ALLOC, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
  {"medium-check", 'm',
   "Faster than extended-check, but only finds 99.99 percent of all errors. Should be good enough for most cases.",
   0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0},
  {"write-binlog", OPT_WRITE_BINLOG,
   "Log ANALYZE, OPTIMIZE and REPAIR TABLE commands. Use --skip-write-binlog "
   "when commands should not be sent to replication slaves.",
   &opt_write_binlog, &opt_write_binlog, 0, GET_BOOL, NO_ARG,
   1, 0, 0, 0, 0, 0},
  {"optimize", 'o', "Optimize table.", 0, 0, 0, GET_NO_ARG, NO_ARG,
   0, 0, 0, 0, 0, 0},
  {"password", 'p',
   "Password to use when connecting to server. If password is not given, it's solicited on the tty.",
   0, 0, 0, GET_PASSWORD, OPT_ARG, 0, 0, 0, 0, 0, 0},
#ifdef _WIN32
  {"pipe", 'W', "Use named pipes to connect to server.", 0, 0, 0, GET_NO_ARG,
   NO_ARG, 0, 0, 0, 0, 0, 0},
#endif
  {"plugin_dir", OPT_PLUGIN_DIR, "Directory for client-side plugins.",
   &opt_plugin_dir, &opt_plugin_dir, 0,
   GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
  {"port", 'P', "Port number to use for connection or 0 for default to, in "
   "order of preference, my.cnf, $MYSQL_TCP_PORT, "
#if MYSQL_PORT_DEFAULT == 0
   "/etc/services, "
#endif
   "built-in default (" STRINGIFY_ARG(MYSQL_PORT) ").",
   &opt_mysql_port, &opt_mysql_port, 0, GET_UINT, REQUIRED_ARG, 0, 0, 0, 0, 0,
   0},
  {"protocol", OPT_MYSQL_PROTOCOL,
   "The protocol to use for connection (tcp, socket, pipe, memory).",
   0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
  {"quick", 'q',
   "If you are using this option with CHECK TABLE, it prevents the check from scanning the rows to check for wrong links. This is the fastest check. If you are using this option with REPAIR TABLE, it will try to repair only the index tree. This is the fastest repair method for a table.",
   &opt_quick, &opt_quick, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
  {"repair", 'r',
   "Can fix almost anything except unique keys that aren't unique.",
   0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0},
#if defined (_WIN32) && !defined (EMBEDDED_LIBRARY)
  {"shared-memory-base-name", OPT_SHARED_MEMORY_BASE_NAME,
   "Base name of shared memory.", &shared_memory_base_name,
   &shared_memory_base_name, 0, GET_STR_ALLOC, REQUIRED_ARG,
   0, 0, 0, 0, 0, 0},
#endif
  {"silent", 's', "Print only error messages.", &opt_silent,
   &opt_silent, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
  {"skip_database", 0, "Don't process the database specified as argument",
   &opt_skip_database, &opt_skip_database, 0, GET_STR, REQUIRED_ARG,
   0, 0, 0, 0, 0, 0},
  {"socket", 'S', "The socket file to use for connection.",
   &opt_mysql_unix_port, &opt_mysql_unix_port, 0, GET_STR,
   REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
#include <sslopt-longopts.h>
  {"tables", OPT_TABLES, "Overrides option --databases (-B).", 0, 0, 0,
   GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0},
  {"use-frm", OPT_FRM,
   "When used with REPAIR, get table structure from .frm file, so the table can be repaired even if .MYI header is corrupted.",
   &opt_frm, &opt_frm, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0,
   0},
  {"user", 'u', "User for login if not current user.", &current_user,
   &current_user, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
  {"verbose", 'v', "Print info about the various stages.", 0, 0, 0, GET_NO_ARG,
   NO_ARG, 0, 0, 0, 0, 0, 0},
  {"version", 'V', "Output version information and exit.", 0, 0, 0, GET_NO_ARG,
   NO_ARG, 0, 0, 0, 0, 0, 0},
  {0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}
};

/* my.cnf groups read by load_defaults(). */
static const char *load_default_groups[] = { "mysqlcheck", "client", 0 };

/* Forward declarations (definitions follow below). */
static void print_version(void);
static void usage(void);
static int get_options(int *argc, char ***argv);
static int process_all_databases();
static int process_databases(char **db_names);
static int process_selected_tables(char *db, char **table_names, int tables);
static int process_all_tables_in_db(char *database); static int process_one_db(char *database); static int use_db(char *database); static int handle_request_for_tables(char *tables, uint length); static int dbConnect(char *host, char *user,char *passwd); static void dbDisconnect(char *host); static void DBerror(MYSQL *mysql, const char *when); static void safe_exit(int error); static void print_result(); static uint fixed_name_length(const char *name); static char *fix_table_name(char *dest, char *src); int what_to_do = 0; static void print_version(void) { printf("%s Ver %s Distrib %s, for %s (%s)\n", my_progname, CHECK_VERSION, MYSQL_SERVER_VERSION, SYSTEM_TYPE, MACHINE_TYPE); } /* print_version */ static void usage(void) { print_version(); puts(ORACLE_WELCOME_COPYRIGHT_NOTICE("2000")); puts("This program can be used to CHECK (-c, -m, -C), REPAIR (-r), ANALYZE (-a),"); puts("or OPTIMIZE (-o) tables. Some of the options (like -e or -q) can be"); puts("used at the same time. Not all options are supported by all storage engines."); puts("Please consult the MySQL manual for latest information about the"); puts("above. The options -c, -r, -a, and -o are exclusive to each other, which"); puts("means that the last option will be used, if several was specified.\n"); puts("The option -c will be used by default, if none was specified. 
You"); puts("can change the default behavior by making a symbolic link, or"); puts("copying this file somewhere with another name, the alternatives are:"); puts("mysqlrepair: The default option will be -r"); puts("mysqlanalyze: The default option will be -a"); puts("mysqloptimize: The default option will be -o\n"); printf("Usage: %s [OPTIONS] database [tables]\n", my_progname); printf("OR %s [OPTIONS] --databases DB1 [DB2 DB3...]\n", my_progname); printf("OR %s [OPTIONS] --all-databases\n", my_progname); print_defaults("my", load_default_groups); my_print_help(my_long_options); my_print_variables(my_long_options); } /* usage */ static my_bool get_one_option(int optid, const struct my_option *opt __attribute__((unused)), char *argument) { int orig_what_to_do= what_to_do; switch(optid) { case 'a': what_to_do = DO_ANALYZE; break; case 'c': what_to_do = DO_CHECK; break; case 'C': what_to_do = DO_CHECK; opt_check_only_changed = 1; break; case 'I': /* Fall through */ case '?': usage(); exit(0); case 'm': what_to_do = DO_CHECK; opt_medium_check = 1; break; case 'o': what_to_do = DO_OPTIMIZE; break; case OPT_FIX_DB_NAMES: what_to_do= DO_UPGRADE; opt_databases= 1; break; case OPT_FIX_TABLE_NAMES: what_to_do= DO_UPGRADE; break; case 'p': if (argument == disabled_my_option) argument= (char*) ""; /* Don't require password */ if (argument) { char *start = argument; my_free(opt_password); opt_password = my_strdup(PSI_NOT_INSTRUMENTED, argument, MYF(MY_FAE)); while (*argument) *argument++= 'x'; /* Destroy argument */ if (*start) start[1] = 0; /* Cut length of argument */ tty_password= 0; } else tty_password = 1; break; case 'r': what_to_do = DO_REPAIR; break; case 'g': what_to_do= DO_CHECK; opt_upgrade= 1; break; case 'W': #ifdef _WIN32 opt_protocol = MYSQL_PROTOCOL_PIPE; #endif break; case '#': DBUG_PUSH(argument ? 
argument : "d:t:o"); debug_check_flag= 1; break; #include <sslopt-case.h> case OPT_TABLES: opt_databases = 0; break; case 'v': verbose++; break; case 'V': print_version(); exit(0); case OPT_MYSQL_PROTOCOL: opt_protocol= find_type_or_exit(argument, &sql_protocol_typelib, opt->name); break; } if (orig_what_to_do && (what_to_do != orig_what_to_do)) { fprintf(stderr, "Error: %s doesn't support multiple contradicting commands.\n", my_progname); return 1; } return 0; } static int get_options(int *argc, char ***argv) { int ho_error; if (*argc == 1) { usage(); exit(0); } my_getopt_use_args_separator= TRUE; if ((ho_error= load_defaults("my", load_default_groups, argc, argv)) || (ho_error=handle_options(argc, argv, my_long_options, get_one_option))) exit(ho_error); my_getopt_use_args_separator= FALSE; if (!what_to_do) { size_t pnlen= strlen(my_progname); if (pnlen < 6) /* name too short */ what_to_do = DO_CHECK; else if (!strcmp("repair", my_progname + pnlen - 6)) what_to_do = DO_REPAIR; else if (!strcmp("analyze", my_progname + pnlen - 7)) what_to_do = DO_ANALYZE; else if (!strcmp("optimize", my_progname + pnlen - 8)) what_to_do = DO_OPTIMIZE; else what_to_do = DO_CHECK; } /* If there's no --default-character-set option given with --fix-table-name or --fix-db-name set the default character set to "utf8". */ if (!default_charset) { if (opt_fix_db_names || opt_fix_table_names) default_charset= (char*) "utf8"; else default_charset= (char*) MYSQL_AUTODETECT_CHARSET_NAME; } if (strcmp(default_charset, MYSQL_AUTODETECT_CHARSET_NAME) && !get_charset_by_csname(default_charset, MY_CS_PRIMARY, MYF(MY_WME))) { printf("Unsupported character set: %s\n", default_charset); return 1; } if (*argc > 0 && opt_alldbs) { printf("You should give only options, no arguments at all, with option\n"); printf("--all-databases. Please see %s --help for more information.\n", my_progname); return 1; } if (*argc < 1 && !opt_alldbs) { printf("You forgot to give the arguments! 
Please see %s --help\n", my_progname); printf("for more information.\n"); return 1; } if (tty_password) opt_password = get_tty_password(NullS); if (debug_info_flag) my_end_arg= MY_CHECK_ERROR | MY_GIVE_INFO; if (debug_check_flag) my_end_arg= MY_CHECK_ERROR; return(0); } /* get_options */ static int process_all_databases() { MYSQL_ROW row; MYSQL_RES *tableres; int result = 0; if (mysql_query(sock, "SHOW DATABASES") || !(tableres = mysql_store_result(sock))) { my_printf_error(0, "Error: Couldn't execute 'SHOW DATABASES': %s", MYF(0), mysql_error(sock)); return 1; } while ((row = mysql_fetch_row(tableres))) { if (process_one_db(row[0])) result = 1; } return result; } /* process_all_databases */ static int process_databases(char **db_names) { int result = 0; for ( ; *db_names ; db_names++) { if (process_one_db(*db_names)) result = 1; } return result; } /* process_databases */ static int process_selected_tables(char *db, char **table_names, int tables) { if (use_db(db)) return 1; if (opt_all_in_1 && what_to_do != DO_UPGRADE) { /* We need table list in form `a`, `b`, `c` that's why we need 2 more chars added to to each table name space is for more readable output in logs and in case of error */ char *table_names_comma_sep, *end; size_t tot_length= 0; int i= 0; for (i = 0; i < tables; i++) tot_length+= fixed_name_length(*(table_names + i)) + 2; if (!(table_names_comma_sep = (char *) my_malloc(PSI_NOT_INSTRUMENTED, (sizeof(char) * tot_length) + 4, MYF(MY_WME)))) return 1; for (end = table_names_comma_sep + 1; tables > 0; tables--, table_names++) { end= fix_table_name(end, *table_names); *end++= ','; } *--end = 0; handle_request_for_tables(table_names_comma_sep + 1, (uint) (tot_length - 1)); my_free(table_names_comma_sep); } else for (; tables > 0; tables--, table_names++) handle_request_for_tables(*table_names, fixed_name_length(*table_names)); return 0; } /* process_selected_tables */ static uint fixed_name_length(const char *name) { const char *p; uint extra_length= 2; 
/* count the first/last backticks */ for (p= name; *p; p++) { if (*p == '`') extra_length++; else if (*p == '.') extra_length+= 2; } return (uint) ((p - name) + extra_length); } static char *fix_table_name(char *dest, char *src) { *dest++= '`'; for (; *src; src++) { switch (*src) { case '.': /* add backticks around '.' */ *dest++= '`'; *dest++= '.'; *dest++= '`'; break; case '`': /* escape backtick character */ *dest++= '`'; /* fall through */ default: *dest++= *src; } } *dest++= '`'; return dest; } static int process_all_tables_in_db(char *database) { MYSQL_RES *res; MYSQL_ROW row; uint num_columns; LINT_INIT(res); if (use_db(database)) return 1; if ((mysql_query(sock, "SHOW /*!50002 FULL*/ TABLES") && mysql_query(sock, "SHOW TABLES")) || !(res= mysql_store_result(sock))) { my_printf_error(0, "Error: Couldn't get table list for database %s: %s", MYF(0), database, mysql_error(sock)); return 1; } num_columns= mysql_num_fields(res); if (opt_all_in_1 && what_to_do != DO_UPGRADE) { /* We need table list in form `a`, `b`, `c` that's why we need 2 more chars added to to each table name space is for more readable output in logs and in case of error */ char *tables, *end; uint tot_length = 0; while ((row = mysql_fetch_row(res))) tot_length+= fixed_name_length(row[0]) + 2; mysql_data_seek(res, 0); if (!(tables=(char *) my_malloc(PSI_NOT_INSTRUMENTED, sizeof(char)*tot_length+4, MYF(MY_WME)))) { mysql_free_result(res); return 1; } for (end = tables + 1; (row = mysql_fetch_row(res)) ;) { if ((num_columns == 2) && (strcmp(row[1], "VIEW") == 0)) continue; end= fix_table_name(end, row[0]); *end++= ','; } *--end = 0; if (tot_length) handle_request_for_tables(tables + 1, tot_length - 1); my_free(tables); } else { while ((row = mysql_fetch_row(res))) { /* Skip views if we don't perform renaming. 
*/ if ((what_to_do != DO_UPGRADE) && (num_columns == 2) && (strcmp(row[1], "VIEW") == 0)) continue; handle_request_for_tables(row[0], fixed_name_length(row[0])); } } mysql_free_result(res); return 0; } /* process_all_tables_in_db */ static int run_query(const char *query) { if (mysql_query(sock, query)) { fprintf(stderr, "Failed to %s\n", query); fprintf(stderr, "Error: %s\n", mysql_error(sock)); return 1; } return 0; } static int fix_table_storage_name(const char *name) { char qbuf[100 + NAME_LEN*4]; int rc= 0; if (strncmp(name, "#mysql50#", 9)) return 1; sprintf(qbuf, "RENAME TABLE `%s` TO `%s`", name, name + 9); rc= run_query(qbuf); if (verbose) printf("%-50s %s\n", name, rc ? "FAILED" : "OK"); return rc; } static int fix_database_storage_name(const char *name) { char qbuf[100 + NAME_LEN*4]; int rc= 0; if (strncmp(name, "#mysql50#", 9)) return 1; sprintf(qbuf, "ALTER DATABASE `%s` UPGRADE DATA DIRECTORY NAME", name); rc= run_query(qbuf); if (verbose) printf("%-50s %s\n", name, rc ? "FAILED" : "OK"); return rc; } static int rebuild_table(char *name) { char *query, *ptr; int rc= 0; query= (char*)my_malloc(PSI_NOT_INSTRUMENTED, sizeof(char) * (12 + fixed_name_length(name) + 6 + 1), MYF(MY_WME)); if (!query) return 1; ptr= my_stpcpy(query, "ALTER TABLE "); ptr= fix_table_name(ptr, name); ptr= strxmov(ptr, " FORCE", NullS); if (mysql_real_query(sock, query, (uint)(ptr - query))) { fprintf(stderr, "Failed to %s\n", query); fprintf(stderr, "Error: %s\n", mysql_error(sock)); rc= 1; } my_free(query); return rc; } static int process_one_db(char *database) { if (opt_skip_database && opt_alldbs && !strcmp(database, opt_skip_database)) return 0; if (what_to_do == DO_UPGRADE) { int rc= 0; if (opt_fix_db_names && !strncmp(database,"#mysql50#", 9)) { rc= fix_database_storage_name(database); database+= 9; } if (rc || !opt_fix_table_names) return rc; } return process_all_tables_in_db(database); } static int use_db(char *database) { if (mysql_get_server_version(sock) >= 
FIRST_INFORMATION_SCHEMA_VERSION && !my_strcasecmp(&my_charset_latin1, database, INFORMATION_SCHEMA_DB_NAME)) return 1; if (mysql_get_server_version(sock) >= FIRST_PERFORMANCE_SCHEMA_VERSION && !my_strcasecmp(&my_charset_latin1, database, PERFORMANCE_SCHEMA_DB_NAME)) return 1; if (mysql_select_db(sock, database)) { DBerror(sock, "when selecting the database"); return 1; } return 0; } /* use_db */ static int disable_binlog() { const char *stmt= "SET SQL_LOG_BIN=0"; return run_query(stmt); } static int handle_request_for_tables(char *tables, uint length) { char *query, *end, options[100], message[100]; uint query_length= 0; const char *op = 0; options[0] = 0; end = options; switch (what_to_do) { case DO_CHECK: op = "CHECK"; if (opt_quick) end = my_stpcpy(end, " QUICK"); if (opt_fast) end = my_stpcpy(end, " FAST"); if (opt_medium_check) end = my_stpcpy(end, " MEDIUM"); /* Default */ if (opt_extended) end = my_stpcpy(end, " EXTENDED"); if (opt_check_only_changed) end = my_stpcpy(end, " CHANGED"); if (opt_upgrade) end = my_stpcpy(end, " FOR UPGRADE"); break; case DO_REPAIR: op= (opt_write_binlog) ? "REPAIR" : "REPAIR NO_WRITE_TO_BINLOG"; if (opt_quick) end = my_stpcpy(end, " QUICK"); if (opt_extended) end = my_stpcpy(end, " EXTENDED"); if (opt_frm) end = my_stpcpy(end, " USE_FRM"); break; case DO_ANALYZE: op= (opt_write_binlog) ? "ANALYZE" : "ANALYZE NO_WRITE_TO_BINLOG"; break; case DO_OPTIMIZE: op= (opt_write_binlog) ? 
"OPTIMIZE" : "OPTIMIZE NO_WRITE_TO_BINLOG"; break; case DO_UPGRADE: return fix_table_storage_name(tables); } if (!(query =(char *) my_malloc(PSI_NOT_INSTRUMENTED, (sizeof(char)*(length+110)), MYF(MY_WME)))) return 1; if (opt_all_in_1) { /* No backticks here as we added them before */ query_length= sprintf(query, "%s TABLE %s %s", op, tables, options); } else { char *ptr; ptr= my_stpcpy(my_stpcpy(query, op), " TABLE "); ptr= fix_table_name(ptr, tables); ptr= strxmov(ptr, " ", options, NullS); query_length= (uint) (ptr - query); } if (mysql_real_query(sock, query, query_length)) { sprintf(message, "when executing '%s TABLE ... %s'", op, options); DBerror(sock, message); return 1; } print_result(); my_free(query); return 0; } static void print_result() { MYSQL_RES *res; MYSQL_ROW row; char prev[NAME_LEN*2+2]; char prev_alter[MAX_ALTER_STR_SIZE]; uint i; my_bool found_error=0, table_rebuild=0; res = mysql_use_result(sock); prev[0] = '\0'; prev_alter[0]= 0; for (i = 0; (row = mysql_fetch_row(res)); i++) { int changed = strcmp(prev, row[0]); my_bool status = !strcmp(row[2], "status"); if (status) { /* if there was an error with the table, we have --auto-repair set, and this isn't a repair op, then add the table to the tables4repair list */ if (found_error && opt_auto_repair && what_to_do != DO_REPAIR && strcmp(row[3],"OK")) { if (table_rebuild) { if (prev_alter[0]) insert_dynamic(&alter_table_cmds, (uchar*) prev_alter); else insert_dynamic(&tables4rebuild, (uchar*) prev); } else insert_dynamic(&tables4repair, prev); } found_error=0; table_rebuild=0; prev_alter[0]= 0; if (opt_silent) continue; } if (status && changed) printf("%-50s %s", row[0], row[3]); else if (!status && changed) { printf("%s\n%-9s: %s", row[0], row[2], row[3]); if (opt_auto_repair && strcmp(row[2],"note")) { const char *alter_txt= strstr(row[3], "ALTER TABLE"); found_error=1; if (alter_txt) { table_rebuild=1; if (!strncmp(row[3], KEY_PARTITIONING_CHANGED_STR, strlen(KEY_PARTITIONING_CHANGED_STR)) && 
strstr(alter_txt, "PARTITION BY")) { if (strlen(alter_txt) >= MAX_ALTER_STR_SIZE) { printf("Error: Alter command too long (>= %d)," " please do \"%s\" or dump/reload to fix it!\n", MAX_ALTER_STR_SIZE, alter_txt); table_rebuild= 0; prev_alter[0]= 0; } else strcpy(prev_alter, alter_txt); } } } } else printf("%-9s: %s", row[2], row[3]); my_stpcpy(prev, row[0]); putchar('\n'); } /* add the last table to be repaired to the list */ if (found_error && opt_auto_repair && what_to_do != DO_REPAIR) { if (table_rebuild) { if (prev_alter[0]) insert_dynamic(&alter_table_cmds, (uchar*) prev_alter); else insert_dynamic(&tables4rebuild, (uchar*) prev); } else insert_dynamic(&tables4repair, prev); } mysql_free_result(res); } static int dbConnect(char *host, char *user, char *passwd) { DBUG_ENTER("dbConnect"); if (verbose) { fprintf(stderr, "# Connecting to %s...\n", host ? host : "localhost"); } mysql_init(&mysql_connection); if (opt_compress) mysql_options(&mysql_connection, MYSQL_OPT_COMPRESS, NullS); SSL_SET_OPTIONS(&mysql_connection); if (opt_protocol) mysql_options(&mysql_connection,MYSQL_OPT_PROTOCOL,(char*)&opt_protocol); if (opt_bind_addr) mysql_options(&mysql_connection, MYSQL_OPT_BIND, opt_bind_addr); #if defined (_WIN32) && !defined (EMBEDDED_LIBRARY) if (shared_memory_base_name) mysql_options(&mysql_connection,MYSQL_SHARED_MEMORY_BASE_NAME,shared_memory_base_name); #endif if (opt_plugin_dir && *opt_plugin_dir) mysql_options(&mysql_connection, MYSQL_PLUGIN_DIR, opt_plugin_dir); if (opt_default_auth && *opt_default_auth) mysql_options(&mysql_connection, MYSQL_DEFAULT_AUTH, opt_default_auth); mysql_options(&mysql_connection, MYSQL_SET_CHARSET_NAME, default_charset); mysql_options(&mysql_connection, MYSQL_OPT_CONNECT_ATTR_RESET, 0); mysql_options4(&mysql_connection, MYSQL_OPT_CONNECT_ATTR_ADD, "program_name", "mysqlcheck"); if (!(sock = mysql_real_connect(&mysql_connection, host, user, passwd, NULL, opt_mysql_port, opt_mysql_unix_port, 0))) { DBerror(&mysql_connection, "when 
trying to connect"); DBUG_RETURN(1); } mysql_connection.reconnect= 1; DBUG_RETURN(0); } /* dbConnect */ static void dbDisconnect(char *host) { if (verbose) fprintf(stderr, "# Disconnecting from %s...\n", host ? host : "localhost"); mysql_close(sock); } /* dbDisconnect */ static void DBerror(MYSQL *mysql, const char *when) { DBUG_ENTER("DBerror"); my_printf_error(0,"Got error: %d: %s %s", MYF(0), mysql_errno(mysql), mysql_error(mysql), when); safe_exit(EX_MYSQLERR); DBUG_VOID_RETURN; } /* DBerror */ static void safe_exit(int error) { if (!first_error) first_error= error; if (ignore_errors) return; if (sock) mysql_close(sock); exit(error); } int main(int argc, char **argv) { MY_INIT(argv[0]); /* ** Check out the args */ if (get_options(&argc, &argv)) { my_end(my_end_arg); exit(EX_USAGE); } if (dbConnect(current_host, current_user, opt_password)) exit(EX_MYSQLERR); if (!opt_write_binlog) { if (disable_binlog()) { first_error= 1; goto end; } } if (opt_auto_repair && (my_init_dynamic_array(&tables4repair, sizeof(char)*(NAME_LEN*2+2),16,64) || my_init_dynamic_array(&tables4rebuild, sizeof(char)*(NAME_LEN*2+2),16,64) || my_init_dynamic_array(&alter_table_cmds, MAX_ALTER_STR_SIZE, 0, 1))) { first_error = 1; goto end; } if (opt_alldbs) process_all_databases(); /* Only one database and selected table(s) */ else if (argc > 1 && !opt_databases) process_selected_tables(*argv, (argv + 1), (argc - 1)); /* One or more databases, all tables */ else process_databases(argv); if (opt_auto_repair) { uint i; if (!opt_silent && (tables4repair.elements || tables4rebuild.elements)) puts("\nRepairing tables"); what_to_do = DO_REPAIR; for (i = 0; i < tables4repair.elements ; i++) { char *name= (char*) dynamic_array_ptr(&tables4repair, i); handle_request_for_tables(name, fixed_name_length(name)); } for (i = 0; i < tables4rebuild.elements ; i++) rebuild_table((char*) dynamic_array_ptr(&tables4rebuild, i)); for (i = 0; i < alter_table_cmds.elements ; i++) run_query((char*) 
dynamic_array_ptr(&alter_table_cmds, i)); } end: dbDisconnect(current_host); if (opt_auto_repair) { delete_dynamic(&tables4repair); delete_dynamic(&tables4rebuild); } my_free(opt_password); #if defined (_WIN32) && !defined (EMBEDDED_LIBRARY) my_free(shared_memory_base_name); #endif my_end(my_end_arg); return(first_error!=0); } /* main */
./CrossVul/dataset_final_sorted/CWE-284/c/good_1571_5
crossvul-cpp_data_good_5092_0
/* * socket.c * * Copyright (c) 2012 Martin Szulecki All Rights Reserved. * Copyright (c) 2012 Nikias Bassen All Rights Reserved. * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include <stdio.h> #include <stddef.h> #include <stdlib.h> #include <string.h> #include <unistd.h> #include <errno.h> #include <sys/time.h> #include <sys/stat.h> #ifdef WIN32 #include <winsock2.h> #include <windows.h> static int wsa_init = 0; #else #include <sys/socket.h> #include <sys/un.h> #include <netinet/in.h> #include <netdb.h> #include <arpa/inet.h> #endif #include "socket.h" #define RECV_TIMEOUT 20000 static int verbose = 0; void socket_set_verbose(int level) { verbose = level; } #ifndef WIN32 int socket_create_unix(const char *filename) { struct sockaddr_un name; int sock; size_t size; // remove if still present unlink(filename); /* Create the socket. */ sock = socket(PF_LOCAL, SOCK_STREAM, 0); if (sock < 0) { perror("socket"); return -1; } /* Bind a name to the socket. */ name.sun_family = AF_LOCAL; strncpy(name.sun_path, filename, sizeof(name.sun_path)); name.sun_path[sizeof(name.sun_path) - 1] = '\0'; /* The size of the address is the offset of the start of the filename, plus its length, plus one for the terminating null byte. 
Alternatively you can just do: size = SUN_LEN (&name); */ size = (offsetof(struct sockaddr_un, sun_path) + strlen(name.sun_path) + 1); if (bind(sock, (struct sockaddr *) &name, size) < 0) { perror("bind"); socket_close(sock); return -1; } if (listen(sock, 10) < 0) { perror("listen"); socket_close(sock); return -1; } return sock; } int socket_connect_unix(const char *filename) { struct sockaddr_un name; int sfd = -1; size_t size; struct stat fst; // check if socket file exists... if (stat(filename, &fst) != 0) { if (verbose >= 2) fprintf(stderr, "%s: stat '%s': %s\n", __func__, filename, strerror(errno)); return -1; } // ... and if it is a unix domain socket if (!S_ISSOCK(fst.st_mode)) { if (verbose >= 2) fprintf(stderr, "%s: File '%s' is not a socket!\n", __func__, filename); return -1; } // make a new socket if ((sfd = socket(PF_LOCAL, SOCK_STREAM, 0)) < 0) { if (verbose >= 2) fprintf(stderr, "%s: socket: %s\n", __func__, strerror(errno)); return -1; } // and connect to 'filename' name.sun_family = AF_LOCAL; strncpy(name.sun_path, filename, sizeof(name.sun_path)); name.sun_path[sizeof(name.sun_path) - 1] = 0; size = (offsetof(struct sockaddr_un, sun_path) + strlen(name.sun_path) + 1); if (connect(sfd, (struct sockaddr *) &name, size) < 0) { socket_close(sfd); if (verbose >= 2) fprintf(stderr, "%s: connect: %s\n", __func__, strerror(errno)); return -1; } return sfd; } #endif int socket_create(uint16_t port) { int sfd = -1; int yes = 1; #ifdef WIN32 WSADATA wsa_data; if (!wsa_init) { if (WSAStartup(MAKEWORD(2,2), &wsa_data) != ERROR_SUCCESS) { fprintf(stderr, "WSAStartup failed!\n"); ExitProcess(-1); } wsa_init = 1; } #endif struct sockaddr_in saddr; if (0 > (sfd = socket(PF_INET, SOCK_STREAM, IPPROTO_TCP))) { perror("socket()"); return -1; } if (setsockopt(sfd, SOL_SOCKET, SO_REUSEADDR, (void*)&yes, sizeof(int)) == -1) { perror("setsockopt()"); socket_close(sfd); return -1; } memset((void *) &saddr, 0, sizeof(saddr)); saddr.sin_family = AF_INET; 
saddr.sin_addr.s_addr = htonl(INADDR_LOOPBACK); saddr.sin_port = htons(port); if (0 > bind(sfd, (struct sockaddr *) &saddr, sizeof(saddr))) { perror("bind()"); socket_close(sfd); return -1; } if (listen(sfd, 1) == -1) { perror("listen()"); socket_close(sfd); return -1; } return sfd; } int socket_connect(const char *addr, uint16_t port) { int sfd = -1; int yes = 1; struct hostent *hp; struct sockaddr_in saddr; #ifdef WIN32 WSADATA wsa_data; if (!wsa_init) { if (WSAStartup(MAKEWORD(2,2), &wsa_data) != ERROR_SUCCESS) { fprintf(stderr, "WSAStartup failed!\n"); ExitProcess(-1); } wsa_init = 1; } #endif if (!addr) { errno = EINVAL; return -1; } if ((hp = gethostbyname(addr)) == NULL) { if (verbose >= 2) fprintf(stderr, "%s: unknown host '%s'\n", __func__, addr); return -1; } if (!hp->h_addr) { if (verbose >= 2) fprintf(stderr, "%s: gethostbyname returned NULL address!\n", __func__); return -1; } if (0 > (sfd = socket(PF_INET, SOCK_STREAM, IPPROTO_TCP))) { perror("socket()"); return -1; } if (setsockopt(sfd, SOL_SOCKET, SO_REUSEADDR, (void*)&yes, sizeof(int)) == -1) { perror("setsockopt()"); socket_close(sfd); return -1; } memset((void *) &saddr, 0, sizeof(saddr)); saddr.sin_family = AF_INET; saddr.sin_addr.s_addr = *(uint32_t *) hp->h_addr; saddr.sin_port = htons(port); if (connect(sfd, (struct sockaddr *) &saddr, sizeof(saddr)) < 0) { perror("connect"); socket_close(sfd); return -2; } return sfd; } int socket_check_fd(int fd, fd_mode fdm, unsigned int timeout) { fd_set fds; int sret; int eagain; struct timeval to; struct timeval *pto; if (fd <= 0) { if (verbose >= 2) fprintf(stderr, "ERROR: invalid fd in check_fd %d\n", fd); return -1; } FD_ZERO(&fds); FD_SET(fd, &fds); if (timeout > 0) { to.tv_sec = (time_t) (timeout / 1000); to.tv_usec = (time_t) ((timeout - (to.tv_sec * 1000)) * 1000); pto = &to; } else { pto = NULL; } sret = -1; do { eagain = 0; switch (fdm) { case FDM_READ: sret = select(fd + 1, &fds, NULL, NULL, pto); break; case FDM_WRITE: sret = select(fd + 1, 
NULL, &fds, NULL, pto); break; case FDM_EXCEPT: sret = select(fd + 1, NULL, NULL, &fds, pto); break; default: return -1; } if (sret < 0) { switch (errno) { case EINTR: // interrupt signal in select if (verbose >= 2) fprintf(stderr, "%s: EINTR\n", __func__); eagain = 1; break; case EAGAIN: if (verbose >= 2) fprintf(stderr, "%s: EAGAIN\n", __func__); break; default: if (verbose >= 2) fprintf(stderr, "%s: select failed: %s\n", __func__, strerror(errno)); return -1; } } } while (eagain); return sret; } int socket_accept(int fd, uint16_t port) { #ifdef WIN32 int addr_len; #else socklen_t addr_len; #endif int result; struct sockaddr_in addr; memset(&addr, 0, sizeof(addr)); addr.sin_family = AF_INET; addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK); addr.sin_port = htons(port); addr_len = sizeof(addr); result = accept(fd, (struct sockaddr*)&addr, &addr_len); return result; } int socket_shutdown(int fd, int how) { return shutdown(fd, how); } int socket_close(int fd) { #ifdef WIN32 return closesocket(fd); #else return close(fd); #endif } int socket_receive(int fd, void *data, size_t length) { return socket_receive_timeout(fd, data, length, 0, RECV_TIMEOUT); } int socket_peek(int fd, void *data, size_t length) { return socket_receive_timeout(fd, data, length, MSG_PEEK, RECV_TIMEOUT); } int socket_receive_timeout(int fd, void *data, size_t length, int flags, unsigned int timeout) { int res; int result; // check if data is available res = socket_check_fd(fd, FDM_READ, timeout); if (res <= 0) { return res; } // if we get here, there _is_ data available result = recv(fd, data, length, flags); if (res > 0 && result == 0) { // but this is an error condition if (verbose >= 3) fprintf(stderr, "%s: fd=%d recv returned 0\n", __func__, fd); return -EAGAIN; } if (result < 0) { return -errno; } return result; } int socket_send(int fd, void *data, size_t length) { return send(fd, data, length, 0); }
./CrossVul/dataset_final_sorted/CWE-284/c/good_5092_0
crossvul-cpp_data_good_4896_0
/* * Process version 2 NFSACL requests. * * Copyright (C) 2002-2003 Andreas Gruenbacher <agruen@suse.de> */ #include "nfsd.h" /* FIXME: nfsacl.h is a broken header */ #include <linux/nfsacl.h> #include <linux/gfp.h> #include "cache.h" #include "xdr3.h" #include "vfs.h" #define NFSDDBG_FACILITY NFSDDBG_PROC #define RETURN_STATUS(st) { resp->status = (st); return (st); } /* * NULL call. */ static __be32 nfsacld_proc_null(struct svc_rqst *rqstp, void *argp, void *resp) { return nfs_ok; } /* * Get the Access and/or Default ACL of a file. */ static __be32 nfsacld_proc_getacl(struct svc_rqst * rqstp, struct nfsd3_getaclargs *argp, struct nfsd3_getaclres *resp) { struct posix_acl *acl; struct inode *inode; svc_fh *fh; __be32 nfserr = 0; dprintk("nfsd: GETACL(2acl) %s\n", SVCFH_fmt(&argp->fh)); fh = fh_copy(&resp->fh, &argp->fh); nfserr = fh_verify(rqstp, &resp->fh, 0, NFSD_MAY_NOP); if (nfserr) RETURN_STATUS(nfserr); inode = d_inode(fh->fh_dentry); if (argp->mask & ~NFS_ACL_MASK) RETURN_STATUS(nfserr_inval); resp->mask = argp->mask; nfserr = fh_getattr(fh, &resp->stat); if (nfserr) RETURN_STATUS(nfserr); if (resp->mask & (NFS_ACL|NFS_ACLCNT)) { acl = get_acl(inode, ACL_TYPE_ACCESS); if (acl == NULL) { /* Solaris returns the inode's minimum ACL. */ acl = posix_acl_from_mode(inode->i_mode, GFP_KERNEL); } if (IS_ERR(acl)) { nfserr = nfserrno(PTR_ERR(acl)); goto fail; } resp->acl_access = acl; } if (resp->mask & (NFS_DFACL|NFS_DFACLCNT)) { /* Check how Solaris handles requests for the Default ACL of a non-directory! */ acl = get_acl(inode, ACL_TYPE_DEFAULT); if (IS_ERR(acl)) { nfserr = nfserrno(PTR_ERR(acl)); goto fail; } resp->acl_default = acl; } /* resp->acl_{access,default} are released in nfssvc_release_getacl. */ RETURN_STATUS(0); fail: posix_acl_release(resp->acl_access); posix_acl_release(resp->acl_default); RETURN_STATUS(nfserr); } /* * Set the Access and/or Default ACL of a file. 
*/ static __be32 nfsacld_proc_setacl(struct svc_rqst * rqstp, struct nfsd3_setaclargs *argp, struct nfsd_attrstat *resp) { struct inode *inode; svc_fh *fh; __be32 nfserr = 0; int error; dprintk("nfsd: SETACL(2acl) %s\n", SVCFH_fmt(&argp->fh)); fh = fh_copy(&resp->fh, &argp->fh); nfserr = fh_verify(rqstp, &resp->fh, 0, NFSD_MAY_SATTR); if (nfserr) goto out; inode = d_inode(fh->fh_dentry); error = fh_want_write(fh); if (error) goto out_errno; fh_lock(fh); error = set_posix_acl(inode, ACL_TYPE_ACCESS, argp->acl_access); if (error) goto out_drop_lock; error = set_posix_acl(inode, ACL_TYPE_DEFAULT, argp->acl_default); if (error) goto out_drop_lock; fh_unlock(fh); fh_drop_write(fh); nfserr = fh_getattr(fh, &resp->stat); out: /* argp->acl_{access,default} may have been allocated in nfssvc_decode_setaclargs. */ posix_acl_release(argp->acl_access); posix_acl_release(argp->acl_default); return nfserr; out_drop_lock: fh_unlock(fh); fh_drop_write(fh); out_errno: nfserr = nfserrno(error); goto out; } /* * Check file attributes */ static __be32 nfsacld_proc_getattr(struct svc_rqst * rqstp, struct nfsd_fhandle *argp, struct nfsd_attrstat *resp) { __be32 nfserr; dprintk("nfsd: GETATTR %s\n", SVCFH_fmt(&argp->fh)); fh_copy(&resp->fh, &argp->fh); nfserr = fh_verify(rqstp, &resp->fh, 0, NFSD_MAY_NOP); if (nfserr) return nfserr; nfserr = fh_getattr(&resp->fh, &resp->stat); return nfserr; } /* * Check file access */ static __be32 nfsacld_proc_access(struct svc_rqst *rqstp, struct nfsd3_accessargs *argp, struct nfsd3_accessres *resp) { __be32 nfserr; dprintk("nfsd: ACCESS(2acl) %s 0x%x\n", SVCFH_fmt(&argp->fh), argp->access); fh_copy(&resp->fh, &argp->fh); resp->access = argp->access; nfserr = nfsd_access(rqstp, &resp->fh, &resp->access, NULL); if (nfserr) return nfserr; nfserr = fh_getattr(&resp->fh, &resp->stat); return nfserr; } /* * XDR decode functions */ static int nfsaclsvc_decode_getaclargs(struct svc_rqst *rqstp, __be32 *p, struct nfsd3_getaclargs *argp) { p = 
nfs2svc_decode_fh(p, &argp->fh); if (!p) return 0; argp->mask = ntohl(*p); p++; return xdr_argsize_check(rqstp, p); } static int nfsaclsvc_decode_setaclargs(struct svc_rqst *rqstp, __be32 *p, struct nfsd3_setaclargs *argp) { struct kvec *head = rqstp->rq_arg.head; unsigned int base; int n; p = nfs2svc_decode_fh(p, &argp->fh); if (!p) return 0; argp->mask = ntohl(*p++); if (argp->mask & ~NFS_ACL_MASK || !xdr_argsize_check(rqstp, p)) return 0; base = (char *)p - (char *)head->iov_base; n = nfsacl_decode(&rqstp->rq_arg, base, NULL, (argp->mask & NFS_ACL) ? &argp->acl_access : NULL); if (n > 0) n = nfsacl_decode(&rqstp->rq_arg, base + n, NULL, (argp->mask & NFS_DFACL) ? &argp->acl_default : NULL); return (n > 0); } static int nfsaclsvc_decode_fhandleargs(struct svc_rqst *rqstp, __be32 *p, struct nfsd_fhandle *argp) { p = nfs2svc_decode_fh(p, &argp->fh); if (!p) return 0; return xdr_argsize_check(rqstp, p); } static int nfsaclsvc_decode_accessargs(struct svc_rqst *rqstp, __be32 *p, struct nfsd3_accessargs *argp) { p = nfs2svc_decode_fh(p, &argp->fh); if (!p) return 0; argp->access = ntohl(*p++); return xdr_argsize_check(rqstp, p); } /* * XDR encode functions */ /* * There must be an encoding function for void results so svc_process * will work properly. */ static int nfsaclsvc_encode_voidres(struct svc_rqst *rqstp, __be32 *p, void *dummy) { return xdr_ressize_check(rqstp, p); } /* GETACL */ static int nfsaclsvc_encode_getaclres(struct svc_rqst *rqstp, __be32 *p, struct nfsd3_getaclres *resp) { struct dentry *dentry = resp->fh.fh_dentry; struct inode *inode; struct kvec *head = rqstp->rq_res.head; unsigned int base; int n; int w; /* * Since this is version 2, the check for nfserr in * nfsd_dispatch actually ensures the following cannot happen. * However, it seems fragile to depend on that. 
*/ if (dentry == NULL || d_really_is_negative(dentry)) return 0; inode = d_inode(dentry); p = nfs2svc_encode_fattr(rqstp, p, &resp->fh, &resp->stat); *p++ = htonl(resp->mask); if (!xdr_ressize_check(rqstp, p)) return 0; base = (char *)p - (char *)head->iov_base; rqstp->rq_res.page_len = w = nfsacl_size( (resp->mask & NFS_ACL) ? resp->acl_access : NULL, (resp->mask & NFS_DFACL) ? resp->acl_default : NULL); while (w > 0) { if (!*(rqstp->rq_next_page++)) return 0; w -= PAGE_SIZE; } n = nfsacl_encode(&rqstp->rq_res, base, inode, resp->acl_access, resp->mask & NFS_ACL, 0); if (n > 0) n = nfsacl_encode(&rqstp->rq_res, base + n, inode, resp->acl_default, resp->mask & NFS_DFACL, NFS_ACL_DEFAULT); return (n > 0); } static int nfsaclsvc_encode_attrstatres(struct svc_rqst *rqstp, __be32 *p, struct nfsd_attrstat *resp) { p = nfs2svc_encode_fattr(rqstp, p, &resp->fh, &resp->stat); return xdr_ressize_check(rqstp, p); } /* ACCESS */ static int nfsaclsvc_encode_accessres(struct svc_rqst *rqstp, __be32 *p, struct nfsd3_accessres *resp) { p = nfs2svc_encode_fattr(rqstp, p, &resp->fh, &resp->stat); *p++ = htonl(resp->access); return xdr_ressize_check(rqstp, p); } /* * XDR release functions */ static int nfsaclsvc_release_getacl(struct svc_rqst *rqstp, __be32 *p, struct nfsd3_getaclres *resp) { fh_put(&resp->fh); posix_acl_release(resp->acl_access); posix_acl_release(resp->acl_default); return 1; } static int nfsaclsvc_release_attrstat(struct svc_rqst *rqstp, __be32 *p, struct nfsd_attrstat *resp) { fh_put(&resp->fh); return 1; } static int nfsaclsvc_release_access(struct svc_rqst *rqstp, __be32 *p, struct nfsd3_accessres *resp) { fh_put(&resp->fh); return 1; } #define nfsaclsvc_decode_voidargs NULL #define nfsaclsvc_release_void NULL #define nfsd3_fhandleargs nfsd_fhandle #define nfsd3_attrstatres nfsd_attrstat #define nfsd3_voidres nfsd3_voidargs struct nfsd3_voidargs { int dummy; }; #define PROC(name, argt, rest, relt, cache, respsize) \ { (svc_procfunc) nfsacld_proc_##name, \ 
(kxdrproc_t) nfsaclsvc_decode_##argt##args, \ (kxdrproc_t) nfsaclsvc_encode_##rest##res, \ (kxdrproc_t) nfsaclsvc_release_##relt, \ sizeof(struct nfsd3_##argt##args), \ sizeof(struct nfsd3_##rest##res), \ 0, \ cache, \ respsize, \ } #define ST 1 /* status*/ #define AT 21 /* attributes */ #define pAT (1+AT) /* post attributes - conditional */ #define ACL (1+NFS_ACL_MAX_ENTRIES*3) /* Access Control List */ static struct svc_procedure nfsd_acl_procedures2[] = { PROC(null, void, void, void, RC_NOCACHE, ST), PROC(getacl, getacl, getacl, getacl, RC_NOCACHE, ST+1+2*(1+ACL)), PROC(setacl, setacl, attrstat, attrstat, RC_NOCACHE, ST+AT), PROC(getattr, fhandle, attrstat, attrstat, RC_NOCACHE, ST+AT), PROC(access, access, access, access, RC_NOCACHE, ST+AT+1), }; struct svc_version nfsd_acl_version2 = { .vs_vers = 2, .vs_nproc = 5, .vs_proc = nfsd_acl_procedures2, .vs_dispatch = nfsd_dispatch, .vs_xdrsize = NFS3_SVC_XDRSIZE, .vs_hidden = 0, };
./CrossVul/dataset_final_sorted/CWE-284/c/good_4896_0
crossvul-cpp_data_good_880_3
/* * Copyright (C) 2014-2019 Firejail Authors * * This file is part of firejail project * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #include "firejail.h" #include <sys/mount.h> #include <sys/wait.h> #include <sys/stat.h> #include <sys/prctl.h> #include <sys/time.h> #include <sys/resource.h> #include <sys/types.h> #include <dirent.h> #include <errno.h> #include <fcntl.h> #include <sched.h> #ifndef CLONE_NEWUSER #define CLONE_NEWUSER 0x10000000 #endif #include <sys/prctl.h> #ifndef PR_SET_NO_NEW_PRIVS # define PR_SET_NO_NEW_PRIVS 38 #endif #ifndef PR_GET_NO_NEW_PRIVS # define PR_GET_NO_NEW_PRIVS 39 #endif #ifdef HAVE_APPARMOR #include <sys/apparmor.h> #endif #include <syscall.h> static int force_nonewprivs = 0; static int monitored_pid = 0; static void sandbox_handler(int sig){ usleep(10000); // don't race to print a message fmessage("\nChild received signal %d, shutting down the sandbox...\n", sig); // broadcast sigterm to all processes in the group kill(-1, SIGTERM); sleep(1); if (monitored_pid) { int monsec = 9; char *monfile; if (asprintf(&monfile, "/proc/%d/cmdline", monitored_pid) == -1) errExit("asprintf"); while (monsec) { FILE *fp = fopen(monfile, "r"); if (!fp) break; char c; size_t count = fread(&c, 1, 1, fp); fclose(fp); if (count == 0) break; if (arg_debug) printf("Waiting on PID %d to finish\n", 
monitored_pid); sleep(1); monsec--; } free(monfile); } // broadcast a SIGKILL kill(-1, SIGKILL); flush_stdin(); exit(sig); } static void install_handler(void) { struct sigaction sga; // block SIGTERM while handling SIGINT sigemptyset(&sga.sa_mask); sigaddset(&sga.sa_mask, SIGTERM); sga.sa_handler = sandbox_handler; sga.sa_flags = 0; sigaction(SIGINT, &sga, NULL); // block SIGINT while handling SIGTERM sigemptyset(&sga.sa_mask); sigaddset(&sga.sa_mask, SIGINT); sga.sa_handler = sandbox_handler; sga.sa_flags = 0; sigaction(SIGTERM, &sga, NULL); } static void set_caps(void) { if (arg_caps_drop_all) caps_drop_all(); else if (arg_caps_drop) caps_drop_list(arg_caps_list); else if (arg_caps_keep) caps_keep_list(arg_caps_list); else if (arg_caps_default_filter) caps_default_filter(); // drop discretionary access control capabilities for root sandboxes // if caps.keep, the user has to set it manually in the list if (!arg_caps_keep) caps_drop_dac_override(); } static void save_nogroups(void) { if (arg_nogroups == 0) return; FILE *fp = fopen(RUN_GROUPS_CFG, "w"); if (fp) { fprintf(fp, "\n"); SET_PERMS_STREAM(fp, 0, 0, 0644); // assume mode 0644 fclose(fp); } else { fprintf(stderr, "Error: cannot save nogroups state\n"); exit(1); } } static void save_nonewprivs(void) { if (arg_nonewprivs == 0) return; FILE *fp = fopen(RUN_NONEWPRIVS_CFG, "wxe"); if (fp) { fprintf(fp, "\n"); SET_PERMS_STREAM(fp, 0, 0, 0644); // assume mode 0644 fclose(fp); } else { fprintf(stderr, "Error: cannot save nonewprivs state\n"); exit(1); } } static void save_umask(void) { FILE *fp = fopen(RUN_UMASK_FILE, "wxe"); if (fp) { fprintf(fp, "%o\n", orig_umask); SET_PERMS_STREAM(fp, 0, 0, 0644); // assume mode 0644 fclose(fp); } else { fprintf(stderr, "Error: cannot save umask\n"); exit(1); } } static FILE *create_ready_for_join_file(void) { FILE *fp = fopen(RUN_READY_FOR_JOIN, "wxe"); if (fp) { ASSERT_PERMS_STREAM(fp, 0, 0, 0644); return fp; } else { fprintf(stderr, "Error: cannot create %s\n", 
RUN_READY_FOR_JOIN); exit(1); } } static void sandbox_if_up(Bridge *br) { assert(br); if (!br->configured) return; char *dev = br->devsandbox; net_if_up(dev); if (br->arg_ip_none == 1); // do nothing else if (br->arg_ip_none == 0 && br->macvlan == 0) { if (br->ipsandbox == br->ip) { fprintf(stderr, "Error: %d.%d.%d.%d is interface %s address.\n", PRINT_IP(br->ipsandbox), br->dev); exit(1); } // just assign the address assert(br->ipsandbox); if (arg_debug) printf("Configuring %d.%d.%d.%d address on interface %s\n", PRINT_IP(br->ipsandbox), dev); net_config_interface(dev, br->ipsandbox, br->mask, br->mtu); arp_announce(dev, br); } else if (br->arg_ip_none == 0 && br->macvlan == 1) { // reassign the macvlan address if (br->ipsandbox == 0) // ip address assigned by arp-scan for a macvlan device br->ipsandbox = arp_assign(dev, br); //br->ip, br->mask); else { if (br->ipsandbox == br->ip) { fprintf(stderr, "Error: %d.%d.%d.%d is interface %s address.\n", PRINT_IP(br->ipsandbox), br->dev); exit(1); } uint32_t rv = arp_check(dev, br->ipsandbox); if (rv) { fprintf(stderr, "Error: the address %d.%d.%d.%d is already in use.\n", PRINT_IP(br->ipsandbox)); exit(1); } } if (arg_debug) printf("Configuring %d.%d.%d.%d address on interface %s\n", PRINT_IP(br->ipsandbox), dev); net_config_interface(dev, br->ipsandbox, br->mask, br->mtu); arp_announce(dev, br); } if (br->ip6sandbox) net_if_ip6(dev, br->ip6sandbox); } static void chk_chroot(void) { // if we are starting firejail inside some other container technology, we don't care about this char *mycont = getenv("container"); if (mycont) return; // check if this is a regular chroot struct stat s; if (stat("/", &s) == 0) { if (s.st_ino != 2) return; } fprintf(stderr, "Error: cannot mount filesystem as slave\n"); exit(1); } static int monitor_application(pid_t app_pid) { EUID_ASSERT(); monitored_pid = app_pid; // block signals and install handler sigset_t oldmask, newmask; sigemptyset(&oldmask); sigemptyset(&newmask); 
sigaddset(&newmask, SIGTERM); sigaddset(&newmask, SIGINT); sigprocmask(SIG_BLOCK, &newmask, &oldmask); install_handler(); // handle --timeout int options = 0;; unsigned timeout = 0; if (cfg.timeout) { options = WNOHANG; timeout = cfg.timeout; } int status = 0; while (monitored_pid) { usleep(20000); char *msg; if (asprintf(&msg, "monitoring pid %d\n", monitored_pid) == -1) errExit("asprintf"); logmsg(msg); if (arg_debug) printf("%s\n", msg); free(msg); pid_t rv; do { // handle signals asynchronously sigprocmask(SIG_SETMASK, &oldmask, NULL); rv = waitpid(-1, &status, options); // block signals again sigprocmask(SIG_BLOCK, &newmask, NULL); if (rv == -1) { // we can get here if we have processes joining the sandbox (ECHILD) sleep(1); break; } // handle --timeout if (options) { if (--timeout == 0) { kill(-1, SIGTERM); sleep(1); flush_stdin(); _exit(1); } else sleep(1); } } while(rv != monitored_pid); if (arg_debug) printf("Sandbox monitor: waitpid %d retval %d status %d\n", monitored_pid, rv, status); DIR *dir; if (!(dir = opendir("/proc"))) { // sleep 2 seconds and try again sleep(2); if (!(dir = opendir("/proc"))) { fprintf(stderr, "Error: cannot open /proc directory\n"); exit(1); } } struct dirent *entry; monitored_pid = 0; while ((entry = readdir(dir)) != NULL) { unsigned pid; if (sscanf(entry->d_name, "%u", &pid) != 1) continue; if (pid == 1) continue; // todo: make this generic // Dillo browser leaves a dpid process running, we need to shut it down int found = 0; if (strcmp(cfg.command_name, "dillo") == 0) { char *pidname = pid_proc_comm(pid); if (pidname && strcmp(pidname, "dpid") == 0) found = 1; free(pidname); } if (found) break; monitored_pid = pid; break; } closedir(dir); if (monitored_pid != 0 && arg_debug) printf("Sandbox monitor: monitoring %d\n", monitored_pid); } // return the latest exit status. 
return status; } static void print_time(void) { if (start_timestamp) { unsigned long long end_timestamp = getticks(); // measure 1 ms usleep(1000); unsigned long long onems = getticks() - end_timestamp; if (onems) { fmessage("Child process initialized in %.02f ms\n", (float) (end_timestamp - start_timestamp) / (float) onems); return; } } fmessage("Child process initialized\n"); } // check execute permissions for the program // this is done typically by the shell // we are here because of --shell=none // we duplicate execvp functionality (man execvp): // [...] if the specified // filename does not contain a slash (/) character. The file is sought // in the colon-separated list of directory pathnames specified in the // PATH environment variable. static int ok_to_run(const char *program) { if (strstr(program, "/")) { if (access(program, X_OK) == 0) // it will also dereference symlinks return 1; } else { // search $PATH char *path1 = getenv("PATH"); if (path1) { if (arg_debug) printf("Searching $PATH for %s\n", program); char *path2 = strdup(path1); if (!path2) errExit("strdup"); // use path2 to count the entries char *ptr = strtok(path2, ":"); while (ptr) { char *fname; if (asprintf(&fname, "%s/%s", ptr, program) == -1) errExit("asprintf"); if (arg_debug) printf("trying #%s#\n", fname); struct stat s; int rv = stat(fname, &s); if (rv == 0) { if (access(fname, X_OK) == 0) { free(path2); free(fname); return 1; } else fprintf(stderr, "Error: execute permission denied for %s\n", fname); free(fname); break; } free(fname); ptr = strtok(NULL, ":"); } free(path2); } } return 0; } void start_application(int no_sandbox, FILE *fp) { // set environment if (no_sandbox == 0) { env_defaults(); env_apply(); } // restore original umask umask(orig_umask); if (arg_debug) { printf("starting application\n"); printf("LD_PRELOAD=%s\n", getenv("LD_PRELOAD")); } //**************************************** // audit //**************************************** if (arg_audit) { 
assert(arg_audit_prog);
		if (fp) {
			fprintf(fp, "ready\n");
			fclose(fp);
		}
#ifdef HAVE_GCOV
		__gcov_dump();
#endif
#ifdef HAVE_SECCOMP
		// seccomp filters are installed here, immediately before exec
		seccomp_install_filters();
#endif
		execl(arg_audit_prog, arg_audit_prog, NULL);

		perror("execl");
		exit(1);
	}

	//****************************************
	// start the program without using a shell
	//****************************************
	else if (arg_shell_none) {
		if (arg_debug) {
			int i;
			for (i = cfg.original_program_index; i < cfg.original_argc; i++) {
				if (cfg.original_argv[i] == NULL)
					break;
				printf("execvp argument %d: %s\n", i - cfg.original_program_index, cfg.original_argv[i]);
			}
		}

		if (cfg.original_program_index == 0) {
			fprintf(stderr, "Error: --shell=none configured, but no program specified\n");
			exit(1);
		}

		if (!arg_command && !arg_quiet)
			print_time();

		// duplicate the shell's executable-permission check (see ok_to_run)
		int rv = ok_to_run(cfg.original_argv[cfg.original_program_index]);

		if (fp) {
			fprintf(fp, "ready\n");
			fclose(fp);
		}
#ifdef HAVE_GCOV
		__gcov_dump();
#endif
#ifdef HAVE_SECCOMP
		seccomp_install_filters();
#endif
		if (rv)
			execvp(cfg.original_argv[cfg.original_program_index], &cfg.original_argv[cfg.original_program_index]);
		else
			fprintf(stderr, "Error: no suitable %s executable found\n", cfg.original_argv[cfg.original_program_index]);

		exit(1);
	}
	//****************************************
	// start the program using a shell
	//****************************************
	else {
		assert(cfg.shell);
		assert(cfg.command_line);

		// build the shell argv: shell [-l | -c [--] command]
		char *arg[5];
		int index = 0;
		arg[index++] = cfg.shell;
		if (login_shell) {
			arg[index++] = "-l";
			if (arg_debug)
				printf("Starting %s login shell\n", cfg.shell);
		}
		else {
			arg[index++] = "-c";
			if (arg_debug)
				printf("Running %s command through %s\n", cfg.command_line, cfg.shell);
			if (arg_doubledash)
				arg[index++] = "--";
			arg[index++] = cfg.command_line;
		}
		arg[index] = NULL;
		assert(index < 5);

		if (arg_debug) {
			char *msg;
			if (asprintf(&msg, "sandbox %d, execvp into %s", sandbox_pid, cfg.command_line) == -1)
				errExit("asprintf");
			logmsg(msg);
			free(msg);
		}

		if (arg_debug) {
			int i;
			for (i = 0; i
< 5; i++) {
				if (arg[i] == NULL)
					break;
				printf("execvp argument %d: %s\n", i, arg[i]);
			}
		}

		if (!arg_command && !arg_quiet)
			print_time();
		if (fp) {
			fprintf(fp, "ready\n");
			fclose(fp);
		}
#ifdef HAVE_GCOV
		__gcov_dump();
#endif
#ifdef HAVE_SECCOMP
		seccomp_install_filters();
#endif
		execvp(arg[0], arg);
	}

	perror("execvp");
	exit(1); // it should never get here!!!
}

// Harden the sandbox when running --appimage/--chroot/--overlay as a regular
// user: force NO_NEW_PRIVS, drop all capabilities and supplementary groups.
static void enforce_filters(void) {
	// enforce NO_NEW_PRIVS
	arg_nonewprivs = 1;
	force_nonewprivs = 1;

	// disable all capabilities
	fmessage("\n** Warning: dropping all Linux capabilities **\n");
	arg_caps_drop_all = 1;

	// drop all supplementary groups; /etc/group file inside chroot
	// is controlled by a regular usr
	arg_nogroups = 1;
}

// Child-side sandbox construction: runs inside the new namespaces, builds the
// filesystem/network/security environment, then forks and execs the target
// application and monitors it. Returns the application's exit status.
int sandbox(void* sandbox_arg) {
	// Get rid of unused parameter warning
	(void)sandbox_arg;

	pid_t child_pid = getpid();
	if (arg_debug)
		printf("Initializing child process\n");

	// close each end of the unused pipes
	close(parent_to_child_fds[1]);
	close(child_to_parent_fds[0]);

	// wait for parent to do base setup
	wait_for_other(parent_to_child_fds[0]);

	// being pid 1 indicates a PID namespace is in effect
	if (arg_debug && child_pid == 1)
		printf("PID namespace installed\n");

	//****************************
	// set hostname
	//****************************
	if (cfg.hostname) {
		if (sethostname(cfg.hostname, strlen(cfg.hostname)) < 0)
			errExit("sethostname");
	}

	//****************************
	// mount namespace
	//****************************
	// mount events are not forwarded between the host the sandbox
	if (mount(NULL, "/", NULL, MS_SLAVE | MS_REC, NULL) < 0) {
		chk_chroot();
	}
	// ... and mount a tmpfs on top of /run/firejail/mnt directory
	preproc_mount_mnt_dir();

	// bind-mount firejail binaries and helper programs
	if (mount(LIBDIR "/firejail", RUN_FIREJAIL_LIB_DIR, "none", MS_BIND, NULL) < 0)
		errExit("mounting " RUN_FIREJAIL_LIB_DIR);

	//****************************
	// log sandbox data
	//****************************
	if (cfg.name)
		fs_logger2("sandbox name:", cfg.name);
	fs_logger2int("sandbox pid:", (int) sandbox_pid);
	if (cfg.chrootdir)
		fs_logger("sandbox filesystem: chroot");
	else if (arg_overlay)
		fs_logger("sandbox filesystem: overlay");
	else
		fs_logger("sandbox filesystem: local");
	fs_logger("install mount namespace");

	//****************************
	// netfilter
	//****************************
	if (arg_netfilter && any_bridge_configured()) { // assuming by default the client filter
		netfilter(arg_netfilter_file);
	}
	if (arg_netfilter6 && any_bridge_configured()) { // assuming by default the client filter
		netfilter6(arg_netfilter6_file);
	}

	//****************************
	// networking
	//****************************
	int gw_cfg_failed = 0; // default gw configuration flag
	if (arg_nonetwork) {
		net_if_up("lo");
		if (arg_debug)
			printf("Network namespace enabled, only loopback interface available\n");
	}
	else if (arg_netns) {
		netns(arg_netns);
		if (arg_debug)
			printf("Network namespace '%s' activated\n", arg_netns);
	}
	else if (any_bridge_configured() || any_interface_configured()) {
		// configure lo and eth0...eth3
		net_if_up("lo");

		if (mac_not_zero(cfg.bridge0.macsandbox))
			net_config_mac(cfg.bridge0.devsandbox, cfg.bridge0.macsandbox);
		sandbox_if_up(&cfg.bridge0);

		if (mac_not_zero(cfg.bridge1.macsandbox))
			net_config_mac(cfg.bridge1.devsandbox, cfg.bridge1.macsandbox);
		sandbox_if_up(&cfg.bridge1);

		if (mac_not_zero(cfg.bridge2.macsandbox))
			net_config_mac(cfg.bridge2.devsandbox, cfg.bridge2.macsandbox);
		sandbox_if_up(&cfg.bridge2);

		if (mac_not_zero(cfg.bridge3.macsandbox))
			net_config_mac(cfg.bridge3.devsandbox, cfg.bridge3.macsandbox);
		sandbox_if_up(&cfg.bridge3);
// moving an interface in a namespace using --interface will reset the interface configuration;
		// we need to put the configuration back
		if (cfg.interface0.configured && cfg.interface0.ip) {
			if (arg_debug)
				printf("Configuring %d.%d.%d.%d address on interface %s\n", PRINT_IP(cfg.interface0.ip), cfg.interface0.dev);
			net_config_interface(cfg.interface0.dev, cfg.interface0.ip, cfg.interface0.mask, cfg.interface0.mtu);
		}
		if (cfg.interface1.configured && cfg.interface1.ip) {
			if (arg_debug)
				printf("Configuring %d.%d.%d.%d address on interface %s\n", PRINT_IP(cfg.interface1.ip), cfg.interface1.dev);
			net_config_interface(cfg.interface1.dev, cfg.interface1.ip, cfg.interface1.mask, cfg.interface1.mtu);
		}
		if (cfg.interface2.configured && cfg.interface2.ip) {
			if (arg_debug)
				printf("Configuring %d.%d.%d.%d address on interface %s\n", PRINT_IP(cfg.interface2.ip), cfg.interface2.dev);
			net_config_interface(cfg.interface2.dev, cfg.interface2.ip, cfg.interface2.mask, cfg.interface2.mtu);
		}
		if (cfg.interface3.configured && cfg.interface3.ip) {
			if (arg_debug)
				printf("Configuring %d.%d.%d.%d address on interface %s\n", PRINT_IP(cfg.interface3.ip), cfg.interface3.dev);
			net_config_interface(cfg.interface3.dev, cfg.interface3.ip, cfg.interface3.mask, cfg.interface3.mtu);
		}

		// add a default route
		if (cfg.defaultgw) {
			// set the default route
			if (net_add_route(0, 0, cfg.defaultgw)) {
				fwarning("cannot configure default route\n");
				gw_cfg_failed = 1;
			}
		}

		if (arg_debug)
			printf("Network namespace enabled\n");
	}

	// print network configuration
	if (!arg_quiet) {
		if (any_bridge_configured() || any_interface_configured() || cfg.defaultgw || cfg.dns1) {
			fmessage("\n");
			if (any_bridge_configured() || any_interface_configured()) {
				if (arg_scan)
					sbox_run(SBOX_ROOT | SBOX_CAPS_NETWORK | SBOX_SECCOMP, 3, PATH_FNET, "printif", "scan");
				else
					sbox_run(SBOX_ROOT | SBOX_CAPS_NETWORK | SBOX_SECCOMP, 2, PATH_FNET, "printif");
			}
			if (cfg.defaultgw != 0) {
				if (gw_cfg_failed)
					fmessage("Default gateway configuration failed\n");
				else
					fmessage("Default gateway %d.%d.%d.%d\n", PRINT_IP(cfg.defaultgw));
			}
			if (cfg.dns1 != NULL)
				fmessage("DNS server %s\n", cfg.dns1);
			if (cfg.dns2 != NULL)
				fmessage("DNS server %s\n", cfg.dns2);
			if (cfg.dns3 != NULL)
				fmessage("DNS server %s\n", cfg.dns3);
			if (cfg.dns4 != NULL)
				fmessage("DNS server %s\n", cfg.dns4);
			fmessage("\n");
		}
	}

	// load IBUS env variables
	if (arg_nonetwork || any_bridge_configured() || any_interface_configured()) {
		// do nothing - there are problems with ibus version 1.5.11
	}
	else {
		// NOTE(review): EUID_USER/EUID_ROOT appear to toggle the effective
		// UID for user-level vs privileged work - semantics defined elsewhere
		EUID_USER();
		env_ibus_load();
		EUID_ROOT();
	}

	//****************************
	// fs pre-processing:
	//  - build seccomp filters
	//  - create an empty /etc/ld.so.preload
	//****************************
#ifdef HAVE_SECCOMP
	if (cfg.protocol) {
		if (arg_debug)
			printf("Build protocol filter: %s\n", cfg.protocol);

		// build the seccomp filter as a regular user
		int rv = sbox_run(SBOX_USER | SBOX_CAPS_NONE | SBOX_SECCOMP, 5,
			PATH_FSECCOMP, "protocol", "build", cfg.protocol, RUN_SECCOMP_PROTOCOL);
		if (rv)
			exit(rv);
	}
	if (arg_seccomp && (cfg.seccomp_list || cfg.seccomp_list_drop || cfg.seccomp_list_keep))
		arg_seccomp_postexec = 1;
#endif
	// need ld.so.preload if tracing or seccomp with any non-default lists
	bool need_preload = arg_trace || arg_tracelog || arg_seccomp_postexec;

	// for --appimage, --chroot and --overlay* we force NO_NEW_PRIVS
	// and drop all capabilities
	if (getuid() != 0 && (arg_appimage || cfg.chrootdir || arg_overlay)) {
		enforce_filters();
		need_preload = arg_trace || arg_tracelog;
	}

	// trace pre-install
	if (need_preload)
		fs_trace_preload();

	// store hosts file
	if (cfg.hosts_file)
		fs_store_hosts_file();

	//****************************
	// configure filesystem
	//****************************
#ifdef HAVE_CHROOT
	if (cfg.chrootdir) {
		fs_chroot(cfg.chrootdir);

		//****************************
		// trace pre-install, this time inside chroot
		//****************************
		if (need_preload)
			fs_trace_preload();
	}
	else
#endif
#ifdef HAVE_OVERLAYFS
	if (arg_overlay)
fs_overlayfs();
	else
#endif
		fs_basic_fs();

	//****************************
	// private mode
	//****************************
	if (arg_private) {
		if (cfg.home_private) {	// --private=
			if (cfg.chrootdir)
				fwarning("private=directory feature is disabled in chroot\n");
			else if (arg_overlay)
				fwarning("private=directory feature is disabled in overlay\n");
			else
				fs_private_homedir();
		}
		else if (cfg.home_private_keep) { // --private-home=
			if (cfg.chrootdir)
				fwarning("private-home= feature is disabled in chroot\n");
			else if (arg_overlay)
				fwarning("private-home= feature is disabled in overlay\n");
			else
				fs_private_home_list();
		}
		else // --private
			fs_private();
	}

	if (arg_private_dev)
		fs_private_dev();

	if (arg_private_etc) {
		if (cfg.chrootdir)
			fwarning("private-etc feature is disabled in chroot\n");
		else if (arg_overlay)
			fwarning("private-etc feature is disabled in overlay\n");
		else {
			fs_private_dir_list("/etc", RUN_ETC_DIR, cfg.etc_private_keep);
			// create /etc/ld.so.preload file again
			if (need_preload)
				fs_trace_preload();
		}
	}

	if (arg_private_opt) {
		if (cfg.chrootdir)
			fwarning("private-opt feature is disabled in chroot\n");
		else if (arg_overlay)
			fwarning("private-opt feature is disabled in overlay\n");
		else {
			fs_private_dir_list("/opt", RUN_OPT_DIR, cfg.opt_private_keep);
		}
	}

	if (arg_private_srv) {
		if (cfg.chrootdir)
			fwarning("private-srv feature is disabled in chroot\n");
		else if (arg_overlay)
			fwarning("private-srv feature is disabled in overlay\n");
		else {
			fs_private_dir_list("/srv", RUN_SRV_DIR, cfg.srv_private_keep);
		}
	}

	// private-bin is disabled for appimages
	if (arg_private_bin && !arg_appimage) {
		if (cfg.chrootdir)
			fwarning("private-bin feature is disabled in chroot\n");
		else if (arg_overlay)
			fwarning("private-bin feature is disabled in overlay\n");
		else {
			// for --x11=xorg we need to add xauth command
			if (arg_x11_xorg) {
				EUID_USER();
				char *tmp;
				if (asprintf(&tmp, "%s,xauth", cfg.bin_private_keep) == -1)
					errExit("asprintf");
				cfg.bin_private_keep = tmp;
				EUID_ROOT();
			}
fs_private_bin_list();
		}
	}

	// private-lib is disabled for appimages
	if (arg_private_lib && !arg_appimage) {
		if (cfg.chrootdir)
			fwarning("private-lib feature is disabled in chroot\n");
		else if (arg_overlay)
			fwarning("private-lib feature is disabled in overlay\n");
		else {
			fs_private_lib();
		}
	}

	if (arg_private_cache) {
		if (cfg.chrootdir)
			fwarning("private-cache feature is disabled in chroot\n");
		else if (arg_overlay)
			fwarning("private-cache feature is disabled in overlay\n");
		else
			fs_private_cache();
	}

	if (arg_private_tmp) {
		// private-tmp is implemented as a whitelist
		EUID_USER();
		fs_private_tmp();
		EUID_ROOT();
	}

	//****************************
	// Session D-BUS
	//****************************
	if (arg_nodbus)
		dbus_session_disable();

	//****************************
	// hosts and hostname
	//****************************
	if (cfg.hostname)
		fs_hostname(cfg.hostname);

	if (cfg.hosts_file)
		fs_mount_hosts_file();

	//****************************
	// /etc overrides from the network namespace
	//****************************
	if (arg_netns)
		netns_mounts(arg_netns);

	//****************************
	// update /proc, /sys, /dev, /boot directory
	//****************************
	fs_proc_sys_dev_boot();

	//****************************
	// handle /mnt and /media
	//****************************
	if (checkcfg(CFG_DISABLE_MNT))
		fs_mnt(1);
	else if (arg_disable_mnt)
		fs_mnt(0);

	//****************************
	// apply the profile file
	//****************************
	// apply all whitelist commands ...
	fs_whitelist();

	// ... followed by blacklist commands
	fs_blacklist(); // mkdir and mkfile are processed all over again

	//****************************
	// nosound/no3d/notv/novideo and fix for pulseaudio 7.0
	//****************************
	if (arg_nosound) {
		// disable pulseaudio
		pulseaudio_disable();

		// disable /dev/snd
		fs_dev_disable_sound();
	}
	else if (!arg_noautopulse)
		pulseaudio_init();

	if (arg_no3d)
		fs_dev_disable_3d();

	if (arg_notv)
		fs_dev_disable_tv();

	if (arg_nodvd)
		fs_dev_disable_dvd();

	if (arg_nou2f)
		fs_dev_disable_u2f();

	if (arg_novideo)
		fs_dev_disable_video();

	//****************************
	// install trace
	//****************************
	if (need_preload)
		fs_trace();

	//****************************
	// set dns
	//****************************
	fs_resolvconf();

	//****************************
	// fs post-processing
	//****************************
	fs_logger_print();
	fs_logger_change_owner();

	//****************************
	// set application environment
	//****************************
	EUID_USER();
	int cwd = 0;
	if (cfg.cwd) {
		if (chdir(cfg.cwd) == 0)
			cwd = 1;
	}

	if (!cwd) {
		// fall back to / and then to the home directory if it exists
		if (chdir("/") < 0)
			errExit("chdir");
		if (cfg.homedir) {
			struct stat s;
			if (stat(cfg.homedir, &s) == 0) {
				/* coverity[toctou] */
				if (chdir(cfg.homedir) < 0)
					errExit("chdir");
			}
		}
	}

	if (arg_debug) {
		char *cpath = get_current_dir_name();
		if (cpath) {
			printf("Current directory: %s\n", cpath);
			free(cpath);
		}
	}

	EUID_ROOT();
	// clean /tmp/.X11-unix sockets
	fs_x11();
	if (arg_x11_xorg)
		x11_xorg();

	// save original umask
	save_umask();

	//****************************
	// set security filters
	//****************************
	// save state of nonewprivs
	save_nonewprivs();

	// save cpu affinity mask to CPU_CFG file
	save_cpu();

	// save cgroup in CGROUP_CFG file
	save_cgroup();

	// set seccomp
#ifdef HAVE_SECCOMP
	// install protocol filter
#ifdef SYS_socket
	if (cfg.protocol) {
		if (arg_debug)
			printf("Install protocol filter: %s\n", cfg.protocol);
		seccomp_load(RUN_SECCOMP_PROTOCOL);	// install filter
		protocol_filter_save();	// save filter in RUN_PROTOCOL_CFG
	}
	else {
		int rv = unlink(RUN_SECCOMP_PROTOCOL);
		(void) rv;
	}
#endif

	// if a keep list is available, disregard the drop list
	if (arg_seccomp == 1) {
		if (cfg.seccomp_list_keep)
			seccomp_filter_keep();
		else
			seccomp_filter_drop();
	}
	else { // clean seccomp files under /run/firejail/mnt
		int rv = unlink(RUN_SECCOMP_CFG);
		rv |= unlink(RUN_SECCOMP_32);
		(void) rv;
	}

	if (arg_memory_deny_write_execute) {
		if (arg_debug)
			printf("Install memory write&execute filter\n");
		seccomp_load(RUN_SECCOMP_MDWX);	// install filter
	}
	else {
		int rv = unlink(RUN_SECCOMP_MDWX);
		(void) rv;
	}

	// make seccomp filters read-only
	fs_rdonly(RUN_SECCOMP_DIR);
#endif

	// set capabilities
	set_caps();

	//****************************************
	// communicate progress of sandbox set up
	// to --join
	//****************************************
	FILE *rj = create_ready_for_join_file();

	//****************************************
	// create a new user namespace
	//  - too early to drop privileges
	//****************************************
	save_nogroups();
	if (arg_noroot) {
		int rv = unshare(CLONE_NEWUSER);
		if (rv == -1) {
			fwarning("cannot create a new user namespace, going forward without it...\n");
			arg_noroot = 0;
		}
	}

	// notify parent that new user namespace has been created so a proper
	// UID/GID map can be setup
	notify_other(child_to_parent_fds[1]);
	close(child_to_parent_fds[1]);

	// wait for parent to finish setting up a proper UID/GID map
	wait_for_other(parent_to_child_fds[0]);
	close(parent_to_child_fds[0]);

	// somehow, the new user namespace resets capabilities;
	// we need to do them again
	if (arg_noroot) {
		if (arg_debug)
			printf("noroot user namespace installed\n");
		set_caps();
	}

	//****************************************
	// Set NO_NEW_PRIVS if desired
	//****************************************
	if (arg_nonewprivs) {
		prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);

		// verify the kernel actually honored the request (needs >= 3.5)
		if (prctl(PR_GET_NO_NEW_PRIVS, 0, 0, 0, 0) != 1) {
			fwarning("cannot set NO_NEW_PRIVS, it requires a Linux kernel version 3.5 or newer.\n");
			if
(force_nonewprivs) {
				fprintf(stderr, "Error: NO_NEW_PRIVS required for this sandbox, exiting ...\n");
				exit(1);
			}
		}
		else if (arg_debug)
			printf("NO_NEW_PRIVS set\n");
	}

	//****************************************
	// drop privileges
	//****************************************
	drop_privs(arg_nogroups);

	// kill the sandbox in case the parent died
	prctl(PR_SET_PDEATHSIG, SIGKILL, 0, 0, 0);

	//****************************************
	// set cpu affinity
	//****************************************
	if (cfg.cpus)
		set_cpu_affinity();

	//****************************************
	// fork the application and monitor it
	//****************************************
	pid_t app_pid = fork();
	if (app_pid == -1)
		errExit("fork");

	if (app_pid == 0) {
#ifdef HAVE_APPARMOR
		if (checkcfg(CFG_APPARMOR) && arg_apparmor) {
			errno = 0;
			// confinement takes effect at the upcoming exec
			if (aa_change_onexec("firejail-default")) {
				fwarning("Cannot confine the application using AppArmor.\n"
					"Maybe firejail-default AppArmor profile is not loaded into the kernel.\n"
					"As root, run \"aa-enforce firejail-default\" to load it.\n");
			}
			else if (arg_debug)
				printf("AppArmor enabled\n");
		}
#endif
		// set nice and rlimits
		if (arg_nice)
			set_nice(cfg.nice);
		set_rlimits();

		start_application(0, rj);	// does not return
	}

	fclose(rj);

	int status = monitor_application(app_pid);	// monitor application
	flush_stdin();

	if (WIFEXITED(status)) {
		// if we had a proper exit, return that exit status
		return WEXITSTATUS(status);
	}
	else {
		// something else went wrong!
		return -1;
	}
}
/*
 * NOTE(review): the two lines below are dataset/extraction residue (file-path
 * markers), not C source; preserved inside a comment so they cannot be parsed
 * as code:
 *   ./CrossVul/dataset_final_sorted/CWE-284/c/good_880_3
 *   crossvul-cpp_data_bad_5346_0
 */
/* * IPv4 over IEEE 1394, per RFC 2734 * IPv6 over IEEE 1394, per RFC 3146 * * Copyright (C) 2009 Jay Fenlason <fenlason@redhat.com> * * based on eth1394 by Ben Collins et al */ #include <linux/bug.h> #include <linux/compiler.h> #include <linux/delay.h> #include <linux/device.h> #include <linux/ethtool.h> #include <linux/firewire.h> #include <linux/firewire-constants.h> #include <linux/highmem.h> #include <linux/in.h> #include <linux/ip.h> #include <linux/jiffies.h> #include <linux/mod_devicetable.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/mutex.h> #include <linux/netdevice.h> #include <linux/skbuff.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <asm/unaligned.h> #include <net/arp.h> #include <net/firewire.h> /* rx limits */ #define FWNET_MAX_FRAGMENTS 30 /* arbitrary, > TX queue depth */ #define FWNET_ISO_PAGE_COUNT (PAGE_SIZE < 16*1024 ? 4 : 2) /* tx limits */ #define FWNET_MAX_QUEUED_DATAGRAMS 20 /* < 64 = number of tlabels */ #define FWNET_MIN_QUEUED_DATAGRAMS 10 /* should keep AT DMA busy enough */ #define FWNET_TX_QUEUE_LEN FWNET_MAX_QUEUED_DATAGRAMS /* ? 
*/

#define IEEE1394_BROADCAST_CHANNEL	31
#define IEEE1394_ALL_NODES		(0xffc0 | 0x003f)
#define IEEE1394_MAX_PAYLOAD_S100	512
#define FWNET_NO_FIFO_ADDR		(~0ULL)

#define IANA_SPECIFIER_ID		0x00005eU
#define RFC2734_SW_VERSION		0x000001U
#define RFC3146_SW_VERSION		0x000002U

#define IEEE1394_GASP_HDR_SIZE	8

#define RFC2374_UNFRAG_HDR_SIZE	4
#define RFC2374_FRAG_HDR_SIZE	8
#define RFC2374_FRAG_OVERHEAD	4

#define RFC2374_HDR_UNFRAG	0	/* unfragmented */
#define RFC2374_HDR_FIRSTFRAG	1	/* first fragment */
#define RFC2374_HDR_LASTFRAG	2	/* last fragment */
#define RFC2374_HDR_INTFRAG	3	/* interior fragment */

/* A hardware address is multicast when its least significant bit is set. */
static bool fwnet_hwaddr_is_multicast(u8 *ha)
{
	return !!(*ha & 1);
}

/* IPv4 and IPv6 encapsulation header */
struct rfc2734_header {
	u32 w0;
	u32 w1;
};

/* Field accessors for the two 32-bit words of the RFC 2734 header. */
#define fwnet_get_hdr_lf(h)		(((h)->w0 & 0xc0000000) >> 30)
#define fwnet_get_hdr_ether_type(h)	(((h)->w0 & 0x0000ffff))
#define fwnet_get_hdr_dg_size(h)	(((h)->w0 & 0x0fff0000) >> 16)
#define fwnet_get_hdr_fg_off(h)		(((h)->w0 & 0x00000fff))
#define fwnet_get_hdr_dgl(h)		(((h)->w1 & 0xffff0000) >> 16)

#define fwnet_set_hdr_lf(lf)		((lf) << 30)
#define fwnet_set_hdr_ether_type(et)	(et)
#define fwnet_set_hdr_dg_size(dgs)	((dgs) << 16)
#define fwnet_set_hdr_fg_off(fgo)	(fgo)
#define fwnet_set_hdr_dgl(dgl)		((dgl) << 16)

/* Build the header for an unfragmented datagram. */
static inline void fwnet_make_uf_hdr(struct rfc2734_header *hdr,
		unsigned ether_type)
{
	hdr->w0 = fwnet_set_hdr_lf(RFC2374_HDR_UNFRAG)
		  | fwnet_set_hdr_ether_type(ether_type);
}

/* Build the header for the first fragment of a datagram. */
static inline void fwnet_make_ff_hdr(struct rfc2734_header *hdr,
		unsigned ether_type, unsigned dg_size, unsigned dgl)
{
	hdr->w0 = fwnet_set_hdr_lf(RFC2374_HDR_FIRSTFRAG)
		  | fwnet_set_hdr_dg_size(dg_size)
		  | fwnet_set_hdr_ether_type(ether_type);
	hdr->w1 = fwnet_set_hdr_dgl(dgl);
}

/* Build the header for an interior or last fragment (lf selects which). */
static inline void fwnet_make_sf_hdr(struct rfc2734_header *hdr,
		unsigned lf, unsigned dg_size, unsigned fg_off, unsigned dgl)
{
	hdr->w0 = fwnet_set_hdr_lf(lf)
		  | fwnet_set_hdr_dg_size(dg_size)
		  | fwnet_set_hdr_fg_off(fg_off);
	hdr->w1 = fwnet_set_hdr_dgl(dgl);
}

/*
This list keeps track of what parts of the datagram have been filled in */
struct fwnet_fragment_info {
	struct list_head fi_link;
	u16 offset;
	u16 len;
};

/* One in-progress reassembly of a fragmented incoming datagram. */
struct fwnet_partial_datagram {
	struct list_head pd_link;
	struct list_head fi_list;
	struct sk_buff *skb;
	/* FIXME Why not use skb->data? */
	char *pbuf;
	u16 datagram_label;
	u16 ether_type;
	u16 datagram_size;
};

static DEFINE_MUTEX(fwnet_device_mutex);
static LIST_HEAD(fwnet_device_list);

/* Per-card private state behind the net_device. */
struct fwnet_device {
	struct list_head dev_link;
	spinlock_t lock;
	enum {
		FWNET_BROADCAST_ERROR,
		FWNET_BROADCAST_RUNNING,
		FWNET_BROADCAST_STOPPED,
	} broadcast_state;
	struct fw_iso_context *broadcast_rcv_context;
	struct fw_iso_buffer broadcast_rcv_buffer;
	void **broadcast_rcv_buffer_ptrs;
	unsigned broadcast_rcv_next_ptr;
	unsigned num_broadcast_rcv_ptrs;
	unsigned rcv_buffer_size;
	/*
	 * This value is the maximum unfragmented datagram size that can be
	 * sent by the hardware.  It already has the GASP overhead and the
	 * unfragmented datagram header overhead calculated into it.
	 */
	unsigned broadcast_xmt_max_payload;
	u16 broadcast_xmt_datagramlabel;

	/*
	 * The CSR address that remote nodes must send datagrams to for us to
	 * receive them.
	 */
	struct fw_address_handler handler;
	u64 local_fifo;

	/* Number of tx datagrams that have been queued but not yet acked */
	int queued_datagrams;

	int peer_count;
	struct list_head peer_list;
	struct fw_card *card;
	struct net_device *netdev;
};

/* One remote node we exchange datagrams with. */
struct fwnet_peer {
	struct list_head peer_link;
	struct fwnet_device *dev;
	u64 guid;

	/* guarded by dev->lock */
	struct list_head pd_list;	/* received partial datagrams */
	unsigned pdg_size;		/* pd_list size */

	u16 datagram_label;	/* outgoing datagram label */
	u16 max_payload;	/* includes RFC2374_FRAG_HDR_SIZE overhead */
	int node_id;
	int generation;
	unsigned speed;
};

/* This is our task struct. It's used for the packet complete callback.
*/
struct fwnet_packet_task {
	struct fw_transaction transaction;
	struct rfc2734_header hdr;
	struct sk_buff *skb;
	struct fwnet_device *dev;

	int outstanding_pkts;
	u64 fifo_addr;
	u16 dest_node;
	u16 max_payload;
	u8 generation;
	u8 speed;
	u8 enqueued;
};

/*
 * Get fifo address embedded in hwaddr
 */
static __u64 fwnet_hwaddr_fifo(union fwnet_hwaddr *ha)
{
	return (u64)get_unaligned_be16(&ha->uc.fifo_hi) << 32
	       | get_unaligned_be32(&ha->uc.fifo_lo);
}

/*
 * saddr == NULL means use device source address.
 * daddr == NULL means leave destination address (eg unresolved arp).
 *
 * Returns the pushed header length, negated when the destination is still
 * unresolved (standard header_ops.create contract).
 */
static int fwnet_header_create(struct sk_buff *skb, struct net_device *net,
			unsigned short type, const void *daddr,
			const void *saddr, unsigned len)
{
	struct fwnet_header *h;

	h = (struct fwnet_header *)skb_push(skb, sizeof(*h));
	put_unaligned_be16(type, &h->h_proto);

	if (net->flags & (IFF_LOOPBACK | IFF_NOARP)) {
		memset(h->h_dest, 0, net->addr_len);

		return net->hard_header_len;
	}

	if (daddr) {
		memcpy(h->h_dest, daddr, net->addr_len);

		return net->hard_header_len;
	}

	return -net->hard_header_len;
}

/* Fill the neighbour's hh cache with a pseudo header; -1 rejects 802.3. */
static int fwnet_header_cache(const struct neighbour *neigh,
			      struct hh_cache *hh, __be16 type)
{
	struct net_device *net;
	struct fwnet_header *h;

	if (type == cpu_to_be16(ETH_P_802_3))
		return -1;
	net = neigh->dev;
	h = (struct fwnet_header *)((u8 *)hh->hh_data + HH_DATA_OFF(sizeof(*h)));
	h->h_proto = type;
	memcpy(h->h_dest, neigh->ha, net->addr_len);
	hh->hh_len = FWNET_HLEN;

	return 0;
}

/* Called by Address Resolution module to notify changes in address.
*/
static void fwnet_header_cache_update(struct hh_cache *hh,
		const struct net_device *net, const unsigned char *haddr)
{
	memcpy((u8 *)hh->hh_data + HH_DATA_OFF(FWNET_HLEN), haddr, net->addr_len);
}

/* Report the device's own hardware address as the parsed source address. */
static int fwnet_header_parse(const struct sk_buff *skb, unsigned char *haddr)
{
	memcpy(haddr, skb->dev->dev_addr, FWNET_ALEN);

	return FWNET_ALEN;
}

static const struct header_ops fwnet_header_ops = {
	.create		= fwnet_header_create,
	.cache		= fwnet_header_cache,
	.cache_update	= fwnet_header_cache_update,
	.parse		= fwnet_header_parse,
};

/* FIXME: is this correct for all cases? */
static bool fwnet_frag_overlap(struct fwnet_partial_datagram *pd,
			       unsigned offset, unsigned len)
{
	struct fwnet_fragment_info *fi;
	unsigned end = offset + len;

	list_for_each_entry(fi, &pd->fi_list, fi_link)
		if (offset < fi->offset + fi->len && end > fi->offset)
			return true;

	return false;
}

/* Assumes that new fragment does not overlap any existing fragments */
static struct fwnet_fragment_info *fwnet_frag_new(
	struct fwnet_partial_datagram *pd, unsigned offset, unsigned len)
{
	struct fwnet_fragment_info *fi, *fi2, *new;
	struct list_head *list;

	list = &pd->fi_list;
	list_for_each_entry(fi, &pd->fi_list, fi_link) {
		if (fi->offset + fi->len == offset) {
			/* The new fragment can be tacked on to the end */
			/* Did the new fragment plug a hole? */
			fi2 = list_entry(fi->fi_link.next,
					 struct fwnet_fragment_info, fi_link);
			if (fi->offset + fi->len == fi2->offset) {
				/* glue fragments together */
				fi->len += len + fi2->len;
				list_del(&fi2->fi_link);
				kfree(fi2);
			} else {
				fi->len += len;
			}

			return fi;
		}
		if (offset + len == fi->offset) {
			/* The new fragment can be tacked on to the beginning */
			/* Did the new fragment plug a hole?
*/
			fi2 = list_entry(fi->fi_link.prev,
					 struct fwnet_fragment_info, fi_link);
			if (fi2->offset + fi2->len == fi->offset) {
				/* glue fragments together */
				fi2->len += fi->len + len;
				list_del(&fi->fi_link);
				kfree(fi);

				return fi2;
			}
			fi->offset = offset;
			fi->len += len;

			return fi;
		}
		if (offset > fi->offset + fi->len) {
			list = &fi->fi_link;
			break;
		}
		if (offset + len < fi->offset) {
			list = fi->fi_link.prev;
			break;
		}
	}

	/* not adjacent to any existing fragment: record it on its own */
	new = kmalloc(sizeof(*new), GFP_ATOMIC);
	if (!new)
		return NULL;

	new->offset = offset;
	new->len = len;
	list_add(&new->fi_link, list);

	return new;
}

/* Start reassembly of a new datagram from its first seen fragment;
 * returns NULL on allocation failure. */
static struct fwnet_partial_datagram *fwnet_pd_new(struct net_device *net,
		struct fwnet_peer *peer, u16 datagram_label, unsigned dg_size,
		void *frag_buf, unsigned frag_off, unsigned frag_len)
{
	struct fwnet_partial_datagram *new;
	struct fwnet_fragment_info *fi;

	new = kmalloc(sizeof(*new), GFP_ATOMIC);
	if (!new)
		goto fail;

	INIT_LIST_HEAD(&new->fi_list);
	fi = fwnet_frag_new(new, frag_off, frag_len);
	if (fi == NULL)
		goto fail_w_new;

	new->datagram_label = datagram_label;
	new->datagram_size = dg_size;
	new->skb = dev_alloc_skb(dg_size + LL_RESERVED_SPACE(net));
	if (new->skb == NULL)
		goto fail_w_fi;

	skb_reserve(new->skb, LL_RESERVED_SPACE(net));
	new->pbuf = skb_put(new->skb, dg_size);
	memcpy(new->pbuf + frag_off, frag_buf, frag_len);
	list_add_tail(&new->pd_link, &peer->pd_list);

	return new;

fail_w_fi:
	kfree(fi);
fail_w_new:
	kfree(new);
fail:
	return NULL;
}

/* Look up an in-progress partial datagram by its datagram label. */
static struct fwnet_partial_datagram *fwnet_pd_find(struct fwnet_peer *peer,
						    u16 datagram_label)
{
	struct fwnet_partial_datagram *pd;

	list_for_each_entry(pd, &peer->pd_list, pd_link)
		if (pd->datagram_label == datagram_label)
			return pd;

	return NULL;
}

/* Free a partial datagram together with its fragment list and skb. */
static void fwnet_pd_delete(struct fwnet_partial_datagram *old)
{
	struct fwnet_fragment_info *fi, *n;

	list_for_each_entry_safe(fi, n, &old->fi_list, fi_link)
		kfree(fi);

	list_del(&old->pd_link);
	dev_kfree_skb_any(old->skb);
	kfree(old);
}

/* Merge one more received fragment into pd; false on allocation failure. */
static bool fwnet_pd_update(struct fwnet_peer *peer,
		struct fwnet_partial_datagram *pd,
void *frag_buf, unsigned frag_off, unsigned frag_len) { if (fwnet_frag_new(pd, frag_off, frag_len) == NULL) return false; memcpy(pd->pbuf + frag_off, frag_buf, frag_len); /* * Move list entry to beginning of list so that oldest partial * datagrams percolate to the end of the list */ list_move_tail(&pd->pd_link, &peer->pd_list); return true; } static bool fwnet_pd_is_complete(struct fwnet_partial_datagram *pd) { struct fwnet_fragment_info *fi; fi = list_entry(pd->fi_list.next, struct fwnet_fragment_info, fi_link); return fi->len == pd->datagram_size; } /* caller must hold dev->lock */ static struct fwnet_peer *fwnet_peer_find_by_guid(struct fwnet_device *dev, u64 guid) { struct fwnet_peer *peer; list_for_each_entry(peer, &dev->peer_list, peer_link) if (peer->guid == guid) return peer; return NULL; } /* caller must hold dev->lock */ static struct fwnet_peer *fwnet_peer_find_by_node_id(struct fwnet_device *dev, int node_id, int generation) { struct fwnet_peer *peer; list_for_each_entry(peer, &dev->peer_list, peer_link) if (peer->node_id == node_id && peer->generation == generation) return peer; return NULL; } /* See IEEE 1394-2008 table 6-4, table 8-8, table 16-18. */ static unsigned fwnet_max_payload(unsigned max_rec, unsigned speed) { max_rec = min(max_rec, speed + 8); max_rec = clamp(max_rec, 8U, 11U); /* 512...4096 */ return (1 << (max_rec + 1)) - RFC2374_FRAG_HDR_SIZE; } static int fwnet_finish_incoming_packet(struct net_device *net, struct sk_buff *skb, u16 source_node_id, bool is_broadcast, u16 ether_type) { struct fwnet_device *dev; int status; __be64 guid; switch (ether_type) { case ETH_P_ARP: case ETH_P_IP: #if IS_ENABLED(CONFIG_IPV6) case ETH_P_IPV6: #endif break; default: goto err; } dev = netdev_priv(net); /* Write metadata, and then pass to the receive level */ skb->dev = net; skb->ip_summed = CHECKSUM_NONE; /* * Parse the encapsulation header. This actually does the job of * converting to an ethernet-like pseudo frame header. 
*/ guid = cpu_to_be64(dev->card->guid); if (dev_hard_header(skb, net, ether_type, is_broadcast ? net->broadcast : net->dev_addr, NULL, skb->len) >= 0) { struct fwnet_header *eth; u16 *rawp; __be16 protocol; skb_reset_mac_header(skb); skb_pull(skb, sizeof(*eth)); eth = (struct fwnet_header *)skb_mac_header(skb); if (fwnet_hwaddr_is_multicast(eth->h_dest)) { if (memcmp(eth->h_dest, net->broadcast, net->addr_len) == 0) skb->pkt_type = PACKET_BROADCAST; #if 0 else skb->pkt_type = PACKET_MULTICAST; #endif } else { if (memcmp(eth->h_dest, net->dev_addr, net->addr_len)) skb->pkt_type = PACKET_OTHERHOST; } if (ntohs(eth->h_proto) >= ETH_P_802_3_MIN) { protocol = eth->h_proto; } else { rawp = (u16 *)skb->data; if (*rawp == 0xffff) protocol = htons(ETH_P_802_3); else protocol = htons(ETH_P_802_2); } skb->protocol = protocol; } status = netif_rx(skb); if (status == NET_RX_DROP) { net->stats.rx_errors++; net->stats.rx_dropped++; } else { net->stats.rx_packets++; net->stats.rx_bytes += skb->len; } return 0; err: net->stats.rx_errors++; net->stats.rx_dropped++; dev_kfree_skb_any(skb); return -ENOENT; } static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len, int source_node_id, int generation, bool is_broadcast) { struct sk_buff *skb; struct net_device *net = dev->netdev; struct rfc2734_header hdr; unsigned lf; unsigned long flags; struct fwnet_peer *peer; struct fwnet_partial_datagram *pd; int fg_off; int dg_size; u16 datagram_label; int retval; u16 ether_type; hdr.w0 = be32_to_cpu(buf[0]); lf = fwnet_get_hdr_lf(&hdr); if (lf == RFC2374_HDR_UNFRAG) { /* * An unfragmented datagram has been received by the ieee1394 * bus. Build an skbuff around it so we can pass it to the * high level network layer. 
*/ ether_type = fwnet_get_hdr_ether_type(&hdr); buf++; len -= RFC2374_UNFRAG_HDR_SIZE; skb = dev_alloc_skb(len + LL_RESERVED_SPACE(net)); if (unlikely(!skb)) { net->stats.rx_dropped++; return -ENOMEM; } skb_reserve(skb, LL_RESERVED_SPACE(net)); memcpy(skb_put(skb, len), buf, len); return fwnet_finish_incoming_packet(net, skb, source_node_id, is_broadcast, ether_type); } /* A datagram fragment has been received, now the fun begins. */ hdr.w1 = ntohl(buf[1]); buf += 2; len -= RFC2374_FRAG_HDR_SIZE; if (lf == RFC2374_HDR_FIRSTFRAG) { ether_type = fwnet_get_hdr_ether_type(&hdr); fg_off = 0; } else { ether_type = 0; fg_off = fwnet_get_hdr_fg_off(&hdr); } datagram_label = fwnet_get_hdr_dgl(&hdr); dg_size = fwnet_get_hdr_dg_size(&hdr); /* ??? + 1 */ spin_lock_irqsave(&dev->lock, flags); peer = fwnet_peer_find_by_node_id(dev, source_node_id, generation); if (!peer) { retval = -ENOENT; goto fail; } pd = fwnet_pd_find(peer, datagram_label); if (pd == NULL) { while (peer->pdg_size >= FWNET_MAX_FRAGMENTS) { /* remove the oldest */ fwnet_pd_delete(list_first_entry(&peer->pd_list, struct fwnet_partial_datagram, pd_link)); peer->pdg_size--; } pd = fwnet_pd_new(net, peer, datagram_label, dg_size, buf, fg_off, len); if (pd == NULL) { retval = -ENOMEM; goto fail; } peer->pdg_size++; } else { if (fwnet_frag_overlap(pd, fg_off, len) || pd->datagram_size != dg_size) { /* * Differing datagram sizes or overlapping fragments, * discard old datagram and start a new one. */ fwnet_pd_delete(pd); pd = fwnet_pd_new(net, peer, datagram_label, dg_size, buf, fg_off, len); if (pd == NULL) { peer->pdg_size--; retval = -ENOMEM; goto fail; } } else { if (!fwnet_pd_update(peer, pd, buf, fg_off, len)) { /* * Couldn't save off fragment anyway * so might as well obliterate the * datagram now. 
*/ fwnet_pd_delete(pd); peer->pdg_size--; retval = -ENOMEM; goto fail; } } } /* new datagram or add to existing one */ if (lf == RFC2374_HDR_FIRSTFRAG) pd->ether_type = ether_type; if (fwnet_pd_is_complete(pd)) { ether_type = pd->ether_type; peer->pdg_size--; skb = skb_get(pd->skb); fwnet_pd_delete(pd); spin_unlock_irqrestore(&dev->lock, flags); return fwnet_finish_incoming_packet(net, skb, source_node_id, false, ether_type); } /* * Datagram is not complete, we're done for the * moment. */ retval = 0; fail: spin_unlock_irqrestore(&dev->lock, flags); return retval; } static void fwnet_receive_packet(struct fw_card *card, struct fw_request *r, int tcode, int destination, int source, int generation, unsigned long long offset, void *payload, size_t length, void *callback_data) { struct fwnet_device *dev = callback_data; int rcode; if (destination == IEEE1394_ALL_NODES) { kfree(r); return; } if (offset != dev->handler.offset) rcode = RCODE_ADDRESS_ERROR; else if (tcode != TCODE_WRITE_BLOCK_REQUEST) rcode = RCODE_TYPE_ERROR; else if (fwnet_incoming_packet(dev, payload, length, source, generation, false) != 0) { dev_err(&dev->netdev->dev, "incoming packet failure\n"); rcode = RCODE_CONFLICT_ERROR; } else rcode = RCODE_COMPLETE; fw_send_response(card, r, rcode); } static void fwnet_receive_broadcast(struct fw_iso_context *context, u32 cycle, size_t header_length, void *header, void *data) { struct fwnet_device *dev; struct fw_iso_packet packet; __be16 *hdr_ptr; __be32 *buf_ptr; int retval; u32 length; u16 source_node_id; u32 specifier_id; u32 ver; unsigned long offset; unsigned long flags; dev = data; hdr_ptr = header; length = be16_to_cpup(hdr_ptr); spin_lock_irqsave(&dev->lock, flags); offset = dev->rcv_buffer_size * dev->broadcast_rcv_next_ptr; buf_ptr = dev->broadcast_rcv_buffer_ptrs[dev->broadcast_rcv_next_ptr++]; if (dev->broadcast_rcv_next_ptr == dev->num_broadcast_rcv_ptrs) dev->broadcast_rcv_next_ptr = 0; spin_unlock_irqrestore(&dev->lock, flags); specifier_id = 
(be32_to_cpu(buf_ptr[0]) & 0xffff) << 8 | (be32_to_cpu(buf_ptr[1]) & 0xff000000) >> 24; ver = be32_to_cpu(buf_ptr[1]) & 0xffffff; source_node_id = be32_to_cpu(buf_ptr[0]) >> 16; if (specifier_id == IANA_SPECIFIER_ID && (ver == RFC2734_SW_VERSION #if IS_ENABLED(CONFIG_IPV6) || ver == RFC3146_SW_VERSION #endif )) { buf_ptr += 2; length -= IEEE1394_GASP_HDR_SIZE; fwnet_incoming_packet(dev, buf_ptr, length, source_node_id, context->card->generation, true); } packet.payload_length = dev->rcv_buffer_size; packet.interrupt = 1; packet.skip = 0; packet.tag = 3; packet.sy = 0; packet.header_length = IEEE1394_GASP_HDR_SIZE; spin_lock_irqsave(&dev->lock, flags); retval = fw_iso_context_queue(dev->broadcast_rcv_context, &packet, &dev->broadcast_rcv_buffer, offset); spin_unlock_irqrestore(&dev->lock, flags); if (retval >= 0) fw_iso_context_queue_flush(dev->broadcast_rcv_context); else dev_err(&dev->netdev->dev, "requeue failed\n"); } static struct kmem_cache *fwnet_packet_task_cache; static void fwnet_free_ptask(struct fwnet_packet_task *ptask) { dev_kfree_skb_any(ptask->skb); kmem_cache_free(fwnet_packet_task_cache, ptask); } /* Caller must hold dev->lock. */ static void dec_queued_datagrams(struct fwnet_device *dev) { if (--dev->queued_datagrams == FWNET_MIN_QUEUED_DATAGRAMS) netif_wake_queue(dev->netdev); } static int fwnet_send_packet(struct fwnet_packet_task *ptask); static void fwnet_transmit_packet_done(struct fwnet_packet_task *ptask) { struct fwnet_device *dev = ptask->dev; struct sk_buff *skb = ptask->skb; unsigned long flags; bool free; spin_lock_irqsave(&dev->lock, flags); ptask->outstanding_pkts--; /* Check whether we or the networking TX soft-IRQ is last user. 
*/ free = (ptask->outstanding_pkts == 0 && ptask->enqueued); if (free) dec_queued_datagrams(dev); if (ptask->outstanding_pkts == 0) { dev->netdev->stats.tx_packets++; dev->netdev->stats.tx_bytes += skb->len; } spin_unlock_irqrestore(&dev->lock, flags); if (ptask->outstanding_pkts > 0) { u16 dg_size; u16 fg_off; u16 datagram_label; u16 lf; /* Update the ptask to point to the next fragment and send it */ lf = fwnet_get_hdr_lf(&ptask->hdr); switch (lf) { case RFC2374_HDR_LASTFRAG: case RFC2374_HDR_UNFRAG: default: dev_err(&dev->netdev->dev, "outstanding packet %x lf %x, header %x,%x\n", ptask->outstanding_pkts, lf, ptask->hdr.w0, ptask->hdr.w1); BUG(); case RFC2374_HDR_FIRSTFRAG: /* Set frag type here for future interior fragments */ dg_size = fwnet_get_hdr_dg_size(&ptask->hdr); fg_off = ptask->max_payload - RFC2374_FRAG_HDR_SIZE; datagram_label = fwnet_get_hdr_dgl(&ptask->hdr); break; case RFC2374_HDR_INTFRAG: dg_size = fwnet_get_hdr_dg_size(&ptask->hdr); fg_off = fwnet_get_hdr_fg_off(&ptask->hdr) + ptask->max_payload - RFC2374_FRAG_HDR_SIZE; datagram_label = fwnet_get_hdr_dgl(&ptask->hdr); break; } if (ptask->dest_node == IEEE1394_ALL_NODES) { skb_pull(skb, ptask->max_payload + IEEE1394_GASP_HDR_SIZE); } else { skb_pull(skb, ptask->max_payload); } if (ptask->outstanding_pkts > 1) { fwnet_make_sf_hdr(&ptask->hdr, RFC2374_HDR_INTFRAG, dg_size, fg_off, datagram_label); } else { fwnet_make_sf_hdr(&ptask->hdr, RFC2374_HDR_LASTFRAG, dg_size, fg_off, datagram_label); ptask->max_payload = skb->len + RFC2374_FRAG_HDR_SIZE; } fwnet_send_packet(ptask); } if (free) fwnet_free_ptask(ptask); } static void fwnet_transmit_packet_failed(struct fwnet_packet_task *ptask) { struct fwnet_device *dev = ptask->dev; unsigned long flags; bool free; spin_lock_irqsave(&dev->lock, flags); /* One fragment failed; don't try to send remaining fragments. */ ptask->outstanding_pkts = 0; /* Check whether we or the networking TX soft-IRQ is last user. 
*/ free = ptask->enqueued; if (free) dec_queued_datagrams(dev); dev->netdev->stats.tx_dropped++; dev->netdev->stats.tx_errors++; spin_unlock_irqrestore(&dev->lock, flags); if (free) fwnet_free_ptask(ptask); } static void fwnet_write_complete(struct fw_card *card, int rcode, void *payload, size_t length, void *data) { struct fwnet_packet_task *ptask = data; static unsigned long j; static int last_rcode, errors_skipped; if (rcode == RCODE_COMPLETE) { fwnet_transmit_packet_done(ptask); } else { if (printk_timed_ratelimit(&j, 1000) || rcode != last_rcode) { dev_err(&ptask->dev->netdev->dev, "fwnet_write_complete failed: %x (skipped %d)\n", rcode, errors_skipped); errors_skipped = 0; last_rcode = rcode; } else { errors_skipped++; } fwnet_transmit_packet_failed(ptask); } } static int fwnet_send_packet(struct fwnet_packet_task *ptask) { struct fwnet_device *dev; unsigned tx_len; struct rfc2734_header *bufhdr; unsigned long flags; bool free; dev = ptask->dev; tx_len = ptask->max_payload; switch (fwnet_get_hdr_lf(&ptask->hdr)) { case RFC2374_HDR_UNFRAG: bufhdr = (struct rfc2734_header *) skb_push(ptask->skb, RFC2374_UNFRAG_HDR_SIZE); put_unaligned_be32(ptask->hdr.w0, &bufhdr->w0); break; case RFC2374_HDR_FIRSTFRAG: case RFC2374_HDR_INTFRAG: case RFC2374_HDR_LASTFRAG: bufhdr = (struct rfc2734_header *) skb_push(ptask->skb, RFC2374_FRAG_HDR_SIZE); put_unaligned_be32(ptask->hdr.w0, &bufhdr->w0); put_unaligned_be32(ptask->hdr.w1, &bufhdr->w1); break; default: BUG(); } if (ptask->dest_node == IEEE1394_ALL_NODES) { u8 *p; int generation; int node_id; unsigned int sw_version; /* ptask->generation may not have been set yet */ generation = dev->card->generation; smp_rmb(); node_id = dev->card->node_id; switch (ptask->skb->protocol) { default: sw_version = RFC2734_SW_VERSION; break; #if IS_ENABLED(CONFIG_IPV6) case htons(ETH_P_IPV6): sw_version = RFC3146_SW_VERSION; #endif } p = skb_push(ptask->skb, IEEE1394_GASP_HDR_SIZE); put_unaligned_be32(node_id << 16 | IANA_SPECIFIER_ID >> 8, 
p); put_unaligned_be32((IANA_SPECIFIER_ID & 0xff) << 24 | sw_version, &p[4]); /* We should not transmit if broadcast_channel.valid == 0. */ fw_send_request(dev->card, &ptask->transaction, TCODE_STREAM_DATA, fw_stream_packet_destination_id(3, IEEE1394_BROADCAST_CHANNEL, 0), generation, SCODE_100, 0ULL, ptask->skb->data, tx_len + 8, fwnet_write_complete, ptask); spin_lock_irqsave(&dev->lock, flags); /* If the AT tasklet already ran, we may be last user. */ free = (ptask->outstanding_pkts == 0 && !ptask->enqueued); if (!free) ptask->enqueued = true; else dec_queued_datagrams(dev); spin_unlock_irqrestore(&dev->lock, flags); goto out; } fw_send_request(dev->card, &ptask->transaction, TCODE_WRITE_BLOCK_REQUEST, ptask->dest_node, ptask->generation, ptask->speed, ptask->fifo_addr, ptask->skb->data, tx_len, fwnet_write_complete, ptask); spin_lock_irqsave(&dev->lock, flags); /* If the AT tasklet already ran, we may be last user. */ free = (ptask->outstanding_pkts == 0 && !ptask->enqueued); if (!free) ptask->enqueued = true; else dec_queued_datagrams(dev); spin_unlock_irqrestore(&dev->lock, flags); netif_trans_update(dev->netdev); out: if (free) fwnet_free_ptask(ptask); return 0; } static void fwnet_fifo_stop(struct fwnet_device *dev) { if (dev->local_fifo == FWNET_NO_FIFO_ADDR) return; fw_core_remove_address_handler(&dev->handler); dev->local_fifo = FWNET_NO_FIFO_ADDR; } static int fwnet_fifo_start(struct fwnet_device *dev) { int retval; if (dev->local_fifo != FWNET_NO_FIFO_ADDR) return 0; dev->handler.length = 4096; dev->handler.address_callback = fwnet_receive_packet; dev->handler.callback_data = dev; retval = fw_core_add_address_handler(&dev->handler, &fw_high_memory_region); if (retval < 0) return retval; dev->local_fifo = dev->handler.offset; return 0; } static void __fwnet_broadcast_stop(struct fwnet_device *dev) { unsigned u; if (dev->broadcast_state != FWNET_BROADCAST_ERROR) { for (u = 0; u < FWNET_ISO_PAGE_COUNT; u++) kunmap(dev->broadcast_rcv_buffer.pages[u]); 
fw_iso_buffer_destroy(&dev->broadcast_rcv_buffer, dev->card); } if (dev->broadcast_rcv_context) { fw_iso_context_destroy(dev->broadcast_rcv_context); dev->broadcast_rcv_context = NULL; } kfree(dev->broadcast_rcv_buffer_ptrs); dev->broadcast_rcv_buffer_ptrs = NULL; dev->broadcast_state = FWNET_BROADCAST_ERROR; } static void fwnet_broadcast_stop(struct fwnet_device *dev) { if (dev->broadcast_state == FWNET_BROADCAST_ERROR) return; fw_iso_context_stop(dev->broadcast_rcv_context); __fwnet_broadcast_stop(dev); } static int fwnet_broadcast_start(struct fwnet_device *dev) { struct fw_iso_context *context; int retval; unsigned num_packets; unsigned max_receive; struct fw_iso_packet packet; unsigned long offset; void **ptrptr; unsigned u; if (dev->broadcast_state != FWNET_BROADCAST_ERROR) return 0; max_receive = 1U << (dev->card->max_receive + 1); num_packets = (FWNET_ISO_PAGE_COUNT * PAGE_SIZE) / max_receive; ptrptr = kmalloc(sizeof(void *) * num_packets, GFP_KERNEL); if (!ptrptr) { retval = -ENOMEM; goto failed; } dev->broadcast_rcv_buffer_ptrs = ptrptr; context = fw_iso_context_create(dev->card, FW_ISO_CONTEXT_RECEIVE, IEEE1394_BROADCAST_CHANNEL, dev->card->link_speed, 8, fwnet_receive_broadcast, dev); if (IS_ERR(context)) { retval = PTR_ERR(context); goto failed; } retval = fw_iso_buffer_init(&dev->broadcast_rcv_buffer, dev->card, FWNET_ISO_PAGE_COUNT, DMA_FROM_DEVICE); if (retval < 0) goto failed; dev->broadcast_state = FWNET_BROADCAST_STOPPED; for (u = 0; u < FWNET_ISO_PAGE_COUNT; u++) { void *ptr; unsigned v; ptr = kmap(dev->broadcast_rcv_buffer.pages[u]); for (v = 0; v < num_packets / FWNET_ISO_PAGE_COUNT; v++) *ptrptr++ = (void *) ((char *)ptr + v * max_receive); } dev->broadcast_rcv_context = context; packet.payload_length = max_receive; packet.interrupt = 1; packet.skip = 0; packet.tag = 3; packet.sy = 0; packet.header_length = IEEE1394_GASP_HDR_SIZE; offset = 0; for (u = 0; u < num_packets; u++) { retval = fw_iso_context_queue(context, &packet, 
&dev->broadcast_rcv_buffer, offset); if (retval < 0) goto failed; offset += max_receive; } dev->num_broadcast_rcv_ptrs = num_packets; dev->rcv_buffer_size = max_receive; dev->broadcast_rcv_next_ptr = 0U; retval = fw_iso_context_start(context, -1, 0, FW_ISO_CONTEXT_MATCH_ALL_TAGS); /* ??? sync */ if (retval < 0) goto failed; /* FIXME: adjust it according to the min. speed of all known peers? */ dev->broadcast_xmt_max_payload = IEEE1394_MAX_PAYLOAD_S100 - IEEE1394_GASP_HDR_SIZE - RFC2374_UNFRAG_HDR_SIZE; dev->broadcast_state = FWNET_BROADCAST_RUNNING; return 0; failed: __fwnet_broadcast_stop(dev); return retval; } static void set_carrier_state(struct fwnet_device *dev) { if (dev->peer_count > 1) netif_carrier_on(dev->netdev); else netif_carrier_off(dev->netdev); } /* ifup */ static int fwnet_open(struct net_device *net) { struct fwnet_device *dev = netdev_priv(net); int ret; ret = fwnet_broadcast_start(dev); if (ret) return ret; netif_start_queue(net); spin_lock_irq(&dev->lock); set_carrier_state(dev); spin_unlock_irq(&dev->lock); return 0; } /* ifdown */ static int fwnet_stop(struct net_device *net) { struct fwnet_device *dev = netdev_priv(net); netif_stop_queue(net); fwnet_broadcast_stop(dev); return 0; } static netdev_tx_t fwnet_tx(struct sk_buff *skb, struct net_device *net) { struct fwnet_header hdr_buf; struct fwnet_device *dev = netdev_priv(net); __be16 proto; u16 dest_node; unsigned max_payload; u16 dg_size; u16 *datagram_label_ptr; struct fwnet_packet_task *ptask; struct fwnet_peer *peer; unsigned long flags; spin_lock_irqsave(&dev->lock, flags); /* Can this happen? */ if (netif_queue_stopped(dev->netdev)) { spin_unlock_irqrestore(&dev->lock, flags); return NETDEV_TX_BUSY; } ptask = kmem_cache_alloc(fwnet_packet_task_cache, GFP_ATOMIC); if (ptask == NULL) goto fail; skb = skb_share_check(skb, GFP_ATOMIC); if (!skb) goto fail; /* * Make a copy of the driver-specific header. * We might need to rebuild the header on tx failure. 
*/ memcpy(&hdr_buf, skb->data, sizeof(hdr_buf)); proto = hdr_buf.h_proto; switch (proto) { case htons(ETH_P_ARP): case htons(ETH_P_IP): #if IS_ENABLED(CONFIG_IPV6) case htons(ETH_P_IPV6): #endif break; default: goto fail; } skb_pull(skb, sizeof(hdr_buf)); dg_size = skb->len; /* * Set the transmission type for the packet. ARP packets and IP * broadcast packets are sent via GASP. */ if (fwnet_hwaddr_is_multicast(hdr_buf.h_dest)) { max_payload = dev->broadcast_xmt_max_payload; datagram_label_ptr = &dev->broadcast_xmt_datagramlabel; ptask->fifo_addr = FWNET_NO_FIFO_ADDR; ptask->generation = 0; ptask->dest_node = IEEE1394_ALL_NODES; ptask->speed = SCODE_100; } else { union fwnet_hwaddr *ha = (union fwnet_hwaddr *)hdr_buf.h_dest; __be64 guid = get_unaligned(&ha->uc.uniq_id); u8 generation; peer = fwnet_peer_find_by_guid(dev, be64_to_cpu(guid)); if (!peer) goto fail; generation = peer->generation; dest_node = peer->node_id; max_payload = peer->max_payload; datagram_label_ptr = &peer->datagram_label; ptask->fifo_addr = fwnet_hwaddr_fifo(ha); ptask->generation = generation; ptask->dest_node = dest_node; ptask->speed = peer->speed; } ptask->hdr.w0 = 0; ptask->hdr.w1 = 0; ptask->skb = skb; ptask->dev = dev; /* Does it all fit in one packet? 
*/ if (dg_size <= max_payload) { fwnet_make_uf_hdr(&ptask->hdr, ntohs(proto)); ptask->outstanding_pkts = 1; max_payload = dg_size + RFC2374_UNFRAG_HDR_SIZE; } else { u16 datagram_label; max_payload -= RFC2374_FRAG_OVERHEAD; datagram_label = (*datagram_label_ptr)++; fwnet_make_ff_hdr(&ptask->hdr, ntohs(proto), dg_size, datagram_label); ptask->outstanding_pkts = DIV_ROUND_UP(dg_size, max_payload); max_payload += RFC2374_FRAG_HDR_SIZE; } if (++dev->queued_datagrams == FWNET_MAX_QUEUED_DATAGRAMS) netif_stop_queue(dev->netdev); spin_unlock_irqrestore(&dev->lock, flags); ptask->max_payload = max_payload; ptask->enqueued = 0; fwnet_send_packet(ptask); return NETDEV_TX_OK; fail: spin_unlock_irqrestore(&dev->lock, flags); if (ptask) kmem_cache_free(fwnet_packet_task_cache, ptask); if (skb != NULL) dev_kfree_skb(skb); net->stats.tx_dropped++; net->stats.tx_errors++; /* * FIXME: According to a patch from 2003-02-26, "returning non-zero * causes serious problems" here, allegedly. Before that patch, * -ERRNO was returned which is not appropriate under Linux 2.6. * Perhaps more needs to be done? Stop the queue in serious * conditions and restart it elsewhere? 
*/ return NETDEV_TX_OK; } static int fwnet_change_mtu(struct net_device *net, int new_mtu) { if (new_mtu < 68) return -EINVAL; net->mtu = new_mtu; return 0; } static const struct ethtool_ops fwnet_ethtool_ops = { .get_link = ethtool_op_get_link, }; static const struct net_device_ops fwnet_netdev_ops = { .ndo_open = fwnet_open, .ndo_stop = fwnet_stop, .ndo_start_xmit = fwnet_tx, .ndo_change_mtu = fwnet_change_mtu, }; static void fwnet_init_dev(struct net_device *net) { net->header_ops = &fwnet_header_ops; net->netdev_ops = &fwnet_netdev_ops; net->watchdog_timeo = 2 * HZ; net->flags = IFF_BROADCAST | IFF_MULTICAST; net->features = NETIF_F_HIGHDMA; net->addr_len = FWNET_ALEN; net->hard_header_len = FWNET_HLEN; net->type = ARPHRD_IEEE1394; net->tx_queue_len = FWNET_TX_QUEUE_LEN; net->ethtool_ops = &fwnet_ethtool_ops; } /* caller must hold fwnet_device_mutex */ static struct fwnet_device *fwnet_dev_find(struct fw_card *card) { struct fwnet_device *dev; list_for_each_entry(dev, &fwnet_device_list, dev_link) if (dev->card == card) return dev; return NULL; } static int fwnet_add_peer(struct fwnet_device *dev, struct fw_unit *unit, struct fw_device *device) { struct fwnet_peer *peer; peer = kmalloc(sizeof(*peer), GFP_KERNEL); if (!peer) return -ENOMEM; dev_set_drvdata(&unit->device, peer); peer->dev = dev; peer->guid = (u64)device->config_rom[3] << 32 | device->config_rom[4]; INIT_LIST_HEAD(&peer->pd_list); peer->pdg_size = 0; peer->datagram_label = 0; peer->speed = device->max_speed; peer->max_payload = fwnet_max_payload(device->max_rec, peer->speed); peer->generation = device->generation; smp_rmb(); peer->node_id = device->node_id; spin_lock_irq(&dev->lock); list_add_tail(&peer->peer_link, &dev->peer_list); dev->peer_count++; set_carrier_state(dev); spin_unlock_irq(&dev->lock); return 0; } static int fwnet_probe(struct fw_unit *unit, const struct ieee1394_device_id *id) { struct fw_device *device = fw_parent_device(unit); struct fw_card *card = device->card; struct 
net_device *net; bool allocated_netdev = false; struct fwnet_device *dev; unsigned max_mtu; int ret; union fwnet_hwaddr *ha; mutex_lock(&fwnet_device_mutex); dev = fwnet_dev_find(card); if (dev) { net = dev->netdev; goto have_dev; } net = alloc_netdev(sizeof(*dev), "firewire%d", NET_NAME_UNKNOWN, fwnet_init_dev); if (net == NULL) { mutex_unlock(&fwnet_device_mutex); return -ENOMEM; } allocated_netdev = true; SET_NETDEV_DEV(net, card->device); dev = netdev_priv(net); spin_lock_init(&dev->lock); dev->broadcast_state = FWNET_BROADCAST_ERROR; dev->broadcast_rcv_context = NULL; dev->broadcast_xmt_max_payload = 0; dev->broadcast_xmt_datagramlabel = 0; dev->local_fifo = FWNET_NO_FIFO_ADDR; dev->queued_datagrams = 0; INIT_LIST_HEAD(&dev->peer_list); dev->card = card; dev->netdev = net; ret = fwnet_fifo_start(dev); if (ret < 0) goto out; dev->local_fifo = dev->handler.offset; /* * Use the RFC 2734 default 1500 octets or the maximum payload * as initial MTU */ max_mtu = (1 << (card->max_receive + 1)) - sizeof(struct rfc2734_header) - IEEE1394_GASP_HDR_SIZE; net->mtu = min(1500U, max_mtu); /* Set our hardware address while we're at it */ ha = (union fwnet_hwaddr *)net->dev_addr; put_unaligned_be64(card->guid, &ha->uc.uniq_id); ha->uc.max_rec = dev->card->max_receive; ha->uc.sspd = dev->card->link_speed; put_unaligned_be16(dev->local_fifo >> 32, &ha->uc.fifo_hi); put_unaligned_be32(dev->local_fifo & 0xffffffff, &ha->uc.fifo_lo); memset(net->broadcast, -1, net->addr_len); ret = register_netdev(net); if (ret) goto out; list_add_tail(&dev->dev_link, &fwnet_device_list); dev_notice(&net->dev, "IP over IEEE 1394 on card %s\n", dev_name(card->device)); have_dev: ret = fwnet_add_peer(dev, unit, device); if (ret && allocated_netdev) { unregister_netdev(net); list_del(&dev->dev_link); out: fwnet_fifo_stop(dev); free_netdev(net); } mutex_unlock(&fwnet_device_mutex); return ret; } /* * FIXME abort partially sent fragmented datagrams, * discard partially received fragmented datagrams */ 
/*
 * Bus-reset/update hook: refresh the peer's node ID and bus generation
 * under the device lock so the transmit path sees a consistent pair.
 */
static void fwnet_update(struct fw_unit *unit)
{
	struct fw_device *device = fw_parent_device(unit);
	struct fwnet_peer *peer = dev_get_drvdata(&unit->device);
	int generation;

	generation = device->generation;

	spin_lock_irq(&peer->dev->lock);
	peer->node_id = device->node_id;
	peer->generation = generation;
	spin_unlock_irq(&peer->dev->lock);
}

/*
 * Unlink a peer from the device, drop any partial (reassembling)
 * datagrams it owned, update carrier state, and free it.
 * Takes and releases dev->lock internally.
 */
static void fwnet_remove_peer(struct fwnet_peer *peer, struct fwnet_device *dev)
{
	struct fwnet_partial_datagram *pd, *pd_next;

	spin_lock_irq(&dev->lock);
	list_del(&peer->peer_link);
	dev->peer_count--;
	set_carrier_state(dev);
	spin_unlock_irq(&dev->lock);

	list_for_each_entry_safe(pd, pd_next, &peer->pd_list, pd_link)
		fwnet_pd_delete(pd);

	kfree(peer);
}

/*
 * Unit removal.  When the last peer goes away the whole net_device is
 * torn down.  The teardown order is deliberate: unregister the netdev
 * (no new tx), stop the FIFO (no new rx), then wait up to 5 seconds for
 * queued datagrams to drain before freeing.
 */
static void fwnet_remove(struct fw_unit *unit)
{
	struct fwnet_peer *peer = dev_get_drvdata(&unit->device);
	struct fwnet_device *dev = peer->dev;
	struct net_device *net;
	int i;

	mutex_lock(&fwnet_device_mutex);

	net = dev->netdev;

	fwnet_remove_peer(peer, dev);

	if (list_empty(&dev->peer_list)) {
		unregister_netdev(net);

		fwnet_fifo_stop(dev);

		/* Bounded wait for in-flight transmissions to complete. */
		for (i = 0; dev->queued_datagrams && i < 5; i++)
			ssleep(1);
		WARN_ON(dev->queued_datagrams);
		list_del(&dev->dev_link);

		free_netdev(net);
	}

	mutex_unlock(&fwnet_device_mutex);
}

/* Match RFC 2734 (IPv4) and, when IPv6 is enabled, RFC 3146 units. */
static const struct ieee1394_device_id fwnet_id_table[] = {
	{
		.match_flags  = IEEE1394_MATCH_SPECIFIER_ID |
				IEEE1394_MATCH_VERSION,
		.specifier_id = IANA_SPECIFIER_ID,
		.version      = RFC2734_SW_VERSION,
	},
#if IS_ENABLED(CONFIG_IPV6)
	{
		.match_flags  = IEEE1394_MATCH_SPECIFIER_ID |
				IEEE1394_MATCH_VERSION,
		.specifier_id = IANA_SPECIFIER_ID,
		.version      = RFC3146_SW_VERSION,
	},
#endif
	{ }
};

static struct fw_driver fwnet_driver = {
	.driver = {
		.owner  = THIS_MODULE,
		.name   = KBUILD_MODNAME,
		.bus    = &fw_bus_type,
	},
	.probe    = fwnet_probe,
	.update   = fwnet_update,
	.remove   = fwnet_remove,
	.id_table = fwnet_id_table,
};

/* Config-ROM unit directory advertising RFC 2734 (IPv4) capability. */
static const u32 rfc2374_unit_directory_data[] = {
	0x00040000,	/* directory_length		*/
	0x1200005e,	/* unit_specifier_id: IANA	*/
	0x81000003,	/* textual descriptor offset	*/
	0x13000001,	/* unit_sw_version: RFC 2734	*/
	0x81000005,	/* textual descriptor offset	*/
	0x00030000,	/* descriptor_length		*/
	0x00000000,	/* text				*/
	0x00000000,	/* minimal ASCII, en		*/
	0x49414e41,	/* I A N A			*/
	0x00030000,	/* descriptor_length		*/
	0x00000000,	/* text				*/
	0x00000000,	/* minimal ASCII, en		*/
	0x49507634,	/* I P v 4			*/
};

static struct fw_descriptor rfc2374_unit_directory = {
	.length = ARRAY_SIZE(rfc2374_unit_directory_data),
	.key    = (CSR_DIRECTORY | CSR_UNIT) << 24,
	.data   = rfc2374_unit_directory_data
};

#if IS_ENABLED(CONFIG_IPV6)
/* Config-ROM unit directory advertising RFC 3146 (IPv6) capability. */
static const u32 rfc3146_unit_directory_data[] = {
	0x00040000,	/* directory_length		*/
	0x1200005e,	/* unit_specifier_id: IANA	*/
	0x81000003,	/* textual descriptor offset	*/
	0x13000002,	/* unit_sw_version: RFC 3146	*/
	0x81000005,	/* textual descriptor offset	*/
	0x00030000,	/* descriptor_length		*/
	0x00000000,	/* text				*/
	0x00000000,	/* minimal ASCII, en		*/
	0x49414e41,	/* I A N A			*/
	0x00030000,	/* descriptor_length		*/
	0x00000000,	/* text				*/
	0x00000000,	/* minimal ASCII, en		*/
	0x49507636,	/* I P v 6			*/
};

static struct fw_descriptor rfc3146_unit_directory = {
	.length = ARRAY_SIZE(rfc3146_unit_directory_data),
	.key    = (CSR_DIRECTORY | CSR_UNIT) << 24,
	.data   = rfc3146_unit_directory_data
};
#endif

/*
 * Module init: publish the config-ROM descriptor(s), create the packet-
 * task slab, and register the driver.  The goto-based unwind mirrors the
 * setup order in reverse; note that the "out" label is only compiled in
 * when CONFIG_IPV6 is enabled (it undoes the rfc3146 descriptor).
 */
static int __init fwnet_init(void)
{
	int err;

	err = fw_core_add_descriptor(&rfc2374_unit_directory);
	if (err)
		return err;
#if IS_ENABLED(CONFIG_IPV6)
	err = fw_core_add_descriptor(&rfc3146_unit_directory);
	if (err)
		goto out;
#endif

	fwnet_packet_task_cache = kmem_cache_create("packet_task",
			sizeof(struct fwnet_packet_task), 0, 0, NULL);
	if (!fwnet_packet_task_cache) {
		err = -ENOMEM;
		goto out2;
	}

	err = driver_register(&fwnet_driver.driver);
	if (!err)
		return 0;

	kmem_cache_destroy(fwnet_packet_task_cache);
out2:
#if IS_ENABLED(CONFIG_IPV6)
	fw_core_remove_descriptor(&rfc3146_unit_directory);
out:
#endif
	fw_core_remove_descriptor(&rfc2374_unit_directory);

	return err;
}
module_init(fwnet_init);

/* Module exit: teardown in exact reverse order of fwnet_init(). */
static void __exit fwnet_cleanup(void)
{
	driver_unregister(&fwnet_driver.driver);
	kmem_cache_destroy(fwnet_packet_task_cache);
#if IS_ENABLED(CONFIG_IPV6)
	fw_core_remove_descriptor(&rfc3146_unit_directory);
#endif
	fw_core_remove_descriptor(&rfc2374_unit_directory);
}
module_exit(fwnet_cleanup);

MODULE_AUTHOR("Jay Fenlason <fenlason@redhat.com>");
MODULE_DESCRIPTION("IP over IEEE1394 as per RFC 2734/3146");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(ieee1394, fwnet_id_table);
./CrossVul/dataset_final_sorted/CWE-284/c/bad_5346_0
crossvul-cpp_data_good_4811_0
/* * Copyright (C) the libgit2 contributors. All rights reserved. * * This file is part of libgit2, distributed under the GNU GPL v2 with * a Linking Exception. For full terms see the included COPYING file. */ #ifndef GIT_WINHTTP #include "git2.h" #include "http_parser.h" #include "buffer.h" #include "netops.h" #include "global.h" #include "remote.h" #include "smart.h" #include "auth.h" #include "auth_negotiate.h" #include "tls_stream.h" #include "socket_stream.h" #include "curl_stream.h" git_http_auth_scheme auth_schemes[] = { { GIT_AUTHTYPE_NEGOTIATE, "Negotiate", GIT_CREDTYPE_DEFAULT, git_http_auth_negotiate }, { GIT_AUTHTYPE_BASIC, "Basic", GIT_CREDTYPE_USERPASS_PLAINTEXT, git_http_auth_basic }, }; static const char *upload_pack_service = "upload-pack"; static const char *upload_pack_ls_service_url = "/info/refs?service=git-upload-pack"; static const char *upload_pack_service_url = "/git-upload-pack"; static const char *receive_pack_service = "receive-pack"; static const char *receive_pack_ls_service_url = "/info/refs?service=git-receive-pack"; static const char *receive_pack_service_url = "/git-receive-pack"; static const char *get_verb = "GET"; static const char *post_verb = "POST"; #define OWNING_SUBTRANSPORT(s) ((http_subtransport *)(s)->parent.subtransport) #define PARSE_ERROR_GENERIC -1 #define PARSE_ERROR_REPLAY -2 /** Look at the user field */ #define PARSE_ERROR_EXT -3 #define CHUNK_SIZE 4096 enum last_cb { NONE, FIELD, VALUE }; typedef struct { git_smart_subtransport_stream parent; const char *service; const char *service_url; char *redirect_url; const char *verb; char *chunk_buffer; unsigned chunk_buffer_len; unsigned sent_request : 1, received_response : 1, chunked : 1, redirect_count : 3; } http_stream; typedef struct { git_smart_subtransport parent; transport_smart *owner; git_stream *io; gitno_connection_data connection_data; bool connected; /* Parser structures */ http_parser parser; http_parser_settings settings; gitno_buffer parse_buffer; 
git_buf parse_header_name; git_buf parse_header_value; char parse_buffer_data[NETIO_BUFSIZE]; char *content_type; char *location; git_vector www_authenticate; enum last_cb last_cb; int parse_error; int error; unsigned parse_finished : 1; /* Authentication */ git_cred *cred; git_cred *url_cred; git_vector auth_contexts; } http_subtransport; typedef struct { http_stream *s; http_subtransport *t; /* Target buffer details from read() */ char *buffer; size_t buf_size; size_t *bytes_read; } parser_context; static bool credtype_match(git_http_auth_scheme *scheme, void *data) { unsigned int credtype = *(unsigned int *)data; return !!(scheme->credtypes & credtype); } static bool challenge_match(git_http_auth_scheme *scheme, void *data) { const char *scheme_name = scheme->name; const char *challenge = (const char *)data; size_t scheme_len; scheme_len = strlen(scheme_name); return (strncasecmp(challenge, scheme_name, scheme_len) == 0 && (challenge[scheme_len] == '\0' || challenge[scheme_len] == ' ')); } static int auth_context_match( git_http_auth_context **out, http_subtransport *t, bool (*scheme_match)(git_http_auth_scheme *scheme, void *data), void *data) { git_http_auth_scheme *scheme = NULL; git_http_auth_context *context = NULL, *c; size_t i; *out = NULL; for (i = 0; i < ARRAY_SIZE(auth_schemes); i++) { if (scheme_match(&auth_schemes[i], data)) { scheme = &auth_schemes[i]; break; } } if (!scheme) return 0; /* See if authentication has already started for this scheme */ git_vector_foreach(&t->auth_contexts, i, c) { if (c->type == scheme->type) { context = c; break; } } if (!context) { if (scheme->init_context(&context, &t->connection_data) < 0) return -1; else if (!context) return 0; else if (git_vector_insert(&t->auth_contexts, context) < 0) return -1; } *out = context; return 0; } static int apply_credentials(git_buf *buf, http_subtransport *t) { git_cred *cred = t->cred; git_http_auth_context *context; /* Apply the credentials given to us in the URL */ if (!cred && 
t->connection_data.user && t->connection_data.pass) { if (!t->url_cred && git_cred_userpass_plaintext_new(&t->url_cred, t->connection_data.user, t->connection_data.pass) < 0) return -1; cred = t->url_cred; } if (!cred) return 0; /* Get or create a context for the best scheme for this cred type */ if (auth_context_match(&context, t, credtype_match, &cred->credtype) < 0) return -1; return context->next_token(buf, context, cred); } static const char *user_agent(void) { const char *custom = git_libgit2__user_agent(); if (custom) return custom; return "libgit2 " LIBGIT2_VERSION; } static int gen_request( git_buf *buf, http_stream *s, size_t content_length) { http_subtransport *t = OWNING_SUBTRANSPORT(s); const char *path = t->connection_data.path ? t->connection_data.path : "/"; size_t i; git_buf_printf(buf, "%s %s%s HTTP/1.1\r\n", s->verb, path, s->service_url); git_buf_printf(buf, "User-Agent: git/1.0 (%s)\r\n", user_agent()); git_buf_printf(buf, "Host: %s\r\n", t->connection_data.host); if (s->chunked || content_length > 0) { git_buf_printf(buf, "Accept: application/x-git-%s-result\r\n", s->service); git_buf_printf(buf, "Content-Type: application/x-git-%s-request\r\n", s->service); if (s->chunked) git_buf_puts(buf, "Transfer-Encoding: chunked\r\n"); else git_buf_printf(buf, "Content-Length: %"PRIuZ "\r\n", content_length); } else git_buf_puts(buf, "Accept: */*\r\n"); for (i = 0; i < t->owner->custom_headers.count; i++) { if (t->owner->custom_headers.strings[i]) git_buf_printf(buf, "%s\r\n", t->owner->custom_headers.strings[i]); } /* Apply credentials to the request */ if (apply_credentials(buf, t) < 0) return -1; git_buf_puts(buf, "\r\n"); if (git_buf_oom(buf)) return -1; return 0; } static int parse_authenticate_response( git_vector *www_authenticate, http_subtransport *t, int *allowed_types) { git_http_auth_context *context; char *challenge; size_t i; git_vector_foreach(www_authenticate, i, challenge) { if (auth_context_match(&context, t, challenge_match, 
challenge) < 0) return -1; else if (!context) continue; if (context->set_challenge && context->set_challenge(context, challenge) < 0) return -1; *allowed_types |= context->credtypes; } return 0; } static int on_header_ready(http_subtransport *t) { git_buf *name = &t->parse_header_name; git_buf *value = &t->parse_header_value; if (!strcasecmp("Content-Type", git_buf_cstr(name))) { if (!t->content_type) { t->content_type = git__strdup(git_buf_cstr(value)); GITERR_CHECK_ALLOC(t->content_type); } } else if (!strcasecmp("WWW-Authenticate", git_buf_cstr(name))) { char *dup = git__strdup(git_buf_cstr(value)); GITERR_CHECK_ALLOC(dup); git_vector_insert(&t->www_authenticate, dup); } else if (!strcasecmp("Location", git_buf_cstr(name))) { if (!t->location) { t->location = git__strdup(git_buf_cstr(value)); GITERR_CHECK_ALLOC(t->location); } } return 0; } static int on_header_field(http_parser *parser, const char *str, size_t len) { parser_context *ctx = (parser_context *) parser->data; http_subtransport *t = ctx->t; /* Both parse_header_name and parse_header_value are populated * and ready for consumption */ if (VALUE == t->last_cb) if (on_header_ready(t) < 0) return t->parse_error = PARSE_ERROR_GENERIC; if (NONE == t->last_cb || VALUE == t->last_cb) git_buf_clear(&t->parse_header_name); if (git_buf_put(&t->parse_header_name, str, len) < 0) return t->parse_error = PARSE_ERROR_GENERIC; t->last_cb = FIELD; return 0; } static int on_header_value(http_parser *parser, const char *str, size_t len) { parser_context *ctx = (parser_context *) parser->data; http_subtransport *t = ctx->t; assert(NONE != t->last_cb); if (FIELD == t->last_cb) git_buf_clear(&t->parse_header_value); if (git_buf_put(&t->parse_header_value, str, len) < 0) return t->parse_error = PARSE_ERROR_GENERIC; t->last_cb = VALUE; return 0; } static int on_headers_complete(http_parser *parser) { parser_context *ctx = (parser_context *) parser->data; http_subtransport *t = ctx->t; http_stream *s = ctx->s; git_buf buf = 
GIT_BUF_INIT; int error = 0, no_callback = 0, allowed_auth_types = 0; /* Both parse_header_name and parse_header_value are populated * and ready for consumption. */ if (VALUE == t->last_cb) if (on_header_ready(t) < 0) return t->parse_error = PARSE_ERROR_GENERIC; /* Capture authentication headers which may be a 401 (authentication * is not complete) or a 200 (simply informing us that auth *is* * complete.) */ if (parse_authenticate_response(&t->www_authenticate, t, &allowed_auth_types) < 0) return t->parse_error = PARSE_ERROR_GENERIC; /* Check for an authentication failure. */ if (parser->status_code == 401 && get_verb == s->verb) { if (!t->owner->cred_acquire_cb) { no_callback = 1; } else { if (allowed_auth_types) { if (t->cred) { t->cred->free(t->cred); t->cred = NULL; } error = t->owner->cred_acquire_cb(&t->cred, t->owner->url, t->connection_data.user, allowed_auth_types, t->owner->cred_acquire_payload); if (error == GIT_PASSTHROUGH) { no_callback = 1; } else if (error < 0) { t->error = error; return t->parse_error = PARSE_ERROR_EXT; } else { assert(t->cred); if (!(t->cred->credtype & allowed_auth_types)) { giterr_set(GITERR_NET, "credentials callback returned an invalid cred type"); return t->parse_error = PARSE_ERROR_GENERIC; } /* Successfully acquired a credential. */ t->parse_error = PARSE_ERROR_REPLAY; return 0; } } } if (no_callback) { giterr_set(GITERR_NET, "authentication required but no callback set"); return t->parse_error = PARSE_ERROR_GENERIC; } } /* Check for a redirect. * Right now we only permit a redirect to the same hostname. 
*/ if ((parser->status_code == 301 || parser->status_code == 302 || (parser->status_code == 303 && get_verb == s->verb) || parser->status_code == 307) && t->location) { if (s->redirect_count >= 7) { giterr_set(GITERR_NET, "Too many redirects"); return t->parse_error = PARSE_ERROR_GENERIC; } if (gitno_connection_data_from_url(&t->connection_data, t->location, s->service_url) < 0) return t->parse_error = PARSE_ERROR_GENERIC; /* Set the redirect URL on the stream. This is a transfer of * ownership of the memory. */ if (s->redirect_url) git__free(s->redirect_url); s->redirect_url = t->location; t->location = NULL; t->connected = 0; s->redirect_count++; t->parse_error = PARSE_ERROR_REPLAY; return 0; } /* Check for a 200 HTTP status code. */ if (parser->status_code != 200) { giterr_set(GITERR_NET, "Unexpected HTTP status code: %d", parser->status_code); return t->parse_error = PARSE_ERROR_GENERIC; } /* The response must contain a Content-Type header. */ if (!t->content_type) { giterr_set(GITERR_NET, "No Content-Type header in response"); return t->parse_error = PARSE_ERROR_GENERIC; } /* The Content-Type header must match our expectation. 
*/ if (get_verb == s->verb) git_buf_printf(&buf, "application/x-git-%s-advertisement", ctx->s->service); else git_buf_printf(&buf, "application/x-git-%s-result", ctx->s->service); if (git_buf_oom(&buf)) return t->parse_error = PARSE_ERROR_GENERIC; if (strcmp(t->content_type, git_buf_cstr(&buf))) { git_buf_free(&buf); giterr_set(GITERR_NET, "Invalid Content-Type: %s", t->content_type); return t->parse_error = PARSE_ERROR_GENERIC; } git_buf_free(&buf); return 0; } static int on_message_complete(http_parser *parser) { parser_context *ctx = (parser_context *) parser->data; http_subtransport *t = ctx->t; t->parse_finished = 1; return 0; } static int on_body_fill_buffer(http_parser *parser, const char *str, size_t len) { parser_context *ctx = (parser_context *) parser->data; http_subtransport *t = ctx->t; /* If our goal is to replay the request (either an auth failure or * a redirect) then don't bother buffering since we're ignoring the * content anyway. */ if (t->parse_error == PARSE_ERROR_REPLAY) return 0; if (ctx->buf_size < len) { giterr_set(GITERR_NET, "Can't fit data in the buffer"); return t->parse_error = PARSE_ERROR_GENERIC; } memcpy(ctx->buffer, str, len); *(ctx->bytes_read) += len; ctx->buffer += len; ctx->buf_size -= len; return 0; } static void clear_parser_state(http_subtransport *t) { http_parser_init(&t->parser, HTTP_RESPONSE); gitno_buffer_setup_fromstream(t->io, &t->parse_buffer, t->parse_buffer_data, sizeof(t->parse_buffer_data)); t->last_cb = NONE; t->parse_error = 0; t->parse_finished = 0; git_buf_free(&t->parse_header_name); git_buf_init(&t->parse_header_name, 0); git_buf_free(&t->parse_header_value); git_buf_init(&t->parse_header_value, 0); git__free(t->content_type); t->content_type = NULL; git__free(t->location); t->location = NULL; git_vector_free_deep(&t->www_authenticate); } static int write_chunk(git_stream *io, const char *buffer, size_t len) { git_buf buf = GIT_BUF_INIT; /* Chunk header */ git_buf_printf(&buf, "%" PRIxZ "\r\n", len); if 
(git_buf_oom(&buf)) return -1; if (git_stream_write(io, buf.ptr, buf.size, 0) < 0) { git_buf_free(&buf); return -1; } git_buf_free(&buf); /* Chunk body */ if (len > 0 && git_stream_write(io, buffer, len, 0) < 0) return -1; /* Chunk footer */ if (git_stream_write(io, "\r\n", 2, 0) < 0) return -1; return 0; } static int http_connect(http_subtransport *t) { int error; char *proxy_url; if (t->connected && http_should_keep_alive(&t->parser) && t->parse_finished) return 0; if (t->io) { git_stream_close(t->io); git_stream_free(t->io); t->io = NULL; t->connected = 0; } if (t->connection_data.use_ssl) { error = git_tls_stream_new(&t->io, t->connection_data.host, t->connection_data.port); } else { #ifdef GIT_CURL error = git_curl_stream_new(&t->io, t->connection_data.host, t->connection_data.port); #else error = git_socket_stream_new(&t->io, t->connection_data.host, t->connection_data.port); #endif } if (error < 0) return error; GITERR_CHECK_VERSION(t->io, GIT_STREAM_VERSION, "git_stream"); if (git_stream_supports_proxy(t->io) && !git_remote__get_http_proxy(t->owner->owner, !!t->connection_data.use_ssl, &proxy_url)) { error = git_stream_set_proxy(t->io, proxy_url); git__free(proxy_url); if (error < 0) return error; } error = git_stream_connect(t->io); #if defined(GIT_OPENSSL) || defined(GIT_SECURE_TRANSPORT) || defined(GIT_CURL) if ((!error || error == GIT_ECERTIFICATE) && t->owner->certificate_check_cb != NULL && git_stream_is_encrypted(t->io)) { git_cert *cert; int is_valid = (error == GIT_OK); if ((error = git_stream_certificate(&cert, t->io)) < 0) return error; giterr_clear(); error = t->owner->certificate_check_cb(cert, is_valid, t->connection_data.host, t->owner->message_cb_payload); if (error < 0) { if (!giterr_last()) giterr_set(GITERR_NET, "user cancelled certificate check"); return error; } } #endif if (error < 0) return error; t->connected = 1; return 0; } static int http_stream_read( git_smart_subtransport_stream *stream, char *buffer, size_t buf_size, size_t 
*bytes_read) { http_stream *s = (http_stream *)stream; http_subtransport *t = OWNING_SUBTRANSPORT(s); parser_context ctx; size_t bytes_parsed; replay: *bytes_read = 0; assert(t->connected); if (!s->sent_request) { git_buf request = GIT_BUF_INIT; clear_parser_state(t); if (gen_request(&request, s, 0) < 0) return -1; if (git_stream_write(t->io, request.ptr, request.size, 0) < 0) { git_buf_free(&request); return -1; } git_buf_free(&request); s->sent_request = 1; } if (!s->received_response) { if (s->chunked) { assert(s->verb == post_verb); /* Flush, if necessary */ if (s->chunk_buffer_len > 0 && write_chunk(t->io, s->chunk_buffer, s->chunk_buffer_len) < 0) return -1; s->chunk_buffer_len = 0; /* Write the final chunk. */ if (git_stream_write(t->io, "0\r\n\r\n", 5, 0) < 0) return -1; } s->received_response = 1; } while (!*bytes_read && !t->parse_finished) { size_t data_offset; int error; /* * Make the parse_buffer think it's as full of data as * the buffer, so it won't try to recv more data than * we can put into it. * * data_offset is the actual data offset from which we * should tell the parser to start reading. */ if (buf_size >= t->parse_buffer.len) { t->parse_buffer.offset = 0; } else { t->parse_buffer.offset = t->parse_buffer.len - buf_size; } data_offset = t->parse_buffer.offset; if (gitno_recv(&t->parse_buffer) < 0) return -1; /* This call to http_parser_execute will result in invocations of the * on_* family of callbacks. The most interesting of these is * on_body_fill_buffer, which is called when data is ready to be copied * into the target buffer. We need to marshal the buffer, buf_size, and * bytes_read parameters to this callback. */ ctx.t = t; ctx.s = s; ctx.buffer = buffer; ctx.buf_size = buf_size; ctx.bytes_read = bytes_read; /* Set the context, call the parser, then unset the context. 
*/ t->parser.data = &ctx; bytes_parsed = http_parser_execute(&t->parser, &t->settings, t->parse_buffer.data + data_offset, t->parse_buffer.offset - data_offset); t->parser.data = NULL; /* If there was a handled authentication failure, then parse_error * will have signaled us that we should replay the request. */ if (PARSE_ERROR_REPLAY == t->parse_error) { s->sent_request = 0; if ((error = http_connect(t)) < 0) return error; goto replay; } if (t->parse_error == PARSE_ERROR_EXT) { return t->error; } if (t->parse_error < 0) return -1; if (bytes_parsed != t->parse_buffer.offset - data_offset) { giterr_set(GITERR_NET, "HTTP parser error: %s", http_errno_description((enum http_errno)t->parser.http_errno)); return -1; } } return 0; } static int http_stream_write_chunked( git_smart_subtransport_stream *stream, const char *buffer, size_t len) { http_stream *s = (http_stream *)stream; http_subtransport *t = OWNING_SUBTRANSPORT(s); assert(t->connected); /* Send the request, if necessary */ if (!s->sent_request) { git_buf request = GIT_BUF_INIT; clear_parser_state(t); if (gen_request(&request, s, 0) < 0) return -1; if (git_stream_write(t->io, request.ptr, request.size, 0) < 0) { git_buf_free(&request); return -1; } git_buf_free(&request); s->sent_request = 1; } if (len > CHUNK_SIZE) { /* Flush, if necessary */ if (s->chunk_buffer_len > 0) { if (write_chunk(t->io, s->chunk_buffer, s->chunk_buffer_len) < 0) return -1; s->chunk_buffer_len = 0; } /* Write chunk directly */ if (write_chunk(t->io, buffer, len) < 0) return -1; } else { /* Append as much to the buffer as we can */ int count = min(CHUNK_SIZE - s->chunk_buffer_len, len); if (!s->chunk_buffer) s->chunk_buffer = git__malloc(CHUNK_SIZE); memcpy(s->chunk_buffer + s->chunk_buffer_len, buffer, count); s->chunk_buffer_len += count; buffer += count; len -= count; /* Is the buffer full? 
If so, then flush */ if (CHUNK_SIZE == s->chunk_buffer_len) { if (write_chunk(t->io, s->chunk_buffer, s->chunk_buffer_len) < 0) return -1; s->chunk_buffer_len = 0; if (len > 0) { memcpy(s->chunk_buffer, buffer, len); s->chunk_buffer_len = len; } } } return 0; } static int http_stream_write_single( git_smart_subtransport_stream *stream, const char *buffer, size_t len) { http_stream *s = (http_stream *)stream; http_subtransport *t = OWNING_SUBTRANSPORT(s); git_buf request = GIT_BUF_INIT; assert(t->connected); if (s->sent_request) { giterr_set(GITERR_NET, "Subtransport configured for only one write"); return -1; } clear_parser_state(t); if (gen_request(&request, s, len) < 0) return -1; if (git_stream_write(t->io, request.ptr, request.size, 0) < 0) goto on_error; if (len && git_stream_write(t->io, buffer, len, 0) < 0) goto on_error; git_buf_free(&request); s->sent_request = 1; return 0; on_error: git_buf_free(&request); return -1; } static void http_stream_free(git_smart_subtransport_stream *stream) { http_stream *s = (http_stream *)stream; if (s->chunk_buffer) git__free(s->chunk_buffer); if (s->redirect_url) git__free(s->redirect_url); git__free(s); } static int http_stream_alloc(http_subtransport *t, git_smart_subtransport_stream **stream) { http_stream *s; if (!stream) return -1; s = git__calloc(sizeof(http_stream), 1); GITERR_CHECK_ALLOC(s); s->parent.subtransport = &t->parent; s->parent.read = http_stream_read; s->parent.write = http_stream_write_single; s->parent.free = http_stream_free; *stream = (git_smart_subtransport_stream *)s; return 0; } static int http_uploadpack_ls( http_subtransport *t, git_smart_subtransport_stream **stream) { http_stream *s; if (http_stream_alloc(t, stream) < 0) return -1; s = (http_stream *)*stream; s->service = upload_pack_service; s->service_url = upload_pack_ls_service_url; s->verb = get_verb; return 0; } static int http_uploadpack( http_subtransport *t, git_smart_subtransport_stream **stream) { http_stream *s; if 
(http_stream_alloc(t, stream) < 0) return -1; s = (http_stream *)*stream; s->service = upload_pack_service; s->service_url = upload_pack_service_url; s->verb = post_verb; return 0; } static int http_receivepack_ls( http_subtransport *t, git_smart_subtransport_stream **stream) { http_stream *s; if (http_stream_alloc(t, stream) < 0) return -1; s = (http_stream *)*stream; s->service = receive_pack_service; s->service_url = receive_pack_ls_service_url; s->verb = get_verb; return 0; } static int http_receivepack( http_subtransport *t, git_smart_subtransport_stream **stream) { http_stream *s; if (http_stream_alloc(t, stream) < 0) return -1; s = (http_stream *)*stream; /* Use Transfer-Encoding: chunked for this request */ s->chunked = 1; s->parent.write = http_stream_write_chunked; s->service = receive_pack_service; s->service_url = receive_pack_service_url; s->verb = post_verb; return 0; } static int http_action( git_smart_subtransport_stream **stream, git_smart_subtransport *subtransport, const char *url, git_smart_service_t action) { http_subtransport *t = (http_subtransport *)subtransport; int ret; if (!stream) return -1; if ((!t->connection_data.host || !t->connection_data.port || !t->connection_data.path) && (ret = gitno_connection_data_from_url(&t->connection_data, url, NULL)) < 0) return ret; if ((ret = http_connect(t)) < 0) return ret; switch (action) { case GIT_SERVICE_UPLOADPACK_LS: return http_uploadpack_ls(t, stream); case GIT_SERVICE_UPLOADPACK: return http_uploadpack(t, stream); case GIT_SERVICE_RECEIVEPACK_LS: return http_receivepack_ls(t, stream); case GIT_SERVICE_RECEIVEPACK: return http_receivepack(t, stream); } *stream = NULL; return -1; } static int http_close(git_smart_subtransport *subtransport) { http_subtransport *t = (http_subtransport *) subtransport; git_http_auth_context *context; size_t i; clear_parser_state(t); if (t->io) { git_stream_close(t->io); git_stream_free(t->io); t->io = NULL; } if (t->cred) { t->cred->free(t->cred); t->cred = NULL; 
} if (t->url_cred) { t->url_cred->free(t->url_cred); t->url_cred = NULL; } git_vector_foreach(&t->auth_contexts, i, context) { if (context->free) context->free(context); } git_vector_clear(&t->auth_contexts); gitno_connection_data_free_ptrs(&t->connection_data); memset(&t->connection_data, 0x0, sizeof(gitno_connection_data)); return 0; } static void http_free(git_smart_subtransport *subtransport) { http_subtransport *t = (http_subtransport *) subtransport; http_close(subtransport); git_vector_free(&t->auth_contexts); git__free(t); } int git_smart_subtransport_http(git_smart_subtransport **out, git_transport *owner, void *param) { http_subtransport *t; GIT_UNUSED(param); if (!out) return -1; t = git__calloc(sizeof(http_subtransport), 1); GITERR_CHECK_ALLOC(t); t->owner = (transport_smart *)owner; t->parent.action = http_action; t->parent.close = http_close; t->parent.free = http_free; t->settings.on_header_field = on_header_field; t->settings.on_header_value = on_header_value; t->settings.on_headers_complete = on_headers_complete; t->settings.on_body = on_body_fill_buffer; t->settings.on_message_complete = on_message_complete; *out = (git_smart_subtransport *) t; return 0; } #endif /* !GIT_WINHTTP */
./CrossVul/dataset_final_sorted/CWE-284/c/good_4811_0
crossvul-cpp_data_good_4786_2
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % V V IIIII FFFFF FFFFF % % V V I F F % % V V I FFF FFF % % V V I F F % % V IIIII F F % % % % % % Read/Write Khoros Visualization Image Format % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2016 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % http://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. */ #include "MagickCore/studio.h" #include "MagickCore/attribute.h" #include "MagickCore/blob.h" #include "MagickCore/blob-private.h" #include "MagickCore/cache.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colormap.h" #include "MagickCore/colormap-private.h" #include "MagickCore/colorspace.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/image.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/magick.h" #include "MagickCore/memory_.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/property.h" #include "MagickCore/quantum-private.h" #include "MagickCore/static.h" #include "MagickCore/string_.h" #include "MagickCore/module.h" /* Forward declarations. 
*/ static MagickBooleanType WriteVIFFImage(const ImageInfo *,Image *,ExceptionInfo *); /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I s V I F F % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IsVIFF() returns MagickTrue if the image format type, identified by the % magick string, is VIFF. % % The format of the IsVIFF method is: % % MagickBooleanType IsVIFF(const unsigned char *magick,const size_t length) % % A description of each parameter follows: % % o magick: compare image format pattern against these bytes. % % o length: Specifies the length of the magick string. % */ static MagickBooleanType IsVIFF(const unsigned char *magick,const size_t length) { if (length < 2) return(MagickFalse); if (memcmp(magick,"\253\001",2) == 0) return(MagickTrue); return(MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e a d V I F F I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ReadVIFFImage() reads a Khoros Visualization image file and returns % it. It allocates the memory necessary for the new Image structure and % returns a pointer to the new image. % % The format of the ReadVIFFImage method is: % % Image *ReadVIFFImage(const ImageInfo *image_info, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: Method ReadVIFFImage returns a pointer to the image after % reading. A null image is returned if there is a memory shortage or if % the image cannot be read. % % o image_info: the image info. % % o exception: return any errors or warnings in this structure. 
% */ static MagickBooleanType CheckMemoryOverflow(const size_t count, const size_t quantum) { size_t size; size=count*quantum; if ((count == 0) || (quantum != (size/count))) { errno=ENOMEM; return(MagickTrue); } return(MagickFalse); } static Image *ReadVIFFImage(const ImageInfo *image_info, ExceptionInfo *exception) { #define VFF_CM_genericRGB 15 #define VFF_CM_ntscRGB 1 #define VFF_CM_NONE 0 #define VFF_DEP_DECORDER 0x4 #define VFF_DEP_NSORDER 0x8 #define VFF_DES_RAW 0 #define VFF_LOC_IMPLICIT 1 #define VFF_MAPTYP_NONE 0 #define VFF_MAPTYP_1_BYTE 1 #define VFF_MAPTYP_2_BYTE 2 #define VFF_MAPTYP_4_BYTE 4 #define VFF_MAPTYP_FLOAT 5 #define VFF_MAPTYP_DOUBLE 7 #define VFF_MS_NONE 0 #define VFF_MS_ONEPERBAND 1 #define VFF_MS_SHARED 3 #define VFF_TYP_BIT 0 #define VFF_TYP_1_BYTE 1 #define VFF_TYP_2_BYTE 2 #define VFF_TYP_4_BYTE 4 #define VFF_TYP_FLOAT 5 #define VFF_TYP_DOUBLE 9 typedef struct _ViffInfo { unsigned char identifier, file_type, release, version, machine_dependency, reserve[3]; char comment[512]; unsigned int rows, columns, subrows; int x_offset, y_offset; float x_bits_per_pixel, y_bits_per_pixel; unsigned int location_type, location_dimension, number_of_images, number_data_bands, data_storage_type, data_encode_scheme, map_scheme, map_storage_type, map_rows, map_columns, map_subrows, map_enable, maps_per_cycle, color_space_model; } ViffInfo; double min_value, scale_factor, value; Image *image; int bit; MagickBooleanType status; MagickSizeType number_pixels; register ssize_t x; register Quantum *q; register ssize_t i; register unsigned char *p; size_t bytes_per_pixel, max_packets, quantum; ssize_t count, y; unsigned char *pixels; unsigned long lsb_first; ViffInfo viff_info; /* Open image file. 
*/ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); image=AcquireImage(image_info,exception); status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception); if (status == MagickFalse) { image=DestroyImageList(image); return((Image *) NULL); } /* Read VIFF header (1024 bytes). */ count=ReadBlob(image,1,&viff_info.identifier); do { /* Verify VIFF identifier. */ if ((count != 1) || ((unsigned char) viff_info.identifier != 0xab)) ThrowReaderException(CorruptImageError,"NotAVIFFImage"); /* Initialize VIFF image. */ (void) ReadBlob(image,sizeof(viff_info.file_type),&viff_info.file_type); (void) ReadBlob(image,sizeof(viff_info.release),&viff_info.release); (void) ReadBlob(image,sizeof(viff_info.version),&viff_info.version); (void) ReadBlob(image,sizeof(viff_info.machine_dependency), &viff_info.machine_dependency); (void) ReadBlob(image,sizeof(viff_info.reserve),viff_info.reserve); count=ReadBlob(image,512,(unsigned char *) viff_info.comment); viff_info.comment[511]='\0'; if (strlen(viff_info.comment) > 4) (void) SetImageProperty(image,"comment",viff_info.comment,exception); if ((viff_info.machine_dependency == VFF_DEP_DECORDER) || (viff_info.machine_dependency == VFF_DEP_NSORDER)) image->endian=LSBEndian; else image->endian=MSBEndian; viff_info.rows=ReadBlobLong(image); viff_info.columns=ReadBlobLong(image); viff_info.subrows=ReadBlobLong(image); viff_info.x_offset=(int) ReadBlobLong(image); viff_info.y_offset=(int) ReadBlobLong(image); viff_info.x_bits_per_pixel=(float) ReadBlobLong(image); viff_info.y_bits_per_pixel=(float) ReadBlobLong(image); viff_info.location_type=ReadBlobLong(image); viff_info.location_dimension=ReadBlobLong(image); viff_info.number_of_images=ReadBlobLong(image); 
viff_info.number_data_bands=ReadBlobLong(image); viff_info.data_storage_type=ReadBlobLong(image); viff_info.data_encode_scheme=ReadBlobLong(image); viff_info.map_scheme=ReadBlobLong(image); viff_info.map_storage_type=ReadBlobLong(image); viff_info.map_rows=ReadBlobLong(image); viff_info.map_columns=ReadBlobLong(image); viff_info.map_subrows=ReadBlobLong(image); viff_info.map_enable=ReadBlobLong(image); viff_info.maps_per_cycle=ReadBlobLong(image); viff_info.color_space_model=ReadBlobLong(image); for (i=0; i < 420; i++) (void) ReadBlobByte(image); if (EOFBlob(image) != MagickFalse) ThrowReaderException(CorruptImageError,"UnexpectedEndOfFile"); image->columns=viff_info.rows; image->rows=viff_info.columns; image->depth=viff_info.x_bits_per_pixel <= 8 ? 8UL : MAGICKCORE_QUANTUM_DEPTH; /* Verify that we can read this VIFF image. */ number_pixels=(MagickSizeType) viff_info.columns*viff_info.rows; if (number_pixels != (size_t) number_pixels) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); if (number_pixels == 0) ThrowReaderException(CoderError,"ImageColumnOrRowSizeIsNotSupported"); if ((viff_info.number_data_bands < 1) || (viff_info.number_data_bands > 4)) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); if ((viff_info.data_storage_type != VFF_TYP_BIT) && (viff_info.data_storage_type != VFF_TYP_1_BYTE) && (viff_info.data_storage_type != VFF_TYP_2_BYTE) && (viff_info.data_storage_type != VFF_TYP_4_BYTE) && (viff_info.data_storage_type != VFF_TYP_FLOAT) && (viff_info.data_storage_type != VFF_TYP_DOUBLE)) ThrowReaderException(CoderError,"DataStorageTypeIsNotSupported"); if (viff_info.data_encode_scheme != VFF_DES_RAW) ThrowReaderException(CoderError,"DataEncodingSchemeIsNotSupported"); if ((viff_info.map_storage_type != VFF_MAPTYP_NONE) && (viff_info.map_storage_type != VFF_MAPTYP_1_BYTE) && (viff_info.map_storage_type != VFF_MAPTYP_2_BYTE) && (viff_info.map_storage_type != VFF_MAPTYP_4_BYTE) && (viff_info.map_storage_type != 
VFF_MAPTYP_FLOAT) && (viff_info.map_storage_type != VFF_MAPTYP_DOUBLE)) ThrowReaderException(CoderError,"MapStorageTypeIsNotSupported"); if ((viff_info.color_space_model != VFF_CM_NONE) && (viff_info.color_space_model != VFF_CM_ntscRGB) && (viff_info.color_space_model != VFF_CM_genericRGB)) ThrowReaderException(CoderError,"ColorspaceModelIsNotSupported"); if (viff_info.location_type != VFF_LOC_IMPLICIT) ThrowReaderException(CoderError,"LocationTypeIsNotSupported"); if (viff_info.number_of_images != 1) ThrowReaderException(CoderError,"NumberOfImagesIsNotSupported"); if (viff_info.map_rows == 0) viff_info.map_scheme=VFF_MS_NONE; switch ((int) viff_info.map_scheme) { case VFF_MS_NONE: { if (viff_info.number_data_bands < 3) { /* Create linear color ramp. */ if (viff_info.data_storage_type == VFF_TYP_BIT) image->colors=2; else if (viff_info.data_storage_type == VFF_MAPTYP_1_BYTE) image->colors=256UL; else image->colors=image->depth <= 8 ? 256UL : 65536UL; status=AcquireImageColormap(image,image->colors,exception); if (status == MagickFalse) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); } break; } case VFF_MS_ONEPERBAND: case VFF_MS_SHARED: { unsigned char *viff_colormap; /* Allocate VIFF colormap. 
*/ switch ((int) viff_info.map_storage_type) { case VFF_MAPTYP_1_BYTE: bytes_per_pixel=1; break; case VFF_MAPTYP_2_BYTE: bytes_per_pixel=2; break; case VFF_MAPTYP_4_BYTE: bytes_per_pixel=4; break; case VFF_MAPTYP_FLOAT: bytes_per_pixel=4; break; case VFF_MAPTYP_DOUBLE: bytes_per_pixel=8; break; default: bytes_per_pixel=1; break; } image->colors=viff_info.map_columns; if (AcquireImageColormap(image,image->colors,exception) == MagickFalse) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); if (viff_info.map_rows > (viff_info.map_rows*bytes_per_pixel*sizeof(*viff_colormap))) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); viff_colormap=(unsigned char *) AcquireQuantumMemory(image->colors, viff_info.map_rows*bytes_per_pixel*sizeof(*viff_colormap)); if (viff_colormap == (unsigned char *) NULL) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); /* Read VIFF raster colormap. */ count=ReadBlob(image,bytes_per_pixel*image->colors*viff_info.map_rows, viff_colormap); lsb_first=1; if (*(char *) &lsb_first && ((viff_info.machine_dependency != VFF_DEP_DECORDER) && (viff_info.machine_dependency != VFF_DEP_NSORDER))) switch ((int) viff_info.map_storage_type) { case VFF_MAPTYP_2_BYTE: { MSBOrderShort(viff_colormap,(bytes_per_pixel*image->colors* viff_info.map_rows)); break; } case VFF_MAPTYP_4_BYTE: case VFF_MAPTYP_FLOAT: { MSBOrderLong(viff_colormap,(bytes_per_pixel*image->colors* viff_info.map_rows)); break; } default: break; } for (i=0; i < (ssize_t) (viff_info.map_rows*image->colors); i++) { switch ((int) viff_info.map_storage_type) { case VFF_MAPTYP_2_BYTE: value=1.0*((short *) viff_colormap)[i]; break; case VFF_MAPTYP_4_BYTE: value=1.0*((int *) viff_colormap)[i]; break; case VFF_MAPTYP_FLOAT: value=((float *) viff_colormap)[i]; break; case VFF_MAPTYP_DOUBLE: value=((double *) viff_colormap)[i]; break; default: value=1.0*viff_colormap[i]; break; } if (i < (ssize_t) image->colors) { 
image->colormap[i].red=ScaleCharToQuantum((unsigned char) value); image->colormap[i].green= ScaleCharToQuantum((unsigned char) value); image->colormap[i].blue=ScaleCharToQuantum((unsigned char) value); } else if (i < (ssize_t) (2*image->colors)) image->colormap[i % image->colors].green= ScaleCharToQuantum((unsigned char) value); else if (i < (ssize_t) (3*image->colors)) image->colormap[i % image->colors].blue= ScaleCharToQuantum((unsigned char) value); } viff_colormap=(unsigned char *) RelinquishMagickMemory(viff_colormap); break; } default: ThrowReaderException(CoderError,"ColormapTypeNotSupported"); } /* Initialize image structure. */ image->alpha_trait=viff_info.number_data_bands == 4 ? BlendPixelTrait : UndefinedPixelTrait; image->storage_class=(viff_info.number_data_bands < 3 ? PseudoClass : DirectClass); image->columns=viff_info.rows; image->rows=viff_info.columns; if ((image_info->ping != MagickFalse) && (image_info->number_scenes != 0)) if (image->scene >= (image_info->scene+image_info->number_scenes-1)) break; status=SetImageExtent(image,image->columns,image->rows,exception); if (status == MagickFalse) return(DestroyImageList(image)); /* Allocate VIFF pixels. 
*/ switch ((int) viff_info.data_storage_type) { case VFF_TYP_2_BYTE: bytes_per_pixel=2; break; case VFF_TYP_4_BYTE: bytes_per_pixel=4; break; case VFF_TYP_FLOAT: bytes_per_pixel=4; break; case VFF_TYP_DOUBLE: bytes_per_pixel=8; break; default: bytes_per_pixel=1; break; } if (viff_info.data_storage_type == VFF_TYP_BIT) { if (CheckMemoryOverflow((image->columns+7UL) >> 3UL,image->rows) != MagickFalse) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); max_packets=((image->columns+7UL) >> 3UL)*image->rows; } else { if (CheckMemoryOverflow(number_pixels,viff_info.number_data_bands) != MagickFalse) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); max_packets=(size_t) (number_pixels*viff_info.number_data_bands); } pixels=(unsigned char *) AcquireQuantumMemory(MagickMax(number_pixels, max_packets),bytes_per_pixel*sizeof(*pixels)); if (pixels == (unsigned char *) NULL) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); count=ReadBlob(image,bytes_per_pixel*max_packets,pixels); lsb_first=1; if (*(char *) &lsb_first && ((viff_info.machine_dependency != VFF_DEP_DECORDER) && (viff_info.machine_dependency != VFF_DEP_NSORDER))) switch ((int) viff_info.data_storage_type) { case VFF_TYP_2_BYTE: { MSBOrderShort(pixels,bytes_per_pixel*max_packets); break; } case VFF_TYP_4_BYTE: case VFF_TYP_FLOAT: { MSBOrderLong(pixels,bytes_per_pixel*max_packets); break; } default: break; } min_value=0.0; scale_factor=1.0; if ((viff_info.data_storage_type != VFF_TYP_1_BYTE) && (viff_info.map_scheme == VFF_MS_NONE)) { double max_value; /* Determine scale factor. 
*/ switch ((int) viff_info.data_storage_type) { case VFF_TYP_2_BYTE: value=1.0*((short *) pixels)[0]; break; case VFF_TYP_4_BYTE: value=1.0*((int *) pixels)[0]; break; case VFF_TYP_FLOAT: value=((float *) pixels)[0]; break; case VFF_TYP_DOUBLE: value=((double *) pixels)[0]; break; default: value=1.0*pixels[0]; break; } max_value=value; min_value=value; for (i=0; i < (ssize_t) max_packets; i++) { switch ((int) viff_info.data_storage_type) { case VFF_TYP_2_BYTE: value=1.0*((short *) pixels)[i]; break; case VFF_TYP_4_BYTE: value=1.0*((int *) pixels)[i]; break; case VFF_TYP_FLOAT: value=((float *) pixels)[i]; break; case VFF_TYP_DOUBLE: value=((double *) pixels)[i]; break; default: value=1.0*pixels[i]; break; } if (value > max_value) max_value=value; else if (value < min_value) min_value=value; } if ((min_value == 0) && (max_value == 0)) scale_factor=0; else if (min_value == max_value) { scale_factor=(double) QuantumRange/min_value; min_value=0; } else scale_factor=(double) QuantumRange/(max_value-min_value); } /* Convert pixels to Quantum size. */ p=(unsigned char *) pixels; for (i=0; i < (ssize_t) max_packets; i++) { switch ((int) viff_info.data_storage_type) { case VFF_TYP_2_BYTE: value=1.0*((short *) pixels)[i]; break; case VFF_TYP_4_BYTE: value=1.0*((int *) pixels)[i]; break; case VFF_TYP_FLOAT: value=((float *) pixels)[i]; break; case VFF_TYP_DOUBLE: value=((double *) pixels)[i]; break; default: value=1.0*pixels[i]; break; } if (viff_info.map_scheme == VFF_MS_NONE) { value=(value-min_value)*scale_factor; if (value > QuantumRange) value=QuantumRange; else if (value < 0) value=0; } *p=(unsigned char) ((Quantum) value); p++; } /* Convert VIFF raster image to pixel packets. */ p=(unsigned char *) pixels; if (viff_info.data_storage_type == VFF_TYP_BIT) { /* Convert bitmap scanline. 
*/ for (y=0; y < (ssize_t) image->rows; y++) { q=QueueAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) break; for (x=0; x < (ssize_t) (image->columns-7); x+=8) { for (bit=0; bit < 8; bit++) { quantum=(size_t) ((*p) & (0x01 << bit) ? 0 : 1); SetPixelRed(image,quantum == 0 ? 0 : QuantumRange,q); SetPixelGreen(image,quantum == 0 ? 0 : QuantumRange,q); SetPixelBlue(image,quantum == 0 ? 0 : QuantumRange,q); if (image->storage_class == PseudoClass) SetPixelIndex(image,(Quantum) quantum,q); q+=GetPixelChannels(image); } p++; } if ((image->columns % 8) != 0) { for (bit=0; bit < (int) (image->columns % 8); bit++) { quantum=(size_t) ((*p) & (0x01 << bit) ? 0 : 1); SetPixelRed(image,quantum == 0 ? 0 : QuantumRange,q); SetPixelGreen(image,quantum == 0 ? 0 : QuantumRange,q); SetPixelBlue(image,quantum == 0 ? 0 : QuantumRange,q); if (image->storage_class == PseudoClass) SetPixelIndex(image,(Quantum) quantum,q); q+=GetPixelChannels(image); } p++; } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; if (image->previous == (Image *) NULL) { status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y, image->rows); if (status == MagickFalse) break; } } } else if (image->storage_class == PseudoClass) for (y=0; y < (ssize_t) image->rows; y++) { q=QueueAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { SetPixelIndex(image,*p++,q); q+=GetPixelChannels(image); } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; if (image->previous == (Image *) NULL) { status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y, image->rows); if (status == MagickFalse) break; } } else { /* Convert DirectColor scanline. 
*/ number_pixels=(MagickSizeType) image->columns*image->rows; for (y=0; y < (ssize_t) image->rows; y++) { q=QueueAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { SetPixelRed(image,ScaleCharToQuantum(*p),q); SetPixelGreen(image,ScaleCharToQuantum(*(p+number_pixels)),q); SetPixelBlue(image,ScaleCharToQuantum(*(p+2*number_pixels)),q); if (image->colors != 0) { ssize_t index; index=(ssize_t) GetPixelRed(image,q); SetPixelRed(image,image->colormap[ ConstrainColormapIndex(image,index,exception)].red,q); index=(ssize_t) GetPixelGreen(image,q); SetPixelGreen(image,image->colormap[ ConstrainColormapIndex(image,index,exception)].green,q); index=(ssize_t) GetPixelBlue(image,q); SetPixelBlue(image,image->colormap[ ConstrainColormapIndex(image,index,exception)].blue,q); } SetPixelAlpha(image,image->alpha_trait != UndefinedPixelTrait ? ScaleCharToQuantum(*(p+number_pixels*3)) : OpaqueAlpha,q); p++; q+=GetPixelChannels(image); } if (SyncAuthenticPixels(image,exception) == MagickFalse) break; if (image->previous == (Image *) NULL) { status=SetImageProgress(image,LoadImageTag,(MagickOffsetType) y, image->rows); if (status == MagickFalse) break; } } } pixels=(unsigned char *) RelinquishMagickMemory(pixels); if (image->storage_class == PseudoClass) (void) SyncImage(image,exception); if (EOFBlob(image) != MagickFalse) { ThrowFileException(exception,CorruptImageError,"UnexpectedEndOfFile", image->filename); break; } /* Proceed to next image. */ if (image_info->number_scenes != 0) if (image->scene >= (image_info->scene+image_info->number_scenes-1)) break; count=ReadBlob(image,1,&viff_info.identifier); if ((count != 0) && (viff_info.identifier == 0xab)) { /* Allocate next image structure. 
*/ AcquireNextImage(image_info,image,exception); if (GetNextImageInList(image) == (Image *) NULL) { image=DestroyImageList(image); return((Image *) NULL); } image=SyncNextImageInList(image); status=SetImageProgress(image,LoadImagesTag,TellBlob(image), GetBlobSize(image)); if (status == MagickFalse) break; } } while ((count != 0) && (viff_info.identifier == 0xab)); (void) CloseBlob(image); return(GetFirstImageInList(image)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e g i s t e r V I F F I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RegisterVIFFImage() adds properties for the VIFF image format to % the list of supported formats. The properties include the image format % tag, a method to read and/or write the format, whether the format % supports the saving of more than one frame to the same file or blob, % whether the format supports native in-memory I/O, and a brief % description of the format. 
% % The format of the RegisterVIFFImage method is: % % size_t RegisterVIFFImage(void) % */ ModuleExport size_t RegisterVIFFImage(void) { MagickInfo *entry; entry=AcquireMagickInfo("VIFF","VIFF","Khoros Visualization image"); entry->decoder=(DecodeImageHandler *) ReadVIFFImage; entry->encoder=(EncodeImageHandler *) WriteVIFFImage; entry->magick=(IsImageFormatHandler *) IsVIFF; (void) RegisterMagickInfo(entry); entry=AcquireMagickInfo("VIFF","XV","Khoros Visualization image"); entry->decoder=(DecodeImageHandler *) ReadVIFFImage; entry->encoder=(EncodeImageHandler *) WriteVIFFImage; (void) RegisterMagickInfo(entry); return(MagickImageCoderSignature); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % U n r e g i s t e r V I F F I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % UnregisterVIFFImage() removes format registrations made by the % VIFF module from the list of supported formats. % % The format of the UnregisterVIFFImage method is: % % UnregisterVIFFImage(void) % */ ModuleExport void UnregisterVIFFImage(void) { (void) UnregisterMagickInfo("VIFF"); (void) UnregisterMagickInfo("XV"); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % W r i t e V I F F I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % WriteVIFFImage() writes an image to a file in the VIFF image format. % % The format of the WriteVIFFImage method is: % % MagickBooleanType WriteVIFFImage(const ImageInfo *image_info, % Image *image,ExceptionInfo *exception) % % A description of each parameter follows. % % o image_info: the image info. % % o image: The image. % % o exception: return any errors or warnings in this structure. 
%
*/
static MagickBooleanType WriteVIFFImage(const ImageInfo *image_info,
  Image *image,ExceptionInfo *exception)
{
#define VFF_CM_genericRGB  15
#define VFF_CM_NONE  0
#define VFF_DEP_IEEEORDER  0x2
#define VFF_DES_RAW  0
#define VFF_LOC_IMPLICIT  1
#define VFF_MAPTYP_NONE  0
#define VFF_MAPTYP_1_BYTE  1
#define VFF_MS_NONE  0
#define VFF_MS_ONEPERBAND  1
#define VFF_TYP_BIT  0
#define VFF_TYP_1_BYTE  1

  /*
    On-disk VIFF header layout mirror; fields are written out individually
    (MSB order) below, so the struct itself is never dumped wholesale.
  */
  typedef struct _ViffInfo
  {
    char
      identifier,
      file_type,
      release,
      version,
      machine_dependency,
      reserve[3],
      comment[512];

    size_t
      rows,
      columns,
      subrows;

    int
      x_offset,
      y_offset;

    unsigned int
      x_bits_per_pixel,
      y_bits_per_pixel,
      location_type,
      location_dimension,
      number_of_images,
      number_data_bands,
      data_storage_type,
      data_encode_scheme,
      map_scheme,
      map_storage_type,
      map_rows,
      map_columns,
      map_subrows,
      map_enable,
      maps_per_cycle,
      color_space_model;
  } ViffInfo;

  const char
    *value;

  MagickBooleanType
    status;

  MagickOffsetType
    scene;

  MagickSizeType
    number_pixels,
    packets;

  MemoryInfo
    *pixel_info;

  register const Quantum
    *p;

  register ssize_t
    x;

  register ssize_t
    i;

  register unsigned char
    *q;

  ssize_t
    y;

  unsigned char
    *pixels;

  ViffInfo
    viff_info;

  /*
    Open output image file.
  */
  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickCoreSignature);
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  status=OpenBlob(image_info,image,WriteBinaryBlobMode,exception);
  if (status == MagickFalse)
    return(status);
  (void) ResetMagickMemory(&viff_info,0,sizeof(ViffInfo));
  scene=0;
  do
  {
    /*
      Initialize VIFF image structure.
    */
    (void) TransformImageColorspace(image,sRGBColorspace,exception);
    DisableMSCWarning(4310)
    viff_info.identifier=(char) 0xab;
    RestoreMSCWarning
    viff_info.file_type=1;
    viff_info.release=1;
    viff_info.version=3;
    viff_info.machine_dependency=VFF_DEP_IEEEORDER;  /* IEEE byte ordering */
    *viff_info.comment='\0';
    value=GetImageProperty(image,"comment",exception);
    if (value != (const char *) NULL)
      /* Comment field on disk is 512 bytes; clamp to 511 chars + NUL. */
      (void) CopyMagickString(viff_info.comment,value,MagickMin(strlen(value),
        511)+1);
    /* NOTE(review): rows/columns appear transposed here, mirroring the same
       swap in the reader -- confirm against the VIFF specification. */
    viff_info.rows=image->columns;
    viff_info.columns=image->rows;
    viff_info.subrows=0;
    viff_info.x_offset=(~0);
    viff_info.y_offset=(~0);
    viff_info.x_bits_per_pixel=0;
    viff_info.y_bits_per_pixel=0;
    viff_info.location_type=VFF_LOC_IMPLICIT;
    viff_info.location_dimension=0;
    viff_info.number_of_images=1;
    viff_info.data_encode_scheme=VFF_DES_RAW;
    viff_info.map_scheme=VFF_MS_NONE;
    viff_info.map_storage_type=VFF_MAPTYP_NONE;
    viff_info.map_rows=0;
    viff_info.map_columns=0;
    viff_info.map_subrows=0;
    viff_info.map_enable=1;  /* no colormap */
    viff_info.maps_per_cycle=0;
    number_pixels=(MagickSizeType) image->columns*image->rows;
    if (image->storage_class == DirectClass)
      {
        /*
          Full color VIFF raster: 3 planar bands (RGB), 4 with alpha.
        */
        viff_info.number_data_bands=image->alpha_trait != UndefinedPixelTrait ?
          4U : 3U;
        viff_info.color_space_model=VFF_CM_genericRGB;
        viff_info.data_storage_type=VFF_TYP_1_BYTE;
        packets=viff_info.number_data_bands*number_pixels;
      }
    else
      {
        viff_info.number_data_bands=1;
        viff_info.color_space_model=VFF_CM_NONE;
        viff_info.data_storage_type=VFF_TYP_1_BYTE;
        packets=number_pixels;
        if (SetImageGray(image,exception) == MagickFalse)
          {
            /*
              Colormapped VIFF raster: one band of indexes plus a 3-row
              (R,G,B) colormap written separately below.
            */
            viff_info.map_scheme=VFF_MS_ONEPERBAND;
            viff_info.map_storage_type=VFF_MAPTYP_1_BYTE;
            viff_info.map_rows=3;
            viff_info.map_columns=(unsigned int) image->colors;
          }
        else
          if (image->colors <= 2)
            {
              /*
                Monochrome VIFF raster: packed 8 pixels per byte.
              */
              viff_info.data_storage_type=VFF_TYP_BIT;
              packets=((image->columns+7) >> 3)*image->rows;
            }
      }
    /*
      Write VIFF image header (pad to 1024 bytes).
    */
    (void) WriteBlob(image,sizeof(viff_info.identifier),(unsigned char *)
      &viff_info.identifier);
    (void) WriteBlob(image,sizeof(viff_info.file_type),(unsigned char *)
      &viff_info.file_type);
    (void) WriteBlob(image,sizeof(viff_info.release),(unsigned char *)
      &viff_info.release);
    (void) WriteBlob(image,sizeof(viff_info.version),(unsigned char *)
      &viff_info.version);
    (void) WriteBlob(image,sizeof(viff_info.machine_dependency),
      (unsigned char *) &viff_info.machine_dependency);
    (void) WriteBlob(image,sizeof(viff_info.reserve),(unsigned char *)
      viff_info.reserve);
    (void) WriteBlob(image,512,(unsigned char *) viff_info.comment);
    (void) WriteBlobMSBLong(image,(unsigned int) viff_info.rows);
    (void) WriteBlobMSBLong(image,(unsigned int) viff_info.columns);
    (void) WriteBlobMSBLong(image,(unsigned int) viff_info.subrows);
    (void) WriteBlobMSBLong(image,(unsigned int) viff_info.x_offset);
    (void) WriteBlobMSBLong(image,(unsigned int) viff_info.y_offset);
    viff_info.x_bits_per_pixel=(unsigned int) ((63 << 24) | (128 << 16));
    (void) WriteBlobMSBLong(image,(unsigned int) viff_info.x_bits_per_pixel);
    viff_info.y_bits_per_pixel=(unsigned int) ((63 << 24) | (128 << 16));
    (void) WriteBlobMSBLong(image,(unsigned int) viff_info.y_bits_per_pixel);
    (void) WriteBlobMSBLong(image,viff_info.location_type);
    (void) WriteBlobMSBLong(image,viff_info.location_dimension);
    (void) WriteBlobMSBLong(image,(unsigned int) viff_info.number_of_images);
    (void) WriteBlobMSBLong(image,(unsigned int) viff_info.number_data_bands);
    (void) WriteBlobMSBLong(image,(unsigned int) viff_info.data_storage_type);
    (void) WriteBlobMSBLong(image,(unsigned int) viff_info.data_encode_scheme);
    (void) WriteBlobMSBLong(image,(unsigned int) viff_info.map_scheme);
    (void) WriteBlobMSBLong(image,(unsigned int) viff_info.map_storage_type);
    (void) WriteBlobMSBLong(image,(unsigned int) viff_info.map_rows);
    (void) WriteBlobMSBLong(image,(unsigned int) viff_info.map_columns);
    (void) WriteBlobMSBLong(image,(unsigned int) viff_info.map_subrows);
    (void) WriteBlobMSBLong(image,(unsigned int) viff_info.map_enable);
    (void) WriteBlobMSBLong(image,(unsigned int) viff_info.maps_per_cycle);
    (void) WriteBlobMSBLong(image,(unsigned int) viff_info.color_space_model);
    for (i=0; i < 420; i++)
      (void) WriteBlobByte(image,'\0');
    /*
      Convert MIFF to VIFF raster pixels.
    */
    pixel_info=AcquireVirtualMemory((size_t) packets,sizeof(*pixels));
    if (pixel_info == (MemoryInfo *) NULL)
      ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed");
    pixels=(unsigned char *) GetVirtualMemoryBlob(pixel_info);
    q=pixels;
    if (image->storage_class == DirectClass)
      {
        /*
          Convert DirectClass packet to VIFF RGB pixel: bands are planar,
          so red goes at q, green at q+number_pixels, blue at
          q+2*number_pixels, and alpha (if any) at q+3*number_pixels.
        */
        number_pixels=(MagickSizeType) image->columns*image->rows;
        for (y=0; y < (ssize_t) image->rows; y++)
        {
          p=GetVirtualPixels(image,0,y,image->columns,1,exception);
          if (p == (const Quantum *) NULL)
            break;
          for (x=0; x < (ssize_t) image->columns; x++)
          {
            *q=ScaleQuantumToChar(GetPixelRed(image,p));
            *(q+number_pixels)=ScaleQuantumToChar(GetPixelGreen(image,p));
            *(q+number_pixels*2)=ScaleQuantumToChar(GetPixelBlue(image,p));
            if (image->alpha_trait != UndefinedPixelTrait)
              *(q+number_pixels*3)=ScaleQuantumToChar((Quantum)
                (GetPixelAlpha(image,p)));
            p+=GetPixelChannels(image);
            q++;
          }
          if (image->previous == (Image *) NULL)
            {
              status=SetImageProgress(image,SaveImageTag,(MagickOffsetType) y,
                image->rows);
              if (status == MagickFalse)
                break;
            }
        }
      }
    else
      if (SetImageGray(image,exception) == MagickFalse)
        {
          unsigned char
            *viff_colormap;

          /*
            Dump colormap to file.
          */
          viff_colormap=(unsigned char *) AcquireQuantumMemory(image->colors,
            3*sizeof(*viff_colormap));
          if (viff_colormap == (unsigned char *) NULL)
            {
              /*
                Release the pixel buffer before throwing; previously this
                error path leaked pixel_info.
              */
              pixel_info=RelinquishVirtualMemory(pixel_info);
              ThrowWriterException(ResourceLimitError,
                "MemoryAllocationFailed");
            }
          q=viff_colormap;
          for (i=0; i < (ssize_t) image->colors; i++)
            *q++=ScaleQuantumToChar(image->colormap[i].red);
          for (i=0; i < (ssize_t) image->colors; i++)
            *q++=ScaleQuantumToChar(image->colormap[i].green);
          for (i=0; i < (ssize_t) image->colors; i++)
            *q++=ScaleQuantumToChar(image->colormap[i].blue);
          (void) WriteBlob(image,3*image->colors,viff_colormap);
          viff_colormap=(unsigned char *) RelinquishMagickMemory(
            viff_colormap);
          /*
            Convert PseudoClass packet to VIFF colormapped pixels.
          */
          q=pixels;
          for (y=0; y < (ssize_t) image->rows; y++)
          {
            p=GetVirtualPixels(image,0,y,image->columns,1,exception);
            if (p == (const Quantum *) NULL)
              break;
            for (x=0; x < (ssize_t) image->columns; x++)
            {
              *q++=(unsigned char) GetPixelIndex(image,p);
              p+=GetPixelChannels(image);
            }
            if (image->previous == (Image *) NULL)
              {
                status=SetImageProgress(image,SaveImageTag,(MagickOffsetType)
                  y,image->rows);
                if (status == MagickFalse)
                  break;
              }
          }
        }
      else
        if (image->colors <= 2)
          {
            ssize_t
              x,
              y;

            register unsigned char
              bit,
              byte;

            /*
              Convert PseudoClass image to a VIFF monochrome image;
              dark pixels set the high bit, bytes fill LSB-first.
            */
            (void) SetImageType(image,BilevelType,exception);
            for (y=0; y < (ssize_t) image->rows; y++)
            {
              p=GetVirtualPixels(image,0,y,image->columns,1,exception);
              if (p == (const Quantum *) NULL)
                break;
              bit=0;
              byte=0;
              for (x=0; x < (ssize_t) image->columns; x++)
              {
                byte>>=1;
                if (GetPixelLuma(image,p) < (QuantumRange/2.0))
                  byte|=0x80;
                bit++;
                if (bit == 8)
                  {
                    *q++=byte;
                    bit=0;
                    byte=0;
                  }
                p+=GetPixelChannels(image);
              }
              if (bit != 0)
                /* Flush the final partial byte of the scanline. */
                *q++=byte >> (8-bit);
              if (image->previous == (Image *) NULL)
                {
                  status=SetImageProgress(image,SaveImageTag,
                    (MagickOffsetType) y,image->rows);
                  if (status == MagickFalse)
                    break;
                }
            }
          }
        else
          {
            /*
              Convert PseudoClass packet to VIFF grayscale pixel.
            */
            for (y=0; y < (ssize_t) image->rows; y++)
            {
              p=GetVirtualPixels(image,0,y,image->columns,1,exception);
              if (p == (const Quantum *) NULL)
                break;
              for (x=0; x < (ssize_t) image->columns; x++)
              {
                *q++=(unsigned char) ClampToQuantum(GetPixelLuma(image,p));
                p+=GetPixelChannels(image);
              }
              if (image->previous == (Image *) NULL)
                {
                  status=SetImageProgress(image,SaveImageTag,
                    (MagickOffsetType) y,image->rows);
                  if (status == MagickFalse)
                    break;
                }
            }
          }
    (void) WriteBlob(image,(size_t) packets,pixels);
    pixel_info=RelinquishVirtualMemory(pixel_info);
    if (GetNextImageInList(image) == (Image *) NULL)
      break;
    image=SyncNextImageInList(image);
    status=SetImageProgress(image,SaveImagesTag,scene++,
      GetImageListLength(image));
    if (status == MagickFalse)
      break;
  } while (image_info->adjoin != MagickFalse);
  (void) CloseBlob(image);
  return(MagickTrue);
}
./CrossVul/dataset_final_sorted/CWE-284/c/good_4786_2
crossvul-cpp_data_good_5349_1
/* * INET An implementation of the TCP/IP protocol suite for the LINUX * operating system. INET is implemented using the BSD Socket * interface as the means of communication with the user level. * * Implementation of the Transmission Control Protocol(TCP). * * IPv4 specific functions * * * code split from: * linux/ipv4/tcp.c * linux/ipv4/tcp_input.c * linux/ipv4/tcp_output.c * * See tcp.c for author information * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ /* * Changes: * David S. Miller : New socket lookup architecture. * This code is dedicated to John Dyson. * David S. Miller : Change semantics of established hash, * half is devoted to TIME_WAIT sockets * and the rest go in the other half. * Andi Kleen : Add support for syncookies and fixed * some bugs: ip options weren't passed to * the TCP layer, missed a check for an * ACK bit. * Andi Kleen : Implemented fast path mtu discovery. * Fixed many serious bugs in the * request_sock handling and moved * most of it into the af independent code. * Added tail drop and some other bugfixes. * Added new listen semantics. * Mike McLagan : Routing by source * Juan Jose Ciarlante: ip_dynaddr bits * Andi Kleen: various fixes. * Vitaly E. Lavrov : Transparent proxy revived after year * coma. * Andi Kleen : Fix new listen. * Andi Kleen : Fix accept error reporting. * YOSHIFUJI Hideaki @USAGI and: Support IPV6_V6ONLY socket option, which * Alexey Kuznetsov allow both IPv4 and IPv6 sockets to bind * a single port at the same time. 
 */

#define pr_fmt(fmt) "TCP: " fmt

#include <linux/bottom_half.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/cache.h>
#include <linux/jhash.h>
#include <linux/init.h>
#include <linux/times.h>
#include <linux/slab.h>

#include <net/net_namespace.h>
#include <net/icmp.h>
#include <net/inet_hashtables.h>
#include <net/tcp.h>
#include <net/transp_v6.h>
#include <net/ipv6.h>
#include <net/inet_common.h>
#include <net/timewait_sock.h>
#include <net/xfrm.h>
#include <net/secure_seq.h>
#include <net/busy_poll.h>

#include <linux/inet.h>
#include <linux/ipv6.h>
#include <linux/stddef.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <crypto/hash.h>
#include <linux/scatterlist.h>

int sysctl_tcp_tw_reuse __read_mostly;
int sysctl_tcp_low_latency __read_mostly;

#ifdef CONFIG_TCP_MD5SIG
static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
			       __be32 daddr, __be32 saddr, const struct tcphdr *th);
#endif

struct inet_hashinfo tcp_hashinfo;
EXPORT_SYMBOL(tcp_hashinfo);

/* Derive the initial sequence number for a connection from the 4-tuple
 * (daddr, saddr, dest port, source port) of the received segment.
 */
static __u32 tcp_v4_init_sequence(const struct sk_buff *skb)
{
	return secure_tcp_sequence_number(ip_hdr(skb)->daddr,
					  ip_hdr(skb)->saddr,
					  tcp_hdr(skb)->dest,
					  tcp_hdr(skb)->source);
}

/* Decide whether sk may reuse the port pair held by TIME_WAIT socket sktw.
 * On success, seeds sk's write_seq and recent-timestamp state from the
 * TIME_WAIT bucket and takes a reference on sktw; returns 1.  Returns 0
 * when reuse is not permitted.
 */
int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
{
	const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
	struct tcp_sock *tp = tcp_sk(sk);

	/* With PAWS, it is safe from the viewpoint
	   of data integrity. Even without PAWS it is safe provided sequence
	   spaces do not overlap i.e. at data rates <= 80Mbit/sec.

	   Actually, the idea is close to VJ's one, only timestamp cache is
	   held not per host, but per port pair and TW bucket is used as state
	   holder.

	   If TW bucket has been already destroyed we fall back to VJ's scheme
	   and use initial timestamp retrieved from peer table.
	 */
	if (tcptw->tw_ts_recent_stamp &&
	    (!twp || (sysctl_tcp_tw_reuse &&
			     get_seconds() - tcptw->tw_ts_recent_stamp > 1))) {
		/* Start past the TIME_WAIT socket's last sequence number so
		 * old duplicates cannot be confused with the new connection.
		 */
		tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
		if (tp->write_seq == 0)
			tp->write_seq = 1;
		tp->rx_opt.ts_recent	   = tcptw->tw_ts_recent;
		tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
		sock_hold(sktw);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(tcp_twsk_unique);

/* This will initiate an outgoing connection.
 * Resolves the route, binds a local port via inet_hash_connect(), picks an
 * initial sequence number, and sends the SYN through tcp_connect().
 * Returns 0 or a negative errno; on failure the socket is moved back to
 * TCP_CLOSE and its port/route state is released.
 */
int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
	struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	__be16 orig_sport, orig_dport;
	__be32 daddr, nexthop;
	struct flowi4 *fl4;
	struct rtable *rt;
	int err;
	struct ip_options_rcu *inet_opt;

	if (addr_len < sizeof(struct sockaddr_in))
		return -EINVAL;

	if (usin->sin_family != AF_INET)
		return -EAFNOSUPPORT;

	nexthop = daddr = usin->sin_addr.s_addr;
	inet_opt = rcu_dereference_protected(inet->inet_opt,
					     lockdep_sock_is_held(sk));
	if (inet_opt && inet_opt->opt.srr) {
		/* Source routing: route to the first hop, not the final
		 * destination.
		 */
		if (!daddr)
			return -EINVAL;
		nexthop = inet_opt->opt.faddr;
	}

	orig_sport = inet->inet_sport;
	orig_dport = usin->sin_port;
	fl4 = &inet->cork.fl.u.ip4;
	rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
			      RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
			      IPPROTO_TCP,
			      orig_sport, orig_dport, sk);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		if (err == -ENETUNREACH)
			IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
		return err;
	}

	if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
		ip_rt_put(rt);
		return -ENETUNREACH;
	}

	if (!inet_opt || !inet_opt->opt.srr)
		daddr = fl4->daddr;

	if (!inet->inet_saddr)
		inet->inet_saddr = fl4->saddr;
	sk_rcv_saddr_set(sk, inet->inet_saddr);

	if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
		/* Reset inherited state */
		tp->rx_opt.ts_recent	   = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		if (likely(!tp->repair))
			tp->write_seq	   = 0;
	}

	if (tcp_death_row.sysctl_tw_recycle &&
	    !tp->rx_opt.ts_recent_stamp && fl4->daddr == daddr)
		tcp_fetch_timewait_stamp(sk, &rt->dst);

	inet->inet_dport = usin->sin_port;
	sk_daddr_set(sk, daddr);

	inet_csk(sk)->icsk_ext_hdr_len = 0;
	if (inet_opt)
		inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;

	tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;

	/* Socket identity is still unknown (sport may be zero).
	 * However we set state to SYN-SENT and not releasing socket
	 * lock select source port, enter ourselves into the hash tables and
	 * complete initialization after this.
	 */
	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet_hash_connect(&tcp_death_row, sk);
	if (err)
		goto failure;

	sk_set_txhash(sk);

	/* Re-resolve now that the (possibly autobound) source port is known. */
	rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
			       inet->inet_sport, inet->inet_dport, sk);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		rt = NULL;
		goto failure;
	}
	/* OK, now commit destination to socket.  */
	sk->sk_gso_type = SKB_GSO_TCPV4;
	sk_setup_caps(sk, &rt->dst);

	if (!tp->write_seq && likely(!tp->repair))
		tp->write_seq = secure_tcp_sequence_number(inet->inet_saddr,
							   inet->inet_daddr,
							   inet->inet_sport,
							   usin->sin_port);

	inet->inet_id = tp->write_seq ^ jiffies;

	err = tcp_connect(sk);

	rt = NULL;
	if (err)
		goto failure;

	return 0;

failure:
	/*
	 * This unhashes the socket and releases the local port,
	 * if necessary.
	 */
	tcp_set_state(sk, TCP_CLOSE);
	ip_rt_put(rt);
	sk->sk_route_caps = 0;
	inet->inet_dport = 0;
	return err;
}
EXPORT_SYMBOL(tcp_v4_connect);

/*
 * This routine reacts to ICMP_FRAG_NEEDED mtu indications as defined in RFC1191.
 * It can be called through tcp_release_cb() if socket was owned by user
 * at the time tcp_v4_err() was called to handle ICMP message.
 */
void tcp_v4_mtu_reduced(struct sock *sk)
{
	struct dst_entry *dst;
	struct inet_sock *inet = inet_sk(sk);
	u32 mtu = tcp_sk(sk)->mtu_info;

	dst = inet_csk_update_pmtu(sk, mtu);
	if (!dst)
		return;

	/* Something is about to be wrong... Remember soft error
	 * for the case, if this connection will not able to recover.
	 */
	if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
		sk->sk_err_soft = EMSGSIZE;

	mtu = dst_mtu(dst);

	if (inet->pmtudisc != IP_PMTUDISC_DONT &&
	    ip_sk_accept_pmtu(sk) &&
	    inet_csk(sk)->icsk_pmtu_cookie > mtu) {
		tcp_sync_mss(sk, mtu);

		/* Resend the TCP packet because it's
		 * clear that the old packet has been
		 * dropped. This is the new "fast" path mtu
		 * discovery.
		 */
		tcp_simple_retransmit(sk);
	} /* else let the usual retransmit timer handle it */
}
EXPORT_SYMBOL(tcp_v4_mtu_reduced);

/* Forward an ICMP redirect to the socket's cached route, if still valid. */
static void do_redirect(struct sk_buff *skb, struct sock *sk)
{
	struct dst_entry *dst = __sk_dst_check(sk, 0);

	if (dst)
		dst->ops->redirect(dst, sk, skb);
}

/* handle ICMP messages on TCP_NEW_SYN_RECV request sockets */
void tcp_req_err(struct sock *sk, u32 seq, bool abort)
{
	struct request_sock *req = inet_reqsk(sk);
	struct net *net = sock_net(sk);

	/* ICMPs are not backlogged, hence we cannot get
	 * an established socket here.
	 */
	if (seq != tcp_rsk(req)->snt_isn) {
		__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
	} else if (abort) {
		/*
		 * Still in SYN_RECV, just remove it silently.
		 * There is no good way to pass the error to the newly
		 * created socket, and POSIX does not want network
		 * errors returned from accept().
		 */
		inet_csk_reqsk_queue_drop(req->rsk_listener, req);
		tcp_listendrop(req->rsk_listener);
	}
	reqsk_put(req);
}
EXPORT_SYMBOL(tcp_req_err);

/*
 * This routine is called by the ICMP module when it gets some
 * sort of error condition.  If err < 0 then the socket should
 * be closed and the error returned to the user.  If err > 0
 * it's just the icmp type << 8 | icmp code.  After adjustment
 * header points to the first 8 bytes of the tcp header.  We need
 * to find the appropriate port.
 *
 * The locking strategy used here is very "optimistic". When
 * someone else accesses the socket the ICMP is just dropped
 * and for some paths there is no check at all.
 * A more general error queue to queue errors for later handling
 * is probably better.
 *
 */

void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
{
	const struct iphdr *iph = (const struct iphdr *)icmp_skb->data;
	struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2));
	struct inet_connection_sock *icsk;
	struct tcp_sock *tp;
	struct inet_sock *inet;
	const int type = icmp_hdr(icmp_skb)->type;
	const int code = icmp_hdr(icmp_skb)->code;
	struct sock *sk;
	struct sk_buff *skb;
	struct request_sock *fastopen;
	__u32 seq, snd_una;
	__u32 remaining;
	int err;
	struct net *net = dev_net(icmp_skb->dev);

	/* Locate the socket the embedded TCP header belongs to. */
	sk = __inet_lookup_established(net, &tcp_hashinfo, iph->daddr,
				       th->dest, iph->saddr, ntohs(th->source),
				       inet_iif(icmp_skb));
	if (!sk) {
		__ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
		return;
	}
	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}
	seq = ntohl(th->seq);
	if (sk->sk_state == TCP_NEW_SYN_RECV)
		/* Request sockets are handled separately; only hard errors
		 * (listed below) abort the pending request.
		 */
		return tcp_req_err(sk, seq,
				  type == ICMP_PARAMETERPROB ||
				  type == ICMP_TIME_EXCEEDED ||
				  (type == ICMP_DEST_UNREACH &&
				   (code == ICMP_NET_UNREACH ||
				    code == ICMP_HOST_UNREACH)));

	bh_lock_sock(sk);
	/* If too many ICMPs get dropped on busy
	 * servers this needs to be solved differently.
	 * We do take care of PMTU discovery (RFC1191) special case :
	 * we can receive locally generated ICMP messages while socket is held.
	 */
	if (sock_owned_by_user(sk)) {
		if (!(type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED))
			__NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);
	}
	if (sk->sk_state == TCP_CLOSE)
		goto out;

	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
		__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
		goto out;
	}

	icsk = inet_csk(sk);
	tp = tcp_sk(sk);
	/* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */
	fastopen = tp->fastopen_rsk;
	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, snd_una, tp->snd_nxt)) {
		/* Quoted sequence number is outside our send window: stale or
		 * forged ICMP; drop it.
		 */
		__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	switch (type) {
	case ICMP_REDIRECT:
		do_redirect(icmp_skb, sk);
		goto out;
	case ICMP_SOURCE_QUENCH:
		/* Just silently ignore these. */
		goto out;
	case ICMP_PARAMETERPROB:
		err = EPROTO;
		break;
	case ICMP_DEST_UNREACH:
		if (code > NR_ICMP_UNREACH)
			goto out;

		if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
			/* We are not interested in TCP_LISTEN and open_requests
			 * (SYN-ACKs send out by Linux are always <576bytes so
			 * they should go through unfragmented).
			 */
			if (sk->sk_state == TCP_LISTEN)
				goto out;

			tp->mtu_info = info;
			if (!sock_owned_by_user(sk)) {
				tcp_v4_mtu_reduced(sk);
			} else {
				/* Socket is busy: defer the MTU update to
				 * tcp_release_cb(); hold a reference so the
				 * socket survives until then.
				 */
				if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, &tp->tsq_flags))
					sock_hold(sk);
			}
			goto out;
		}

		err = icmp_err_convert[code].errno;
		/* check if icmp_skb allows revert of backoff
		 * (see draft-zimmermann-tcp-lcd) */
		if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH)
			break;
		if (seq != tp->snd_una  || !icsk->icsk_retransmits ||
		    !icsk->icsk_backoff || fastopen)
			break;

		if (sock_owned_by_user(sk))
			break;

		/* Revert one step of exponential backoff and re-arm (or fire)
		 * the retransmit timer with the remaining RTO.
		 */
		icsk->icsk_backoff--;
		icsk->icsk_rto = tp->srtt_us ? __tcp_set_rto(tp) :
					       TCP_TIMEOUT_INIT;
		icsk->icsk_rto = inet_csk_rto_backoff(icsk, TCP_RTO_MAX);

		skb = tcp_write_queue_head(sk);
		BUG_ON(!skb);

		remaining = icsk->icsk_rto -
			    min(icsk->icsk_rto,
				tcp_time_stamp - tcp_skb_timestamp(skb));

		if (remaining) {
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
						  remaining, TCP_RTO_MAX);
		} else {
			/* RTO revert clocked out retransmission.
			 * Will retransmit now */
			tcp_retransmit_timer(sk);
		}

		break;
	case ICMP_TIME_EXCEEDED:
		err = EHOSTUNREACH;
		break;
	default:
		goto out;
	}

	switch (sk->sk_state) {
	case TCP_SYN_SENT:
	case TCP_SYN_RECV:
		/* Only in fast or simultaneous open. If a fast open socket is
		 * is already accepted it is treated as a connected one below.
		 */
		if (fastopen && !fastopen->sk)
			break;

		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;

			sk->sk_error_report(sk);

			tcp_done(sk);
		} else {
			sk->sk_err_soft = err;
		}
		goto out;
	}

	/* If we've already connected we will keep trying
	 * until we time out, or the user gives up.
	 *
	 * rfc1122 4.2.3.9 allows to consider as hard errors
	 * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too,
	 * but it is obsoleted by pmtu discovery).
	 *
	 * Note, that in modern internet, where routing is unreliable
	 * and in each dark corner broken firewalls sit, sending random
	 * errors ordered by their masters even this two messages finally lose
	 * their original sense (even Linux sends invalid PORT_UNREACHs)
	 *
	 * Now we are in compliance with RFCs.
	 *							--ANK (980905)
	 */

	inet = inet_sk(sk);
	if (!sock_owned_by_user(sk) && inet->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else	{ /* Only an error on timeout */
		sk->sk_err_soft = err;
	}

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}

/* Fill in the TCP checksum for an outgoing IPv4 segment; for
 * CHECKSUM_PARTIAL the hardware completes it using csum_start/csum_offset.
 */
void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr)
{
	struct tcphdr *th = tcp_hdr(skb);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct tcphdr, check);
	} else {
		th->check = tcp_v4_check(skb->len, saddr, daddr,
					 csum_partial(th,
						      th->doff << 2,
						      skb->csum));
	}
}

/* This routine computes an IPv4 TCP checksum. */
void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
{
	const struct inet_sock *inet = inet_sk(sk);

	__tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
}
EXPORT_SYMBOL(tcp_v4_send_check);

/*
 *	This routine will send an RST to the other tcp.
 *
 *	Someone asks:  why I NEVER use socket parameters (TOS, TTL etc.)
 *		      for reset.
 *	Answer: if a packet caused RST, it is not for a socket
 *		existing in our system, if it is matched to a socket,
 *		it is just duplicate segment or bug in other side's TCP.
 * So that we build reply only basing on parameters
 * arrived with segment.
 * Exception: precedence violation. We do not implement it in any case.
 */
/* Build and transmit a RST in reply to @skb.
 *
 * @sk may be NULL (no matching socket was found); in that case the reply is
 * constructed purely from the incoming segment's headers and the route of
 * the received packet. Nothing here consults connection state for TOS/TTL.
 *
 * Under CONFIG_TCP_MD5SIG the RST is signed when a matching key exists:
 * either from @sk directly, or - when the active side is gone - by looking
 * up a listener that owns a key for the peer address. The whole MD5 section
 * runs under rcu_read_lock() because key lookup is RCU-protected.
 */
static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb)
{
	const struct tcphdr *th = tcp_hdr(skb);
	/* On-stack reply packet: bare TCP header plus (optionally) room
	 * for one aligned MD5 signature option.
	 */
	struct {
		struct tcphdr th;
#ifdef CONFIG_TCP_MD5SIG
		__be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
#endif
	} rep;
	struct ip_reply_arg arg;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key = NULL;
	const __u8 *hash_location = NULL;
	unsigned char newhash[16];
	int genhash;
	struct sock *sk1 = NULL;
#endif
	struct net *net;

	/* Never send a reset in response to a reset. */
	if (th->rst)
		return;

	/* If sk not NULL, it means we did a successful lookup and incoming
	 * route had to be correct. prequeue might have dropped our dst.
	 */
	if (!sk && skb_rtable(skb)->rt_type != RTN_LOCAL)
		return;

	/* Swap the send and the receive. */
	memset(&rep, 0, sizeof(rep));
	rep.th.dest = th->source;
	rep.th.source = th->dest;
	rep.th.doff = sizeof(struct tcphdr) / 4;
	rep.th.rst = 1;

	if (th->ack) {
		/* Resetting an ACK: our SEQ is their acknowledged number. */
		rep.th.seq = th->ack_seq;
	} else {
		/* No ACK to echo: send an ACK-carrying RST that acknowledges
		 * everything the segment occupied in sequence space
		 * (payload plus SYN/FIN flag bytes).
		 */
		rep.th.ack = 1;
		rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
				       skb->len - (th->doff << 2));
	}

	memset(&arg, 0, sizeof(arg));
	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);

	net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
#ifdef CONFIG_TCP_MD5SIG
	rcu_read_lock();
	hash_location = tcp_parse_md5sig_option(th);
	if (sk && sk_fullsock(sk)) {
		key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)
					&ip_hdr(skb)->saddr, AF_INET);
	} else if (hash_location) {
		/*
		 * active side is lost. Try to find listening socket through
		 * source port, and then find md5 key through listening socket.
		 * we are not loose security here:
		 * Incoming packet is checked with md5 hash with finding key,
		 * no RST generated if md5 hash doesn't match.
		 */
		sk1 = __inet_lookup_listener(net, &tcp_hashinfo, NULL, 0,
					     ip_hdr(skb)->saddr,
					     th->source, ip_hdr(skb)->daddr,
					     ntohs(th->source), inet_iif(skb));
		/* don't send rst if it can't find key */
		if (!sk1)
			goto out;

		key = tcp_md5_do_lookup(sk1, (union tcp_md5_addr *)
					&ip_hdr(skb)->saddr, AF_INET);
		if (!key)
			goto out;

		/* Verify the incoming segment's signature before we are
		 * willing to emit a signed RST for it.
		 */
		genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, skb);
		if (genhash || memcmp(hash_location, newhash, 16) != 0)
			goto out;

	}

	if (key) {
		/* Append an MD5 signature option (NOP,NOP,MD5SIG,len). */
		rep.opt[0] = htonl((TCPOPT_NOP << 24) |
				   (TCPOPT_NOP << 16) |
				   (TCPOPT_MD5SIG << 8) |
				   TCPOLEN_MD5SIG);
		/* Update length and the length the header thinks exists */
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len / 4;

		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
				    key, ip_hdr(skb)->saddr,
				    ip_hdr(skb)->daddr, &rep.th);
	}
#endif
	/* Pseudo-header checksum seed; device/stack folds in the rest. */
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
	arg.flags = (sk && inet_sk_transparent(sk)) ? IP_REPLY_ARG_NOSRCCHECK : 0;

	/* When socket is gone, all binding information is lost.
	 * routing might fail in this case. No choice here, if we choose to force
	 * input interface, we will misroute in case of asymmetric route.
	 */
	if (sk)
		arg.bound_dev_if = sk->sk_bound_dev_if;

	/* tw_bound_dev_if is read through a struct sock pointer below;
	 * assert the two fields really share an offset.
	 */
	BUILD_BUG_ON(offsetof(struct sock, sk_bound_dev_if) !=
		     offsetof(struct inet_timewait_sock, tw_bound_dev_if));

	arg.tos = ip_hdr(skb)->tos;
	local_bh_disable();
	ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
			      skb, &TCP_SKB_CB(skb)->header.h4.opt,
			      ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
			      &arg, arg.iov[0].iov_len);

	__TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
	__TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
	local_bh_enable();

#ifdef CONFIG_TCP_MD5SIG
out:
	rcu_read_unlock();
#endif
}

/* The code following below sending ACKs in SYN-RECV and TIME-WAIT states
   outside socket context is ugly, certainly. What can I do?
 */
/* Build and transmit a bare ACK segment outside full socket context.
 * Used for TIME-WAIT and SYN-RECV replies below; the caller supplies all
 * header fields (seq/ack/window/timestamps) explicitly. When @key is set,
 * an MD5 signature option is appended after the (optional) timestamp
 * option. Transmission uses the per-cpu control socket, so BHs are
 * disabled around the send.
 */
static void tcp_v4_send_ack(struct net *net,
			    struct sk_buff *skb, u32 seq, u32 ack,
			    u32 win, u32 tsval, u32 tsecr, int oif,
			    struct tcp_md5sig_key *key,
			    int reply_flags, u8 tos)
{
	const struct tcphdr *th = tcp_hdr(skb);
	/* On-stack reply: TCP header plus room for a timestamp option
	 * and (when MD5 is compiled in) an MD5 signature option.
	 */
	struct {
		struct tcphdr th;
		__be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
#ifdef CONFIG_TCP_MD5SIG
			   + (TCPOLEN_MD5SIG_ALIGNED >> 2)
#endif
			];
	} rep;
	struct ip_reply_arg arg;

	memset(&rep.th, 0, sizeof(struct tcphdr));
	memset(&arg, 0, sizeof(arg));

	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);
	if (tsecr) {
		/* Timestamp option, padded with two NOPs for alignment. */
		rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				   (TCPOPT_TIMESTAMP << 8) |
				   TCPOLEN_TIMESTAMP);
		rep.opt[1] = htonl(tsval);
		rep.opt[2] = htonl(tsecr);
		arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
	}

	/* Swap the send and the receive. */
	rep.th.dest    = th->source;
	rep.th.source  = th->dest;
	rep.th.doff    = arg.iov[0].iov_len / 4;
	rep.th.seq     = htonl(seq);
	rep.th.ack_seq = htonl(ack);
	rep.th.ack     = 1;
	rep.th.window  = htons(win);

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		/* MD5 option goes after the timestamp option if present. */
		int offset = (tsecr) ? 3 : 0;

		rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
					  (TCPOPT_NOP << 16) |
					  (TCPOPT_MD5SIG << 8) |
					  TCPOLEN_MD5SIG);
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len/4;

		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
				    key, ip_hdr(skb)->saddr,
				    ip_hdr(skb)->daddr, &rep.th);
	}
#endif
	arg.flags = reply_flags;
	/* Pseudo-header checksum seed for the reply. */
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
	if (oif)
		arg.bound_dev_if = oif;
	arg.tos = tos;
	local_bh_disable();
	ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
			      skb, &TCP_SKB_CB(skb)->header.h4.opt,
			      ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
			      &arg, arg.iov[0].iov_len);

	__TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
	local_bh_enable();
}

/* ACK a segment received for a TIME-WAIT socket, echoing the state saved
 * in the timewait sock (snd_nxt/rcv_nxt, scaled window, timestamps, MD5
 * key, transparent flag, TOS). Drops the timewait reference when done.
 */
static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v4_send_ack(sock_net(sk), skb,
			tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcp_time_stamp + tcptw->tw_ts_offset,
			tcptw->tw_ts_recent,
			tw->tw_bound_dev_if,
			tcp_twsk_md5_key(tcptw),
			tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0,
			tw->tw_tos
			);

	inet_twsk_put(tw);
}

/* ACK on behalf of a request socket (SYN-RECV / Fast Open), without a
 * full socket.
 */
static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
	 */
	u32 seq = (sk->sk_state == TCP_LISTEN) ? tcp_rsk(req)->snt_isn + 1 :
					     tcp_sk(sk)->snd_nxt;

	/* RFC 7323 2.3
	 * The window field (SEG.WND) of every outgoing segment, with the
	 * exception of <SYN> segments, MUST be right-shifted by
	 * Rcv.Wind.Shift bits:
	 */
	tcp_v4_send_ack(sock_net(sk), skb, seq,
			tcp_rsk(req)->rcv_nxt,
			req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
			tcp_time_stamp,
			req->ts_recent,
			0,
			tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->daddr,
					  AF_INET),
			inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
			ip_hdr(skb)->tos);
}

/*
 * Send a SYN-ACK after having received a SYN.
 * This still operates on a request_sock only, not on a big
 * socket.
 * Returns 0 on success, a negative xmit error (or -1 when no route or no
 * skb could be obtained) otherwise.
 */
static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
			      struct flowi *fl,
			      struct request_sock *req,
			      struct tcp_fastopen_cookie *foc,
			      enum tcp_synack_type synack_type)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct flowi4 fl4;
	int err = -1;
	struct sk_buff *skb;

	/* First, grab a route. */
	if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
		return -1;

	skb = tcp_make_synack(sk, dst, req, foc, synack_type);

	if (skb) {
		/* Checksum over the request's local/remote addresses. */
		__tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr);

		err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
					    ireq->ir_rmt_addr,
					    ireq->opt);
		err = net_xmit_eval(err);
	}

	return err;
}

/*
 * IPv4 request_sock destructor: frees the saved IP options, if any.
 */
static void tcp_v4_reqsk_destructor(struct request_sock *req)
{
	kfree(inet_rsk(req)->opt);
}

#ifdef CONFIG_TCP_MD5SIG
/*
 * RFC2385 MD5 checksumming requires a mapping of
 * IP address->MD5 Key.
 * We need to maintain these in the sk structure.
 */

/* Find the Key structure for an address.
 */
/* Look up the MD5 key for @addr/@family on socket @sk.
 * Caller must hold either rcu_read_lock() or the socket lock (asserted via
 * rcu_dereference_check below). Returns NULL when no key list exists or no
 * entry matches. Comparison width is chosen per address family.
 */
struct tcp_md5sig_key *tcp_md5_do_lookup(const struct sock *sk,
					 const union tcp_md5_addr *addr,
					 int family)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;
	unsigned int size = sizeof(struct in_addr);
	const struct tcp_md5sig_info *md5sig;

	/* caller either holds rcu_read_lock() or socket lock */
	md5sig = rcu_dereference_check(tp->md5sig_info,
				       lockdep_sock_is_held(sk));
	if (!md5sig)
		return NULL;
#if IS_ENABLED(CONFIG_IPV6)
	if (family == AF_INET6)
		size = sizeof(struct in6_addr);
#endif
	hlist_for_each_entry_rcu(key, &md5sig->head, node) {
		if (key->family != family)
			continue;
		if (!memcmp(&key->addr, addr, size))
			return key;
	}
	return NULL;
}
EXPORT_SYMBOL(tcp_md5_do_lookup);

/* Convenience wrapper: look up the IPv4 MD5 key for @addr_sk's peer
 * address on socket @sk.
 */
struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
					 const struct sock *addr_sk)
{
	const union tcp_md5_addr *addr;

	addr = (const union tcp_md5_addr *)&addr_sk->sk_daddr;
	return tcp_md5_do_lookup(sk, addr, AF_INET);
}
EXPORT_SYMBOL(tcp_v4_md5_lookup);

/* This can be called on a newly created socket, from other files */
/* Add (or replace) the MD5 key for @addr/@family on @sk.
 * Allocates the per-socket key list on first use and publishes it with
 * rcu_assign_pointer so lockless readers see a fully initialised list.
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
		   int family, const u8 *newkey, u8 newkeylen, gfp_t gfp)
{
	/* Add Key to the list */
	struct tcp_md5sig_key *key;
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_info *md5sig;

	key = tcp_md5_do_lookup(sk, addr, family);
	if (key) {
		/* Pre-existing entry - just update that one.
		 * NOTE(review): key material is overwritten in place while
		 * RCU readers may be hashing with it - looks benign for
		 * same-length keys but worth confirming.
		 */
		memcpy(key->key, newkey, newkeylen);
		key->keylen = newkeylen;
		return 0;
	}

	md5sig = rcu_dereference_protected(tp->md5sig_info,
					   lockdep_sock_is_held(sk));
	if (!md5sig) {
		/* First key on this socket: create the list head. */
		md5sig = kmalloc(sizeof(*md5sig), gfp);
		if (!md5sig)
			return -ENOMEM;

		/* MD5 signing is incompatible with GSO on this socket. */
		sk_nocaps_add(sk, NETIF_F_GSO_MASK);
		INIT_HLIST_HEAD(&md5sig->head);
		rcu_assign_pointer(tp->md5sig_info, md5sig);
	}

	key = sock_kmalloc(sk, sizeof(*key), gfp);
	if (!key)
		return -ENOMEM;
	if (!tcp_alloc_md5sig_pool()) {
		sock_kfree_s(sk, key, sizeof(*key));
		return -ENOMEM;
	}

	memcpy(key->key, newkey, newkeylen);
	key->keylen = newkeylen;
	key->family = family;
	memcpy(&key->addr, addr,
	       (family == AF_INET6) ? sizeof(struct in6_addr) :
				      sizeof(struct in_addr));
	hlist_add_head_rcu(&key->node, &md5sig->head);
	return 0;
}
EXPORT_SYMBOL(tcp_md5_do_add);

/* Remove the MD5 key for @addr/@family from @sk.
 * Returns 0 on success, -ENOENT when no such key exists. The key memory
 * is reclaimed via kfree_rcu after readers are done with it.
 */
int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family)
{
	struct tcp_md5sig_key *key;

	key = tcp_md5_do_lookup(sk, addr, family);
	if (!key)
		return -ENOENT;
	hlist_del_rcu(&key->node);
	atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
	kfree_rcu(key, rcu);
	return 0;
}
EXPORT_SYMBOL(tcp_md5_do_del);

/* Drop every MD5 key on @sk; used at socket teardown (no concurrent
 * lookups expected - the rcu_dereference_protected(..., 1) asserts
 * exclusive access).
 */
static void tcp_clear_md5_list(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;
	struct hlist_node *n;
	struct tcp_md5sig_info *md5sig;

	md5sig = rcu_dereference_protected(tp->md5sig_info, 1);

	hlist_for_each_entry_safe(key, n, &md5sig->head, node) {
		hlist_del_rcu(&key->node);
		atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
		kfree_rcu(key, rcu);
	}
}

/* setsockopt(TCP_MD5SIG) handler: validate the user-supplied struct
 * tcp_md5sig and dispatch to add (non-empty key) or delete (empty key).
 */
static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval,
				 int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin->sin_family != AF_INET)
		return -EINVAL;

	/* Zero key length means: delete the key for this address. */
	if (!cmd.tcpm_keylen)
		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
				      AF_INET);

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	return tcp_md5_do_add(sk, (union
tcp_md5_addr *)&sin->sin_addr.s_addr, AF_INET, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL); } static int tcp_v4_md5_hash_headers(struct tcp_md5sig_pool *hp, __be32 daddr, __be32 saddr, const struct tcphdr *th, int nbytes) { struct tcp4_pseudohdr *bp; struct scatterlist sg; struct tcphdr *_th; bp = hp->scratch; bp->saddr = saddr; bp->daddr = daddr; bp->pad = 0; bp->protocol = IPPROTO_TCP; bp->len = cpu_to_be16(nbytes); _th = (struct tcphdr *)(bp + 1); memcpy(_th, th, sizeof(*th)); _th->check = 0; sg_init_one(&sg, bp, sizeof(*bp) + sizeof(*th)); ahash_request_set_crypt(hp->md5_req, &sg, NULL, sizeof(*bp) + sizeof(*th)); return crypto_ahash_update(hp->md5_req); } static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key, __be32 daddr, __be32 saddr, const struct tcphdr *th) { struct tcp_md5sig_pool *hp; struct ahash_request *req; hp = tcp_get_md5sig_pool(); if (!hp) goto clear_hash_noput; req = hp->md5_req; if (crypto_ahash_init(req)) goto clear_hash; if (tcp_v4_md5_hash_headers(hp, daddr, saddr, th, th->doff << 2)) goto clear_hash; if (tcp_md5_hash_key(hp, key)) goto clear_hash; ahash_request_set_crypt(req, NULL, md5_hash, 0); if (crypto_ahash_final(req)) goto clear_hash; tcp_put_md5sig_pool(); return 0; clear_hash: tcp_put_md5sig_pool(); clear_hash_noput: memset(md5_hash, 0, 16); return 1; } int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key, const struct sock *sk, const struct sk_buff *skb) { struct tcp_md5sig_pool *hp; struct ahash_request *req; const struct tcphdr *th = tcp_hdr(skb); __be32 saddr, daddr; if (sk) { /* valid for establish/request sockets */ saddr = sk->sk_rcv_saddr; daddr = sk->sk_daddr; } else { const struct iphdr *iph = ip_hdr(skb); saddr = iph->saddr; daddr = iph->daddr; } hp = tcp_get_md5sig_pool(); if (!hp) goto clear_hash_noput; req = hp->md5_req; if (crypto_ahash_init(req)) goto clear_hash; if (tcp_v4_md5_hash_headers(hp, daddr, saddr, th, skb->len)) goto clear_hash; if (tcp_md5_hash_skb_data(hp, 
skb, th->doff << 2)) goto clear_hash; if (tcp_md5_hash_key(hp, key)) goto clear_hash; ahash_request_set_crypt(req, NULL, md5_hash, 0); if (crypto_ahash_final(req)) goto clear_hash; tcp_put_md5sig_pool(); return 0; clear_hash: tcp_put_md5sig_pool(); clear_hash_noput: memset(md5_hash, 0, 16); return 1; } EXPORT_SYMBOL(tcp_v4_md5_hash_skb); #endif /* Called with rcu_read_lock() */ static bool tcp_v4_inbound_md5_hash(const struct sock *sk, const struct sk_buff *skb) { #ifdef CONFIG_TCP_MD5SIG /* * This gets called for each TCP segment that arrives * so we want to be efficient. * We have 3 drop cases: * o No MD5 hash and one expected. * o MD5 hash and we're not expecting one. * o MD5 hash and its wrong. */ const __u8 *hash_location = NULL; struct tcp_md5sig_key *hash_expected; const struct iphdr *iph = ip_hdr(skb); const struct tcphdr *th = tcp_hdr(skb); int genhash; unsigned char newhash[16]; hash_expected = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&iph->saddr, AF_INET); hash_location = tcp_parse_md5sig_option(th); /* We've parsed the options - do we have a hash? */ if (!hash_expected && !hash_location) return false; if (hash_expected && !hash_location) { NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND); return true; } if (!hash_expected && hash_location) { NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED); return true; } /* Okay, so this is hash_expected and hash_location - * so we need to calculate the checksum. */ genhash = tcp_v4_md5_hash_skb(newhash, hash_expected, NULL, skb); if (genhash || memcmp(hash_location, newhash, 16) != 0) { NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5FAILURE); net_info_ratelimited("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n", &iph->saddr, ntohs(th->source), &iph->daddr, ntohs(th->dest), genhash ? 
" tcp_v4_calc_md5_hash failed" : ""); return true; } return false; #endif return false; } static void tcp_v4_init_req(struct request_sock *req, const struct sock *sk_listener, struct sk_buff *skb) { struct inet_request_sock *ireq = inet_rsk(req); sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr); sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr); ireq->opt = tcp_v4_save_options(skb); } static struct dst_entry *tcp_v4_route_req(const struct sock *sk, struct flowi *fl, const struct request_sock *req, bool *strict) { struct dst_entry *dst = inet_csk_route_req(sk, &fl->u.ip4, req); if (strict) { if (fl->u.ip4.daddr == inet_rsk(req)->ir_rmt_addr) *strict = true; else *strict = false; } return dst; } struct request_sock_ops tcp_request_sock_ops __read_mostly = { .family = PF_INET, .obj_size = sizeof(struct tcp_request_sock), .rtx_syn_ack = tcp_rtx_synack, .send_ack = tcp_v4_reqsk_send_ack, .destructor = tcp_v4_reqsk_destructor, .send_reset = tcp_v4_send_reset, .syn_ack_timeout = tcp_syn_ack_timeout, }; static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = { .mss_clamp = TCP_MSS_DEFAULT, #ifdef CONFIG_TCP_MD5SIG .req_md5_lookup = tcp_v4_md5_lookup, .calc_md5_hash = tcp_v4_md5_hash_skb, #endif .init_req = tcp_v4_init_req, #ifdef CONFIG_SYN_COOKIES .cookie_init_seq = cookie_v4_init_sequence, #endif .route_req = tcp_v4_route_req, .init_seq = tcp_v4_init_sequence, .send_synack = tcp_v4_send_synack, }; int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb) { /* Never answer to SYNs send to broadcast or multicast */ if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) goto drop; return tcp_conn_request(&tcp_request_sock_ops, &tcp_request_sock_ipv4_ops, sk, skb); drop: tcp_listendrop(sk); return 0; } EXPORT_SYMBOL(tcp_v4_conn_request); /* * The three way handshake has completed - we got a valid synack - * now create the new socket. 
*/ struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb, struct request_sock *req, struct dst_entry *dst, struct request_sock *req_unhash, bool *own_req) { struct inet_request_sock *ireq; struct inet_sock *newinet; struct tcp_sock *newtp; struct sock *newsk; #ifdef CONFIG_TCP_MD5SIG struct tcp_md5sig_key *key; #endif struct ip_options_rcu *inet_opt; if (sk_acceptq_is_full(sk)) goto exit_overflow; newsk = tcp_create_openreq_child(sk, req, skb); if (!newsk) goto exit_nonewsk; newsk->sk_gso_type = SKB_GSO_TCPV4; inet_sk_rx_dst_set(newsk, skb); newtp = tcp_sk(newsk); newinet = inet_sk(newsk); ireq = inet_rsk(req); sk_daddr_set(newsk, ireq->ir_rmt_addr); sk_rcv_saddr_set(newsk, ireq->ir_loc_addr); newsk->sk_bound_dev_if = ireq->ir_iif; newinet->inet_saddr = ireq->ir_loc_addr; inet_opt = ireq->opt; rcu_assign_pointer(newinet->inet_opt, inet_opt); ireq->opt = NULL; newinet->mc_index = inet_iif(skb); newinet->mc_ttl = ip_hdr(skb)->ttl; newinet->rcv_tos = ip_hdr(skb)->tos; inet_csk(newsk)->icsk_ext_hdr_len = 0; if (inet_opt) inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen; newinet->inet_id = newtp->write_seq ^ jiffies; if (!dst) { dst = inet_csk_route_child_sock(sk, newsk, req); if (!dst) goto put_and_exit; } else { /* syncookie case : see end of cookie_v4_check() */ } sk_setup_caps(newsk, dst); tcp_ca_openreq_child(newsk, dst); tcp_sync_mss(newsk, dst_mtu(dst)); newtp->advmss = dst_metric_advmss(dst); if (tcp_sk(sk)->rx_opt.user_mss && tcp_sk(sk)->rx_opt.user_mss < newtp->advmss) newtp->advmss = tcp_sk(sk)->rx_opt.user_mss; tcp_initialize_rcv_mss(newsk); #ifdef CONFIG_TCP_MD5SIG /* Copy over the MD5 key from the original socket */ key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&newinet->inet_daddr, AF_INET); if (key) { /* * We're using one, so create a matching key * on the newsk structure. If we fail to get * memory, then we end up not copying the key * across. Shucks. 
*/ tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newinet->inet_daddr, AF_INET, key->key, key->keylen, GFP_ATOMIC); sk_nocaps_add(newsk, NETIF_F_GSO_MASK); } #endif if (__inet_inherit_port(sk, newsk) < 0) goto put_and_exit; *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash)); if (*own_req) tcp_move_syn(newtp, req); return newsk; exit_overflow: NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS); exit_nonewsk: dst_release(dst); exit: tcp_listendrop(sk); return NULL; put_and_exit: inet_csk_prepare_forced_close(newsk); tcp_done(newsk); goto exit; } EXPORT_SYMBOL(tcp_v4_syn_recv_sock); static struct sock *tcp_v4_cookie_check(struct sock *sk, struct sk_buff *skb) { #ifdef CONFIG_SYN_COOKIES const struct tcphdr *th = tcp_hdr(skb); if (!th->syn) sk = cookie_v4_check(sk, skb); #endif return sk; } /* The socket must have it's spinlock held when we get * here, unless it is a TCP_LISTEN socket. * * We have a potential double-lock case here, so even when * doing backlog processing we use the BH locking scheme. * This is because we cannot sleep with the original spinlock * held. 
*/ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb) { struct sock *rsk; if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */ struct dst_entry *dst = sk->sk_rx_dst; sock_rps_save_rxhash(sk, skb); sk_mark_napi_id(sk, skb); if (dst) { if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif || !dst->ops->check(dst, 0)) { dst_release(dst); sk->sk_rx_dst = NULL; } } tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len); return 0; } if (tcp_checksum_complete(skb)) goto csum_err; if (sk->sk_state == TCP_LISTEN) { struct sock *nsk = tcp_v4_cookie_check(sk, skb); if (!nsk) goto discard; if (nsk != sk) { sock_rps_save_rxhash(nsk, skb); sk_mark_napi_id(nsk, skb); if (tcp_child_process(sk, nsk, skb)) { rsk = nsk; goto reset; } return 0; } } else sock_rps_save_rxhash(sk, skb); if (tcp_rcv_state_process(sk, skb)) { rsk = sk; goto reset; } return 0; reset: tcp_v4_send_reset(rsk, skb); discard: kfree_skb(skb); /* Be careful here. If this function gets more complicated and * gcc suffers from register pressure on the x86, sk (in %ebx) * might be destroyed here. This current version compiles correctly, * but you have been warned. 
*/ return 0; csum_err: TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS); TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS); goto discard; } EXPORT_SYMBOL(tcp_v4_do_rcv); void tcp_v4_early_demux(struct sk_buff *skb) { const struct iphdr *iph; const struct tcphdr *th; struct sock *sk; if (skb->pkt_type != PACKET_HOST) return; if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr))) return; iph = ip_hdr(skb); th = tcp_hdr(skb); if (th->doff < sizeof(struct tcphdr) / 4) return; sk = __inet_lookup_established(dev_net(skb->dev), &tcp_hashinfo, iph->saddr, th->source, iph->daddr, ntohs(th->dest), skb->skb_iif); if (sk) { skb->sk = sk; skb->destructor = sock_edemux; if (sk_fullsock(sk)) { struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst); if (dst) dst = dst_check(dst, 0); if (dst && inet_sk(sk)->rx_dst_ifindex == skb->skb_iif) skb_dst_set_noref(skb, dst); } } } /* Packet is added to VJ-style prequeue for processing in process * context, if a reader task is waiting. Apparently, this exciting * idea (VJ's mail "Re: query about TCP header on tcp-ip" of 07 Sep 93) * failed somewhere. Latency? Burstiness? Well, at least now we will * see, why it failed. 8)8) --ANK * */ bool tcp_prequeue(struct sock *sk, struct sk_buff *skb) { struct tcp_sock *tp = tcp_sk(sk); if (sysctl_tcp_low_latency || !tp->ucopy.task) return false; if (skb->len <= tcp_hdrlen(skb) && skb_queue_len(&tp->ucopy.prequeue) == 0) return false; /* Before escaping RCU protected region, we need to take care of skb * dst. Prequeue is only enabled for established sockets. * For such sockets, we might need the skb dst only to set sk->sk_rx_dst * Instead of doing full sk_rx_dst validity here, let's perform * an optimistic check. 
*/ if (likely(sk->sk_rx_dst)) skb_dst_drop(skb); else skb_dst_force_safe(skb); __skb_queue_tail(&tp->ucopy.prequeue, skb); tp->ucopy.memory += skb->truesize; if (skb_queue_len(&tp->ucopy.prequeue) >= 32 || tp->ucopy.memory + atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf) { struct sk_buff *skb1; BUG_ON(sock_owned_by_user(sk)); __NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPPREQUEUEDROPPED, skb_queue_len(&tp->ucopy.prequeue)); while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) sk_backlog_rcv(sk, skb1); tp->ucopy.memory = 0; } else if (skb_queue_len(&tp->ucopy.prequeue) == 1) { wake_up_interruptible_sync_poll(sk_sleep(sk), POLLIN | POLLRDNORM | POLLRDBAND); if (!inet_csk_ack_scheduled(sk)) inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK, (3 * tcp_rto_min(sk)) / 4, TCP_RTO_MAX); } return true; } EXPORT_SYMBOL(tcp_prequeue); bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb) { u32 limit = sk->sk_rcvbuf + sk->sk_sndbuf; /* Only socket owner can try to collapse/prune rx queues * to reduce memory overhead, so add a little headroom here. * Few sockets backlog are possibly concurrently non empty. */ limit += 64*1024; /* In case all data was pulled from skb frags (in __pskb_pull_tail()), * we can fix skb->truesize to its real value to avoid future drops. * This is valid because skb is not yet charged to the socket. * It has been noticed pure SACK packets were sometimes dropped * (if cooked by drivers without copybreak feature). 
*/ if (!skb->data_len) skb->truesize = SKB_TRUESIZE(skb_end_offset(skb)); if (unlikely(sk_add_backlog(sk, skb, limit))) { bh_unlock_sock(sk); __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPBACKLOGDROP); return true; } return false; } EXPORT_SYMBOL(tcp_add_backlog); int tcp_filter(struct sock *sk, struct sk_buff *skb) { struct tcphdr *th = (struct tcphdr *)skb->data; unsigned int eaten = skb->len; int err; err = sk_filter_trim_cap(sk, skb, th->doff * 4); if (!err) { eaten -= skb->len; TCP_SKB_CB(skb)->end_seq -= eaten; } return err; } EXPORT_SYMBOL(tcp_filter); /* * From tcp_input.c */ int tcp_v4_rcv(struct sk_buff *skb) { struct net *net = dev_net(skb->dev); const struct iphdr *iph; const struct tcphdr *th; bool refcounted; struct sock *sk; int ret; if (skb->pkt_type != PACKET_HOST) goto discard_it; /* Count it even if it's bad */ __TCP_INC_STATS(net, TCP_MIB_INSEGS); if (!pskb_may_pull(skb, sizeof(struct tcphdr))) goto discard_it; th = (const struct tcphdr *)skb->data; if (unlikely(th->doff < sizeof(struct tcphdr) / 4)) goto bad_packet; if (!pskb_may_pull(skb, th->doff * 4)) goto discard_it; /* An explanation is required here, I think. * Packet length and doff are validated by header prediction, * provided case of th->doff==0 is eliminated. * So, we defer the checks. */ if (skb_checksum_init(skb, IPPROTO_TCP, inet_compute_pseudo)) goto csum_error; th = (const struct tcphdr *)skb->data; iph = ip_hdr(skb); /* This is tricky : We move IPCB at its correct location into TCP_SKB_CB() * barrier() makes sure compiler wont play fool^Waliasing games. 
*/ memmove(&TCP_SKB_CB(skb)->header.h4, IPCB(skb), sizeof(struct inet_skb_parm)); barrier(); TCP_SKB_CB(skb)->seq = ntohl(th->seq); TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin + skb->len - th->doff * 4); TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq); TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th); TCP_SKB_CB(skb)->tcp_tw_isn = 0; TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph); TCP_SKB_CB(skb)->sacked = 0; lookup: sk = __inet_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th), th->source, th->dest, &refcounted); if (!sk) goto no_tcp_socket; process: if (sk->sk_state == TCP_TIME_WAIT) goto do_time_wait; if (sk->sk_state == TCP_NEW_SYN_RECV) { struct request_sock *req = inet_reqsk(sk); struct sock *nsk; sk = req->rsk_listener; if (unlikely(tcp_v4_inbound_md5_hash(sk, skb))) { sk_drops_add(sk, skb); reqsk_put(req); goto discard_it; } if (unlikely(sk->sk_state != TCP_LISTEN)) { inet_csk_reqsk_queue_drop_and_put(sk, req); goto lookup; } /* We own a reference on the listener, increase it again * as we might lose it too soon. 
*/ sock_hold(sk); refcounted = true; nsk = tcp_check_req(sk, skb, req, false); if (!nsk) { reqsk_put(req); goto discard_and_relse; } if (nsk == sk) { reqsk_put(req); } else if (tcp_child_process(sk, nsk, skb)) { tcp_v4_send_reset(nsk, skb); goto discard_and_relse; } else { sock_put(sk); return 0; } } if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) { __NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP); goto discard_and_relse; } if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) goto discard_and_relse; if (tcp_v4_inbound_md5_hash(sk, skb)) goto discard_and_relse; nf_reset(skb); if (tcp_filter(sk, skb)) goto discard_and_relse; th = (const struct tcphdr *)skb->data; iph = ip_hdr(skb); skb->dev = NULL; if (sk->sk_state == TCP_LISTEN) { ret = tcp_v4_do_rcv(sk, skb); goto put_and_return; } sk_incoming_cpu_update(sk); bh_lock_sock_nested(sk); tcp_segs_in(tcp_sk(sk), skb); ret = 0; if (!sock_owned_by_user(sk)) { if (!tcp_prequeue(sk, skb)) ret = tcp_v4_do_rcv(sk, skb); } else if (tcp_add_backlog(sk, skb)) { goto discard_and_relse; } bh_unlock_sock(sk); put_and_return: if (refcounted) sock_put(sk); return ret; no_tcp_socket: if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) goto discard_it; if (tcp_checksum_complete(skb)) { csum_error: __TCP_INC_STATS(net, TCP_MIB_CSUMERRORS); bad_packet: __TCP_INC_STATS(net, TCP_MIB_INERRS); } else { tcp_v4_send_reset(NULL, skb); } discard_it: /* Discard frame. 
*/ kfree_skb(skb); return 0; discard_and_relse: sk_drops_add(sk, skb); if (refcounted) sock_put(sk); goto discard_it; do_time_wait: if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) { inet_twsk_put(inet_twsk(sk)); goto discard_it; } if (tcp_checksum_complete(skb)) { inet_twsk_put(inet_twsk(sk)); goto csum_error; } switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) { case TCP_TW_SYN: { struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev), &tcp_hashinfo, skb, __tcp_hdrlen(th), iph->saddr, th->source, iph->daddr, th->dest, inet_iif(skb)); if (sk2) { inet_twsk_deschedule_put(inet_twsk(sk)); sk = sk2; refcounted = false; goto process; } /* Fall through to ACK */ } case TCP_TW_ACK: tcp_v4_timewait_ack(sk, skb); break; case TCP_TW_RST: tcp_v4_send_reset(sk, skb); inet_twsk_deschedule_put(inet_twsk(sk)); goto discard_it; case TCP_TW_SUCCESS:; } goto discard_it; } static struct timewait_sock_ops tcp_timewait_sock_ops = { .twsk_obj_size = sizeof(struct tcp_timewait_sock), .twsk_unique = tcp_twsk_unique, .twsk_destructor= tcp_twsk_destructor, }; void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb) { struct dst_entry *dst = skb_dst(skb); if (dst && dst_hold_safe(dst)) { sk->sk_rx_dst = dst; inet_sk(sk)->rx_dst_ifindex = skb->skb_iif; } } EXPORT_SYMBOL(inet_sk_rx_dst_set); const struct inet_connection_sock_af_ops ipv4_specific = { .queue_xmit = ip_queue_xmit, .send_check = tcp_v4_send_check, .rebuild_header = inet_sk_rebuild_header, .sk_rx_dst_set = inet_sk_rx_dst_set, .conn_request = tcp_v4_conn_request, .syn_recv_sock = tcp_v4_syn_recv_sock, .net_header_len = sizeof(struct iphdr), .setsockopt = ip_setsockopt, .getsockopt = ip_getsockopt, .addr2sockaddr = inet_csk_addr2sockaddr, .sockaddr_len = sizeof(struct sockaddr_in), .bind_conflict = inet_csk_bind_conflict, #ifdef CONFIG_COMPAT .compat_setsockopt = compat_ip_setsockopt, .compat_getsockopt = compat_ip_getsockopt, #endif .mtu_reduced = tcp_v4_mtu_reduced, }; EXPORT_SYMBOL(ipv4_specific); 
#ifdef CONFIG_TCP_MD5SIG
/* AF-specific MD5 signature hooks (TCP-MD5, RFC 2385) installed on every
 * IPv4 TCP socket by tcp_v4_init_sock() below.
 */
static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
	.md5_lookup	= tcp_v4_md5_lookup,
	.calc_md5_hash	= tcp_v4_md5_hash_skb,
	.md5_parse	= tcp_v4_parse_md5_keys,
};
#endif

/* NOTE: A lot of things set to zero explicitly by call to
 * sk_alloc() so need not be done here.
 */
/* Per-socket init hook (struct proto ->init): runs the generic TCP socket
 * initialization and then points the connection sock at the IPv4-specific
 * operation tables. Always returns 0.
 */
static int tcp_v4_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	tcp_init_sock(sk);

	icsk->icsk_af_ops = &ipv4_specific;

#ifdef CONFIG_TCP_MD5SIG
	tcp_sk(sk)->af_specific = &tcp_sock_ipv4_specific;
#endif

	return 0;
}

/* Per-socket destroy hook (struct proto ->destroy): releases everything a
 * TCP socket may still own — timers, congestion-control state, queued skbs,
 * MD5 keys, prequeue, bind bucket, fastopen request and saved SYN — and
 * finally drops the per-protocol socket accounting.
 */
void tcp_v4_destroy_sock(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	tcp_clear_xmit_timers(sk);

	tcp_cleanup_congestion_control(sk);

	/* Cleanup up the write buffer. */
	tcp_write_queue_purge(sk);

	/* Cleans up our, hopefully empty, out_of_order_queue. */
	skb_rbtree_purge(&tp->out_of_order_queue);

#ifdef CONFIG_TCP_MD5SIG
	/* Clean up the MD5 key list, if any */
	if (tp->md5sig_info) {
		tcp_clear_md5_list(sk);
		/* RCU-deferred free: readers may still hold a reference */
		kfree_rcu(tp->md5sig_info, rcu);
		tp->md5sig_info = NULL;
	}
#endif

	/* Clean prequeue, it must be empty really */
	__skb_queue_purge(&tp->ucopy.prequeue);

	/* Clean up a referenced TCP bind bucket. */
	if (inet_csk(sk)->icsk_bind_hash)
		inet_put_port(sk);

	BUG_ON(tp->fastopen_rsk);

	/* If socket is aborted during connect operation */
	tcp_free_fastopen_req(tp);
	tcp_saved_syn_free(tp);

	local_bh_disable();
	sk_sockets_allocated_dec(sk);
	local_bh_enable();
}
EXPORT_SYMBOL(tcp_v4_destroy_sock);

#ifdef CONFIG_PROC_FS
/* Proc filesystem TCP sock list dumping. */

/*
 * Get next listener socket follow cur.  If cur is NULL, get first socket
 * starting from bucket given in st->bucket; when st->bucket is zero the
 * very first socket in the hash table is returned.
 */
/* NOTE(review): on success this returns with the current listening-hash
 * bucket lock held; the lock is released here only when a bucket is
 * exhausted, otherwise tcp_seq_stop() drops it.
 */
static void *listening_get_next(struct seq_file *seq, void *cur)
{
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);
	struct inet_listen_hashbucket *ilb;
	struct sock *sk = cur;

	if (!sk) {
get_head:
		ilb = &tcp_hashinfo.listening_hash[st->bucket];
		spin_lock_bh(&ilb->lock);
		sk = sk_head(&ilb->head);
		st->offset = 0;
		goto get_sk;
	}
	ilb = &tcp_hashinfo.listening_hash[st->bucket];
	++st->num;
	++st->offset;

	sk = sk_next(sk);
get_sk:
	sk_for_each_from(sk) {
		/* only report sockets belonging to this netns and family */
		if (!net_eq(sock_net(sk), net))
			continue;
		if (sk->sk_family == st->family)
			return sk;
	}
	spin_unlock_bh(&ilb->lock);
	st->offset = 0;
	if (++st->bucket < INET_LHTABLE_SIZE)
		goto get_head;
	return NULL;
}

/* Skip *pos listening sockets from the start of the table and return the
 * one at that index, or NULL when the table runs out first.
 */
static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	st->bucket = 0;
	st->offset = 0;
	rc = listening_get_next(seq, NULL);

	while (rc && *pos) {
		rc = listening_get_next(seq, rc);
		--*pos;
	}
	return rc;
}

/* True when the current established-hash bucket has no entries; checked
 * locklessly as a fast path before taking the bucket lock.
 */
static inline bool empty_bucket(const struct tcp_iter_state *st)
{
	return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain);
}

/*
 * Get first established socket starting from bucket given in st->bucket.
 * If st->bucket is zero, the very first socket in the hash is returned.
 */
/* NOTE(review): returns with the matched bucket's ehash lock held (released
 * by established_get_next() or tcp_seq_stop()).
 */
static void *established_get_first(struct seq_file *seq)
{
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);
	void *rc = NULL;

	st->offset = 0;
	for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
		struct sock *sk;
		struct hlist_nulls_node *node;
		spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);

		/* Lockless fast path for the common case of empty buckets */
		if (empty_bucket(st))
			continue;

		spin_lock_bh(lock);
		sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
			if (sk->sk_family != st->family ||
			    !net_eq(sock_net(sk), net)) {
				continue;
			}
			rc = sk;
			goto out;
		}
		spin_unlock_bh(lock);
	}
out:
	return rc;
}

/* Advance from cur within the established hash; when the current bucket is
 * exhausted, drop its lock and continue with the next bucket via
 * established_get_first().
 */
static void *established_get_next(struct seq_file *seq, void *cur)
{
	struct sock *sk = cur;
	struct hlist_nulls_node *node;
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);

	++st->num;
	++st->offset;

	sk = sk_nulls_next(sk);

	sk_nulls_for_each_from(sk, node) {
		if (sk->sk_family == st->family && net_eq(sock_net(sk), net))
			return sk;
	}

	spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
	++st->bucket;
	return established_get_first(seq);
}

/* Skip pos established sockets from the start of the table and return the
 * one at that index, or NULL if fewer exist.
 */
static void *established_get_idx(struct seq_file *seq, loff_t pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	st->bucket = 0;
	rc = established_get_first(seq);

	while (rc && pos) {
		rc = established_get_next(seq, rc);
		--pos;
	}
	return rc;
}

/* Map a flat seq_file position onto the two-phase walk: listening sockets
 * first, then established ones (listening_get_idx consumes from pos what it
 * skipped, the remainder indexes into the established table).
 */
static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
{
	void *rc;
	struct tcp_iter_state *st = seq->private;

	st->state = TCP_SEQ_STATE_LISTENING;
	rc	  = listening_get_idx(seq, &pos);

	if (!rc) {
		st->state = TCP_SEQ_STATE_ESTABLISHED;
		rc	  = established_get_idx(seq, pos);
	}

	return rc;
}

/* Resume the walk at the bucket/offset saved in the iterator state, instead
 * of re-walking from position 0 — an optimization for sequential reads of a
 * large table. st->num is preserved across the seek.
 */
static void *tcp_seek_last_pos(struct seq_file *seq)
{
	struct tcp_iter_state *st = seq->private;
	int offset = st->offset;
	int orig_num = st->num;
	void *rc = NULL;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
		if (st->bucket >= INET_LHTABLE_SIZE)
			break;
		st->state = TCP_SEQ_STATE_LISTENING;
		rc = listening_get_next(seq, NULL);
		while (offset-- && rc)
			rc = listening_get_next(seq, rc);
		if (rc)
			break;
		st->bucket = 0;
		st->state = TCP_SEQ_STATE_ESTABLISHED;
		/* Fallthrough */
	case TCP_SEQ_STATE_ESTABLISHED:
		if (st->bucket > tcp_hashinfo.ehash_mask)
			break;
		rc = established_get_first(seq);
		while (offset-- && rc)
			rc = established_get_next(seq, rc);
	}

	st->num = orig_num;

	return rc;
}

/* seq_file ->start: fast-resume at the last position when possible,
 * otherwise reset the iterator and index from scratch. *pos == 0 yields
 * SEQ_START_TOKEN so ->show can print the header line.
 */
static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	if (*pos && *pos == st->last_pos) {
		rc = tcp_seek_last_pos(seq);
		if (rc)
			goto out;
	}

	st->state = TCP_SEQ_STATE_LISTENING;
	st->num = 0;
	st->bucket = 0;
	st->offset = 0;
	rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;

out:
	st->last_pos = *pos;
	return rc;
}

/* seq_file ->next: step the walk, switching from the listening phase to the
 * established phase when the former is exhausted.
 */
static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc = NULL;

	if (v == SEQ_START_TOKEN) {
		rc = tcp_get_idx(seq, 0);
		goto out;
	}

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
		rc = listening_get_next(seq, v);
		if (!rc) {
			st->state = TCP_SEQ_STATE_ESTABLISHED;
			st->bucket = 0;
			st->offset = 0;
			rc	  = established_get_first(seq);
		}
		break;
	case TCP_SEQ_STATE_ESTABLISHED:
		rc = established_get_next(seq, v);
		break;
	}
out:
	++*pos;
	st->last_pos = *pos;
	return rc;
}

/* seq_file ->stop: drop whichever bucket lock the get_next/get_first
 * helpers left held for the element being shown.
 */
static void tcp_seq_stop(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
		if (v != SEQ_START_TOKEN)
			spin_unlock_bh(&tcp_hashinfo.listening_hash[st->bucket].lock);
		break;
	case TCP_SEQ_STATE_ESTABLISHED:
		if (v)
			spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
		break;
	}
}

/* ->open for the per-family /proc tcp files: allocate netns-aware seq_file
 * state and seed it from the tcp_seq_afinfo stashed in the proc entry.
 */
int tcp_seq_open(struct inode *inode, struct file *file)
{
	struct tcp_seq_afinfo *afinfo = PDE_DATA(inode);
	struct tcp_iter_state *s;
	int err;

	err = seq_open_net(inode, file, &afinfo->seq_ops,
			   sizeof(struct tcp_iter_state));
	if (err < 0)
		return err;

	s = ((struct seq_file *)file->private_data)->private;
	s->family	= afinfo->family;
	s->last_pos	= 0;
	return 0;
}
EXPORT_SYMBOL(tcp_seq_open);

/* Register a /proc/net entry for one address family's TCP socket list.
 * Fills in the shared start/next/stop iterator ops; returns 0 or -ENOMEM.
 */
int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo)
{
	int rc = 0;
	struct proc_dir_entry *p;

	afinfo->seq_ops.start	= tcp_seq_start;
	afinfo->seq_ops.next	= tcp_seq_next;
	afinfo->seq_ops.stop	= tcp_seq_stop;

	p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
			     afinfo->seq_fops, afinfo);
	if (!p)
		rc = -ENOMEM;
	return rc;
}
EXPORT_SYMBOL(tcp_proc_register);

/* Remove the /proc/net entry created by tcp_proc_register(). */
void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo)
{
	remove_proc_entry(afinfo->name, net->proc_net);
}
EXPORT_SYMBOL(tcp_proc_unregister);

/* Emit one /proc/net/tcp row for a request sock (SYN_RECV embryo). */
static void get_openreq4(const struct request_sock *req,
			 struct seq_file *f, int i)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	long delta = req->rsk_timer.expires - jiffies;

	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5u %8d %u %d %pK",
		i,
		ireq->ir_loc_addr,
		ireq->ir_num,
		ireq->ir_rmt_addr,
		ntohs(ireq->ir_rmt_port),
		TCP_SYN_RECV,
		0, 0, /* could print option size, but that is af dependent. */
		1,    /* timers active (only the expire timer) */
		jiffies_delta_to_clock_t(delta),
		req->num_timeout,
		from_kuid_munged(seq_user_ns(f),
				 sock_i_uid(req->rsk_listener)),
		0,  /* non standard timer */
		0, /* open_requests have no inode */
		0,
		req);
}

/* Emit one /proc/net/tcp row for a full socket (listening or connected). */
static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
{
	int timer_active;
	unsigned long timer_expires;
	const struct tcp_sock *tp = tcp_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);
	const struct inet_sock *inet = inet_sk(sk);
	const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
	__be32 dest = inet->inet_daddr;
	__be32 src = inet->inet_rcv_saddr;
	__u16 destp = ntohs(inet->inet_dport);
	__u16 srcp = ntohs(inet->inet_sport);
	int rx_queue;
	int state;

	/* "tr" column: 1 = retransmit-style timer, 4 = zero-window probe,
	 * 2 = keepalive, 0 = none pending.
	 */
	if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
	    icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
	    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
		timer_active	= 1;
		timer_expires	= icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active	= 4;
		timer_expires	= icsk->icsk_timeout;
	} else if (timer_pending(&sk->sk_timer)) {
		timer_active	= 2;
		timer_expires	= sk->sk_timer.expires;
	} else {
		timer_active	= 0;
		timer_expires	= jiffies;
	}

	state = sk_state_load(sk);
	if (state == TCP_LISTEN)
		rx_queue = sk->sk_ack_backlog;
	else
		/* Because we don't lock the socket,
		 * we might find a transient negative value.
		 */
		rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);

	seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
			"%08X %5u %8d %lu %d %pK %lu %lu %u %u %d",
		i, src, srcp, dest, destp, state,
		tp->write_seq - tp->snd_una,
		rx_queue,
		timer_active,
		jiffies_delta_to_clock_t(timer_expires - jiffies),
		icsk->icsk_retransmits,
		from_kuid_munged(seq_user_ns(f), sock_i_uid(sk)),
		icsk->icsk_probes_out,
		sock_i_ino(sk),
		atomic_read(&sk->sk_refcnt), sk,
		jiffies_to_clock_t(icsk->icsk_rto),
		jiffies_to_clock_t(icsk->icsk_ack.ato),
		(icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
		tp->snd_cwnd,
		state == TCP_LISTEN ?
		    fastopenq->max_qlen :
		    (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh));
}

/* Emit one /proc/net/tcp row for a TIME_WAIT mini-socket. */
static void get_timewait4_sock(const struct inet_timewait_sock *tw,
			       struct seq_file *f, int i)
{
	long delta = tw->tw_timer.expires - jiffies;
	__be32 dest, src;
	__u16 destp, srcp;

	dest  = tw->tw_daddr;
	src   = tw->tw_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK",
		i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
		3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
		atomic_read(&tw->tw_refcnt), tw);
}

/* Fixed output width for each /proc/net/tcp row. */
#define TMPSZ 150

/* seq_file ->show: print the header for SEQ_START_TOKEN, otherwise dispatch
 * on the socket flavour (TIME_WAIT, NEW_SYN_RECV, or full sock) and pad the
 * row to the fixed width.
 */
static int tcp4_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st;
	struct sock *sk = v;

	seq_setwidth(seq, TMPSZ - 1);
	if (v == SEQ_START_TOKEN) {
		seq_puts(seq, " sl local_address rem_address st tx_queue "
			   "rx_queue tr tm->when retrnsmt uid timeout "
			   "inode");
		goto out;
	}
	st = seq->private;

	if (sk->sk_state == TCP_TIME_WAIT)
		get_timewait4_sock(v, seq, st->num);
	else if (sk->sk_state == TCP_NEW_SYN_RECV)
		get_openreq4(v, seq, st->num);
	else
		get_tcp4_sock(v, seq, st->num);
out:
	seq_pad(seq, '\n');
	return 0;
}

static const struct file_operations tcp_afinfo_seq_fops = {
	.owner   = THIS_MODULE,
	.open    = tcp_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net
};

/* Ties /proc/net/tcp (AF_INET) to the iterator/show ops above. */
static struct tcp_seq_afinfo tcp4_seq_afinfo = {
	.name		= "tcp",
	.family		= AF_INET,
	.seq_fops	= &tcp_afinfo_seq_fops,
	.seq_ops	= {
		.show		= tcp4_seq_show,
	},
};

/* Per-netns creation of the /proc/net/tcp entry. */
static int __net_init tcp4_proc_init_net(struct net *net)
{
	return tcp_proc_register(net, &tcp4_seq_afinfo);
}

/* Per-netns removal of the /proc/net/tcp entry. */
static void __net_exit tcp4_proc_exit_net(struct net *net)
{
	tcp_proc_unregister(net, &tcp4_seq_afinfo);
}

static struct pernet_operations tcp4_net_ops = {
	.init = tcp4_proc_init_net,
	.exit = tcp4_proc_exit_net,
};

int __init tcp4_proc_init(void)
{
	return register_pernet_subsys(&tcp4_net_ops);
}

void tcp4_proc_exit(void)
{
	unregister_pernet_subsys(&tcp4_net_ops);
}
#endif /* CONFIG_PROC_FS */

/* The IPv4 TCP protocol descriptor: binds socket-layer operations to the
 * TCP implementations defined in this file and elsewhere in net/ipv4.
 */
struct proto tcp_prot = {
	.name			= "TCP",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.connect		= tcp_v4_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v4_init_sock,
	.destroy		= tcp_v4_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.recvmsg		= tcp_recvmsg,
	.sendmsg		= tcp_sendmsg,
	.sendpage		= tcp_sendpage,
	.backlog_rcv		= tcp_v4_do_rcv,
	.release_cb		= tcp_release_cb,
	.hash			= inet_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.stream_memory_free	= tcp_stream_memory_free,
	.sockets_allocated	= &tcp_sockets_allocated,
	.orphan_count		= &tcp_orphan_count,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.sysctl_mem		= sysctl_tcp_mem,
	.sysctl_wmem		= sysctl_tcp_wmem,
	.sysctl_rmem		= sysctl_tcp_rmem,
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp_sock),
	.slab_flags		= SLAB_DESTROY_BY_RCU,
	.twsk_prot		= &tcp_timewait_sock_ops,
	.rsk_prot		= &tcp_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
	.no_autobind		= true,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
	.diag_destroy		= tcp_abort,
};
EXPORT_SYMBOL(tcp_prot);

/* Per-netns teardown: destroy each per-CPU control socket, then free the
 * per-CPU array itself.
 */
static void __net_exit tcp_sk_exit(struct net *net)
{
	int cpu;

	for_each_possible_cpu(cpu)
		inet_ctl_sock_destroy(*per_cpu_ptr(net->ipv4.tcp_sk, cpu));
	free_percpu(net->ipv4.tcp_sk);
}

/* Per-netns setup: allocate one kernel control socket per possible CPU
 * (used for sending RSTs/ACKs without a user socket) and seed every TCP
 * sysctl with its default. On partial failure, unwinds via tcp_sk_exit().
 */
static int __net_init tcp_sk_init(struct net *net)
{
	int res, cpu;

	net->ipv4.tcp_sk = alloc_percpu(struct sock *);
	if (!net->ipv4.tcp_sk)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		struct sock *sk;

		res = inet_ctl_sock_create(&sk, PF_INET, SOCK_RAW,
					   IPPROTO_TCP, net);
		if (res)
			goto fail;
		sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
		*per_cpu_ptr(net->ipv4.tcp_sk, cpu) = sk;
	}

	net->ipv4.sysctl_tcp_ecn = 2;
	net->ipv4.sysctl_tcp_ecn_fallback = 1;

	net->ipv4.sysctl_tcp_base_mss = TCP_BASE_MSS;
	net->ipv4.sysctl_tcp_probe_threshold = TCP_PROBE_THRESHOLD;
	net->ipv4.sysctl_tcp_probe_interval = TCP_PROBE_INTERVAL;

	net->ipv4.sysctl_tcp_keepalive_time = TCP_KEEPALIVE_TIME;
	net->ipv4.sysctl_tcp_keepalive_probes = TCP_KEEPALIVE_PROBES;
	net->ipv4.sysctl_tcp_keepalive_intvl = TCP_KEEPALIVE_INTVL;

	net->ipv4.sysctl_tcp_syn_retries = TCP_SYN_RETRIES;
	net->ipv4.sysctl_tcp_synack_retries = TCP_SYNACK_RETRIES;
	net->ipv4.sysctl_tcp_syncookies = 1;
	net->ipv4.sysctl_tcp_reordering = TCP_FASTRETRANS_THRESH;
	net->ipv4.sysctl_tcp_retries1 = TCP_RETR1;
	net->ipv4.sysctl_tcp_retries2 = TCP_RETR2;
	net->ipv4.sysctl_tcp_orphan_retries = 0;
	net->ipv4.sysctl_tcp_fin_timeout = TCP_FIN_TIMEOUT;
	net->ipv4.sysctl_tcp_notsent_lowat = UINT_MAX;

	return 0;
fail:
	tcp_sk_exit(net);

	return res;
}

/* Batched netns exit: flush all TIME_WAIT sockets belonging to the dying
 * namespaces in one pass over the hash table.
 */
static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
{
	inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET);
}

static struct pernet_operations __net_initdata tcp_sk_ops = {
	.init	    = tcp_sk_init,
	.exit	    = tcp_sk_exit,
	.exit_batch = tcp_sk_exit_batch,
};

/* Boot-time entry point: initialize the global TCP hash tables and register
 * the per-netns operations; a failure here is fatal.
 */
void __init tcp_v4_init(void)
{
	inet_hashinfo_init(&tcp_hashinfo);
	if (register_pernet_subsys(&tcp_sk_ops))
		panic("Failed to create the TCP control socket.\n");
}
./CrossVul/dataset_final_sorted/CWE-284/c/good_5349_1
crossvul-cpp_data_bad_5349_1
/* * INET An implementation of the TCP/IP protocol suite for the LINUX * operating system. INET is implemented using the BSD Socket * interface as the means of communication with the user level. * * Implementation of the Transmission Control Protocol(TCP). * * IPv4 specific functions * * * code split from: * linux/ipv4/tcp.c * linux/ipv4/tcp_input.c * linux/ipv4/tcp_output.c * * See tcp.c for author information * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ /* * Changes: * David S. Miller : New socket lookup architecture. * This code is dedicated to John Dyson. * David S. Miller : Change semantics of established hash, * half is devoted to TIME_WAIT sockets * and the rest go in the other half. * Andi Kleen : Add support for syncookies and fixed * some bugs: ip options weren't passed to * the TCP layer, missed a check for an * ACK bit. * Andi Kleen : Implemented fast path mtu discovery. * Fixed many serious bugs in the * request_sock handling and moved * most of it into the af independent code. * Added tail drop and some other bugfixes. * Added new listen semantics. * Mike McLagan : Routing by source * Juan Jose Ciarlante: ip_dynaddr bits * Andi Kleen: various fixes. * Vitaly E. Lavrov : Transparent proxy revived after year * coma. * Andi Kleen : Fix new listen. * Andi Kleen : Fix accept error reporting. * YOSHIFUJI Hideaki @USAGI and: Support IPV6_V6ONLY socket option, which * Alexey Kuznetsov allow both IPv4 and IPv6 sockets to bind * a single port at the same time. 
*/ #define pr_fmt(fmt) "TCP: " fmt #include <linux/bottom_half.h> #include <linux/types.h> #include <linux/fcntl.h> #include <linux/module.h> #include <linux/random.h> #include <linux/cache.h> #include <linux/jhash.h> #include <linux/init.h> #include <linux/times.h> #include <linux/slab.h> #include <net/net_namespace.h> #include <net/icmp.h> #include <net/inet_hashtables.h> #include <net/tcp.h> #include <net/transp_v6.h> #include <net/ipv6.h> #include <net/inet_common.h> #include <net/timewait_sock.h> #include <net/xfrm.h> #include <net/secure_seq.h> #include <net/busy_poll.h> #include <linux/inet.h> #include <linux/ipv6.h> #include <linux/stddef.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <crypto/hash.h> #include <linux/scatterlist.h> int sysctl_tcp_tw_reuse __read_mostly; int sysctl_tcp_low_latency __read_mostly; #ifdef CONFIG_TCP_MD5SIG static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key, __be32 daddr, __be32 saddr, const struct tcphdr *th); #endif struct inet_hashinfo tcp_hashinfo; EXPORT_SYMBOL(tcp_hashinfo); static __u32 tcp_v4_init_sequence(const struct sk_buff *skb) { return secure_tcp_sequence_number(ip_hdr(skb)->daddr, ip_hdr(skb)->saddr, tcp_hdr(skb)->dest, tcp_hdr(skb)->source); } int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp) { const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw); struct tcp_sock *tp = tcp_sk(sk); /* With PAWS, it is safe from the viewpoint of data integrity. Even without PAWS it is safe provided sequence spaces do not overlap i.e. at data rates <= 80Mbit/sec. Actually, the idea is close to VJ's one, only timestamp cache is held not per host, but per port pair and TW bucket is used as state holder. If TW bucket has been already destroyed we fall back to VJ's scheme and use initial timestamp retrieved from peer table. 
*/ if (tcptw->tw_ts_recent_stamp && (!twp || (sysctl_tcp_tw_reuse && get_seconds() - tcptw->tw_ts_recent_stamp > 1))) { tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2; if (tp->write_seq == 0) tp->write_seq = 1; tp->rx_opt.ts_recent = tcptw->tw_ts_recent; tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp; sock_hold(sktw); return 1; } return 0; } EXPORT_SYMBOL_GPL(tcp_twsk_unique); /* This will initiate an outgoing connection. */ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) { struct sockaddr_in *usin = (struct sockaddr_in *)uaddr; struct inet_sock *inet = inet_sk(sk); struct tcp_sock *tp = tcp_sk(sk); __be16 orig_sport, orig_dport; __be32 daddr, nexthop; struct flowi4 *fl4; struct rtable *rt; int err; struct ip_options_rcu *inet_opt; if (addr_len < sizeof(struct sockaddr_in)) return -EINVAL; if (usin->sin_family != AF_INET) return -EAFNOSUPPORT; nexthop = daddr = usin->sin_addr.s_addr; inet_opt = rcu_dereference_protected(inet->inet_opt, lockdep_sock_is_held(sk)); if (inet_opt && inet_opt->opt.srr) { if (!daddr) return -EINVAL; nexthop = inet_opt->opt.faddr; } orig_sport = inet->inet_sport; orig_dport = usin->sin_port; fl4 = &inet->cork.fl.u.ip4; rt = ip_route_connect(fl4, nexthop, inet->inet_saddr, RT_CONN_FLAGS(sk), sk->sk_bound_dev_if, IPPROTO_TCP, orig_sport, orig_dport, sk); if (IS_ERR(rt)) { err = PTR_ERR(rt); if (err == -ENETUNREACH) IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES); return err; } if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) { ip_rt_put(rt); return -ENETUNREACH; } if (!inet_opt || !inet_opt->opt.srr) daddr = fl4->daddr; if (!inet->inet_saddr) inet->inet_saddr = fl4->saddr; sk_rcv_saddr_set(sk, inet->inet_saddr); if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) { /* Reset inherited state */ tp->rx_opt.ts_recent = 0; tp->rx_opt.ts_recent_stamp = 0; if (likely(!tp->repair)) tp->write_seq = 0; } if (tcp_death_row.sysctl_tw_recycle && !tp->rx_opt.ts_recent_stamp && fl4->daddr == daddr) 
tcp_fetch_timewait_stamp(sk, &rt->dst); inet->inet_dport = usin->sin_port; sk_daddr_set(sk, daddr); inet_csk(sk)->icsk_ext_hdr_len = 0; if (inet_opt) inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen; tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT; /* Socket identity is still unknown (sport may be zero). * However we set state to SYN-SENT and not releasing socket * lock select source port, enter ourselves into the hash tables and * complete initialization after this. */ tcp_set_state(sk, TCP_SYN_SENT); err = inet_hash_connect(&tcp_death_row, sk); if (err) goto failure; sk_set_txhash(sk); rt = ip_route_newports(fl4, rt, orig_sport, orig_dport, inet->inet_sport, inet->inet_dport, sk); if (IS_ERR(rt)) { err = PTR_ERR(rt); rt = NULL; goto failure; } /* OK, now commit destination to socket. */ sk->sk_gso_type = SKB_GSO_TCPV4; sk_setup_caps(sk, &rt->dst); if (!tp->write_seq && likely(!tp->repair)) tp->write_seq = secure_tcp_sequence_number(inet->inet_saddr, inet->inet_daddr, inet->inet_sport, usin->sin_port); inet->inet_id = tp->write_seq ^ jiffies; err = tcp_connect(sk); rt = NULL; if (err) goto failure; return 0; failure: /* * This unhashes the socket and releases the local port, * if necessary. */ tcp_set_state(sk, TCP_CLOSE); ip_rt_put(rt); sk->sk_route_caps = 0; inet->inet_dport = 0; return err; } EXPORT_SYMBOL(tcp_v4_connect); /* * This routine reacts to ICMP_FRAG_NEEDED mtu indications as defined in RFC1191. * It can be called through tcp_release_cb() if socket was owned by user * at the time tcp_v4_err() was called to handle ICMP message. */ void tcp_v4_mtu_reduced(struct sock *sk) { struct dst_entry *dst; struct inet_sock *inet = inet_sk(sk); u32 mtu = tcp_sk(sk)->mtu_info; dst = inet_csk_update_pmtu(sk, mtu); if (!dst) return; /* Something is about to be wrong... Remember soft error * for the case, if this connection will not able to recover. 
*/ if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst)) sk->sk_err_soft = EMSGSIZE; mtu = dst_mtu(dst); if (inet->pmtudisc != IP_PMTUDISC_DONT && ip_sk_accept_pmtu(sk) && inet_csk(sk)->icsk_pmtu_cookie > mtu) { tcp_sync_mss(sk, mtu); /* Resend the TCP packet because it's * clear that the old packet has been * dropped. This is the new "fast" path mtu * discovery. */ tcp_simple_retransmit(sk); } /* else let the usual retransmit timer handle it */ } EXPORT_SYMBOL(tcp_v4_mtu_reduced); static void do_redirect(struct sk_buff *skb, struct sock *sk) { struct dst_entry *dst = __sk_dst_check(sk, 0); if (dst) dst->ops->redirect(dst, sk, skb); } /* handle ICMP messages on TCP_NEW_SYN_RECV request sockets */ void tcp_req_err(struct sock *sk, u32 seq, bool abort) { struct request_sock *req = inet_reqsk(sk); struct net *net = sock_net(sk); /* ICMPs are not backlogged, hence we cannot get * an established socket here. */ if (seq != tcp_rsk(req)->snt_isn) { __NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS); } else if (abort) { /* * Still in SYN_RECV, just remove it silently. * There is no good way to pass the error to the newly * created socket, and POSIX does not want network * errors returned from accept(). */ inet_csk_reqsk_queue_drop(req->rsk_listener, req); tcp_listendrop(req->rsk_listener); } reqsk_put(req); } EXPORT_SYMBOL(tcp_req_err); /* * This routine is called by the ICMP module when it gets some * sort of error condition. If err < 0 then the socket should * be closed and the error returned to the user. If err > 0 * it's just the icmp type << 8 | icmp code. After adjustment * header points to the first 8 bytes of the tcp header. We need * to find the appropriate port. * * The locking strategy used here is very "optimistic". When * someone else accesses the socket the ICMP is just dropped * and for some paths there is no check at all. * A more general error queue to queue errors for later handling * is probably better. 
* */ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info) { const struct iphdr *iph = (const struct iphdr *)icmp_skb->data; struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2)); struct inet_connection_sock *icsk; struct tcp_sock *tp; struct inet_sock *inet; const int type = icmp_hdr(icmp_skb)->type; const int code = icmp_hdr(icmp_skb)->code; struct sock *sk; struct sk_buff *skb; struct request_sock *fastopen; __u32 seq, snd_una; __u32 remaining; int err; struct net *net = dev_net(icmp_skb->dev); sk = __inet_lookup_established(net, &tcp_hashinfo, iph->daddr, th->dest, iph->saddr, ntohs(th->source), inet_iif(icmp_skb)); if (!sk) { __ICMP_INC_STATS(net, ICMP_MIB_INERRORS); return; } if (sk->sk_state == TCP_TIME_WAIT) { inet_twsk_put(inet_twsk(sk)); return; } seq = ntohl(th->seq); if (sk->sk_state == TCP_NEW_SYN_RECV) return tcp_req_err(sk, seq, type == ICMP_PARAMETERPROB || type == ICMP_TIME_EXCEEDED || (type == ICMP_DEST_UNREACH && (code == ICMP_NET_UNREACH || code == ICMP_HOST_UNREACH))); bh_lock_sock(sk); /* If too many ICMPs get dropped on busy * servers this needs to be solved differently. * We do take care of PMTU discovery (RFC1191) special case : * we can receive locally generated ICMP messages while socket is held. */ if (sock_owned_by_user(sk)) { if (!(type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED)) __NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS); } if (sk->sk_state == TCP_CLOSE) goto out; if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) { __NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP); goto out; } icsk = inet_csk(sk); tp = tcp_sk(sk); /* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */ fastopen = tp->fastopen_rsk; snd_una = fastopen ? 
tcp_rsk(fastopen)->snt_isn : tp->snd_una; if (sk->sk_state != TCP_LISTEN && !between(seq, snd_una, tp->snd_nxt)) { __NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS); goto out; } switch (type) { case ICMP_REDIRECT: do_redirect(icmp_skb, sk); goto out; case ICMP_SOURCE_QUENCH: /* Just silently ignore these. */ goto out; case ICMP_PARAMETERPROB: err = EPROTO; break; case ICMP_DEST_UNREACH: if (code > NR_ICMP_UNREACH) goto out; if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */ /* We are not interested in TCP_LISTEN and open_requests * (SYN-ACKs send out by Linux are always <576bytes so * they should go through unfragmented). */ if (sk->sk_state == TCP_LISTEN) goto out; tp->mtu_info = info; if (!sock_owned_by_user(sk)) { tcp_v4_mtu_reduced(sk); } else { if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, &tp->tsq_flags)) sock_hold(sk); } goto out; } err = icmp_err_convert[code].errno; /* check if icmp_skb allows revert of backoff * (see draft-zimmermann-tcp-lcd) */ if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH) break; if (seq != tp->snd_una || !icsk->icsk_retransmits || !icsk->icsk_backoff || fastopen) break; if (sock_owned_by_user(sk)) break; icsk->icsk_backoff--; icsk->icsk_rto = tp->srtt_us ? __tcp_set_rto(tp) : TCP_TIMEOUT_INIT; icsk->icsk_rto = inet_csk_rto_backoff(icsk, TCP_RTO_MAX); skb = tcp_write_queue_head(sk); BUG_ON(!skb); remaining = icsk->icsk_rto - min(icsk->icsk_rto, tcp_time_stamp - tcp_skb_timestamp(skb)); if (remaining) { inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, remaining, TCP_RTO_MAX); } else { /* RTO revert clocked out retransmission. * Will retransmit now */ tcp_retransmit_timer(sk); } break; case ICMP_TIME_EXCEEDED: err = EHOSTUNREACH; break; default: goto out; } switch (sk->sk_state) { case TCP_SYN_SENT: case TCP_SYN_RECV: /* Only in fast or simultaneous open. If a fast open socket is * is already accepted it is treated as a connected one below. 
*/ if (fastopen && !fastopen->sk) break; if (!sock_owned_by_user(sk)) { sk->sk_err = err; sk->sk_error_report(sk); tcp_done(sk); } else { sk->sk_err_soft = err; } goto out; } /* If we've already connected we will keep trying * until we time out, or the user gives up. * * rfc1122 4.2.3.9 allows to consider as hard errors * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too, * but it is obsoleted by pmtu discovery). * * Note, that in modern internet, where routing is unreliable * and in each dark corner broken firewalls sit, sending random * errors ordered by their masters even this two messages finally lose * their original sense (even Linux sends invalid PORT_UNREACHs) * * Now we are in compliance with RFCs. * --ANK (980905) */ inet = inet_sk(sk); if (!sock_owned_by_user(sk) && inet->recverr) { sk->sk_err = err; sk->sk_error_report(sk); } else { /* Only an error on timeout */ sk->sk_err_soft = err; } out: bh_unlock_sock(sk); sock_put(sk); } void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr) { struct tcphdr *th = tcp_hdr(skb); if (skb->ip_summed == CHECKSUM_PARTIAL) { th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0); skb->csum_start = skb_transport_header(skb) - skb->head; skb->csum_offset = offsetof(struct tcphdr, check); } else { th->check = tcp_v4_check(skb->len, saddr, daddr, csum_partial(th, th->doff << 2, skb->csum)); } } /* This routine computes an IPv4 TCP checksum. */ void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb) { const struct inet_sock *inet = inet_sk(sk); __tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr); } EXPORT_SYMBOL(tcp_v4_send_check); /* * This routine will send an RST to the other tcp. * * Someone asks: why I NEVER use socket parameters (TOS, TTL etc.) * for reset. * Answer: if a packet caused RST, it is not for a socket * existing in our system, if it is matched to a socket, * it is just duplicate segment or bug in other side's TCP. 
* So that we build reply only basing on parameters * arrived with segment. * Exception: precedence violation. We do not implement it in any case. */ static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb) { const struct tcphdr *th = tcp_hdr(skb); struct { struct tcphdr th; #ifdef CONFIG_TCP_MD5SIG __be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)]; #endif } rep; struct ip_reply_arg arg; #ifdef CONFIG_TCP_MD5SIG struct tcp_md5sig_key *key = NULL; const __u8 *hash_location = NULL; unsigned char newhash[16]; int genhash; struct sock *sk1 = NULL; #endif struct net *net; /* Never send a reset in response to a reset. */ if (th->rst) return; /* If sk not NULL, it means we did a successful lookup and incoming * route had to be correct. prequeue might have dropped our dst. */ if (!sk && skb_rtable(skb)->rt_type != RTN_LOCAL) return; /* Swap the send and the receive. */ memset(&rep, 0, sizeof(rep)); rep.th.dest = th->source; rep.th.source = th->dest; rep.th.doff = sizeof(struct tcphdr) / 4; rep.th.rst = 1; if (th->ack) { rep.th.seq = th->ack_seq; } else { rep.th.ack = 1; rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin + skb->len - (th->doff << 2)); } memset(&arg, 0, sizeof(arg)); arg.iov[0].iov_base = (unsigned char *)&rep; arg.iov[0].iov_len = sizeof(rep.th); net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev); #ifdef CONFIG_TCP_MD5SIG rcu_read_lock(); hash_location = tcp_parse_md5sig_option(th); if (sk && sk_fullsock(sk)) { key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *) &ip_hdr(skb)->saddr, AF_INET); } else if (hash_location) { /* * active side is lost. Try to find listening socket through * source port, and then find md5 key through listening socket. * we are not loose security here: * Incoming packet is checked with md5 hash with finding key, * no RST generated if md5 hash doesn't match. 
*/ sk1 = __inet_lookup_listener(net, &tcp_hashinfo, NULL, 0, ip_hdr(skb)->saddr, th->source, ip_hdr(skb)->daddr, ntohs(th->source), inet_iif(skb)); /* don't send rst if it can't find key */ if (!sk1) goto out; key = tcp_md5_do_lookup(sk1, (union tcp_md5_addr *) &ip_hdr(skb)->saddr, AF_INET); if (!key) goto out; genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, skb); if (genhash || memcmp(hash_location, newhash, 16) != 0) goto out; } if (key) { rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) | (TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG); /* Update length and the length the header thinks exists */ arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED; rep.th.doff = arg.iov[0].iov_len / 4; tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1], key, ip_hdr(skb)->saddr, ip_hdr(skb)->daddr, &rep.th); } #endif arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr, ip_hdr(skb)->saddr, /* XXX */ arg.iov[0].iov_len, IPPROTO_TCP, 0); arg.csumoffset = offsetof(struct tcphdr, check) / 2; arg.flags = (sk && inet_sk_transparent(sk)) ? IP_REPLY_ARG_NOSRCCHECK : 0; /* When socket is gone, all binding information is lost. * routing might fail in this case. No choice here, if we choose to force * input interface, we will misroute in case of asymmetric route. */ if (sk) arg.bound_dev_if = sk->sk_bound_dev_if; BUILD_BUG_ON(offsetof(struct sock, sk_bound_dev_if) != offsetof(struct inet_timewait_sock, tw_bound_dev_if)); arg.tos = ip_hdr(skb)->tos; local_bh_disable(); ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk), skb, &TCP_SKB_CB(skb)->header.h4.opt, ip_hdr(skb)->saddr, ip_hdr(skb)->daddr, &arg, arg.iov[0].iov_len); __TCP_INC_STATS(net, TCP_MIB_OUTSEGS); __TCP_INC_STATS(net, TCP_MIB_OUTRSTS); local_bh_enable(); #ifdef CONFIG_TCP_MD5SIG out: rcu_read_unlock(); #endif } /* The code following below sending ACKs in SYN-RECV and TIME-WAIT states outside socket context is ugly, certainly. What can I do? 
 */
/* Build and send a bare ACK for @skb's flow with the given sequence
 * numbers/window, optionally carrying a timestamp option (when @tsecr
 * is non-zero) and an MD5 signature option (when @key is non-NULL).
 * Used for TIME-WAIT and SYN-RECV replies below, where no full socket
 * context is available.
 */
static void tcp_v4_send_ack(struct net *net,
			    struct sk_buff *skb, u32 seq, u32 ack,
			    u32 win, u32 tsval, u32 tsecr, int oif,
			    struct tcp_md5sig_key *key,
			    int reply_flags, u8 tos)
{
	const struct tcphdr *th = tcp_hdr(skb);
	/* On-stack reply: TCP header plus room for timestamp and
	 * (optionally) MD5 options.
	 */
	struct {
		struct tcphdr th;
		__be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
#ifdef CONFIG_TCP_MD5SIG
			   + (TCPOLEN_MD5SIG_ALIGNED >> 2)
#endif
			];
	} rep;
	struct ip_reply_arg arg;

	memset(&rep.th, 0, sizeof(struct tcphdr));
	memset(&arg, 0, sizeof(arg));

	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);
	if (tsecr) {
		rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				   (TCPOPT_TIMESTAMP << 8) |
				   TCPOLEN_TIMESTAMP);
		rep.opt[1] = htonl(tsval);
		rep.opt[2] = htonl(tsecr);
		arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
	}

	/* Swap the send and the receive. */
	rep.th.dest = th->source;
	rep.th.source = th->dest;
	rep.th.doff = arg.iov[0].iov_len / 4;
	rep.th.seq = htonl(seq);
	rep.th.ack_seq = htonl(ack);
	rep.th.ack = 1;
	rep.th.window = htons(win);

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		/* MD5 option goes after the timestamp option if one was
		 * emitted (3 x __be32), else at the start of rep.opt[].
		 */
		int offset = (tsecr) ? 3 : 0;

		rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
					  (TCPOPT_NOP << 16) |
					  (TCPOPT_MD5SIG << 8) |
					  TCPOLEN_MD5SIG);
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len/4;

		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
				    key, ip_hdr(skb)->saddr,
				    ip_hdr(skb)->daddr, &rep.th);
	}
#endif
	arg.flags = reply_flags;
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
	if (oif)
		arg.bound_dev_if = oif;
	arg.tos = tos;
	local_bh_disable();
	ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
			      skb, &TCP_SKB_CB(skb)->header.h4.opt,
			      ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
			      &arg, arg.iov[0].iov_len);

	__TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
	local_bh_enable();
}

/* ACK a segment received for a TIME-WAIT socket, echoing the saved
 * timestamp state, then drop the timewait reference.
 */
static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v4_send_ack(sock_net(sk), skb,
			tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcp_time_stamp + tcptw->tw_ts_offset,
			tcptw->tw_ts_recent,
			tw->tw_bound_dev_if,
			tcp_twsk_md5_key(tcptw),
			tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0,
			tw->tw_tos
			);

	inet_twsk_put(tw);
}

/* ACK on behalf of a request socket (SYN-RECV), before a full child
 * socket exists.
 */
static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
	 */
	u32 seq = (sk->sk_state == TCP_LISTEN) ? tcp_rsk(req)->snt_isn + 1 :
					     tcp_sk(sk)->snd_nxt;

	/* RFC 7323 2.3
	 * The window field (SEG.WND) of every outgoing segment, with the
	 * exception of <SYN> segments, MUST be right-shifted by
	 * Rcv.Wind.Shift bits:
	 */
	tcp_v4_send_ack(sock_net(sk), skb, seq,
			tcp_rsk(req)->rcv_nxt,
			req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
			tcp_time_stamp,
			req->ts_recent,
			0,
			tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->daddr,
					  AF_INET),
			inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
			ip_hdr(skb)->tos);
}

/*
 *	Send a SYN-ACK after having received a SYN.
 *	This still operates on a request_sock only, not on a big
 *	socket.
 */
static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
			      struct flowi *fl,
			      struct request_sock *req,
			      struct tcp_fastopen_cookie *foc,
			      enum tcp_synack_type synack_type)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct flowi4 fl4;
	int err = -1;
	struct sk_buff *skb;

	/* First, grab a route. */
	if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
		return -1;

	skb = tcp_make_synack(sk, dst, req, foc, synack_type);

	if (skb) {
		__tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr);

		err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
					    ireq->ir_rmt_addr,
					    ireq->opt);
		/* Map NET_XMIT_* codes to 0/-errno for the caller. */
		err = net_xmit_eval(err);
	}

	return err;
}

/*
 *	IPv4 request_sock destructor.
 */
static void tcp_v4_reqsk_destructor(struct request_sock *req)
{
	/* Free the IP options saved by tcp_v4_init_req(). */
	kfree(inet_rsk(req)->opt);
}

#ifdef CONFIG_TCP_MD5SIG
/*
 * RFC2385 MD5 checksumming requires a mapping of
 * IP address->MD5 Key.
 * We need to maintain these in the sk structure.
 */

/* Find the Key structure for an address.
 */
/* Look up the MD5 key configured for @addr/@family on @sk, or NULL.
 * The per-socket key list is RCU-protected; see the check below.
 */
struct tcp_md5sig_key *tcp_md5_do_lookup(const struct sock *sk,
					 const union tcp_md5_addr *addr,
					 int family)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;
	unsigned int size = sizeof(struct in_addr);
	const struct tcp_md5sig_info *md5sig;

	/* caller either holds rcu_read_lock() or socket lock */
	md5sig = rcu_dereference_check(tp->md5sig_info,
				       lockdep_sock_is_held(sk));
	if (!md5sig)
		return NULL;
#if IS_ENABLED(CONFIG_IPV6)
	if (family == AF_INET6)
		size = sizeof(struct in6_addr);
#endif
	hlist_for_each_entry_rcu(key, &md5sig->head, node) {
		if (key->family != family)
			continue;
		if (!memcmp(&key->addr, addr, size))
			return key;
	}
	return NULL;
}
EXPORT_SYMBOL(tcp_md5_do_lookup);

/* Convenience wrapper: look up the MD5 key for @addr_sk's peer address. */
struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
					 const struct sock *addr_sk)
{
	const union tcp_md5_addr *addr;

	addr = (const union tcp_md5_addr *)&addr_sk->sk_daddr;
	return tcp_md5_do_lookup(sk, addr, AF_INET);
}
EXPORT_SYMBOL(tcp_v4_md5_lookup);

/* This can be called on a newly created socket, from other files */
int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
		   int family, const u8 *newkey, u8 newkeylen, gfp_t gfp)
{
	/* Add Key to the list */
	struct tcp_md5sig_key *key;
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_info *md5sig;

	key = tcp_md5_do_lookup(sk, addr, family);
	if (key) {
		/* Pre-existing entry - just update that one. */
		memcpy(key->key, newkey, newkeylen);
		key->keylen = newkeylen;
		return 0;
	}

	md5sig = rcu_dereference_protected(tp->md5sig_info,
					   lockdep_sock_is_held(sk));
	if (!md5sig) {
		md5sig = kmalloc(sizeof(*md5sig), gfp);
		if (!md5sig)
			return -ENOMEM;

		sk_nocaps_add(sk, NETIF_F_GSO_MASK);
		INIT_HLIST_HEAD(&md5sig->head);
		/* Publish the (initialised) info struct to RCU readers. */
		rcu_assign_pointer(tp->md5sig_info, md5sig);
	}

	key = sock_kmalloc(sk, sizeof(*key), gfp);
	if (!key)
		return -ENOMEM;
	if (!tcp_alloc_md5sig_pool()) {
		sock_kfree_s(sk, key, sizeof(*key));
		return -ENOMEM;
	}

	memcpy(key->key, newkey, newkeylen);
	key->keylen = newkeylen;
	key->family = family;
	memcpy(&key->addr, addr,
	       (family == AF_INET6) ? sizeof(struct in6_addr) :
				      sizeof(struct in_addr));
	hlist_add_head_rcu(&key->node, &md5sig->head);
	return 0;
}
EXPORT_SYMBOL(tcp_md5_do_add);

/* Remove the MD5 key for @addr/@family; freeing is deferred via RCU. */
int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family)
{
	struct tcp_md5sig_key *key;

	key = tcp_md5_do_lookup(sk, addr, family);
	if (!key)
		return -ENOENT;
	hlist_del_rcu(&key->node);
	atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
	kfree_rcu(key, rcu);
	return 0;
}
EXPORT_SYMBOL(tcp_md5_do_del);

/* Drop every MD5 key on the socket (socket teardown path). */
static void tcp_clear_md5_list(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;
	struct hlist_node *n;
	struct tcp_md5sig_info *md5sig;

	md5sig = rcu_dereference_protected(tp->md5sig_info, 1);

	hlist_for_each_entry_safe(key, n, &md5sig->head, node) {
		hlist_del_rcu(&key->node);
		atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
		kfree_rcu(key, rcu);
	}
}

/* setsockopt(TCP_MD5SIG) handler: add, replace or (keylen == 0) delete
 * the key for the given peer address.
 */
static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval,
				 int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin->sin_family != AF_INET)
		return -EINVAL;

	if (!cmd.tcpm_keylen)
		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
				      AF_INET);

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
			      AF_INET, cmd.tcpm_key, cmd.tcpm_keylen,
			      GFP_KERNEL);
}

/* Feed the TCP pseudo-header plus a zero-checksum copy of the TCP
 * header into the MD5 hash state.
 */
static int tcp_v4_md5_hash_headers(struct tcp_md5sig_pool *hp,
				   __be32 daddr, __be32 saddr,
				   const struct tcphdr *th, int nbytes)
{
	struct tcp4_pseudohdr *bp;
	struct scatterlist sg;
	struct tcphdr *_th;

	bp = hp->scratch;
	bp->saddr = saddr;
	bp->daddr = daddr;
	bp->pad = 0;
	bp->protocol = IPPROTO_TCP;
	bp->len = cpu_to_be16(nbytes);

	_th = (struct tcphdr *)(bp + 1);
	memcpy(_th, th, sizeof(*th));
	/* Checksum field is excluded from the signature. */
	_th->check = 0;

	sg_init_one(&sg, bp, sizeof(*bp) + sizeof(*th));
	ahash_request_set_crypt(hp->md5_req, &sg, NULL,
				sizeof(*bp) + sizeof(*th));
	return crypto_ahash_update(hp->md5_req);
}

/* Compute the MD5 signature over pseudo-header + TCP header only (used
 * for replies built on the stack).  Returns 0 on success, 1 on failure
 * (with md5_hash zeroed).
 */
static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
			       __be32 daddr, __be32 saddr,
			       const struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct ahash_request *req;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	req = hp->md5_req;

	if (crypto_ahash_init(req))
		goto clear_hash;
	if (tcp_v4_md5_hash_headers(hp, daddr, saddr, th, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	ahash_request_set_crypt(req, NULL, md5_hash, 0);
	if (crypto_ahash_final(req))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}

/* Compute the MD5 signature over an entire segment (header + payload).
 * Addresses come from @sk when available, else from the skb's IP header.
 */
int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
			const struct sock *sk,
			const struct sk_buff *skb)
{
	struct tcp_md5sig_pool *hp;
	struct ahash_request *req;
	const struct tcphdr *th = tcp_hdr(skb);
	__be32 saddr, daddr;

	if (sk) { /* valid for establish/request sockets */
		saddr = sk->sk_rcv_saddr;
		daddr = sk->sk_daddr;
	} else {
		const struct iphdr *iph = ip_hdr(skb);
		saddr = iph->saddr;
		daddr = iph->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	req = hp->md5_req;

	if (crypto_ahash_init(req))
		goto clear_hash;

	if (tcp_v4_md5_hash_headers(hp, daddr, saddr, th, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	ahash_request_set_crypt(req, NULL, md5_hash, 0);
	if (crypto_ahash_final(req))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
EXPORT_SYMBOL(tcp_v4_md5_hash_skb);

#endif

/* Called with rcu_read_lock() */
static bool tcp_v4_inbound_md5_hash(const struct sock *sk,
				    const struct sk_buff *skb)
{
#ifdef CONFIG_TCP_MD5SIG
	/*
	 * This gets called for each TCP segment that arrives
	 * so we want to be efficient.
	 * We have 3 drop cases:
	 * o No MD5 hash and one expected.
	 * o MD5 hash and we're not expecting one.
	 * o MD5 hash and its wrong.
	 */
	const __u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct iphdr *iph = ip_hdr(skb);
	const struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	unsigned char newhash[16];

	hash_expected = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&iph->saddr,
					  AF_INET);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return false;

	if (hash_expected && !hash_location) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return true;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return true;
	}

	/* Okay, so this is hash_expected and hash_location -
	 * so we need to calculate the checksum.
	 */
	genhash = tcp_v4_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5FAILURE);
		net_info_ratelimited("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
				     &iph->saddr, ntohs(th->source),
				     &iph->daddr, ntohs(th->dest),
				     genhash ? " tcp_v4_calc_md5_hash failed"
					     : "");
		return true;
	}
	return false;
#endif
	return false;
}

/* Initialise the IPv4-specific part of a freshly minted request sock
 * from the incoming SYN.
 */
static void tcp_v4_init_req(struct request_sock *req,
			    const struct sock *sk_listener,
			    struct sk_buff *skb)
{
	struct inet_request_sock *ireq = inet_rsk(req);

	sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr);
	sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr);
	ireq->opt = tcp_v4_save_options(skb);
}

/* Route the SYN-ACK for @req; when @strict is requested, report whether
 * the chosen route's destination matches the peer address exactly.
 */
static struct dst_entry *tcp_v4_route_req(const struct sock *sk,
					  struct flowi *fl,
					  const struct request_sock *req,
					  bool *strict)
{
	struct dst_entry *dst = inet_csk_route_req(sk, &fl->u.ip4, req);

	if (strict) {
		if (fl->u.ip4.daddr == inet_rsk(req)->ir_rmt_addr)
			*strict = true;
		else
			*strict = false;
	}

	return dst;
}

struct request_sock_ops tcp_request_sock_ops __read_mostly = {
	.family		=	PF_INET,
	.obj_size	=	sizeof(struct tcp_request_sock),
	.rtx_syn_ack	=	tcp_rtx_synack,
	.send_ack	=	tcp_v4_reqsk_send_ack,
	.destructor	=	tcp_v4_reqsk_destructor,
	.send_reset	=	tcp_v4_send_reset,
	.syn_ack_timeout =	tcp_syn_ack_timeout,
};

static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
	.mss_clamp	=	TCP_MSS_DEFAULT,
#ifdef CONFIG_TCP_MD5SIG
	.req_md5_lookup	=	tcp_v4_md5_lookup,
	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
#endif
	.init_req	=	tcp_v4_init_req,
#ifdef CONFIG_SYN_COOKIES
	.cookie_init_seq =	cookie_v4_init_sequence,
#endif
	.route_req	=	tcp_v4_route_req,
	.init_seq	=	tcp_v4_init_sequence,
	.send_synack	=	tcp_v4_send_synack,
};

int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
{
	/* Never answer to SYNs send to broadcast or multicast */
	if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
		goto drop;

	return tcp_conn_request(&tcp_request_sock_ops,
				&tcp_request_sock_ipv4_ops, sk, skb);

drop:
	tcp_listendrop(sk);
	return 0;
}
EXPORT_SYMBOL(tcp_v4_conn_request);


/*
 * The three way handshake has completed - we got a valid synack -
 * now create the new socket.
 */
/* Create the child (established) socket once the 3WHS completed.
 * On success *own_req says whether we inserted the child into the
 * ehash ourselves; returns NULL on failure (accept queue full, no
 * memory, route or port inheritance failure).
 */
struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req,
				  struct dst_entry *dst,
				  struct request_sock *req_unhash,
				  bool *own_req)
{
	struct inet_request_sock *ireq;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif
	struct ip_options_rcu *inet_opt;

	if (sk_acceptq_is_full(sk))
		goto exit_overflow;

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (!newsk)
		goto exit_nonewsk;

	newsk->sk_gso_type = SKB_GSO_TCPV4;
	inet_sk_rx_dst_set(newsk, skb);

	newtp		      = tcp_sk(newsk);
	newinet		      = inet_sk(newsk);
	ireq		      = inet_rsk(req);
	sk_daddr_set(newsk, ireq->ir_rmt_addr);
	sk_rcv_saddr_set(newsk, ireq->ir_loc_addr);
	newsk->sk_bound_dev_if = ireq->ir_iif;
	newinet->inet_saddr	      = ireq->ir_loc_addr;
	/* Transfer the saved IP options from the request sock to the
	 * child; clear ireq->opt so the reqsk destructor won't free them.
	 */
	inet_opt	      = ireq->opt;
	rcu_assign_pointer(newinet->inet_opt, inet_opt);
	ireq->opt	      = NULL;
	newinet->mc_index     = inet_iif(skb);
	newinet->mc_ttl	      = ip_hdr(skb)->ttl;
	newinet->rcv_tos      = ip_hdr(skb)->tos;
	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (inet_opt)
		inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
	newinet->inet_id = newtp->write_seq ^ jiffies;

	if (!dst) {
		dst = inet_csk_route_child_sock(sk, newsk, req);
		if (!dst)
			goto put_and_exit;
	} else {
		/* syncookie case : see end of cookie_v4_check() */
	}
	sk_setup_caps(newsk, dst);

	tcp_ca_openreq_child(newsk, dst);

	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = dst_metric_advmss(dst);
	if (tcp_sk(sk)->rx_opt.user_mss &&
	    tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
		newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;

	tcp_initialize_rcv_mss(newsk);

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&newinet->inet_daddr,
				AF_INET);
	if (key) {
		/*
		 * We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newinet->inet_daddr,
			       AF_INET, key->key, key->keylen, GFP_ATOMIC);
		sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
	}
#endif

	if (__inet_inherit_port(sk, newsk) < 0)
		goto put_and_exit;
	*own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
	if (*own_req)
		tcp_move_syn(newtp, req);

	return newsk;

exit_overflow:
	NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
exit_nonewsk:
	dst_release(dst);
exit:
	tcp_listendrop(sk);
	return NULL;
put_and_exit:
	inet_csk_prepare_forced_close(newsk);
	tcp_done(newsk);
	goto exit;
}
EXPORT_SYMBOL(tcp_v4_syn_recv_sock);

/* For a non-SYN segment arriving on a listener, try to resurrect a
 * connection from a SYN cookie; otherwise return @sk unchanged.
 */
static struct sock *tcp_v4_cookie_check(struct sock *sk, struct sk_buff *skb)
{
#ifdef CONFIG_SYN_COOKIES
	const struct tcphdr *th = tcp_hdr(skb);

	if (!th->syn)
		sk = cookie_v4_check(sk, skb);
#endif
	return sk;
}

/* The socket must have it's spinlock held when we get
 * here, unless it is a TCP_LISTEN socket.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct sock *rsk;

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		struct dst_entry *dst = sk->sk_rx_dst;

		sock_rps_save_rxhash(sk, skb);
		sk_mark_napi_id(sk, skb);
		if (dst) {
			/* Drop a cached rx dst that no longer matches the
			 * incoming interface or fails its validity check.
			 */
			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
			    !dst->ops->check(dst, 0)) {
				dst_release(dst);
				sk->sk_rx_dst = NULL;
			}
		}
		tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len);
		return 0;
	}

	if (tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v4_cookie_check(sk, skb);

		if (!nsk)
			goto discard;
		if (nsk != sk) {
			sock_rps_save_rxhash(nsk, skb);
			sk_mark_napi_id(nsk, skb);
			if (tcp_child_process(sk, nsk, skb)) {
				rsk = nsk;
				goto reset;
			}
			return 0;
		}
	} else
		sock_rps_save_rxhash(sk, skb);

	if (tcp_rcv_state_process(sk, skb)) {
		rsk = sk;
		goto reset;
	}
	return 0;

reset:
	tcp_v4_send_reset(rsk, skb);
discard:
	kfree_skb(skb);
	/* Be careful here. If this function gets more complicated and
	 * gcc suffers from register pressure on the x86, sk (in %ebx)
	 * might be destroyed here. This current version compiles correctly,
	 * but you have been warned.
	 */
	return 0;

csum_err:
	TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
	TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
	goto discard;
}
EXPORT_SYMBOL(tcp_v4_do_rcv);

/* Early demux: look up an established socket for @skb before routing,
 * and attach its cached rx dst to the skb when still valid.
 */
void tcp_v4_early_demux(struct sk_buff *skb)
{
	const struct iphdr *iph;
	const struct tcphdr *th;
	struct sock *sk;

	if (skb->pkt_type != PACKET_HOST)
		return;

	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
		return;

	iph = ip_hdr(skb);
	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr) / 4)
		return;

	sk = __inet_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
				       iph->saddr, th->source,
				       iph->daddr, ntohs(th->dest),
				       skb->skb_iif);
	if (sk) {
		skb->sk = sk;
		skb->destructor = sock_edemux;
		if (sk_fullsock(sk)) {
			struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);

			if (dst)
				dst = dst_check(dst, 0);
			if (dst &&
			    inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
				skb_dst_set_noref(skb, dst);
		}
	}
}

/* Packet is added to VJ-style prequeue for processing in process
 * context, if a reader task is waiting. Apparently, this exciting
 * idea (VJ's mail "Re: query about TCP header on tcp-ip" of 07 Sep 93)
 * failed somewhere. Latency? Burstiness? Well, at least now we will
 * see, why it failed. 8)8)				  --ANK
 *
 */
bool tcp_prequeue(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (sysctl_tcp_low_latency || !tp->ucopy.task)
		return false;

	if (skb->len <= tcp_hdrlen(skb) &&
	    skb_queue_len(&tp->ucopy.prequeue) == 0)
		return false;

	/* Before escaping RCU protected region, we need to take care of skb
	 * dst. Prequeue is only enabled for established sockets.
	 * For such sockets, we might need the skb dst only to set sk->sk_rx_dst
	 * Instead of doing full sk_rx_dst validity here, let's perform
	 * an optimistic check.
	 */
	if (likely(sk->sk_rx_dst))
		skb_dst_drop(skb);
	else
		skb_dst_force_safe(skb);

	__skb_queue_tail(&tp->ucopy.prequeue, skb);
	tp->ucopy.memory += skb->truesize;
	if (skb_queue_len(&tp->ucopy.prequeue) >= 32 ||
	    tp->ucopy.memory + atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf) {
		struct sk_buff *skb1;

		BUG_ON(sock_owned_by_user(sk));
		__NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPPREQUEUEDROPPED,
				skb_queue_len(&tp->ucopy.prequeue));

		/* Prequeue overflowed: process the backlog inline. */
		while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
			sk_backlog_rcv(sk, skb1);

		tp->ucopy.memory = 0;
	} else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
		wake_up_interruptible_sync_poll(sk_sleep(sk),
					   POLLIN | POLLRDNORM | POLLRDBAND);
		if (!inet_csk_ack_scheduled(sk))
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
						  (3 * tcp_rto_min(sk)) / 4,
						  TCP_RTO_MAX);
	}
	return true;
}
EXPORT_SYMBOL(tcp_prequeue);

bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb)
{
	u32 limit = sk->sk_rcvbuf + sk->sk_sndbuf;

	/* Only socket owner can try to collapse/prune rx queues
	 * to reduce memory overhead, so add a little headroom here.
	 * Few sockets backlog are possibly concurrently non empty.
	 */
	limit += 64*1024;

	/* In case all data was pulled from skb frags (in __pskb_pull_tail()),
	 * we can fix skb->truesize to its real value to avoid future drops.
	 * This is valid because skb is not yet charged to the socket.
	 * It has been noticed pure SACK packets were sometimes dropped
	 * (if cooked by drivers without copybreak feature).
*/ if (!skb->data_len) skb->truesize = SKB_TRUESIZE(skb_end_offset(skb)); if (unlikely(sk_add_backlog(sk, skb, limit))) { bh_unlock_sock(sk); __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPBACKLOGDROP); return true; } return false; } EXPORT_SYMBOL(tcp_add_backlog); /* * From tcp_input.c */ int tcp_v4_rcv(struct sk_buff *skb) { struct net *net = dev_net(skb->dev); const struct iphdr *iph; const struct tcphdr *th; bool refcounted; struct sock *sk; int ret; if (skb->pkt_type != PACKET_HOST) goto discard_it; /* Count it even if it's bad */ __TCP_INC_STATS(net, TCP_MIB_INSEGS); if (!pskb_may_pull(skb, sizeof(struct tcphdr))) goto discard_it; th = (const struct tcphdr *)skb->data; if (unlikely(th->doff < sizeof(struct tcphdr) / 4)) goto bad_packet; if (!pskb_may_pull(skb, th->doff * 4)) goto discard_it; /* An explanation is required here, I think. * Packet length and doff are validated by header prediction, * provided case of th->doff==0 is eliminated. * So, we defer the checks. */ if (skb_checksum_init(skb, IPPROTO_TCP, inet_compute_pseudo)) goto csum_error; th = (const struct tcphdr *)skb->data; iph = ip_hdr(skb); /* This is tricky : We move IPCB at its correct location into TCP_SKB_CB() * barrier() makes sure compiler wont play fool^Waliasing games. 
*/ memmove(&TCP_SKB_CB(skb)->header.h4, IPCB(skb), sizeof(struct inet_skb_parm)); barrier(); TCP_SKB_CB(skb)->seq = ntohl(th->seq); TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin + skb->len - th->doff * 4); TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq); TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th); TCP_SKB_CB(skb)->tcp_tw_isn = 0; TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph); TCP_SKB_CB(skb)->sacked = 0; lookup: sk = __inet_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th), th->source, th->dest, &refcounted); if (!sk) goto no_tcp_socket; process: if (sk->sk_state == TCP_TIME_WAIT) goto do_time_wait; if (sk->sk_state == TCP_NEW_SYN_RECV) { struct request_sock *req = inet_reqsk(sk); struct sock *nsk; sk = req->rsk_listener; if (unlikely(tcp_v4_inbound_md5_hash(sk, skb))) { sk_drops_add(sk, skb); reqsk_put(req); goto discard_it; } if (unlikely(sk->sk_state != TCP_LISTEN)) { inet_csk_reqsk_queue_drop_and_put(sk, req); goto lookup; } /* We own a reference on the listener, increase it again * as we might lose it too soon. 
*/ sock_hold(sk); refcounted = true; nsk = tcp_check_req(sk, skb, req, false); if (!nsk) { reqsk_put(req); goto discard_and_relse; } if (nsk == sk) { reqsk_put(req); } else if (tcp_child_process(sk, nsk, skb)) { tcp_v4_send_reset(nsk, skb); goto discard_and_relse; } else { sock_put(sk); return 0; } } if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) { __NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP); goto discard_and_relse; } if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) goto discard_and_relse; if (tcp_v4_inbound_md5_hash(sk, skb)) goto discard_and_relse; nf_reset(skb); if (sk_filter(sk, skb)) goto discard_and_relse; skb->dev = NULL; if (sk->sk_state == TCP_LISTEN) { ret = tcp_v4_do_rcv(sk, skb); goto put_and_return; } sk_incoming_cpu_update(sk); bh_lock_sock_nested(sk); tcp_segs_in(tcp_sk(sk), skb); ret = 0; if (!sock_owned_by_user(sk)) { if (!tcp_prequeue(sk, skb)) ret = tcp_v4_do_rcv(sk, skb); } else if (tcp_add_backlog(sk, skb)) { goto discard_and_relse; } bh_unlock_sock(sk); put_and_return: if (refcounted) sock_put(sk); return ret; no_tcp_socket: if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) goto discard_it; if (tcp_checksum_complete(skb)) { csum_error: __TCP_INC_STATS(net, TCP_MIB_CSUMERRORS); bad_packet: __TCP_INC_STATS(net, TCP_MIB_INERRS); } else { tcp_v4_send_reset(NULL, skb); } discard_it: /* Discard frame. 
 */
	kfree_skb(skb);
	return 0;

discard_and_relse:
	sk_drops_add(sk, skb);
	if (refcounted)
		sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	if (tcp_checksum_complete(skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto csum_error;
	}
	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN: {
		/* A SYN arrived in TIME-WAIT: if a listener exists for the
		 * same address/port, kill the timewait sock and let the
		 * listener process the SYN as a fresh connection.
		 */
		struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
							&tcp_hashinfo, skb,
							__tcp_hdrlen(th),
							iph->saddr, th->source,
							iph->daddr, th->dest,
							inet_iif(skb));
		if (sk2) {
			inet_twsk_deschedule_put(inet_twsk(sk));
			sk = sk2;
			refcounted = false;
			goto process;
		}
		/* Fall through to ACK */
	}
	case TCP_TW_ACK:
		tcp_v4_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		tcp_v4_send_reset(sk, skb);
		inet_twsk_deschedule_put(inet_twsk(sk));
		goto discard_it;
	case TCP_TW_SUCCESS:;
	}
	goto discard_it;
}

static struct timewait_sock_ops tcp_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
	.twsk_destructor= tcp_twsk_destructor,
};

/* Cache @skb's dst (and its input interface) on @sk for the rx fast
 * path, taking a reference on it.
 */
void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);

	if (dst && dst_hold_safe(dst)) {
		sk->sk_rx_dst = dst;
		inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
	}
}
EXPORT_SYMBOL(inet_sk_rx_dst_set);

const struct inet_connection_sock_af_ops ipv4_specific = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.sk_rx_dst_set	   = inet_sk_rx_dst_set,
	.conn_request	   = tcp_v4_conn_request,
	.syn_recv_sock	   = tcp_v4_syn_recv_sock,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ip_setsockopt,
	.getsockopt	   = ip_getsockopt,
	.addr2sockaddr	   = inet_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in),
	.bind_conflict	   = inet_csk_bind_conflict,
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ip_setsockopt,
	.compat_getsockopt = compat_ip_getsockopt,
#endif
	.mtu_reduced	   = tcp_v4_mtu_reduced,
};
EXPORT_SYMBOL(ipv4_specific);

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
	.md5_lookup	= tcp_v4_md5_lookup,
	.calc_md5_hash	= tcp_v4_md5_hash_skb,
	.md5_parse	= tcp_v4_parse_md5_keys,
};
#endif

/* NOTE: A lot of things set to zero explicitly by call to
 *       sk_alloc() so need not be done here.
 */
static int tcp_v4_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	tcp_init_sock(sk);

	icsk->icsk_af_ops = &ipv4_specific;

#ifdef CONFIG_TCP_MD5SIG
	tcp_sk(sk)->af_specific = &tcp_sock_ipv4_specific;
#endif

	return 0;
}

/* Tear down all TCP-level state of @sk: timers, queues, MD5 keys,
 * bound port, Fast Open state and saved SYN.
 */
void tcp_v4_destroy_sock(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	tcp_clear_xmit_timers(sk);

	tcp_cleanup_congestion_control(sk);

	/* Cleanup up the write buffer. */
	tcp_write_queue_purge(sk);

	/* Cleans up our, hopefully empty, out_of_order_queue. */
	skb_rbtree_purge(&tp->out_of_order_queue);

#ifdef CONFIG_TCP_MD5SIG
	/* Clean up the MD5 key list, if any */
	if (tp->md5sig_info) {
		tcp_clear_md5_list(sk);
		kfree_rcu(tp->md5sig_info, rcu);
		tp->md5sig_info = NULL;
	}
#endif

	/* Clean prequeue, it must be empty really */
	__skb_queue_purge(&tp->ucopy.prequeue);

	/* Clean up a referenced TCP bind bucket. */
	if (inet_csk(sk)->icsk_bind_hash)
		inet_put_port(sk);

	BUG_ON(tp->fastopen_rsk);

	/* If socket is aborted during connect operation */
	tcp_free_fastopen_req(tp);
	tcp_saved_syn_free(tp);

	local_bh_disable();
	sk_sockets_allocated_dec(sk);
	local_bh_enable();
}
EXPORT_SYMBOL(tcp_v4_destroy_sock);

#ifdef CONFIG_PROC_FS
/* Proc filesystem TCP sock list dumping. */

/*
 * Get next listener socket follow cur.  If cur is NULL, get first socket
 * starting from bucket given in st->bucket; when st->bucket is zero the
 * very first socket in the hash table is returned.
 */
static void *listening_get_next(struct seq_file *seq, void *cur)
{
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);
	struct inet_listen_hashbucket *ilb;
	struct sock *sk = cur;

	if (!sk) {
get_head:
		ilb = &tcp_hashinfo.listening_hash[st->bucket];
		/* Bucket lock is held from here until the walk leaves the
		 * bucket; tcp_seq_stop() releases it otherwise.
		 */
		spin_lock_bh(&ilb->lock);
		sk = sk_head(&ilb->head);
		st->offset = 0;
		goto get_sk;
	}
	ilb = &tcp_hashinfo.listening_hash[st->bucket];
	++st->num;
	++st->offset;

	sk = sk_next(sk);
get_sk:
	sk_for_each_from(sk) {
		if (!net_eq(sock_net(sk), net))
			continue;
		if (sk->sk_family == st->family)
			return sk;
	}
	spin_unlock_bh(&ilb->lock);
	st->offset = 0;
	if (++st->bucket < INET_LHTABLE_SIZE)
		goto get_head;
	return NULL;
}

/* Return the *pos'th listening socket (counting from the iterator's
 * current bucket), advancing *pos accordingly.
 */
static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	st->bucket = 0;
	st->offset = 0;
	rc = listening_get_next(seq, NULL);

	while (rc && *pos) {
		rc = listening_get_next(seq, rc);
		--*pos;
	}
	return rc;
}

static inline bool empty_bucket(const struct tcp_iter_state *st)
{
	return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain);
}

/*
 * Get first established socket starting from bucket given in st->bucket.
 * If st->bucket is zero, the very first socket in the hash is returned.
 */
static void *established_get_first(struct seq_file *seq)
{
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);
	void *rc = NULL;

	st->offset = 0;
	for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
		struct sock *sk;
		struct hlist_nulls_node *node;
		spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);

		/* Lockless fast path for the common case of empty buckets */
		if (empty_bucket(st))
			continue;

		spin_lock_bh(lock);
		sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
			if (sk->sk_family != st->family ||
			    !net_eq(sock_net(sk), net)) {
				continue;
			}
			rc = sk;
			goto out;
		}
		spin_unlock_bh(lock);
	}
out:
	return rc;
}

static void *established_get_next(struct seq_file *seq, void *cur)
{
	struct sock *sk = cur;
	struct hlist_nulls_node *node;
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);

	++st->num;
	++st->offset;

	sk = sk_nulls_next(sk);

	sk_nulls_for_each_from(sk, node) {
		if (sk->sk_family == st->family && net_eq(sock_net(sk), net))
			return sk;
	}

	/* End of this bucket: unlock it and move to the next one. */
	spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
	++st->bucket;
	return established_get_first(seq);
}

/* Return the pos'th established socket, advancing the iterator. */
static void *established_get_idx(struct seq_file *seq, loff_t pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	st->bucket = 0;
	rc = established_get_first(seq);

	while (rc && pos) {
		rc = established_get_next(seq, rc);
		--pos;
	}
	return rc;
}

/* Absolute positioning: listeners first, then established sockets. */
static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
{
	void *rc;
	struct tcp_iter_state *st = seq->private;

	st->state = TCP_SEQ_STATE_LISTENING;
	rc	  = listening_get_idx(seq, &pos);

	if (!rc) {
		st->state = TCP_SEQ_STATE_ESTABLISHED;
		rc	  = established_get_idx(seq, pos);
	}

	return rc;
}

/* Resume iteration at the last remembered bucket/offset instead of
 * rescanning from the start of the hash tables.
 */
static void *tcp_seek_last_pos(struct seq_file *seq)
{
	struct tcp_iter_state *st = seq->private;
	int offset = st->offset;
	int orig_num = st->num;
	void *rc = NULL;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
		if (st->bucket >= INET_LHTABLE_SIZE)
			break;
		st->state = TCP_SEQ_STATE_LISTENING;
		rc = listening_get_next(seq, NULL);
		while (offset-- && rc)
			rc = listening_get_next(seq, rc);
		if (rc)
			break;
		st->bucket = 0;
		st->state = TCP_SEQ_STATE_ESTABLISHED;
		/* Fallthrough */
	case TCP_SEQ_STATE_ESTABLISHED:
		if (st->bucket > tcp_hashinfo.ehash_mask)
			break;
		rc = established_get_first(seq);
		while (offset-- && rc)
			rc = established_get_next(seq, rc);
	}

	st->num = orig_num;

	return rc;
}

static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	if (*pos && *pos == st->last_pos) {
		rc = tcp_seek_last_pos(seq);
		if (rc)
			goto out;
	}

	st->state = TCP_SEQ_STATE_LISTENING;
	st->num = 0;
	st->bucket = 0;
	st->offset = 0;
	rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;

out:
	st->last_pos = *pos;
	return rc;
}

static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc = NULL;

	if (v == SEQ_START_TOKEN) {
		rc = tcp_get_idx(seq, 0);
		goto out;
	}

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
		rc = listening_get_next(seq, v);
		if (!rc) {
			st->state = TCP_SEQ_STATE_ESTABLISHED;
			st->bucket = 0;
			st->offset = 0;
			rc	  = established_get_first(seq);
		}
		break;
	case TCP_SEQ_STATE_ESTABLISHED:
		rc = established_get_next(seq, v);
		break;
	}
out:
	++*pos;
	st->last_pos = *pos;
	return rc;
}

static void tcp_seq_stop(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st = seq->private;

	/* Release whichever bucket lock the iterator still holds. */
	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
		if (v != SEQ_START_TOKEN)
			spin_unlock_bh(&tcp_hashinfo.listening_hash[st->bucket].lock);
		break;
	case TCP_SEQ_STATE_ESTABLISHED:
		if (v)
			spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
		break;
	}
}

int tcp_seq_open(struct inode *inode, struct file *file)
{
	struct tcp_seq_afinfo *afinfo = PDE_DATA(inode);
	struct tcp_iter_state *s;
	int err;

	err = seq_open_net(inode, file, &afinfo->seq_ops,
			  sizeof(struct tcp_iter_state));
	if (err < 0)
		return err;

	s = ((struct seq_file *)file->private_data)->private;
	s->family		= afinfo->family;
	s->last_pos		= 0;
return 0; } EXPORT_SYMBOL(tcp_seq_open); int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo) { int rc = 0; struct proc_dir_entry *p; afinfo->seq_ops.start = tcp_seq_start; afinfo->seq_ops.next = tcp_seq_next; afinfo->seq_ops.stop = tcp_seq_stop; p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net, afinfo->seq_fops, afinfo); if (!p) rc = -ENOMEM; return rc; } EXPORT_SYMBOL(tcp_proc_register); void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo) { remove_proc_entry(afinfo->name, net->proc_net); } EXPORT_SYMBOL(tcp_proc_unregister); static void get_openreq4(const struct request_sock *req, struct seq_file *f, int i) { const struct inet_request_sock *ireq = inet_rsk(req); long delta = req->rsk_timer.expires - jiffies; seq_printf(f, "%4d: %08X:%04X %08X:%04X" " %02X %08X:%08X %02X:%08lX %08X %5u %8d %u %d %pK", i, ireq->ir_loc_addr, ireq->ir_num, ireq->ir_rmt_addr, ntohs(ireq->ir_rmt_port), TCP_SYN_RECV, 0, 0, /* could print option size, but that is af dependent. 
*/ 1, /* timers active (only the expire timer) */ jiffies_delta_to_clock_t(delta), req->num_timeout, from_kuid_munged(seq_user_ns(f), sock_i_uid(req->rsk_listener)), 0, /* non standard timer */ 0, /* open_requests have no inode */ 0, req); } static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i) { int timer_active; unsigned long timer_expires; const struct tcp_sock *tp = tcp_sk(sk); const struct inet_connection_sock *icsk = inet_csk(sk); const struct inet_sock *inet = inet_sk(sk); const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq; __be32 dest = inet->inet_daddr; __be32 src = inet->inet_rcv_saddr; __u16 destp = ntohs(inet->inet_dport); __u16 srcp = ntohs(inet->inet_sport); int rx_queue; int state; if (icsk->icsk_pending == ICSK_TIME_RETRANS || icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS || icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) { timer_active = 1; timer_expires = icsk->icsk_timeout; } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) { timer_active = 4; timer_expires = icsk->icsk_timeout; } else if (timer_pending(&sk->sk_timer)) { timer_active = 2; timer_expires = sk->sk_timer.expires; } else { timer_active = 0; timer_expires = jiffies; } state = sk_state_load(sk); if (state == TCP_LISTEN) rx_queue = sk->sk_ack_backlog; else /* Because we don't lock the socket, * we might find a transient negative value. */ rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0); seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX " "%08X %5u %8d %lu %d %pK %lu %lu %u %u %d", i, src, srcp, dest, destp, state, tp->write_seq - tp->snd_una, rx_queue, timer_active, jiffies_delta_to_clock_t(timer_expires - jiffies), icsk->icsk_retransmits, from_kuid_munged(seq_user_ns(f), sock_i_uid(sk)), icsk->icsk_probes_out, sock_i_ino(sk), atomic_read(&sk->sk_refcnt), sk, jiffies_to_clock_t(icsk->icsk_rto), jiffies_to_clock_t(icsk->icsk_ack.ato), (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong, tp->snd_cwnd, state == TCP_LISTEN ? 
fastopenq->max_qlen : (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh)); } static void get_timewait4_sock(const struct inet_timewait_sock *tw, struct seq_file *f, int i) { long delta = tw->tw_timer.expires - jiffies; __be32 dest, src; __u16 destp, srcp; dest = tw->tw_daddr; src = tw->tw_rcv_saddr; destp = ntohs(tw->tw_dport); srcp = ntohs(tw->tw_sport); seq_printf(f, "%4d: %08X:%04X %08X:%04X" " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK", i, src, srcp, dest, destp, tw->tw_substate, 0, 0, 3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0, atomic_read(&tw->tw_refcnt), tw); } #define TMPSZ 150 static int tcp4_seq_show(struct seq_file *seq, void *v) { struct tcp_iter_state *st; struct sock *sk = v; seq_setwidth(seq, TMPSZ - 1); if (v == SEQ_START_TOKEN) { seq_puts(seq, " sl local_address rem_address st tx_queue " "rx_queue tr tm->when retrnsmt uid timeout " "inode"); goto out; } st = seq->private; if (sk->sk_state == TCP_TIME_WAIT) get_timewait4_sock(v, seq, st->num); else if (sk->sk_state == TCP_NEW_SYN_RECV) get_openreq4(v, seq, st->num); else get_tcp4_sock(v, seq, st->num); out: seq_pad(seq, '\n'); return 0; } static const struct file_operations tcp_afinfo_seq_fops = { .owner = THIS_MODULE, .open = tcp_seq_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release_net }; static struct tcp_seq_afinfo tcp4_seq_afinfo = { .name = "tcp", .family = AF_INET, .seq_fops = &tcp_afinfo_seq_fops, .seq_ops = { .show = tcp4_seq_show, }, }; static int __net_init tcp4_proc_init_net(struct net *net) { return tcp_proc_register(net, &tcp4_seq_afinfo); } static void __net_exit tcp4_proc_exit_net(struct net *net) { tcp_proc_unregister(net, &tcp4_seq_afinfo); } static struct pernet_operations tcp4_net_ops = { .init = tcp4_proc_init_net, .exit = tcp4_proc_exit_net, }; int __init tcp4_proc_init(void) { return register_pernet_subsys(&tcp4_net_ops); } void tcp4_proc_exit(void) { unregister_pernet_subsys(&tcp4_net_ops); } #endif /* CONFIG_PROC_FS */ struct proto tcp_prot = 
{ .name = "TCP", .owner = THIS_MODULE, .close = tcp_close, .connect = tcp_v4_connect, .disconnect = tcp_disconnect, .accept = inet_csk_accept, .ioctl = tcp_ioctl, .init = tcp_v4_init_sock, .destroy = tcp_v4_destroy_sock, .shutdown = tcp_shutdown, .setsockopt = tcp_setsockopt, .getsockopt = tcp_getsockopt, .recvmsg = tcp_recvmsg, .sendmsg = tcp_sendmsg, .sendpage = tcp_sendpage, .backlog_rcv = tcp_v4_do_rcv, .release_cb = tcp_release_cb, .hash = inet_hash, .unhash = inet_unhash, .get_port = inet_csk_get_port, .enter_memory_pressure = tcp_enter_memory_pressure, .stream_memory_free = tcp_stream_memory_free, .sockets_allocated = &tcp_sockets_allocated, .orphan_count = &tcp_orphan_count, .memory_allocated = &tcp_memory_allocated, .memory_pressure = &tcp_memory_pressure, .sysctl_mem = sysctl_tcp_mem, .sysctl_wmem = sysctl_tcp_wmem, .sysctl_rmem = sysctl_tcp_rmem, .max_header = MAX_TCP_HEADER, .obj_size = sizeof(struct tcp_sock), .slab_flags = SLAB_DESTROY_BY_RCU, .twsk_prot = &tcp_timewait_sock_ops, .rsk_prot = &tcp_request_sock_ops, .h.hashinfo = &tcp_hashinfo, .no_autobind = true, #ifdef CONFIG_COMPAT .compat_setsockopt = compat_tcp_setsockopt, .compat_getsockopt = compat_tcp_getsockopt, #endif .diag_destroy = tcp_abort, }; EXPORT_SYMBOL(tcp_prot); static void __net_exit tcp_sk_exit(struct net *net) { int cpu; for_each_possible_cpu(cpu) inet_ctl_sock_destroy(*per_cpu_ptr(net->ipv4.tcp_sk, cpu)); free_percpu(net->ipv4.tcp_sk); } static int __net_init tcp_sk_init(struct net *net) { int res, cpu; net->ipv4.tcp_sk = alloc_percpu(struct sock *); if (!net->ipv4.tcp_sk) return -ENOMEM; for_each_possible_cpu(cpu) { struct sock *sk; res = inet_ctl_sock_create(&sk, PF_INET, SOCK_RAW, IPPROTO_TCP, net); if (res) goto fail; sock_set_flag(sk, SOCK_USE_WRITE_QUEUE); *per_cpu_ptr(net->ipv4.tcp_sk, cpu) = sk; } net->ipv4.sysctl_tcp_ecn = 2; net->ipv4.sysctl_tcp_ecn_fallback = 1; net->ipv4.sysctl_tcp_base_mss = TCP_BASE_MSS; net->ipv4.sysctl_tcp_probe_threshold = TCP_PROBE_THRESHOLD; 
net->ipv4.sysctl_tcp_probe_interval = TCP_PROBE_INTERVAL; net->ipv4.sysctl_tcp_keepalive_time = TCP_KEEPALIVE_TIME; net->ipv4.sysctl_tcp_keepalive_probes = TCP_KEEPALIVE_PROBES; net->ipv4.sysctl_tcp_keepalive_intvl = TCP_KEEPALIVE_INTVL; net->ipv4.sysctl_tcp_syn_retries = TCP_SYN_RETRIES; net->ipv4.sysctl_tcp_synack_retries = TCP_SYNACK_RETRIES; net->ipv4.sysctl_tcp_syncookies = 1; net->ipv4.sysctl_tcp_reordering = TCP_FASTRETRANS_THRESH; net->ipv4.sysctl_tcp_retries1 = TCP_RETR1; net->ipv4.sysctl_tcp_retries2 = TCP_RETR2; net->ipv4.sysctl_tcp_orphan_retries = 0; net->ipv4.sysctl_tcp_fin_timeout = TCP_FIN_TIMEOUT; net->ipv4.sysctl_tcp_notsent_lowat = UINT_MAX; return 0; fail: tcp_sk_exit(net); return res; } static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list) { inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET); } static struct pernet_operations __net_initdata tcp_sk_ops = { .init = tcp_sk_init, .exit = tcp_sk_exit, .exit_batch = tcp_sk_exit_batch, }; void __init tcp_v4_init(void) { inet_hashinfo_init(&tcp_hashinfo); if (register_pernet_subsys(&tcp_sk_ops)) panic("Failed to create the TCP control socket.\n"); }
./CrossVul/dataset_final_sorted/CWE-284/c/bad_5349_1
crossvul-cpp_data_good_5199_0
/* * linux/fs/namei.c * * Copyright (C) 1991, 1992 Linus Torvalds */ /* * Some corrections by tytso. */ /* [Feb 1997 T. Schoebel-Theuer] Complete rewrite of the pathname * lookup logic. */ /* [Feb-Apr 2000, AV] Rewrite to the new namespace architecture. */ #include <linux/init.h> #include <linux/export.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/fs.h> #include <linux/namei.h> #include <linux/pagemap.h> #include <linux/fsnotify.h> #include <linux/personality.h> #include <linux/security.h> #include <linux/ima.h> #include <linux/syscalls.h> #include <linux/mount.h> #include <linux/audit.h> #include <linux/capability.h> #include <linux/file.h> #include <linux/fcntl.h> #include <linux/device_cgroup.h> #include <linux/fs_struct.h> #include <linux/posix_acl.h> #include <linux/hash.h> #include <asm/uaccess.h> #include "internal.h" #include "mount.h" /* [Feb-1997 T. Schoebel-Theuer] * Fundamental changes in the pathname lookup mechanisms (namei) * were necessary because of omirr. The reason is that omirr needs * to know the _real_ pathname, not the user-supplied one, in case * of symlinks (and also when transname replacements occur). * * The new code replaces the old recursive symlink resolution with * an iterative one (in case of non-nested symlink chains). It does * this with calls to <fs>_follow_link(). * As a side effect, dir_namei(), _namei() and follow_link() are now * replaced with a single function lookup_dentry() that can handle all * the special cases of the former code. * * With the new dcache, the pathname is stored at each inode, at least as * long as the refcount of the inode is positive. As a side effect, the * size of the dcache depends on the inode cache and thus is dynamic. * * [29-Apr-1998 C. Scott Ananian] Updated above description of symlink * resolution to correspond with current state of the code. * * Note that the symlink resolution is not *completely* iterative. 
* There is still a significant amount of tail- and mid- recursion in * the algorithm. Also, note that <fs>_readlink() is not used in * lookup_dentry(): lookup_dentry() on the result of <fs>_readlink() * may return different results than <fs>_follow_link(). Many virtual * filesystems (including /proc) exhibit this behavior. */ /* [24-Feb-97 T. Schoebel-Theuer] Side effects caused by new implementation: * New symlink semantics: when open() is called with flags O_CREAT | O_EXCL * and the name already exists in form of a symlink, try to create the new * name indicated by the symlink. The old code always complained that the * name already exists, due to not following the symlink even if its target * is nonexistent. The new semantics affects also mknod() and link() when * the name is a symlink pointing to a non-existent name. * * I don't know which semantics is the right one, since I have no access * to standards. But I found by trial that HP-UX 9.0 has the full "new" * semantics implemented, while SunOS 4.1.1 and Solaris (SunOS 5.4) have the * "old" one. Personally, I think the new semantics is much more logical. * Note that "ln old new" where "new" is a symlink pointing to a non-existing * file does succeed in both HP-UX and SunOs, but not in Solaris * and in the old Linux semantics. */ /* [16-Dec-97 Kevin Buhr] For security reasons, we change some symlink * semantics. See the comments in "open_namei" and "do_link" below. * * [10-Sep-98 Alan Modra] Another symlink change. */ /* [Feb-Apr 2000 AV] Complete rewrite. Rules for symlinks: * inside the path - always follow. * in the last component in creation/removal/renaming - never follow. * if LOOKUP_FOLLOW passed - follow. * if the pathname has trailing slashes - follow. * otherwise - don't follow. * (applied in that order). * * [Jun 2000 AV] Inconsistent behaviour of open() in case if flags==O_CREAT * restored for 2.4. This is the last surviving part of old 4.2BSD bug. 
* During the 2.4 we need to fix the userland stuff depending on it - * hopefully we will be able to get rid of that wart in 2.5. So far only * XEmacs seems to be relying on it... */ /* * [Sep 2001 AV] Single-semaphore locking scheme (kudos to David Holland) * implemented. Let's see if raised priority of ->s_vfs_rename_mutex gives * any extra contention... */ /* In order to reduce some races, while at the same time doing additional * checking and hopefully speeding things up, we copy filenames to the * kernel data space before using them.. * * POSIX.1 2.4: an empty pathname is invalid (ENOENT). * PATH_MAX includes the nul terminator --RR. */ #define EMBEDDED_NAME_MAX (PATH_MAX - offsetof(struct filename, iname)) struct filename * getname_flags(const char __user *filename, int flags, int *empty) { struct filename *result; char *kname; int len; result = audit_reusename(filename); if (result) return result; result = __getname(); if (unlikely(!result)) return ERR_PTR(-ENOMEM); /* * First, try to embed the struct filename inside the names_cache * allocation */ kname = (char *)result->iname; result->name = kname; len = strncpy_from_user(kname, filename, EMBEDDED_NAME_MAX); if (unlikely(len < 0)) { __putname(result); return ERR_PTR(len); } /* * Uh-oh. We have a name that's approaching PATH_MAX. Allocate a * separate struct filename so we can dedicate the entire * names_cache allocation for the pathname, and re-do the copy from * userland. */ if (unlikely(len == EMBEDDED_NAME_MAX)) { const size_t size = offsetof(struct filename, iname[1]); kname = (char *)result; /* * size is chosen that way we to guarantee that * result->iname[0] is within the same object and that * kname can't be equal to result->iname, no matter what. 
*/ result = kzalloc(size, GFP_KERNEL); if (unlikely(!result)) { __putname(kname); return ERR_PTR(-ENOMEM); } result->name = kname; len = strncpy_from_user(kname, filename, PATH_MAX); if (unlikely(len < 0)) { __putname(kname); kfree(result); return ERR_PTR(len); } if (unlikely(len == PATH_MAX)) { __putname(kname); kfree(result); return ERR_PTR(-ENAMETOOLONG); } } result->refcnt = 1; /* The empty path is special. */ if (unlikely(!len)) { if (empty) *empty = 1; if (!(flags & LOOKUP_EMPTY)) { putname(result); return ERR_PTR(-ENOENT); } } result->uptr = filename; result->aname = NULL; audit_getname(result); return result; } struct filename * getname(const char __user * filename) { return getname_flags(filename, 0, NULL); } struct filename * getname_kernel(const char * filename) { struct filename *result; int len = strlen(filename) + 1; result = __getname(); if (unlikely(!result)) return ERR_PTR(-ENOMEM); if (len <= EMBEDDED_NAME_MAX) { result->name = (char *)result->iname; } else if (len <= PATH_MAX) { struct filename *tmp; tmp = kmalloc(sizeof(*tmp), GFP_KERNEL); if (unlikely(!tmp)) { __putname(result); return ERR_PTR(-ENOMEM); } tmp->name = (char *)result; result = tmp; } else { __putname(result); return ERR_PTR(-ENAMETOOLONG); } memcpy((char *)result->name, filename, len); result->uptr = NULL; result->aname = NULL; result->refcnt = 1; audit_getname(result); return result; } void putname(struct filename *name) { BUG_ON(name->refcnt <= 0); if (--name->refcnt > 0) return; if (name->name != name->iname) { __putname(name->name); kfree(name); } else __putname(name); } static int check_acl(struct inode *inode, int mask) { #ifdef CONFIG_FS_POSIX_ACL struct posix_acl *acl; if (mask & MAY_NOT_BLOCK) { acl = get_cached_acl_rcu(inode, ACL_TYPE_ACCESS); if (!acl) return -EAGAIN; /* no ->get_acl() calls in RCU mode... 
*/ if (acl == ACL_NOT_CACHED) return -ECHILD; return posix_acl_permission(inode, acl, mask & ~MAY_NOT_BLOCK); } acl = get_acl(inode, ACL_TYPE_ACCESS); if (IS_ERR(acl)) return PTR_ERR(acl); if (acl) { int error = posix_acl_permission(inode, acl, mask); posix_acl_release(acl); return error; } #endif return -EAGAIN; } /* * This does the basic permission checking */ static int acl_permission_check(struct inode *inode, int mask) { unsigned int mode = inode->i_mode; if (likely(uid_eq(current_fsuid(), inode->i_uid))) mode >>= 6; else { if (IS_POSIXACL(inode) && (mode & S_IRWXG)) { int error = check_acl(inode, mask); if (error != -EAGAIN) return error; } if (in_group_p(inode->i_gid)) mode >>= 3; } /* * If the DACs are ok we don't need any capability check. */ if ((mask & ~mode & (MAY_READ | MAY_WRITE | MAY_EXEC)) == 0) return 0; return -EACCES; } /** * generic_permission - check for access rights on a Posix-like filesystem * @inode: inode to check access rights for * @mask: right to check for (%MAY_READ, %MAY_WRITE, %MAY_EXEC, ...) * * Used to check for read/write/execute permissions on a file. * We use "fsuid" for this, letting us set arbitrary permissions * for filesystem access without changing the "normal" uids which * are used for other things. * * generic_permission is rcu-walk aware. It returns -ECHILD in case an rcu-walk * request cannot be satisfied (eg. requires blocking or too much complexity). * It would then be called again in ref-walk mode. */ int generic_permission(struct inode *inode, int mask) { int ret; /* * Do the basic permission checks. */ ret = acl_permission_check(inode, mask); if (ret != -EACCES) return ret; if (S_ISDIR(inode->i_mode)) { /* DACs are overridable for directories */ if (capable_wrt_inode_uidgid(inode, CAP_DAC_OVERRIDE)) return 0; if (!(mask & MAY_WRITE)) if (capable_wrt_inode_uidgid(inode, CAP_DAC_READ_SEARCH)) return 0; return -EACCES; } /* * Read/write DACs are always overridable. 
* Executable DACs are overridable when there is * at least one exec bit set. */ if (!(mask & MAY_EXEC) || (inode->i_mode & S_IXUGO)) if (capable_wrt_inode_uidgid(inode, CAP_DAC_OVERRIDE)) return 0; /* * Searching includes executable on directories, else just read. */ mask &= MAY_READ | MAY_WRITE | MAY_EXEC; if (mask == MAY_READ) if (capable_wrt_inode_uidgid(inode, CAP_DAC_READ_SEARCH)) return 0; return -EACCES; } EXPORT_SYMBOL(generic_permission); /* * We _really_ want to just do "generic_permission()" without * even looking at the inode->i_op values. So we keep a cache * flag in inode->i_opflags, that says "this has not special * permission function, use the fast case". */ static inline int do_inode_permission(struct inode *inode, int mask) { if (unlikely(!(inode->i_opflags & IOP_FASTPERM))) { if (likely(inode->i_op->permission)) return inode->i_op->permission(inode, mask); /* This gets set once for the inode lifetime */ spin_lock(&inode->i_lock); inode->i_opflags |= IOP_FASTPERM; spin_unlock(&inode->i_lock); } return generic_permission(inode, mask); } /** * __inode_permission - Check for access rights to a given inode * @inode: Inode to check permission on * @mask: Right to check for (%MAY_READ, %MAY_WRITE, %MAY_EXEC) * * Check for read/write/execute permissions on an inode. * * When checking for MAY_APPEND, MAY_WRITE must also be set in @mask. * * This does not check for a read-only file system. You probably want * inode_permission(). */ int __inode_permission(struct inode *inode, int mask) { int retval; if (unlikely(mask & MAY_WRITE)) { /* * Nobody gets write access to an immutable file. 
*/ if (IS_IMMUTABLE(inode)) return -EACCES; } retval = do_inode_permission(inode, mask); if (retval) return retval; retval = devcgroup_inode_permission(inode, mask); if (retval) return retval; return security_inode_permission(inode, mask); } EXPORT_SYMBOL(__inode_permission); /** * sb_permission - Check superblock-level permissions * @sb: Superblock of inode to check permission on * @inode: Inode to check permission on * @mask: Right to check for (%MAY_READ, %MAY_WRITE, %MAY_EXEC) * * Separate out file-system wide checks from inode-specific permission checks. */ static int sb_permission(struct super_block *sb, struct inode *inode, int mask) { if (unlikely(mask & MAY_WRITE)) { umode_t mode = inode->i_mode; /* Nobody gets write access to a read-only fs. */ if ((sb->s_flags & MS_RDONLY) && (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))) return -EROFS; } return 0; } /** * inode_permission - Check for access rights to a given inode * @inode: Inode to check permission on * @mask: Right to check for (%MAY_READ, %MAY_WRITE, %MAY_EXEC) * * Check for read/write/execute permissions on an inode. We use fs[ug]id for * this, letting us set arbitrary permissions for filesystem access without * changing the "normal" UIDs which are used for other things. * * When checking for MAY_APPEND, MAY_WRITE must also be set in @mask. */ int inode_permission(struct inode *inode, int mask) { int retval; retval = sb_permission(inode->i_sb, inode, mask); if (retval) return retval; return __inode_permission(inode, mask); } EXPORT_SYMBOL(inode_permission); /** * path_get - get a reference to a path * @path: path to get the reference to * * Given a path increment the reference count to the dentry and the vfsmount. */ void path_get(const struct path *path) { mntget(path->mnt); dget(path->dentry); } EXPORT_SYMBOL(path_get); /** * path_put - put a reference to a path * @path: path to put the reference to * * Given a path decrement the reference count to the dentry and the vfsmount. 
*/ void path_put(const struct path *path) { dput(path->dentry); mntput(path->mnt); } EXPORT_SYMBOL(path_put); #define EMBEDDED_LEVELS 2 struct nameidata { struct path path; struct qstr last; struct path root; struct inode *inode; /* path.dentry.d_inode */ unsigned int flags; unsigned seq, m_seq; int last_type; unsigned depth; int total_link_count; struct saved { struct path link; struct delayed_call done; const char *name; unsigned seq; } *stack, internal[EMBEDDED_LEVELS]; struct filename *name; struct nameidata *saved; struct inode *link_inode; unsigned root_seq; int dfd; }; static void set_nameidata(struct nameidata *p, int dfd, struct filename *name) { struct nameidata *old = current->nameidata; p->stack = p->internal; p->dfd = dfd; p->name = name; p->total_link_count = old ? old->total_link_count : 0; p->saved = old; current->nameidata = p; } static void restore_nameidata(void) { struct nameidata *now = current->nameidata, *old = now->saved; current->nameidata = old; if (old) old->total_link_count = now->total_link_count; if (now->stack != now->internal) kfree(now->stack); } static int __nd_alloc_stack(struct nameidata *nd) { struct saved *p; if (nd->flags & LOOKUP_RCU) { p= kmalloc(MAXSYMLINKS * sizeof(struct saved), GFP_ATOMIC); if (unlikely(!p)) return -ECHILD; } else { p= kmalloc(MAXSYMLINKS * sizeof(struct saved), GFP_KERNEL); if (unlikely(!p)) return -ENOMEM; } memcpy(p, nd->internal, sizeof(nd->internal)); nd->stack = p; return 0; } /** * path_connected - Verify that a path->dentry is below path->mnt.mnt_root * @path: nameidate to verify * * Rename can sometimes move a file or directory outside of a bind * mount, path_connected allows those cases to be detected. 
*/ static bool path_connected(const struct path *path) { struct vfsmount *mnt = path->mnt; /* Only bind mounts can have disconnected paths */ if (mnt->mnt_root == mnt->mnt_sb->s_root) return true; return is_subdir(path->dentry, mnt->mnt_root); } static inline int nd_alloc_stack(struct nameidata *nd) { if (likely(nd->depth != EMBEDDED_LEVELS)) return 0; if (likely(nd->stack != nd->internal)) return 0; return __nd_alloc_stack(nd); } static void drop_links(struct nameidata *nd) { int i = nd->depth; while (i--) { struct saved *last = nd->stack + i; do_delayed_call(&last->done); clear_delayed_call(&last->done); } } static void terminate_walk(struct nameidata *nd) { drop_links(nd); if (!(nd->flags & LOOKUP_RCU)) { int i; path_put(&nd->path); for (i = 0; i < nd->depth; i++) path_put(&nd->stack[i].link); if (nd->root.mnt && !(nd->flags & LOOKUP_ROOT)) { path_put(&nd->root); nd->root.mnt = NULL; } } else { nd->flags &= ~LOOKUP_RCU; if (!(nd->flags & LOOKUP_ROOT)) nd->root.mnt = NULL; rcu_read_unlock(); } nd->depth = 0; } /* path_put is needed afterwards regardless of success or failure */ static bool legitimize_path(struct nameidata *nd, struct path *path, unsigned seq) { int res = __legitimize_mnt(path->mnt, nd->m_seq); if (unlikely(res)) { if (res > 0) path->mnt = NULL; path->dentry = NULL; return false; } if (unlikely(!lockref_get_not_dead(&path->dentry->d_lockref))) { path->dentry = NULL; return false; } return !read_seqcount_retry(&path->dentry->d_seq, seq); } static bool legitimize_links(struct nameidata *nd) { int i; for (i = 0; i < nd->depth; i++) { struct saved *last = nd->stack + i; if (unlikely(!legitimize_path(nd, &last->link, last->seq))) { drop_links(nd); nd->depth = i + 1; return false; } } return true; } /* * Path walking has 2 modes, rcu-walk and ref-walk (see * Documentation/filesystems/path-lookup.txt). 
In situations when we can't * continue in RCU mode, we attempt to drop out of rcu-walk mode and grab * normal reference counts on dentries and vfsmounts to transition to ref-walk * mode. Refcounts are grabbed at the last known good point before rcu-walk * got stuck, so ref-walk may continue from there. If this is not successful * (eg. a seqcount has changed), then failure is returned and it's up to caller * to restart the path walk from the beginning in ref-walk mode. */ /** * unlazy_walk - try to switch to ref-walk mode. * @nd: nameidata pathwalk data * @dentry: child of nd->path.dentry or NULL * @seq: seq number to check dentry against * Returns: 0 on success, -ECHILD on failure * * unlazy_walk attempts to legitimize the current nd->path, nd->root and dentry * for ref-walk mode. @dentry must be a path found by a do_lookup call on * @nd or NULL. Must be called from rcu-walk context. * Nothing should touch nameidata between unlazy_walk() failure and * terminate_walk(). */ static int unlazy_walk(struct nameidata *nd, struct dentry *dentry, unsigned seq) { struct dentry *parent = nd->path.dentry; BUG_ON(!(nd->flags & LOOKUP_RCU)); nd->flags &= ~LOOKUP_RCU; if (unlikely(!legitimize_links(nd))) goto out2; if (unlikely(!legitimize_mnt(nd->path.mnt, nd->m_seq))) goto out2; if (unlikely(!lockref_get_not_dead(&parent->d_lockref))) goto out1; /* * For a negative lookup, the lookup sequence point is the parents * sequence point, and it only needs to revalidate the parent dentry. * * For a positive lookup, we need to move both the parent and the * dentry from the RCU domain to be properly refcounted. And the * sequence number in the dentry validates *both* dentry counters, * since we checked the sequence number of the parent after we got * the child sequence number. So we know the parent must still * be valid if the child sequence number is still valid. 
*/ if (!dentry) { if (read_seqcount_retry(&parent->d_seq, nd->seq)) goto out; BUG_ON(nd->inode != parent->d_inode); } else { if (!lockref_get_not_dead(&dentry->d_lockref)) goto out; if (read_seqcount_retry(&dentry->d_seq, seq)) goto drop_dentry; } /* * Sequence counts matched. Now make sure that the root is * still valid and get it if required. */ if (nd->root.mnt && !(nd->flags & LOOKUP_ROOT)) { if (unlikely(!legitimize_path(nd, &nd->root, nd->root_seq))) { rcu_read_unlock(); dput(dentry); return -ECHILD; } } rcu_read_unlock(); return 0; drop_dentry: rcu_read_unlock(); dput(dentry); goto drop_root_mnt; out2: nd->path.mnt = NULL; out1: nd->path.dentry = NULL; out: rcu_read_unlock(); drop_root_mnt: if (!(nd->flags & LOOKUP_ROOT)) nd->root.mnt = NULL; return -ECHILD; } static int unlazy_link(struct nameidata *nd, struct path *link, unsigned seq) { if (unlikely(!legitimize_path(nd, link, seq))) { drop_links(nd); nd->depth = 0; nd->flags &= ~LOOKUP_RCU; nd->path.mnt = NULL; nd->path.dentry = NULL; if (!(nd->flags & LOOKUP_ROOT)) nd->root.mnt = NULL; rcu_read_unlock(); } else if (likely(unlazy_walk(nd, NULL, 0)) == 0) { return 0; } path_put(link); return -ECHILD; } static inline int d_revalidate(struct dentry *dentry, unsigned int flags) { return dentry->d_op->d_revalidate(dentry, flags); } /** * complete_walk - successful completion of path walk * @nd: pointer nameidata * * If we had been in RCU mode, drop out of it and legitimize nd->path. * Revalidate the final result, unless we'd already done that during * the path walk or the filesystem doesn't ask for it. Return 0 on * success, -error on failure. In case of failure caller does not * need to drop nd->path. 
*/ static int complete_walk(struct nameidata *nd) { struct dentry *dentry = nd->path.dentry; int status; if (nd->flags & LOOKUP_RCU) { if (!(nd->flags & LOOKUP_ROOT)) nd->root.mnt = NULL; if (unlikely(unlazy_walk(nd, NULL, 0))) return -ECHILD; } if (likely(!(nd->flags & LOOKUP_JUMPED))) return 0; if (likely(!(dentry->d_flags & DCACHE_OP_WEAK_REVALIDATE))) return 0; status = dentry->d_op->d_weak_revalidate(dentry, nd->flags); if (status > 0) return 0; if (!status) status = -ESTALE; return status; } static void set_root(struct nameidata *nd) { struct fs_struct *fs = current->fs; if (nd->flags & LOOKUP_RCU) { unsigned seq; do { seq = read_seqcount_begin(&fs->seq); nd->root = fs->root; nd->root_seq = __read_seqcount_begin(&nd->root.dentry->d_seq); } while (read_seqcount_retry(&fs->seq, seq)); } else { get_fs_root(fs, &nd->root); } } static void path_put_conditional(struct path *path, struct nameidata *nd) { dput(path->dentry); if (path->mnt != nd->path.mnt) mntput(path->mnt); } static inline void path_to_nameidata(const struct path *path, struct nameidata *nd) { if (!(nd->flags & LOOKUP_RCU)) { dput(nd->path.dentry); if (nd->path.mnt != path->mnt) mntput(nd->path.mnt); } nd->path.mnt = path->mnt; nd->path.dentry = path->dentry; } static int nd_jump_root(struct nameidata *nd) { if (nd->flags & LOOKUP_RCU) { struct dentry *d; nd->path = nd->root; d = nd->path.dentry; nd->inode = d->d_inode; nd->seq = nd->root_seq; if (unlikely(read_seqcount_retry(&d->d_seq, nd->seq))) return -ECHILD; } else { path_put(&nd->path); nd->path = nd->root; path_get(&nd->path); nd->inode = nd->path.dentry->d_inode; } nd->flags |= LOOKUP_JUMPED; return 0; } /* * Helper to directly jump to a known parsed path from ->get_link, * caller must have taken a reference to path beforehand. 
*/ void nd_jump_link(struct path *path) { struct nameidata *nd = current->nameidata; path_put(&nd->path); nd->path = *path; nd->inode = nd->path.dentry->d_inode; nd->flags |= LOOKUP_JUMPED; } static inline void put_link(struct nameidata *nd) { struct saved *last = nd->stack + --nd->depth; do_delayed_call(&last->done); if (!(nd->flags & LOOKUP_RCU)) path_put(&last->link); } int sysctl_protected_symlinks __read_mostly = 0; int sysctl_protected_hardlinks __read_mostly = 0; /** * may_follow_link - Check symlink following for unsafe situations * @nd: nameidata pathwalk data * * In the case of the sysctl_protected_symlinks sysctl being enabled, * CAP_DAC_OVERRIDE needs to be specifically ignored if the symlink is * in a sticky world-writable directory. This is to protect privileged * processes from failing races against path names that may change out * from under them by way of other users creating malicious symlinks. * It will permit symlinks to be followed only when outside a sticky * world-writable directory, or when the uid of the symlink and follower * match, or when the directory owner matches the symlink's owner. * * Returns 0 if following the symlink is allowed, -ve on error. */ static inline int may_follow_link(struct nameidata *nd) { const struct inode *inode; const struct inode *parent; if (!sysctl_protected_symlinks) return 0; /* Allowed if owner and follower match. */ inode = nd->link_inode; if (uid_eq(current_cred()->fsuid, inode->i_uid)) return 0; /* Allowed if parent directory not sticky and world-writable. */ parent = nd->inode; if ((parent->i_mode & (S_ISVTX|S_IWOTH)) != (S_ISVTX|S_IWOTH)) return 0; /* Allowed if parent directory and link owner match. 
*/ if (uid_eq(parent->i_uid, inode->i_uid)) return 0; if (nd->flags & LOOKUP_RCU) return -ECHILD; audit_log_link_denied("follow_link", &nd->stack[0].link); return -EACCES; } /** * safe_hardlink_source - Check for safe hardlink conditions * @inode: the source inode to hardlink from * * Return false if at least one of the following conditions: * - inode is not a regular file * - inode is setuid * - inode is setgid and group-exec * - access failure for read and write * * Otherwise returns true. */ static bool safe_hardlink_source(struct inode *inode) { umode_t mode = inode->i_mode; /* Special files should not get pinned to the filesystem. */ if (!S_ISREG(mode)) return false; /* Setuid files should not get pinned to the filesystem. */ if (mode & S_ISUID) return false; /* Executable setgid files should not get pinned to the filesystem. */ if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) return false; /* Hardlinking to unreadable or unwritable sources is dangerous. */ if (inode_permission(inode, MAY_READ | MAY_WRITE)) return false; return true; } /** * may_linkat - Check permissions for creating a hardlink * @link: the source to hardlink from * * Block hardlink when all of: * - sysctl_protected_hardlinks enabled * - fsuid does not match inode * - hardlink source is unsafe (see safe_hardlink_source() above) * - not CAP_FOWNER in a namespace with the inode owner uid mapped * * Returns 0 if successful, -ve on error. */ static int may_linkat(struct path *link) { struct inode *inode; if (!sysctl_protected_hardlinks) return 0; inode = link->dentry->d_inode; /* Source inode owner (or CAP_FOWNER) can hardlink all they like, * otherwise, it must be a safe source. 
*/ if (inode_owner_or_capable(inode) || safe_hardlink_source(inode)) return 0; audit_log_link_denied("linkat", link); return -EPERM; } static __always_inline const char *get_link(struct nameidata *nd) { struct saved *last = nd->stack + nd->depth - 1; struct dentry *dentry = last->link.dentry; struct inode *inode = nd->link_inode; int error; const char *res; if (!(nd->flags & LOOKUP_RCU)) { touch_atime(&last->link); cond_resched(); } else if (atime_needs_update(&last->link, inode)) { if (unlikely(unlazy_walk(nd, NULL, 0))) return ERR_PTR(-ECHILD); touch_atime(&last->link); } error = security_inode_follow_link(dentry, inode, nd->flags & LOOKUP_RCU); if (unlikely(error)) return ERR_PTR(error); nd->last_type = LAST_BIND; res = inode->i_link; if (!res) { const char * (*get)(struct dentry *, struct inode *, struct delayed_call *); get = inode->i_op->get_link; if (nd->flags & LOOKUP_RCU) { res = get(NULL, inode, &last->done); if (res == ERR_PTR(-ECHILD)) { if (unlikely(unlazy_walk(nd, NULL, 0))) return ERR_PTR(-ECHILD); res = get(dentry, inode, &last->done); } } else { res = get(dentry, inode, &last->done); } if (IS_ERR_OR_NULL(res)) return res; } if (*res == '/') { if (!nd->root.mnt) set_root(nd); if (unlikely(nd_jump_root(nd))) return ERR_PTR(-ECHILD); while (unlikely(*++res == '/')) ; } if (!*res) res = NULL; return res; } /* * follow_up - Find the mountpoint of path's vfsmount * * Given a path, find the mountpoint of its source file system. * Replace @path with the path of the mountpoint in the parent mount. * Up is towards /. * * Return 1 if we went up a level and 0 if we were already at the * root. 
*/ int follow_up(struct path *path) { struct mount *mnt = real_mount(path->mnt); struct mount *parent; struct dentry *mountpoint; read_seqlock_excl(&mount_lock); parent = mnt->mnt_parent; if (parent == mnt) { read_sequnlock_excl(&mount_lock); return 0; } mntget(&parent->mnt); mountpoint = dget(mnt->mnt_mountpoint); read_sequnlock_excl(&mount_lock); dput(path->dentry); path->dentry = mountpoint; mntput(path->mnt); path->mnt = &parent->mnt; return 1; } EXPORT_SYMBOL(follow_up); /* * Perform an automount * - return -EISDIR to tell follow_managed() to stop and return the path we * were called with. */ static int follow_automount(struct path *path, struct nameidata *nd, bool *need_mntput) { struct vfsmount *mnt; int err; if (!path->dentry->d_op || !path->dentry->d_op->d_automount) return -EREMOTE; /* We don't want to mount if someone's just doing a stat - * unless they're stat'ing a directory and appended a '/' to * the name. * * We do, however, want to mount if someone wants to open or * create a file of any type under the mountpoint, wants to * traverse through the mountpoint or wants to open the * mounted directory. Also, autofs may mark negative dentries * as being automount points. These will need the attentions * of the daemon to instantiate them before they can be used. */ if (!(nd->flags & (LOOKUP_PARENT | LOOKUP_DIRECTORY | LOOKUP_OPEN | LOOKUP_CREATE | LOOKUP_AUTOMOUNT)) && path->dentry->d_inode) return -EISDIR; nd->total_link_count++; if (nd->total_link_count >= 40) return -ELOOP; mnt = path->dentry->d_op->d_automount(path); if (IS_ERR(mnt)) { /* * The filesystem is allowed to return -EISDIR here to indicate * it doesn't want to automount. For instance, autofs would do * this so that its userspace daemon can mount on this dentry. * * However, we can only permit this if it's a terminal point in * the path being looked up; if it wasn't then the remainder of * the path is inaccessible and we should say so. 
*/ if (PTR_ERR(mnt) == -EISDIR && (nd->flags & LOOKUP_PARENT)) return -EREMOTE; return PTR_ERR(mnt); } if (!mnt) /* mount collision */ return 0; if (!*need_mntput) { /* lock_mount() may release path->mnt on error */ mntget(path->mnt); *need_mntput = true; } err = finish_automount(mnt, path); switch (err) { case -EBUSY: /* Someone else made a mount here whilst we were busy */ return 0; case 0: path_put(path); path->mnt = mnt; path->dentry = dget(mnt->mnt_root); return 0; default: return err; } } /* * Handle a dentry that is managed in some way. * - Flagged for transit management (autofs) * - Flagged as mountpoint * - Flagged as automount point * * This may only be called in refwalk mode. * * Serialization is taken care of in namespace.c */ static int follow_managed(struct path *path, struct nameidata *nd) { struct vfsmount *mnt = path->mnt; /* held by caller, must be left alone */ unsigned managed; bool need_mntput = false; int ret = 0; /* Given that we're not holding a lock here, we retain the value in a * local variable for each dentry as we look at it so that we don't see * the components of that value change under us */ while (managed = ACCESS_ONCE(path->dentry->d_flags), managed &= DCACHE_MANAGED_DENTRY, unlikely(managed != 0)) { /* Allow the filesystem to manage the transit without i_mutex * being held. */ if (managed & DCACHE_MANAGE_TRANSIT) { BUG_ON(!path->dentry->d_op); BUG_ON(!path->dentry->d_op->d_manage); ret = path->dentry->d_op->d_manage(path->dentry, false); if (ret < 0) break; } /* Transit to a mounted filesystem. 
*/ if (managed & DCACHE_MOUNTED) { struct vfsmount *mounted = lookup_mnt(path); if (mounted) { dput(path->dentry); if (need_mntput) mntput(path->mnt); path->mnt = mounted; path->dentry = dget(mounted->mnt_root); need_mntput = true; continue; } /* Something is mounted on this dentry in another * namespace and/or whatever was mounted there in this * namespace got unmounted before lookup_mnt() could * get it */ } /* Handle an automount point */ if (managed & DCACHE_NEED_AUTOMOUNT) { ret = follow_automount(path, nd, &need_mntput); if (ret < 0) break; continue; } /* We didn't change the current path point */ break; } if (need_mntput && path->mnt == mnt) mntput(path->mnt); if (ret == -EISDIR || !ret) ret = 1; if (need_mntput) nd->flags |= LOOKUP_JUMPED; if (unlikely(ret < 0)) path_put_conditional(path, nd); return ret; } int follow_down_one(struct path *path) { struct vfsmount *mounted; mounted = lookup_mnt(path); if (mounted) { dput(path->dentry); mntput(path->mnt); path->mnt = mounted; path->dentry = dget(mounted->mnt_root); return 1; } return 0; } EXPORT_SYMBOL(follow_down_one); static inline int managed_dentry_rcu(struct dentry *dentry) { return (dentry->d_flags & DCACHE_MANAGE_TRANSIT) ? dentry->d_op->d_manage(dentry, true) : 0; } /* * Try to skip to top of mountpoint pile in rcuwalk mode. Fail if * we meet a managed dentry that would need blocking. */ static bool __follow_mount_rcu(struct nameidata *nd, struct path *path, struct inode **inode, unsigned *seqp) { for (;;) { struct mount *mounted; /* * Don't forget we might have a non-mountpoint managed dentry * that wants to block transit. 
*/ switch (managed_dentry_rcu(path->dentry)) { case -ECHILD: default: return false; case -EISDIR: return true; case 0: break; } if (!d_mountpoint(path->dentry)) return !(path->dentry->d_flags & DCACHE_NEED_AUTOMOUNT); mounted = __lookup_mnt(path->mnt, path->dentry); if (!mounted) break; path->mnt = &mounted->mnt; path->dentry = mounted->mnt.mnt_root; nd->flags |= LOOKUP_JUMPED; *seqp = read_seqcount_begin(&path->dentry->d_seq); /* * Update the inode too. We don't need to re-check the * dentry sequence number here after this d_inode read, * because a mount-point is always pinned. */ *inode = path->dentry->d_inode; } return !read_seqretry(&mount_lock, nd->m_seq) && !(path->dentry->d_flags & DCACHE_NEED_AUTOMOUNT); } static int follow_dotdot_rcu(struct nameidata *nd) { struct inode *inode = nd->inode; while (1) { if (path_equal(&nd->path, &nd->root)) break; if (nd->path.dentry != nd->path.mnt->mnt_root) { struct dentry *old = nd->path.dentry; struct dentry *parent = old->d_parent; unsigned seq; inode = parent->d_inode; seq = read_seqcount_begin(&parent->d_seq); if (unlikely(read_seqcount_retry(&old->d_seq, nd->seq))) return -ECHILD; nd->path.dentry = parent; nd->seq = seq; if (unlikely(!path_connected(&nd->path))) return -ENOENT; break; } else { struct mount *mnt = real_mount(nd->path.mnt); struct mount *mparent = mnt->mnt_parent; struct dentry *mountpoint = mnt->mnt_mountpoint; struct inode *inode2 = mountpoint->d_inode; unsigned seq = read_seqcount_begin(&mountpoint->d_seq); if (unlikely(read_seqretry(&mount_lock, nd->m_seq))) return -ECHILD; if (&mparent->mnt == nd->path.mnt) break; /* we know that mountpoint was pinned */ nd->path.dentry = mountpoint; nd->path.mnt = &mparent->mnt; inode = inode2; nd->seq = seq; } } while (unlikely(d_mountpoint(nd->path.dentry))) { struct mount *mounted; mounted = __lookup_mnt(nd->path.mnt, nd->path.dentry); if (unlikely(read_seqretry(&mount_lock, nd->m_seq))) return -ECHILD; if (!mounted) break; nd->path.mnt = &mounted->mnt; 
nd->path.dentry = mounted->mnt.mnt_root; inode = nd->path.dentry->d_inode; nd->seq = read_seqcount_begin(&nd->path.dentry->d_seq); } nd->inode = inode; return 0; } /* * Follow down to the covering mount currently visible to userspace. At each * point, the filesystem owning that dentry may be queried as to whether the * caller is permitted to proceed or not. */ int follow_down(struct path *path) { unsigned managed; int ret; while (managed = ACCESS_ONCE(path->dentry->d_flags), unlikely(managed & DCACHE_MANAGED_DENTRY)) { /* Allow the filesystem to manage the transit without i_mutex * being held. * * We indicate to the filesystem if someone is trying to mount * something here. This gives autofs the chance to deny anyone * other than its daemon the right to mount on its * superstructure. * * The filesystem may sleep at this point. */ if (managed & DCACHE_MANAGE_TRANSIT) { BUG_ON(!path->dentry->d_op); BUG_ON(!path->dentry->d_op->d_manage); ret = path->dentry->d_op->d_manage( path->dentry, false); if (ret < 0) return ret == -EISDIR ? 0 : ret; } /* Transit to a mounted filesystem. */ if (managed & DCACHE_MOUNTED) { struct vfsmount *mounted = lookup_mnt(path); if (!mounted) break; dput(path->dentry); mntput(path->mnt); path->mnt = mounted; path->dentry = dget(mounted->mnt_root); continue; } /* Don't handle automount points here */ break; } return 0; } EXPORT_SYMBOL(follow_down); /* * Skip to top of mountpoint pile in refwalk mode for follow_dotdot() */ static void follow_mount(struct path *path) { while (d_mountpoint(path->dentry)) { struct vfsmount *mounted = lookup_mnt(path); if (!mounted) break; dput(path->dentry); mntput(path->mnt); path->mnt = mounted; path->dentry = dget(mounted->mnt_root); } } static int follow_dotdot(struct nameidata *nd) { while(1) { struct dentry *old = nd->path.dentry; if (nd->path.dentry == nd->root.dentry && nd->path.mnt == nd->root.mnt) { break; } if (nd->path.dentry != nd->path.mnt->mnt_root) { /* rare case of legitimate dget_parent()... 
*/ nd->path.dentry = dget_parent(nd->path.dentry); dput(old); if (unlikely(!path_connected(&nd->path))) return -ENOENT; break; } if (!follow_up(&nd->path)) break; } follow_mount(&nd->path); nd->inode = nd->path.dentry->d_inode; return 0; } /* * This looks up the name in dcache, possibly revalidates the old dentry and * allocates a new one if not found or not valid. In the need_lookup argument * returns whether i_op->lookup is necessary. */ static struct dentry *lookup_dcache(const struct qstr *name, struct dentry *dir, unsigned int flags) { struct dentry *dentry; int error; dentry = d_lookup(dir, name); if (dentry) { if (dentry->d_flags & DCACHE_OP_REVALIDATE) { error = d_revalidate(dentry, flags); if (unlikely(error <= 0)) { if (!error) d_invalidate(dentry); dput(dentry); return ERR_PTR(error); } } } return dentry; } /* * Call i_op->lookup on the dentry. The dentry must be negative and * unhashed. * * dir->d_inode->i_mutex must be held */ static struct dentry *lookup_real(struct inode *dir, struct dentry *dentry, unsigned int flags) { struct dentry *old; /* Don't create child dentry for a dead directory. 
*/ if (unlikely(IS_DEADDIR(dir))) { dput(dentry); return ERR_PTR(-ENOENT); } old = dir->i_op->lookup(dir, dentry, flags); if (unlikely(old)) { dput(dentry); dentry = old; } return dentry; } static struct dentry *__lookup_hash(const struct qstr *name, struct dentry *base, unsigned int flags) { struct dentry *dentry = lookup_dcache(name, base, flags); if (dentry) return dentry; dentry = d_alloc(base, name); if (unlikely(!dentry)) return ERR_PTR(-ENOMEM); return lookup_real(base->d_inode, dentry, flags); } static int lookup_fast(struct nameidata *nd, struct path *path, struct inode **inode, unsigned *seqp) { struct vfsmount *mnt = nd->path.mnt; struct dentry *dentry, *parent = nd->path.dentry; int status = 1; int err; /* * Rename seqlock is not required here because in the off chance * of a false negative due to a concurrent rename, the caller is * going to fall back to non-racy lookup. */ if (nd->flags & LOOKUP_RCU) { unsigned seq; bool negative; dentry = __d_lookup_rcu(parent, &nd->last, &seq); if (unlikely(!dentry)) { if (unlazy_walk(nd, NULL, 0)) return -ECHILD; return 0; } /* * This sequence count validates that the inode matches * the dentry name information from lookup. */ *inode = d_backing_inode(dentry); negative = d_is_negative(dentry); if (unlikely(read_seqcount_retry(&dentry->d_seq, seq))) return -ECHILD; /* * This sequence count validates that the parent had no * changes while we did the lookup of the dentry above. * * The memory barrier in read_seqcount_begin of child is * enough, we can use __read_seqcount_retry here. */ if (unlikely(__read_seqcount_retry(&parent->d_seq, nd->seq))) return -ECHILD; *seqp = seq; if (unlikely(dentry->d_flags & DCACHE_OP_REVALIDATE)) status = d_revalidate(dentry, nd->flags); if (unlikely(status <= 0)) { if (unlazy_walk(nd, dentry, seq)) return -ECHILD; if (status == -ECHILD) status = d_revalidate(dentry, nd->flags); } else { /* * Note: do negative dentry check after revalidation in * case that drops it. 
*/ if (unlikely(negative)) return -ENOENT; path->mnt = mnt; path->dentry = dentry; if (likely(__follow_mount_rcu(nd, path, inode, seqp))) return 1; if (unlazy_walk(nd, dentry, seq)) return -ECHILD; } } else { dentry = __d_lookup(parent, &nd->last); if (unlikely(!dentry)) return 0; if (unlikely(dentry->d_flags & DCACHE_OP_REVALIDATE)) status = d_revalidate(dentry, nd->flags); } if (unlikely(status <= 0)) { if (!status) d_invalidate(dentry); dput(dentry); return status; } if (unlikely(d_is_negative(dentry))) { dput(dentry); return -ENOENT; } path->mnt = mnt; path->dentry = dentry; err = follow_managed(path, nd); if (likely(err > 0)) *inode = d_backing_inode(path->dentry); return err; } /* Fast lookup failed, do it the slow way */ static struct dentry *lookup_slow(const struct qstr *name, struct dentry *dir, unsigned int flags) { struct dentry *dentry; inode_lock(dir->d_inode); dentry = d_lookup(dir, name); if (unlikely(dentry)) { if ((dentry->d_flags & DCACHE_OP_REVALIDATE) && !(flags & LOOKUP_NO_REVAL)) { int error = d_revalidate(dentry, flags); if (unlikely(error <= 0)) { if (!error) d_invalidate(dentry); dput(dentry); dentry = ERR_PTR(error); } } if (dentry) { inode_unlock(dir->d_inode); return dentry; } } dentry = d_alloc(dir, name); if (unlikely(!dentry)) { inode_unlock(dir->d_inode); return ERR_PTR(-ENOMEM); } dentry = lookup_real(dir->d_inode, dentry, flags); inode_unlock(dir->d_inode); return dentry; } static inline int may_lookup(struct nameidata *nd) { if (nd->flags & LOOKUP_RCU) { int err = inode_permission(nd->inode, MAY_EXEC|MAY_NOT_BLOCK); if (err != -ECHILD) return err; if (unlazy_walk(nd, NULL, 0)) return -ECHILD; } return inode_permission(nd->inode, MAY_EXEC); } static inline int handle_dots(struct nameidata *nd, int type) { if (type == LAST_DOTDOT) { if (!nd->root.mnt) set_root(nd); if (nd->flags & LOOKUP_RCU) { return follow_dotdot_rcu(nd); } else return follow_dotdot(nd); } return 0; } static int pick_link(struct nameidata *nd, struct path *link, 
struct inode *inode, unsigned seq) { int error; struct saved *last; if (unlikely(nd->total_link_count++ >= MAXSYMLINKS)) { path_to_nameidata(link, nd); return -ELOOP; } if (!(nd->flags & LOOKUP_RCU)) { if (link->mnt == nd->path.mnt) mntget(link->mnt); } error = nd_alloc_stack(nd); if (unlikely(error)) { if (error == -ECHILD) { if (unlikely(unlazy_link(nd, link, seq))) return -ECHILD; error = nd_alloc_stack(nd); } if (error) { path_put(link); return error; } } last = nd->stack + nd->depth++; last->link = *link; clear_delayed_call(&last->done); nd->link_inode = inode; last->seq = seq; return 1; } /* * Do we need to follow links? We _really_ want to be able * to do this check without having to look at inode->i_op, * so we keep a cache of "no, this doesn't need follow_link" * for the common case. */ static inline int should_follow_link(struct nameidata *nd, struct path *link, int follow, struct inode *inode, unsigned seq) { if (likely(!d_is_symlink(link->dentry))) return 0; if (!follow) return 0; /* make sure that d_is_symlink above matches inode */ if (nd->flags & LOOKUP_RCU) { if (read_seqcount_retry(&link->dentry->d_seq, seq)) return -ECHILD; } return pick_link(nd, link, inode, seq); } enum {WALK_GET = 1, WALK_PUT = 2}; static int walk_component(struct nameidata *nd, int flags) { struct path path; struct inode *inode; unsigned seq; int err; /* * "." and ".." are special - ".." especially so because it has * to be able to know about the current root directory and * parent relationships. 
*/ if (unlikely(nd->last_type != LAST_NORM)) { err = handle_dots(nd, nd->last_type); if (flags & WALK_PUT) put_link(nd); return err; } err = lookup_fast(nd, &path, &inode, &seq); if (unlikely(err <= 0)) { if (err < 0) return err; path.dentry = lookup_slow(&nd->last, nd->path.dentry, nd->flags); if (IS_ERR(path.dentry)) return PTR_ERR(path.dentry); path.mnt = nd->path.mnt; err = follow_managed(&path, nd); if (unlikely(err < 0)) return err; if (unlikely(d_is_negative(path.dentry))) { path_to_nameidata(&path, nd); return -ENOENT; } seq = 0; /* we are already out of RCU mode */ inode = d_backing_inode(path.dentry); } if (flags & WALK_PUT) put_link(nd); err = should_follow_link(nd, &path, flags & WALK_GET, inode, seq); if (unlikely(err)) return err; path_to_nameidata(&path, nd); nd->inode = inode; nd->seq = seq; return 0; } /* * We can do the critical dentry name comparison and hashing * operations one word at a time, but we are limited to: * * - Architectures with fast unaligned word accesses. We could * do a "get_unaligned()" if this helps and is sufficiently * fast. * * - non-CONFIG_DEBUG_PAGEALLOC configurations (so that we * do not trap on the (extremely unlikely) case of a page * crossing operation. * * - Furthermore, we need an efficient 64-bit compile for the * 64-bit case in order to generate the "number of bytes in * the final mask". Again, that could be replaced with a * efficient population count instruction or similar. 
*/ #ifdef CONFIG_DCACHE_WORD_ACCESS #include <asm/word-at-a-time.h> #ifdef CONFIG_64BIT static inline unsigned int fold_hash(unsigned long hash) { return hash_64(hash, 32); } #else /* 32-bit case */ #define fold_hash(x) (x) #endif unsigned int full_name_hash(const unsigned char *name, unsigned int len) { unsigned long a, mask; unsigned long hash = 0; for (;;) { a = load_unaligned_zeropad(name); if (len < sizeof(unsigned long)) break; hash += a; hash *= 9; name += sizeof(unsigned long); len -= sizeof(unsigned long); if (!len) goto done; } mask = bytemask_from_count(len); hash += mask & a; done: return fold_hash(hash); } EXPORT_SYMBOL(full_name_hash); /* * Calculate the length and hash of the path component, and * return the "hash_len" as the result. */ static inline u64 hash_name(const char *name) { unsigned long a, b, adata, bdata, mask, hash, len; const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS; hash = a = 0; len = -sizeof(unsigned long); do { hash = (hash + a) * 9; len += sizeof(unsigned long); a = load_unaligned_zeropad(name+len); b = a ^ REPEAT_BYTE('/'); } while (!(has_zero(a, &adata, &constants) | has_zero(b, &bdata, &constants))); adata = prep_zero_mask(a, adata, &constants); bdata = prep_zero_mask(b, bdata, &constants); mask = create_zero_mask(adata | bdata); hash += a & zero_bytemask(mask); len += find_zero(mask); return hashlen_create(fold_hash(hash), len); } #else unsigned int full_name_hash(const unsigned char *name, unsigned int len) { unsigned long hash = init_name_hash(); while (len--) hash = partial_name_hash(*name++, hash); return end_name_hash(hash); } EXPORT_SYMBOL(full_name_hash); /* * We know there's a real path component here of at least * one character. 
*/ static inline u64 hash_name(const char *name) { unsigned long hash = init_name_hash(); unsigned long len = 0, c; c = (unsigned char)*name; do { len++; hash = partial_name_hash(c, hash); c = (unsigned char)name[len]; } while (c && c != '/'); return hashlen_create(end_name_hash(hash), len); } #endif /* * Name resolution. * This is the basic name resolution function, turning a pathname into * the final dentry. We expect 'base' to be positive and a directory. * * Returns 0 and nd will have valid dentry and mnt on success. * Returns error and drops reference to input namei data on failure. */ static int link_path_walk(const char *name, struct nameidata *nd) { int err; while (*name=='/') name++; if (!*name) return 0; /* At this point we know we have a real path component. */ for(;;) { u64 hash_len; int type; err = may_lookup(nd); if (err) return err; hash_len = hash_name(name); type = LAST_NORM; if (name[0] == '.') switch (hashlen_len(hash_len)) { case 2: if (name[1] == '.') { type = LAST_DOTDOT; nd->flags |= LOOKUP_JUMPED; } break; case 1: type = LAST_DOT; } if (likely(type == LAST_NORM)) { struct dentry *parent = nd->path.dentry; nd->flags &= ~LOOKUP_JUMPED; if (unlikely(parent->d_flags & DCACHE_OP_HASH)) { struct qstr this = { { .hash_len = hash_len }, .name = name }; err = parent->d_op->d_hash(parent, &this); if (err < 0) return err; hash_len = this.hash_len; name = this.name; } } nd->last.hash_len = hash_len; nd->last.name = name; nd->last_type = type; name += hashlen_len(hash_len); if (!*name) goto OK; /* * If it wasn't NUL, we know it was '/'. Skip that * slash, and continue until no more slashes. 
*/ do { name++; } while (unlikely(*name == '/')); if (unlikely(!*name)) { OK: /* pathname body, done */ if (!nd->depth) return 0; name = nd->stack[nd->depth - 1].name; /* trailing symlink, done */ if (!name) return 0; /* last component of nested symlink */ err = walk_component(nd, WALK_GET | WALK_PUT); } else { err = walk_component(nd, WALK_GET); } if (err < 0) return err; if (err) { const char *s = get_link(nd); if (IS_ERR(s)) return PTR_ERR(s); err = 0; if (unlikely(!s)) { /* jumped */ put_link(nd); } else { nd->stack[nd->depth - 1].name = name; name = s; continue; } } if (unlikely(!d_can_lookup(nd->path.dentry))) { if (nd->flags & LOOKUP_RCU) { if (unlazy_walk(nd, NULL, 0)) return -ECHILD; } return -ENOTDIR; } } } static const char *path_init(struct nameidata *nd, unsigned flags) { int retval = 0; const char *s = nd->name->name; nd->last_type = LAST_ROOT; /* if there are only slashes... */ nd->flags = flags | LOOKUP_JUMPED | LOOKUP_PARENT; nd->depth = 0; if (flags & LOOKUP_ROOT) { struct dentry *root = nd->root.dentry; struct inode *inode = root->d_inode; if (*s) { if (!d_can_lookup(root)) return ERR_PTR(-ENOTDIR); retval = inode_permission(inode, MAY_EXEC); if (retval) return ERR_PTR(retval); } nd->path = nd->root; nd->inode = inode; if (flags & LOOKUP_RCU) { rcu_read_lock(); nd->seq = __read_seqcount_begin(&nd->path.dentry->d_seq); nd->root_seq = nd->seq; nd->m_seq = read_seqbegin(&mount_lock); } else { path_get(&nd->path); } return s; } nd->root.mnt = NULL; nd->path.mnt = NULL; nd->path.dentry = NULL; nd->m_seq = read_seqbegin(&mount_lock); if (*s == '/') { if (flags & LOOKUP_RCU) rcu_read_lock(); set_root(nd); if (likely(!nd_jump_root(nd))) return s; nd->root.mnt = NULL; rcu_read_unlock(); return ERR_PTR(-ECHILD); } else if (nd->dfd == AT_FDCWD) { if (flags & LOOKUP_RCU) { struct fs_struct *fs = current->fs; unsigned seq; rcu_read_lock(); do { seq = read_seqcount_begin(&fs->seq); nd->path = fs->pwd; nd->inode = nd->path.dentry->d_inode; nd->seq = 
__read_seqcount_begin(&nd->path.dentry->d_seq); } while (read_seqcount_retry(&fs->seq, seq)); } else { get_fs_pwd(current->fs, &nd->path); nd->inode = nd->path.dentry->d_inode; } return s; } else { /* Caller must check execute permissions on the starting path component */ struct fd f = fdget_raw(nd->dfd); struct dentry *dentry; if (!f.file) return ERR_PTR(-EBADF); dentry = f.file->f_path.dentry; if (*s) { if (!d_can_lookup(dentry)) { fdput(f); return ERR_PTR(-ENOTDIR); } } nd->path = f.file->f_path; if (flags & LOOKUP_RCU) { rcu_read_lock(); nd->inode = nd->path.dentry->d_inode; nd->seq = read_seqcount_begin(&nd->path.dentry->d_seq); } else { path_get(&nd->path); nd->inode = nd->path.dentry->d_inode; } fdput(f); return s; } } static const char *trailing_symlink(struct nameidata *nd) { const char *s; int error = may_follow_link(nd); if (unlikely(error)) return ERR_PTR(error); nd->flags |= LOOKUP_PARENT; nd->stack[0].name = NULL; s = get_link(nd); return s ? s : ""; } static inline int lookup_last(struct nameidata *nd) { if (nd->last_type == LAST_NORM && nd->last.name[nd->last.len]) nd->flags |= LOOKUP_FOLLOW | LOOKUP_DIRECTORY; nd->flags &= ~LOOKUP_PARENT; return walk_component(nd, nd->flags & LOOKUP_FOLLOW ? nd->depth ? WALK_PUT | WALK_GET : WALK_GET : 0); } /* Returns 0 and nd will be valid on success; Retuns error, otherwise. 
*/ static int path_lookupat(struct nameidata *nd, unsigned flags, struct path *path) { const char *s = path_init(nd, flags); int err; if (IS_ERR(s)) return PTR_ERR(s); while (!(err = link_path_walk(s, nd)) && ((err = lookup_last(nd)) > 0)) { s = trailing_symlink(nd); if (IS_ERR(s)) { err = PTR_ERR(s); break; } } if (!err) err = complete_walk(nd); if (!err && nd->flags & LOOKUP_DIRECTORY) if (!d_can_lookup(nd->path.dentry)) err = -ENOTDIR; if (!err) { *path = nd->path; nd->path.mnt = NULL; nd->path.dentry = NULL; } terminate_walk(nd); return err; } static int filename_lookup(int dfd, struct filename *name, unsigned flags, struct path *path, struct path *root) { int retval; struct nameidata nd; if (IS_ERR(name)) return PTR_ERR(name); if (unlikely(root)) { nd.root = *root; flags |= LOOKUP_ROOT; } set_nameidata(&nd, dfd, name); retval = path_lookupat(&nd, flags | LOOKUP_RCU, path); if (unlikely(retval == -ECHILD)) retval = path_lookupat(&nd, flags, path); if (unlikely(retval == -ESTALE)) retval = path_lookupat(&nd, flags | LOOKUP_REVAL, path); if (likely(!retval)) audit_inode(name, path->dentry, flags & LOOKUP_PARENT); restore_nameidata(); putname(name); return retval; } /* Returns 0 and nd will be valid on success; Retuns error, otherwise. 
*/ static int path_parentat(struct nameidata *nd, unsigned flags, struct path *parent) { const char *s = path_init(nd, flags); int err; if (IS_ERR(s)) return PTR_ERR(s); err = link_path_walk(s, nd); if (!err) err = complete_walk(nd); if (!err) { *parent = nd->path; nd->path.mnt = NULL; nd->path.dentry = NULL; } terminate_walk(nd); return err; } static struct filename *filename_parentat(int dfd, struct filename *name, unsigned int flags, struct path *parent, struct qstr *last, int *type) { int retval; struct nameidata nd; if (IS_ERR(name)) return name; set_nameidata(&nd, dfd, name); retval = path_parentat(&nd, flags | LOOKUP_RCU, parent); if (unlikely(retval == -ECHILD)) retval = path_parentat(&nd, flags, parent); if (unlikely(retval == -ESTALE)) retval = path_parentat(&nd, flags | LOOKUP_REVAL, parent); if (likely(!retval)) { *last = nd.last; *type = nd.last_type; audit_inode(name, parent->dentry, LOOKUP_PARENT); } else { putname(name); name = ERR_PTR(retval); } restore_nameidata(); return name; } /* does lookup, returns the object with parent locked */ struct dentry *kern_path_locked(const char *name, struct path *path) { struct filename *filename; struct dentry *d; struct qstr last; int type; filename = filename_parentat(AT_FDCWD, getname_kernel(name), 0, path, &last, &type); if (IS_ERR(filename)) return ERR_CAST(filename); if (unlikely(type != LAST_NORM)) { path_put(path); putname(filename); return ERR_PTR(-EINVAL); } inode_lock_nested(path->dentry->d_inode, I_MUTEX_PARENT); d = __lookup_hash(&last, path->dentry, 0); if (IS_ERR(d)) { inode_unlock(path->dentry->d_inode); path_put(path); } putname(filename); return d; } int kern_path(const char *name, unsigned int flags, struct path *path) { return filename_lookup(AT_FDCWD, getname_kernel(name), flags, path, NULL); } EXPORT_SYMBOL(kern_path); /** * vfs_path_lookup - lookup a file path relative to a dentry-vfsmount pair * @dentry: pointer to dentry of the base directory * @mnt: pointer to vfs mount of the base 
directory * @name: pointer to file name * @flags: lookup flags * @path: pointer to struct path to fill */ int vfs_path_lookup(struct dentry *dentry, struct vfsmount *mnt, const char *name, unsigned int flags, struct path *path) { struct path root = {.mnt = mnt, .dentry = dentry}; /* the first argument of filename_lookup() is ignored with root */ return filename_lookup(AT_FDCWD, getname_kernel(name), flags , path, &root); } EXPORT_SYMBOL(vfs_path_lookup); /** * lookup_one_len - filesystem helper to lookup single pathname component * @name: pathname component to lookup * @base: base directory to lookup from * @len: maximum length @len should be interpreted to * * Note that this routine is purely a helper for filesystem usage and should * not be called by generic code. * * The caller must hold base->i_mutex. */ struct dentry *lookup_one_len(const char *name, struct dentry *base, int len) { struct qstr this; unsigned int c; int err; WARN_ON_ONCE(!inode_is_locked(base->d_inode)); this.name = name; this.len = len; this.hash = full_name_hash(name, len); if (!len) return ERR_PTR(-EACCES); if (unlikely(name[0] == '.')) { if (len < 2 || (len == 2 && name[1] == '.')) return ERR_PTR(-EACCES); } while (len--) { c = *(const unsigned char *)name++; if (c == '/' || c == '\0') return ERR_PTR(-EACCES); } /* * See if the low-level filesystem might want * to use its own hash.. */ if (base->d_flags & DCACHE_OP_HASH) { int err = base->d_op->d_hash(base, &this); if (err < 0) return ERR_PTR(err); } err = inode_permission(base->d_inode, MAY_EXEC); if (err) return ERR_PTR(err); return __lookup_hash(&this, base, 0); } EXPORT_SYMBOL(lookup_one_len); /** * lookup_one_len_unlocked - filesystem helper to lookup single pathname component * @name: pathname component to lookup * @base: base directory to lookup from * @len: maximum length @len should be interpreted to * * Note that this routine is purely a helper for filesystem usage and should * not be called by generic code. 
* * Unlike lookup_one_len, it should be called without the parent * i_mutex held, and will take the i_mutex itself if necessary. */ struct dentry *lookup_one_len_unlocked(const char *name, struct dentry *base, int len) { struct qstr this; unsigned int c; int err; struct dentry *ret; this.name = name; this.len = len; this.hash = full_name_hash(name, len); if (!len) return ERR_PTR(-EACCES); if (unlikely(name[0] == '.')) { if (len < 2 || (len == 2 && name[1] == '.')) return ERR_PTR(-EACCES); } while (len--) { c = *(const unsigned char *)name++; if (c == '/' || c == '\0') return ERR_PTR(-EACCES); } /* * See if the low-level filesystem might want * to use its own hash.. */ if (base->d_flags & DCACHE_OP_HASH) { int err = base->d_op->d_hash(base, &this); if (err < 0) return ERR_PTR(err); } err = inode_permission(base->d_inode, MAY_EXEC); if (err) return ERR_PTR(err); ret = lookup_dcache(&this, base, 0); if (!ret) ret = lookup_slow(&this, base, 0); return ret; } EXPORT_SYMBOL(lookup_one_len_unlocked); int user_path_at_empty(int dfd, const char __user *name, unsigned flags, struct path *path, int *empty) { return filename_lookup(dfd, getname_flags(name, flags, empty), flags, path, NULL); } EXPORT_SYMBOL(user_path_at_empty); /* * NB: most callers don't do anything directly with the reference to the * to struct filename, but the nd->last pointer points into the name string * allocated by getname. So we must hold the reference to it until all * path-walking is complete. 
 */
static inline struct filename *
user_path_parent(int dfd, const char __user *path,
		 struct path *parent,
		 struct qstr *last,
		 int *type,
		 unsigned int flags)
{
	/* only LOOKUP_REVAL is allowed in extra flags */
	return filename_parentat(dfd, getname(path), flags & LOOKUP_REVAL,
				 parent, last, type);
}

/**
 * mountpoint_last - look up last component for umount
 * @nd:   pathwalk nameidata - currently pointing at parent directory of "last"
 * @path: pointer to container for result
 *
 * This is a special lookup_last function just for umount. In this case, we
 * need to resolve the path without doing any revalidation.
 *
 * The nameidata should be the result of doing a LOOKUP_PARENT pathwalk. Since
 * mountpoints are always pinned in the dcache, their ancestors are too. Thus,
 * in almost all cases, this lookup will be served out of the dcache. The only
 * cases where it won't are if nd->last refers to a symlink or the path is
 * bogus and it doesn't exist.
 *
 * Returns:
 * -error: if there was an error during lookup. This includes -ENOENT if the
 *         lookup found a negative dentry. The nd->path reference will also be
 *         put in this case.
 *
 * 0:      if we successfully resolved nd->path and found it to not to be a
 *         symlink that needs to be followed. "path" will also be populated.
 *         The nd->path reference will also be put.
 *
 * 1:      if we successfully resolved nd->last and found it to be a symlink
 *         that needs to be followed. "path" will be populated with the path
 *         to the link, and nd->path will *not* be put.
 */
static int
mountpoint_last(struct nameidata *nd, struct path *path)
{
	int error = 0;
	struct dentry *dentry;
	struct dentry *dir = nd->path.dentry;

	/* If we're in rcuwalk, drop out of it to handle last component */
	if (nd->flags & LOOKUP_RCU) {
		if (unlazy_walk(nd, NULL, 0))
			return -ECHILD;
	}

	nd->flags &= ~LOOKUP_PARENT;

	if (unlikely(nd->last_type != LAST_NORM)) {
		/* ".", ".." or "/" as the final component */
		error = handle_dots(nd, nd->last_type);
		if (error)
			return error;
		dentry = dget(nd->path.dentry);
	} else {
		dentry = d_lookup(dir, &nd->last);
		if (!dentry) {
			/*
			 * No cached dentry. Mounted dentries are pinned in the
			 * cache, so that means that this dentry is probably
			 * a symlink or the path doesn't actually point
			 * to a mounted dentry.
			 */
			dentry = lookup_slow(&nd->last, dir,
					     nd->flags | LOOKUP_NO_REVAL);
			if (IS_ERR(dentry))
				return PTR_ERR(dentry);
		}
	}
	if (d_is_negative(dentry)) {
		dput(dentry);
		return -ENOENT;
	}
	if (nd->depth)
		put_link(nd);
	path->dentry = dentry;
	path->mnt = nd->path.mnt;
	error = should_follow_link(nd, path, nd->flags & LOOKUP_FOLLOW,
				   d_backing_inode(dentry), 0);
	if (unlikely(error))
		return error;
	mntget(path->mnt);
	follow_mount(path);
	return 0;
}

/**
 * path_mountpoint - look up a path to be umounted
 * @nd:		lookup context
 * @flags:	lookup flags
 * @path:	pointer to container for result
 *
 * Look up the given name, but don't attempt to revalidate the last component.
 * Returns 0 and "path" will be valid on success; Returns error otherwise.
 */
static int
path_mountpoint(struct nameidata *nd, unsigned flags, struct path *path)
{
	const char *s = path_init(nd, flags);
	int err;
	if (IS_ERR(s))
		return PTR_ERR(s);
	/* walk components; re-enter the loop for each trailing symlink */
	while (!(err = link_path_walk(s, nd)) &&
		(err = mountpoint_last(nd, path)) > 0) {
		s = trailing_symlink(nd);
		if (IS_ERR(s)) {
			err = PTR_ERR(s);
			break;
		}
	}
	terminate_walk(nd);
	return err;
}

/*
 * Standard three-pass retry ladder: try RCU-walk first, fall back to
 * ref-walk on -ECHILD, and force revalidation on -ESTALE.
 */
static int
filename_mountpoint(int dfd, struct filename *name, struct path *path,
		    unsigned int flags)
{
	struct nameidata nd;
	int error;
	if (IS_ERR(name))
		return PTR_ERR(name);
	set_nameidata(&nd, dfd, name);
	error = path_mountpoint(&nd, flags | LOOKUP_RCU, path);
	if (unlikely(error == -ECHILD))
		error = path_mountpoint(&nd, flags, path);
	if (unlikely(error == -ESTALE))
		error = path_mountpoint(&nd, flags | LOOKUP_REVAL, path);
	if (likely(!error))
		audit_inode(name, path->dentry, 0);
	restore_nameidata();
	putname(name);
	return error;
}

/**
 * user_path_mountpoint_at - lookup a path from userland in order to umount it
 * @dfd:	directory file descriptor
 * @name:	pathname from userland
 * @flags:	lookup flags
 * @path:	pointer to container to hold result
 *
 * A umount is a special case for path walking. We're not actually interested
 * in the inode in this situation, and ESTALE errors can be a problem. We
 * simply want track down the dentry and vfsmount attached at the mountpoint
 * and avoid revalidating the last component.
 *
 * Returns 0 and populates "path" on success.
 */
int
user_path_mountpoint_at(int dfd, const char __user *name, unsigned int flags,
			struct path *path)
{
	return filename_mountpoint(dfd, getname(name), path, flags);
}

int
kern_path_mountpoint(int dfd, const char *name, struct path *path,
			unsigned int flags)
{
	/* kernel-space pathname variant of user_path_mountpoint_at() */
	return filename_mountpoint(dfd, getname_kernel(name), path, flags);
}
EXPORT_SYMBOL(kern_path_mountpoint);

/*
 * Sticky-directory check: return non-zero if the current fsuid may NOT
 * remove/rename @inode inside sticky directory @dir (i.e. it owns neither
 * the victim nor the directory and lacks CAP_FOWNER).
 */
int __check_sticky(struct inode *dir, struct inode *inode)
{
	kuid_t fsuid = current_fsuid();

	if (uid_eq(inode->i_uid, fsuid))
		return 0;
	if (uid_eq(dir->i_uid, fsuid))
		return 0;
	return !capable_wrt_inode_uidgid(inode, CAP_FOWNER);
}
EXPORT_SYMBOL(__check_sticky);

/*
 *	Check whether we can remove a link victim from directory dir, check
 *  whether the type of victim is right.
 *  1. We can't do it if dir is read-only (done in permission())
 *  2. We should have write and exec permissions on dir
 *  3. We can't remove anything from append-only dir
 *  4. We can't do anything with immutable dir (done in permission())
 *  5. If the sticky bit on dir is set we should either
 *	a. be owner of dir, or
 *	b. be owner of victim, or
 *	c. have CAP_FOWNER capability
 *  6. If the victim is append-only or immutable we can't do antyhing with
 *     links pointing to it.
 *  7. If we were asked to remove a directory and victim isn't one - ENOTDIR.
 *  8. If we were asked to remove a non-directory and victim isn't one - EISDIR.
 *  9. We can't remove a root or mountpoint.
 * 10. We don't allow removal of NFS sillyrenamed files; it's handled by
 *     nfs_async_unlink().
 */
static int may_delete(struct inode *dir, struct dentry *victim, bool isdir)
{
	struct inode *inode = d_backing_inode(victim);
	int error;

	if (d_is_negative(victim))
		return -ENOENT;
	BUG_ON(!inode);

	BUG_ON(victim->d_parent->d_inode != dir);
	audit_inode_child(dir, victim, AUDIT_TYPE_CHILD_DELETE);

	error = inode_permission(dir, MAY_WRITE | MAY_EXEC);
	if (error)
		return error;
	if (IS_APPEND(dir))
		return -EPERM;

	if (check_sticky(dir, inode) || IS_APPEND(inode) ||
	    IS_IMMUTABLE(inode) || IS_SWAPFILE(inode))
		return -EPERM;
	if (isdir) {
		if (!d_is_dir(victim))
			return -ENOTDIR;
		if (IS_ROOT(victim))
			return -EBUSY;
	} else if (d_is_dir(victim))
		return -EISDIR;
	if (IS_DEADDIR(dir))
		return -ENOENT;
	if (victim->d_flags & DCACHE_NFSFS_RENAMED)
		return -EBUSY;
	return 0;
}

/*	Check whether we can create an object with dentry child in directory
 *  dir.
 *  1. We can't do it if child already exists (open has special treatment for
 *     this case, but since we are inlined it's OK)
 *  2. We can't do it if dir is read-only (done in permission())
 *  3. We should have write and exec permissions on dir
 *  4. We can't do it if dir is immutable (done in permission())
 */
static inline int may_create(struct inode *dir, struct dentry *child)
{
	audit_inode_child(dir, child, AUDIT_TYPE_CHILD_CREATE);
	if (child->d_inode)
		return -EEXIST;
	if (IS_DEADDIR(dir))
		return -ENOENT;
	return inode_permission(dir, MAY_WRITE | MAY_EXEC);
}

/*
 * p1 and p2 should be directories on the same fs.
 */
struct dentry *lock_rename(struct dentry *p1, struct dentry *p2)
{
	struct dentry *p;

	if (p1 == p2) {
		inode_lock_nested(p1->d_inode, I_MUTEX_PARENT);
		return NULL;
	}

	/*
	 * s_vfs_rename_mutex serialises cross-directory renames so the
	 * ancestor relationship checked below cannot change under us.
	 */
	mutex_lock(&p1->d_inode->i_sb->s_vfs_rename_mutex);

	/* lock an ancestor before its descendant (PARENT before CHILD) */
	p = d_ancestor(p2, p1);
	if (p) {
		inode_lock_nested(p2->d_inode, I_MUTEX_PARENT);
		inode_lock_nested(p1->d_inode, I_MUTEX_CHILD);
		return p;
	}

	p = d_ancestor(p1, p2);
	if (p) {
		inode_lock_nested(p1->d_inode, I_MUTEX_PARENT);
		inode_lock_nested(p2->d_inode, I_MUTEX_CHILD);
		return p;
	}

	/* unrelated directories: any consistent order is fine */
	inode_lock_nested(p1->d_inode, I_MUTEX_PARENT);
	inode_lock_nested(p2->d_inode, I_MUTEX_PARENT2);
	return NULL;
}
EXPORT_SYMBOL(lock_rename);

/* Undo lock_rename(): drop both i_mutexes and the rename mutex if taken. */
void unlock_rename(struct dentry *p1, struct dentry *p2)
{
	inode_unlock(p1->d_inode);
	if (p1 != p2) {
		inode_unlock(p2->d_inode);
		mutex_unlock(&p1->d_inode->i_sb->s_vfs_rename_mutex);
	}
}
EXPORT_SYMBOL(unlock_rename);

/*
 * Create a regular file in @dir: permission/security checks, then the
 * filesystem's ->create(), then an fsnotify event on success.
 */
int vfs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
		bool want_excl)
{
	int error = may_create(dir, dentry);
	if (error)
		return error;

	if (!dir->i_op->create)
		return -EACCES;	/* shouldn't it be ENOSYS? */
	mode &= S_IALLUGO;
	mode |= S_IFREG;
	error = security_inode_create(dir, dentry, mode);
	if (error)
		return error;
	error = dir->i_op->create(dir, dentry, mode, want_excl);
	if (!error)
		fsnotify_create(dir, dentry);
	return error;
}
EXPORT_SYMBOL(vfs_create);

/* Final open-time checks on an already-resolved path. */
static int may_open(struct path *path, int acc_mode, int flag)
{
	struct dentry *dentry = path->dentry;
	struct inode *inode = dentry->d_inode;
	int error;

	if (!inode)
		return -ENOENT;

	switch (inode->i_mode & S_IFMT) {
	case S_IFLNK:
		return -ELOOP;
	case S_IFDIR:
		if (acc_mode & MAY_WRITE)
			return -EISDIR;
		break;
	case S_IFBLK:
	case S_IFCHR:
		if (path->mnt->mnt_flags & MNT_NODEV)
			return -EACCES;
		/*FALLTHRU*/
	case S_IFIFO:
	case S_IFSOCK:
		/* O_TRUNC is meaningless on devices, FIFOs and sockets */
		flag &= ~O_TRUNC;
		break;
	}

	error = inode_permission(inode, MAY_OPEN | acc_mode);
	if (error)
		return error;

	/*
	 * An append-only file must be opened in append mode for writing.
	 */
	if (IS_APPEND(inode)) {
		if ((flag & O_ACCMODE) != O_RDONLY && !(flag & O_APPEND))
			return -EPERM;
		if (flag & O_TRUNC)
			return -EPERM;
	}

	/* O_NOATIME can only be set by the owner or superuser */
	if (flag & O_NOATIME && !inode_owner_or_capable(inode))
		return -EPERM;

	return 0;
}

/* Perform the O_TRUNC part of an open, after the file has been opened. */
static int handle_truncate(struct file *filp)
{
	struct path *path = &filp->f_path;
	struct inode *inode = path->dentry->d_inode;
	int error = get_write_access(inode);
	if (error)
		return error;
	/*
	 * Refuse to truncate files with mandatory locks held on them.
	 */
	error = locks_verify_locked(filp);
	if (!error)
		error = security_path_truncate(path);
	if (!error) {
		error = do_truncate(path->dentry, 0,
				    ATTR_MTIME|ATTR_CTIME|ATTR_OPEN,
				    filp);
	}
	put_write_access(inode);
	return error;
}

/*
 * NOTE(review): (flag & O_ACCMODE) == 3 looks like the special
 * "ioctl-only" access mode; decrementing folds it into the internal
 * encoding used by namei - confirm against fcntl.h conventions.
 */
static inline int open_to_namei_flags(int flag)
{
	if ((flag & O_ACCMODE) == 3)
		flag--;
	return flag;
}

/* Pre-checks before letting a filesystem create a file via ->atomic_open(). */
static int may_o_create(struct path *dir, struct dentry *dentry, umode_t mode)
{
	int error = security_path_mknod(dir, dentry, mode, 0);
	if (error)
		return error;

	error = inode_permission(dir->dentry->d_inode, MAY_WRITE | MAY_EXEC);
	if (error)
		return error;

	return security_inode_create(dir->dentry->d_inode, dentry, mode);
}

/*
 * Attempt to atomically look up, create and open a file from a negative
 * dentry.
 *
 * Returns 0 if successful.  The file will have been created and attached to
 * @file by the filesystem calling finish_open().
 *
 * Returns 1 if the file was looked up only or didn't need creating.  The
 * caller will need to perform the open themselves.  @path will have been
 * updated to point to the new dentry.  This may be negative.
 *
 * Returns an error code otherwise.
 */
static int atomic_open(struct nameidata *nd, struct dentry *dentry,
			struct path *path, struct file *file,
			const struct open_flags *op,
			bool got_write, bool need_lookup,
			int *opened)
{
	struct inode *dir = nd->path.dentry->d_inode;
	unsigned open_flag = open_to_namei_flags(op->open_flag);
	umode_t mode;
	int error;
	int acc_mode;
	int create_error = 0;
	/* sentinel so we can detect filesystems that forget to set f_path */
	struct dentry *const DENTRY_NOT_SET = (void *) -1UL;
	bool excl;

	BUG_ON(dentry->d_inode);

	/* Don't create child dentry for a dead directory. */
	if (unlikely(IS_DEADDIR(dir))) {
		error = -ENOENT;
		goto out;
	}

	mode = op->mode;
	if ((open_flag & O_CREAT) && !IS_POSIXACL(dir))
		mode &= ~current_umask();

	excl = (open_flag & (O_EXCL | O_CREAT)) == (O_EXCL | O_CREAT);
	if (excl)
		open_flag &= ~O_TRUNC;

	/*
	 * Checking write permission is tricky, bacuse we don't know if we are
	 * going to actually need it: O_CREAT opens should work as long as the
	 * file exists. But checking existence breaks atomicity. The trick is
	 * to check access and if not granted clear O_CREAT from the flags.
	 *
	 * Another problem is returing the "right" error value (e.g. for an
	 * O_EXCL open we want to return EEXIST not EROFS).
	 */
	if (((open_flag & (O_CREAT | O_TRUNC)) ||
	    (open_flag & O_ACCMODE) != O_RDONLY) && unlikely(!got_write)) {
		if (!(open_flag & O_CREAT)) {
			/*
			 * No O_CREATE -> atomicity not a requirement -> fall
			 * back to lookup + open
			 */
			goto no_open;
		} else if (open_flag & (O_EXCL | O_TRUNC)) {
			/* Fall back and fail with the right error */
			create_error = -EROFS;
			goto no_open;
		} else {
			/* No side effects, safe to clear O_CREAT */
			create_error = -EROFS;
			open_flag &= ~O_CREAT;
		}
	}

	if (open_flag & O_CREAT) {
		error = may_o_create(&nd->path, dentry, mode);
		if (error) {
			create_error = error;
			if (open_flag & O_EXCL)
				goto no_open;
			open_flag &= ~O_CREAT;
		}
	}

	if (nd->flags & LOOKUP_DIRECTORY)
		open_flag |= O_DIRECTORY;

	file->f_path.dentry = DENTRY_NOT_SET;
	file->f_path.mnt = nd->path.mnt;
	error = dir->i_op->atomic_open(dir, dentry, file, open_flag, mode,
				      opened);
	if (error < 0) {
		/* prefer the deferred EROFS/may_o_create error over ENOENT */
		if (create_error && error == -ENOENT)
			error = create_error;
		goto out;
	}

	if (error) {	/* returned 1, that is */
		if (WARN_ON(file->f_path.dentry == DENTRY_NOT_SET)) {
			error = -EIO;
			goto out;
		}
		if (file->f_path.dentry) {
			/* fs gave us a (possibly different) dentry back */
			dput(dentry);
			dentry = file->f_path.dentry;
		}
		if (*opened & FILE_CREATED)
			fsnotify_create(dir, dentry);
		if (!dentry->d_inode) {
			WARN_ON(*opened & FILE_CREATED);
			if (create_error) {
				error = create_error;
				goto out;
			}
		} else {
			if (excl && !(*opened & FILE_CREATED)) {
				error = -EEXIST;
				goto out;
			}
		}
		goto looked_up;
	}

	/*
	 * We didn't have the inode before the open, so check open permission
	 * here.
	 */
	acc_mode = op->acc_mode;
	if (*opened & FILE_CREATED) {
		WARN_ON(!(open_flag & O_CREAT));
		fsnotify_create(dir, dentry);
		/* freshly created file: no further access check needed */
		acc_mode = 0;
	}
	error = may_open(&file->f_path, acc_mode, open_flag);
	if (error)
		fput(file);

out:
	dput(dentry);
	return error;

no_open:
	if (need_lookup) {
		dentry = lookup_real(dir, dentry, nd->flags);
		if (IS_ERR(dentry))
			return PTR_ERR(dentry);

		if (create_error) {
			int open_flag = op->open_flag;

			error = create_error;
			if ((open_flag & O_EXCL)) {
				if (!dentry->d_inode)
					goto out;
			} else if (!dentry->d_inode) {
				goto out;
			} else if ((open_flag & O_TRUNC) &&
				   d_is_reg(dentry)) {
				goto out;
			}
			/* will fail later, go on to get the right error */
		}
	}
looked_up:
	path->dentry = dentry;
	path->mnt = nd->path.mnt;
	return 1;
}

/*
 * Look up and maybe create and open the last component.
 *
 * Must be called with i_mutex held on parent.
 *
 * Returns 0 if the file was successfully atomically created (if necessary) and
 * opened.  In this case the file will be returned attached to @file.
 *
 * Returns 1 if the file was not completely opened at this time, though lookups
 * and creations will have been performed and the dentry returned in @path will
 * be positive upon return if O_CREAT was specified.  If O_CREAT wasn't
 * specified then a negative dentry may be returned.
 *
 * An error code is returned otherwise.
 *
 * FILE_CREATE will be set in @*opened if the dentry was created and will be
 * cleared otherwise prior to returning.
 */
static int lookup_open(struct nameidata *nd, struct path *path,
			struct file *file,
			const struct open_flags *op,
			bool got_write, int *opened)
{
	struct dentry *dir = nd->path.dentry;
	struct inode *dir_inode = dir->d_inode;
	struct dentry *dentry;
	int error;
	bool need_lookup = false;

	*opened &= ~FILE_CREATED;
	dentry = lookup_dcache(&nd->last, dir, nd->flags);
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	if (!dentry) {
		dentry = d_alloc(dir, &nd->last);
		if (unlikely(!dentry))
			return -ENOMEM;
		need_lookup = true;
	} else if (dentry->d_inode) {
		/* Cached positive dentry: will open in f_op->open */
		goto out_no_open;
	}

	if ((nd->flags & LOOKUP_OPEN) && dir_inode->i_op->atomic_open) {
		return atomic_open(nd, dentry, path, file, op, got_write,
				   need_lookup, opened);
	}

	if (need_lookup) {
		BUG_ON(dentry->d_inode);

		dentry = lookup_real(dir_inode, dentry, nd->flags);
		if (IS_ERR(dentry))
			return PTR_ERR(dentry);
	}

	/* Negative dentry, just create the file */
	if (!dentry->d_inode && (op->open_flag & O_CREAT)) {
		umode_t mode = op->mode;
		if (!IS_POSIXACL(dir->d_inode))
			mode &= ~current_umask();
		/*
		 * This write is needed to ensure that a
		 * rw->ro transition does not occur between
		 * the time when the file is created and when
		 * a permanent write count is taken through
		 * the 'struct file' in finish_open().
		 */
		if (!got_write) {
			error = -EROFS;
			goto out_dput;
		}
		*opened |= FILE_CREATED;
		error = security_path_mknod(&nd->path, dentry, mode, 0);
		if (error)
			goto out_dput;
		error = vfs_create(dir->d_inode, dentry, mode,
				   nd->flags & LOOKUP_EXCL);
		if (error)
			goto out_dput;
	}
out_no_open:
	path->dentry = dentry;
	path->mnt = nd->path.mnt;
	return 1;

out_dput:
	dput(dentry);
	return error;
}

/*
 * Handle the last step of open()
 */
static int do_last(struct nameidata *nd,
		   struct file *file, const struct open_flags *op,
		   int *opened)
{
	struct dentry *dir = nd->path.dentry;
	int open_flag = op->open_flag;
	bool will_truncate = (open_flag & O_TRUNC) != 0;
	bool got_write = false;
	int acc_mode = op->acc_mode;
	unsigned seq;
	struct inode *inode;
	struct path save_parent = { .dentry = NULL, .mnt = NULL };
	struct path path;
	bool retried = false;
	int error;

	nd->flags &= ~LOOKUP_PARENT;
	nd->flags |= op->intent;

	if (nd->last_type != LAST_NORM) {
		/* last component was ".", ".." or "/" */
		error = handle_dots(nd, nd->last_type);
		if (unlikely(error))
			return error;
		goto finish_open;
	}

	if (!(open_flag & O_CREAT)) {
		if (nd->last.name[nd->last.len])
			nd->flags |= LOOKUP_FOLLOW | LOOKUP_DIRECTORY;
		/* we _can_ be in RCU mode here */
		error = lookup_fast(nd, &path, &inode, &seq);
		if (likely(error > 0))
			goto finish_lookup;

		if (error < 0)
			return error;

		BUG_ON(nd->inode != dir->d_inode);
		BUG_ON(nd->flags & LOOKUP_RCU);
	} else {
		/* create side of things */
		/*
		 * This will *only* deal with leaving RCU mode - LOOKUP_JUMPED
		 * has been cleared when we got to the last component we are
		 * about to look up
		 */
		error = complete_walk(nd);
		if (error)
			return error;

		audit_inode(nd->name, dir, LOOKUP_PARENT);
		/* trailing slashes? */
		if (unlikely(nd->last.name[nd->last.len]))
			return -EISDIR;
	}

retry_lookup:
	if (op->open_flag & (O_CREAT | O_TRUNC | O_WRONLY | O_RDWR)) {
		error = mnt_want_write(nd->path.mnt);
		if (!error)
			got_write = true;
		/*
		 * do _not_ fail yet - we might not need that or fail with
		 * a different error; let lookup_open() decide; we'll be
		 * dropping this one anyway.
		 */
	}
	inode_lock(dir->d_inode);
	error = lookup_open(nd, &path, file, op, got_write, opened);
	inode_unlock(dir->d_inode);

	if (error <= 0) {
		/* 0 == the fs opened (or created+opened) it for us */
		if (error)
			goto out;

		if ((*opened & FILE_CREATED) ||
		    !S_ISREG(file_inode(file)->i_mode))
			will_truncate = false;

		audit_inode(nd->name, file->f_path.dentry, 0);
		goto opened;
	}

	if (*opened & FILE_CREATED) {
		/* Don't check for write permission, don't truncate */
		open_flag &= ~O_TRUNC;
		will_truncate = false;
		acc_mode = 0;
		path_to_nameidata(&path, nd);
		goto finish_open_created;
	}

	/*
	 * If atomic_open() acquired write access it is dropped now due to
	 * possible mount and symlink following (this might be optimized away if
	 * necessary...)
	 */
	if (got_write) {
		mnt_drop_write(nd->path.mnt);
		got_write = false;
	}

	if (unlikely(d_is_negative(path.dentry))) {
		path_to_nameidata(&path, nd);
		return -ENOENT;
	}

	/*
	 * create/update audit record if it already exists.
	 */
	audit_inode(nd->name, path.dentry, 0);

	if (unlikely((open_flag & (O_EXCL | O_CREAT)) == (O_EXCL | O_CREAT))) {
		path_to_nameidata(&path, nd);
		return -EEXIST;
	}

	error = follow_managed(&path, nd);
	if (unlikely(error < 0))
		return error;

	seq = 0;	/* out of RCU mode, so the value doesn't matter */
	inode = d_backing_inode(path.dentry);
finish_lookup:
	if (nd->depth)
		put_link(nd);
	error = should_follow_link(nd, &path, nd->flags & LOOKUP_FOLLOW,
				   inode, seq);
	if (unlikely(error))
		return error;

	if ((nd->flags & LOOKUP_RCU) || nd->path.mnt != path.mnt) {
		path_to_nameidata(&path, nd);
	} else {
		/* keep the parent pinned so a stale open can be retried */
		save_parent.dentry = nd->path.dentry;
		save_parent.mnt = mntget(path.mnt);
		nd->path.dentry = path.dentry;

	}
	nd->inode = inode;
	nd->seq = seq;
	/* Why this, you ask?  _Now_ we might have grown LOOKUP_JUMPED... */
finish_open:
	error = complete_walk(nd);
	if (error) {
		path_put(&save_parent);
		return error;
	}
	audit_inode(nd->name, nd->path.dentry, 0);
	if (unlikely(d_is_symlink(nd->path.dentry)) && !(open_flag & O_PATH)) {
		error = -ELOOP;
		goto out;
	}
	error = -EISDIR;
	if ((open_flag & O_CREAT) && d_is_dir(nd->path.dentry))
		goto out;
	error = -ENOTDIR;
	if ((nd->flags & LOOKUP_DIRECTORY) && !d_can_lookup(nd->path.dentry))
		goto out;
	if (!d_is_reg(nd->path.dentry))
		will_truncate = false;

	if (will_truncate) {
		error = mnt_want_write(nd->path.mnt);
		if (error)
			goto out;
		got_write = true;
	}
finish_open_created:
	if (likely(!(open_flag & O_PATH))) {
		error = may_open(&nd->path, acc_mode, open_flag);
		if (error)
			goto out;
	}
	BUG_ON(*opened & FILE_OPENED); /* once it's opened, it's opened */
	error = vfs_open(&nd->path, file, current_cred());
	if (!error) {
		*opened |= FILE_OPENED;
	} else {
		if (error == -EOPENSTALE)
			goto stale_open;
		goto out;
	}
opened:
	error = open_check_o_direct(file);
	if (error)
		goto exit_fput;
	error = ima_file_check(file, op->acc_mode, *opened);
	if (error)
		goto exit_fput;

	if (will_truncate) {
		error = handle_truncate(file);
		if (error)
			goto exit_fput;
	}
out:
	if (unlikely(error > 0)) {
		WARN_ON(1);
		error = -EINVAL;
	}
	if (got_write)
		mnt_drop_write(nd->path.mnt);
	path_put(&save_parent);
	return error;

exit_fput:
	fput(file);
	goto out;

stale_open:
	/* If no saved parent or already retried then can't retry */
	if (!save_parent.dentry || retried)
		goto out;

	BUG_ON(save_parent.dentry != dir);
	path_put(&nd->path);
	nd->path = save_parent;
	nd->inode = dir->d_inode;
	save_parent.mnt = NULL;
	save_parent.dentry = NULL;
	if (got_write) {
		mnt_drop_write(nd->path.mnt);
		got_write = false;
	}
	retried = true;
	goto retry_lookup;
}

/* Implement O_TMPFILE: create an unnamed file in the looked-up directory. */
static int do_tmpfile(struct nameidata *nd, unsigned flags,
		const struct open_flags *op,
		struct file *file, int *opened)
{
	static const struct qstr name = QSTR_INIT("/", 1);
	struct dentry *child;
	struct inode *dir;
	struct path path;
	int error = path_lookupat(nd, flags | LOOKUP_DIRECTORY, &path);
	if (unlikely(error))
		return error;
	error = mnt_want_write(path.mnt);
	if (unlikely(error))
		goto out;
	dir = path.dentry->d_inode;
	/* we want directory to be writable */
	error = inode_permission(dir, MAY_WRITE | MAY_EXEC);
	if (error)
		goto out2;
	if (!dir->i_op->tmpfile) {
		error = -EOPNOTSUPP;
		goto out2;
	}
	child = d_alloc(path.dentry, &name);
	if (unlikely(!child)) {
		error = -ENOMEM;
		goto out2;
	}
	dput(path.dentry);
	path.dentry = child;
	error = dir->i_op->tmpfile(dir, child, op->mode);
	if (error)
		goto out2;
	audit_inode(nd->name, child, 0);
	/* Don't check for other permissions, the inode was just created */
	error = may_open(&path, 0, op->open_flag);
	if (error)
		goto out2;
	file->f_path.mnt = path.mnt;
	error = finish_open(file, child, NULL, opened);
	if (error)
		goto out2;
	error = open_check_o_direct(file);
	if (error) {
		fput(file);
	} else if (!(op->open_flag & O_EXCL)) {
		/* without O_EXCL the tmpfile may later be given a name */
		struct inode *inode = file_inode(file);
		spin_lock(&inode->i_lock);
		inode->i_state |= I_LINKABLE;
		spin_unlock(&inode->i_lock);
	}
out2:
	mnt_drop_write(path.mnt);
out:
	path_put(&path);
	return error;
}

/* Core of open(): walk the path and open the last component. */
static struct file *path_openat(struct nameidata *nd,
			const struct open_flags *op, unsigned flags)
{
	const char *s;
	struct file *file;
	int opened = 0;
	int error;

	file = get_empty_filp();
	if (IS_ERR(file))
		return file;

	file->f_flags = op->open_flag;

	if (unlikely(file->f_flags & __O_TMPFILE)) {
		error = do_tmpfile(nd, flags, op, file, &opened);
		goto out2;
	}

	s = path_init(nd, flags);
	if (IS_ERR(s)) {
		put_filp(file);
		return ERR_CAST(s);
	}
	while (!(error = link_path_walk(s, nd)) &&
		(error = do_last(nd, file, op, &opened)) > 0) {
		/* do_last() returned 1: trailing symlink, walk its target */
		nd->flags &= ~(LOOKUP_OPEN|LOOKUP_CREATE|LOOKUP_EXCL);
		s = trailing_symlink(nd);
		if (IS_ERR(s)) {
			error = PTR_ERR(s);
			break;
		}
	}
	terminate_walk(nd);
out2:
	if (!(opened & FILE_OPENED)) {
		BUG_ON(!error);
		put_filp(file);
	}
	if (unlikely(error)) {
		if (error == -EOPENSTALE) {
			if (flags & LOOKUP_RCU)
				error = -ECHILD;
			else
				error = -ESTALE;
		}
		file = ERR_PTR(error);
	}
	return file;
}

/* Entry point for open(2) and friends: RCU -> ref-walk -> REVAL retries. */
struct file *do_filp_open(int dfd, struct filename *pathname,
		const struct open_flags *op)
{
	struct nameidata nd;
	int flags = op->lookup_flags;
	struct file *filp;

	set_nameidata(&nd, dfd, pathname);
	filp = path_openat(&nd, op, flags | LOOKUP_RCU);
	if (unlikely(filp == ERR_PTR(-ECHILD)))
		filp = path_openat(&nd, op, flags);
	if (unlikely(filp == ERR_PTR(-ESTALE)))
		filp = path_openat(&nd, op, flags | LOOKUP_REVAL);
	restore_nameidata();
	return filp;
}

/* As do_filp_open(), but resolve @name relative to the given root. */
struct file *do_file_open_root(struct dentry *dentry, struct vfsmount *mnt,
		const char *name, const struct open_flags *op)
{
	struct nameidata nd;
	struct file *file;
	struct filename *filename;
	int flags = op->lookup_flags | LOOKUP_ROOT;

	nd.root.mnt = mnt;
	nd.root.dentry = dentry;

	if (d_is_symlink(dentry) && op->intent & LOOKUP_OPEN)
		return ERR_PTR(-ELOOP);

	filename = getname_kernel(name);
	if (IS_ERR(filename))
		return ERR_CAST(filename);

	set_nameidata(&nd, -1, filename);
	file = path_openat(&nd, op, flags | LOOKUP_RCU);
	if (unlikely(file == ERR_PTR(-ECHILD)))
		file = path_openat(&nd, op, flags);
	if (unlikely(file == ERR_PTR(-ESTALE)))
		file = path_openat(&nd, op, flags | LOOKUP_REVAL);
	restore_nameidata();
	putname(filename);
	return file;
}

/*
 * Resolve the parent and return a negative dentry for the last component,
 * with the parent's i_mutex held and write access to the mount taken.
 * Callers undo this with done_path_create().
 */
static struct dentry *filename_create(int dfd, struct filename *name,
				struct path *path, unsigned int lookup_flags)
{
	struct dentry *dentry = ERR_PTR(-EEXIST);
	struct qstr last;
	int type;
	int err2;
	int error;
	bool is_dir = (lookup_flags & LOOKUP_DIRECTORY);

	/*
	 * Note that only LOOKUP_REVAL and LOOKUP_DIRECTORY matter here. Any
	 * other flags passed in are ignored!
	 */
	lookup_flags &= LOOKUP_REVAL;

	name = filename_parentat(dfd, name, lookup_flags, path, &last, &type);
	if (IS_ERR(name))
		return ERR_CAST(name);

	/*
	 * Yucky last component or no last component at all?
	 * (foo/., foo/.., /////)
	 */
	if (unlikely(type != LAST_NORM))
		goto out;

	/* don't fail immediately if it's r/o, at least try to report other errors */
	err2 = mnt_want_write(path->mnt);
	/*
	 * Do the final lookup.
	 */
	lookup_flags |= LOOKUP_CREATE | LOOKUP_EXCL;
	inode_lock_nested(path->dentry->d_inode, I_MUTEX_PARENT);
	dentry = __lookup_hash(&last, path->dentry, lookup_flags);
	if (IS_ERR(dentry))
		goto unlock;

	error = -EEXIST;
	if (d_is_positive(dentry))
		goto fail;

	/*
	 * Special case - lookup gave negative, but... we had foo/bar/
	 * From the vfs_mknod() POV we just have a negative dentry -
	 * all is fine. Let's be bastards - you had / on the end, you've
	 * been asking for (non-existent) directory. -ENOENT for you.
	 */
	if (unlikely(!is_dir && last.name[last.len])) {
		error = -ENOENT;
		goto fail;
	}
	if (unlikely(err2)) {
		error = err2;
		goto fail;
	}
	putname(name);
	return dentry;
fail:
	dput(dentry);
	dentry = ERR_PTR(error);
unlock:
	inode_unlock(path->dentry->d_inode);
	if (!err2)
		mnt_drop_write(path->mnt);
out:
	path_put(path);
	putname(name);
	return dentry;
}

struct dentry *kern_path_create(int dfd, const char *pathname,
				struct path *path, unsigned int lookup_flags)
{
	return filename_create(dfd, getname_kernel(pathname),
				path, lookup_flags);
}
EXPORT_SYMBOL(kern_path_create);

/* Release everything filename_create()/user_path_create() acquired. */
void done_path_create(struct path *path, struct dentry *dentry)
{
	dput(dentry);
	inode_unlock(path->dentry->d_inode);
	mnt_drop_write(path->mnt);
	path_put(path);
}
EXPORT_SYMBOL(done_path_create);

inline struct dentry *user_path_create(int dfd, const char __user *pathname,
				struct path *path, unsigned int lookup_flags)
{
	return filename_create(dfd, getname(pathname), path, lookup_flags);
}
EXPORT_SYMBOL(user_path_create);

/* Create a special file (device node, FIFO, socket or regular file). */
int vfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
{
	int error = may_create(dir, dentry);

	if (error)
		return error;

	/* device nodes are a privileged operation */
	if ((S_ISCHR(mode) || S_ISBLK(mode)) && !capable(CAP_MKNOD))
		return -EPERM;

	if (!dir->i_op->mknod)
		return -EPERM;

	error = devcgroup_inode_mknod(mode, dev);
	if (error)
		return error;

	error = security_inode_mknod(dir, dentry, mode, dev);
	if (error)
		return error;

	error = dir->i_op->mknod(dir, dentry, mode, dev);
	if (!error)
		fsnotify_create(dir, dentry);
	return error;
}
EXPORT_SYMBOL(vfs_mknod);

/* Which file types may mknod(2) create? Directories never; see mkdir(2). */
static int may_mknod(umode_t mode)
{
	switch (mode & S_IFMT) {
	case S_IFREG:
	case S_IFCHR:
	case S_IFBLK:
	case S_IFIFO:
	case S_IFSOCK:
	case 0: /* zero mode translates to S_IFREG */
		return 0;
	case S_IFDIR:
		return -EPERM;
	default:
		return -EINVAL;
	}
}

SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, umode_t, mode,
		unsigned, dev)
{
	struct dentry *dentry;
	struct path path;
	int error;
	unsigned int lookup_flags = 0;

	error = may_mknod(mode);
	if (error)
		return error;
retry:
	dentry = user_path_create(dfd, filename, &path, lookup_flags);
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	if (!IS_POSIXACL(path.dentry->d_inode))
		mode &= ~current_umask();
	error = security_path_mknod(&path, dentry, mode, dev);
	if (error)
		goto out;
	switch (mode & S_IFMT) {
		case 0: case S_IFREG:
			error = vfs_create(path.dentry->d_inode,dentry,mode,true);
			break;
		case S_IFCHR: case S_IFBLK:
			error = vfs_mknod(path.dentry->d_inode,dentry,mode,
					new_decode_dev(dev));
			break;
		case S_IFIFO: case S_IFSOCK:
			error = vfs_mknod(path.dentry->d_inode,dentry,mode,0);
			break;
	}
out:
	done_path_create(&path, dentry);
	if (retry_estale(error, lookup_flags)) {
		lookup_flags |= LOOKUP_REVAL;
		goto retry;
	}
	return error;
}

SYSCALL_DEFINE3(mknod, const char __user *, filename, umode_t, mode,
		unsigned, dev)
{
	return sys_mknodat(AT_FDCWD, filename, mode, dev);
}

/* Create a directory in @dir after permission/security/link-count checks. */
int vfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
	int error = may_create(dir, dentry);
	unsigned max_links = dir->i_sb->s_max_links;

	if (error)
		return error;

	if (!dir->i_op->mkdir)
		return -EPERM;

	mode &= (S_IRWXUGO|S_ISVTX);
	error = security_inode_mkdir(dir, dentry, mode);
	if (error)
		return error;

	/* each subdirectory's ".." adds a link to @dir */
	if (max_links && dir->i_nlink >= max_links)
		return -EMLINK;

	error = dir->i_op->mkdir(dir, dentry, mode);
	if (!error)
		fsnotify_mkdir(dir, dentry);
	return error;
}
EXPORT_SYMBOL(vfs_mkdir);

SYSCALL_DEFINE3(mkdirat, int, dfd, const char __user *, pathname, umode_t, mode)
{
	struct dentry *dentry;
	struct path path;
	int error;
	unsigned int lookup_flags = LOOKUP_DIRECTORY;

retry:
	dentry = user_path_create(dfd, pathname, &path, lookup_flags);
	if (IS_ERR(dentry))
		return PTR_ERR(dentry);

	if (!IS_POSIXACL(path.dentry->d_inode))
		mode &= ~current_umask();
	error = security_path_mkdir(&path, dentry, mode);
	if (!error)
		error = vfs_mkdir(path.dentry->d_inode, dentry, mode);
	done_path_create(&path, dentry);
	if (retry_estale(error, lookup_flags)) {
		lookup_flags |= LOOKUP_REVAL;
		goto retry;
	}
	return error;
}

SYSCALL_DEFINE2(mkdir, const char __user *, pathname, umode_t, mode)
{
	return sys_mkdirat(AT_FDCWD, pathname, mode);
}

/* Remove directory @dentry from @dir; caller holds dir->i_mutex. */
int vfs_rmdir(struct inode *dir, struct dentry *dentry)
{
	int error = may_delete(dir, dentry, 1);

	if (error)
		return error;

	if (!dir->i_op->rmdir)
		return -EPERM;

	dget(dentry);
	inode_lock(dentry->d_inode);

	error = -EBUSY;
	if (is_local_mountpoint(dentry))
		goto out;

	error = security_inode_rmdir(dir, dentry);
	if (error)
		goto out;

	shrink_dcache_parent(dentry);
	error = dir->i_op->rmdir(dir, dentry);
	if (error)
		goto out;

	/* mark the victim dead so concurrent lookups fail with ENOENT */
	dentry->d_inode->i_flags |= S_DEAD;
	dont_mount(dentry);
	detach_mounts(dentry);

out:
	inode_unlock(dentry->d_inode);
	dput(dentry);
	if (!error)
		d_delete(dentry);
	return error;
}
EXPORT_SYMBOL(vfs_rmdir);

static long do_rmdir(int dfd, const char __user *pathname)
{
	int error = 0;
	struct filename *name;
	struct dentry *dentry;
	struct path path;
	struct qstr last;
	int type;
	unsigned int lookup_flags = 0;
retry:
	name = user_path_parent(dfd, pathname,
				&path, &last, &type, lookup_flags);
	if (IS_ERR(name))
		return PTR_ERR(name);

	switch (type) {
	case LAST_DOTDOT:
		error = -ENOTEMPTY;
		goto exit1;
	case LAST_DOT:
		error = -EINVAL;
		goto exit1;
	case LAST_ROOT:
		error = -EBUSY;
		goto exit1;
	}

	error = mnt_want_write(path.mnt);
	if (error)
		goto exit1;

	inode_lock_nested(path.dentry->d_inode, I_MUTEX_PARENT);
	dentry = __lookup_hash(&last, path.dentry, lookup_flags);
	error = PTR_ERR(dentry);
	if (IS_ERR(dentry))
		goto exit2;
	if (!dentry->d_inode) {
		error = -ENOENT;
		goto exit3;
	}
	error = security_path_rmdir(&path, dentry);
	if (error)
		goto exit3;
	error = vfs_rmdir(path.dentry->d_inode, dentry);
exit3:
	dput(dentry);
exit2:
	inode_unlock(path.dentry->d_inode);
	mnt_drop_write(path.mnt);
exit1:
	path_put(&path);
	putname(name);
	if (retry_estale(error, lookup_flags)) {
		lookup_flags |= LOOKUP_REVAL;
		goto retry;
	}
	return error;
}

SYSCALL_DEFINE1(rmdir, const char __user *, pathname)
{
	return do_rmdir(AT_FDCWD, pathname);
}

/**
 * vfs_unlink - unlink a filesystem object
 * @dir:	parent directory
 * @dentry:	victim
 * @delegated_inode: returns victim inode, if the inode is delegated.
 *
 * The caller must hold dir->i_mutex.
 *
 * If vfs_unlink discovers a delegation, it will return -EWOULDBLOCK and
 * return a reference to the inode in delegated_inode.  The caller
 * should then break the delegation on that inode and retry.  Because
 * breaking a delegation may take a long time, the caller should drop
 * dir->i_mutex before doing so.
 *
 * Alternatively, a caller may pass NULL for delegated_inode.  This may
 * be appropriate for callers that expect the underlying filesystem not
 * to be NFS exported.
 */
int vfs_unlink(struct inode *dir, struct dentry *dentry,
	       struct inode **delegated_inode)
{
	struct inode *target = dentry->d_inode;
	int error = may_delete(dir, dentry, 0);

	if (error)
		return error;

	if (!dir->i_op->unlink)
		return -EPERM;

	inode_lock(target);
	if (is_local_mountpoint(dentry))
		error = -EBUSY;
	else {
		error = security_inode_unlink(dir, dentry);
		if (!error) {
			error = try_break_deleg(target, delegated_inode);
			if (error)
				goto out;
			error = dir->i_op->unlink(dir, dentry);
			if (!error) {
				dont_mount(dentry);
				detach_mounts(dentry);
			}
		}
	}
out:
	inode_unlock(target);

	/* We don't d_delete() NFS sillyrenamed files--they still exist. */
	if (!error && !(dentry->d_flags & DCACHE_NFSFS_RENAMED)) {
		fsnotify_link_count(target);
		d_delete(dentry);
	}

	return error;
}
EXPORT_SYMBOL(vfs_unlink);

/*
 * Make sure that the actual truncation of the file will occur outside its
 * directory's i_mutex.  Truncate can take a long time if there is a lot of
 * writeout happening, and we don't want to prevent access to the directory
 * while waiting on the I/O.
 */
static long do_unlinkat(int dfd, const char __user *pathname)
{
	int error;
	struct filename *name;
	struct dentry *dentry;
	struct path path;
	struct qstr last;
	int type;
	struct inode *inode = NULL;
	struct inode *delegated_inode = NULL;
	unsigned int lookup_flags = 0;
retry:
	name = user_path_parent(dfd, pathname,
				&path, &last, &type, lookup_flags);
	if (IS_ERR(name))
		return PTR_ERR(name);

	error = -EISDIR;
	if (type != LAST_NORM)
		goto exit1;

	error = mnt_want_write(path.mnt);
	if (error)
		goto exit1;
retry_deleg:
	inode_lock_nested(path.dentry->d_inode, I_MUTEX_PARENT);
	dentry = __lookup_hash(&last, path.dentry, lookup_flags);
	error = PTR_ERR(dentry);
	if (!IS_ERR(dentry)) {
		/* Why not before? Because we want correct error value */
		if (last.name[last.len])
			goto slashes;
		inode = dentry->d_inode;
		if (d_is_negative(dentry))
			goto slashes;
		/* hold the inode so its truncation happens after unlock */
		ihold(inode);
		error = security_path_unlink(&path, dentry);
		if (error)
			goto exit2;
		error = vfs_unlink(path.dentry->d_inode, dentry,
				   &delegated_inode);
exit2:
		dput(dentry);
	}
	inode_unlock(path.dentry->d_inode);
	if (inode)
		iput(inode);	/* truncate the inode here */
	inode = NULL;
	if (delegated_inode) {
		error = break_deleg_wait(&delegated_inode);
		if (!error)
			goto retry_deleg;
	}
	mnt_drop_write(path.mnt);
exit1:
	path_put(&path);
	putname(name);
	if (retry_estale(error, lookup_flags)) {
		lookup_flags |= LOOKUP_REVAL;
		inode = NULL;
		goto retry;
	}
	return error;

slashes:
	if (d_is_negative(dentry))
		error = -ENOENT;
	else if (d_is_dir(dentry))
		error = -EISDIR;
	else
		error = -ENOTDIR;
	goto exit2;
}

SYSCALL_DEFINE3(unlinkat, int, dfd, const char __user *, pathname, int, flag)
{
	if ((flag & ~AT_REMOVEDIR) != 0)
		return -EINVAL;

	if (flag & AT_REMOVEDIR)
		return do_rmdir(dfd, pathname);

	return do_unlinkat(dfd, pathname);
}

SYSCALL_DEFINE1(unlink, const char __user *, pathname)
{
	return do_unlinkat(AT_FDCWD, pathname);
}

/* Create a symlink pointing at @oldname in directory @dir. */
int vfs_symlink(struct inode *dir, struct dentry *dentry, const char *oldname)
{
	int error = may_create(dir, dentry);

	if (error)
		return error;

	if (!dir->i_op->symlink)
		return -EPERM;

	error = security_inode_symlink(dir, dentry, oldname);
	if (error)
		return error;

	error = dir->i_op->symlink(dir, dentry, oldname);
	if (!error)
		fsnotify_create(dir, dentry);
	return error;
}
EXPORT_SYMBOL(vfs_symlink);

SYSCALL_DEFINE3(symlinkat, const char __user *, oldname,
		int, newdfd, const char __user *, newname)
{
	int error;
	struct filename *from;
	struct dentry *dentry;
	struct path path;
	unsigned int lookup_flags = 0;

	from = getname(oldname);
	if (IS_ERR(from))
		return PTR_ERR(from);
retry:
	dentry = user_path_create(newdfd, newname, &path, lookup_flags);
	error = PTR_ERR(dentry);
	if (IS_ERR(dentry))
		goto out_putname;

	error = security_path_symlink(&path,
dentry, from->name); if (!error) error = vfs_symlink(path.dentry->d_inode, dentry, from->name); done_path_create(&path, dentry); if (retry_estale(error, lookup_flags)) { lookup_flags |= LOOKUP_REVAL; goto retry; } out_putname: putname(from); return error; } SYSCALL_DEFINE2(symlink, const char __user *, oldname, const char __user *, newname) { return sys_symlinkat(oldname, AT_FDCWD, newname); } /** * vfs_link - create a new link * @old_dentry: object to be linked * @dir: new parent * @new_dentry: where to create the new link * @delegated_inode: returns inode needing a delegation break * * The caller must hold dir->i_mutex * * If vfs_link discovers a delegation on the to-be-linked file in need * of breaking, it will return -EWOULDBLOCK and return a reference to the * inode in delegated_inode. The caller should then break the delegation * and retry. Because breaking a delegation may take a long time, the * caller should drop the i_mutex before doing so. * * Alternatively, a caller may pass NULL for delegated_inode. This may * be appropriate for callers that expect the underlying filesystem not * to be NFS exported. */ int vfs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *new_dentry, struct inode **delegated_inode) { struct inode *inode = old_dentry->d_inode; unsigned max_links = dir->i_sb->s_max_links; int error; if (!inode) return -ENOENT; error = may_create(dir, new_dentry); if (error) return error; if (dir->i_sb != inode->i_sb) return -EXDEV; /* * A link to an append-only or immutable file cannot be created. 
*/ if (IS_APPEND(inode) || IS_IMMUTABLE(inode)) return -EPERM; if (!dir->i_op->link) return -EPERM; if (S_ISDIR(inode->i_mode)) return -EPERM; error = security_inode_link(old_dentry, dir, new_dentry); if (error) return error; inode_lock(inode); /* Make sure we don't allow creating hardlink to an unlinked file */ if (inode->i_nlink == 0 && !(inode->i_state & I_LINKABLE)) error = -ENOENT; else if (max_links && inode->i_nlink >= max_links) error = -EMLINK; else { error = try_break_deleg(inode, delegated_inode); if (!error) error = dir->i_op->link(old_dentry, dir, new_dentry); } if (!error && (inode->i_state & I_LINKABLE)) { spin_lock(&inode->i_lock); inode->i_state &= ~I_LINKABLE; spin_unlock(&inode->i_lock); } inode_unlock(inode); if (!error) fsnotify_link(dir, inode, new_dentry); return error; } EXPORT_SYMBOL(vfs_link); /* * Hardlinks are often used in delicate situations. We avoid * security-related surprises by not following symlinks on the * newname. --KAB * * We don't follow them on the oldname either to be compatible * with linux 2.0, and to avoid hard-linking to directories * and other special files. --ADM */ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname, int, newdfd, const char __user *, newname, int, flags) { struct dentry *new_dentry; struct path old_path, new_path; struct inode *delegated_inode = NULL; int how = 0; int error; if ((flags & ~(AT_SYMLINK_FOLLOW | AT_EMPTY_PATH)) != 0) return -EINVAL; /* * To use null names we require CAP_DAC_READ_SEARCH * This ensures that not everyone will be able to create * handlink using the passed filedescriptor. 
*/ if (flags & AT_EMPTY_PATH) { if (!capable(CAP_DAC_READ_SEARCH)) return -ENOENT; how = LOOKUP_EMPTY; } if (flags & AT_SYMLINK_FOLLOW) how |= LOOKUP_FOLLOW; retry: error = user_path_at(olddfd, oldname, how, &old_path); if (error) return error; new_dentry = user_path_create(newdfd, newname, &new_path, (how & LOOKUP_REVAL)); error = PTR_ERR(new_dentry); if (IS_ERR(new_dentry)) goto out; error = -EXDEV; if (old_path.mnt != new_path.mnt) goto out_dput; error = may_linkat(&old_path); if (unlikely(error)) goto out_dput; error = security_path_link(old_path.dentry, &new_path, new_dentry); if (error) goto out_dput; error = vfs_link(old_path.dentry, new_path.dentry->d_inode, new_dentry, &delegated_inode); out_dput: done_path_create(&new_path, new_dentry); if (delegated_inode) { error = break_deleg_wait(&delegated_inode); if (!error) { path_put(&old_path); goto retry; } } if (retry_estale(error, how)) { path_put(&old_path); how |= LOOKUP_REVAL; goto retry; } out: path_put(&old_path); return error; } SYSCALL_DEFINE2(link, const char __user *, oldname, const char __user *, newname) { return sys_linkat(AT_FDCWD, oldname, AT_FDCWD, newname, 0); } /** * vfs_rename - rename a filesystem object * @old_dir: parent of source * @old_dentry: source * @new_dir: parent of destination * @new_dentry: destination * @delegated_inode: returns an inode needing a delegation break * @flags: rename flags * * The caller must hold multiple mutexes--see lock_rename()). * * If vfs_rename discovers a delegation in need of breaking at either * the source or destination, it will return -EWOULDBLOCK and return a * reference to the inode in delegated_inode. The caller should then * break the delegation and retry. Because breaking a delegation may * take a long time, the caller should drop all locks before doing * so. * * Alternatively, a caller may pass NULL for delegated_inode. This may * be appropriate for callers that expect the underlying filesystem not * to be NFS exported. 
* * The worst of all namespace operations - renaming directory. "Perverted" * doesn't even start to describe it. Somebody in UCB had a heck of a trip... * Problems: * a) we can get into loop creation. * b) race potential - two innocent renames can create a loop together. * That's where 4.4 screws up. Current fix: serialization on * sb->s_vfs_rename_mutex. We might be more accurate, but that's another * story. * c) we have to lock _four_ objects - parents and victim (if it exists), * and source (if it is not a directory). * And that - after we got ->i_mutex on parents (until then we don't know * whether the target exists). Solution: try to be smart with locking * order for inodes. We rely on the fact that tree topology may change * only under ->s_vfs_rename_mutex _and_ that parent of the object we * move will be locked. Thus we can rank directories by the tree * (ancestors first) and rank all non-directories after them. * That works since everybody except rename does "lock parent, lookup, * lock child" and rename is under ->s_vfs_rename_mutex. * HOWEVER, it relies on the assumption that any object with ->lookup() * has no more than 1 dentry. If "hybrid" objects will ever appear, * we'd better make sure that there's no link(2) for them. * d) conversion from fhandle to dentry may come in the wrong moment - when * we are removing the target. Solution: we will have to grab ->i_mutex * in the fhandle_to_dentry code. [FIXME - current nfsfh.c relies on * ->i_mutex on parents, which works but leads to some truly excessive * locking]. */ int vfs_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry, struct inode **delegated_inode, unsigned int flags) { int error; bool is_dir = d_is_dir(old_dentry); const unsigned char *old_name; struct inode *source = old_dentry->d_inode; struct inode *target = new_dentry->d_inode; bool new_is_dir = false; unsigned max_links = new_dir->i_sb->s_max_links; /* * Check source == target. 
* On overlayfs need to look at underlying inodes. */ if (vfs_select_inode(old_dentry, 0) == vfs_select_inode(new_dentry, 0)) return 0; error = may_delete(old_dir, old_dentry, is_dir); if (error) return error; if (!target) { error = may_create(new_dir, new_dentry); } else { new_is_dir = d_is_dir(new_dentry); if (!(flags & RENAME_EXCHANGE)) error = may_delete(new_dir, new_dentry, is_dir); else error = may_delete(new_dir, new_dentry, new_is_dir); } if (error) return error; if (!old_dir->i_op->rename && !old_dir->i_op->rename2) return -EPERM; if (flags && !old_dir->i_op->rename2) return -EINVAL; /* * If we are going to change the parent - check write permissions, * we'll need to flip '..'. */ if (new_dir != old_dir) { if (is_dir) { error = inode_permission(source, MAY_WRITE); if (error) return error; } if ((flags & RENAME_EXCHANGE) && new_is_dir) { error = inode_permission(target, MAY_WRITE); if (error) return error; } } error = security_inode_rename(old_dir, old_dentry, new_dir, new_dentry, flags); if (error) return error; old_name = fsnotify_oldname_init(old_dentry->d_name.name); dget(new_dentry); if (!is_dir || (flags & RENAME_EXCHANGE)) lock_two_nondirectories(source, target); else if (target) inode_lock(target); error = -EBUSY; if (is_local_mountpoint(old_dentry) || is_local_mountpoint(new_dentry)) goto out; if (max_links && new_dir != old_dir) { error = -EMLINK; if (is_dir && !new_is_dir && new_dir->i_nlink >= max_links) goto out; if ((flags & RENAME_EXCHANGE) && !is_dir && new_is_dir && old_dir->i_nlink >= max_links) goto out; } if (is_dir && !(flags & RENAME_EXCHANGE) && target) shrink_dcache_parent(new_dentry); if (!is_dir) { error = try_break_deleg(source, delegated_inode); if (error) goto out; } if (target && !new_is_dir) { error = try_break_deleg(target, delegated_inode); if (error) goto out; } if (!old_dir->i_op->rename2) { error = old_dir->i_op->rename(old_dir, old_dentry, new_dir, new_dentry); } else { WARN_ON(old_dir->i_op->rename != NULL); error = 
old_dir->i_op->rename2(old_dir, old_dentry, new_dir, new_dentry, flags); } if (error) goto out; if (!(flags & RENAME_EXCHANGE) && target) { if (is_dir) target->i_flags |= S_DEAD; dont_mount(new_dentry); detach_mounts(new_dentry); } if (!(old_dir->i_sb->s_type->fs_flags & FS_RENAME_DOES_D_MOVE)) { if (!(flags & RENAME_EXCHANGE)) d_move(old_dentry, new_dentry); else d_exchange(old_dentry, new_dentry); } out: if (!is_dir || (flags & RENAME_EXCHANGE)) unlock_two_nondirectories(source, target); else if (target) inode_unlock(target); dput(new_dentry); if (!error) { fsnotify_move(old_dir, new_dir, old_name, is_dir, !(flags & RENAME_EXCHANGE) ? target : NULL, old_dentry); if (flags & RENAME_EXCHANGE) { fsnotify_move(new_dir, old_dir, old_dentry->d_name.name, new_is_dir, NULL, new_dentry); } } fsnotify_oldname_free(old_name); return error; } EXPORT_SYMBOL(vfs_rename); SYSCALL_DEFINE5(renameat2, int, olddfd, const char __user *, oldname, int, newdfd, const char __user *, newname, unsigned int, flags) { struct dentry *old_dentry, *new_dentry; struct dentry *trap; struct path old_path, new_path; struct qstr old_last, new_last; int old_type, new_type; struct inode *delegated_inode = NULL; struct filename *from; struct filename *to; unsigned int lookup_flags = 0, target_flags = LOOKUP_RENAME_TARGET; bool should_retry = false; int error; if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT)) return -EINVAL; if ((flags & (RENAME_NOREPLACE | RENAME_WHITEOUT)) && (flags & RENAME_EXCHANGE)) return -EINVAL; if ((flags & RENAME_WHITEOUT) && !capable(CAP_MKNOD)) return -EPERM; if (flags & RENAME_EXCHANGE) target_flags = 0; retry: from = user_path_parent(olddfd, oldname, &old_path, &old_last, &old_type, lookup_flags); if (IS_ERR(from)) { error = PTR_ERR(from); goto exit; } to = user_path_parent(newdfd, newname, &new_path, &new_last, &new_type, lookup_flags); if (IS_ERR(to)) { error = PTR_ERR(to); goto exit1; } error = -EXDEV; if (old_path.mnt != new_path.mnt) goto exit2; 
error = -EBUSY; if (old_type != LAST_NORM) goto exit2; if (flags & RENAME_NOREPLACE) error = -EEXIST; if (new_type != LAST_NORM) goto exit2; error = mnt_want_write(old_path.mnt); if (error) goto exit2; retry_deleg: trap = lock_rename(new_path.dentry, old_path.dentry); old_dentry = __lookup_hash(&old_last, old_path.dentry, lookup_flags); error = PTR_ERR(old_dentry); if (IS_ERR(old_dentry)) goto exit3; /* source must exist */ error = -ENOENT; if (d_is_negative(old_dentry)) goto exit4; new_dentry = __lookup_hash(&new_last, new_path.dentry, lookup_flags | target_flags); error = PTR_ERR(new_dentry); if (IS_ERR(new_dentry)) goto exit4; error = -EEXIST; if ((flags & RENAME_NOREPLACE) && d_is_positive(new_dentry)) goto exit5; if (flags & RENAME_EXCHANGE) { error = -ENOENT; if (d_is_negative(new_dentry)) goto exit5; if (!d_is_dir(new_dentry)) { error = -ENOTDIR; if (new_last.name[new_last.len]) goto exit5; } } /* unless the source is a directory trailing slashes give -ENOTDIR */ if (!d_is_dir(old_dentry)) { error = -ENOTDIR; if (old_last.name[old_last.len]) goto exit5; if (!(flags & RENAME_EXCHANGE) && new_last.name[new_last.len]) goto exit5; } /* source should not be ancestor of target */ error = -EINVAL; if (old_dentry == trap) goto exit5; /* target should not be an ancestor of source */ if (!(flags & RENAME_EXCHANGE)) error = -ENOTEMPTY; if (new_dentry == trap) goto exit5; error = security_path_rename(&old_path, old_dentry, &new_path, new_dentry, flags); if (error) goto exit5; error = vfs_rename(old_path.dentry->d_inode, old_dentry, new_path.dentry->d_inode, new_dentry, &delegated_inode, flags); exit5: dput(new_dentry); exit4: dput(old_dentry); exit3: unlock_rename(new_path.dentry, old_path.dentry); if (delegated_inode) { error = break_deleg_wait(&delegated_inode); if (!error) goto retry_deleg; } mnt_drop_write(old_path.mnt); exit2: if (retry_estale(error, lookup_flags)) should_retry = true; path_put(&new_path); putname(to); exit1: path_put(&old_path); putname(from); if 
/* Tail of renameat2(): the ESTALE retry and final return. */
(should_retry) {
		should_retry = false;
		lookup_flags |= LOOKUP_REVAL;
		goto retry;
	}
exit:
	return error;
}

/* renameat(2) is renameat2(2) with no flags. */
SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
		int, newdfd, const char __user *, newname)
{
	return sys_renameat2(olddfd, oldname, newdfd, newname, 0);
}

/* rename(2): both paths relative to the current working directory. */
SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newname)
{
	return sys_renameat2(AT_FDCWD, oldname, AT_FDCWD, newname, 0);
}

/*
 * vfs_whiteout - create a whiteout entry (used by overlay/union mounts)
 * @dir:	parent directory (caller holds its i_mutex)
 * @dentry:	where to create the whiteout
 *
 * A whiteout is a 0/0 char device with the special WHITEOUT_MODE, created
 * through the filesystem's ->mknod.
 */
int vfs_whiteout(struct inode *dir, struct dentry *dentry)
{
	int error = may_create(dir, dentry);
	if (error)
		return error;

	if (!dir->i_op->mknod)
		return -EPERM;

	return dir->i_op->mknod(dir, dentry, S_IFCHR | WHITEOUT_MODE, WHITEOUT_DEV);
}
EXPORT_SYMBOL(vfs_whiteout);

/*
 * readlink_copy - copy a symlink body to userspace
 * @buffer:	destination user buffer
 * @buflen:	size of @buffer
 * @link:	link text, or an ERR_PTR
 *
 * Accepts an ERR_PTR directly so callers can feed it the raw result of
 * ->get_link(): the PTR_ERR value is pre-loaded into @len and returned
 * as-is when IS_ERR(link).  The copy is silently truncated to @buflen
 * (readlink(2) semantics: no NUL terminator, no error on truncation).
 * Returns the number of bytes copied or a negative errno.
 */
int readlink_copy(char __user *buffer, int buflen, const char *link)
{
	int len = PTR_ERR(link);
	if (IS_ERR(link))
		goto out;

	len = strlen(link);
	if (len > (unsigned) buflen)
		len = buflen;
	if (copy_to_user(buffer, link, len))
		len = -EFAULT;
out:
	return len;
}
EXPORT_SYMBOL(readlink_copy);

/*
 * A helper for ->readlink(). This should be used *ONLY* for symlinks that
 * have ->get_link() not calling nd_jump_link(). Using (or not using) it
 * for any given inode is up to filesystem.
 */
/*
 * generic_readlink - default ->readlink implementation
 *
 * Uses the cached inode->i_link body when present, otherwise asks the
 * filesystem via ->get_link() (with a delayed_call so the link body stays
 * pinned until after the copy), then copies it out with readlink_copy().
 */
int generic_readlink(struct dentry *dentry, char __user *buffer, int buflen)
{
	DEFINE_DELAYED_CALL(done);
	struct inode *inode = d_inode(dentry);
	const char *link = inode->i_link;
	int res;

	if (!link) {
		link = inode->i_op->get_link(dentry, inode, &done);
		if (IS_ERR(link))
			return PTR_ERR(link);
	}
	res = readlink_copy(buffer, buflen, link);
	do_delayed_call(&done);
	return res;
}
EXPORT_SYMBOL(generic_readlink);

/* get the link contents into pagecache */
/*
 * page_get_link - ->get_link() for symlinks whose body lives in page 0 of
 * the inode's pagecache.
 *
 * A NULL @dentry signals RCU-walk: only an already-uptodate cached page
 * may be used, otherwise -ECHILD tells the caller to drop out of RCU mode
 * (no blocking read is allowed there).  The page reference is released
 * later via the delayed_call -> page_put_link().
 */
const char *page_get_link(struct dentry *dentry, struct inode *inode,
			  struct delayed_call *callback)
{
	char *kaddr;
	struct page *page;
	struct address_space *mapping = inode->i_mapping;

	if (!dentry) {
		page = find_get_page(mapping, 0);
		if (!page)
			return ERR_PTR(-ECHILD);
		if (!PageUptodate(page)) {
			put_page(page);
			return ERR_PTR(-ECHILD);
		}
	} else {
		page = read_mapping_page(mapping, 0, NULL);
		if (IS_ERR(page))
			return (char*)page;
	}
	set_delayed_call(callback, page_put_link, page);
	/* Highmem pages can't be returned as a long-lived kernel pointer;
	 * such mappings must never be used for symlink bodies. */
	BUG_ON(mapping_gfp_mask(mapping) & __GFP_HIGHMEM);
	kaddr = page_address(page);
	/* Ensure the body is NUL-terminated within the page. */
	nd_terminate_link(kaddr, inode->i_size, PAGE_SIZE - 1);
	return kaddr;
}
EXPORT_SYMBOL(page_get_link);

/* Delayed-call destructor: drop the page reference page_get_link() took. */
void page_put_link(void *arg)
{
	put_page(arg);
}
EXPORT_SYMBOL(page_put_link);

/* ->readlink for pagecache-backed symlinks: fetch page 0, copy it out. */
int page_readlink(struct dentry *dentry, char __user *buffer, int buflen)
{
	DEFINE_DELAYED_CALL(done);
	int res = readlink_copy(buffer, buflen,
				page_get_link(dentry, d_inode(dentry),
					      &done));
	do_delayed_call(&done);
	return res;
}
EXPORT_SYMBOL(page_readlink);

/*
 * The nofs argument instructs pagecache_write_begin to pass AOP_FLAG_NOFS
 */
/*
 * __page_symlink - store a symlink body into the inode's pagecache
 * @inode:	symlink inode
 * @symname:	NUL-terminated link text
 * @len:	strlen(symname) + 1 (the trailing NUL is not written;
 *		hence the len-1 arithmetic throughout)
 * @nofs:	nonzero to forbid fs-recursing reclaim during the write
 *
 * A short write (err < len-1) simply retries; err < 0 is returned as-is.
 */
int __page_symlink(struct inode *inode, const char *symname, int len, int nofs)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
	void *fsdata;
	int err;
	unsigned int flags = AOP_FLAG_UNINTERRUPTIBLE;
	if (nofs)
		flags |= AOP_FLAG_NOFS;

retry:
	err = pagecache_write_begin(NULL, mapping, 0, len-1,
				flags, &page, &fsdata);
	if (err)
		goto fail;

	memcpy(page_address(page), symname, len-1);

	err = pagecache_write_end(NULL, mapping, 0, len-1, len-1,
							page, fsdata);
	if (err < 0)
		goto fail;
	if (err < len-1)
		goto retry;

	mark_inode_dirty(inode);
	return 0;
fail:
	return err;
}
EXPORT_SYMBOL(__page_symlink);

/* As __page_symlink(), deriving @nofs from the mapping's GFP constraint
 * (no __GFP_FS allowed => nofs). */
int page_symlink(struct inode *inode, const char *symname, int len)
{
	return __page_symlink(inode, symname, len,
			!mapping_gfp_constraint(inode->i_mapping, __GFP_FS));
}
EXPORT_SYMBOL(page_symlink);

/* Stock inode_operations for pagecache-backed symlinks. */
const struct inode_operations page_symlink_inode_operations = {
	.readlink	= generic_readlink,
	.get_link	= page_get_link,
};
EXPORT_SYMBOL(page_symlink_inode_operations);
/* dataset artifact (stray file path, not code): ./CrossVul/dataset_final_sorted/CWE-284/c/good_5199_0 */
/* dataset artifact (stray identifier, not code): crossvul-cpp_data_good_2409_0 */
/* * linux/fs/namespace.c * * (C) Copyright Al Viro 2000, 2001 * Released under GPL v2. * * Based on code from fs/super.c, copyright Linus Torvalds and others. * Heavily rewritten. */ #include <linux/syscalls.h> #include <linux/export.h> #include <linux/capability.h> #include <linux/mnt_namespace.h> #include <linux/user_namespace.h> #include <linux/namei.h> #include <linux/security.h> #include <linux/idr.h> #include <linux/init.h> /* init_rootfs */ #include <linux/fs_struct.h> /* get_fs_root et.al. */ #include <linux/fsnotify.h> /* fsnotify_vfsmount_delete */ #include <linux/uaccess.h> #include <linux/proc_ns.h> #include <linux/magic.h> #include <linux/bootmem.h> #include <linux/task_work.h> #include "pnode.h" #include "internal.h" static unsigned int m_hash_mask __read_mostly; static unsigned int m_hash_shift __read_mostly; static unsigned int mp_hash_mask __read_mostly; static unsigned int mp_hash_shift __read_mostly; static __initdata unsigned long mhash_entries; static int __init set_mhash_entries(char *str) { if (!str) return 0; mhash_entries = simple_strtoul(str, &str, 0); return 1; } __setup("mhash_entries=", set_mhash_entries); static __initdata unsigned long mphash_entries; static int __init set_mphash_entries(char *str) { if (!str) return 0; mphash_entries = simple_strtoul(str, &str, 0); return 1; } __setup("mphash_entries=", set_mphash_entries); static u64 event; static DEFINE_IDA(mnt_id_ida); static DEFINE_IDA(mnt_group_ida); static DEFINE_SPINLOCK(mnt_id_lock); static int mnt_id_start = 0; static int mnt_group_start = 1; static struct hlist_head *mount_hashtable __read_mostly; static struct hlist_head *mountpoint_hashtable __read_mostly; static struct kmem_cache *mnt_cache __read_mostly; static DECLARE_RWSEM(namespace_sem); /* /sys/fs */ struct kobject *fs_kobj; EXPORT_SYMBOL_GPL(fs_kobj); /* * vfsmount lock may be taken for read to prevent changes to the * vfsmount hash, ie. during mountpoint lookups or walking back * up the tree. 
* * It should be taken for write in all cases where the vfsmount * tree or hash is modified or when a vfsmount structure is modified. */ __cacheline_aligned_in_smp DEFINE_SEQLOCK(mount_lock); static inline struct hlist_head *m_hash(struct vfsmount *mnt, struct dentry *dentry) { unsigned long tmp = ((unsigned long)mnt / L1_CACHE_BYTES); tmp += ((unsigned long)dentry / L1_CACHE_BYTES); tmp = tmp + (tmp >> m_hash_shift); return &mount_hashtable[tmp & m_hash_mask]; } static inline struct hlist_head *mp_hash(struct dentry *dentry) { unsigned long tmp = ((unsigned long)dentry / L1_CACHE_BYTES); tmp = tmp + (tmp >> mp_hash_shift); return &mountpoint_hashtable[tmp & mp_hash_mask]; } /* * allocation is serialized by namespace_sem, but we need the spinlock to * serialize with freeing. */ static int mnt_alloc_id(struct mount *mnt) { int res; retry: ida_pre_get(&mnt_id_ida, GFP_KERNEL); spin_lock(&mnt_id_lock); res = ida_get_new_above(&mnt_id_ida, mnt_id_start, &mnt->mnt_id); if (!res) mnt_id_start = mnt->mnt_id + 1; spin_unlock(&mnt_id_lock); if (res == -EAGAIN) goto retry; return res; } static void mnt_free_id(struct mount *mnt) { int id = mnt->mnt_id; spin_lock(&mnt_id_lock); ida_remove(&mnt_id_ida, id); if (mnt_id_start > id) mnt_id_start = id; spin_unlock(&mnt_id_lock); } /* * Allocate a new peer group ID * * mnt_group_ida is protected by namespace_sem */ static int mnt_alloc_group_id(struct mount *mnt) { int res; if (!ida_pre_get(&mnt_group_ida, GFP_KERNEL)) return -ENOMEM; res = ida_get_new_above(&mnt_group_ida, mnt_group_start, &mnt->mnt_group_id); if (!res) mnt_group_start = mnt->mnt_group_id + 1; return res; } /* * Release a peer group ID */ void mnt_release_group_id(struct mount *mnt) { int id = mnt->mnt_group_id; ida_remove(&mnt_group_ida, id); if (mnt_group_start > id) mnt_group_start = id; mnt->mnt_group_id = 0; } /* * vfsmount lock must be held for read */ static inline void mnt_add_count(struct mount *mnt, int n) { #ifdef CONFIG_SMP 
this_cpu_add(mnt->mnt_pcp->mnt_count, n); #else preempt_disable(); mnt->mnt_count += n; preempt_enable(); #endif } /* * vfsmount lock must be held for write */ unsigned int mnt_get_count(struct mount *mnt) { #ifdef CONFIG_SMP unsigned int count = 0; int cpu; for_each_possible_cpu(cpu) { count += per_cpu_ptr(mnt->mnt_pcp, cpu)->mnt_count; } return count; #else return mnt->mnt_count; #endif } static void drop_mountpoint(struct fs_pin *p) { struct mount *m = container_of(p, struct mount, mnt_umount); dput(m->mnt_ex_mountpoint); pin_remove(p); mntput(&m->mnt); } static struct mount *alloc_vfsmnt(const char *name) { struct mount *mnt = kmem_cache_zalloc(mnt_cache, GFP_KERNEL); if (mnt) { int err; err = mnt_alloc_id(mnt); if (err) goto out_free_cache; if (name) { mnt->mnt_devname = kstrdup_const(name, GFP_KERNEL); if (!mnt->mnt_devname) goto out_free_id; } #ifdef CONFIG_SMP mnt->mnt_pcp = alloc_percpu(struct mnt_pcp); if (!mnt->mnt_pcp) goto out_free_devname; this_cpu_add(mnt->mnt_pcp->mnt_count, 1); #else mnt->mnt_count = 1; mnt->mnt_writers = 0; #endif INIT_HLIST_NODE(&mnt->mnt_hash); INIT_LIST_HEAD(&mnt->mnt_child); INIT_LIST_HEAD(&mnt->mnt_mounts); INIT_LIST_HEAD(&mnt->mnt_list); INIT_LIST_HEAD(&mnt->mnt_expire); INIT_LIST_HEAD(&mnt->mnt_share); INIT_LIST_HEAD(&mnt->mnt_slave_list); INIT_LIST_HEAD(&mnt->mnt_slave); INIT_HLIST_NODE(&mnt->mnt_mp_list); #ifdef CONFIG_FSNOTIFY INIT_HLIST_HEAD(&mnt->mnt_fsnotify_marks); #endif init_fs_pin(&mnt->mnt_umount, drop_mountpoint); } return mnt; #ifdef CONFIG_SMP out_free_devname: kfree_const(mnt->mnt_devname); #endif out_free_id: mnt_free_id(mnt); out_free_cache: kmem_cache_free(mnt_cache, mnt); return NULL; } /* * Most r/o checks on a fs are for operations that take * discrete amounts of time, like a write() or unlink(). * We must keep track of when those operations start * (for permission checks) and when they end, so that * we can determine when writes are able to occur to * a filesystem. 
*/ /* * __mnt_is_readonly: check whether a mount is read-only * @mnt: the mount to check for its write status * * This shouldn't be used directly ouside of the VFS. * It does not guarantee that the filesystem will stay * r/w, just that it is right *now*. This can not and * should not be used in place of IS_RDONLY(inode). * mnt_want/drop_write() will _keep_ the filesystem * r/w. */ int __mnt_is_readonly(struct vfsmount *mnt) { if (mnt->mnt_flags & MNT_READONLY) return 1; if (mnt->mnt_sb->s_flags & MS_RDONLY) return 1; return 0; } EXPORT_SYMBOL_GPL(__mnt_is_readonly); static inline void mnt_inc_writers(struct mount *mnt) { #ifdef CONFIG_SMP this_cpu_inc(mnt->mnt_pcp->mnt_writers); #else mnt->mnt_writers++; #endif } static inline void mnt_dec_writers(struct mount *mnt) { #ifdef CONFIG_SMP this_cpu_dec(mnt->mnt_pcp->mnt_writers); #else mnt->mnt_writers--; #endif } static unsigned int mnt_get_writers(struct mount *mnt) { #ifdef CONFIG_SMP unsigned int count = 0; int cpu; for_each_possible_cpu(cpu) { count += per_cpu_ptr(mnt->mnt_pcp, cpu)->mnt_writers; } return count; #else return mnt->mnt_writers; #endif } static int mnt_is_readonly(struct vfsmount *mnt) { if (mnt->mnt_sb->s_readonly_remount) return 1; /* Order wrt setting s_flags/s_readonly_remount in do_remount() */ smp_rmb(); return __mnt_is_readonly(mnt); } /* * Most r/o & frozen checks on a fs are for operations that take discrete * amounts of time, like a write() or unlink(). We must keep track of when * those operations start (for permission checks) and when they end, so that we * can determine when writes are able to occur to a filesystem. */ /** * __mnt_want_write - get write access to a mount without freeze protection * @m: the mount on which to take a write * * This tells the low-level filesystem that a write is about to be performed to * it, and makes sure that writes are allowed (mnt it read-write) before * returning success. This operation does not protect against filesystem being * frozen. 
When the write operation is finished, __mnt_drop_write() must be * called. This is effectively a refcount. */ int __mnt_want_write(struct vfsmount *m) { struct mount *mnt = real_mount(m); int ret = 0; preempt_disable(); mnt_inc_writers(mnt); /* * The store to mnt_inc_writers must be visible before we pass * MNT_WRITE_HOLD loop below, so that the slowpath can see our * incremented count after it has set MNT_WRITE_HOLD. */ smp_mb(); while (ACCESS_ONCE(mnt->mnt.mnt_flags) & MNT_WRITE_HOLD) cpu_relax(); /* * After the slowpath clears MNT_WRITE_HOLD, mnt_is_readonly will * be set to match its requirements. So we must not load that until * MNT_WRITE_HOLD is cleared. */ smp_rmb(); if (mnt_is_readonly(m)) { mnt_dec_writers(mnt); ret = -EROFS; } preempt_enable(); return ret; } /** * mnt_want_write - get write access to a mount * @m: the mount on which to take a write * * This tells the low-level filesystem that a write is about to be performed to * it, and makes sure that writes are allowed (mount is read-write, filesystem * is not frozen) before returning success. When the write operation is * finished, mnt_drop_write() must be called. This is effectively a refcount. */ int mnt_want_write(struct vfsmount *m) { int ret; sb_start_write(m->mnt_sb); ret = __mnt_want_write(m); if (ret) sb_end_write(m->mnt_sb); return ret; } EXPORT_SYMBOL_GPL(mnt_want_write); /** * mnt_clone_write - get write access to a mount * @mnt: the mount on which to take a write * * This is effectively like mnt_want_write, except * it must only be used to take an extra write reference * on a mountpoint that we already know has a write reference * on it. This allows some optimisation. * * After finished, mnt_drop_write must be called as usual to * drop the reference. 
 */
/*
 * mnt_clone_write - get write access to a mount, given that the caller
 * (or another open file on the same mount) already holds one.
 * Returns 0 on success, -EROFS if the mount is read-only.
 */
int mnt_clone_write(struct vfsmount *mnt)
{
	/* superblock may be r/o */
	if (__mnt_is_readonly(mnt))
		return -EROFS;
	/* preemption off so the per-cpu writer count stays on this CPU */
	preempt_disable();
	mnt_inc_writers(real_mount(mnt));
	preempt_enable();
	return 0;
}
EXPORT_SYMBOL_GPL(mnt_clone_write);

/**
 * __mnt_want_write_file - get write access to a file's mount
 * @file: the file who's mount on which to take a write
 *
 * This is like __mnt_want_write, but it takes a file and can
 * do some optimisations if the file is open for write already
 */
int __mnt_want_write_file(struct file *file)
{
	/* FMODE_WRITER means the open itself already took write access,
	 * so the cheaper clone path can be used. */
	if (!(file->f_mode & FMODE_WRITER))
		return __mnt_want_write(file->f_path.mnt);
	else
		return mnt_clone_write(file->f_path.mnt);
}

/**
 * mnt_want_write_file - get write access to a file's mount
 * @file: the file who's mount on which to take a write
 *
 * This is like mnt_want_write, but it takes a file and can
 * do some optimisations if the file is open for write already
 */
int mnt_want_write_file(struct file *file)
{
	int ret;

	/* block superblock freezing first; undo it if the per-mount
	 * write reference cannot be taken */
	sb_start_write(file->f_path.mnt->mnt_sb);
	ret = __mnt_want_write_file(file);
	if (ret)
		sb_end_write(file->f_path.mnt->mnt_sb);
	return ret;
}
EXPORT_SYMBOL_GPL(mnt_want_write_file);

/**
 * __mnt_drop_write - give up write access to a mount
 * @mnt: the mount on which to give up write access
 *
 * Tells the low-level filesystem that we are done
 * performing writes to it.  Must be matched with
 * __mnt_want_write() call above.
 */
void __mnt_drop_write(struct vfsmount *mnt)
{
	preempt_disable();
	mnt_dec_writers(real_mount(mnt));
	preempt_enable();
}

/**
 * mnt_drop_write - give up write access to a mount
 * @mnt: the mount on which to give up write access
 *
 * Tells the low-level filesystem that we are done performing writes to it and
 * also allows filesystem to be frozen again.  Must be matched with
 * mnt_want_write() call above.
 */
void mnt_drop_write(struct vfsmount *mnt)
{
	__mnt_drop_write(mnt);
	sb_end_write(mnt->mnt_sb);
}
EXPORT_SYMBOL_GPL(mnt_drop_write);

/* file-based wrappers around the two drop variants above */
void __mnt_drop_write_file(struct file *file)
{
	__mnt_drop_write(file->f_path.mnt);
}

void mnt_drop_write_file(struct file *file)
{
	mnt_drop_write(file->f_path.mnt);
}
EXPORT_SYMBOL(mnt_drop_write_file);

/*
 * Flip a single mount to read-only, failing with -EBUSY if any writer
 * currently holds write access.  MNT_WRITE_HOLD acts as a barrier that
 * stalls new writers while the writer count is summed.
 */
static int mnt_make_readonly(struct mount *mnt)
{
	int ret = 0;

	lock_mount_hash();
	mnt->mnt.mnt_flags |= MNT_WRITE_HOLD;
	/*
	 * After storing MNT_WRITE_HOLD, we'll read the counters. This store
	 * should be visible before we do.
	 */
	smp_mb();

	/*
	 * With writers on hold, if this value is zero, then there are
	 * definitely no active writers (although held writers may subsequently
	 * increment the count, they'll have to wait, and decrement it after
	 * seeing MNT_READONLY).
	 *
	 * It is OK to have counter incremented on one CPU and decremented on
	 * another: the sum will add up correctly. The danger would be when we
	 * sum up each counter, if we read a counter before it is incremented,
	 * but then read another CPU's count which it has been subsequently
	 * decremented from -- we would see more decrements than we should.
	 * MNT_WRITE_HOLD protects against this scenario, because
	 * mnt_want_write first increments count, then smp_mb, then spins on
	 * MNT_WRITE_HOLD, so it can't be decremented by another CPU while
	 * we're counting up here.
	 */
	if (mnt_get_writers(mnt) > 0)
		ret = -EBUSY;
	else
		mnt->mnt.mnt_flags |= MNT_READONLY;
	/*
	 * MNT_READONLY must become visible before ~MNT_WRITE_HOLD, so writers
	 * that become unheld will see MNT_READONLY.
	 */
	smp_wmb();
	mnt->mnt.mnt_flags &= ~MNT_WRITE_HOLD;
	unlock_mount_hash();
	return ret;
}

/* Clear MNT_READONLY under the mount hash lock; no writer check needed. */
static void __mnt_unmake_readonly(struct mount *mnt)
{
	lock_mount_hash();
	mnt->mnt.mnt_flags &= ~MNT_READONLY;
	unlock_mount_hash();
}

/*
 * Prepare every mount of @sb for a read-only remount: verify no mount of
 * this superblock has active writers (same MNT_WRITE_HOLD technique as
 * mnt_make_readonly, applied to the whole s_mounts list), then set
 * s_readonly_remount so new write requests fail.  All WRITE_HOLD bits are
 * cleared before returning, whether or not the check succeeded.
 */
int sb_prepare_remount_readonly(struct super_block *sb)
{
	struct mount *mnt;
	int err = 0;

	/* Racy optimization.  Recheck the counter under MNT_WRITE_HOLD */
	if (atomic_long_read(&sb->s_remove_count))
		return -EBUSY;

	lock_mount_hash();
	list_for_each_entry(mnt, &sb->s_mounts, mnt_instance) {
		if (!(mnt->mnt.mnt_flags & MNT_READONLY)) {
			mnt->mnt.mnt_flags |= MNT_WRITE_HOLD;
			smp_mb();
			if (mnt_get_writers(mnt) > 0) {
				err = -EBUSY;
				break;
			}
		}
	}
	if (!err && atomic_long_read(&sb->s_remove_count))
		err = -EBUSY;

	if (!err) {
		sb->s_readonly_remount = 1;
		smp_wmb();
	}
	list_for_each_entry(mnt, &sb->s_mounts, mnt_instance) {
		if (mnt->mnt.mnt_flags & MNT_WRITE_HOLD)
			mnt->mnt.mnt_flags &= ~MNT_WRITE_HOLD;
	}
	unlock_mount_hash();

	return err;
}

/* Release everything owned by a struct mount; the struct itself included. */
static void free_vfsmnt(struct mount *mnt)
{
	kfree_const(mnt->mnt_devname);
#ifdef CONFIG_SMP
	free_percpu(mnt->mnt_pcp);
#endif
	kmem_cache_free(mnt_cache, mnt);
}

/* RCU callback wrapper for free_vfsmnt */
static void delayed_free_vfsmnt(struct rcu_head *head)
{
	free_vfsmnt(container_of(head, struct mount, mnt_rcu));
}

/* call under rcu_read_lock */
/*
 * Validate a mount found during an RCU walk: take a reference, then
 * re-check the mount_lock seqcount.  If the world changed underneath us,
 * the reference is dropped again.  On a concurrent *synchronous* umount
 * (MNT_SYNC_UMOUNT) a plain count decrement is safe; otherwise a full
 * mntput() is needed, which may sleep, so the RCU read lock is dropped
 * around it and re-taken for the caller.
 */
bool legitimize_mnt(struct vfsmount *bastard, unsigned seq)
{
	struct mount *mnt;
	if (read_seqretry(&mount_lock, seq))
		return false;
	if (bastard == NULL)
		return true;
	mnt = real_mount(bastard);
	mnt_add_count(mnt, 1);
	if (likely(!read_seqretry(&mount_lock, seq)))
		return true;
	if (bastard->mnt_flags & MNT_SYNC_UMOUNT) {
		mnt_add_count(mnt, -1);
		return false;
	}
	rcu_read_unlock();
	mntput(bastard);
	rcu_read_lock();
	return false;
}

/*
 * find the first mount at @dentry on vfsmount @mnt.
 * call under rcu_read_lock()
 */
struct mount *__lookup_mnt(struct vfsmount *mnt, struct dentry *dentry)
{
	struct hlist_head *head = m_hash(mnt, dentry);
	struct mount *p;

	hlist_for_each_entry_rcu(p, head, mnt_hash)
		if (&p->mnt_parent->mnt == mnt && p->mnt_mountpoint == dentry)
			return p;
	return NULL;
}

/*
 * find the last mount at @dentry on vfsmount @mnt.
 * mount_lock must be held.
 */
struct mount *__lookup_mnt_last(struct vfsmount *mnt, struct dentry *dentry)
{
	struct mount *p, *res = NULL;
	p = __lookup_mnt(mnt, dentry);
	if (!p)
		goto out;
	/* mounts flagged MNT_UMOUNT are being torn down and don't count */
	if (!(p->mnt.mnt_flags & MNT_UMOUNT))
		res = p;
	hlist_for_each_entry_continue(p, mnt_hash) {
		if (&p->mnt_parent->mnt != mnt || p->mnt_mountpoint != dentry)
			break;
		if (!(p->mnt.mnt_flags & MNT_UMOUNT))
			res = p;
	}
out:
	return res;
}

/*
 * lookup_mnt - Return the first child mount mounted at path
 *
 * "First" means first mounted chronologically.  If you create the
 * following mounts:
 *
 * mount /dev/sda1 /mnt
 * mount /dev/sda2 /mnt
 * mount /dev/sda3 /mnt
 *
 * Then lookup_mnt() on the base /mnt dentry in the root mount will
 * return successively the root dentry and vfsmount of /dev/sda1, then
 * /dev/sda2, then /dev/sda3, then NULL.
 *
 * lookup_mnt takes a reference to the found vfsmount.
 */
struct vfsmount *lookup_mnt(struct path *path)
{
	struct mount *child_mnt;
	struct vfsmount *m;
	unsigned seq;

	/* lockless: retry the hash lookup until legitimize_mnt confirms the
	 * result was stable across the mount_lock seqcount */
	rcu_read_lock();
	do {
		seq = read_seqbegin(&mount_lock);
		child_mnt = __lookup_mnt(path->mnt, path->dentry);
		m = child_mnt ? &child_mnt->mnt : NULL;
	} while (!legitimize_mnt(m, seq));
	rcu_read_unlock();
	return m;
}

/*
 * __is_local_mountpoint - Test to see if dentry is a mountpoint in the
 * current mount namespace.
 *
 * The common case is dentries are not mountpoints at all and that
 * test is handled inline.  For the slow case when we are actually
 * dealing with a mountpoint of some kind, walk through all of the
 * mounts in the current mount namespace and test to see if the dentry
 * is a mountpoint.
 *
 * The mount_hashtable is not usable in the context because we
 * need to identify all mounts that may be in the current mount
 * namespace not just a mount that happens to have some specified
 * parent mount.
 */
bool __is_local_mountpoint(struct dentry *dentry)
{
	struct mnt_namespace *ns = current->nsproxy->mnt_ns;
	struct mount *mnt;
	bool is_covered = false;

	if (!d_mountpoint(dentry))
		goto out;

	/* linear scan of the namespace's mount list; see comment above for
	 * why the mount hashtable can't be used here */
	down_read(&namespace_sem);
	list_for_each_entry(mnt, &ns->list, mnt_list) {
		is_covered = (mnt->mnt_mountpoint == dentry);
		if (is_covered)
			break;
	}
	up_read(&namespace_sem);
out:
	return is_covered;
}

/*
 * Find an existing struct mountpoint for @dentry and take a reference on
 * it.  Returns NULL if none exists, or ERR_PTR(-ENOENT) for an unlinked
 * dentry.  Caller is expected to hold namespace_sem (all callers in this
 * file do) — TODO confirm whether mount_lock is also required by the
 * current tree's locking rules.
 */
static struct mountpoint *lookup_mountpoint(struct dentry *dentry)
{
	struct hlist_head *chain = mp_hash(dentry);
	struct mountpoint *mp;

	hlist_for_each_entry(mp, chain, m_hash) {
		if (mp->m_dentry == dentry) {
			/* might be worth a WARN_ON() */
			if (d_unlinked(dentry))
				return ERR_PTR(-ENOENT);
			mp->m_count++;
			return mp;
		}
	}
	return NULL;
}

/*
 * Allocate a fresh struct mountpoint for @dentry, mark the dentry as
 * mounted (d_set_mounted may refuse, e.g. propagating its error), and
 * hash the new mountpoint.  Returns ERR_PTR on failure.
 */
static struct mountpoint *new_mountpoint(struct dentry *dentry)
{
	struct hlist_head *chain = mp_hash(dentry);
	struct mountpoint *mp;
	int ret;

	mp = kmalloc(sizeof(struct mountpoint), GFP_KERNEL);
	if (!mp)
		return ERR_PTR(-ENOMEM);

	ret = d_set_mounted(dentry);
	if (ret) {
		kfree(mp);
		return ERR_PTR(ret);
	}

	mp->m_dentry = dentry;
	mp->m_count = 1;
	hlist_add_head(&mp->m_hash, chain);
	INIT_HLIST_HEAD(&mp->m_list);
	return mp;
}

/*
 * Drop a reference on a mountpoint; on the last ref, clear DCACHE_MOUNTED
 * on the dentry and free the structure.
 * NOTE(review): some callers (unlock_mount, __detach_mounts) invoke this
 * outside lock_mount_hash(); later kernels require mount_lock held here to
 * close a race with the hash walk — verify against the current tree.
 */
static void put_mountpoint(struct mountpoint *mp)
{
	if (!--mp->m_count) {
		struct dentry *dentry = mp->m_dentry;
		BUG_ON(!hlist_empty(&mp->m_list));
		spin_lock(&dentry->d_lock);
		dentry->d_flags &= ~DCACHE_MOUNTED;
		spin_unlock(&dentry->d_lock);
		hlist_del(&mp->m_hash);
		kfree(mp);
	}
}

/* Does @mnt belong to the caller's mount namespace? */
static inline int check_mnt(struct mount *mnt)
{
	return mnt->mnt_ns == current->nsproxy->mnt_ns;
}

/*
 * vfsmount lock must be held for write
 */
static void touch_mnt_namespace(struct mnt_namespace *ns)
{
	if (ns) {
		ns->event = ++event;
		wake_up_interruptible(&ns->poll);
	}
}

/*
 * vfsmount lock must be held for write
 */
static void __touch_mnt_namespace(struct mnt_namespace *ns)
{
	/* like touch_mnt_namespace but only bumps/notifies once per event */
	if (ns && ns->event != event) {
		ns->event = event;
		wake_up_interruptible(&ns->poll);
	}
}

/*
 * vfsmount lock must be held for write
 */
/* Take @mnt out of all lookup structures and detach it from its parent,
 * making it its own parent (the "unattached" state). */
static void unhash_mnt(struct mount *mnt)
{
	mnt->mnt_parent = mnt;
	mnt->mnt_mountpoint = mnt->mnt.mnt_root;
	list_del_init(&mnt->mnt_child);
	hlist_del_init_rcu(&mnt->mnt_hash);
	hlist_del_init(&mnt->mnt_mp_list);
	put_mountpoint(mnt->mnt_mp);
	mnt->mnt_mp = NULL;
}

/*
 * vfsmount lock must be held for write
 */
/* Detach @mnt, returning its old parent/mountpoint in @old_path. */
static void detach_mnt(struct mount *mnt, struct path *old_path)
{
	old_path->dentry = mnt->mnt_mountpoint;
	old_path->mnt = &mnt->mnt_parent->mnt;
	unhash_mnt(mnt);
}

/*
 * vfsmount lock must be held for write
 */
static void umount_mnt(struct mount *mnt)
{
	/* old mountpoint will be dropped when we can do that */
	mnt->mnt_ex_mountpoint = mnt->mnt_mountpoint;
	unhash_mnt(mnt);
}

/*
 * vfsmount lock must be held for write
 */
/* Wire @child_mnt under @mnt at @mp: bumps the mountpoint refcount and the
 * parent's mount count (the comment below notes that's effectively mntget). */
void mnt_set_mountpoint(struct mount *mnt,
			struct mountpoint *mp,
			struct mount *child_mnt)
{
	mp->m_count++;
	mnt_add_count(mnt, 1);	/* essentially, that's mntget */
	child_mnt->mnt_mountpoint = dget(mp->m_dentry);
	child_mnt->mnt_parent = mnt;
	child_mnt->mnt_mp = mp;
	hlist_add_head(&child_mnt->mnt_mp_list, &mp->m_list);
}

/*
 * vfsmount lock must be held for write
 */
/* mnt_set_mountpoint plus insertion into the mount hash and the parent's
 * child list. */
static void attach_mnt(struct mount *mnt,
			struct mount *parent,
			struct mountpoint *mp)
{
	mnt_set_mountpoint(parent, mp, mnt);
	hlist_add_head_rcu(&mnt->mnt_hash, m_hash(&parent->mnt, mp->m_dentry));
	list_add_tail(&mnt->mnt_child, &parent->mnt_mounts);
}

/* Insert @mnt into the hash/child lists, positioned directly behind
 * @shadows when a shadowing mount at the same mountpoint exists. */
static void attach_shadowed(struct mount *mnt,
			struct mount *parent,
			struct mount *shadows)
{
	if (shadows) {
		hlist_add_behind_rcu(&mnt->mnt_hash, &shadows->mnt_hash);
		list_add(&mnt->mnt_child, &shadows->mnt_child);
	} else {
		hlist_add_head_rcu(&mnt->mnt_hash,
				m_hash(&parent->mnt, mnt->mnt_mountpoint));
		list_add_tail(&mnt->mnt_child, &parent->mnt_mounts);
	}
}

/*
 * vfsmount lock must be held for write
 */
/* Publish an already-wired subtree into its parent's namespace: stamp every
 * mount in the tree with the namespace, splice the tree onto the namespace
 * list, hash it, and wake pollers. */
static void commit_tree(struct mount *mnt, struct mount *shadows)
{
	struct mount *parent = mnt->mnt_parent;
	struct mount *m;
	LIST_HEAD(head);
	struct mnt_namespace *n = parent->mnt_ns;

	BUG_ON(parent == mnt);

	list_add_tail(&head, &mnt->mnt_list);
	list_for_each_entry(m, &head, mnt_list)
		m->mnt_ns = n;

	list_splice(&head, n->list.prev);

	attach_shadowed(mnt, parent, shadows);
	touch_mnt_namespace(n);
}

/* Depth-first successor of @p within the tree rooted at @root, or NULL
 * when the walk is complete. */
static struct mount *next_mnt(struct mount *p, struct mount *root)
{
	struct list_head *next = p->mnt_mounts.next;
	if (next == &p->mnt_mounts) {
		while (1) {
			if (p == root)
				return NULL;
			next = p->mnt_child.next;
			if (next != &p->mnt_parent->mnt_mounts)
				break;
			p = p->mnt_parent;
		}
	}
	return list_entry(next, struct mount, mnt_child);
}

/* Descend to the last mount of @p's subtree so that next_mnt() skips the
 * whole subtree on the following step. */
static struct mount *skip_mnt_tree(struct mount *p)
{
	struct list_head *prev = p->mnt_mounts.prev;
	while (prev != &p->mnt_mounts) {
		p = list_entry(prev, struct mount, mnt_child);
		prev = p->mnt_mounts.prev;
	}
	return p;
}

/*
 * Create a new, detached mount of filesystem @type: allocate the vfsmount,
 * mount the filesystem (mount_fs), and register the mount on the
 * superblock's s_mounts list.  The result is self-parented until attached.
 */
struct vfsmount *
vfs_kern_mount(struct file_system_type *type, int flags, const char *name, void *data)
{
	struct mount *mnt;
	struct dentry *root;

	if (!type)
		return ERR_PTR(-ENODEV);

	mnt = alloc_vfsmnt(name);
	if (!mnt)
		return ERR_PTR(-ENOMEM);

	if (flags & MS_KERNMOUNT)
		mnt->mnt.mnt_flags = MNT_INTERNAL;

	root = mount_fs(type, flags, name, data);
	if (IS_ERR(root)) {
		mnt_free_id(mnt);
		free_vfsmnt(mnt);
		return ERR_CAST(root);
	}

	mnt->mnt.mnt_root = root;
	mnt->mnt.mnt_sb = root->d_sb;
	mnt->mnt_mountpoint = mnt->mnt.mnt_root;
	mnt->mnt_parent = mnt;
	lock_mount_hash();
	list_add_tail(&mnt->mnt_instance, &root->d_sb->s_mounts);
	unlock_mount_hash();
	return &mnt->mnt;
}
EXPORT_SYMBOL_GPL(vfs_kern_mount);

/*
 * Clone mount @old rooted at @root.  @flag (CL_*) selects peer-group,
 * slave, private, shared and lock semantics for the copy; CL_UNPRIVILEGED
 * pins flags an unprivileged user must not be able to clear on the clone.
 */
static struct mount *clone_mnt(struct mount *old, struct dentry *root,
					int flag)
{
	struct super_block *sb = old->mnt.mnt_sb;
	struct mount *mnt;
	int err;

	mnt = alloc_vfsmnt(old->mnt_devname);
	if (!mnt)
		return ERR_PTR(-ENOMEM);

	if (flag & (CL_SLAVE | CL_PRIVATE | CL_SHARED_TO_SLAVE))
		mnt->mnt_group_id = 0; /* not a peer of original */
	else
		mnt->mnt_group_id = old->mnt_group_id;

	if ((flag & CL_MAKE_SHARED) && !mnt->mnt_group_id) {
		err = mnt_alloc_group_id(mnt);
		if (err)
			goto out_free;
	}

	mnt->mnt.mnt_flags = old->mnt.mnt_flags & ~(MNT_WRITE_HOLD|MNT_MARKED);
	/* Don't allow unprivileged users to change mount flags */
	if (flag & CL_UNPRIVILEGED) {
		mnt->mnt.mnt_flags |= MNT_LOCK_ATIME;

		if (mnt->mnt.mnt_flags & MNT_READONLY)
			mnt->mnt.mnt_flags |= MNT_LOCK_READONLY;

		if (mnt->mnt.mnt_flags & MNT_NODEV)
			mnt->mnt.mnt_flags |= MNT_LOCK_NODEV;

		if (mnt->mnt.mnt_flags & MNT_NOSUID)
			mnt->mnt.mnt_flags |= MNT_LOCK_NOSUID;

		if (mnt->mnt.mnt_flags & MNT_NOEXEC)
			mnt->mnt.mnt_flags |= MNT_LOCK_NOEXEC;
	}

	/* Don't allow unprivileged users to reveal what is under a mount */
	if ((flag & CL_UNPRIVILEGED) &&
	    (!(flag & CL_EXPIRE) || list_empty(&old->mnt_expire)))
		mnt->mnt.mnt_flags |= MNT_LOCKED;

	atomic_inc(&sb->s_active);
	mnt->mnt.mnt_sb = sb;
	mnt->mnt.mnt_root = dget(root);
	mnt->mnt_mountpoint = mnt->mnt.mnt_root;
	mnt->mnt_parent = mnt;
	lock_mount_hash();
	list_add_tail(&mnt->mnt_instance, &sb->s_mounts);
	unlock_mount_hash();

	if ((flag & CL_SLAVE) ||
	    ((flag & CL_SHARED_TO_SLAVE) && IS_MNT_SHARED(old))) {
		list_add(&mnt->mnt_slave, &old->mnt_slave_list);
		mnt->mnt_master = old;
		CLEAR_MNT_SHARED(mnt);
	} else if (!(flag & CL_PRIVATE)) {
		if ((flag & CL_MAKE_SHARED) || IS_MNT_SHARED(old))
			list_add(&mnt->mnt_share, &old->mnt_share);
		if (IS_MNT_SLAVE(old))
			list_add(&mnt->mnt_slave, &old->mnt_slave);
		mnt->mnt_master = old->mnt_master;
	}
	if (flag & CL_MAKE_SHARED)
		set_mnt_shared(mnt);

	/* stick the duplicate mount on the same expiry list
	 * as the original if that was on one */
	if (flag & CL_EXPIRE) {
		if (!list_empty(&old->mnt_expire))
			list_add(&mnt->mnt_expire, &old->mnt_expire);
	}

	return mnt;

 out_free:
	mnt_free_id(mnt);
	free_vfsmnt(mnt);
	return ERR_PTR(err);
}

/*
 * Final teardown of a mount whose last reference is gone: kill pins,
 * notify fsnotify, drop the root dentry and superblock, and free the
 * struct mount after an RCU grace period.
 */
static void cleanup_mnt(struct mount *mnt)
{
	/*
	 * This probably indicates that somebody messed
	 * up a mnt_want/drop_write() pair. If this
	 * happens, the filesystem was probably unable
	 * to make r/w->r/o transitions.
	 */
	/*
	 * The locking used to deal with mnt_count decrement provides barriers,
	 * so mnt_get_writers() below is safe.
	 */
	WARN_ON(mnt_get_writers(mnt));
	if (unlikely(mnt->mnt_pins.first))
		mnt_pin_kill(mnt);
	fsnotify_vfsmount_delete(&mnt->mnt);
	dput(mnt->mnt.mnt_root);
	deactivate_super(mnt->mnt.mnt_sb);
	mnt_free_id(mnt);
	call_rcu(&mnt->mnt_rcu, delayed_free_vfsmnt);
}

/* task_work / RCU callback wrapper for cleanup_mnt */
static void __cleanup_mnt(struct rcu_head *head)
{
	cleanup_mnt(container_of(head, struct mount, mnt_rcu));
}

static LLIST_HEAD(delayed_mntput_list);
/* Workqueue handler: drain the lock-free list of mounts whose final put
 * happened in a context that couldn't clean up synchronously. */
static void delayed_mntput(struct work_struct *unused)
{
	struct llist_node *node = llist_del_all(&delayed_mntput_list);
	struct llist_node *next;

	for (; node; node = next) {
		next = llist_next(node);
		cleanup_mnt(llist_entry(node, struct mount, mnt_llist));
	}
}
static DECLARE_DELAYED_WORK(delayed_mntput_work, delayed_mntput);

/*
 * Drop a reference on @mnt without touching the expiry mark.  If this was
 * the last reference on a namespace-less mount, mark it MNT_DOOMED, detach
 * any leftover children, and defer destruction: via task_work for normal
 * tasks, via the delayed workqueue for kthreads, or inline for MNT_INTERNAL
 * mounts.
 */
static void mntput_no_expire(struct mount *mnt)
{
	rcu_read_lock();
	mnt_add_count(mnt, -1);
	if (likely(mnt->mnt_ns)) { /* shouldn't be the last one */
		rcu_read_unlock();
		return;
	}
	lock_mount_hash();
	if (mnt_get_count(mnt)) {
		rcu_read_unlock();
		unlock_mount_hash();
		return;
	}
	if (unlikely(mnt->mnt.mnt_flags & MNT_DOOMED)) {
		rcu_read_unlock();
		unlock_mount_hash();
		return;
	}
	mnt->mnt.mnt_flags |= MNT_DOOMED;
	rcu_read_unlock();

	list_del(&mnt->mnt_instance);

	if (unlikely(!list_empty(&mnt->mnt_mounts))) {
		struct mount *p, *tmp;
		list_for_each_entry_safe(p, tmp, &mnt->mnt_mounts,  mnt_child) {
			umount_mnt(p);
		}
	}
	unlock_mount_hash();

	if (likely(!(mnt->mnt.mnt_flags & MNT_INTERNAL))) {
		struct task_struct *task = current;
		if (likely(!(task->flags & PF_KTHREAD))) {
			init_task_work(&mnt->mnt_rcu, __cleanup_mnt);
			if (!task_work_add(task, &mnt->mnt_rcu, true))
				return;
		}
		if (llist_add(&mnt->mnt_llist, &delayed_mntput_list))
			schedule_delayed_work(&delayed_mntput_work, 1);
		return;
	}
	cleanup_mnt(mnt);
}

/* Public reference drop: also clears the expiry mark first. */
void mntput(struct vfsmount *mnt)
{
	if (mnt) {
		struct mount *m = real_mount(mnt);
		/* avoid cacheline pingpong, hope gcc doesn't get "smart" */
		if (unlikely(m->mnt_expiry_mark))
			m->mnt_expiry_mark = 0;
		mntput_no_expire(m);
	}
}
EXPORT_SYMBOL(mntput);

/* Take a reference on @mnt (NULL-safe) and return it. */
struct vfsmount *mntget(struct vfsmount *mnt)
{
	if (mnt)
		mnt_add_count(real_mount(mnt), 1);
	return mnt;
}
EXPORT_SYMBOL(mntget);

/* Private, kernel-internal clone of @path's mount (MNT_INTERNAL set). */
struct vfsmount *mnt_clone_internal(struct path *path)
{
	struct mount *p;
	p = clone_mnt(real_mount(path->mnt), path->dentry, CL_PRIVATE);
	if (IS_ERR(p))
		return ERR_CAST(p);
	p->mnt.mnt_flags |= MNT_INTERNAL;
	return &p->mnt;
}

/* Escape whitespace/backslash when emitting strings into /proc seq files. */
static inline void mangle(struct seq_file *m, const char *s)
{
	seq_escape(m, s, " \t\n\\");
}

/*
 * Simple .show_options callback for filesystems which don't want to
 * implement more complex mount option showing.
 *
 * See also save_mount_options().
 */
int generic_show_options(struct seq_file *m, struct dentry *root)
{
	const char *options;

	rcu_read_lock();
	options = rcu_dereference(root->d_sb->s_options);

	if (options != NULL && options[0]) {
		seq_putc(m, ',');
		mangle(m, options);
	}
	rcu_read_unlock();

	return 0;
}
EXPORT_SYMBOL(generic_show_options);

/*
 * If filesystem uses generic_show_options(), this function should be
 * called from the fill_super() callback.
 *
 * The .remount_fs callback usually needs to be handled in a special
 * way, to make sure, that previous options are not overwritten if the
 * remount fails.
 *
 * Also note, that if the filesystem's .remount_fs function doesn't
 * reset all options to their default value, but changes only newly
 * given options, then the displayed options will not reflect reality
 * any more.
 */
void save_mount_options(struct super_block *sb, char *options)
{
	BUG_ON(sb->s_options);
	rcu_assign_pointer(sb->s_options, kstrdup(options, GFP_KERNEL));
}
EXPORT_SYMBOL(save_mount_options);

/* RCU-safe swap of the saved options string; old copy freed after grace. */
void replace_mount_options(struct super_block *sb, char *options)
{
	char *old = sb->s_options;
	rcu_assign_pointer(sb->s_options, options);
	if (old) {
		synchronize_rcu();
		kfree(old);
	}
}
EXPORT_SYMBOL(replace_mount_options);

#ifdef CONFIG_PROC_FS
/* iterator; we want it to have access to namespace_sem, thus here...
*/ static void *m_start(struct seq_file *m, loff_t *pos) { struct proc_mounts *p = proc_mounts(m); down_read(&namespace_sem); if (p->cached_event == p->ns->event) { void *v = p->cached_mount; if (*pos == p->cached_index) return v; if (*pos == p->cached_index + 1) { v = seq_list_next(v, &p->ns->list, &p->cached_index); return p->cached_mount = v; } } p->cached_event = p->ns->event; p->cached_mount = seq_list_start(&p->ns->list, *pos); p->cached_index = *pos; return p->cached_mount; } static void *m_next(struct seq_file *m, void *v, loff_t *pos) { struct proc_mounts *p = proc_mounts(m); p->cached_mount = seq_list_next(v, &p->ns->list, pos); p->cached_index = *pos; return p->cached_mount; } static void m_stop(struct seq_file *m, void *v) { up_read(&namespace_sem); } static int m_show(struct seq_file *m, void *v) { struct proc_mounts *p = proc_mounts(m); struct mount *r = list_entry(v, struct mount, mnt_list); return p->show(m, &r->mnt); } const struct seq_operations mounts_op = { .start = m_start, .next = m_next, .stop = m_stop, .show = m_show, }; #endif /* CONFIG_PROC_FS */ /** * may_umount_tree - check if a mount tree is busy * @mnt: root of mount tree * * This is called to check if a tree of mounts has any * open files, pwds, chroots or sub mounts that are * busy. */ int may_umount_tree(struct vfsmount *m) { struct mount *mnt = real_mount(m); int actual_refs = 0; int minimum_refs = 0; struct mount *p; BUG_ON(!m); /* write lock needed for mnt_get_count */ lock_mount_hash(); for (p = mnt; p; p = next_mnt(p, mnt)) { actual_refs += mnt_get_count(p); minimum_refs += 2; } unlock_mount_hash(); if (actual_refs > minimum_refs) return 0; return 1; } EXPORT_SYMBOL(may_umount_tree); /** * may_umount - check if a mount point is busy * @mnt: root of mount * * This is called to check if a mount point has any * open files, pwds, chroots or sub mounts. If the * mount has sub mounts this will return busy * regardless of whether the sub mounts are busy. 
* * Doesn't take quota and stuff into account. IOW, in some cases it will * give false negatives. The main reason why it's here is that we need * a non-destructive way to look for easily umountable filesystems. */ int may_umount(struct vfsmount *mnt) { int ret = 1; down_read(&namespace_sem); lock_mount_hash(); if (propagate_mount_busy(real_mount(mnt), 2)) ret = 0; unlock_mount_hash(); up_read(&namespace_sem); return ret; } EXPORT_SYMBOL(may_umount); static HLIST_HEAD(unmounted); /* protected by namespace_sem */ static void namespace_unlock(void) { struct hlist_head head; hlist_move_list(&unmounted, &head); up_write(&namespace_sem); if (likely(hlist_empty(&head))) return; synchronize_rcu(); group_pin_kill(&head); } static inline void namespace_lock(void) { down_write(&namespace_sem); } enum umount_tree_flags { UMOUNT_SYNC = 1, UMOUNT_PROPAGATE = 2, }; /* * mount_lock must be held * namespace_sem must be held for write */ static void umount_tree(struct mount *mnt, enum umount_tree_flags how) { LIST_HEAD(tmp_list); struct mount *p; if (how & UMOUNT_PROPAGATE) propagate_mount_unlock(mnt); /* Gather the mounts to umount */ for (p = mnt; p; p = next_mnt(p, mnt)) { p->mnt.mnt_flags |= MNT_UMOUNT; list_move(&p->mnt_list, &tmp_list); } /* Hide the mounts from mnt_mounts */ list_for_each_entry(p, &tmp_list, mnt_list) { list_del_init(&p->mnt_child); } /* Add propogated mounts to the tmp_list */ if (how & UMOUNT_PROPAGATE) propagate_umount(&tmp_list); while (!list_empty(&tmp_list)) { bool disconnect; p = list_first_entry(&tmp_list, struct mount, mnt_list); list_del_init(&p->mnt_expire); list_del_init(&p->mnt_list); __touch_mnt_namespace(p->mnt_ns); p->mnt_ns = NULL; if (how & UMOUNT_SYNC) p->mnt.mnt_flags |= MNT_SYNC_UMOUNT; disconnect = !IS_MNT_LOCKED_AND_LAZY(p); pin_insert_group(&p->mnt_umount, &p->mnt_parent->mnt, disconnect ? 
&unmounted : NULL); if (mnt_has_parent(p)) { mnt_add_count(p->mnt_parent, -1); if (!disconnect) { /* Don't forget about p */ list_add_tail(&p->mnt_child, &p->mnt_parent->mnt_mounts); } else { umount_mnt(p); } } change_mnt_propagation(p, MS_PRIVATE); } } static void shrink_submounts(struct mount *mnt); static int do_umount(struct mount *mnt, int flags) { struct super_block *sb = mnt->mnt.mnt_sb; int retval; retval = security_sb_umount(&mnt->mnt, flags); if (retval) return retval; /* * Allow userspace to request a mountpoint be expired rather than * unmounting unconditionally. Unmount only happens if: * (1) the mark is already set (the mark is cleared by mntput()) * (2) the usage count == 1 [parent vfsmount] + 1 [sys_umount] */ if (flags & MNT_EXPIRE) { if (&mnt->mnt == current->fs->root.mnt || flags & (MNT_FORCE | MNT_DETACH)) return -EINVAL; /* * probably don't strictly need the lock here if we examined * all race cases, but it's a slowpath. */ lock_mount_hash(); if (mnt_get_count(mnt) != 2) { unlock_mount_hash(); return -EBUSY; } unlock_mount_hash(); if (!xchg(&mnt->mnt_expiry_mark, 1)) return -EAGAIN; } /* * If we may have to abort operations to get out of this * mount, and they will themselves hold resources we must * allow the fs to do things. In the Unix tradition of * 'Gee thats tricky lets do it in userspace' the umount_begin * might fail to complete on the first run through as other tasks * must return, and the like. Thats for the mount program to worry * about for the moment. */ if (flags & MNT_FORCE && sb->s_op->umount_begin) { sb->s_op->umount_begin(sb); } /* * No sense to grab the lock for this test, but test itself looks * somewhat bogus. Suggestions for better replacement? * Ho-hum... In principle, we might treat that as umount + switch * to rootfs. GC would eventually take care of the old vfsmount. * Actually it makes sense, especially if rootfs would contain a * /reboot - static binary that would close all descriptors and * call reboot(9). 
Then init(8) could umount root and exec /reboot. */ if (&mnt->mnt == current->fs->root.mnt && !(flags & MNT_DETACH)) { /* * Special case for "unmounting" root ... * we just try to remount it readonly. */ if (!capable(CAP_SYS_ADMIN)) return -EPERM; down_write(&sb->s_umount); if (!(sb->s_flags & MS_RDONLY)) retval = do_remount_sb(sb, MS_RDONLY, NULL, 0); up_write(&sb->s_umount); return retval; } namespace_lock(); lock_mount_hash(); event++; if (flags & MNT_DETACH) { if (!list_empty(&mnt->mnt_list)) umount_tree(mnt, UMOUNT_PROPAGATE); retval = 0; } else { shrink_submounts(mnt); retval = -EBUSY; if (!propagate_mount_busy(mnt, 2)) { if (!list_empty(&mnt->mnt_list)) umount_tree(mnt, UMOUNT_PROPAGATE|UMOUNT_SYNC); retval = 0; } } unlock_mount_hash(); namespace_unlock(); return retval; } /* * __detach_mounts - lazily unmount all mounts on the specified dentry * * During unlink, rmdir, and d_drop it is possible to loose the path * to an existing mountpoint, and wind up leaking the mount. * detach_mounts allows lazily unmounting those mounts instead of * leaking them. * * The caller may hold dentry->d_inode->i_mutex. */ void __detach_mounts(struct dentry *dentry) { struct mountpoint *mp; struct mount *mnt; namespace_lock(); mp = lookup_mountpoint(dentry); if (!mp) goto out_unlock; lock_mount_hash(); while (!hlist_empty(&mp->m_list)) { mnt = hlist_entry(mp->m_list.first, struct mount, mnt_mp_list); if (mnt->mnt.mnt_flags & MNT_UMOUNT) { struct mount *p, *tmp; list_for_each_entry_safe(p, tmp, &mnt->mnt_mounts, mnt_child) { hlist_add_head(&p->mnt_umount.s_list, &unmounted); umount_mnt(p); } } else umount_tree(mnt, 0); } unlock_mount_hash(); put_mountpoint(mp); out_unlock: namespace_unlock(); } /* * Is the caller allowed to modify his namespace? */ static inline bool may_mount(void) { return ns_capable(current->nsproxy->mnt_ns->user_ns, CAP_SYS_ADMIN); } /* * Now umount can handle mount points as well as block devices. 
* This is important for filesystems which use unnamed block devices. * * We now support a flag for forced unmount like the other 'big iron' * unixes. Our API is identical to OSF/1 to avoid making a mess of AMD */ SYSCALL_DEFINE2(umount, char __user *, name, int, flags) { struct path path; struct mount *mnt; int retval; int lookup_flags = 0; if (flags & ~(MNT_FORCE | MNT_DETACH | MNT_EXPIRE | UMOUNT_NOFOLLOW)) return -EINVAL; if (!may_mount()) return -EPERM; if (!(flags & UMOUNT_NOFOLLOW)) lookup_flags |= LOOKUP_FOLLOW; retval = user_path_mountpoint_at(AT_FDCWD, name, lookup_flags, &path); if (retval) goto out; mnt = real_mount(path.mnt); retval = -EINVAL; if (path.dentry != path.mnt->mnt_root) goto dput_and_out; if (!check_mnt(mnt)) goto dput_and_out; if (mnt->mnt.mnt_flags & MNT_LOCKED) goto dput_and_out; retval = -EPERM; if (flags & MNT_FORCE && !capable(CAP_SYS_ADMIN)) goto dput_and_out; retval = do_umount(mnt, flags); dput_and_out: /* we mustn't call path_put() as that would clear mnt_expiry_mark */ dput(path.dentry); mntput_no_expire(mnt); out: return retval; } #ifdef __ARCH_WANT_SYS_OLDUMOUNT /* * The 2.0 compatible umount. No flags. */ SYSCALL_DEFINE1(oldumount, char __user *, name) { return sys_umount(name, 0); } #endif static bool is_mnt_ns_file(struct dentry *dentry) { /* Is this a proxy for a mount namespace? */ return dentry->d_op == &ns_dentry_operations && dentry->d_fsdata == &mntns_operations; } struct mnt_namespace *to_mnt_ns(struct ns_common *ns) { return container_of(ns, struct mnt_namespace, ns); } static bool mnt_ns_loop(struct dentry *dentry) { /* Could bind mounting the mount namespace inode cause a * mount namespace loop? 
*/ struct mnt_namespace *mnt_ns; if (!is_mnt_ns_file(dentry)) return false; mnt_ns = to_mnt_ns(get_proc_ns(dentry->d_inode)); return current->nsproxy->mnt_ns->seq >= mnt_ns->seq; } struct mount *copy_tree(struct mount *mnt, struct dentry *dentry, int flag) { struct mount *res, *p, *q, *r, *parent; if (!(flag & CL_COPY_UNBINDABLE) && IS_MNT_UNBINDABLE(mnt)) return ERR_PTR(-EINVAL); if (!(flag & CL_COPY_MNT_NS_FILE) && is_mnt_ns_file(dentry)) return ERR_PTR(-EINVAL); res = q = clone_mnt(mnt, dentry, flag); if (IS_ERR(q)) return q; q->mnt_mountpoint = mnt->mnt_mountpoint; p = mnt; list_for_each_entry(r, &mnt->mnt_mounts, mnt_child) { struct mount *s; if (!is_subdir(r->mnt_mountpoint, dentry)) continue; for (s = r; s; s = next_mnt(s, r)) { struct mount *t = NULL; if (!(flag & CL_COPY_UNBINDABLE) && IS_MNT_UNBINDABLE(s)) { s = skip_mnt_tree(s); continue; } if (!(flag & CL_COPY_MNT_NS_FILE) && is_mnt_ns_file(s->mnt.mnt_root)) { s = skip_mnt_tree(s); continue; } while (p != s->mnt_parent) { p = p->mnt_parent; q = q->mnt_parent; } p = s; parent = q; q = clone_mnt(p, p->mnt.mnt_root, flag); if (IS_ERR(q)) goto out; lock_mount_hash(); list_add_tail(&q->mnt_list, &res->mnt_list); mnt_set_mountpoint(parent, p->mnt_mp, q); if (!list_empty(&parent->mnt_mounts)) { t = list_last_entry(&parent->mnt_mounts, struct mount, mnt_child); if (t->mnt_mp != p->mnt_mp) t = NULL; } attach_shadowed(q, parent, t); unlock_mount_hash(); } } return res; out: if (res) { lock_mount_hash(); umount_tree(res, UMOUNT_SYNC); unlock_mount_hash(); } return q; } /* Caller should check returned pointer for errors */ struct vfsmount *collect_mounts(struct path *path) { struct mount *tree; namespace_lock(); if (!check_mnt(real_mount(path->mnt))) tree = ERR_PTR(-EINVAL); else tree = copy_tree(real_mount(path->mnt), path->dentry, CL_COPY_ALL | CL_PRIVATE); namespace_unlock(); if (IS_ERR(tree)) return ERR_CAST(tree); return &tree->mnt; } void drop_collected_mounts(struct vfsmount *mnt) { namespace_lock(); 
lock_mount_hash(); umount_tree(real_mount(mnt), UMOUNT_SYNC); unlock_mount_hash(); namespace_unlock(); } /** * clone_private_mount - create a private clone of a path * * This creates a new vfsmount, which will be the clone of @path. The new will * not be attached anywhere in the namespace and will be private (i.e. changes * to the originating mount won't be propagated into this). * * Release with mntput(). */ struct vfsmount *clone_private_mount(struct path *path) { struct mount *old_mnt = real_mount(path->mnt); struct mount *new_mnt; if (IS_MNT_UNBINDABLE(old_mnt)) return ERR_PTR(-EINVAL); down_read(&namespace_sem); new_mnt = clone_mnt(old_mnt, path->dentry, CL_PRIVATE); up_read(&namespace_sem); if (IS_ERR(new_mnt)) return ERR_CAST(new_mnt); return &new_mnt->mnt; } EXPORT_SYMBOL_GPL(clone_private_mount); int iterate_mounts(int (*f)(struct vfsmount *, void *), void *arg, struct vfsmount *root) { struct mount *mnt; int res = f(root, arg); if (res) return res; list_for_each_entry(mnt, &real_mount(root)->mnt_list, mnt_list) { res = f(&mnt->mnt, arg); if (res) return res; } return 0; } static void cleanup_group_ids(struct mount *mnt, struct mount *end) { struct mount *p; for (p = mnt; p != end; p = next_mnt(p, mnt)) { if (p->mnt_group_id && !IS_MNT_SHARED(p)) mnt_release_group_id(p); } } static int invent_group_ids(struct mount *mnt, bool recurse) { struct mount *p; for (p = mnt; p; p = recurse ? next_mnt(p, mnt) : NULL) { if (!p->mnt_group_id && !IS_MNT_SHARED(p)) { int err = mnt_alloc_group_id(p); if (err) { cleanup_group_ids(mnt, p); return err; } } } return 0; } /* * @source_mnt : mount tree to be attached * @nd : place the mount tree @source_mnt is attached * @parent_nd : if non-null, detach the source_mnt from its parent and * store the parent mount and mountpoint dentry. * (done when source_mnt is moved) * * NOTE: in the table below explains the semantics when a source mount * of a given type is attached to a destination mount of a given type. 
* --------------------------------------------------------------------------- * | BIND MOUNT OPERATION | * |************************************************************************** * | source-->| shared | private | slave | unbindable | * | dest | | | | | * | | | | | | | * | v | | | | | * |************************************************************************** * | shared | shared (++) | shared (+) | shared(+++)| invalid | * | | | | | | * |non-shared| shared (+) | private | slave (*) | invalid | * *************************************************************************** * A bind operation clones the source mount and mounts the clone on the * destination mount. * * (++) the cloned mount is propagated to all the mounts in the propagation * tree of the destination mount and the cloned mount is added to * the peer group of the source mount. * (+) the cloned mount is created under the destination mount and is marked * as shared. The cloned mount is added to the peer group of the source * mount. * (+++) the mount is propagated to all the mounts in the propagation tree * of the destination mount and the cloned mount is made slave * of the same master as that of the source mount. The cloned mount * is marked as 'shared and slave'. * (*) the cloned mount is made a slave of the same master as that of the * source mount. * * --------------------------------------------------------------------------- * | MOVE MOUNT OPERATION | * |************************************************************************** * | source-->| shared | private | slave | unbindable | * | dest | | | | | * | | | | | | | * | v | | | | | * |************************************************************************** * | shared | shared (+) | shared (+) | shared(+++) | invalid | * | | | | | | * |non-shared| shared (+*) | private | slave (*) | unbindable | * *************************************************************************** * * (+) the mount is moved to the destination. 
And is then propagated to
 * all the mounts in the propagation tree of the destination mount.
 * (+*)  the mount is moved to the destination.
 * (+++)  the mount is moved to the destination and is then propagated to
 * 	all the mounts belonging to the destination mount's propagation tree.
 * 	the mount is marked as 'shared and slave'.
 * (*)	the mount continues to be a slave at the new location.
 *
 * if the source mount is a tree, the operations explained above is
 * applied to each mount in the tree.
 * Must be called without spinlocks held, since this function can sleep
 * in allocations.
 */
static int attach_recursive_mnt(struct mount *source_mnt,
			struct mount *dest_mnt,
			struct mountpoint *dest_mp,
			struct path *parent_path)
{
	HLIST_HEAD(tree_list);
	struct mount *child, *p;
	struct hlist_node *n;
	int err;

	if (IS_MNT_SHARED(dest_mnt)) {
		/* Destination is shared: the clone(s) need peer-group ids
		 * and must be propagated to every peer/slave of dest. */
		err = invent_group_ids(source_mnt, true);
		if (err)
			goto out;
		err = propagate_mnt(dest_mnt, dest_mp, source_mnt, &tree_list);
		lock_mount_hash();
		if (err)
			goto out_cleanup_ids;
		for (p = source_mnt; p; p = next_mnt(p, source_mnt))
			set_mnt_shared(p);
	} else {
		lock_mount_hash();
	}
	if (parent_path) {
		/* MS_MOVE case: detach from the old parent first. */
		detach_mnt(source_mnt, parent_path);
		attach_mnt(source_mnt, dest_mnt, dest_mp);
		touch_mnt_namespace(source_mnt->mnt_ns);
	} else {
		mnt_set_mountpoint(dest_mnt, dest_mp, source_mnt);
		commit_tree(source_mnt, NULL);
	}

	/* Commit the propagated copies collected by propagate_mnt(). */
	hlist_for_each_entry_safe(child, n, &tree_list, mnt_hash) {
		struct mount *q;
		hlist_del_init(&child->mnt_hash);
		q = __lookup_mnt_last(&child->mnt_parent->mnt,
				      child->mnt_mountpoint);
		commit_tree(child, q);
	}
	unlock_mount_hash();

	return 0;

 out_cleanup_ids:
	/* Undo: tear down any propagated clones and release group ids. */
	while (!hlist_empty(&tree_list)) {
		child = hlist_entry(tree_list.first, struct mount, mnt_hash);
		umount_tree(child, UMOUNT_SYNC);
	}
	unlock_mount_hash();
	cleanup_group_ids(source_mnt, NULL);
 out:
	return err;
}

/*
 * Lock the mountpoint for a subsequent mount.  Takes the dentry's
 * i_mutex and namespace_sem, following any mount already stacked on
 * the given path so we always mount on top.  Returns a pinned
 * struct mountpoint, or an ERR_PTR.  Pair with unlock_mount().
 */
static struct mountpoint *lock_mount(struct path *path)
{
	struct vfsmount *mnt;
	struct dentry *dentry = path->dentry;
retry:
	mutex_lock(&dentry->d_inode->i_mutex);
	if (unlikely(cant_mount(dentry))) {
		mutex_unlock(&dentry->d_inode->i_mutex);
		return ERR_PTR(-ENOENT);
	}
	namespace_lock();
	mnt = lookup_mnt(path);
	if (likely(!mnt)) {
		struct mountpoint *mp = lookup_mountpoint(dentry);
		if (!mp)
			mp = new_mountpoint(dentry);
		if (IS_ERR(mp)) {
			namespace_unlock();
			mutex_unlock(&dentry->d_inode->i_mutex);
			return mp;
		}
		return mp;
	}
	/* Something is mounted here already; retry on top of it. */
	namespace_unlock();
	mutex_unlock(&path->dentry->d_inode->i_mutex);
	path_put(path);
	path->mnt = mnt;
	dentry = path->dentry = dget(mnt->mnt_root);
	goto retry;
}

/* Release the mountpoint pin and the locks taken by lock_mount(). */
static void unlock_mount(struct mountpoint *where)
{
	struct dentry *dentry = where->m_dentry;
	put_mountpoint(where);
	namespace_unlock();
	mutex_unlock(&dentry->d_inode->i_mutex);
}

/* Attach mnt under p at mp, rejecting kernel-internal superblocks and
 * dir-on-nondir (and vice versa) mounts. */
static int graft_tree(struct mount *mnt, struct mount *p, struct mountpoint *mp)
{
	if (mnt->mnt.mnt_sb->s_flags & MS_NOUSER)
		return -EINVAL;

	if (d_is_dir(mp->m_dentry) !=
	      d_is_dir(mnt->mnt.mnt_root))
		return -ENOTDIR;

	return attach_recursive_mnt(mnt, p, mp, NULL);
}

/*
 * Sanity check the flags to change_mnt_propagation.
 */
static int flags_to_propagation_type(int flags)
{
	int type = flags & ~(MS_REC | MS_SILENT);

	/* Fail if any non-propagation flags are set */
	if (type & ~(MS_SHARED | MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE))
		return 0;
	/* Only one propagation flag should be set */
	if (!is_power_of_2(type))
		return 0;
	return type;
}

/*
 * recursively change the type of the mountpoint.
 */
static int do_change_type(struct path *path, int flag)
{
	struct mount *m;
	struct mount *mnt = real_mount(path->mnt);
	int recurse = flag & MS_REC;
	int type;
	int err = 0;

	/* Propagation type may only be changed at the root of a mount. */
	if (path->dentry != path->mnt->mnt_root)
		return -EINVAL;

	type = flags_to_propagation_type(flag);
	if (!type)
		return -EINVAL;

	namespace_lock();
	if (type == MS_SHARED) {
		err = invent_group_ids(mnt, recurse);
		if (err)
			goto out_unlock;
	}

	lock_mount_hash();
	for (m = mnt; m; m = (recurse ? next_mnt(m, mnt) : NULL))
		change_mnt_propagation(m, type);
	unlock_mount_hash();

 out_unlock:
	namespace_unlock();
	return err;
}

/* True if any direct child mounted under dentry carries MNT_LOCKED
 * (i.e. must not be exposed by a non-recursive bind). */
static bool has_locked_children(struct mount *mnt, struct dentry *dentry)
{
	struct mount *child;
	list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) {
		if (!is_subdir(child->mnt_mountpoint, dentry))
			continue;

		if (child->mnt.mnt_flags & MNT_LOCKED)
			return true;
	}
	return false;
}

/*
 * do loopback mount.
 */
static int do_loopback(struct path *path, const char *old_name,
				int recurse)
{
	struct path old_path;
	struct mount *mnt = NULL, *old, *parent;
	struct mountpoint *mp;
	int err;
	if (!old_name || !*old_name)
		return -EINVAL;
	err = kern_path(old_name, LOOKUP_FOLLOW|LOOKUP_AUTOMOUNT, &old_path);
	if (err)
		return err;

	err = -EINVAL;
	/* Refuse binding an older mount namespace in: would create a
	 * reference-counting loop between namespaces. */
	if (mnt_ns_loop(old_path.dentry))
		goto out;

	mp = lock_mount(path);
	err = PTR_ERR(mp);
	if (IS_ERR(mp))
		goto out;

	old = real_mount(old_path.mnt);
	parent = real_mount(path->mnt);

	err = -EINVAL;
	if (IS_MNT_UNBINDABLE(old))
		goto out2;

	if (!check_mnt(parent))
		goto out2;

	if (!check_mnt(old) && old_path.dentry->d_op != &ns_dentry_operations)
		goto out2;

	/* A non-recursive bind of a tree with locked children would
	 * expose what the locks were hiding. */
	if (!recurse && has_locked_children(old, old_path.dentry))
		goto out2;

	if (recurse)
		mnt = copy_tree(old, old_path.dentry, CL_COPY_MNT_NS_FILE);
	else
		mnt = clone_mnt(old, old_path.dentry, 0);

	if (IS_ERR(mnt)) {
		err = PTR_ERR(mnt);
		goto out2;
	}

	mnt->mnt.mnt_flags &= ~MNT_LOCKED;

	err = graft_tree(mnt, parent, mp);
	if (err) {
		lock_mount_hash();
		umount_tree(mnt, UMOUNT_SYNC);
		unlock_mount_hash();
	}
out2:
	unlock_mount(mp);
out:
	path_put(&old_path);
	return err;
}

/* Flip a mount between read-only and read-write per MS_RDONLY. */
static int change_mount_flags(struct vfsmount *mnt, int ms_flags)
{
	int error = 0;
	int readonly_request = 0;

	if (ms_flags & MS_RDONLY)
		readonly_request = 1;
	if (readonly_request == __mnt_is_readonly(mnt))
		return 0;

	if (readonly_request)
		error = mnt_make_readonly(real_mount(mnt));
	else
		__mnt_unmake_readonly(real_mount(mnt));
	return error;
}

/*
 * change filesystem flags. dir should be a physical root of filesystem.
 * If you've mounted a non-root directory somewhere and want to do remount
 * on it - tough luck.
 */
static int do_remount(struct path *path, int flags, int mnt_flags,
		      void *data)
{
	int err;
	struct super_block *sb = path->mnt->mnt_sb;
	struct mount *mnt = real_mount(path->mnt);

	if (!check_mnt(mnt))
		return -EINVAL;

	if (path->dentry != path->mnt->mnt_root)
		return -EINVAL;

	/* Don't allow changing of locked mnt flags.
	 *
	 * No locks need to be held here while testing the various
	 * MNT_LOCK flags because those flags can never be cleared
	 * once they are set.
	 */
	if ((mnt->mnt.mnt_flags & MNT_LOCK_READONLY) &&
	    !(mnt_flags & MNT_READONLY)) {
		return -EPERM;
	}
	if ((mnt->mnt.mnt_flags & MNT_LOCK_NODEV) &&
	    !(mnt_flags & MNT_NODEV)) {
		/* Was the nodev implicitly added in mount? */
		if ((mnt->mnt_ns->user_ns != &init_user_ns) &&
		    !(sb->s_type->fs_flags & FS_USERNS_DEV_MOUNT)) {
			mnt_flags |= MNT_NODEV;
		} else {
			return -EPERM;
		}
	}
	if ((mnt->mnt.mnt_flags & MNT_LOCK_NOSUID) &&
	    !(mnt_flags & MNT_NOSUID)) {
		return -EPERM;
	}
	if ((mnt->mnt.mnt_flags & MNT_LOCK_NOEXEC) &&
	    !(mnt_flags & MNT_NOEXEC)) {
		return -EPERM;
	}
	if ((mnt->mnt.mnt_flags & MNT_LOCK_ATIME) &&
	    ((mnt->mnt.mnt_flags & MNT_ATIME_MASK) != (mnt_flags & MNT_ATIME_MASK))) {
		return -EPERM;
	}

	err = security_sb_remount(sb, data);
	if (err)
		return err;

	down_write(&sb->s_umount);
	if (flags & MS_BIND)
		err = change_mount_flags(path->mnt, flags);
	else if (!capable(CAP_SYS_ADMIN))
		err = -EPERM;
	else
		err = do_remount_sb(sb, flags, data, 0);
	if (!err) {
		lock_mount_hash();
		mnt_flags |= mnt->mnt.mnt_flags & ~MNT_USER_SETTABLE_MASK;
		mnt->mnt.mnt_flags = mnt_flags;
		touch_mnt_namespace(mnt->mnt_ns);
		unlock_mount_hash();
	}
	up_write(&sb->s_umount);
	return err;
}

/* True if any mount in the tree rooted at mnt is marked unbindable. */
static inline int tree_contains_unbindable(struct mount *mnt)
{
	struct mount *p;
	for (p = mnt; p; p = next_mnt(p, mnt)) {
		if (IS_MNT_UNBINDABLE(p))
			return 1;
	}
	return 0;
}

/* MS_MOVE: detach the mount at old_name and reattach it at path. */
static int do_move_mount(struct path *path, const char *old_name)
{
	struct path old_path, parent_path;
	struct mount *p;
	struct mount *old;
	struct mountpoint *mp;
	int err;
	if (!old_name || !*old_name)
		return -EINVAL;
	err = kern_path(old_name, LOOKUP_FOLLOW, &old_path);
	if (err)
		return err;

	mp = lock_mount(path);
	err = PTR_ERR(mp);
	if (IS_ERR(mp))
		goto out;

	old = real_mount(old_path.mnt);
	p = real_mount(path->mnt);

	err = -EINVAL;
	if (!check_mnt(p) || !check_mnt(old))
		goto out1;

	if (old->mnt.mnt_flags & MNT_LOCKED)
		goto out1;

	err = -EINVAL;
	if (old_path.dentry != old_path.mnt->mnt_root)
		goto out1;

	if (!mnt_has_parent(old))
		goto out1;

	if (d_is_dir(path->dentry) !=
	      d_is_dir(old_path.dentry))
		goto out1;
	/*
	 * Don't move a mount residing in a shared parent.
	 */
	if (IS_MNT_SHARED(old->mnt_parent))
		goto out1;
	/*
	 * Don't move a mount tree containing unbindable mounts to a destination
	 * mount which is shared.
	 */
	if (IS_MNT_SHARED(p) && tree_contains_unbindable(old))
		goto out1;
	err = -ELOOP;
	/* Moving a mount under its own subtree would orphan it. */
	for (; mnt_has_parent(p); p = p->mnt_parent)
		if (p == old)
			goto out1;

	err = attach_recursive_mnt(old, real_mount(path->mnt), mp, &parent_path);
	if (err)
		goto out1;

	/* if the mount is moved, it should no longer be expire
	 * automatically */
	list_del_init(&old->mnt_expire);
out1:
	unlock_mount(mp);
out:
	if (!err)
		path_put(&parent_path);
	path_put(&old_path);
	return err;
}

/* Record the "type.subtype" suffix (if any) on the new superblock. */
static struct vfsmount *fs_set_subtype(struct vfsmount *mnt, const char *fstype)
{
	int err;
	const char *subtype = strchr(fstype, '.');
	if (subtype) {
		subtype++;
		err = -EINVAL;
		if (!subtype[0])
			goto err;
	} else
		subtype = "";

	mnt->mnt_sb->s_subtype = kstrdup(subtype, GFP_KERNEL);
	err = -ENOMEM;
	if (!mnt->mnt_sb->s_subtype)
		goto err;
	return mnt;

 err:
	mntput(mnt);
	return ERR_PTR(err);
}

/*
 * add a mount into a namespace's mount tree
 */
static int do_add_mount(struct mount *newmnt, struct path *path, int mnt_flags)
{
	struct mountpoint *mp;
	struct mount *parent;
	int err;

	mnt_flags &= ~MNT_INTERNAL_FLAGS;

	mp = lock_mount(path);
	if (IS_ERR(mp))
		return PTR_ERR(mp);

	parent = real_mount(path->mnt);
	err = -EINVAL;
	if (unlikely(!check_mnt(parent))) {
		/* that's acceptable only for automounts done in private ns */
		if (!(mnt_flags & MNT_SHRINKABLE))
			goto unlock;
		/* ... and for those we'd better have mountpoint still alive */
		if (!parent->mnt_ns)
			goto unlock;
	}

	/* Refuse the same filesystem on the same mount point */
	err = -EBUSY;
	if (path->mnt->mnt_sb == newmnt->mnt.mnt_sb &&
	    path->mnt->mnt_root == path->dentry)
		goto unlock;

	err = -EINVAL;
	if (d_is_symlink(newmnt->mnt.mnt_root))
		goto unlock;

	newmnt->mnt.mnt_flags = mnt_flags;
	err = graft_tree(newmnt, parent, mp);

unlock:
	unlock_mount(mp);
	return err;
}

/*
 * create a new mount for userspace and request it to be added into the
 * namespace's tree
 */
static int do_new_mount(struct path *path, const char *fstype, int flags,
			int mnt_flags, const char *name, void *data)
{
	struct file_system_type *type;
	struct user_namespace *user_ns = current->nsproxy->mnt_ns->user_ns;
	struct vfsmount *mnt;
	int err;

	if (!fstype)
		return -EINVAL;

	type = get_fs_type(fstype);
	if (!type)
		return -ENODEV;

	if (user_ns != &init_user_ns) {
		/* Unprivileged user namespaces may only mount fs types that
		 * opted in via FS_USERNS_MOUNT. */
		if (!(type->fs_flags & FS_USERNS_MOUNT)) {
			put_filesystem(type);
			return -EPERM;
		}
		/* Only in special cases allow devices from mounts
		 * created outside the initial user namespace.
		 */
		if (!(type->fs_flags & FS_USERNS_DEV_MOUNT)) {
			flags |= MS_NODEV;
			mnt_flags |= MNT_NODEV | MNT_LOCK_NODEV;
		}
	}

	mnt = vfs_kern_mount(type, flags, name, data);
	if (!IS_ERR(mnt) && (type->fs_flags & FS_HAS_SUBTYPE) &&
	    !mnt->mnt_sb->s_subtype)
		mnt = fs_set_subtype(mnt, fstype);

	put_filesystem(type);
	if (IS_ERR(mnt))
		return PTR_ERR(mnt);

	err = do_add_mount(real_mount(mnt), path, mnt_flags);
	if (err)
		mntput(mnt);
	return err;
}

/* Complete an in-kernel automount: attach m at path or drop it. */
int finish_automount(struct vfsmount *m, struct path *path)
{
	struct mount *mnt = real_mount(m);
	int err;
	/* The new mount record should have at least 2 refs to prevent it being
	 * expired before we get a chance to add it
	 */
	BUG_ON(mnt_get_count(mnt) < 2);

	if (m->mnt_sb == path->mnt->mnt_sb &&
	    m->mnt_root == path->dentry) {
		err = -ELOOP;
		goto fail;
	}

	err = do_add_mount(mnt, path, path->mnt->mnt_flags | MNT_SHRINKABLE);
	if (!err)
		return 0;
fail:
	/* remove m from any expiration list it may be on */
	if (!list_empty(&mnt->mnt_expire)) {
		namespace_lock();
		list_del_init(&mnt->mnt_expire);
		namespace_unlock();
	}
	/* Drop both references noted in the BUG_ON above. */
	mntput(m);
	mntput(m);
	return err;
}

/**
 * mnt_set_expiry - Put a mount on an expiration list
 * @mnt: The mount to list.
 * @expiry_list: The list to add the mount to.
 */
void mnt_set_expiry(struct vfsmount *mnt, struct list_head *expiry_list)
{
	namespace_lock();

	list_add_tail(&real_mount(mnt)->mnt_expire, expiry_list);

	namespace_unlock();
}
EXPORT_SYMBOL(mnt_set_expiry);

/*
 * process a list of expirable mountpoints with the intent of discarding any
 * mountpoints that aren't in use and haven't been touched since last we came
 * here
 */
void mark_mounts_for_expiry(struct list_head *mounts)
{
	struct mount *mnt, *next;
	LIST_HEAD(graveyard);

	if (list_empty(mounts))
		return;

	namespace_lock();
	lock_mount_hash();

	/* extract from the expiration list every vfsmount that matches the
	 * following criteria:
	 * - only referenced by its parent vfsmount
	 * - still marked for expiry (marked on the last call here; marks are
	 *   cleared by mntput())
	 */
	list_for_each_entry_safe(mnt, next, mounts, mnt_expire) {
		if (!xchg(&mnt->mnt_expiry_mark, 1) ||
			propagate_mount_busy(mnt, 1))
			continue;
		list_move(&mnt->mnt_expire, &graveyard);
	}
	while (!list_empty(&graveyard)) {
		mnt = list_first_entry(&graveyard, struct mount, mnt_expire);
		touch_mnt_namespace(mnt->mnt_ns);
		umount_tree(mnt, UMOUNT_PROPAGATE|UMOUNT_SYNC);
	}
	unlock_mount_hash();
	namespace_unlock();
}

EXPORT_SYMBOL_GPL(mark_mounts_for_expiry);

/*
 * Ripoff of 'select_parent()'
 *
 * search the list of submounts for a given mountpoint, and move any
 * shrinkable submounts to the 'graveyard' list.
 */
static int select_submounts(struct mount *parent, struct list_head *graveyard)
{
	struct mount *this_parent = parent;
	struct list_head *next;
	int found = 0;

repeat:
	next = this_parent->mnt_mounts.next;
resume:
	while (next != &this_parent->mnt_mounts) {
		struct list_head *tmp = next;
		struct mount *mnt = list_entry(tmp, struct mount, mnt_child);

		next = tmp->next;
		if (!(mnt->mnt.mnt_flags & MNT_SHRINKABLE))
			continue;
		/*
		 * Descend a level if the d_mounts list is non-empty.
		 */
		if (!list_empty(&mnt->mnt_mounts)) {
			this_parent = mnt;
			goto repeat;
		}

		if (!propagate_mount_busy(mnt, 1)) {
			list_move_tail(&mnt->mnt_expire, graveyard);
			found++;
		}
	}
	/*
	 * All done at this level ... ascend and resume the search
	 */
	if (this_parent != parent) {
		next = this_parent->mnt_child.next;
		this_parent = this_parent->mnt_parent;
		goto resume;
	}
	return found;
}

/*
 * process a list of expirable mountpoints with the intent of discarding any
 * submounts of a specific parent mountpoint
 *
 * mount_lock must be held for write
 */
static void shrink_submounts(struct mount *mnt)
{
	LIST_HEAD(graveyard);
	struct mount *m;

	/* extract submounts of 'mountpoint' from the expiration list */
	while (select_submounts(mnt, &graveyard)) {
		while (!list_empty(&graveyard)) {
			m = list_first_entry(&graveyard, struct mount,
						mnt_expire);
			touch_mnt_namespace(m->mnt_ns);
			umount_tree(m, UMOUNT_PROPAGATE|UMOUNT_SYNC);
		}
	}
}

/*
 * Some copy_from_user() implementations do not return the exact number of
 * bytes remaining to copy on a fault.  But copy_mount_options() requires that.
 * Note that this function differs from copy_from_user() in that it will oops
 * on bad values of `to', rather than returning a short copy.
 */
static long exact_copy_from_user(void *to, const void __user * from,
				 unsigned long n)
{
	char *t = to;
	const char __user *f = from;
	char c;

	if (!access_ok(VERIFY_READ, from, n))
		return n;

	while (n) {
		if (__get_user(c, f)) {
			/* Fault: zero the unread remainder of the buffer. */
			memset(t, 0, n);
			break;
		}
		*t++ = c;
		f++;
		n--;
	}
	return n;
}

/* Copy the userspace mount data blob into one freshly allocated page;
 * *where receives the page address (0 when data is NULL). */
int copy_mount_options(const void __user * data, unsigned long *where)
{
	int i;
	unsigned long page;
	unsigned long size;

	*where = 0;
	if (!data)
		return 0;

	if (!(page = __get_free_page(GFP_KERNEL)))
		return -ENOMEM;

	/* We only care that *some* data at the address the user
	 * gave us is valid.  Just in case, we'll zero
	 * the remainder of the page.
	 */
	/* copy_from_user cannot cross TASK_SIZE ! */
	size = TASK_SIZE - (unsigned long)data;
	if (size > PAGE_SIZE)
		size = PAGE_SIZE;

	i = size - exact_copy_from_user((void *)page, data, size);
	if (!i) {
		free_page(page);
		return -EFAULT;
	}
	if (i != PAGE_SIZE)
		memset((char *)page + i, 0, PAGE_SIZE - i);
	*where = page;
	return 0;
}

/* Dup a NUL-terminated user string (max one page); NULL passes through. */
char *copy_mount_string(const void __user *data)
{
	return data ? strndup_user(data, PAGE_SIZE) : NULL;
}

/*
 * Flags is a 32-bit value that allows up to 31 non-fs dependent flags to
 * be given to the mount() call (ie: read-only, no-dev, no-suid etc).
 *
 * data is a (void *) that can point to any structure up to
 * PAGE_SIZE-1 bytes, which can contain arbitrary fs-dependent
 * information (or be NULL).
 *
 * Pre-0.97 versions of mount() didn't have a flags word.
 * When the flags word was introduced its top half was required
 * to have the magic value 0xC0ED, and this remained so until 2.4.0-test9.
 * Therefore, if this magic number is present, it carries no information
 * and must be discarded.
 */
long do_mount(const char *dev_name, const char __user *dir_name,
		const char *type_page, unsigned long flags, void *data_page)
{
	struct path path;
	int retval = 0;
	int mnt_flags = 0;

	/* Discard magic */
	if ((flags & MS_MGC_MSK) == MS_MGC_VAL)
		flags &= ~MS_MGC_MSK;

	/* Basic sanity checks */
	if (data_page)
		((char *)data_page)[PAGE_SIZE - 1] = 0;

	/* ... and get the mountpoint */
	retval = user_path(dir_name, &path);
	if (retval)
		return retval;

	retval = security_sb_mount(dev_name, &path,
				   type_page, flags, data_page);
	if (!retval && !may_mount())
		retval = -EPERM;
	if (retval)
		goto dput_out;

	/* Default to relatime unless overriden */
	if (!(flags & MS_NOATIME))
		mnt_flags |= MNT_RELATIME;

	/* Separate the per-mountpoint flags */
	if (flags & MS_NOSUID)
		mnt_flags |= MNT_NOSUID;
	if (flags & MS_NODEV)
		mnt_flags |= MNT_NODEV;
	if (flags & MS_NOEXEC)
		mnt_flags |= MNT_NOEXEC;
	if (flags & MS_NOATIME)
		mnt_flags |= MNT_NOATIME;
	if (flags & MS_NODIRATIME)
		mnt_flags |= MNT_NODIRATIME;
	if (flags & MS_STRICTATIME)
		mnt_flags &= ~(MNT_RELATIME | MNT_NOATIME);
	if (flags & MS_RDONLY)
		mnt_flags |= MNT_READONLY;

	/* The default atime for remount is preservation */
	if ((flags & MS_REMOUNT) &&
	    ((flags & (MS_NOATIME | MS_NODIRATIME | MS_RELATIME |
		       MS_STRICTATIME)) == 0)) {
		mnt_flags &= ~MNT_ATIME_MASK;
		mnt_flags |= path.mnt->mnt_flags & MNT_ATIME_MASK;
	}

	flags &= ~(MS_NOSUID | MS_NOEXEC | MS_NODEV | MS_ACTIVE | MS_BORN |
		   MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
		   MS_STRICTATIME);

	/* Dispatch on the operation encoded in the remaining flags. */
	if (flags & MS_REMOUNT)
		retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
				    data_page);
	else if (flags & MS_BIND)
		retval = do_loopback(&path, dev_name, flags & MS_REC);
	else if (flags & (MS_SHARED | MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE))
		retval = do_change_type(&path, flags);
	else if (flags & MS_MOVE)
		retval = do_move_mount(&path, dev_name);
	else
		retval = do_new_mount(&path, type_page, flags, mnt_flags,
				      dev_name, data_page);
dput_out:
	path_put(&path);
	return retval;
}

/* Release a mount namespace's inum, user-ns reference, and storage. */
static void free_mnt_ns(struct mnt_namespace *ns)
{
	ns_free_inum(&ns->ns);
	put_user_ns(ns->user_ns);
	kfree(ns);
}

/*
 * Assign a sequence number so we can detect when we attempt to bind
 * mount a reference to an older mount namespace into the current
 * mount namespace, preventing reference counting loops.  A 64bit
 * number incrementing at 10Ghz will take 12,427 years to wrap which
 * is effectively never, so we can ignore the possibility.
 */
static atomic64_t mnt_ns_seq = ATOMIC64_INIT(1);

static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
{
	struct mnt_namespace *new_ns;
	int ret;

	new_ns = kmalloc(sizeof(struct mnt_namespace), GFP_KERNEL);
	if (!new_ns)
		return ERR_PTR(-ENOMEM);
	ret = ns_alloc_inum(&new_ns->ns);
	if (ret) {
		kfree(new_ns);
		return ERR_PTR(ret);
	}
	new_ns->ns.ops = &mntns_operations;
	new_ns->seq = atomic64_add_return(1, &mnt_ns_seq);
	atomic_set(&new_ns->count, 1);
	new_ns->root = NULL;
	INIT_LIST_HEAD(&new_ns->list);
	init_waitqueue_head(&new_ns->poll);
	new_ns->event = 0;
	new_ns->user_ns = get_user_ns(user_ns);
	return new_ns;
}

/* Duplicate a mount namespace for CLONE_NEWNS, fixing up new_fs's
 * root/pwd to point at the copies. */
struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns,
		struct user_namespace *user_ns, struct fs_struct *new_fs)
{
	struct mnt_namespace *new_ns;
	struct vfsmount *rootmnt = NULL, *pwdmnt = NULL;
	struct mount *p, *q;
	struct mount *old;
	struct mount *new;
	int copy_flags;

	BUG_ON(!ns);

	if (likely(!(flags & CLONE_NEWNS))) {
		get_mnt_ns(ns);
		return ns;
	}

	old = ns->root;

	new_ns = alloc_mnt_ns(user_ns);
	if (IS_ERR(new_ns))
		return new_ns;

	namespace_lock();
	/* First pass: copy the tree topology */
	copy_flags = CL_COPY_UNBINDABLE | CL_EXPIRE;
	if (user_ns != ns->user_ns)
		copy_flags |= CL_SHARED_TO_SLAVE | CL_UNPRIVILEGED;
	new = copy_tree(old, old->mnt.mnt_root, copy_flags);
	if (IS_ERR(new)) {
		namespace_unlock();
		free_mnt_ns(new_ns);
		return ERR_CAST(new);
	}
	new_ns->root = new;
	list_add_tail(&new_ns->list, &new->mnt_list);

	/*
	 * Second pass: switch the tsk->fs->* elements and mark new vfsmounts
	 * as belonging to new namespace.  We have already acquired a private
	 * fs_struct, so tsk->fs->lock is not needed.
	 */
	p = old;
	q = new;
	while (p) {
		q->mnt_ns = new_ns;
		if (new_fs) {
			if (&p->mnt == new_fs->root.mnt) {
				new_fs->root.mnt = mntget(&q->mnt);
				rootmnt = &p->mnt;
			}
			if (&p->mnt == new_fs->pwd.mnt) {
				new_fs->pwd.mnt = mntget(&q->mnt);
				pwdmnt = &p->mnt;
			}
		}
		p = next_mnt(p, old);
		q = next_mnt(q, new);
		if (!q)
			break;
		/* Skip mounts the copy skipped (e.g. unbindable subtrees). */
		while (p->mnt.mnt_root != q->mnt.mnt_root)
			p = next_mnt(p, old);
	}
	namespace_unlock();

	if (rootmnt)
		mntput(rootmnt);
	if (pwdmnt)
		mntput(pwdmnt);

	return new_ns;
}

/**
 * create_mnt_ns - creates a private namespace and adds a root filesystem
 * @mnt: pointer to the new root filesystem mountpoint
 */
static struct mnt_namespace *create_mnt_ns(struct vfsmount *m)
{
	struct mnt_namespace *new_ns = alloc_mnt_ns(&init_user_ns);
	if (!IS_ERR(new_ns)) {
		struct mount *mnt = real_mount(m);
		mnt->mnt_ns = new_ns;
		new_ns->root = mnt;
		list_add(&mnt->mnt_list, &new_ns->list);
	} else {
		mntput(m);
	}
	return new_ns;
}

/* Mount mnt privately, resolve name within it, and return the dentry
 * with the superblock held active and s_umount taken for write. */
struct dentry *mount_subtree(struct vfsmount *mnt, const char *name)
{
	struct mnt_namespace *ns;
	struct super_block *s;
	struct path path;
	int err;

	ns = create_mnt_ns(mnt);
	if (IS_ERR(ns))
		return ERR_CAST(ns);

	err = vfs_path_lookup(mnt->mnt_root, mnt,
			name, LOOKUP_FOLLOW|LOOKUP_AUTOMOUNT, &path);

	put_mnt_ns(ns);

	if (err)
		return ERR_PTR(err);

	/* trade a vfsmount reference for active sb one */
	s = path.mnt->mnt_sb;
	atomic_inc(&s->s_active);
	mntput(path.mnt);
	/* lock the sucker */
	down_write(&s->s_umount);
	/* ... and return the root of (sub)tree on it */
	return path.dentry;
}
EXPORT_SYMBOL(mount_subtree);

/* mount(2): copy user strings/options into kernel space, then do_mount(). */
SYSCALL_DEFINE5(mount, char __user *, dev_name, char __user *, dir_name,
		char __user *, type, unsigned long, flags, void __user *, data)
{
	int ret;
	char *kernel_type;
	char *kernel_dev;
	unsigned long data_page;

	kernel_type = copy_mount_string(type);
	ret = PTR_ERR(kernel_type);
	if (IS_ERR(kernel_type))
		goto out_type;

	kernel_dev = copy_mount_string(dev_name);
	ret = PTR_ERR(kernel_dev);
	if (IS_ERR(kernel_dev))
		goto out_dev;

	ret = copy_mount_options(data, &data_page);
	if (ret < 0)
		goto out_data;

	ret = do_mount(kernel_dev, dir_name, kernel_type, flags,
		(void *) data_page);

	free_page(data_page);
out_data:
	kfree(kernel_dev);
out_dev:
	kfree(kernel_type);
out_type:
	return ret;
}

/*
 * Return true if path is reachable from root
 *
 * namespace_sem or mount_lock is held
 */
bool is_path_reachable(struct mount *mnt, struct dentry *dentry,
			 const struct path *root)
{
	while (&mnt->mnt != root->mnt && mnt_has_parent(mnt)) {
		dentry = mnt->mnt_mountpoint;
		mnt = mnt->mnt_parent;
	}
	return &mnt->mnt == root->mnt && is_subdir(dentry, root->dentry);
}

/* Locked wrapper around is_path_reachable() for external callers. */
int path_is_under(struct path *path1, struct path *path2)
{
	int res;
	read_seqlock_excl(&mount_lock);
	res = is_path_reachable(real_mount(path1->mnt), path1->dentry, path2);
	read_sequnlock_excl(&mount_lock);
	return res;
}
EXPORT_SYMBOL(path_is_under);

/*
 * pivot_root Semantics:
 * Moves the root file system of the current process to the directory put_old,
 * makes new_root as the new root file system of the current process, and sets
 * root/cwd of all processes which had them on the current root to new_root.
 *
 * Restrictions:
 * The new_root and put_old must be directories, and  must not be on the
 * same file  system as the current process root. The put_old  must  be
 * underneath new_root,  i.e. adding a non-zero number of /.. to the string
 * pointed to by put_old must yield the same directory as new_root. No other
 * file system may be mounted on put_old. After all, new_root is a mountpoint.
 *
 * Also, the current root cannot be on the 'rootfs' (initial ramfs) filesystem.
 * See Documentation/filesystems/ramfs-rootfs-initramfs.txt for alternatives
 * in this situation.
 *
 * Notes:
 *  - we don't move root/cwd if they are not at the root (reason: if something
 *    cared enough to change them, it's probably wrong to force them elsewhere)
 *  - it's okay to pick a root that isn't the root of a file system, e.g.
 *    /nfs/my_root where /nfs is the mount point. It must be a mountpoint,
 *    though, so you may need to say mount --bind /nfs/my_root /nfs/my_root
 *    first.
 */
SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
		const char __user *, put_old)
{
	struct path new, old, parent_path, root_parent, root;
	struct mount *new_mnt, *root_mnt, *old_mnt;
	struct mountpoint *old_mp, *root_mp;
	int error;

	if (!may_mount())
		return -EPERM;

	error = user_path_dir(new_root, &new);
	if (error)
		goto out0;

	error = user_path_dir(put_old, &old);
	if (error)
		goto out1;

	error = security_sb_pivotroot(&old, &new);
	if (error)
		goto out2;

	get_fs_root(current->fs, &root);
	old_mp = lock_mount(&old);
	error = PTR_ERR(old_mp);
	if (IS_ERR(old_mp))
		goto out3;

	error = -EINVAL;
	new_mnt = real_mount(new.mnt);
	root_mnt = real_mount(root.mnt);
	old_mnt = real_mount(old.mnt);
	if (IS_MNT_SHARED(old_mnt) ||
		IS_MNT_SHARED(new_mnt->mnt_parent) ||
		IS_MNT_SHARED(root_mnt->mnt_parent))
		goto out4;
	if (!check_mnt(root_mnt) || !check_mnt(new_mnt))
		goto out4;
	if (new_mnt->mnt.mnt_flags & MNT_LOCKED)
		goto out4;
	error = -ENOENT;
	if (d_unlinked(new.dentry))
		goto out4;
	error = -EBUSY;
	if (new_mnt == root_mnt || old_mnt == root_mnt)
		goto out4; /* loop, on the same file system  */
	error = -EINVAL;
	if (root.mnt->mnt_root != root.dentry)
		goto out4; /* not a mountpoint */
	if (!mnt_has_parent(root_mnt))
		goto out4; /* not attached */
	root_mp = root_mnt->mnt_mp;
	if (new.mnt->mnt_root != new.dentry)
		goto out4; /* not a mountpoint */
	if (!mnt_has_parent(new_mnt))
		goto out4; /* not attached */
	/* make sure we can reach put_old from new_root */
	if (!is_path_reachable(old_mnt, old.dentry, &new))
		goto out4;
	/* make certain new is below the root */
	if (!is_path_reachable(new_mnt, new.dentry, &root))
		goto out4;
	root_mp->m_count++; /* pin it so it won't go away */
	lock_mount_hash();
	detach_mnt(new_mnt, &parent_path);
	detach_mnt(root_mnt, &root_parent);
	if (root_mnt->mnt.mnt_flags & MNT_LOCKED) {
		/* The old root must stay locked under its new location. */
		new_mnt->mnt.mnt_flags |= MNT_LOCKED;
		root_mnt->mnt.mnt_flags &= ~MNT_LOCKED;
	}
	/* mount old root on put_old */
	attach_mnt(root_mnt, old_mnt, old_mp);
	/* mount new_root on / */
	attach_mnt(new_mnt, real_mount(root_parent.mnt), root_mp);
	touch_mnt_namespace(current->nsproxy->mnt_ns);
	/* A moved mount should not expire automatically */
	list_del_init(&new_mnt->mnt_expire);
	unlock_mount_hash();
	chroot_fs_refs(&root, &new);
	put_mountpoint(root_mp);
	error = 0;
out4:
	unlock_mount(old_mp);
	if (!error) {
		path_put(&root_parent);
		path_put(&parent_path);
	}
out3:
	path_put(&root);
out2:
	path_put(&old);
out1:
	path_put(&new);
out0:
	return error;
}

/* Boot-time: mount rootfs and make it the initial namespace's root. */
static void __init init_mount_tree(void)
{
	struct vfsmount *mnt;
	struct mnt_namespace *ns;
	struct path root;
	struct file_system_type *type;

	type = get_fs_type("rootfs");
	if (!type)
		panic("Can't find rootfs type");
	mnt = vfs_kern_mount(type, 0, "rootfs", NULL);
	put_filesystem(type);
	if (IS_ERR(mnt))
		panic("Can't create rootfs");

	ns = create_mnt_ns(mnt);
	if (IS_ERR(ns))
		panic("Can't allocate initial namespace");

	init_task.nsproxy->mnt_ns = ns;
	get_mnt_ns(ns);

	root.mnt = mnt;
	root.dentry = mnt->mnt_root;
	mnt->mnt_flags |= MNT_LOCKED;

	set_fs_pwd(current->fs, &root);
	set_fs_root(current->fs, &root);
}

/* Boot-time: set up mount caches, hash tables, sysfs, and rootfs. */
void __init mnt_init(void)
{
	unsigned u;
	int err;

	mnt_cache = kmem_cache_create("mnt_cache", sizeof(struct mount),
			0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);

	mount_hashtable = alloc_large_system_hash("Mount-cache",
				sizeof(struct hlist_head),
				mhash_entries, 19,
				0,
				&m_hash_shift, &m_hash_mask, 0, 0);
	mountpoint_hashtable = alloc_large_system_hash("Mountpoint-cache",
				sizeof(struct hlist_head),
				mphash_entries, 19,
				0,
				&mp_hash_shift, &mp_hash_mask, 0, 0);

	if (!mount_hashtable || !mountpoint_hashtable)
		panic("Failed to allocate mount hash table\n");

	for (u = 0; u <= m_hash_mask; u++)
		INIT_HLIST_HEAD(&mount_hashtable[u]);
	for (u = 0; u <= mp_hash_mask; u++)
		INIT_HLIST_HEAD(&mountpoint_hashtable[u]);

	kernfs_init();

	err = sysfs_init();
	if (err)
		printk(KERN_WARNING "%s: sysfs_init error: %d\n",
			__func__, err);
	fs_kobj = kobject_create_and_add("fs", NULL);
	if (!fs_kobj)
		printk(KERN_WARNING "%s: kobj create error\n", __func__);
	init_rootfs();
	init_mount_tree();
}

/* Drop a namespace reference; tear down its tree on last put. */
void put_mnt_ns(struct mnt_namespace *ns)
{
	if (!atomic_dec_and_test(&ns->count))
		return;
	drop_collected_mounts(&ns->root->mnt);
	free_mnt_ns(ns);
}

/* Create a long-term kernel-internal mount of the given fs type. */
struct vfsmount *kern_mount_data(struct file_system_type *type, void *data)
{
	struct vfsmount *mnt;
	mnt = vfs_kern_mount(type, MS_KERNMOUNT, type->name, data);
	if (!IS_ERR(mnt)) {
		/*
		 * it is a longterm mount, don't release mnt until
		 * we unmount before file sys is unregistered
		 */
		real_mount(mnt)->mnt_ns = MNT_NS_INTERNAL;
	}
	return mnt;
}
EXPORT_SYMBOL_GPL(kern_mount_data);

/* Release a mount created by kern_mount_data(). */
void kern_unmount(struct vfsmount *mnt)
{
	/* release long term mount so mount point can be released */
	if (!IS_ERR_OR_NULL(mnt)) {
		real_mount(mnt)->mnt_ns = NULL;
		synchronize_rcu();	/* yecchhh... */
		mntput(mnt);
	}
}
EXPORT_SYMBOL(kern_unmount);

/* True when mnt belongs to the current task's mount namespace. */
bool our_mnt(struct vfsmount *mnt)
{
	return check_mnt(real_mount(mnt));
}

bool current_chrooted(void)
{
	/* Does the current process have a non-standard root */
	struct path ns_root;
	struct path fs_root;
	bool chrooted;

	/* Find the namespace root */
	ns_root.mnt = &current->nsproxy->mnt_ns->root->mnt;
	ns_root.dentry = ns_root.mnt->mnt_root;
	path_get(&ns_root);
	while (d_mountpoint(ns_root.dentry) && follow_down_one(&ns_root))
		;

	get_fs_root(current->fs, &fs_root);

	chrooted = !path_equal(&fs_root, &ns_root);

	path_put(&fs_root);
	path_put(&ns_root);

	return chrooted;
}

/* True if a mount of the given fs type is visible in the current mount
 * namespace with nothing substantive hidden under it.
 *
 * NOTE(review): the child scan below accepts any child mounted on an
 * empty-looking directory (S_ISDIR && i_nlink <= 2) and does not consider
 * MNT_LOCKED or the mount's own flags; later upstream kernels hardened
 * this check considerably — confirm against the applicable CVE fixes. */
bool fs_fully_visible(struct file_system_type *type)
{
	struct mnt_namespace *ns = current->nsproxy->mnt_ns;
	struct mount *mnt;
	bool visible = false;

	if (unlikely(!ns))
		return false;

	down_read(&namespace_sem);
	list_for_each_entry(mnt, &ns->list, mnt_list) {
		struct mount *child;
		if (mnt->mnt.mnt_sb->s_type != type)
			continue;

		/* This mount is not fully visible if there are any child mounts
		 * that cover anything except for empty directories.
		 */
		list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) {
			struct inode *inode = child->mnt_mountpoint->d_inode;
			if (!S_ISDIR(inode->i_mode))
				goto next;
			if (inode->i_nlink > 2)
				goto next;
		}
		visible = true;
		goto found;
	next:	;
	}
found:
	up_read(&namespace_sem);
	return visible;
}

/* nsfs "get": take a reference on the task's mount namespace, if any. */
static struct ns_common *mntns_get(struct task_struct *task)
{
	struct ns_common *ns = NULL;
	struct nsproxy *nsproxy;

	task_lock(task);
	nsproxy = task->nsproxy;
	if (nsproxy) {
		ns = &nsproxy->mnt_ns->ns;
		get_mnt_ns(to_mnt_ns(ns));
	}
	task_unlock(task);

	return ns;
}

/* nsfs "put": drop the reference taken by mntns_get(). */
static void mntns_put(struct ns_common *ns)
{
	put_mnt_ns(to_mnt_ns(ns));
}

/* setns(2) into a mount namespace: requires CAP_SYS_ADMIN over the
 * target ns plus CAP_SYS_CHROOT/CAP_SYS_ADMIN in the caller's userns;
 * switches the namespace and repoints root/pwd at its root. */
static int mntns_install(struct nsproxy *nsproxy, struct ns_common *ns)
{
	struct fs_struct *fs = current->fs;
	struct mnt_namespace *mnt_ns = to_mnt_ns(ns);
	struct path root;

	if (!ns_capable(mnt_ns->user_ns, CAP_SYS_ADMIN) ||
	    !ns_capable(current_user_ns(), CAP_SYS_CHROOT) ||
	    !ns_capable(current_user_ns(), CAP_SYS_ADMIN))
		return -EPERM;

	if (fs->users != 1)
		return -EINVAL;

	get_mnt_ns(mnt_ns);
	put_mnt_ns(nsproxy->mnt_ns);
	nsproxy->mnt_ns = mnt_ns;

	/* Find the root */
	root.mnt    = &mnt_ns->root->mnt;
	root.dentry = mnt_ns->root->mnt.mnt_root;
	path_get(&root);
	while(d_mountpoint(root.dentry) && follow_down_one(&root))
		;

	/* Update the pwd and root */
	set_fs_pwd(fs, &root);
	set_fs_root(fs, &root);

	path_put(&root);
	return 0;
}

const struct proc_ns_operations mntns_operations = {
	.name		= "mnt",
	.type		= CLONE_NEWNS,
	.get		= mntns_get,
	.put		= mntns_put,
	.install	= mntns_install,
};
./CrossVul/dataset_final_sorted/CWE-284/c/good_2409_0
crossvul-cpp_data_bad_1571_7
/* Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ /* ** mysqlimport.c - Imports all given files ** into a table(s). */ #define IMPORT_VERSION "3.7" #include "client_priv.h" #include "my_default.h" #include "mysql_version.h" #ifdef HAVE_LIBPTHREAD #include <my_pthread.h> #endif #include <welcome_copyright_notice.h> /* ORACLE_WELCOME_COPYRIGHT_NOTICE */ /* Global Thread counter */ uint counter; #ifdef HAVE_LIBPTHREAD pthread_mutex_t counter_mutex; pthread_cond_t count_threshhold; #endif static void db_error_with_table(MYSQL *mysql, char *table); static void db_error(MYSQL *mysql); static char *field_escape(char *to,const char *from,uint length); static char *add_load_option(char *ptr,const char *object, const char *statement); static my_bool verbose=0,lock_tables=0,ignore_errors=0,opt_delete=0, replace=0,silent=0,ignore=0,opt_compress=0, opt_low_priority= 0, tty_password= 0; static my_bool debug_info_flag= 0, debug_check_flag= 0; static uint opt_use_threads=0, opt_local_file=0, my_end_arg= 0; static char *opt_password=0, *current_user=0, *current_host=0, *current_db=0, *fields_terminated=0, *lines_terminated=0, *enclosed=0, *opt_enclosed=0, *escaped=0, *opt_columns=0, *default_charset= (char*) MYSQL_AUTODETECT_CHARSET_NAME; static uint opt_mysql_port= 0, opt_protocol= 0; static char *opt_bind_addr = NULL; static char * 
opt_mysql_unix_port=0; static char *opt_plugin_dir= 0, *opt_default_auth= 0; static longlong opt_ignore_lines= -1; #include <sslopt-vars.h> #if defined (_WIN32) && !defined (EMBEDDED_LIBRARY) static char *shared_memory_base_name=0; #endif static struct my_option my_long_options[] = { {"bind-address", 0, "IP address to bind to.", (uchar**) &opt_bind_addr, (uchar**) &opt_bind_addr, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"character-sets-dir", OPT_CHARSETS_DIR, "Directory for character set files.", &charsets_dir, &charsets_dir, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"default-character-set", OPT_DEFAULT_CHARSET, "Set the default character set.", &default_charset, &default_charset, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"columns", 'c', "Use only these columns to import the data to. Give the column names in a comma separated list. This is same as giving columns to LOAD DATA INFILE.", &opt_columns, &opt_columns, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"compress", 'C', "Use compression in server/client protocol.", &opt_compress, &opt_compress, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"debug",'#', "Output debug log. 
Often this is 'd:t:o,filename'.", 0, 0, 0, GET_STR, OPT_ARG, 0, 0, 0, 0, 0, 0}, {"debug-check", OPT_DEBUG_CHECK, "Check memory and open file usage at exit.", &debug_check_flag, &debug_check_flag, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"debug-info", OPT_DEBUG_INFO, "Print some debug info at exit.", &debug_info_flag, &debug_info_flag, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"default_auth", OPT_DEFAULT_AUTH, "Default authentication client-side plugin to use.", &opt_default_auth, &opt_default_auth, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"delete", 'd', "First delete all rows from table.", &opt_delete, &opt_delete, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"fields-terminated-by", OPT_FTB, "Fields in the input file are terminated by the given string.", &fields_terminated, &fields_terminated, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"fields-enclosed-by", OPT_ENC, "Fields in the import file are enclosed by the given character.", &enclosed, &enclosed, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"fields-optionally-enclosed-by", OPT_O_ENC, "Fields in the input file are optionally enclosed by the given character.", &opt_enclosed, &opt_enclosed, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"fields-escaped-by", OPT_ESC, "Fields in the input file are escaped by the given character.", &escaped, &escaped, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"force", 'f', "Continue even if we get an SQL error.", &ignore_errors, &ignore_errors, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"help", '?', "Displays this help and exits.", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, {"host", 'h', "Connect to host.", &current_host, &current_host, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"ignore", 'i', "If duplicate unique key was found, keep old row.", &ignore, &ignore, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"ignore-lines", OPT_IGN_LINES, "Ignore first n lines of data infile.", &opt_ignore_lines, &opt_ignore_lines, 0, GET_LL, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, 
{"lines-terminated-by", OPT_LTB, "Lines in the input file are terminated by the given string.", &lines_terminated, &lines_terminated, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"local", 'L', "Read all files through the client.", &opt_local_file, &opt_local_file, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"lock-tables", 'l', "Lock all tables for write (this disables threads).", &lock_tables, &lock_tables, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"low-priority", OPT_LOW_PRIORITY, "Use LOW_PRIORITY when updating the table.", &opt_low_priority, &opt_low_priority, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"password", 'p', "Password to use when connecting to server. If password is not given it's asked from the tty.", 0, 0, 0, GET_PASSWORD, OPT_ARG, 0, 0, 0, 0, 0, 0}, #ifdef _WIN32 {"pipe", 'W', "Use named pipes to connect to server.", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, #endif {"plugin_dir", OPT_PLUGIN_DIR, "Directory for client-side plugins.", &opt_plugin_dir, &opt_plugin_dir, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"port", 'P', "Port number to use for connection or 0 for default to, in " "order of preference, my.cnf, $MYSQL_TCP_PORT, " #if MYSQL_PORT_DEFAULT == 0 "/etc/services, " #endif "built-in default (" STRINGIFY_ARG(MYSQL_PORT) ").", &opt_mysql_port, &opt_mysql_port, 0, GET_UINT, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"protocol", OPT_MYSQL_PROTOCOL, "The protocol to use for connection (tcp, socket, pipe, memory).", 0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"replace", 'r', "If duplicate unique key was found, replace old row.", &replace, &replace, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, #if defined (_WIN32) && !defined (EMBEDDED_LIBRARY) {"shared-memory-base-name", OPT_SHARED_MEMORY_BASE_NAME, "Base name of shared memory.", &shared_memory_base_name, &shared_memory_base_name, 0, GET_STR_ALLOC, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, #endif {"silent", 's', "Be more silent.", &silent, &silent, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"socket", 'S', 
"The socket file to use for connection.", &opt_mysql_unix_port, &opt_mysql_unix_port, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, #include <sslopt-longopts.h> {"use-threads", OPT_USE_THREADS, "Load files in parallel. The argument is the number " "of threads to use for loading data.", &opt_use_threads, &opt_use_threads, 0, GET_UINT, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"user", 'u', "User for login if not current user.", &current_user, &current_user, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"verbose", 'v', "Print info about the various stages.", &verbose, &verbose, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"version", 'V', "Output version information and exit.", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, { 0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0} }; static const char *load_default_groups[]= { "mysqlimport","client",0 }; static void print_version(void) { printf("%s Ver %s Distrib %s, for %s (%s)\n" ,my_progname, IMPORT_VERSION, MYSQL_SERVER_VERSION,SYSTEM_TYPE,MACHINE_TYPE); } static void usage(void) { print_version(); puts(ORACLE_WELCOME_COPYRIGHT_NOTICE("2000")); printf("\ Loads tables from text files in various formats. The base name of the\n\ text file must be the name of the table that should be used.\n\ If one uses sockets to connect to the MySQL server, the server will open and\n\ read the text file directly. In other cases the client will open the text\n\ file. 
The SQL command 'LOAD DATA INFILE' is used to import the rows.\n"); printf("\nUsage: %s [OPTIONS] database textfile...",my_progname); print_defaults("my",load_default_groups); my_print_help(my_long_options); my_print_variables(my_long_options); } static my_bool get_one_option(int optid, const struct my_option *opt __attribute__((unused)), char *argument) { switch(optid) { case 'p': if (argument == disabled_my_option) argument= (char*) ""; /* Don't require password */ if (argument) { char *start=argument; my_free(opt_password); opt_password=my_strdup(PSI_NOT_INSTRUMENTED, argument,MYF(MY_FAE)); while (*argument) *argument++= 'x'; /* Destroy argument */ if (*start) start[1]=0; /* Cut length of argument */ tty_password= 0; } else tty_password= 1; break; #ifdef _WIN32 case 'W': opt_protocol = MYSQL_PROTOCOL_PIPE; opt_local_file=1; break; #endif case OPT_MYSQL_PROTOCOL: opt_protocol= find_type_or_exit(argument, &sql_protocol_typelib, opt->name); break; case '#': DBUG_PUSH(argument ? argument : "d:t:o"); debug_check_flag= 1; break; #include <sslopt-case.h> case 'V': print_version(); exit(0); case 'I': case '?': usage(); exit(0); } return 0; } static int get_options(int *argc, char ***argv) { int ho_error; if ((ho_error=handle_options(argc, argv, my_long_options, get_one_option))) exit(ho_error); if (debug_info_flag) my_end_arg= MY_CHECK_ERROR | MY_GIVE_INFO; if (debug_check_flag) my_end_arg= MY_CHECK_ERROR; if (enclosed && opt_enclosed) { fprintf(stderr, "You can't use ..enclosed.. and ..optionally-enclosed.. 
at the same time.\n"); return(1); } if (replace && ignore) { fprintf(stderr, "You can't use --ignore (-i) and --replace (-r) at the same time.\n"); return(1); } if (*argc < 2) { usage(); return 1; } current_db= *((*argv)++); (*argc)--; if (tty_password) opt_password=get_tty_password(NullS); return(0); } static int write_to_table(char *filename, MYSQL *mysql) { char tablename[FN_REFLEN], hard_path[FN_REFLEN], escaped_name[FN_REFLEN * 2 + 1], sql_statement[FN_REFLEN*16+256], *end, *pos; DBUG_ENTER("write_to_table"); DBUG_PRINT("enter",("filename: %s",filename)); fn_format(tablename, filename, "", "", 1 | 2); /* removes path & ext. */ if (!opt_local_file) my_stpcpy(hard_path,filename); else my_load_path(hard_path, filename, NULL); /* filename includes the path */ if (opt_delete) { if (verbose) fprintf(stdout, "Deleting the old data from table %s\n", tablename); snprintf(sql_statement, FN_REFLEN*16+256, "DELETE FROM %s", tablename); if (mysql_query(mysql, sql_statement)) { db_error_with_table(mysql, tablename); DBUG_RETURN(1); } } to_unix_path(hard_path); if (verbose) { if (opt_local_file) fprintf(stdout, "Loading data from LOCAL file: %s into %s\n", hard_path, tablename); else fprintf(stdout, "Loading data from SERVER file: %s into %s\n", hard_path, tablename); } mysql_real_escape_string(mysql, escaped_name, hard_path, (unsigned long) strlen(hard_path)); sprintf(sql_statement, "LOAD DATA %s %s INFILE '%s'", opt_low_priority ? "LOW_PRIORITY" : "", opt_local_file ? "LOCAL" : "", escaped_name); end= strend(sql_statement); if (replace) end= my_stpcpy(end, " REPLACE"); if (ignore) end= my_stpcpy(end, " IGNORE"); end= my_stpcpy(end, " INTO TABLE `"); /* Turn any ` into `` in table name. 
*/ for (pos= tablename; *pos; pos++) { if (*pos == '`') *end++= '`'; *end++= *pos; } end= my_stpcpy(end, "`"); if (fields_terminated || enclosed || opt_enclosed || escaped) end= my_stpcpy(end, " FIELDS"); end= add_load_option(end, fields_terminated, " TERMINATED BY"); end= add_load_option(end, enclosed, " ENCLOSED BY"); end= add_load_option(end, opt_enclosed, " OPTIONALLY ENCLOSED BY"); end= add_load_option(end, escaped, " ESCAPED BY"); end= add_load_option(end, lines_terminated, " LINES TERMINATED BY"); if (opt_ignore_lines >= 0) end= my_stpcpy(longlong10_to_str(opt_ignore_lines, my_stpcpy(end, " IGNORE "),10), " LINES"); if (opt_columns) end= my_stpcpy(my_stpcpy(my_stpcpy(end, " ("), opt_columns), ")"); *end= '\0'; if (mysql_query(mysql, sql_statement)) { db_error_with_table(mysql, tablename); DBUG_RETURN(1); } if (!silent) { if (mysql_info(mysql)) /* If NULL-pointer, print nothing */ { fprintf(stdout, "%s.%s: %s\n", current_db, tablename, mysql_info(mysql)); } } DBUG_RETURN(0); } static void lock_table(MYSQL *mysql, int tablecount, char **raw_tablename) { DYNAMIC_STRING query; int i; char tablename[FN_REFLEN]; if (verbose) fprintf(stdout, "Locking tables for write\n"); init_dynamic_string(&query, "LOCK TABLES ", 256, 1024); for (i=0 ; i < tablecount ; i++) { fn_format(tablename, raw_tablename[i], "", "", 1 | 2); dynstr_append(&query, tablename); dynstr_append(&query, " WRITE,"); } if (mysql_real_query(mysql, query.str, query.length-1)) db_error(mysql); /* We shall countinue here, if --force was given */ } static MYSQL *db_connect(char *host, char *database, char *user, char *passwd) { MYSQL *mysql; if (verbose) fprintf(stdout, "Connecting to %s\n", host ? 
host : "localhost"); if (!(mysql= mysql_init(NULL))) return 0; if (opt_compress) mysql_options(mysql,MYSQL_OPT_COMPRESS,NullS); if (opt_local_file) mysql_options(mysql,MYSQL_OPT_LOCAL_INFILE, (char*) &opt_local_file); #ifdef HAVE_OPENSSL if (opt_use_ssl) { mysql_ssl_set(mysql, opt_ssl_key, opt_ssl_cert, opt_ssl_ca, opt_ssl_capath, opt_ssl_cipher); mysql_options(mysql, MYSQL_OPT_SSL_CRL, opt_ssl_crl); mysql_options(mysql, MYSQL_OPT_SSL_CRLPATH, opt_ssl_crlpath); } mysql_options(mysql,MYSQL_OPT_SSL_VERIFY_SERVER_CERT, (char*)&opt_ssl_verify_server_cert); #endif if (opt_protocol) mysql_options(mysql,MYSQL_OPT_PROTOCOL,(char*)&opt_protocol); if (opt_bind_addr) mysql_options(mysql,MYSQL_OPT_BIND,opt_bind_addr); #if defined (_WIN32) && !defined (EMBEDDED_LIBRARY) if (shared_memory_base_name) mysql_options(mysql,MYSQL_SHARED_MEMORY_BASE_NAME,shared_memory_base_name); #endif if (opt_plugin_dir && *opt_plugin_dir) mysql_options(mysql, MYSQL_PLUGIN_DIR, opt_plugin_dir); if (opt_default_auth && *opt_default_auth) mysql_options(mysql, MYSQL_DEFAULT_AUTH, opt_default_auth); mysql_options(mysql, MYSQL_SET_CHARSET_NAME, default_charset); mysql_options(mysql, MYSQL_OPT_CONNECT_ATTR_RESET, 0); mysql_options4(mysql, MYSQL_OPT_CONNECT_ATTR_ADD, "program_name", "mysqlimport"); if (!(mysql_real_connect(mysql,host,user,passwd, database,opt_mysql_port,opt_mysql_unix_port, 0))) { ignore_errors=0; /* NO RETURN FROM db_error */ db_error(mysql); } mysql->reconnect= 0; if (verbose) fprintf(stdout, "Selecting database %s\n", database); if (mysql_select_db(mysql, database)) { ignore_errors=0; db_error(mysql); } return mysql; } static void db_disconnect(char *host, MYSQL *mysql) { if (verbose) fprintf(stdout, "Disconnecting from %s\n", host ? 
host : "localhost"); mysql_close(mysql); } static void safe_exit(int error, MYSQL *mysql) { if (ignore_errors) return; if (mysql) mysql_close(mysql); exit(error); } static void db_error_with_table(MYSQL *mysql, char *table) { my_printf_error(0,"Error: %d, %s, when using table: %s", MYF(0), mysql_errno(mysql), mysql_error(mysql), table); safe_exit(1, mysql); } static void db_error(MYSQL *mysql) { my_printf_error(0,"Error: %d %s", MYF(0), mysql_errno(mysql), mysql_error(mysql)); safe_exit(1, mysql); } static char *add_load_option(char *ptr, const char *object, const char *statement) { if (object) { /* Don't escape hex constants */ if (object[0] == '0' && (object[1] == 'x' || object[1] == 'X')) ptr= strxmov(ptr," ",statement," ",object,NullS); else { /* char constant; escape */ ptr= strxmov(ptr," ",statement," '",NullS); ptr= field_escape(ptr,object,(uint) strlen(object)); *ptr++= '\''; } } return ptr; } /* ** Allow the user to specify field terminator strings like: ** "'", "\", "\\" (escaped backslash), "\t" (tab), "\n" (newline) ** This is done by doubleing ' and add a end -\ if needed to avoid ** syntax errors from the SQL parser. 
*/ static char *field_escape(char *to,const char *from,uint length) { const char *end; uint end_backslashes=0; for (end= from+length; from != end; from++) { *to++= *from; if (*from == '\\') end_backslashes^=1; /* find odd number of backslashes */ else { if (*from == '\'' && !end_backslashes) *to++= *from; /* We want a dublicate of "'" for MySQL */ end_backslashes=0; } } /* Add missing backslashes if user has specified odd number of backs.*/ if (end_backslashes) *to++= '\\'; return to; } int exitcode= 0; #ifdef HAVE_LIBPTHREAD pthread_handler_t worker_thread(void *arg) { int error; char *raw_table_name= (char *)arg; MYSQL *mysql= 0; if (mysql_thread_init()) goto error; if (!(mysql= db_connect(current_host,current_db,current_user,opt_password))) { goto error; } if (mysql_query(mysql, "/*!40101 set @@character_set_database=binary */;")) { db_error(mysql); /* We shall countinue here, if --force was given */ goto error; } /* We are not currently catching the error here. */ if((error= write_to_table(raw_table_name, mysql))) if (exitcode == 0) exitcode= error; error: if (mysql) db_disconnect(current_host, mysql); pthread_mutex_lock(&counter_mutex); counter--; pthread_cond_signal(&count_threshhold); pthread_mutex_unlock(&counter_mutex); mysql_thread_end(); return 0; } #endif int main(int argc, char **argv) { int error=0; char **argv_to_free; MY_INIT(argv[0]); my_getopt_use_args_separator= TRUE; if (load_defaults("my",load_default_groups,&argc,&argv)) return 1; my_getopt_use_args_separator= FALSE; /* argv is changed in the program */ argv_to_free= argv; if (get_options(&argc, &argv)) { free_defaults(argv_to_free); return(1); } #ifdef HAVE_LIBPTHREAD if (opt_use_threads && !lock_tables) { pthread_t mainthread; /* Thread descriptor */ pthread_attr_t attr; /* Thread attributes */ pthread_attr_init(&attr); pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED); pthread_mutex_init(&counter_mutex, NULL); pthread_cond_init(&count_threshhold, NULL); for (counter= 0; *argv != 
NULL; argv++) /* Loop through tables */ { pthread_mutex_lock(&counter_mutex); while (counter == opt_use_threads) { struct timespec abstime; set_timespec(abstime, 3); pthread_cond_timedwait(&count_threshhold, &counter_mutex, &abstime); } /* Before exiting the lock we set ourselves up for the next thread */ counter++; pthread_mutex_unlock(&counter_mutex); /* now create the thread */ if (pthread_create(&mainthread, &attr, worker_thread, (void *)*argv) != 0) { pthread_mutex_lock(&counter_mutex); counter--; pthread_mutex_unlock(&counter_mutex); fprintf(stderr,"%s: Could not create thread\n", my_progname); } } /* We loop until we know that all children have cleaned up. */ pthread_mutex_lock(&counter_mutex); while (counter) { struct timespec abstime; set_timespec(abstime, 3); pthread_cond_timedwait(&count_threshhold, &counter_mutex, &abstime); } pthread_mutex_unlock(&counter_mutex); pthread_mutex_destroy(&counter_mutex); pthread_cond_destroy(&count_threshhold); pthread_attr_destroy(&attr); } else #endif { MYSQL *mysql= 0; if (!(mysql= db_connect(current_host,current_db,current_user,opt_password))) { free_defaults(argv_to_free); return(1); /* purecov: deadcode */ } if (mysql_query(mysql, "/*!40101 set @@character_set_database=binary */;")) { db_error(mysql); /* We shall countinue here, if --force was given */ return(1); } if (lock_tables) lock_table(mysql, argc, argv); for (; *argv != NULL; argv++) if ((error= write_to_table(*argv, mysql))) if (exitcode == 0) exitcode= error; db_disconnect(current_host, mysql); } my_free(opt_password); #if defined (_WIN32) && !defined (EMBEDDED_LIBRARY) my_free(shared_memory_base_name); #endif free_defaults(argv_to_free); my_end(my_end_arg); return(exitcode); }
./CrossVul/dataset_final_sorted/CWE-284/c/bad_1571_7
crossvul-cpp_data_good_5017_0
/* * libndp.c - Neighbour discovery library * Copyright (C) 2013-2015 Jiri Pirko <jiri@resnulli.us> * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include <stdio.h> #include <stdlib.h> #include <unistd.h> #include <stdarg.h> #include <string.h> #include <errno.h> #include <ctype.h> #include <sys/socket.h> #include <sys/select.h> #include <netinet/in.h> #include <netinet/icmp6.h> #include <arpa/inet.h> #include <net/ethernet.h> #include <assert.h> #include <ndp.h> #include "ndp_private.h" #include "list.h" /** * SECTION: logging * @short_description: libndp logging facility */ void ndp_log(struct ndp *ndp, int priority, const char *file, int line, const char *fn, const char *format, ...) 
{ va_list args; va_start(args, format); ndp->log_fn(ndp, priority, file, line, fn, format, args); va_end(args); } static void log_stderr(struct ndp *ndp, int priority, const char *file, int line, const char *fn, const char *format, va_list args) { fprintf(stderr, "libndp: %s: ", fn); vfprintf(stderr, format, args); fprintf(stderr, "\n"); } static int log_priority(const char *priority) { char *endptr; int prio; prio = strtol(priority, &endptr, 10); if (endptr[0] == '\0' || isspace(endptr[0])) return prio; if (strncmp(priority, "err", 3) == 0) return LOG_ERR; if (strncmp(priority, "info", 4) == 0) return LOG_INFO; if (strncmp(priority, "debug", 5) == 0) return LOG_DEBUG; return 0; } /** * ndp_set_log_fn: * @ndp: libndp library context * @log_fn: function to be called for logging messages * * The built-in logging writes to stderr. It can be * overridden by a custom function, to plug log messages * into the user's logging functionality. **/ NDP_EXPORT void ndp_set_log_fn(struct ndp *ndp, void (*log_fn)(struct ndp *ndp, int priority, const char *file, int line, const char *fn, const char *format, va_list args)) { ndp->log_fn = log_fn; dbg(ndp, "Custom logging function %p registered.", log_fn); } /** * ndp_get_log_priority: * @ndp: libndp library context * * Returns: the current logging priority. **/ NDP_EXPORT int ndp_get_log_priority(struct ndp *ndp) { return ndp->log_priority; } /** * ndp_set_log_priority: * @ndp: libndp library context * @priority: the new logging priority * * Set the current logging priority. The value controls which messages * are logged. 
**/ NDP_EXPORT void ndp_set_log_priority(struct ndp *ndp, int priority) { ndp->log_priority = priority; } /** * SECTION: helpers * @short_description: various internal helper functions */ #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) #define BUG_ON(expr) { if (expr) assert(0); } static void *myzalloc(size_t size) { return calloc(1, size); } static int myrecvfrom6(int sockfd, void *buf, size_t *buflen, int flags, struct in6_addr *addr, uint32_t *ifindex, int *hoplimit) { struct sockaddr_in6 sin6; unsigned char cbuf[2 * CMSG_SPACE(sizeof(struct in6_pktinfo))]; struct iovec iovec; struct msghdr msghdr; struct cmsghdr *cmsghdr; ssize_t len; iovec.iov_len = *buflen; iovec.iov_base = buf; memset(&msghdr, 0, sizeof(msghdr)); msghdr.msg_name = &sin6; msghdr.msg_namelen = sizeof(sin6); msghdr.msg_iov = &iovec; msghdr.msg_iovlen = 1; msghdr.msg_control = cbuf; msghdr.msg_controllen = sizeof(cbuf); len = recvmsg(sockfd, &msghdr, flags); if (len == -1) return -errno; *buflen = len; /* Set ifindex to scope_id now. But since scope_id gets not * set by kernel for linklocal addresses, use pktinfo to obtain that * value right after. 
*/ *ifindex = sin6.sin6_scope_id; for (cmsghdr = CMSG_FIRSTHDR(&msghdr); cmsghdr; cmsghdr = CMSG_NXTHDR(&msghdr, cmsghdr)) { if (cmsghdr->cmsg_level != IPPROTO_IPV6) continue; switch(cmsghdr->cmsg_type) { case IPV6_PKTINFO: if (cmsghdr->cmsg_len == CMSG_LEN(sizeof(struct in6_pktinfo))) { struct in6_pktinfo *pktinfo; pktinfo = (struct in6_pktinfo *) CMSG_DATA(cmsghdr); *ifindex = pktinfo->ipi6_ifindex; } break; case IPV6_HOPLIMIT: if (cmsghdr->cmsg_len == CMSG_LEN(sizeof(int))) { int *val; val = (int *) CMSG_DATA(cmsghdr); *hoplimit = *val; } break; } } *addr = sin6.sin6_addr; return 0; } static int mysendto6(int sockfd, void *buf, size_t buflen, int flags, struct in6_addr *addr, uint32_t ifindex) { struct sockaddr_in6 sin6; ssize_t ret; memset(&sin6, 0, sizeof(sin6)); memcpy(&sin6.sin6_addr, addr, sizeof(sin6.sin6_addr)); sin6.sin6_scope_id = ifindex; resend: ret = sendto(sockfd, buf, buflen, flags, &sin6, sizeof(sin6)); if (ret == -1) { switch(errno) { case EINTR: goto resend; default: return -errno; } } return 0; } static const char *str_in6_addr(struct in6_addr *addr) { static char buf[INET6_ADDRSTRLEN]; return inet_ntop(AF_INET6, addr, buf, sizeof(buf)); } /** * SECTION: NDP implementation * @short_description: functions that actually implements NDP */ static int ndp_sock_open(struct ndp *ndp) { int sock; //struct icmp6_filter flt; int ret; int err; int val; sock = socket(PF_INET6, SOCK_RAW, IPPROTO_ICMPV6); if (sock == -1) { err(ndp, "Failed to create ICMP6 socket."); return -errno; } val = 1; ret = setsockopt(sock, IPPROTO_IPV6, IPV6_RECVPKTINFO, &val, sizeof(val)); if (ret == -1) { err(ndp, "Failed to setsockopt IPV6_RECVPKTINFO."); err = -errno; goto close_sock; } val = 255; ret = setsockopt(sock, IPPROTO_IPV6, IPV6_MULTICAST_HOPS, &val, sizeof(val)); if (ret == -1) { err(ndp, "Failed to setsockopt IPV6_MULTICAST_HOPS."); err = -errno; goto close_sock; } val = 1; ret = setsockopt(sock, IPPROTO_IPV6, IPV6_RECVHOPLIMIT, &val, sizeof(val)); if (ret == -1) { 
err(ndp, "Failed to setsockopt IPV6_RECVHOPLIMIT,."); err = -errno; goto close_sock; } ndp->sock = sock; return 0; close_sock: close(sock); return err; } static void ndp_sock_close(struct ndp *ndp) { close(ndp->sock); } struct ndp_msggeneric { void *dataptr; /* must be first */ }; struct ndp_msgrs { struct nd_router_solicit *rs; /* must be first */ }; struct ndp_msgra { struct nd_router_advert *ra; /* must be first */ }; struct ndp_msgns { struct nd_neighbor_solicit *ns; /* must be first */ }; struct ndp_msgna { struct nd_neighbor_advert *na; /* must be first */ }; struct ndp_msgr { struct nd_redirect *r; /* must be first */ }; struct ndp_msg { #define NDP_MSG_BUFLEN 1500 unsigned char buf[NDP_MSG_BUFLEN]; size_t len; struct in6_addr addrto; uint32_t ifindex; int hoplimit; struct icmp6_hdr * icmp6_hdr; unsigned char * opts_start; /* pointer to buf at the place where opts start */ union { struct ndp_msggeneric generic; struct ndp_msgrs rs; struct ndp_msgra ra; struct ndp_msgns ns; struct ndp_msgna na; struct ndp_msgr r; } nd_msg; }; struct ndp_msg_type_info { #define NDP_STRABBR_SIZE 4 char strabbr[NDP_STRABBR_SIZE]; uint8_t raw_type; size_t raw_struct_size; void (*addrto_adjust)(struct in6_addr *addr); }; static void ndp_msg_addrto_adjust_all_nodes(struct in6_addr *addr) { struct in6_addr any = IN6ADDR_ANY_INIT; if (memcmp(addr, &any, sizeof(any))) return; addr->s6_addr32[0] = htonl(0xFF020000); addr->s6_addr32[1] = 0; addr->s6_addr32[2] = 0; addr->s6_addr32[3] = htonl(0x1); } static void ndp_msg_addrto_adjust_all_routers(struct in6_addr *addr) { struct in6_addr any = IN6ADDR_ANY_INIT; if (memcmp(addr, &any, sizeof(any))) return; addr->s6_addr32[0] = htonl(0xFF020000); addr->s6_addr32[1] = 0; addr->s6_addr32[2] = 0; addr->s6_addr32[3] = htonl(0x2); } static struct ndp_msg_type_info ndp_msg_type_info_list[] = { [NDP_MSG_RS] = { .strabbr = "RS", .raw_type = ND_ROUTER_SOLICIT, .raw_struct_size = sizeof(struct nd_router_solicit), .addrto_adjust = 
ndp_msg_addrto_adjust_all_routers, }, [NDP_MSG_RA] = { .strabbr = "RA", .raw_type = ND_ROUTER_ADVERT, .raw_struct_size = sizeof(struct nd_router_advert), }, [NDP_MSG_NS] = { .strabbr = "NS", .raw_type = ND_NEIGHBOR_SOLICIT, .raw_struct_size = sizeof(struct nd_neighbor_solicit), .addrto_adjust = ndp_msg_addrto_adjust_all_nodes, }, [NDP_MSG_NA] = { .strabbr = "NA", .raw_type = ND_NEIGHBOR_ADVERT, .raw_struct_size = sizeof(struct nd_neighbor_advert), }, [NDP_MSG_R] = { .strabbr = "R", .raw_type = ND_REDIRECT, .raw_struct_size = sizeof(struct nd_redirect), }, }; #define NDP_MSG_TYPE_LIST_SIZE ARRAY_SIZE(ndp_msg_type_info_list) struct ndp_msg_type_info *ndp_msg_type_info(enum ndp_msg_type msg_type) { return &ndp_msg_type_info_list[msg_type]; } static int ndp_msg_type_by_raw_type(enum ndp_msg_type *p_msg_type, uint8_t raw_type) { int i; for (i = 0; i < NDP_MSG_TYPE_LIST_SIZE; i++) { if (ndp_msg_type_info(i)->raw_type == raw_type) { *p_msg_type = i; return 0; } } return -ENOENT; } static bool ndp_msg_check_valid(struct ndp_msg *msg) { size_t len = ndp_msg_payload_len(msg); enum ndp_msg_type msg_type = ndp_msg_type(msg); if (len < ndp_msg_type_info(msg_type)->raw_struct_size) return false; return true; } static struct ndp_msg *ndp_msg_alloc(void) { struct ndp_msg *msg; msg = myzalloc(sizeof(*msg)); if (!msg) return NULL; msg->icmp6_hdr = (struct icmp6_hdr *) msg->buf; return msg; } static void ndp_msg_type_set(struct ndp_msg *msg, enum ndp_msg_type msg_type); static void ndp_msg_init(struct ndp_msg *msg, enum ndp_msg_type msg_type) { size_t raw_struct_size = ndp_msg_type_info(msg_type)->raw_struct_size; ndp_msg_type_set(msg, msg_type); msg->len = raw_struct_size; msg->opts_start = msg->buf + raw_struct_size; /* Set-up "first pointers" in all ndp_msgrs, ndp_msgra, ndp_msgns, * ndp_msgna, ndp_msgr structures. 
*/ msg->nd_msg.generic.dataptr = ndp_msg_payload(msg); } /** * ndp_msg_new: * @p_msg: pointer where new message structure address will be stored * @msg_type: message type * * Allocate new message structure of a specified type and initialize it. * * Returns: zero on success or negative number in case of an error. **/ NDP_EXPORT int ndp_msg_new(struct ndp_msg **p_msg, enum ndp_msg_type msg_type) { struct ndp_msg *msg; if (msg_type == NDP_MSG_ALL) return -EINVAL; msg = ndp_msg_alloc(); if (!msg) return -ENOMEM; ndp_msg_init(msg, msg_type); *p_msg = msg; return 0; } /** * ndp_msg_destroy: * * Destroy message structure. **/ NDP_EXPORT void ndp_msg_destroy(struct ndp_msg *msg) { free(msg); } /** * ndp_msg_payload: * @msg: message structure * * Get raw Neighbour discovery packet data. * * Returns: pointer to raw data. **/ NDP_EXPORT void *ndp_msg_payload(struct ndp_msg *msg) { return msg->buf; } /** * ndp_msg_payload_maxlen: * @msg: message structure * * Get raw Neighbour discovery packet data maximum length. * * Returns: length in bytes. **/ NDP_EXPORT size_t ndp_msg_payload_maxlen(struct ndp_msg *msg) { return sizeof(msg->buf); } /** * ndp_msg_payload_len: * @msg: message structure * * Get raw Neighbour discovery packet data length. * * Returns: length in bytes. **/ NDP_EXPORT size_t ndp_msg_payload_len(struct ndp_msg *msg) { return msg->len; } /** * ndp_msg_payload_len_set: * @msg: message structure * * Set raw Neighbour discovery packet data length. **/ NDP_EXPORT void ndp_msg_payload_len_set(struct ndp_msg *msg, size_t len) { if (len > sizeof(msg->buf)) len = sizeof(msg->buf); msg->len = len; } /** * ndp_msg_payload_opts: * @msg: message structure * * Get raw Neighbour discovery packet options part data. * * Returns: pointer to raw data. 
**/ NDP_EXPORT void *ndp_msg_payload_opts(struct ndp_msg *msg) { return msg->opts_start; } static void *ndp_msg_payload_opts_offset(struct ndp_msg *msg, int offset) { unsigned char *ptr = ndp_msg_payload_opts(msg); return ptr + offset; } /** * ndp_msg_payload_opts_len: * @msg: message structure * * Get raw Neighbour discovery packet options part data length. * * Returns: length in bytes. **/ NDP_EXPORT size_t ndp_msg_payload_opts_len(struct ndp_msg *msg) { return msg->len - (msg->opts_start - msg->buf); } /** * ndp_msgrs: * @msg: message structure * * Get RS message structure by passed @msg. * * Returns: RS message structure or NULL in case the message is not of type RS. **/ NDP_EXPORT struct ndp_msgrs *ndp_msgrs(struct ndp_msg *msg) { if (ndp_msg_type(msg) != NDP_MSG_RS) return NULL; return &msg->nd_msg.rs; } /** * ndp_msgra: * @msg: message structure * * Get RA message structure by passed @msg. * * Returns: RA message structure or NULL in case the message is not of type RA. **/ NDP_EXPORT struct ndp_msgra *ndp_msgra(struct ndp_msg *msg) { if (ndp_msg_type(msg) != NDP_MSG_RA) return NULL; return &msg->nd_msg.ra; } /** * ndp_msgns: * @msg: message structure * * Get NS message structure by passed @msg. * * Returns: NS message structure or NULL in case the message is not of type NS. **/ NDP_EXPORT struct ndp_msgns *ndp_msgns(struct ndp_msg *msg) { if (ndp_msg_type(msg) != NDP_MSG_NS) return NULL; return &msg->nd_msg.ns; } /** * ndp_msgna: * @msg: message structure * * Get NA message structure by passed @msg. * * Returns: NA message structure or NULL in case the message is not of type NA. **/ NDP_EXPORT struct ndp_msgna *ndp_msgna(struct ndp_msg *msg) { if (ndp_msg_type(msg) != NDP_MSG_NA) return NULL; return &msg->nd_msg.na; } /** * ndp_msgr: * @msg: message structure * * Get R message structure by passed @msg. * * Returns: R message structure or NULL in case the message is not of type R. 
**/ NDP_EXPORT struct ndp_msgr *ndp_msgr(struct ndp_msg *msg) { if (ndp_msg_type(msg) != NDP_MSG_R) return NULL; return &msg->nd_msg.r; } /** * ndp_msg_type: * @msg: message structure * * Get type of message. * * Returns: Message type **/ NDP_EXPORT enum ndp_msg_type ndp_msg_type(struct ndp_msg *msg) { enum ndp_msg_type msg_type; int err; err = ndp_msg_type_by_raw_type(&msg_type, msg->icmp6_hdr->icmp6_type); /* Type should be always set correctly (ensured by ndp_msg_init) */ BUG_ON(err); return msg_type; } static void ndp_msg_type_set(struct ndp_msg *msg, enum ndp_msg_type msg_type) { msg->icmp6_hdr->icmp6_type = ndp_msg_type_info(msg_type)->raw_type; } /** * ndp_msg_addrto: * @msg: message structure * * Get "to address" of message. * * Returns: pointer to address. **/ NDP_EXPORT struct in6_addr *ndp_msg_addrto(struct ndp_msg *msg) { return &msg->addrto; } /** * ndp_msg_ifindex: * @msg: message structure * * Get interface index of message. * * Returns: Interface index **/ NDP_EXPORT uint32_t ndp_msg_ifindex(struct ndp_msg *msg) { return msg->ifindex; } /** * ndp_msg_ifindex_set: * @msg: message structure * * Set raw interface index of message. **/ NDP_EXPORT void ndp_msg_ifindex_set(struct ndp_msg *msg, uint32_t ifindex) { msg->ifindex = ifindex; } /** * ndp_msg_send: * @ndp: libndp library context * @msg: message structure * * Send message. * * Returns: zero on success or negative number in case of an error. **/ NDP_EXPORT int ndp_msg_send(struct ndp *ndp, struct ndp_msg *msg) { return ndp_msg_send_with_flags(ndp, msg, ND_OPT_NORMAL); } /** * ndp_msg_send_with_flags: * @ndp: libndp library context * @msg: message structure * @flags: option flags within message type * * Send message. * * Returns: zero on success or negative number in case of an error. 
**/ NDP_EXPORT int ndp_msg_send_with_flags(struct ndp *ndp, struct ndp_msg *msg, uint8_t flags) { enum ndp_msg_type msg_type = ndp_msg_type(msg); if (ndp_msg_type_info(msg_type)->addrto_adjust) ndp_msg_type_info(msg_type)->addrto_adjust(&msg->addrto); switch (msg_type) { case NDP_MSG_NA: if (flags & ND_OPT_NA_UNSOL) { ndp_msgna_flag_override_set((struct ndp_msgna*)&msg->nd_msg, true); ndp_msgna_flag_solicited_set((struct ndp_msgna*)&msg->nd_msg, false); ndp_msg_addrto_adjust_all_nodes(&msg->addrto); } else { ndp_msgna_flag_solicited_set((struct ndp_msgna*)&msg->nd_msg, true); } break; default: break; } return mysendto6(ndp->sock, msg->buf, msg->len, 0, &msg->addrto, msg->ifindex); } /** * SECTION: msgra getters/setters * @short_description: Getters and setters for RA message */ /** * ndp_msgra_curhoplimit: * @msgra: RA message structure * * Get RA curhoplimit. * * Returns: curhoplimit. **/ NDP_EXPORT uint8_t ndp_msgra_curhoplimit(struct ndp_msgra *msgra) { return msgra->ra->nd_ra_curhoplimit; } /** * ndp_msgra_curhoplimit_set: * @msgra: RA message structure * * Set RA curhoplimit. **/ NDP_EXPORT void ndp_msgra_curhoplimit_set(struct ndp_msgra *msgra, uint8_t curhoplimit) { msgra->ra->nd_ra_curhoplimit = curhoplimit; } /** * ndp_msgra_flag_managed: * @msgra: RA message structure * * Get RA managed flag. * * Returns: managed flag. **/ NDP_EXPORT bool ndp_msgra_flag_managed(struct ndp_msgra *msgra) { return msgra->ra->nd_ra_flags_reserved & ND_RA_FLAG_MANAGED; } /** * ndp_msgra_flag_managed_set: * @msgra: RA message structure * * Set RA managed flag. **/ NDP_EXPORT void ndp_msgra_flag_managed_set(struct ndp_msgra *msgra, bool flag_managed) { if (flag_managed) msgra->ra->nd_ra_flags_reserved |= ND_RA_FLAG_MANAGED; else msgra->ra->nd_ra_flags_reserved &= ~ND_RA_FLAG_MANAGED; } /** * ndp_msgra_flag_other: * @msgra: RA message structure * * Get RA other flag. * * Returns: other flag. 
**/ NDP_EXPORT bool ndp_msgra_flag_other(struct ndp_msgra *msgra) { return msgra->ra->nd_ra_flags_reserved & ND_RA_FLAG_OTHER; } /** * ndp_msgra_flag_other_set: * @msgra: RA message structure * * Set RA other flag. **/ NDP_EXPORT void ndp_msgra_flag_other_set(struct ndp_msgra *msgra, bool flag_other) { if (flag_other) msgra->ra->nd_ra_flags_reserved |= ND_RA_FLAG_OTHER; else msgra->ra->nd_ra_flags_reserved &= ~ND_RA_FLAG_OTHER; } /** * ndp_msgra_flag_home_agent: * @msgra: RA message structure * * Get RA home_agent flag. * * Returns: home_agent flag. **/ NDP_EXPORT bool ndp_msgra_flag_home_agent(struct ndp_msgra *msgra) { return msgra->ra->nd_ra_flags_reserved & ND_RA_FLAG_HOME_AGENT; } /** * ndp_msgra_flag_home_agent_set: * @msgra: RA message structure * * Set RA home_agent flag. **/ NDP_EXPORT void ndp_msgra_flag_home_agent_set(struct ndp_msgra *msgra, bool flag_home_agent) { if (flag_home_agent) msgra->ra->nd_ra_flags_reserved |= ND_RA_FLAG_HOME_AGENT; else msgra->ra->nd_ra_flags_reserved &= ~ND_RA_FLAG_HOME_AGENT; } /** * ndp_msgra_route_preference: * @msgra: RA message structure * * Get route preference. * * Returns: route preference. **/ NDP_EXPORT enum ndp_route_preference ndp_msgra_route_preference(struct ndp_msgra *msgra) { uint8_t prf = (msgra->ra->nd_ra_flags_reserved >> 3) & 3; /* rfc4191 says: * If the Router Lifetime is zero, the preference value MUST be set to * (00) by the sender and MUST be ignored by the receiver. * If the Reserved (10) value is received, the receiver MUST treat the * value as if it were (00). */ if (prf == 2 || !ndp_msgra_router_lifetime(msgra)) prf = 0; return prf; } /** * ndp_msgra_route_preference_set: * @msgra: RA message structure * @pref: preference * * Set route preference. 
**/ NDP_EXPORT void ndp_msgra_route_preference_set(struct ndp_msgra *msgra, enum ndp_route_preference pref) { msgra->ra->nd_ra_flags_reserved &= ~(3 << 3); msgra->ra->nd_ra_flags_reserved |= (pref << 3); } /** * ndp_msgra_router_lifetime: * @msgra: RA message structure * * Get RA router lifetime. * * Returns: router lifetime in seconds. **/ NDP_EXPORT uint16_t ndp_msgra_router_lifetime(struct ndp_msgra *msgra) { return ntohs(msgra->ra->nd_ra_router_lifetime); } /** * ndp_msgra_router_lifetime_set: * @msgra: RA message structure * * Set RA router lifetime. **/ NDP_EXPORT void ndp_msgra_router_lifetime_set(struct ndp_msgra *msgra, uint16_t router_lifetime) { msgra->ra->nd_ra_router_lifetime = htons(router_lifetime); } /** * ndp_msgra_reachable_time: * @msgra: RA message structure * * Get RA reachable time. * * Returns: reachable time in milliseconds. **/ NDP_EXPORT uint32_t ndp_msgra_reachable_time(struct ndp_msgra *msgra) { return ntohl(msgra->ra->nd_ra_reachable); } /** * ndp_msgra_reachable_time_set: * @msgra: RA message structure * * Set RA reachable time. **/ NDP_EXPORT void ndp_msgra_reachable_time_set(struct ndp_msgra *msgra, uint32_t reachable_time) { msgra->ra->nd_ra_reachable = htonl(reachable_time); } /** * ndp_msgra_retransmit_time: * @msgra: RA message structure * * Get RA retransmit time. * * Returns: retransmit time in milliseconds. **/ NDP_EXPORT uint32_t ndp_msgra_retransmit_time(struct ndp_msgra *msgra) { return ntohl(msgra->ra->nd_ra_retransmit); } /** * ndp_msgra_retransmit_time_set: * @msgra: RA message structure * * Set RA retransmit time. **/ NDP_EXPORT void ndp_msgra_retransmit_time_set(struct ndp_msgra *msgra, uint32_t retransmit_time) { msgra->ra->nd_ra_retransmit = htonl(retransmit_time); } /** * SECTION: msgna getters/setters * @short_description: Getters and setters for NA message */ /** * ndp_msgna_flag_router: * @msgna: NA message structure * * Get NA router flag. * * Returns: router flag. 
**/ NDP_EXPORT bool ndp_msgna_flag_router(struct ndp_msgna *msgna) { return msgna->na->nd_na_flags_reserved & ND_NA_FLAG_ROUTER; } /** * ndp_msgna_flag_router_set: * @msgna: NA message structure * * Set NA router flag. **/ NDP_EXPORT void ndp_msgna_flag_router_set(struct ndp_msgna *msgna, bool flag_router) { if (flag_router) msgna->na->nd_na_flags_reserved |= ND_NA_FLAG_ROUTER; else msgna->na->nd_na_flags_reserved &= ~ND_NA_FLAG_ROUTER; } /** * ndp_msgna_flag_solicited: * @msgna: NA message structure * * Get NA solicited flag. * * Returns: solicited flag. **/ NDP_EXPORT bool ndp_msgna_flag_solicited(struct ndp_msgna *msgna) { return msgna->na->nd_na_flags_reserved & ND_NA_FLAG_SOLICITED; } /** * ndp_msgna_flag_solicited_set: * @msgna: NA message structure * * Set NA managed flag. **/ NDP_EXPORT void ndp_msgna_flag_solicited_set(struct ndp_msgna *msgna, bool flag_solicited) { if (flag_solicited) msgna->na->nd_na_flags_reserved |= ND_NA_FLAG_SOLICITED; else msgna->na->nd_na_flags_reserved &= ~ND_NA_FLAG_SOLICITED; } /** * ndp_msgna_flag_override: * @msgna: NA message structure * * Get NA override flag. * * Returns: override flag. **/ NDP_EXPORT bool ndp_msgna_flag_override(struct ndp_msgna *msgna) { return msgna->na->nd_na_flags_reserved & ND_NA_FLAG_OVERRIDE; } /** * ndp_msgna_flag_override_set: * @msgra: NA message structure * * Set NA override flag. 
*/ NDP_EXPORT void ndp_msgna_flag_override_set(struct ndp_msgna *msgna, bool flag_override) { if (flag_override) msgna->na->nd_na_flags_reserved |= ND_NA_FLAG_OVERRIDE; else msgna->na->nd_na_flags_reserved &= ~ND_NA_FLAG_OVERRIDE; } /** * SECTION: msg_opt infrastructure * @short_description: Infrastructure for options */ struct ndp_msg_opt_type_info { uint8_t raw_type; size_t raw_struct_size; bool (*check_valid)(void *opt_data); }; static bool ndp_msg_opt_route_check_valid(void *opt_data) { struct __nd_opt_route_info *ri = opt_data; /* rfc4191 says: * If the Reserved (10) value is received, the Route Information Option * MUST be ignored. */ if (((ri->nd_opt_ri_prf_reserved >> 3) & 3) == 2) return false; return true; } static struct ndp_msg_opt_type_info ndp_msg_opt_type_info_list[] = { [NDP_MSG_OPT_SLLADDR] = { .raw_type = ND_OPT_SOURCE_LINKADDR, }, [NDP_MSG_OPT_TLLADDR] = { .raw_type = ND_OPT_TARGET_LINKADDR, }, [NDP_MSG_OPT_PREFIX] = { .raw_type = ND_OPT_PREFIX_INFORMATION, .raw_struct_size = sizeof(struct nd_opt_prefix_info), }, [NDP_MSG_OPT_REDIR] = { .raw_type = ND_OPT_REDIRECTED_HEADER, }, [NDP_MSG_OPT_MTU] = { .raw_type = ND_OPT_MTU, .raw_struct_size = sizeof(struct nd_opt_mtu), }, [NDP_MSG_OPT_ROUTE] = { .raw_type = __ND_OPT_ROUTE_INFO, .raw_struct_size = sizeof(struct __nd_opt_route_info), .check_valid = ndp_msg_opt_route_check_valid, }, [NDP_MSG_OPT_RDNSS] = { .raw_type = __ND_OPT_RDNSS, .raw_struct_size = sizeof(struct __nd_opt_rdnss), }, [NDP_MSG_OPT_DNSSL] = { .raw_type = __ND_OPT_DNSSL, .raw_struct_size = sizeof(struct __nd_opt_dnssl), }, }; #define NDP_MSG_OPT_TYPE_LIST_SIZE ARRAY_SIZE(ndp_msg_opt_type_info_list) struct ndp_msg_opt_type_info *ndp_msg_opt_type_info(enum ndp_msg_opt_type msg_opt_type) { return &ndp_msg_opt_type_info_list[msg_opt_type]; } struct ndp_msg_opt_type_info *ndp_msg_opt_type_info_by_raw_type(uint8_t raw_type) { struct ndp_msg_opt_type_info *info; int i; for (i = 0; i < NDP_MSG_OPT_TYPE_LIST_SIZE; i++) { info = 
&ndp_msg_opt_type_info_list[i]; if (info->raw_type == raw_type) return info; } return NULL; } /** * ndp_msg_next_opt_offset: * @msg: message structure * @offset: option payload offset * @opt_type: option type * * Find next offset of option of given type. If offset is -1, start from * beginning, otherwise start from the given offset. * This funstion is internally used by ndp_msg_opt_for_each_offset() macro. * * Returns: offset in opt payload of found opt of -1 in case it was not found. **/ NDP_EXPORT int ndp_msg_next_opt_offset(struct ndp_msg *msg, int offset, enum ndp_msg_opt_type opt_type) { unsigned char *opts_start = ndp_msg_payload_opts(msg); unsigned char *ptr = opts_start; size_t len = ndp_msg_payload_opts_len(msg); uint8_t opt_raw_type = ndp_msg_opt_type_info(opt_type)->raw_type; bool ignore = true; if (offset == -1) { offset = 0; ignore = false; } ptr += offset; len -= offset; while (len > 0) { uint8_t cur_opt_raw_type = ptr[0]; unsigned int cur_opt_len = ptr[1] << 3; /* convert to bytes */ if (!cur_opt_len || len < cur_opt_len) break; if (cur_opt_raw_type == opt_raw_type && !ignore) return ptr - opts_start; ptr += cur_opt_len; len -= cur_opt_len; ignore = false; } return -1; } #define __INVALID_OPT_TYPE_MAGIC 0xff /* * Check for validity of options and mark by magic opt type in case it is not * so ndp_msg_next_opt_offset() will ignore it. 
*/ static bool ndp_msg_check_opts(struct ndp_msg *msg) { unsigned char *ptr = ndp_msg_payload_opts(msg); size_t len = ndp_msg_payload_opts_len(msg); struct ndp_msg_opt_type_info *info; while (len > 0) { uint8_t cur_opt_raw_type = ptr[0]; unsigned int cur_opt_len = ptr[1] << 3; /* convert to bytes */ if (!cur_opt_len) return false; if (len < cur_opt_len) break; info = ndp_msg_opt_type_info_by_raw_type(cur_opt_raw_type); if (info) { if (cur_opt_len < info->raw_struct_size || (info->check_valid && !info->check_valid(ptr))) ptr[0] = __INVALID_OPT_TYPE_MAGIC; } ptr += cur_opt_len; len -= cur_opt_len; } return true; } /** * SECTION: msg_opt getters/setters * @short_description: Getters and setters for options */ /** * ndp_msg_opt_slladdr: * @msg: message structure * @offset: in-message offset * * Get source linkaddr. * User should use this function only inside ndp_msg_opt_for_each_offset() * macro loop. * * Returns: pointer to source linkaddr. **/ NDP_EXPORT unsigned char *ndp_msg_opt_slladdr(struct ndp_msg *msg, int offset) { unsigned char *opt_data = ndp_msg_payload_opts_offset(msg, offset); return &opt_data[2]; } /** * ndp_msg_opt_slladdr_len: * @msg: message structure * @offset: in-message offset * * Get source linkaddr length. * User should use this function only inside ndp_msg_opt_for_each_offset() * macro loop. * * Returns: source linkaddr length. **/ NDP_EXPORT size_t ndp_msg_opt_slladdr_len(struct ndp_msg *msg, int offset) { return ETH_ALEN; } /** * ndp_msg_opt_tlladdr: * @msg: message structure * @offset: in-message offset * * Get target linkaddr. * User should use this function only inside ndp_msg_opt_for_each_offset() * macro loop. * * Returns: pointer to target linkaddr. 
**/ NDP_EXPORT unsigned char *ndp_msg_opt_tlladdr(struct ndp_msg *msg, int offset) { unsigned char *opt_data = ndp_msg_payload_opts_offset(msg, offset); return &opt_data[2]; } /** * ndp_msg_opt_tlladdr_len: * @msg: message structure * @offset: in-message offset * * Get target linkaddr length. * User should use this function only inside ndp_msg_opt_for_each_offset() * macro loop. * * Returns: target linkaddr length. **/ NDP_EXPORT size_t ndp_msg_opt_tlladdr_len(struct ndp_msg *msg, int offset) { return ETH_ALEN; } /** * ndp_msg_opt_prefix: * @msg: message structure * @offset: in-message offset * * Get prefix addr. * User should use this function only inside ndp_msg_opt_for_each_offset() * macro loop. * * Returns: pointer to address. **/ NDP_EXPORT struct in6_addr *ndp_msg_opt_prefix(struct ndp_msg *msg, int offset) { struct nd_opt_prefix_info *pi = ndp_msg_payload_opts_offset(msg, offset); return &pi->nd_opt_pi_prefix; } /** * ndp_msg_opt_prefix_len: * @msg: message structure * @offset: in-message offset * * Get prefix length. * User should use this function only inside ndp_msg_opt_for_each_offset() * macro loop. * * Returns: length of prefix. **/ NDP_EXPORT uint8_t ndp_msg_opt_prefix_len(struct ndp_msg *msg, int offset) { struct nd_opt_prefix_info *pi = ndp_msg_payload_opts_offset(msg, offset); return pi->nd_opt_pi_prefix_len; } /** * ndp_msg_opt_prefix_valid_time: * @msg: message structure * @offset: in-message offset * * Get prefix valid time. * User should use this function only inside ndp_msg_opt_for_each_offset() * macro loop. * * Returns: valid time in seconds, (uint32_t) -1 means infinity. **/ NDP_EXPORT uint32_t ndp_msg_opt_prefix_valid_time(struct ndp_msg *msg, int offset) { struct nd_opt_prefix_info *pi = ndp_msg_payload_opts_offset(msg, offset); return ntohl(pi->nd_opt_pi_valid_time); } /** * ndp_msg_opt_prefix_preferred_time: * @msg: message structure * @offset: in-message offset * * Get prefix preferred time. 
* User should use this function only inside ndp_msg_opt_for_each_offset() * macro loop. * * Returns: preferred time in seconds, (uint32_t) -1 means infinity. **/ NDP_EXPORT uint32_t ndp_msg_opt_prefix_preferred_time(struct ndp_msg *msg, int offset) { struct nd_opt_prefix_info *pi = ndp_msg_payload_opts_offset(msg, offset); return ntohl(pi->nd_opt_pi_preferred_time); } /** * ndp_msg_opt_prefix_flag_on_link: * @msg: message structure * @offset: in-message offset * * Get on-link flag. * User should use this function only inside ndp_msg_opt_for_each_offset() * macro loop. * * Returns: on-link flag. **/ NDP_EXPORT bool ndp_msg_opt_prefix_flag_on_link(struct ndp_msg *msg, int offset) { struct nd_opt_prefix_info *pi = ndp_msg_payload_opts_offset(msg, offset); return pi->nd_opt_pi_flags_reserved & ND_OPT_PI_FLAG_ONLINK; } /** * ndp_msg_opt_prefix_flag_auto_addr_conf: * @msg: message structure * @offset: in-message offset * * Get autonomous address-configuration flag. * User should use this function only inside ndp_msg_opt_for_each_offset() * macro loop. * * Returns: autonomous address-configuration flag. **/ NDP_EXPORT bool ndp_msg_opt_prefix_flag_auto_addr_conf(struct ndp_msg *msg, int offset) { struct nd_opt_prefix_info *pi = ndp_msg_payload_opts_offset(msg, offset); return pi->nd_opt_pi_flags_reserved & ND_OPT_PI_FLAG_AUTO; } /** * ndp_msg_opt_prefix_flag_router_addr: * @msg: message structure * @offset: in-message offset * * Get router address flag. * User should use this function only inside ndp_msg_opt_for_each_offset() * macro loop. * * Returns: router address flag. **/ NDP_EXPORT bool ndp_msg_opt_prefix_flag_router_addr(struct ndp_msg *msg, int offset) { struct nd_opt_prefix_info *pi = ndp_msg_payload_opts_offset(msg, offset); return pi->nd_opt_pi_flags_reserved & ND_OPT_PI_FLAG_RADDR; } /** * ndp_msg_opt_mtu: * @msg: message structure * @offset: in-message offset * * Get MTU. User should check if mtu option is present before calling this. * * Returns: MTU. 
**/ NDP_EXPORT uint32_t ndp_msg_opt_mtu(struct ndp_msg *msg, int offset) { struct nd_opt_mtu *mtu = ndp_msg_payload_opts_offset(msg, offset); return ntohl(mtu->nd_opt_mtu_mtu); } /** * ndp_msg_opt_route_prefix: * @msg: message structure * @offset: in-message offset * * Get route prefix addr. * User should use this function only inside ndp_msg_opt_for_each_offset() * macro loop. * * Returns: address. **/ NDP_EXPORT struct in6_addr *ndp_msg_opt_route_prefix(struct ndp_msg *msg, int offset) { static struct in6_addr prefix; struct __nd_opt_route_info *ri = ndp_msg_payload_opts_offset(msg, offset); memset(&prefix, 0, sizeof(prefix)); memcpy(&prefix, &ri->nd_opt_ri_prefix, (ri->nd_opt_ri_len - 1) << 3); return &prefix; } /** * ndp_msg_opt_route_prefix_len: * @msg: message structure * @offset: in-message offset * * Get route prefix length. * User should use this function only inside ndp_msg_opt_for_each_offset() * macro loop. * * Returns: length of route prefix. **/ NDP_EXPORT uint8_t ndp_msg_opt_route_prefix_len(struct ndp_msg *msg, int offset) { struct __nd_opt_route_info *ri = ndp_msg_payload_opts_offset(msg, offset); return ri->nd_opt_ri_prefix_len; } /** * ndp_msg_opt_route_lifetime: * @msg: message structure * @offset: in-message offset * * Get route lifetime. * User should use this function only inside ndp_msg_opt_for_each_offset() * macro loop. * * Returns: route lifetime in seconds, (uint32_t) -1 means infinity. **/ NDP_EXPORT uint32_t ndp_msg_opt_route_lifetime(struct ndp_msg *msg, int offset) { struct __nd_opt_route_info *ri = ndp_msg_payload_opts_offset(msg, offset); return ntohl(ri->nd_opt_ri_lifetime); } /** * ndp_msg_opt_route_preference: * @msg: message structure * @offset: in-message offset * * Get route preference. * User should use this function only inside ndp_msg_opt_for_each_offset() * macro loop. * * Returns: route preference. 
**/ NDP_EXPORT enum ndp_route_preference ndp_msg_opt_route_preference(struct ndp_msg *msg, int offset) { struct __nd_opt_route_info *ri = ndp_msg_payload_opts_offset(msg, offset); return (ri->nd_opt_ri_prf_reserved >> 3) & 3; } /** * ndp_msg_opt_rdnss_lifetime: * @msg: message structure * @offset: in-message offset * * Get Recursive DNS Server lifetime. * User should use this function only inside ndp_msg_opt_for_each_offset() * macro loop. * * Returns: route lifetime in seconds, (uint32_t) -1 means infinity. **/ NDP_EXPORT uint32_t ndp_msg_opt_rdnss_lifetime(struct ndp_msg *msg, int offset) { struct __nd_opt_rdnss *rdnss = ndp_msg_payload_opts_offset(msg, offset); return ntohl(rdnss->nd_opt_rdnss_lifetime); } /** * ndp_msg_opt_rdnss_addr: * @msg: message structure * @offset: in-message offset * @addr_index: address index * * Get Recursive DNS Server address. * User should use this function only inside ndp_msg_opt_for_each_offset() * macro loop. * * Returns: address. **/ NDP_EXPORT struct in6_addr *ndp_msg_opt_rdnss_addr(struct ndp_msg *msg, int offset, int addr_index) { static struct in6_addr addr; struct __nd_opt_rdnss *rdnss = ndp_msg_payload_opts_offset(msg, offset); size_t len = rdnss->nd_opt_rdnss_len << 3; /* convert to bytes */ len -= in_struct_offset(struct __nd_opt_rdnss, nd_opt_rdnss_addresses); if ((addr_index + 1) * sizeof(addr) > len) return NULL; memcpy(&addr, &rdnss->nd_opt_rdnss_addresses[addr_index * sizeof(addr)], sizeof(addr)); return &addr; } /** * ndp_msg_opt_dnssl_lifetime: * @msg: message structure * @offset: in-message offset * * Get DNS Search List lifetime. * User should use this function only inside ndp_msg_opt_for_each_offset() * macro loop. * * Returns: route lifetime in seconds, (uint32_t) -1 means infinity. 
**/ NDP_EXPORT uint32_t ndp_msg_opt_dnssl_lifetime(struct ndp_msg *msg, int offset) { struct __nd_opt_dnssl *dnssl = ndp_msg_payload_opts_offset(msg, offset); return ntohl(dnssl->nd_opt_dnssl_lifetime); } /** * ndp_msg_opt_dnssl_domain: * @msg: message structure * @offset: in-message offset * @domain_index: domain index * * Get DNS Search List domain. * User should use this function only inside ndp_msg_opt_for_each_offset() * macro loop. * * Returns: address. **/ NDP_EXPORT char *ndp_msg_opt_dnssl_domain(struct ndp_msg *msg, int offset, int domain_index) { int i; static char buf[256]; struct __nd_opt_dnssl *dnssl = ndp_msg_payload_opts_offset(msg, offset); size_t len = dnssl->nd_opt_dnssl_len << 3; /* convert to bytes */ char *ptr; len -= in_struct_offset(struct __nd_opt_dnssl, nd_opt_dnssl_domains); ptr = dnssl->nd_opt_dnssl_domains; i = 0; while (len > 0) { size_t buf_len = 0; while (len > 0) { uint8_t dom_len = *ptr; ptr++; len--; if (!dom_len) break; if (dom_len > len) return NULL; if (buf_len + dom_len + 1 > sizeof(buf)) return NULL; memcpy(buf + buf_len, ptr, dom_len); buf[buf_len + dom_len] = '.'; ptr += dom_len; len -= dom_len; buf_len += dom_len + 1; } if (!buf_len) break; buf[buf_len - 1] = '\0'; /* overwrite final '.' 
*/ if (i++ == domain_index) return buf; } return NULL; } static int ndp_call_handlers(struct ndp *ndp, struct ndp_msg *msg); static int ndp_sock_recv(struct ndp *ndp) { struct ndp_msg *msg; enum ndp_msg_type msg_type; size_t len; int err; msg = ndp_msg_alloc(); if (!msg) return -ENOMEM; len = ndp_msg_payload_maxlen(msg); err = myrecvfrom6(ndp->sock, msg->buf, &len, 0, &msg->addrto, &msg->ifindex, &msg->hoplimit); if (err) { err(ndp, "Failed to receive message"); goto free_msg; } dbg(ndp, "rcvd from: %s, ifindex: %u, hoplimit: %d", str_in6_addr(&msg->addrto), msg->ifindex, msg->hoplimit); if (msg->hoplimit != 255) { warn(ndp, "ignoring packet with bad hop limit (%d)", msg->hoplimit); err = 0; goto free_msg; } if (len < sizeof(*msg->icmp6_hdr)) { warn(ndp, "rcvd icmp6 packet too short (%luB)", len); err = 0; goto free_msg; } err = ndp_msg_type_by_raw_type(&msg_type, msg->icmp6_hdr->icmp6_type); if (err) { err = 0; goto free_msg; } ndp_msg_init(msg, msg_type); ndp_msg_payload_len_set(msg, len); if (!ndp_msg_check_valid(msg)) { warn(ndp, "rcvd invalid ND message"); err = 0; goto free_msg; } dbg(ndp, "rcvd %s, len: %zuB", ndp_msg_type_info(msg_type)->strabbr, len); if (!ndp_msg_check_opts(msg)) { err = 0; goto free_msg; } err = ndp_call_handlers(ndp, msg);; free_msg: ndp_msg_destroy(msg); return err; } /** * SECTION: msgrcv handler * @short_description: msgrcv handler and related stuff */ struct ndp_msgrcv_handler_item { struct list_item list; ndp_msgrcv_handler_func_t func; enum ndp_msg_type msg_type; uint32_t ifindex; void * priv; }; static struct ndp_msgrcv_handler_item * ndp_find_msgrcv_handler_item(struct ndp *ndp, ndp_msgrcv_handler_func_t func, enum ndp_msg_type msg_type, uint32_t ifindex, void *priv) { struct ndp_msgrcv_handler_item *handler_item; list_for_each_node_entry(handler_item, &ndp->msgrcv_handler_list, list) if (handler_item->func == func && handler_item->msg_type == msg_type && handler_item->ifindex == ifindex && handler_item->priv == priv) return 
handler_item; return NULL; } static int ndp_call_handlers(struct ndp *ndp, struct ndp_msg *msg) { struct ndp_msgrcv_handler_item *handler_item; int err; list_for_each_node_entry(handler_item, &ndp->msgrcv_handler_list, list) { if (handler_item->msg_type != NDP_MSG_ALL && handler_item->msg_type != ndp_msg_type(msg)) continue; if (handler_item->ifindex && handler_item->ifindex != msg->ifindex) continue; err = handler_item->func(ndp, msg, handler_item->priv); if (err) return err; } return 0; } /** * ndp_msgrcv_handler_register: * @ndp: libndp library context * @func: handler function for received messages * @msg_type: message type to match * @ifindex: interface index to match * @priv: func private data * * Registers custom @func handler which is going to be called when * specified @msg_type is received. If one wants the function to be * called for all message types, pass NDP_MSG_ALL, * Note that @ifindex can be set to filter only messages received on * specified interface. For @func to be called for messages received on * all interfaces, just set 0. * * Returns: zero on success or negative number in case of an error. **/ NDP_EXPORT int ndp_msgrcv_handler_register(struct ndp *ndp, ndp_msgrcv_handler_func_t func, enum ndp_msg_type msg_type, uint32_t ifindex, void *priv) { struct ndp_msgrcv_handler_item *handler_item; if (ndp_find_msgrcv_handler_item(ndp, func, msg_type, ifindex, priv)) return -EEXIST; if (!func) return -EINVAL; handler_item = malloc(sizeof(*handler_item)); if (!handler_item) return -ENOMEM; handler_item->func = func; handler_item->msg_type = msg_type; handler_item->ifindex = ifindex; handler_item->priv = priv; list_add_tail(&ndp->msgrcv_handler_list, &handler_item->list); return 0; } /** * ndp_msgrcv_handler_unregister: * @ndp: libndp library context * @func: handler function for received messages * @msg_type: message type to match * @ifindex: interface index to match * @priv: func private data * * Unregisters custom @func handler. 
* **/ NDP_EXPORT void ndp_msgrcv_handler_unregister(struct ndp *ndp, ndp_msgrcv_handler_func_t func, enum ndp_msg_type msg_type, uint32_t ifindex, void *priv) { struct ndp_msgrcv_handler_item *handler_item; handler_item = ndp_find_msgrcv_handler_item(ndp, func, msg_type, ifindex, priv); if (!handler_item) return; list_del(&handler_item->list); free(handler_item); } /** * SECTION: event fd * @short_description: event filedescriptor related stuff */ /** * ndp_get_eventfd: * @ndp: libndp library context * * Get eventfd filedesctiptor. * * Returns: fd. **/ NDP_EXPORT int ndp_get_eventfd(struct ndp *ndp) { return ndp->sock; } /** * ndp_call_eventfd_handler: * @ndp: libndp library context * * Call eventfd handler. * * Returns: zero on success or negative number in case of an error. **/ NDP_EXPORT int ndp_call_eventfd_handler(struct ndp *ndp) { return ndp_sock_recv(ndp); } /** * ndp_callall_eventfd_handler: * @ndp: libndp library context * * Call all pending events on eventfd handler. * * Returns: zero on success or negative number in case of an error. **/ NDP_EXPORT int ndp_callall_eventfd_handler(struct ndp *ndp) { fd_set rfds; int fdmax; struct timeval tv; int fd = ndp_get_eventfd(ndp); int ret; int err; memset(&tv, 0, sizeof(tv)); FD_ZERO(&rfds); FD_SET(fd, &rfds); fdmax = fd + 1; while (true) { ret = select(fdmax, &rfds, NULL, NULL, &tv); if (ret == -1) return -errno; if (!FD_ISSET(fd, &rfds)) return 0; err = ndp_call_eventfd_handler(ndp); if (err) return err; } } /** * SECTION: Exported context functions * @short_description: Core context functions exported to user */ /** * ndp_open: * @p_ndp: pointer where new libndp library context address will be stored * * Allocates and initializes library context, opens raw socket. * * Returns: zero on success or negative number in case of an error. 
**/ NDP_EXPORT int ndp_open(struct ndp **p_ndp) { struct ndp *ndp; const char *env; int err; ndp = myzalloc(sizeof(*ndp)); if (!ndp) return -ENOMEM; ndp->log_fn = log_stderr; ndp->log_priority = LOG_ERR; /* environment overwrites config */ env = getenv("NDP_LOG"); if (env != NULL) ndp_set_log_priority(ndp, log_priority(env)); dbg(ndp, "ndp context %p created.", ndp); dbg(ndp, "log_priority=%d", ndp->log_priority); list_init(&ndp->msgrcv_handler_list); err = ndp_sock_open(ndp); if (err) goto free_ndp; *p_ndp = ndp; return 0; free_ndp: free(ndp); return err; } /** * ndp_close: * @ndp: libndp library context * * Do library context cleanup. **/ NDP_EXPORT void ndp_close(struct ndp *ndp) { ndp_sock_close(ndp); free(ndp); }
./CrossVul/dataset_final_sorted/CWE-284/c/good_5017_0
crossvul-cpp_data_bad_2421_0
/* * Copyright (c) 2005-2007 William Pitcock, et al. * Rights to this code are as documented in doc/LICENSE. * * This file contains code for the CService FLAGS functions. * */ #include "atheme.h" #include "template.h" DECLARE_MODULE_V1 ( "chanserv/flags", false, _modinit, _moddeinit, PACKAGE_STRING, "Atheme Development Group <http://www.atheme.org>" ); static void cs_cmd_flags(sourceinfo_t *si, int parc, char *parv[]); command_t cs_flags = { "FLAGS", N_("Manipulates specific permissions on a channel."), AC_NONE, 3, cs_cmd_flags, { .path = "cservice/flags" } }; void _modinit(module_t *m) { service_named_bind_command("chanserv", &cs_flags); } void _moddeinit(module_unload_intent_t intent) { service_named_unbind_command("chanserv", &cs_flags); } typedef struct { const char *res; unsigned int level; } template_iter_t; static int global_template_search(const char *key, void *data, void *privdata) { template_iter_t *iter = privdata; default_template_t *def_t = data; if (def_t->flags == iter->level) iter->res = key; return 0; } static const char *get_template_name(mychan_t *mc, unsigned int level) { metadata_t *md; const char *p, *q, *r; char *s; char ss[40]; static char flagname[400]; template_iter_t iter; md = metadata_find(mc, "private:templates"); if (md != NULL) { p = md->value; while (p != NULL) { while (*p == ' ') p++; q = strchr(p, '='); if (q == NULL) break; r = strchr(q, ' '); if (r != NULL && r < q) break; mowgli_strlcpy(ss, q, sizeof ss); if (r != NULL && r - q < (int)(sizeof ss - 1)) { ss[r - q] = '\0'; } if (level == flags_to_bitmask(ss, 0)) { mowgli_strlcpy(flagname, p, sizeof flagname); s = strchr(flagname, '='); if (s != NULL) *s = '\0'; return flagname; } p = r; } } iter.res = NULL; iter.level = level; mowgli_patricia_foreach(global_template_dict, global_template_search, &iter); return iter.res; } static void do_list(sourceinfo_t *si, mychan_t *mc, unsigned int flags) { chanacs_t *ca; mowgli_node_t *n; bool operoverride = false; unsigned int i = 1; if 
(!(mc->flags & MC_PUBACL) && !chanacs_source_has_flag(mc, si, CA_ACLVIEW)) { if (has_priv(si, PRIV_CHAN_AUSPEX)) operoverride = true; else { command_fail(si, fault_noprivs, _("You are not authorized to perform this operation.")); return; } } command_success_nodata(si, _("Entry Nickname/Host Flags")); command_success_nodata(si, "----- ---------------------- -----"); MOWGLI_ITER_FOREACH(n, mc->chanacs.head) { const char *template, *mod_ago; struct tm tm; char mod_date[64]; ca = n->data; if (flags && !(ca->level & flags)) continue; template = get_template_name(mc, ca->level); mod_ago = ca->tmodified ? time_ago(ca->tmodified) : "?"; tm = *localtime(&ca->tmodified); strftime(mod_date, sizeof mod_date, TIME_FORMAT, &tm); if (template != NULL) command_success_nodata(si, _("%-5d %-22s %-20s (%s) (%s) [modified %s ago, on %s]"), i, ca->entity ? ca->entity->name : ca->host, bitmask_to_flags(ca->level), template, mc->name, mod_ago, mod_date); else command_success_nodata(si, _("%-5d %-22s %-20s (%s) [modified %s ago, on %s]"), i, ca->entity ? 
ca->entity->name : ca->host, bitmask_to_flags(ca->level), mc->name, mod_ago, mod_date); i++; } command_success_nodata(si, "----- ---------------------- -----"); command_success_nodata(si, _("End of \2%s\2 FLAGS listing."), mc->name); if (operoverride) logcommand(si, CMDLOG_ADMIN, "FLAGS: \2%s\2 (oper override)", mc->name); else logcommand(si, CMDLOG_GET, "FLAGS: \2%s\2", mc->name); } /* FLAGS <channel> [user] [flags] */ static void cs_cmd_flags(sourceinfo_t *si, int parc, char *parv[]) { chanacs_t *ca; mowgli_node_t *n; char *channel = parv[0]; char *target = sstrdup(parv[1]); char *flagstr = parv[2]; const char *str1; unsigned int addflags, removeflags, restrictflags; hook_channel_acl_req_t req; mychan_t *mc; if (parc < 1) { command_fail(si, fault_needmoreparams, STR_INSUFFICIENT_PARAMS, "FLAGS"); command_fail(si, fault_needmoreparams, _("Syntax: FLAGS <channel> [target] [flags]")); return; } mc = mychan_find(channel); if (!mc) { command_fail(si, fault_nosuch_target, _("Channel \2%s\2 is not registered."), channel); return; } if (metadata_find(mc, "private:close:closer") && (target || !has_priv(si, PRIV_CHAN_AUSPEX))) { command_fail(si, fault_noprivs, _("\2%s\2 is closed."), channel); return; } if (!target || (target && target[0] == '+' && flagstr == NULL)) { unsigned int flags = (target != NULL) ? flags_to_bitmask(target, 0) : 0; do_list(si, mc, flags); return; } /* * following conditions are for compatibility with Anope just to avoid a whole clusterfuck * of confused users caused by their 'innovation.' yeah, that's a word for it alright. * * anope 1.9's shiny new FLAGS command has: * * FLAGS #channel LIST * FLAGS #channel MODIFY user flagspec * FLAGS #channel CLEAR * * obviously they do not support the atheme syntax, because lets face it, they like to * 'innovate.' this is, of course, hilarious for obvious reasons. never mind that we * *invented* the FLAGS system for channel ACLs, so you would think they would find it * worthwhile to be compatible here. 
i guess that would have been too obvious or something * about their whole 'stealing our design' thing that they have been doing in 1.9 since the * beginning... or do i mean 'innovating?' * * anyway we rewrite the commands as appropriate in the two if blocks below so that they * are processed by the flags code as the user would intend. obviously, we're not really * capable of handling the anope flag model (which makes honestly zero sense to me, and is * extremely complex which kind of misses the entire point of the flags UI design...) so if * some user tries passing anope flags, it will probably be hilarious. the good news is * most of the anope flags tie up to atheme flags in some weird way anyway (probably because, * i don't know, they copied the entire design and then fucked it up? yeah. probably that.) * * --nenolod */ else if (!strcasecmp(target, "LIST") && myentity_find_ext(target) == NULL) { do_list(si, mc, 0); free(target); return; } else if (!strcasecmp(target, "CLEAR") && myentity_find_ext(target) == NULL) { free(target); if (!chanacs_source_has_flag(mc, si, CA_FOUNDER)) { command_fail(si, fault_noprivs, "You are not authorized to perform this operation."); return; } mowgli_node_t *tn; MOWGLI_ITER_FOREACH_SAFE(n, tn, mc->chanacs.head) { ca = n->data; if (ca->level & CA_FOUNDER) continue; object_unref(ca); } logcommand(si, CMDLOG_DO, "CLEAR:FLAGS: \2%s\2", mc->name); command_success_nodata(si, _("Cleared flags in \2%s\2."), mc->name); return; } else if (!strcasecmp(target, "MODIFY") && myentity_find_ext(target) == NULL) { free(target); if (parc < 3) { command_fail(si, fault_needmoreparams, STR_INSUFFICIENT_PARAMS, "FLAGS"); command_fail(si, fault_needmoreparams, _("Syntax: FLAGS <#channel> MODIFY [target] <flags>")); return; } flagstr = strchr(parv[2], ' '); if (flagstr) *flagstr++ = '\0'; target = strdup(parv[2]); } { myentity_t *mt; if (!si->smu) { command_fail(si, fault_noprivs, _("You are not logged in.")); return; } if (!flagstr) { if (!(mc->flags & 
MC_PUBACL) && !chanacs_source_has_flag(mc, si, CA_ACLVIEW)) { command_fail(si, fault_noprivs, _("You are not authorized to execute this command.")); return; } if (validhostmask(target)) ca = chanacs_find_host_literal(mc, target, 0); else { if (!(mt = myentity_find_ext(target))) { command_fail(si, fault_nosuch_target, _("\2%s\2 is not registered."), target); return; } free(target); target = sstrdup(mt->name); ca = chanacs_find_literal(mc, mt, 0); } if (ca != NULL) { str1 = bitmask_to_flags2(ca->level, 0); command_success_string(si, str1, _("Flags for \2%s\2 in \2%s\2 are \2%s\2."), target, channel, str1); } else command_success_string(si, "", _("No flags for \2%s\2 in \2%s\2."), target, channel); logcommand(si, CMDLOG_GET, "FLAGS: \2%s\2 on \2%s\2", mc->name, target); return; } /* founder may always set flags -- jilles */ restrictflags = chanacs_source_flags(mc, si); if (restrictflags & CA_FOUNDER) restrictflags = ca_all; else { if (!(restrictflags & CA_FLAGS)) { /* allow a user to remove their own access * even without +f */ if (restrictflags & CA_AKICK || si->smu == NULL || irccasecmp(target, entity(si->smu)->name) || strcmp(flagstr, "-*")) { command_fail(si, fault_noprivs, _("You are not authorized to execute this command.")); return; } } if (irccasecmp(target, entity(si->smu)->name)) restrictflags = allow_flags(mc, restrictflags); else restrictflags |= allow_flags(mc, restrictflags); } if (*flagstr == '+' || *flagstr == '-' || *flagstr == '=') { flags_make_bitmasks(flagstr, &addflags, &removeflags); if (addflags == 0 && removeflags == 0) { command_fail(si, fault_badparams, _("No valid flags given, use /%s%s HELP FLAGS for a list"), ircd->uses_rcommand ? 
"" : "msg ", chansvs.me->disp); return; } } else { addflags = get_template_flags(mc, flagstr); if (addflags == 0) { /* Hack -- jilles */ if (*target == '+' || *target == '-' || *target == '=') command_fail(si, fault_badparams, _("Usage: FLAGS %s [target] [flags]"), mc->name); else command_fail(si, fault_badparams, _("Invalid template name given, use /%s%s TEMPLATE %s for a list"), ircd->uses_rcommand ? "" : "msg ", chansvs.me->disp, mc->name); return; } removeflags = ca_all & ~addflags; } if (!validhostmask(target)) { if (!(mt = myentity_find_ext(target))) { command_fail(si, fault_nosuch_target, _("\2%s\2 is not registered."), target); return; } free(target); target = sstrdup(mt->name); ca = chanacs_open(mc, mt, NULL, true, entity(si->smu)); if (ca->level & CA_FOUNDER && removeflags & CA_FLAGS && !(removeflags & CA_FOUNDER)) { command_fail(si, fault_noprivs, _("You may not remove a founder's +f access.")); return; } if (ca->level & CA_FOUNDER && removeflags & CA_FOUNDER && mychan_num_founders(mc) == 1) { command_fail(si, fault_noprivs, _("You may not remove the last founder.")); return; } if (!(ca->level & CA_FOUNDER) && addflags & CA_FOUNDER) { if (mychan_num_founders(mc) >= chansvs.maxfounders) { command_fail(si, fault_noprivs, _("Only %d founders allowed per channel."), chansvs.maxfounders); chanacs_close(ca); return; } if (!myentity_can_register_channel(mt)) { command_fail(si, fault_toomany, _("\2%s\2 has too many channels registered."), mt->name); chanacs_close(ca); return; } if (!myentity_allow_foundership(mt)) { command_fail(si, fault_toomany, _("\2%s\2 cannot take foundership of a channel."), mt->name); chanacs_close(ca); return; } } if (addflags & CA_FOUNDER) addflags |= CA_FLAGS, removeflags &= ~CA_FLAGS; /* If NEVEROP is set, don't allow adding new entries * except sole +b. Adding flags if the current level * is +b counts as adding an entry. 
* -- jilles */ /* XXX: not all entities are users */ if (isuser(mt) && (MU_NEVEROP & user(mt)->flags && addflags != CA_AKICK && addflags != 0 && (ca->level == 0 || ca->level == CA_AKICK))) { command_fail(si, fault_noprivs, _("\2%s\2 does not wish to be added to channel access lists (NEVEROP set)."), mt->name); chanacs_close(ca); return; } if (ca->level == 0 && chanacs_is_table_full(ca)) { command_fail(si, fault_toomany, _("Channel %s access list is full."), mc->name); chanacs_close(ca); return; } req.ca = ca; req.oldlevel = ca->level; if (!chanacs_modify(ca, &addflags, &removeflags, restrictflags)) { command_fail(si, fault_noprivs, _("You are not allowed to set \2%s\2 on \2%s\2 in \2%s\2."), bitmask_to_flags2(addflags, removeflags), mt->name, mc->name); chanacs_close(ca); return; } req.newlevel = ca->level; hook_call_channel_acl_change(&req); chanacs_close(ca); } else { if (addflags & CA_FOUNDER) { command_fail(si, fault_badparams, _("You may not set founder status on a hostmask.")); return; } ca = chanacs_open(mc, NULL, target, true, entity(si->smu)); if (ca->level == 0 && chanacs_is_table_full(ca)) { command_fail(si, fault_toomany, _("Channel %s access list is full."), mc->name); chanacs_close(ca); return; } req.ca = ca; req.oldlevel = ca->level; if (!chanacs_modify(ca, &addflags, &removeflags, restrictflags)) { command_fail(si, fault_noprivs, _("You are not allowed to set \2%s\2 on \2%s\2 in \2%s\2."), bitmask_to_flags2(addflags, removeflags), target, mc->name); chanacs_close(ca); return; } req.newlevel = ca->level; hook_call_channel_acl_change(&req); chanacs_close(ca); } if ((addflags | removeflags) == 0) { command_fail(si, fault_nochange, _("Channel access to \2%s\2 for \2%s\2 unchanged."), channel, target); return; } flagstr = bitmask_to_flags2(addflags, removeflags); command_success_nodata(si, _("Flags \2%s\2 were set on \2%s\2 in \2%s\2."), flagstr, target, channel); logcommand(si, CMDLOG_SET, "FLAGS: \2%s\2 \2%s\2 \2%s\2", mc->name, target, flagstr); 
verbose(mc, "\2%s\2 set flags \2%s\2 on \2%s\2", get_source_name(si), flagstr, target); } free(target); } /* vim:cinoptions=>s,e0,n0,f0,{0,}0,^0,=s,ps,t0,c3,+s,(2s,us,)20,*30,gs,hs * vim:ts=8 * vim:sw=8 * vim:noexpandtab */
./CrossVul/dataset_final_sorted/CWE-284/c/bad_2421_0
crossvul-cpp_data_good_5019_0
/* * vMTRR implementation * * Copyright (C) 2006 Qumranet, Inc. * Copyright 2010 Red Hat, Inc. and/or its affiliates. * Copyright(C) 2015 Intel Corporation. * * Authors: * Yaniv Kamay <yaniv@qumranet.com> * Avi Kivity <avi@qumranet.com> * Marcelo Tosatti <mtosatti@redhat.com> * Paolo Bonzini <pbonzini@redhat.com> * Xiao Guangrong <guangrong.xiao@linux.intel.com> * * This work is licensed under the terms of the GNU GPL, version 2. See * the COPYING file in the top-level directory. */ #include <linux/kvm_host.h> #include <asm/mtrr.h> #include "cpuid.h" #include "mmu.h" #define IA32_MTRR_DEF_TYPE_E (1ULL << 11) #define IA32_MTRR_DEF_TYPE_FE (1ULL << 10) #define IA32_MTRR_DEF_TYPE_TYPE_MASK (0xff) static bool msr_mtrr_valid(unsigned msr) { switch (msr) { case 0x200 ... 0x200 + 2 * KVM_NR_VAR_MTRR - 1: case MSR_MTRRfix64K_00000: case MSR_MTRRfix16K_80000: case MSR_MTRRfix16K_A0000: case MSR_MTRRfix4K_C0000: case MSR_MTRRfix4K_C8000: case MSR_MTRRfix4K_D0000: case MSR_MTRRfix4K_D8000: case MSR_MTRRfix4K_E0000: case MSR_MTRRfix4K_E8000: case MSR_MTRRfix4K_F0000: case MSR_MTRRfix4K_F8000: case MSR_MTRRdefType: case MSR_IA32_CR_PAT: return true; } return false; } static bool valid_pat_type(unsigned t) { return t < 8 && (1 << t) & 0xf3; /* 0, 1, 4, 5, 6, 7 */ } static bool valid_mtrr_type(unsigned t) { return t < 8 && (1 << t) & 0x73; /* 0, 1, 4, 5, 6 */ } bool kvm_mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data) { int i; u64 mask; if (!msr_mtrr_valid(msr)) return false; if (msr == MSR_IA32_CR_PAT) { for (i = 0; i < 8; i++) if (!valid_pat_type((data >> (i * 8)) & 0xff)) return false; return true; } else if (msr == MSR_MTRRdefType) { if (data & ~0xcff) return false; return valid_mtrr_type(data & 0xff); } else if (msr >= MSR_MTRRfix64K_00000 && msr <= MSR_MTRRfix4K_F8000) { for (i = 0; i < 8 ; i++) if (!valid_mtrr_type((data >> (i * 8)) & 0xff)) return false; return true; } /* variable MTRRs */ WARN_ON(!(msr >= 0x200 && msr < 0x200 + 2 * KVM_NR_VAR_MTRR)); mask = (~0ULL) 
<< cpuid_maxphyaddr(vcpu); if ((msr & 1) == 0) { /* MTRR base */ if (!valid_mtrr_type(data & 0xff)) return false; mask |= 0xf00; } else /* MTRR mask */ mask |= 0x7ff; if (data & mask) { kvm_inject_gp(vcpu, 0); return false; } return true; } EXPORT_SYMBOL_GPL(kvm_mtrr_valid); static bool mtrr_is_enabled(struct kvm_mtrr *mtrr_state) { return !!(mtrr_state->deftype & IA32_MTRR_DEF_TYPE_E); } static bool fixed_mtrr_is_enabled(struct kvm_mtrr *mtrr_state) { return !!(mtrr_state->deftype & IA32_MTRR_DEF_TYPE_FE); } static u8 mtrr_default_type(struct kvm_mtrr *mtrr_state) { return mtrr_state->deftype & IA32_MTRR_DEF_TYPE_TYPE_MASK; } static u8 mtrr_disabled_type(struct kvm_vcpu *vcpu) { /* * Intel SDM 11.11.2.2: all MTRRs are disabled when * IA32_MTRR_DEF_TYPE.E bit is cleared, and the UC * memory type is applied to all of physical memory. * * However, virtual machines can be run with CPUID such that * there are no MTRRs. In that case, the firmware will never * enable MTRRs and it is obviously undesirable to run the * guest entirely with UC memory and we use WB. */ if (guest_cpuid_has_mtrr(vcpu)) return MTRR_TYPE_UNCACHABLE; else return MTRR_TYPE_WRBACK; } /* * Three terms are used in the following code: * - segment, it indicates the address segments covered by fixed MTRRs. * - unit, it corresponds to the MSR entry in the segment. * - range, a range is covered in one memory cache type. */ struct fixed_mtrr_segment { u64 start; u64 end; int range_shift; /* the start position in kvm_mtrr.fixed_ranges[]. */ int range_start; }; static struct fixed_mtrr_segment fixed_seg_table[] = { /* MSR_MTRRfix64K_00000, 1 unit. 64K fixed mtrr. */ { .start = 0x0, .end = 0x80000, .range_shift = 16, /* 64K */ .range_start = 0, }, /* * MSR_MTRRfix16K_80000 ... MSR_MTRRfix16K_A0000, 2 units, * 16K fixed mtrr. */ { .start = 0x80000, .end = 0xc0000, .range_shift = 14, /* 16K */ .range_start = 8, }, /* * MSR_MTRRfix4K_C0000 ... MSR_MTRRfix4K_F8000, 8 units, * 4K fixed mtrr. 
*/ { .start = 0xc0000, .end = 0x100000, .range_shift = 12, /* 12K */ .range_start = 24, } }; /* * The size of unit is covered in one MSR, one MSR entry contains * 8 ranges so that unit size is always 8 * 2^range_shift. */ static u64 fixed_mtrr_seg_unit_size(int seg) { return 8 << fixed_seg_table[seg].range_shift; } static bool fixed_msr_to_seg_unit(u32 msr, int *seg, int *unit) { switch (msr) { case MSR_MTRRfix64K_00000: *seg = 0; *unit = 0; break; case MSR_MTRRfix16K_80000 ... MSR_MTRRfix16K_A0000: *seg = 1; *unit = msr - MSR_MTRRfix16K_80000; break; case MSR_MTRRfix4K_C0000 ... MSR_MTRRfix4K_F8000: *seg = 2; *unit = msr - MSR_MTRRfix4K_C0000; break; default: return false; } return true; } static void fixed_mtrr_seg_unit_range(int seg, int unit, u64 *start, u64 *end) { struct fixed_mtrr_segment *mtrr_seg = &fixed_seg_table[seg]; u64 unit_size = fixed_mtrr_seg_unit_size(seg); *start = mtrr_seg->start + unit * unit_size; *end = *start + unit_size; WARN_ON(*end > mtrr_seg->end); } static int fixed_mtrr_seg_unit_range_index(int seg, int unit) { struct fixed_mtrr_segment *mtrr_seg = &fixed_seg_table[seg]; WARN_ON(mtrr_seg->start + unit * fixed_mtrr_seg_unit_size(seg) > mtrr_seg->end); /* each unit has 8 ranges. 
*/ return mtrr_seg->range_start + 8 * unit; } static int fixed_mtrr_seg_end_range_index(int seg) { struct fixed_mtrr_segment *mtrr_seg = &fixed_seg_table[seg]; int n; n = (mtrr_seg->end - mtrr_seg->start) >> mtrr_seg->range_shift; return mtrr_seg->range_start + n - 1; } static bool fixed_msr_to_range(u32 msr, u64 *start, u64 *end) { int seg, unit; if (!fixed_msr_to_seg_unit(msr, &seg, &unit)) return false; fixed_mtrr_seg_unit_range(seg, unit, start, end); return true; } static int fixed_msr_to_range_index(u32 msr) { int seg, unit; if (!fixed_msr_to_seg_unit(msr, &seg, &unit)) return -1; return fixed_mtrr_seg_unit_range_index(seg, unit); } static int fixed_mtrr_addr_to_seg(u64 addr) { struct fixed_mtrr_segment *mtrr_seg; int seg, seg_num = ARRAY_SIZE(fixed_seg_table); for (seg = 0; seg < seg_num; seg++) { mtrr_seg = &fixed_seg_table[seg]; if (mtrr_seg->start <= addr && addr < mtrr_seg->end) return seg; } return -1; } static int fixed_mtrr_addr_seg_to_range_index(u64 addr, int seg) { struct fixed_mtrr_segment *mtrr_seg; int index; mtrr_seg = &fixed_seg_table[seg]; index = mtrr_seg->range_start; index += (addr - mtrr_seg->start) >> mtrr_seg->range_shift; return index; } static u64 fixed_mtrr_range_end_addr(int seg, int index) { struct fixed_mtrr_segment *mtrr_seg = &fixed_seg_table[seg]; int pos = index - mtrr_seg->range_start; return mtrr_seg->start + ((pos + 1) << mtrr_seg->range_shift); } static void var_mtrr_range(struct kvm_mtrr_range *range, u64 *start, u64 *end) { u64 mask; *start = range->base & PAGE_MASK; mask = range->mask & PAGE_MASK; /* This cannot overflow because writing to the reserved bits of * variable MTRRs causes a #GP. 
*/ *end = (*start | ~mask) + 1; } static void update_mtrr(struct kvm_vcpu *vcpu, u32 msr) { struct kvm_mtrr *mtrr_state = &vcpu->arch.mtrr_state; gfn_t start, end; int index; if (msr == MSR_IA32_CR_PAT || !tdp_enabled || !kvm_arch_has_noncoherent_dma(vcpu->kvm)) return; if (!mtrr_is_enabled(mtrr_state) && msr != MSR_MTRRdefType) return; /* fixed MTRRs. */ if (fixed_msr_to_range(msr, &start, &end)) { if (!fixed_mtrr_is_enabled(mtrr_state)) return; } else if (msr == MSR_MTRRdefType) { start = 0x0; end = ~0ULL; } else { /* variable range MTRRs. */ index = (msr - 0x200) / 2; var_mtrr_range(&mtrr_state->var_ranges[index], &start, &end); } kvm_zap_gfn_range(vcpu->kvm, gpa_to_gfn(start), gpa_to_gfn(end)); } static bool var_mtrr_range_is_valid(struct kvm_mtrr_range *range) { return (range->mask & (1 << 11)) != 0; } static void set_var_mtrr_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data) { struct kvm_mtrr *mtrr_state = &vcpu->arch.mtrr_state; struct kvm_mtrr_range *tmp, *cur; int index, is_mtrr_mask; index = (msr - 0x200) / 2; is_mtrr_mask = msr - 0x200 - 2 * index; cur = &mtrr_state->var_ranges[index]; /* remove the entry if it's in the list. */ if (var_mtrr_range_is_valid(cur)) list_del(&mtrr_state->var_ranges[index].node); /* Extend the mask with all 1 bits to the left, since those * bits must implicitly be 0. The bits are then cleared * when reading them. */ if (!is_mtrr_mask) cur->base = data; else cur->mask = data | (-1LL << cpuid_maxphyaddr(vcpu)); /* add it to the list if it's enabled. 
*/ if (var_mtrr_range_is_valid(cur)) { list_for_each_entry(tmp, &mtrr_state->head, node) if (cur->base >= tmp->base) break; list_add_tail(&cur->node, &tmp->node); } } int kvm_mtrr_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data) { int index; if (!kvm_mtrr_valid(vcpu, msr, data)) return 1; index = fixed_msr_to_range_index(msr); if (index >= 0) *(u64 *)&vcpu->arch.mtrr_state.fixed_ranges[index] = data; else if (msr == MSR_MTRRdefType) vcpu->arch.mtrr_state.deftype = data; else if (msr == MSR_IA32_CR_PAT) vcpu->arch.pat = data; else set_var_mtrr_msr(vcpu, msr, data); update_mtrr(vcpu, msr); return 0; } int kvm_mtrr_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata) { int index; /* MSR_MTRRcap is a readonly MSR. */ if (msr == MSR_MTRRcap) { /* * SMRR = 0 * WC = 1 * FIX = 1 * VCNT = KVM_NR_VAR_MTRR */ *pdata = 0x500 | KVM_NR_VAR_MTRR; return 0; } if (!msr_mtrr_valid(msr)) return 1; index = fixed_msr_to_range_index(msr); if (index >= 0) *pdata = *(u64 *)&vcpu->arch.mtrr_state.fixed_ranges[index]; else if (msr == MSR_MTRRdefType) *pdata = vcpu->arch.mtrr_state.deftype; else if (msr == MSR_IA32_CR_PAT) *pdata = vcpu->arch.pat; else { /* Variable MTRRs */ int is_mtrr_mask; index = (msr - 0x200) / 2; is_mtrr_mask = msr - 0x200 - 2 * index; if (!is_mtrr_mask) *pdata = vcpu->arch.mtrr_state.var_ranges[index].base; else *pdata = vcpu->arch.mtrr_state.var_ranges[index].mask; *pdata &= (1ULL << cpuid_maxphyaddr(vcpu)) - 1; } return 0; } void kvm_vcpu_mtrr_init(struct kvm_vcpu *vcpu) { INIT_LIST_HEAD(&vcpu->arch.mtrr_state.head); } struct mtrr_iter { /* input fields. */ struct kvm_mtrr *mtrr_state; u64 start; u64 end; /* output fields. */ int mem_type; /* mtrr is completely disabled? */ bool mtrr_disabled; /* [start, end) is not fully covered in MTRRs? */ bool partial_map; /* private fields. */ union { /* used for fixed MTRRs. */ struct { int index; int seg; }; /* used for var MTRRs. */ struct { struct kvm_mtrr_range *range; /* max address has been covered in var MTRRs. 
*/ u64 start_max; }; }; bool fixed; }; static bool mtrr_lookup_fixed_start(struct mtrr_iter *iter) { int seg, index; if (!fixed_mtrr_is_enabled(iter->mtrr_state)) return false; seg = fixed_mtrr_addr_to_seg(iter->start); if (seg < 0) return false; iter->fixed = true; index = fixed_mtrr_addr_seg_to_range_index(iter->start, seg); iter->index = index; iter->seg = seg; return true; } static bool match_var_range(struct mtrr_iter *iter, struct kvm_mtrr_range *range) { u64 start, end; var_mtrr_range(range, &start, &end); if (!(start >= iter->end || end <= iter->start)) { iter->range = range; /* * the function is called when we do kvm_mtrr.head walking. * Range has the minimum base address which interleaves * [looker->start_max, looker->end). */ iter->partial_map |= iter->start_max < start; /* update the max address has been covered. */ iter->start_max = max(iter->start_max, end); return true; } return false; } static void __mtrr_lookup_var_next(struct mtrr_iter *iter) { struct kvm_mtrr *mtrr_state = iter->mtrr_state; list_for_each_entry_continue(iter->range, &mtrr_state->head, node) if (match_var_range(iter, iter->range)) return; iter->range = NULL; iter->partial_map |= iter->start_max < iter->end; } static void mtrr_lookup_var_start(struct mtrr_iter *iter) { struct kvm_mtrr *mtrr_state = iter->mtrr_state; iter->fixed = false; iter->start_max = iter->start; iter->range = list_prepare_entry(iter->range, &mtrr_state->head, node); __mtrr_lookup_var_next(iter); } static void mtrr_lookup_fixed_next(struct mtrr_iter *iter) { /* terminate the lookup. */ if (fixed_mtrr_range_end_addr(iter->seg, iter->index) >= iter->end) { iter->fixed = false; iter->range = NULL; return; } iter->index++; /* have looked up for all fixed MTRRs. */ if (iter->index >= ARRAY_SIZE(iter->mtrr_state->fixed_ranges)) return mtrr_lookup_var_start(iter); /* switch to next segment. 
*/ if (iter->index > fixed_mtrr_seg_end_range_index(iter->seg)) iter->seg++; } static void mtrr_lookup_var_next(struct mtrr_iter *iter) { __mtrr_lookup_var_next(iter); } static void mtrr_lookup_start(struct mtrr_iter *iter) { if (!mtrr_is_enabled(iter->mtrr_state)) { iter->mtrr_disabled = true; return; } if (!mtrr_lookup_fixed_start(iter)) mtrr_lookup_var_start(iter); } static void mtrr_lookup_init(struct mtrr_iter *iter, struct kvm_mtrr *mtrr_state, u64 start, u64 end) { iter->mtrr_state = mtrr_state; iter->start = start; iter->end = end; iter->mtrr_disabled = false; iter->partial_map = false; iter->fixed = false; iter->range = NULL; mtrr_lookup_start(iter); } static bool mtrr_lookup_okay(struct mtrr_iter *iter) { if (iter->fixed) { iter->mem_type = iter->mtrr_state->fixed_ranges[iter->index]; return true; } if (iter->range) { iter->mem_type = iter->range->base & 0xff; return true; } return false; } static void mtrr_lookup_next(struct mtrr_iter *iter) { if (iter->fixed) mtrr_lookup_fixed_next(iter); else mtrr_lookup_var_next(iter); } #define mtrr_for_each_mem_type(_iter_, _mtrr_, _gpa_start_, _gpa_end_) \ for (mtrr_lookup_init(_iter_, _mtrr_, _gpa_start_, _gpa_end_); \ mtrr_lookup_okay(_iter_); mtrr_lookup_next(_iter_)) u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn) { struct kvm_mtrr *mtrr_state = &vcpu->arch.mtrr_state; struct mtrr_iter iter; u64 start, end; int type = -1; const int wt_wb_mask = (1 << MTRR_TYPE_WRBACK) | (1 << MTRR_TYPE_WRTHROUGH); start = gfn_to_gpa(gfn); end = start + PAGE_SIZE; mtrr_for_each_mem_type(&iter, mtrr_state, start, end) { int curr_type = iter.mem_type; /* * Please refer to Intel SDM Volume 3: 11.11.4.1 MTRR * Precedences. */ if (type == -1) { type = curr_type; continue; } /* * If two or more variable memory ranges match and the * memory types are identical, then that memory type is * used. 
*/ if (type == curr_type) continue; /* * If two or more variable memory ranges match and one of * the memory types is UC, the UC memory type used. */ if (curr_type == MTRR_TYPE_UNCACHABLE) return MTRR_TYPE_UNCACHABLE; /* * If two or more variable memory ranges match and the * memory types are WT and WB, the WT memory type is used. */ if (((1 << type) & wt_wb_mask) && ((1 << curr_type) & wt_wb_mask)) { type = MTRR_TYPE_WRTHROUGH; continue; } /* * For overlaps not defined by the above rules, processor * behavior is undefined. */ /* We use WB for this undefined behavior. :( */ return MTRR_TYPE_WRBACK; } if (iter.mtrr_disabled) return mtrr_disabled_type(vcpu); /* not contained in any MTRRs. */ if (type == -1) return mtrr_default_type(mtrr_state); /* * We just check one page, partially covered by MTRRs is * impossible. */ WARN_ON(iter.partial_map); return type; } EXPORT_SYMBOL_GPL(kvm_mtrr_get_guest_memory_type); bool kvm_mtrr_check_gfn_range_consistency(struct kvm_vcpu *vcpu, gfn_t gfn, int page_num) { struct kvm_mtrr *mtrr_state = &vcpu->arch.mtrr_state; struct mtrr_iter iter; u64 start, end; int type = -1; start = gfn_to_gpa(gfn); end = gfn_to_gpa(gfn + page_num); mtrr_for_each_mem_type(&iter, mtrr_state, start, end) { if (type == -1) { type = iter.mem_type; continue; } if (type != iter.mem_type) return false; } if (iter.mtrr_disabled) return true; if (!iter.partial_map) return true; if (type == -1) return true; return type == mtrr_default_type(mtrr_state); }
./CrossVul/dataset_final_sorted/CWE-284/c/good_5019_0
crossvul-cpp_data_good_1571_7
/* Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ /* ** mysqlimport.c - Imports all given files ** into a table(s). */ #define IMPORT_VERSION "3.7" #include "client_priv.h" #include "my_default.h" #include "mysql_version.h" #ifdef HAVE_LIBPTHREAD #include <my_pthread.h> #endif #include <welcome_copyright_notice.h> /* ORACLE_WELCOME_COPYRIGHT_NOTICE */ /* Global Thread counter */ uint counter; #ifdef HAVE_LIBPTHREAD pthread_mutex_t counter_mutex; pthread_cond_t count_threshhold; #endif static void db_error_with_table(MYSQL *mysql, char *table); static void db_error(MYSQL *mysql); static char *field_escape(char *to,const char *from,uint length); static char *add_load_option(char *ptr,const char *object, const char *statement); static my_bool verbose=0,lock_tables=0,ignore_errors=0,opt_delete=0, replace=0,silent=0,ignore=0,opt_compress=0, opt_low_priority= 0, tty_password= 0; static my_bool debug_info_flag= 0, debug_check_flag= 0; static uint opt_use_threads=0, opt_local_file=0, my_end_arg= 0; static char *opt_password=0, *current_user=0, *current_host=0, *current_db=0, *fields_terminated=0, *lines_terminated=0, *enclosed=0, *opt_enclosed=0, *escaped=0, *opt_columns=0, *default_charset= (char*) MYSQL_AUTODETECT_CHARSET_NAME; static uint opt_mysql_port= 0, opt_protocol= 0; static char *opt_bind_addr = NULL; static char * 
opt_mysql_unix_port=0; static char *opt_plugin_dir= 0, *opt_default_auth= 0; static longlong opt_ignore_lines= -1; #include <sslopt-vars.h> #if defined (_WIN32) && !defined (EMBEDDED_LIBRARY) static char *shared_memory_base_name=0; #endif static struct my_option my_long_options[] = { {"bind-address", 0, "IP address to bind to.", (uchar**) &opt_bind_addr, (uchar**) &opt_bind_addr, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"character-sets-dir", OPT_CHARSETS_DIR, "Directory for character set files.", &charsets_dir, &charsets_dir, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"default-character-set", OPT_DEFAULT_CHARSET, "Set the default character set.", &default_charset, &default_charset, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"columns", 'c', "Use only these columns to import the data to. Give the column names in a comma separated list. This is same as giving columns to LOAD DATA INFILE.", &opt_columns, &opt_columns, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"compress", 'C', "Use compression in server/client protocol.", &opt_compress, &opt_compress, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"debug",'#', "Output debug log. 
Often this is 'd:t:o,filename'.", 0, 0, 0, GET_STR, OPT_ARG, 0, 0, 0, 0, 0, 0}, {"debug-check", OPT_DEBUG_CHECK, "Check memory and open file usage at exit.", &debug_check_flag, &debug_check_flag, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"debug-info", OPT_DEBUG_INFO, "Print some debug info at exit.", &debug_info_flag, &debug_info_flag, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"default_auth", OPT_DEFAULT_AUTH, "Default authentication client-side plugin to use.", &opt_default_auth, &opt_default_auth, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"delete", 'd', "First delete all rows from table.", &opt_delete, &opt_delete, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"fields-terminated-by", OPT_FTB, "Fields in the input file are terminated by the given string.", &fields_terminated, &fields_terminated, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"fields-enclosed-by", OPT_ENC, "Fields in the import file are enclosed by the given character.", &enclosed, &enclosed, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"fields-optionally-enclosed-by", OPT_O_ENC, "Fields in the input file are optionally enclosed by the given character.", &opt_enclosed, &opt_enclosed, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"fields-escaped-by", OPT_ESC, "Fields in the input file are escaped by the given character.", &escaped, &escaped, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"force", 'f', "Continue even if we get an SQL error.", &ignore_errors, &ignore_errors, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"help", '?', "Displays this help and exits.", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, {"host", 'h', "Connect to host.", &current_host, &current_host, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"ignore", 'i', "If duplicate unique key was found, keep old row.", &ignore, &ignore, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"ignore-lines", OPT_IGN_LINES, "Ignore first n lines of data infile.", &opt_ignore_lines, &opt_ignore_lines, 0, GET_LL, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, 
{"lines-terminated-by", OPT_LTB, "Lines in the input file are terminated by the given string.", &lines_terminated, &lines_terminated, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"local", 'L', "Read all files through the client.", &opt_local_file, &opt_local_file, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"lock-tables", 'l', "Lock all tables for write (this disables threads).", &lock_tables, &lock_tables, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"low-priority", OPT_LOW_PRIORITY, "Use LOW_PRIORITY when updating the table.", &opt_low_priority, &opt_low_priority, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"password", 'p', "Password to use when connecting to server. If password is not given it's asked from the tty.", 0, 0, 0, GET_PASSWORD, OPT_ARG, 0, 0, 0, 0, 0, 0}, #ifdef _WIN32 {"pipe", 'W', "Use named pipes to connect to server.", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, #endif {"plugin_dir", OPT_PLUGIN_DIR, "Directory for client-side plugins.", &opt_plugin_dir, &opt_plugin_dir, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"port", 'P', "Port number to use for connection or 0 for default to, in " "order of preference, my.cnf, $MYSQL_TCP_PORT, " #if MYSQL_PORT_DEFAULT == 0 "/etc/services, " #endif "built-in default (" STRINGIFY_ARG(MYSQL_PORT) ").", &opt_mysql_port, &opt_mysql_port, 0, GET_UINT, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"protocol", OPT_MYSQL_PROTOCOL, "The protocol to use for connection (tcp, socket, pipe, memory).", 0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"replace", 'r', "If duplicate unique key was found, replace old row.", &replace, &replace, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, #if defined (_WIN32) && !defined (EMBEDDED_LIBRARY) {"shared-memory-base-name", OPT_SHARED_MEMORY_BASE_NAME, "Base name of shared memory.", &shared_memory_base_name, &shared_memory_base_name, 0, GET_STR_ALLOC, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, #endif {"silent", 's', "Be more silent.", &silent, &silent, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"socket", 'S', 
"The socket file to use for connection.", &opt_mysql_unix_port, &opt_mysql_unix_port, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, #include <sslopt-longopts.h> {"use-threads", OPT_USE_THREADS, "Load files in parallel. The argument is the number " "of threads to use for loading data.", &opt_use_threads, &opt_use_threads, 0, GET_UINT, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"user", 'u', "User for login if not current user.", &current_user, &current_user, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"verbose", 'v', "Print info about the various stages.", &verbose, &verbose, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"version", 'V', "Output version information and exit.", 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}, { 0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0} }; static const char *load_default_groups[]= { "mysqlimport","client",0 }; static void print_version(void) { printf("%s Ver %s Distrib %s, for %s (%s)\n" ,my_progname, IMPORT_VERSION, MYSQL_SERVER_VERSION,SYSTEM_TYPE,MACHINE_TYPE); } static void usage(void) { print_version(); puts(ORACLE_WELCOME_COPYRIGHT_NOTICE("2000")); printf("\ Loads tables from text files in various formats. The base name of the\n\ text file must be the name of the table that should be used.\n\ If one uses sockets to connect to the MySQL server, the server will open and\n\ read the text file directly. In other cases the client will open the text\n\ file. 
The SQL command 'LOAD DATA INFILE' is used to import the rows.\n"); printf("\nUsage: %s [OPTIONS] database textfile...",my_progname); print_defaults("my",load_default_groups); my_print_help(my_long_options); my_print_variables(my_long_options); } static my_bool get_one_option(int optid, const struct my_option *opt __attribute__((unused)), char *argument) { switch(optid) { case 'p': if (argument == disabled_my_option) argument= (char*) ""; /* Don't require password */ if (argument) { char *start=argument; my_free(opt_password); opt_password=my_strdup(PSI_NOT_INSTRUMENTED, argument,MYF(MY_FAE)); while (*argument) *argument++= 'x'; /* Destroy argument */ if (*start) start[1]=0; /* Cut length of argument */ tty_password= 0; } else tty_password= 1; break; #ifdef _WIN32 case 'W': opt_protocol = MYSQL_PROTOCOL_PIPE; opt_local_file=1; break; #endif case OPT_MYSQL_PROTOCOL: opt_protocol= find_type_or_exit(argument, &sql_protocol_typelib, opt->name); break; case '#': DBUG_PUSH(argument ? argument : "d:t:o"); debug_check_flag= 1; break; #include <sslopt-case.h> case 'V': print_version(); exit(0); case 'I': case '?': usage(); exit(0); } return 0; } static int get_options(int *argc, char ***argv) { int ho_error; if ((ho_error=handle_options(argc, argv, my_long_options, get_one_option))) exit(ho_error); if (debug_info_flag) my_end_arg= MY_CHECK_ERROR | MY_GIVE_INFO; if (debug_check_flag) my_end_arg= MY_CHECK_ERROR; if (enclosed && opt_enclosed) { fprintf(stderr, "You can't use ..enclosed.. and ..optionally-enclosed.. 
at the same time.\n"); return(1); } if (replace && ignore) { fprintf(stderr, "You can't use --ignore (-i) and --replace (-r) at the same time.\n"); return(1); } if (*argc < 2) { usage(); return 1; } current_db= *((*argv)++); (*argc)--; if (tty_password) opt_password=get_tty_password(NullS); return(0); } static int write_to_table(char *filename, MYSQL *mysql) { char tablename[FN_REFLEN], hard_path[FN_REFLEN], escaped_name[FN_REFLEN * 2 + 1], sql_statement[FN_REFLEN*16+256], *end, *pos; DBUG_ENTER("write_to_table"); DBUG_PRINT("enter",("filename: %s",filename)); fn_format(tablename, filename, "", "", 1 | 2); /* removes path & ext. */ if (!opt_local_file) my_stpcpy(hard_path,filename); else my_load_path(hard_path, filename, NULL); /* filename includes the path */ if (opt_delete) { if (verbose) fprintf(stdout, "Deleting the old data from table %s\n", tablename); snprintf(sql_statement, FN_REFLEN*16+256, "DELETE FROM %s", tablename); if (mysql_query(mysql, sql_statement)) { db_error_with_table(mysql, tablename); DBUG_RETURN(1); } } to_unix_path(hard_path); if (verbose) { if (opt_local_file) fprintf(stdout, "Loading data from LOCAL file: %s into %s\n", hard_path, tablename); else fprintf(stdout, "Loading data from SERVER file: %s into %s\n", hard_path, tablename); } mysql_real_escape_string(mysql, escaped_name, hard_path, (unsigned long) strlen(hard_path)); sprintf(sql_statement, "LOAD DATA %s %s INFILE '%s'", opt_low_priority ? "LOW_PRIORITY" : "", opt_local_file ? "LOCAL" : "", escaped_name); end= strend(sql_statement); if (replace) end= my_stpcpy(end, " REPLACE"); if (ignore) end= my_stpcpy(end, " IGNORE"); end= my_stpcpy(end, " INTO TABLE `"); /* Turn any ` into `` in table name. 
*/ for (pos= tablename; *pos; pos++) { if (*pos == '`') *end++= '`'; *end++= *pos; } end= my_stpcpy(end, "`"); if (fields_terminated || enclosed || opt_enclosed || escaped) end= my_stpcpy(end, " FIELDS"); end= add_load_option(end, fields_terminated, " TERMINATED BY"); end= add_load_option(end, enclosed, " ENCLOSED BY"); end= add_load_option(end, opt_enclosed, " OPTIONALLY ENCLOSED BY"); end= add_load_option(end, escaped, " ESCAPED BY"); end= add_load_option(end, lines_terminated, " LINES TERMINATED BY"); if (opt_ignore_lines >= 0) end= my_stpcpy(longlong10_to_str(opt_ignore_lines, my_stpcpy(end, " IGNORE "),10), " LINES"); if (opt_columns) end= my_stpcpy(my_stpcpy(my_stpcpy(end, " ("), opt_columns), ")"); *end= '\0'; if (mysql_query(mysql, sql_statement)) { db_error_with_table(mysql, tablename); DBUG_RETURN(1); } if (!silent) { if (mysql_info(mysql)) /* If NULL-pointer, print nothing */ { fprintf(stdout, "%s.%s: %s\n", current_db, tablename, mysql_info(mysql)); } } DBUG_RETURN(0); } static void lock_table(MYSQL *mysql, int tablecount, char **raw_tablename) { DYNAMIC_STRING query; int i; char tablename[FN_REFLEN]; if (verbose) fprintf(stdout, "Locking tables for write\n"); init_dynamic_string(&query, "LOCK TABLES ", 256, 1024); for (i=0 ; i < tablecount ; i++) { fn_format(tablename, raw_tablename[i], "", "", 1 | 2); dynstr_append(&query, tablename); dynstr_append(&query, " WRITE,"); } if (mysql_real_query(mysql, query.str, query.length-1)) db_error(mysql); /* We shall countinue here, if --force was given */ } static MYSQL *db_connect(char *host, char *database, char *user, char *passwd) { MYSQL *mysql; if (verbose) fprintf(stdout, "Connecting to %s\n", host ? 
host : "localhost"); if (!(mysql= mysql_init(NULL))) return 0; if (opt_compress) mysql_options(mysql,MYSQL_OPT_COMPRESS,NullS); if (opt_local_file) mysql_options(mysql,MYSQL_OPT_LOCAL_INFILE, (char*) &opt_local_file); SSL_SET_OPTIONS(mysql); if (opt_protocol) mysql_options(mysql,MYSQL_OPT_PROTOCOL,(char*)&opt_protocol); if (opt_bind_addr) mysql_options(mysql,MYSQL_OPT_BIND,opt_bind_addr); #if defined (_WIN32) && !defined (EMBEDDED_LIBRARY) if (shared_memory_base_name) mysql_options(mysql,MYSQL_SHARED_MEMORY_BASE_NAME,shared_memory_base_name); #endif if (opt_plugin_dir && *opt_plugin_dir) mysql_options(mysql, MYSQL_PLUGIN_DIR, opt_plugin_dir); if (opt_default_auth && *opt_default_auth) mysql_options(mysql, MYSQL_DEFAULT_AUTH, opt_default_auth); mysql_options(mysql, MYSQL_SET_CHARSET_NAME, default_charset); mysql_options(mysql, MYSQL_OPT_CONNECT_ATTR_RESET, 0); mysql_options4(mysql, MYSQL_OPT_CONNECT_ATTR_ADD, "program_name", "mysqlimport"); if (!(mysql_real_connect(mysql,host,user,passwd, database,opt_mysql_port,opt_mysql_unix_port, 0))) { ignore_errors=0; /* NO RETURN FROM db_error */ db_error(mysql); } mysql->reconnect= 0; if (verbose) fprintf(stdout, "Selecting database %s\n", database); if (mysql_select_db(mysql, database)) { ignore_errors=0; db_error(mysql); } return mysql; } static void db_disconnect(char *host, MYSQL *mysql) { if (verbose) fprintf(stdout, "Disconnecting from %s\n", host ? 
host : "localhost"); mysql_close(mysql); } static void safe_exit(int error, MYSQL *mysql) { if (ignore_errors) return; if (mysql) mysql_close(mysql); exit(error); } static void db_error_with_table(MYSQL *mysql, char *table) { my_printf_error(0,"Error: %d, %s, when using table: %s", MYF(0), mysql_errno(mysql), mysql_error(mysql), table); safe_exit(1, mysql); } static void db_error(MYSQL *mysql) { my_printf_error(0,"Error: %d %s", MYF(0), mysql_errno(mysql), mysql_error(mysql)); safe_exit(1, mysql); } static char *add_load_option(char *ptr, const char *object, const char *statement) { if (object) { /* Don't escape hex constants */ if (object[0] == '0' && (object[1] == 'x' || object[1] == 'X')) ptr= strxmov(ptr," ",statement," ",object,NullS); else { /* char constant; escape */ ptr= strxmov(ptr," ",statement," '",NullS); ptr= field_escape(ptr,object,(uint) strlen(object)); *ptr++= '\''; } } return ptr; } /* ** Allow the user to specify field terminator strings like: ** "'", "\", "\\" (escaped backslash), "\t" (tab), "\n" (newline) ** This is done by doubleing ' and add a end -\ if needed to avoid ** syntax errors from the SQL parser. 
*/ static char *field_escape(char *to,const char *from,uint length) { const char *end; uint end_backslashes=0; for (end= from+length; from != end; from++) { *to++= *from; if (*from == '\\') end_backslashes^=1; /* find odd number of backslashes */ else { if (*from == '\'' && !end_backslashes) *to++= *from; /* We want a dublicate of "'" for MySQL */ end_backslashes=0; } } /* Add missing backslashes if user has specified odd number of backs.*/ if (end_backslashes) *to++= '\\'; return to; } int exitcode= 0; #ifdef HAVE_LIBPTHREAD pthread_handler_t worker_thread(void *arg) { int error; char *raw_table_name= (char *)arg; MYSQL *mysql= 0; if (mysql_thread_init()) goto error; if (!(mysql= db_connect(current_host,current_db,current_user,opt_password))) { goto error; } if (mysql_query(mysql, "/*!40101 set @@character_set_database=binary */;")) { db_error(mysql); /* We shall countinue here, if --force was given */ goto error; } /* We are not currently catching the error here. */ if((error= write_to_table(raw_table_name, mysql))) if (exitcode == 0) exitcode= error; error: if (mysql) db_disconnect(current_host, mysql); pthread_mutex_lock(&counter_mutex); counter--; pthread_cond_signal(&count_threshhold); pthread_mutex_unlock(&counter_mutex); mysql_thread_end(); return 0; } #endif int main(int argc, char **argv) { int error=0; char **argv_to_free; MY_INIT(argv[0]); my_getopt_use_args_separator= TRUE; if (load_defaults("my",load_default_groups,&argc,&argv)) return 1; my_getopt_use_args_separator= FALSE; /* argv is changed in the program */ argv_to_free= argv; if (get_options(&argc, &argv)) { free_defaults(argv_to_free); return(1); } #ifdef HAVE_LIBPTHREAD if (opt_use_threads && !lock_tables) { pthread_t mainthread; /* Thread descriptor */ pthread_attr_t attr; /* Thread attributes */ pthread_attr_init(&attr); pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED); pthread_mutex_init(&counter_mutex, NULL); pthread_cond_init(&count_threshhold, NULL); for (counter= 0; *argv != 
NULL; argv++) /* Loop through tables */ { pthread_mutex_lock(&counter_mutex); while (counter == opt_use_threads) { struct timespec abstime; set_timespec(abstime, 3); pthread_cond_timedwait(&count_threshhold, &counter_mutex, &abstime); } /* Before exiting the lock we set ourselves up for the next thread */ counter++; pthread_mutex_unlock(&counter_mutex); /* now create the thread */ if (pthread_create(&mainthread, &attr, worker_thread, (void *)*argv) != 0) { pthread_mutex_lock(&counter_mutex); counter--; pthread_mutex_unlock(&counter_mutex); fprintf(stderr,"%s: Could not create thread\n", my_progname); } } /* We loop until we know that all children have cleaned up. */ pthread_mutex_lock(&counter_mutex); while (counter) { struct timespec abstime; set_timespec(abstime, 3); pthread_cond_timedwait(&count_threshhold, &counter_mutex, &abstime); } pthread_mutex_unlock(&counter_mutex); pthread_mutex_destroy(&counter_mutex); pthread_cond_destroy(&count_threshhold); pthread_attr_destroy(&attr); } else #endif { MYSQL *mysql= 0; if (!(mysql= db_connect(current_host,current_db,current_user,opt_password))) { free_defaults(argv_to_free); return(1); /* purecov: deadcode */ } if (mysql_query(mysql, "/*!40101 set @@character_set_database=binary */;")) { db_error(mysql); /* We shall countinue here, if --force was given */ return(1); } if (lock_tables) lock_table(mysql, argc, argv); for (; *argv != NULL; argv++) if ((error= write_to_table(*argv, mysql))) if (exitcode == 0) exitcode= error; db_disconnect(current_host, mysql); } my_free(opt_password); #if defined (_WIN32) && !defined (EMBEDDED_LIBRARY) my_free(shared_memory_base_name); #endif free_defaults(argv_to_free); my_end(my_end_arg); return(exitcode); }
./CrossVul/dataset_final_sorted/CWE-284/c/good_1571_7
crossvul-cpp_data_bad_880_2
/* * Copyright (C) 2014-2019 Firejail Authors * * This file is part of firejail project * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #include "firejail.h" #include <sys/mount.h> #include <sys/stat.h> #include <sys/types.h> #include <dirent.h> static int tmpfs_mounted = 0; // build /run/firejail directory void preproc_build_firejail_dir(void) { struct stat s; // CentOS 6 doesn't have /run directory if (stat(RUN_FIREJAIL_BASEDIR, &s)) { create_empty_dir_as_root(RUN_FIREJAIL_BASEDIR, 0755); } if (stat(RUN_FIREJAIL_DIR, &s)) { create_empty_dir_as_root(RUN_FIREJAIL_DIR, 0755); } if (stat(RUN_FIREJAIL_NETWORK_DIR, &s)) { create_empty_dir_as_root(RUN_FIREJAIL_NETWORK_DIR, 0755); } if (stat(RUN_FIREJAIL_BANDWIDTH_DIR, &s)) { create_empty_dir_as_root(RUN_FIREJAIL_BANDWIDTH_DIR, 0755); } if (stat(RUN_FIREJAIL_NAME_DIR, &s)) { create_empty_dir_as_root(RUN_FIREJAIL_NAME_DIR, 0755); } if (stat(RUN_FIREJAIL_PROFILE_DIR, &s)) { create_empty_dir_as_root(RUN_FIREJAIL_PROFILE_DIR, 0755); } if (stat(RUN_FIREJAIL_X11_DIR, &s)) { create_empty_dir_as_root(RUN_FIREJAIL_X11_DIR, 0755); } if (stat(RUN_FIREJAIL_APPIMAGE_DIR, &s)) { create_empty_dir_as_root(RUN_FIREJAIL_APPIMAGE_DIR, 0755); } if (stat(RUN_FIREJAIL_LIB_DIR, &s)) { create_empty_dir_as_root(RUN_FIREJAIL_LIB_DIR, 0755); } if (stat(RUN_MNT_DIR, &s)) { 
create_empty_dir_as_root(RUN_MNT_DIR, 0755); } create_empty_file_as_root(RUN_RO_FILE, S_IRUSR); create_empty_dir_as_root(RUN_RO_DIR, S_IRUSR); } // build /run/firejail/mnt directory void preproc_mount_mnt_dir(void) { // mount tmpfs on top of /run/firejail/mnt if (!tmpfs_mounted) { if (arg_debug) printf("Mounting tmpfs on %s directory\n", RUN_MNT_DIR); if (mount("tmpfs", RUN_MNT_DIR, "tmpfs", MS_NOSUID | MS_STRICTATIME, "mode=755,gid=0") < 0) errExit("mounting /run/firejail/mnt"); tmpfs_mounted = 1; fs_logger2("tmpfs", RUN_MNT_DIR); #ifdef HAVE_SECCOMP if (arg_seccomp_block_secondary) copy_file(PATH_SECCOMP_BLOCK_SECONDARY, RUN_SECCOMP_BLOCK_SECONDARY, getuid(), getgid(), 0644); // root needed else { //copy default seccomp files copy_file(PATH_SECCOMP_32, RUN_SECCOMP_32, getuid(), getgid(), 0644); // root needed } if (arg_allow_debuggers) copy_file(PATH_SECCOMP_DEFAULT_DEBUG, RUN_SECCOMP_CFG, getuid(), getgid(), 0644); // root needed else copy_file(PATH_SECCOMP_DEFAULT, RUN_SECCOMP_CFG, getuid(), getgid(), 0644); // root needed if (arg_memory_deny_write_execute) copy_file(PATH_SECCOMP_MDWX, RUN_SECCOMP_MDWX, getuid(), getgid(), 0644); // root needed // as root, create empty RUN_SECCOMP_PROTOCOL and RUN_SECCOMP_POSTEXEC files create_empty_file_as_root(RUN_SECCOMP_PROTOCOL, 0644); if (set_perms(RUN_SECCOMP_PROTOCOL, getuid(), getgid(), 0644)) errExit("set_perms"); create_empty_file_as_root(RUN_SECCOMP_POSTEXEC, 0644); if (set_perms(RUN_SECCOMP_POSTEXEC, getuid(), getgid(), 0644)) errExit("set_perms"); #endif } } static void clean_dir(const char *name, int *pidarr, int start_pid, int max_pids) { DIR *dir; if (!(dir = opendir(name))) { fwarning("cannot clean %s directory\n", name); return; // we live to fight another day! 
} // clean leftover files struct dirent *entry; char *end; while ((entry = readdir(dir)) != NULL) { pid_t pid = strtol(entry->d_name, &end, 10); pid %= max_pids; if (end == entry->d_name || *end) continue; if (pid < start_pid) continue; if (pidarr[pid] == 0) delete_run_files(pid); } closedir(dir); } // clean run directory void preproc_clean_run(void) { int max_pids=32769; int start_pid = 100; // extract real max_pids FILE *fp = fopen("/proc/sys/kernel/pid_max", "r"); if (fp) { int val; if (fscanf(fp, "%d", &val) == 1) { if (val > 4194304) // this is the max value supported on 64 bit Linux kernels val = 4194304; if (val >= max_pids) max_pids = val + 1; } fclose(fp); } int *pidarr = malloc(max_pids * sizeof(int)); if (!pidarr) errExit("malloc"); memset(pidarr, 0, max_pids * sizeof(int)); // open /proc directory DIR *dir; if (!(dir = opendir("/proc"))) { // sleep 2 seconds and try again sleep(2); if (!(dir = opendir("/proc"))) { fprintf(stderr, "Error: cannot open /proc directory\n"); exit(1); } } // read /proc and populate pidarr with all active processes struct dirent *entry; char *end; while ((entry = readdir(dir)) != NULL) { pid_t pid = strtol(entry->d_name, &end, 10); pid %= max_pids; if (end == entry->d_name || *end) continue; if (pid < start_pid) continue; pidarr[pid] = 1; } closedir(dir); // clean profile and name directories clean_dir(RUN_FIREJAIL_PROFILE_DIR, pidarr, start_pid, max_pids); clean_dir(RUN_FIREJAIL_NAME_DIR, pidarr, start_pid, max_pids); free(pidarr); }
./CrossVul/dataset_final_sorted/CWE-284/c/bad_880_2
crossvul-cpp_data_bad_5204_2
/************************************************************************** * * Copyright (c) 2000-2003 Intel Corporation * All rights reserved. * Copyright (c) 2012 France Telecom All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * - Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * - Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * - Neither name of Intel Corporation nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL INTEL OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************/ /*! * \file * * \brief Defines the Web Server and has functions to carry out * operations of the Web Server. 
*/ #include "config.h" #if EXCLUDE_WEB_SERVER == 0 #include "webserver.h" #include "FileInfo.h" #include "httpparser.h" #include "httpreadwrite.h" #include "ithread.h" #include "membuffer.h" #include "ssdplib.h" #include "statcodes.h" #include "strintmap.h" #include "unixutil.h" #include "upnp.h" #include "upnpapi.h" #include "UpnpIntTypes.h" #include "UpnpStdInt.h" #include "upnputil.h" #include "VirtualDir.h" #include <assert.h> #include <fcntl.h> #include <sys/stat.h> #ifdef WIN32 #define snprintf _snprintf #endif /*! * Response Types. */ enum resp_type { RESP_FILEDOC, RESP_XMLDOC, RESP_HEADERS, RESP_WEBDOC, RESP_POST }; /* mapping of file extension to content-type of document */ struct document_type_t { /*! . */ const char *file_ext; /*! . */ const char *content_type; /*! . */ const char *content_subtype; }; struct xml_alias_t { /*! name of DOC from root; e.g.: /foo/bar/mydesc.xml */ membuffer name; /*! the XML document contents */ membuffer doc; /*! . */ time_t last_modified; /*! . */ int *ct; }; static const char *gMediaTypes[] = { /*! 0. */ NULL, /*! 1. */ "audio", /*! 2. */ "video", /*! 3. */ "image", /*! 4. */ "application", /*! 5. */ "text" }; /* * Defines. 
*/ /* index into 'gMediaTypes' */ #define AUDIO_STR "\1" #define VIDEO_STR "\2" #define IMAGE_STR "\3" #define APPLICATION_STR "\4" #define TEXT_STR "\5" /* int index */ #define APPLICATION_INDEX 4 #define TEXT_INDEX 5 /* general */ #define NUM_MEDIA_TYPES 70 #define NUM_HTTP_HEADER_NAMES 33 #define ASCTIME_R_BUFFER_SIZE 26 #ifdef WIN32 static char *web_server_asctime_r(const struct tm *tm, char *buf) { if (tm == NULL || buf == NULL) return NULL; asctime_s(buf, ASCTIME_R_BUFFER_SIZE, tm); return buf; } #else #define web_server_asctime_r asctime_r #endif /* sorted by file extension; must have 'NUM_MEDIA_TYPES' extensions */ static const char *gEncodedMediaTypes = "aif\0" AUDIO_STR "aiff\0" "aifc\0" AUDIO_STR "aiff\0" "aiff\0" AUDIO_STR "aiff\0" "asf\0" VIDEO_STR "x-ms-asf\0" "asx\0" VIDEO_STR "x-ms-asf\0" "au\0" AUDIO_STR "basic\0" "avi\0" VIDEO_STR "msvideo\0" "bmp\0" IMAGE_STR "bmp\0" "css\0" TEXT_STR "css\0" "dcr\0" APPLICATION_STR "x-director\0" "dib\0" IMAGE_STR "bmp\0" "dir\0" APPLICATION_STR "x-director\0" "dxr\0" APPLICATION_STR "x-director\0" "gif\0" IMAGE_STR "gif\0" "hta\0" TEXT_STR "hta\0" "htm\0" TEXT_STR "html\0" "html\0" TEXT_STR "html\0" "jar\0" APPLICATION_STR "java-archive\0" "jfif\0" IMAGE_STR "pjpeg\0" "jpe\0" IMAGE_STR "jpeg\0" "jpeg\0" IMAGE_STR "jpeg\0" "jpg\0" IMAGE_STR "jpeg\0" "js\0" APPLICATION_STR "x-javascript\0" "kar\0" AUDIO_STR "midi\0" "m3u\0" AUDIO_STR "mpegurl\0" "mid\0" AUDIO_STR "midi\0" "midi\0" AUDIO_STR "midi\0" "mov\0" VIDEO_STR "quicktime\0" "mp2v\0" VIDEO_STR "x-mpeg2\0" "mp3\0" AUDIO_STR "mpeg\0" "mpe\0" VIDEO_STR "mpeg\0" "mpeg\0" VIDEO_STR "mpeg\0" "mpg\0" VIDEO_STR "mpeg\0" "mpv\0" VIDEO_STR "mpeg\0" "mpv2\0" VIDEO_STR "x-mpeg2\0" "pdf\0" APPLICATION_STR "pdf\0" "pjp\0" IMAGE_STR "jpeg\0" "pjpeg\0" IMAGE_STR "jpeg\0" "plg\0" TEXT_STR "html\0" "pls\0" AUDIO_STR "scpls\0" "png\0" IMAGE_STR "png\0" "qt\0" VIDEO_STR "quicktime\0" "ram\0" AUDIO_STR "x-pn-realaudio\0" "rmi\0" AUDIO_STR "mid\0" "rmm\0" AUDIO_STR 
"x-pn-realaudio\0" "rtf\0" APPLICATION_STR "rtf\0" "shtml\0" TEXT_STR "html\0" "smf\0" AUDIO_STR "midi\0" "snd\0" AUDIO_STR "basic\0" "spl\0" APPLICATION_STR "futuresplash\0" "ssm\0" APPLICATION_STR "streamingmedia\0" "swf\0" APPLICATION_STR "x-shockwave-flash\0" "tar\0" APPLICATION_STR "tar\0" "tcl\0" APPLICATION_STR "x-tcl\0" "text\0" TEXT_STR "plain\0" "tif\0" IMAGE_STR "tiff\0" "tiff\0" IMAGE_STR "tiff\0" "txt\0" TEXT_STR "plain\0" "ulw\0" AUDIO_STR "basic\0" "wav\0" AUDIO_STR "wav\0" "wax\0" AUDIO_STR "x-ms-wax\0" "wm\0" VIDEO_STR "x-ms-wm\0" "wma\0" AUDIO_STR "x-ms-wma\0" "wmv\0" VIDEO_STR "x-ms-wmv\0" "wvx\0" VIDEO_STR "x-ms-wvx\0" "xbm\0" IMAGE_STR "x-xbitmap\0" "xml\0" TEXT_STR "xml\0" "xsl\0" TEXT_STR "xml\0" "z\0" APPLICATION_STR "x-compress\0" "zip\0" APPLICATION_STR "zip\0" "\0"; /* *** end *** */ /*! * module variables - Globals, static and externs. */ static struct document_type_t gMediaTypeList[NUM_MEDIA_TYPES]; /*! Global variable. A local dir which serves as webserver root. */ membuffer gDocumentRootDir; /*! XML document. */ static struct xml_alias_t gAliasDoc; static ithread_mutex_t gWebMutex; extern str_int_entry Http_Header_Names[NUM_HTTP_HEADER_NAMES]; /*! * \brief Decodes list and stores it in gMediaTypeList. */ static UPNP_INLINE void media_list_init(void) { int i; const char *s = gEncodedMediaTypes; struct document_type_t *doc_type; for (i = 0; *s != '\0'; i++) { doc_type = &gMediaTypeList[i]; doc_type->file_ext = s; /* point to type. */ s += strlen(s) + 1; doc_type->content_type = gMediaTypes[(int)*s]; /* point to subtype. */ s++; doc_type->content_subtype = s; /* next entry. */ s += strlen(s) + 1; } assert(i == NUM_MEDIA_TYPES); } /*! * \brief Based on the extension, returns the content type and content * subtype. * * \return * \li \c 0 on success * \li \c -1 on error */ static UPNP_INLINE int search_extension( /*! [in] . */ const char *extension, /*! [out] . */ const char **con_type, /*! [out] . 
*/ const char **con_subtype) { int top, mid, bot; int cmp; top = 0; bot = NUM_MEDIA_TYPES - 1; while (top <= bot) { mid = (top + bot) / 2; cmp = strcasecmp(extension, gMediaTypeList[mid].file_ext); if (cmp > 0) { /* look below mid. */ top = mid + 1; } else if (cmp < 0) { /* look above mid. */ bot = mid - 1; } else { /* cmp == 0 */ *con_type = gMediaTypeList[mid].content_type; *con_subtype = gMediaTypeList[mid].content_subtype; return 0; } } return -1; } /*! * \brief Based on the extension, clones an XML string based on type and * content subtype. If content type and sub type are not found, unknown * types are used. * * \return * \li \c 0 on success. * \li \c UPNP_E_OUTOF_MEMORY - on memory allocation failures. */ static UPNP_INLINE int get_content_type( /*! [in] . */ const char *filename, /*! [out] . */ OUT UpnpFileInfo *fileInfo) { const char *extension; const char *type; const char *subtype; int ctype_found = FALSE; char *temp = NULL; size_t length = 0; int rc = 0; UpnpFileInfo_set_ContentType(fileInfo, NULL); /* get ext */ extension = strrchr(filename, '.'); if (extension != NULL) if (search_extension(extension + 1, &type, &subtype) == 0) ctype_found = TRUE; if (!ctype_found) { /* unknown content type */ type = gMediaTypes[APPLICATION_INDEX]; subtype = "octet-stream"; } length = strlen(type) + strlen("/") + strlen(subtype) + 1; temp = malloc(length); if (!temp) return UPNP_E_OUTOF_MEMORY; rc = snprintf(temp, length, "%s/%s", type, subtype); if (rc < 0 || (unsigned int) rc >= length) { free(temp); return UPNP_E_OUTOF_MEMORY; } UpnpFileInfo_set_ContentType(fileInfo, temp); free(temp); if (!UpnpFileInfo_get_ContentType(fileInfo)) return UPNP_E_OUTOF_MEMORY; return 0; } /*! * \brief Initialize the global XML document. Allocate buffers for the XML * document. */ static UPNP_INLINE void glob_alias_init(void) { struct xml_alias_t *alias = &gAliasDoc; membuffer_init(&alias->doc); membuffer_init(&alias->name); alias->ct = NULL; alias->last_modified = 0; } /*! 
* \brief Check for the validity of the XML object buffer. * * \return BOOLEAN. */ static UPNP_INLINE int is_valid_alias( /*! [in] XML alias object. */ const struct xml_alias_t *alias) { return alias->doc.buf != NULL; } /*! * \brief Copy the contents of the global XML document into the local output * parameter. */ static void alias_grab( /*! [out] XML alias object. */ struct xml_alias_t *alias) { ithread_mutex_lock(&gWebMutex); assert(is_valid_alias(&gAliasDoc)); memcpy(alias, &gAliasDoc, sizeof(struct xml_alias_t)); *alias->ct = *alias->ct + 1; ithread_mutex_unlock(&gWebMutex); } /*! * \brief Release the XML document referred to by the input parameter. Free * the allocated buffers associated with this object. */ static void alias_release( /*! [in] XML alias object. */ struct xml_alias_t *alias) { ithread_mutex_lock(&gWebMutex); /* ignore invalid alias */ if (!is_valid_alias(alias)) { ithread_mutex_unlock(&gWebMutex); return; } assert(*alias->ct > 0); *alias->ct -= 1; if (*alias->ct <= 0) { membuffer_destroy(&alias->doc); membuffer_destroy(&alias->name); free(alias->ct); } ithread_mutex_unlock(&gWebMutex); } int web_server_set_alias(const char *alias_name, const char *alias_content, size_t alias_content_length, time_t last_modified) { int ret_code; struct xml_alias_t alias; alias_release(&gAliasDoc); if (alias_name == NULL) { /* don't serve aliased doc anymore */ return 0; } assert(alias_content != NULL); membuffer_init(&alias.doc); membuffer_init(&alias.name); alias.ct = NULL; do { /* insert leading /, if missing */ if (*alias_name != '/') if (membuffer_assign_str(&alias.name, "/") != 0) break; /* error; out of mem */ ret_code = membuffer_append_str(&alias.name, alias_name); if (ret_code != 0) break; /* error */ if ((alias.ct = (int *)malloc(sizeof(int))) == NULL) break; /* error */ *alias.ct = 1; membuffer_attach(&alias.doc, (char *)alias_content, alias_content_length); alias.last_modified = last_modified; /* save in module var */ ithread_mutex_lock(&gWebMutex); 
gAliasDoc = alias; ithread_mutex_unlock(&gWebMutex); return 0; } while (FALSE); /* error handler */ /* free temp alias */ membuffer_destroy(&alias.name); membuffer_destroy(&alias.doc); free(alias.ct); return UPNP_E_OUTOF_MEMORY; } int web_server_init() { int ret = 0; if (bWebServerState == WEB_SERVER_DISABLED) { /* decode media list */ media_list_init(); membuffer_init(&gDocumentRootDir); glob_alias_init(); pVirtualDirList = NULL; /* Initialize callbacks */ virtualDirCallback.get_info = NULL; virtualDirCallback.open = NULL; virtualDirCallback.read = NULL; virtualDirCallback.write = NULL; virtualDirCallback.seek = NULL; virtualDirCallback.close = NULL; if (ithread_mutex_init(&gWebMutex, NULL) == -1) ret = UPNP_E_OUTOF_MEMORY; else bWebServerState = WEB_SERVER_ENABLED; } return ret; } void web_server_destroy(void) { if (bWebServerState == WEB_SERVER_ENABLED) { membuffer_destroy(&gDocumentRootDir); alias_release(&gAliasDoc); ithread_mutex_lock(&gWebMutex); memset(&gAliasDoc, 0, sizeof(struct xml_alias_t)); ithread_mutex_unlock(&gWebMutex); ithread_mutex_destroy(&gWebMutex); bWebServerState = WEB_SERVER_DISABLED; } } /*! * \brief Release memory allocated for the global web server root directory * and the global XML document. Resets the flag bWebServerState to * WEB_SERVER_DISABLED. * * \return Integer. */ static int get_file_info( /*! [in] Filename having the description document. */ const char *filename, /*! [out] File information object having file attributes such as filelength, * when was the file last modified, whether a file or a directory and * whether the file or directory is readable. 
*/ OUT UpnpFileInfo *info) { int code; struct stat s; FILE *fp; int rc = 0; time_t aux_LastModified; struct tm date; char buffer[ASCTIME_R_BUFFER_SIZE]; UpnpFileInfo_set_ContentType(info, NULL); code = stat(filename, &s); if (code == -1) return -1; if (S_ISDIR(s.st_mode)) UpnpFileInfo_set_IsDirectory(info, TRUE); else if (S_ISREG(s.st_mode)) UpnpFileInfo_set_IsDirectory(info, FALSE); else return -1; /* check readable */ fp = fopen(filename, "r"); UpnpFileInfo_set_IsReadable(info, fp != NULL); if (fp) fclose(fp); UpnpFileInfo_set_FileLength(info, s.st_size); UpnpFileInfo_set_LastModified(info, s.st_mtime); rc = get_content_type(filename, info); aux_LastModified = UpnpFileInfo_get_LastModified(info); UpnpPrintf(UPNP_INFO, HTTP, __FILE__, __LINE__, "file info: %s, length: %lld, last_mod=%s readable=%d\n", filename, (long long)UpnpFileInfo_get_FileLength(info), web_server_asctime_r(http_gmtime_r(&aux_LastModified, &date), buffer), UpnpFileInfo_get_IsReadable(info)); return rc; } int web_server_set_root_dir(const char *root_dir) { size_t index; int ret; ret = membuffer_assign_str(&gDocumentRootDir, root_dir); if (ret != 0) return ret; /* remove trailing '/', if any */ if (gDocumentRootDir.length > 0) { index = gDocumentRootDir.length - 1; /* last char */ if (gDocumentRootDir.buf[index] == '/') membuffer_delete(&gDocumentRootDir, index, 1); } return 0; } /*! * \brief Compare the files names between the one on the XML alias the one * passed in as the input parameter. If equal extract file information. * * \return * \li \c TRUE - On Success * \li \c FALSE if request is not an alias */ static UPNP_INLINE int get_alias( /*! [in] request file passed in to be compared with. */ const char *request_file, /*! [out] xml alias object which has a file name stored. */ struct xml_alias_t *alias, /*! [out] File information object which will be filled up if the file * comparison succeeds. 
*/ UpnpFileInfo *info) { int cmp = strcmp(alias->name.buf, request_file); if (cmp == 0) { UpnpFileInfo_set_FileLength(info, (off_t)alias->doc.length); UpnpFileInfo_set_IsDirectory(info, FALSE); UpnpFileInfo_set_IsReadable(info, TRUE); UpnpFileInfo_set_LastModified(info, alias->last_modified); } return cmp == 0; } /*! * \brief Compares filePath with paths from the list of virtual directory * lists. * * \return BOOLEAN. */ static int isFileInVirtualDir( /*! [in] Directory path to be tested for virtual directory. */ char *filePath) { virtualDirList *pCurVirtualDir; size_t webDirLen; pCurVirtualDir = pVirtualDirList; while (pCurVirtualDir != NULL) { webDirLen = strlen(pCurVirtualDir->dirName); if (webDirLen) { if (pCurVirtualDir->dirName[webDirLen - 1] == '/') { if (strncmp(pCurVirtualDir->dirName, filePath, webDirLen) == 0) return !0; } else { if (strncmp(pCurVirtualDir->dirName, filePath, webDirLen) == 0 && (filePath[webDirLen] == '/' || filePath[webDirLen] == '\0' || filePath[webDirLen] == '?')) return !0; } } pCurVirtualDir = pCurVirtualDir->next; } return 0; } /*! * \brief Converts input string to upper case. */ static void ToUpperCase( /*! Input string to be converted. */ char *s) { while (*s) { *s = (char)toupper(*s); ++s; } } /*! * \brief Finds a substring from a string in a case insensitive way. * * \return A pointer to the first occurence of s2 in s1. */ static char *StrStr( /*! Input string. */ char *s1, /*! Input sub-string. */ const char *s2) { char *Str1; char *Str2; const char *Ptr; char *ret = NULL; Str1 = strdup(s1); if (!Str1) goto error1; Str2 = strdup(s2); if (!Str2) goto error2; ToUpperCase(Str1); ToUpperCase(Str2); Ptr = strstr(Str1, Str2); if (!Ptr) ret = NULL; else ret = s1 + (Ptr - Str1); free(Str2); error2: free(Str1); error1: return ret; } /*! * \brief Finds next token in a string. * * \return Pointer to the next token. */ static char *StrTok( /*! String containing the token. */ char **Src, /*! Set of delimiter characters. 
*/
	const char *Del)
{
	char *TmpPtr;
	char *RetPtr;

	if (*Src != NULL) {
		RetPtr = *Src;
		/* Despite the name, Del is matched as a whole substring
		 * (strstr), not as a set of characters like strtok(). */
		TmpPtr = strstr(*Src, Del);
		if (TmpPtr != NULL) {
			*TmpPtr = '\0';
			*Src = TmpPtr + strlen(Del);
		} else
			*Src = NULL;

		return RetPtr;
	}

	return NULL;
}

/*!
 * \brief Returns a range of integers from a string.
 *
 * Parses the next "first-last" token out of *SrcRangeStr. A suffix range
 * ("-N") leaves F == -1 and swaps the output order so *LastByte receives
 * the suffix length's sign convention.
 *
 * \return 1 when a range token was consumed, -1 when no token is
 * available or it contains no '-'.
 */
static int GetNextRange(
	/*! string containing the token / range. */
	char **SrcRangeStr,
	/*! gets the first byte of the token. */
	off_t *FirstByte,
	/*! gets the last byte of the token. */
	off_t *LastByte)
{
	char *Ptr;
	char *Tok;
	int i;
	int64_t F = -1;
	int64_t L = -1;
	int Is_Suffix_byte_Range = 1;

	if (*SrcRangeStr == NULL)
		return -1;
	Tok = StrTok(SrcRangeStr, ",");
	if ((Ptr = strstr(Tok, "-")) == NULL)
		return -1;
	/* Replace '-' with a space so a single sscanf can read both ends. */
	*Ptr = ' ';
	sscanf(Tok, "%" SCNd64 "%" SCNd64, &F, &L);
	if (F == -1 || L == -1) {
		/* Restore the '-' and decide whether this is a suffix range
		 * ("-500") or an open-ended one ("500-"). */
		*Ptr = '-';
		for (i = 0; i < (int)strlen(Tok); i++) {
			if (Tok[i] == '-') {
				break;
			/* NOTE(review): isdigit() on a plain char is UB for
			 * negative values — confirm input is ASCII only. */
			} else if (isdigit(Tok[i])) {
				Is_Suffix_byte_Range = 0;
				break;
			}
		}
		if (Is_Suffix_byte_Range) {
			/* Suffix range: swap so the caller sees F==-1 in
			 * *FirstByte handling. */
			*FirstByte = (off_t) L;
			*LastByte = (off_t) F;
			return 1;
		}
	}
	*FirstByte = (off_t) F;
	*LastByte = (off_t) L;

	return 1;
}

/*!
 * \brief Fills in the Offset, read size and contents to send out as an HTTP
 * Range Response.
 *
 * \return
 * \li \c HTTP_BAD_REQUEST
 * \li \c HTTP_INTERNAL_SERVER_ERROR
 * \li \c HTTP_REQUEST_RANGE_NOT_SATISFIABLE
 * \li \c HTTP_OK
 */
static int CreateHTTPRangeResponseHeader(
	/*! String containing the range. */
	char *ByteRangeSpecifier,
	/*! Length of the file. */
	off_t FileLength,
	/*! [out] SendInstruction object where the range operations will be stored.
*/
	struct SendInstruction *Instr)
{
	off_t FirstByte, LastByte;
	char *RangeInput;
	char *Ptr;
	int rc = 0;

	/* Mark the range as active up front; it is cleared again only on a
	 * syntactically invalid Range header. */
	Instr->IsRangeActive = 1;
	Instr->ReadSendSize = FileLength;
	if (!ByteRangeSpecifier)
		return HTTP_BAD_REQUEST;
	RangeInput = strdup(ByteRangeSpecifier);
	if (!RangeInput)
		return HTTP_INTERNAL_SERVER_ERROR;
	/* CONTENT-RANGE: bytes 222-3333/4000  HTTP_PARTIAL_CONTENT */
	if (StrStr(RangeInput, "bytes") == NULL ||
	    (Ptr = StrStr(RangeInput, "=")) == NULL) {
		free(RangeInput);
		Instr->IsRangeActive = 0;
		return HTTP_BAD_REQUEST;
	}
	/* Jump = */
	Ptr = Ptr + 1;
	/* A negative length means the size is unknown (e.g. chunked virtual
	 * files); ranges cannot be satisfied then. */
	if (FileLength < 0) {
		free(RangeInput);
		return HTTP_REQUEST_RANGE_NOT_SATISFIABLE;
	}
	if (GetNextRange(&Ptr, &FirstByte, &LastByte) != -1) {
		if (FileLength < FirstByte) {
			free(RangeInput);
			return HTTP_REQUEST_RANGE_NOT_SATISFIABLE;
		}
		if (FirstByte >= 0 && LastByte >= 0 && LastByte >= FirstByte) {
			/* Closed range "first-last"; clamp last to EOF. */
			if (LastByte >= FileLength)
				LastByte = FileLength - 1;
			Instr->RangeOffset = FirstByte;
			Instr->ReadSendSize = LastByte - FirstByte + 1;
			/* Data between two range. */
			rc = snprintf(Instr->RangeHeader,
				sizeof(Instr->RangeHeader),
				"CONTENT-RANGE: bytes %" PRId64
				"-%" PRId64 "/%" PRId64 "\r\n",
				(int64_t)FirstByte,
				(int64_t)LastByte,
				(int64_t)FileLength);
			if (rc < 0 || (unsigned int) rc >= sizeof(Instr->RangeHeader)) {
				free(RangeInput);
				return HTTP_INTERNAL_SERVER_ERROR;
			}
		} else if (FirstByte >= 0 && LastByte == -1
			   && FirstByte < FileLength) {
			/* Open-ended range "first-": send through EOF. */
			Instr->RangeOffset = FirstByte;
			Instr->ReadSendSize = FileLength - FirstByte;
			rc = snprintf(Instr->RangeHeader,
				sizeof(Instr->RangeHeader),
				"CONTENT-RANGE: bytes %" PRId64
				"-%" PRId64 "/%" PRId64 "\r\n",
				(int64_t)FirstByte,
				(int64_t)(FileLength - 1),
				(int64_t)FileLength);
			if (rc < 0 || (unsigned int) rc >= sizeof(Instr->RangeHeader)) {
				free(RangeInput);
				return HTTP_INTERNAL_SERVER_ERROR;
			}
		} else if (FirstByte == -1 && LastByte > 0) {
			/* Suffix range "-last": the final LastByte bytes. */
			if (LastByte >= FileLength) {
				/* Suffix longer than file: send everything. */
				Instr->RangeOffset = 0;
				Instr->ReadSendSize = FileLength;
				rc = snprintf(Instr->RangeHeader,
					sizeof(Instr->RangeHeader),
					"CONTENT-RANGE: bytes 0-%" PRId64
					"/%" PRId64 "\r\n",
					(int64_t)(FileLength - 1),
					(int64_t)FileLength);
			} else {
				Instr->RangeOffset = FileLength - LastByte;
				Instr->ReadSendSize = LastByte;
				rc = snprintf(Instr->RangeHeader,
					sizeof(Instr->RangeHeader),
					"CONTENT-RANGE: bytes %" PRId64
					"-%" PRId64 "/%" PRId64 "\r\n",
					(int64_t)(FileLength - LastByte),
					(int64_t)FileLength - 1,
					(int64_t)FileLength);
			}
			if (rc < 0 || (unsigned int) rc >= sizeof(Instr->RangeHeader)) {
				free(RangeInput);
				return HTTP_INTERNAL_SERVER_ERROR;
			}
		} else {
			free(RangeInput);
			return HTTP_REQUEST_RANGE_NOT_SATISFIABLE;
		}
	} else {
		free(RangeInput);
		return HTTP_REQUEST_RANGE_NOT_SATISFIABLE;
	}

	free(RangeInput);

	return HTTP_OK;
}

/*!
 * \brief Get header id from the request parameter and take appropriate
 * action based on the ids as an HTTP Range Response.
 *
 * \return
 * \li \c HTTP_BAD_REQUEST
 * \li \c HTTP_INTERNAL_SERVER_ERROR
 * \li \c HTTP_REQUEST_RANGE_NOT_SATISFIABLE
 * \li \c HTTP_OK
 */
static int CheckOtherHTTPHeaders(
	/*! [in] HTTP Request message.
*/
	http_message_t *Req,
	/*! [out] Send Instruction object to data for the response. */
	struct SendInstruction *RespInstr,
	/*! Size of the file containing the request document. */
	off_t FileSize)
{
	http_header_t *header;
	ListNode *node;
	/*NNS: dlist_node* node; */
	int index, RetCode = HTTP_OK;
	char *TmpBuf;
	size_t TmpBufSize = LINE_SIZE;

	/* Scratch buffer to NUL-terminate each header value; grown on
	 * demand below when a value exceeds LINE_SIZE. */
	TmpBuf = (char *)malloc(TmpBufSize);
	if (!TmpBuf)
		return HTTP_INTERNAL_SERVER_ERROR;
	node = ListHead(&Req->headers);
	while (node != NULL) {
		header = (http_header_t *) node->item;
		/* find header type. */
		index = map_str_to_int((const char *)header->name.buf,
				header->name.length, Http_Header_Names,
				NUM_HTTP_HEADER_NAMES, FALSE);
		if (header->value.length >= TmpBufSize) {
			/* Value does not fit: reallocate scratch buffer. */
			free(TmpBuf);
			TmpBufSize = header->value.length + 1;
			TmpBuf = (char *)malloc(TmpBufSize);
			if (!TmpBuf)
				return HTTP_INTERNAL_SERVER_ERROR;
		}
		memcpy(TmpBuf, header->value.buf, header->value.length);
		TmpBuf[header->value.length] = '\0';
		if (index >= 0) {
			switch (Http_Header_Names[index].id) {
			case HDR_TE: {
				/* Request */
				RespInstr->IsChunkActive = 1;
				if (strlen(TmpBuf) > strlen("gzip")) {
					/* means client will accept trailer. */
					if (StrStr(TmpBuf, "trailers") != NULL) {
						RespInstr->IsTrailers = 1;
					}
				}
				break;
			}
			case HDR_CONTENT_LENGTH:
				/* NOTE(review): atoi() ignores overflow and
				 * non-numeric garbage — confirm upstream
				 * parsing already validated this value. */
				RespInstr->RecvWriteSize = atoi(TmpBuf);
				break;
			case HDR_RANGE:
				RetCode = CreateHTTPRangeResponseHeader(TmpBuf,
					FileSize, RespInstr);
				if (RetCode != HTTP_OK) {
					free(TmpBuf);
					return RetCode;
				}
				break;
			case HDR_ACCEPT_LANGUAGE:
				/* Copy with truncation to the fixed-size
				 * AcceptLanguageHeader field. */
				if (header->value.length + 1 >
				    sizeof(RespInstr->AcceptLanguageHeader)) {
					size_t length =
					    sizeof(RespInstr->AcceptLanguageHeader) - 1;
					memcpy(RespInstr->AcceptLanguageHeader,
						TmpBuf, length);
					RespInstr->AcceptLanguageHeader[length] = '\0';
				} else {
					memcpy(RespInstr->AcceptLanguageHeader,
						TmpBuf, header->value.length + 1);
				}
				break;
			default:
				/* TODO */
				/* header.value is the value. */
				/*
				   case HDR_CONTENT_TYPE: return 1;
				   case HDR_CONTENT_LANGUAGE:return 1;
				   case HDR_LOCATION: return 1;
				   case HDR_CONTENT_LOCATION:return 1;
				   case HDR_ACCEPT: return 1;
				   case HDR_ACCEPT_CHARSET: return 1;
				   case HDR_USER_AGENT: return 1;
				 */
				/*Header check for encoding */
				/*
				   case HDR_ACCEPT_RANGE:
				   case HDR_CONTENT_RANGE:
				   case HDR_IF_RANGE:
				 */
				/*Header check for encoding */
				/*
				   case HDR_ACCEPT_ENCODING:
				   if(StrStr(TmpBuf, "identity")) {
				   break;
				   } else return -1;
				   case HDR_CONTENT_ENCODING:
				   case HDR_TRANSFER_ENCODING:
				 */
				break;
			}
		}
		node = ListNext(&Req->headers, node);
	}
	free(TmpBuf);

	return RetCode;
}

/*!
 * \brief Processes the request and returns the result in the output parameters.
 *
 * \return
 * \li \c HTTP_BAD_REQUEST
 * \li \c HTTP_INTERNAL_SERVER_ERROR
 * \li \c HTTP_REQUEST_RANGE_NOT_SATISFIABLE
 * \li \c HTTP_FORBIDDEN
 * \li \c HTTP_NOT_FOUND
 * \li \c HTTP_NOT_ACCEPTABLE
 * \li \c HTTP_OK
 */
static int process_request(
	/*! [in] HTTP Request message. */
	http_message_t *req,
	/*! [out] Type of response. */
	enum resp_type *rtype,
	/*! [out] Headers. */
	membuffer *headers,
	/*! [out] Get filename from request document. */
	membuffer *filename,
	/*! [out] Xml alias document from the request document. */
	struct xml_alias_t *alias,
	/*! [out] Send Instruction object where the response is set up.
*/
	struct SendInstruction *RespInstr)
{
	int code;
	int err_code;
	char *request_doc;
	UpnpFileInfo *finfo;
	time_t aux_LastModified;
	int using_alias;
	int using_virtual_dir;
	uri_type *url;
	const char *temp_str;
	int resp_major;
	int resp_minor;
	int alias_grabbed;
	size_t dummy;
	const char *extra_headers = NULL;

	print_http_headers(req);
	url = &req->uri;
	assert(req->method == HTTPMETHOD_GET ||
	       req->method == HTTPMETHOD_HEAD ||
	       req->method == HTTPMETHOD_POST ||
	       req->method == HTTPMETHOD_SIMPLEGET);
	/* init */
	/* NOTE(review): this memset zeroes the pointer variable itself (not
	 * an object) and is immediately overwritten by UpnpFileInfo_new();
	 * it is dead code. */
	memset(&finfo, 0, sizeof(finfo));
	request_doc = NULL;
	finfo = UpnpFileInfo_new();
	alias_grabbed = FALSE;
	err_code = HTTP_INTERNAL_SERVER_ERROR;	/* default error */
	using_virtual_dir = FALSE;
	using_alias = FALSE;
	http_CalcResponseVersion(req->major_version, req->minor_version,
				 &resp_major, &resp_minor);
	/* */
	/* remove dots */
	/* */
	request_doc = malloc(url->pathquery.size + 1);
	if (request_doc == NULL) {
		goto error_handler;	/* out of mem */
	}
	memcpy(request_doc, url->pathquery.buff, url->pathquery.size);
	request_doc[url->pathquery.size] = '\0';
	dummy = url->pathquery.size;
	remove_escaped_chars(request_doc, &dummy);
	/* Reject paths that escape the root via ".." segments. */
	code = remove_dots(request_doc, url->pathquery.size);
	if (code != 0) {
		err_code = HTTP_FORBIDDEN;
		goto error_handler;
	}
	if (*request_doc != '/') {
		/* no slash */
		err_code = HTTP_BAD_REQUEST;
		goto error_handler;
	}
	if (isFileInVirtualDir(request_doc)) {
		using_virtual_dir = TRUE;
		RespInstr->IsVirtualFile = 1;
		if (membuffer_assign_str(filename, request_doc) != 0) {
			goto error_handler;
		}
	} else {
		/* try using alias */
		if (is_valid_alias(&gAliasDoc)) {
			alias_grab(alias);
			alias_grabbed = TRUE;
			using_alias = get_alias(request_doc, alias, finfo);
			if (using_alias == TRUE) {
				UpnpFileInfo_set_ContentType(finfo,
					"text/xml; charset=\"utf-8\"");
				if (UpnpFileInfo_get_ContentType(finfo) == NULL) {
					goto error_handler;
				}
			}
		}
	}
	if (using_virtual_dir) {
		if (req->method != HTTPMETHOD_POST) {
			/* get file info */
			if (virtualDirCallback.get_info(filename->buf,
							finfo) != 0) {
				err_code = HTTP_NOT_FOUND;
				goto error_handler;
			}
			/* try index.html if req is a dir */
			if (UpnpFileInfo_get_IsDirectory(finfo)) {
				if (filename->buf[filename->length - 1] == '/') {
					temp_str = "index.html";
				} else {
					temp_str = "/index.html";
				}
				if (membuffer_append_str(filename, temp_str) != 0) {
					goto error_handler;
				}
				/* get info */
				if (virtualDirCallback.get_info(filename->buf,
					finfo) != UPNP_E_SUCCESS ||
				    UpnpFileInfo_get_IsDirectory(finfo)) {
					err_code = HTTP_NOT_FOUND;
					goto error_handler;
				}
			}
			/* not readable */
			if (!UpnpFileInfo_get_IsReadable(finfo)) {
				err_code = HTTP_FORBIDDEN;
				goto error_handler;
			}
			/* finally, get content type */
			/* if ( get_content_type(filename->buf, &content_type) != 0 ) */
			/*{ */
			/*      goto error_handler; */
			/* } */
		}
	} else if (!using_alias) {
		if (gDocumentRootDir.length == 0) {
			goto error_handler;
		}
		/* */
		/* get file name */
		/* */
		/* filename str */
		if (membuffer_assign_str(filename, gDocumentRootDir.buf) != 0 ||
		    membuffer_append_str(filename, request_doc) != 0) {
			goto error_handler;	/* out of mem */
		}
		/* remove trailing slashes */
		while (filename->length > 0 &&
		       filename->buf[filename->length - 1] == '/') {
			membuffer_delete(filename, filename->length - 1, 1);
		}
		if (req->method != HTTPMETHOD_POST) {
			/* get info on file */
			if (get_file_info(filename->buf, finfo) != 0) {
				err_code = HTTP_NOT_FOUND;
				goto error_handler;
			}
			/* try index.html if req is a dir */
			if (UpnpFileInfo_get_IsDirectory(finfo)) {
				if (filename->buf[filename->length - 1] == '/') {
					temp_str = "index.html";
				} else {
					temp_str = "/index.html";
				}
				if (membuffer_append_str(filename, temp_str) != 0) {
					goto error_handler;
				}
				/* get info */
				if (get_file_info(filename->buf, finfo) != 0 ||
				    UpnpFileInfo_get_IsDirectory(finfo)) {
					err_code = HTTP_NOT_FOUND;
					goto error_handler;
				}
			}
			/* not readable */
			if (!UpnpFileInfo_get_IsReadable(finfo)) {
				err_code = HTTP_FORBIDDEN;
				goto error_handler;
			}
		}
		/* finally, get content type */
		/* if ( get_content_type(filename->buf, &content_type) != 0 ) */
		/* { */
		/*      goto error_handler; */
		/* } */
	}
	RespInstr->ReadSendSize = UpnpFileInfo_get_FileLength(finfo);
	/* Check other header field. */
	code = CheckOtherHTTPHeaders(req, RespInstr,
		UpnpFileInfo_get_FileLength(finfo));
	if (code != HTTP_OK) {
		err_code = code;
		goto error_handler;
	}
	/* NOTE(review): POST is accepted here even when the target is NOT in
	 * a registered virtual directory; http_RecvPostMessage() then opens
	 * the mapped filesystem path for writing (cf. CVE-2016-6255) —
	 * confirm non-virtual POSTs should not be rejected at this point. */
	if (req->method == HTTPMETHOD_POST) {
		*rtype = RESP_POST;
		err_code = HTTP_OK;
		goto error_handler;
	}
	extra_headers = UpnpFileInfo_get_ExtraHeaders(finfo);
	if (!extra_headers) {
		extra_headers = "";
	}

	/* Check if chunked encoding should be used. */
	if (using_virtual_dir &&
	    UpnpFileInfo_get_FileLength(finfo) == UPNP_USING_CHUNKED) {
		/* Chunked encoding is only supported by HTTP 1.1 clients */
		if (resp_major == 1 && resp_minor == 1) {
			RespInstr->IsChunkActive = 1;
		} else {
			/* The virtual callback indicates that we should use
			 * chunked encoding however the client doesn't support
			 * it. Return with an internal server error. */
			err_code = HTTP_NOT_ACCEPTABLE;
			goto error_handler;
		}
	}

	aux_LastModified = UpnpFileInfo_get_LastModified(finfo);
	if (RespInstr->IsRangeActive && RespInstr->IsChunkActive) {
		/* Content-Range: bytes 222-3333/4000  HTTP_PARTIAL_CONTENT */
		/* Transfer-Encoding: chunked */
		if (http_MakeMessage(headers, resp_major, resp_minor,
		    "R" "T" "GKLD" "s" "tcS" "Xc" "sCc",
		    HTTP_PARTIAL_CONTENT,	/* status code */
		    UpnpFileInfo_get_ContentType(finfo),	/* content type */
		    RespInstr,	/* range info */
		    RespInstr,	/* language info */
		    "LAST-MODIFIED: ",
		    &aux_LastModified,
		    X_USER_AGENT,
		    extra_headers) != 0) {
			goto error_handler;
		}
	} else if (RespInstr->IsRangeActive && !RespInstr->IsChunkActive) {
		/* Content-Range: bytes 222-3333/4000  HTTP_PARTIAL_CONTENT */
		if (http_MakeMessage(headers, resp_major, resp_minor,
		    "R" "N" "T" "GLD" "s" "tcS" "Xc" "sCc",
		    HTTP_PARTIAL_CONTENT,	/* status code */
		    RespInstr->ReadSendSize,	/* content length */
		    UpnpFileInfo_get_ContentType(finfo),	/* content type */
		    RespInstr,	/* range info */
		    RespInstr,	/* language info */
		    "LAST-MODIFIED: ",
		    &aux_LastModified,
		    X_USER_AGENT,
		    extra_headers) != 0) {
			goto error_handler;
		}
	} else if (!RespInstr->IsRangeActive && RespInstr->IsChunkActive) {
		/* Transfer-Encoding: chunked */
		if (http_MakeMessage(headers, resp_major, resp_minor,
		    "RK" "TLD" "s" "tcS" "Xc" "sCc",
		    HTTP_OK,	/* status code */
		    UpnpFileInfo_get_ContentType(finfo),	/* content type */
		    RespInstr,	/* language info */
		    "LAST-MODIFIED: ",
		    &aux_LastModified,
		    X_USER_AGENT,
		    extra_headers) != 0) {
			goto error_handler;
		}
	} else {
		/* !RespInstr->IsRangeActive && !RespInstr->IsChunkActive */
		if (RespInstr->ReadSendSize >= 0) {
			if (http_MakeMessage(headers, resp_major, resp_minor,
			    "R" "N" "TLD" "s" "tcS" "Xc" "sCc",
			    HTTP_OK,	/* status code */
			    RespInstr->ReadSendSize,	/* content length */
			    UpnpFileInfo_get_ContentType(finfo),	/* content type */
			    RespInstr,	/* language info */
			    "LAST-MODIFIED: ",
			    &aux_LastModified,
			    X_USER_AGENT,
			    extra_headers) != 0) {
				goto error_handler;
			}
		} else {
			if (http_MakeMessage(headers, resp_major, resp_minor,
			    "R" "TLD" "s" "tcS" "Xc" "sCc",
			    HTTP_OK,	/* status code */
			    UpnpFileInfo_get_ContentType(finfo),	/* content type */
			    RespInstr,	/* language info */
			    "LAST-MODIFIED: ",
			    &aux_LastModified,
			    X_USER_AGENT,
			    extra_headers) != 0) {
				goto error_handler;
			}
		}
	}
	if (req->method == HTTPMETHOD_HEAD) {
		*rtype = RESP_HEADERS;
	} else if (using_alias) {
		/* GET xml */
		*rtype = RESP_XMLDOC;
	} else if (using_virtual_dir) {
		*rtype = RESP_WEBDOC;
	} else {
		/* GET filename */
		*rtype = RESP_FILEDOC;
	}
	/* simple get http 0.9 as specified in http 1.0 */
	/* don't send headers */
	if (req->method == HTTPMETHOD_SIMPLEGET) {
		membuffer_destroy(headers);
	}
	err_code = HTTP_OK;

error_handler:
	free(request_doc);
	UpnpFileInfo_delete(finfo);
	if (err_code != HTTP_OK && alias_grabbed) {
		alias_release(alias);
	}

	return err_code;
}

/*!
 * \brief Receives the HTTP post message.
* * \return * \li \c HTTP_INTERNAL_SERVER_ERROR * \li \c HTTP_UNAUTHORIZED * \li \c HTTP_BAD_REQUEST * \li \c HTTP_SERVICE_UNAVAILABLE * \li \c HTTP_OK */ static int http_RecvPostMessage( /*! HTTP Parser object. */ http_parser_t *parser, /*! [in] Socket Information object. */ SOCKINFO *info, /*! File where received data is copied to. */ char *filename, /*! Send Instruction object which gives information whether the file * is a virtual file or not. */ struct SendInstruction *Instr) { size_t Data_Buf_Size = 1024; char Buf[1024]; int Timeout = -1; FILE *Fp; parse_status_t status = PARSE_OK; int ok_on_close = FALSE; size_t entity_offset = 0; int num_read = 0; int ret_code = HTTP_OK; if (Instr && Instr->IsVirtualFile) { Fp = (virtualDirCallback.open) (filename, UPNP_WRITE); if (Fp == NULL) return HTTP_INTERNAL_SERVER_ERROR; } else { Fp = fopen(filename, "wb"); if (Fp == NULL) return HTTP_UNAUTHORIZED; } parser->position = POS_ENTITY; do { /* first parse what has already been gotten */ if (parser->position != POS_COMPLETE) status = parser_parse_entity(parser); if (status == PARSE_INCOMPLETE_ENTITY) { /* read until close */ ok_on_close = TRUE; } else if ((status != PARSE_SUCCESS) && (status != PARSE_CONTINUE_1) && (status != PARSE_INCOMPLETE)) { /* error */ ret_code = HTTP_BAD_REQUEST; goto ExitFunction; } /* read more if necessary entity */ while (entity_offset + Data_Buf_Size > parser->msg.entity.length && parser->position != POS_COMPLETE) { num_read = sock_read(info, Buf, sizeof(Buf), &Timeout); if (num_read > 0) { /* append data to buffer */ if (membuffer_append(&parser->msg.msg, Buf, (size_t)num_read) != 0) { /* set failure status */ parser->http_error_code = HTTP_INTERNAL_SERVER_ERROR; ret_code = HTTP_INTERNAL_SERVER_ERROR; goto ExitFunction; } status = parser_parse_entity(parser); if (status == PARSE_INCOMPLETE_ENTITY) { /* read until close */ ok_on_close = TRUE; } else if ((status != PARSE_SUCCESS) && (status != PARSE_CONTINUE_1) && (status != PARSE_INCOMPLETE)) { 
ret_code = HTTP_BAD_REQUEST; goto ExitFunction; } } else if (num_read == 0) { if (ok_on_close) { UpnpPrintf(UPNP_INFO, HTTP, __FILE__, __LINE__, "<<< (RECVD) <<<\n%s\n-----------------\n", parser->msg.msg.buf); print_http_headers(&parser->msg); parser->position = POS_COMPLETE; } else { /* partial msg or response */ parser->http_error_code = HTTP_BAD_REQUEST; ret_code = HTTP_BAD_REQUEST; goto ExitFunction; } } else { ret_code = HTTP_SERVICE_UNAVAILABLE; goto ExitFunction; } } if ((entity_offset + Data_Buf_Size) > parser->msg.entity.length) { Data_Buf_Size = parser->msg.entity.length - entity_offset; } memcpy(Buf, &parser->msg.msg.buf[parser->entity_start_position + entity_offset], Data_Buf_Size); entity_offset += Data_Buf_Size; if (Instr && Instr->IsVirtualFile) { int n = virtualDirCallback.write(Fp, Buf, Data_Buf_Size); if (n < 0) { ret_code = HTTP_INTERNAL_SERVER_ERROR; goto ExitFunction; } } else { size_t n = fwrite(Buf, 1, Data_Buf_Size, Fp); if (n != Data_Buf_Size) { ret_code = HTTP_INTERNAL_SERVER_ERROR; goto ExitFunction; } } } while (parser->position != POS_COMPLETE || entity_offset != parser->msg.entity.length); ExitFunction: if (Instr && Instr->IsVirtualFile) { virtualDirCallback.close(Fp); } else { fclose(Fp); } return ret_code; } void web_server_callback(http_parser_t *parser, INOUT http_message_t *req, SOCKINFO *info) { int ret; int timeout = -1; enum resp_type rtype = 0; membuffer headers; membuffer filename; struct xml_alias_t xmldoc; struct SendInstruction RespInstr; /*Initialize instruction header. */ RespInstr.IsVirtualFile = 0; RespInstr.IsChunkActive = 0; RespInstr.IsRangeActive = 0; RespInstr.IsTrailers = 0; memset(RespInstr.AcceptLanguageHeader, 0, sizeof(RespInstr.AcceptLanguageHeader)); /* init */ membuffer_init(&headers); membuffer_init(&filename); /*Process request should create the different kind of header depending on the */ /*the type of request. 
*/
	ret = process_request(req, &rtype, &headers, &filename, &xmldoc,
			      &RespInstr);
	if (ret != HTTP_OK) {
		/* send error code */
		http_SendStatusResponse(info, ret, req->major_version,
			req->minor_version);
	} else {
		/* send response */
		switch (rtype) {
		case RESP_FILEDOC:
			/* Stream a regular file from the document root. */
			http_SendMessage(info, &timeout, "Ibf",
				&RespInstr,
				headers.buf, headers.length,
				filename.buf);
			break;
		case RESP_XMLDOC:
			/* Serve the in-memory XML alias document. */
			http_SendMessage(info, &timeout, "Ibb",
				&RespInstr,
				headers.buf, headers.length,
				xmldoc.doc.buf, xmldoc.doc.length);
			alias_release(&xmldoc);
			break;
		case RESP_WEBDOC:
			/*http_SendVirtualDirDoc(info, &timeout, "Ibf",
				&RespInstr,
				headers.buf, headers.length,
				filename.buf);*/
			http_SendMessage(info, &timeout, "Ibf",
				&RespInstr,
				headers.buf, headers.length,
				filename.buf);
			break;
		case RESP_HEADERS:
			/* headers only */
			http_SendMessage(info, &timeout, "b",
				headers.buf, headers.length);
			break;
		case RESP_POST:
			/* headers only */
			ret = http_RecvPostMessage(parser, info,
				filename.buf, &RespInstr);
			/* Send response. */
			http_MakeMessage(&headers, 1, 1,
				"RTLSXcCc",
				ret, "text/html", &RespInstr, X_USER_AGENT);
			http_SendMessage(info, &timeout, "b",
				headers.buf, headers.length);
			break;
		default:
			UpnpPrintf(UPNP_INFO, HTTP, __FILE__, __LINE__,
				"webserver: Invalid response type received.\n");
			assert(0);
		}
	}
	UpnpPrintf(UPNP_INFO, HTTP, __FILE__, __LINE__,
		"webserver: request processed...\n");
	membuffer_destroy(&headers);
	membuffer_destroy(&filename);
}

#endif /* EXCLUDE_WEB_SERVER */
./CrossVul/dataset_final_sorted/CWE-284/c/bad_5204_2
crossvul-cpp_data_bad_1571_2
/*
   Copyright (c) 2006, 2013, Oracle and/or its affiliates. All rights reserved.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; version 2 of the License.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, 51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA
*/

#include "client_priv.h"
#include "my_default.h"
#include <sslopt-vars.h>
#include "../scripts/mysql_fix_privilege_tables_sql.c"

#include <welcome_copyright_notice.h> /* ORACLE_WELCOME_COPYRIGHT_NOTICE */

#define VER "1.1"

#ifdef HAVE_SYS_WAIT_H
#include <sys/wait.h>
#endif

#ifndef WEXITSTATUS
# ifdef _WIN32
#  define WEXITSTATUS(stat_val) (stat_val)
# else
#  define WEXITSTATUS(stat_val) ((unsigned)(stat_val) >> 8)
# endif
#endif

/* Resolved paths of the helper tools this program shells out to. */
static char mysql_path[FN_REFLEN];
static char mysqlcheck_path[FN_REFLEN];

static my_bool opt_force, opt_verbose, debug_info_flag, debug_check_flag,
               opt_systables_only, opt_version_check;
static uint my_end_arg= 0;
static char *opt_user= (char*)"root";

/* Options forwarded verbatim to the spawned mysql/mysqlcheck tools. */
static DYNAMIC_STRING ds_args;
/* Connection-only options (host, port, socket, ...). */
static DYNAMIC_STRING conn_args;

static char *opt_password= 0;
static char *opt_plugin_dir= 0, *opt_default_auth= 0;

static my_bool tty_password= 0;

static char opt_tmpdir[FN_REFLEN] = "";

#ifndef DBUG_OFF
static char *default_dbug_option= (char*) "d:t:O,/tmp/mysql_upgrade.trace";
#endif

static char **defaults_argv;

static my_bool not_used; /* Can't use GET_BOOL without a value pointer */

static my_bool opt_write_binlog;

/* Command-line option table; see my_getopt for field semantics. */
static struct my_option my_long_options[]=
{
  {"help", '?', "Display this help message and exit.", 0, 0, 0, GET_NO_ARG,
   NO_ARG, 0, 0, 0, 0, 0, 0},
  {"character-sets-dir", OPT_CHARSETS_DIR,
   "Directory for character set files.", 0, 0, 0, GET_STR, REQUIRED_ARG,
   0, 0, 0, 0, 0, 0},
  {"compress", OPT_COMPRESS, "Use compression in server/client protocol.",
   &not_used, &not_used, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
#ifdef DBUG_OFF
  {"debug", '#', "This is a non-debug version. Catch this and exit.",
   0, 0, 0, GET_DISABLED, OPT_ARG, 0, 0, 0, 0, 0, 0},
#else
  {"debug", '#', "Output debug log.", &default_dbug_option,
   &default_dbug_option, 0, GET_STR, OPT_ARG, 0, 0, 0, 0, 0, 0},
#endif
  {"debug-check", OPT_DEBUG_CHECK, "Check memory and open file usage at exit.",
   &debug_check_flag, &debug_check_flag, 0,
   GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
  {"debug-info", 'T', "Print some debug info at exit.", &debug_info_flag,
   &debug_info_flag, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
  {"default-character-set", OPT_DEFAULT_CHARSET,
   "Set the default character set.", 0, 0, 0, GET_STR, REQUIRED_ARG,
   0, 0, 0, 0, 0, 0},
  {"default_auth", OPT_DEFAULT_AUTH,
   "Default authentication client-side plugin to use.",
   &opt_default_auth, &opt_default_auth, 0,
   GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
  {"force", 'f', "Force execution of mysqlcheck even if mysql_upgrade "
   "has already been executed for the current version of MySQL.",
   &opt_force, &opt_force, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
  {"host",'h', "Connect to host.", 0,
   0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
  {"password", 'p',
   "Password to use when connecting to server. If password is not given,"
   " it's solicited on the tty.", &opt_password,&opt_password,
   0, GET_PASSWORD, OPT_ARG, 0, 0, 0, 0, 0, 0},
#ifdef _WIN32
  {"pipe", 'W', "Use named pipes to connect to server.", 0, 0, 0,
   GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0},
#endif
  {"plugin_dir", OPT_PLUGIN_DIR, "Directory for client-side plugins.",
   &opt_plugin_dir, &opt_plugin_dir, 0,
   GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
  {"port", 'P', "Port number to use for connection or 0 for default to, in "
   "order of preference, my.cnf, $MYSQL_TCP_PORT, "
#if MYSQL_PORT_DEFAULT == 0
   "/etc/services, "
#endif
   "built-in default (" STRINGIFY_ARG(MYSQL_PORT) ").",
   0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
  {"protocol", OPT_MYSQL_PROTOCOL,
   "The protocol to use for connection (tcp, socket, pipe, memory).",
   0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
#if defined (_WIN32) && !defined (EMBEDDED_LIBRARY)
  {"shared-memory-base-name", OPT_SHARED_MEMORY_BASE_NAME,
   "Base name of shared memory.", 0, 0, 0, GET_STR, REQUIRED_ARG,
   0, 0, 0, 0, 0, 0},
#endif
  {"version-check", 'k', "Run this program only if its \'server version\' "
   "matches the version of the server to which it's connecting, (enabled by "
   "default); use --skip-version-check to avoid this check. Note: the \'server "
   "version\' of the program is the version of the MySQL server with which it "
   "was built/distributed.", &opt_version_check, &opt_version_check, 0,
   GET_BOOL, NO_ARG, 1, 0, 0, 0, 0, 0},
  {"socket", 'S', "The socket file to use for connection.",
   0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
#include <sslopt-longopts.h>
  {"tmpdir", 't', "Directory for temporary files.",
   0, 0, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
  {"upgrade-system-tables", 's', "Only upgrade the system tables "
   "do not try to upgrade the data.",
   &opt_systables_only, &opt_systables_only, 0,
   GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
  {"user", 'u', "User for login if not current user.", &opt_user,
   &opt_user, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
  {"verbose", 'v', "Display more output about the process.",
   &opt_verbose, &opt_verbose, 0,
   GET_BOOL, NO_ARG, 1, 0, 0, 0, 0, 0},
  {"write-binlog", OPT_WRITE_BINLOG,
   "All commands including mysqlcheck are binlogged. Disabled by default; "
   "use when commands should be sent to replication slaves.",
   &opt_write_binlog, &opt_write_binlog, 0, GET_BOOL, NO_ARG,
   0, 0, 0, 0, 0, 0},
  {0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}
};


/* Release the globals allocated during option parsing. */
static void free_used_memory(void)
{
  /* Free memory allocated by 'load_defaults' */
  free_defaults(defaults_argv);

  dynstr_free(&ds_args);
  dynstr_free(&conn_args);
}


/* Print a fatal error, clean up and exit(1). Never returns. */
static void die(const char *fmt, ...)
{
  va_list args;
  DBUG_ENTER("die");

  /* Print the error message */
  va_start(args, fmt);
  if (fmt)
  {
    fprintf(stderr, "FATAL ERROR: ");
    vfprintf(stderr, fmt, args);
    fprintf(stderr, "\n");
    fflush(stderr);
  }
  va_end(args);

  DBUG_LEAVE;
  free_used_memory();
  my_end(my_end_arg);
  exit(1);
}


/* Print a progress message to stdout, but only under --verbose. */
static void verbose(const char *fmt, ...)
{
  va_list args;

  if (!opt_verbose)
    return;

  /* Print the verbose message */
  va_start(args, fmt);
  if (fmt)
  {
    vfprintf(stdout, fmt, args);
    fprintf(stdout, "\n");
    fflush(stdout);
  }
  va_end(args);
}


/*
  Add one option - passed to mysql_upgrade on command line or by defaults
  file(my.cnf) - to a dynamic string, in this way we pass the same arguments
  on to mysql and mysql_check
*/
static void add_one_option(DYNAMIC_STRING* ds,
                           const struct my_option *opt,
                           const char* argument)
{
  const char* eq= NullS;
  const char* arg= NullS;
  if (opt->arg_type != NO_ARG)
  {
    eq= "=";
    switch (opt->var_type & GET_TYPE_MASK) {
    case GET_STR:
    case GET_PASSWORD:
      arg= argument;
      break;
    case GET_BOOL:
      arg= (*(my_bool *)opt->value) ? "1" : "0";
      break;
    default:
      die("internal error at %s: %d",__FILE__, __LINE__);
    }
  }
  /* Re-emit as "--name=value", os-quoted for the child's shell. */
  dynstr_append_os_quoted(ds, "--", opt->name, eq, arg, NullS);
  dynstr_append(ds, " ");
}


/*
  my_getopt callback: decide per option whether it is consumed here,
  forwarded to the child tools via ds_args, or recorded in conn_args.
*/
static my_bool
get_one_option(int optid, const struct my_option *opt,
               char *argument)
{
  my_bool add_option= TRUE;

  switch (optid) {

  case '?':
    printf("%s Ver %s Distrib %s, for %s (%s)\n",
           my_progname, VER, MYSQL_SERVER_VERSION, SYSTEM_TYPE, MACHINE_TYPE);
    puts(ORACLE_WELCOME_COPYRIGHT_NOTICE("2000"));
    puts("MySQL utility for upgrading databases to new MySQL versions.\n");
    my_print_help(my_long_options);
    exit(0);
    break;

  case '#':
    DBUG_PUSH(argument ? argument : default_dbug_option);
    add_option= FALSE;
    debug_check_flag= 1;
    break;

  case 'p':
    if (argument == disabled_my_option)
      argument= (char*) "";             /* Don't require password */
    tty_password= 1;
    add_option= FALSE;
    if (argument)
    {
      /* Add password to ds_args before overwriting the arg with x's */
      add_one_option(&ds_args, opt, argument);
      while (*argument)
        *argument++= 'x';               /* Destroy argument */
      tty_password= 0;
    }
    break;

  case 't':
    my_stpnmov(opt_tmpdir, argument, sizeof(opt_tmpdir));
    add_option= FALSE;
    break;

  case 'k':                             /* --version-check */
  case 'v':                             /* --verbose */
  case 'f':                             /* --force */
  case 's':                             /* --upgrade-system-tables */
  case OPT_WRITE_BINLOG:                /* --write-binlog */
    add_option= FALSE;
    break;

  case 'h':                             /* --host */
  case 'W':                             /* --pipe */
  case 'P':                             /* --port */
  case 'S':                             /* --socket */
  case OPT_MYSQL_PROTOCOL:              /* --protocol */
  case OPT_SHARED_MEMORY_BASE_NAME:     /* --shared-memory-base-name */
  case OPT_PLUGIN_DIR:                  /* --plugin-dir */
  case OPT_DEFAULT_AUTH:                /* --default-auth */
    add_one_option(&conn_args, opt, argument);
    break;
  }

  if (add_option)
  {
    /*
      This is an option that is accpted by mysql_upgrade just so
      it can be passed on to "mysql" and "mysqlcheck"
      Save it in the ds_args string
    */
    add_one_option(&ds_args, opt, argument);
  }
  return 0;
}


/**
  Run a command using the shell, storing its output in the supplied
  dynamic string.

  NOTE(review): the command runs through popen(), i.e. through the shell;
  paths/arguments containing shell metacharacters could be interpreted —
  confirm all inputs are os-quoted by the callers.
*/
static int run_command(char* cmd,
                       DYNAMIC_STRING *ds_res)
{
  char buf[512]= {0};
  FILE *res_file;
  int error;

  if (!ds_res)
  {
    fflush(stdout);
    fflush(stderr);
  }

  if (!(res_file= popen(cmd, "r")))
    die("popen(\"%s\", \"r\") failed", cmd);

  while (fgets(buf, sizeof(buf), res_file))
  {
    DBUG_PRINT("info", ("buf: %s", buf));
    if(ds_res)
    {
      /* Save the output of this command in the supplied string */
      dynstr_append(ds_res, buf);
    }
    else
    {
      /* Print it directly on screen */
      fprintf(stdout, "%s", buf);
    }
  }

  if (!ds_res)
  {
    fflush(stdout);
    fflush(stderr);
  }

  error= pclose(res_file);
  return WEXITSTATUS(error);
}


/*
  Build a shell command line "tool arg arg ... " from the varargs
  (NULL-terminated) and run it via run_command().
*/
static int run_tool(char *tool_path, DYNAMIC_STRING *ds_res, ...)
{
  int ret;
  const char* arg;
  va_list args;
  DYNAMIC_STRING ds_cmdline;

  DBUG_ENTER("run_tool");
  DBUG_PRINT("enter", ("tool_path: %s", tool_path));

  if (init_dynamic_string(&ds_cmdline, IF_WIN("\"", ""), FN_REFLEN, FN_REFLEN))
    die("Out of memory");

  dynstr_append_os_quoted(&ds_cmdline, tool_path, NullS);
  dynstr_append(&ds_cmdline, " ");

  va_start(args, ds_res);

  while ((arg= va_arg(args, char *)))
  {
    /* Options should be os quoted */
    if (strncmp(arg, "--", 2) == 0)
      dynstr_append_os_quoted(&ds_cmdline, arg, NullS);
    else
      dynstr_append(&ds_cmdline, arg);
    dynstr_append(&ds_cmdline, " ");
  }

  va_end(args);

#ifdef _WIN32
  dynstr_append(&ds_cmdline, "\"");
#endif

  DBUG_PRINT("info", ("Running: %s", ds_cmdline.str));
  ret= run_command(ds_cmdline.str, ds_res);
  DBUG_PRINT("exit", ("ret: %d", ret));
  dynstr_free(&ds_cmdline);
  DBUG_RETURN(ret);
}


/**
  Look for the filename of given tool, with the presumption that it is in the
  same directory as mysql_upgrade and that the same executable-searching
  mechanism will be used when we run our sub-shells with popen() later.
*/
static void find_tool(char *tool_executable_name, const char *tool_name,
                      const char *self_name)
{
  char *last_fn_libchar;
  DYNAMIC_STRING ds_tmp;
  DBUG_ENTER("find_tool");
  DBUG_PRINT("enter", ("progname: %s", my_progname));

  if (init_dynamic_string(&ds_tmp, "", 32, 32))
    die("Out of memory");

  last_fn_libchar= strrchr(self_name, FN_LIBCHAR);

  if (last_fn_libchar == NULL)
  {
    /*
      mysql_upgrade was found by the shell searching the path. A
      sibling next to us should be found the same way.

      NOTE(review): strncpy() with exactly FN_REFLEN leaves the result
      unterminated if tool_name is that long — confirm tool names are
      always short.
    */
    strncpy(tool_executable_name, tool_name, FN_REFLEN);
  }
  else
  {
    int len;

    /*
      mysql_upgrade was run absolutely or relatively. We can find a
      sibling by replacing our name after the LIBCHAR with the new
      tool name.
*/ /* When running in a not yet installed build and using libtool, the program(mysql_upgrade) will be in .libs/ and executed through a libtool wrapper in order to use the dynamic libraries from this build. The same must be done for the tools(mysql and mysqlcheck). Thus if path ends in .libs/, step up one directory and execute the tools from there */ if (((last_fn_libchar - 6) >= self_name) && (strncmp(last_fn_libchar - 5, ".libs", 5) == 0) && (*(last_fn_libchar - 6) == FN_LIBCHAR)) { DBUG_PRINT("info", ("Chopping off \".libs\" from end of path")); last_fn_libchar -= 6; } len= last_fn_libchar - self_name; my_snprintf(tool_executable_name, FN_REFLEN, "%.*s%c%s", len, self_name, FN_LIBCHAR, tool_name); } verbose("Looking for '%s' as: %s", tool_name, tool_executable_name); /* Make sure it can be executed */ if (run_tool(tool_executable_name, &ds_tmp, /* Get output from command, discard*/ "--help", "2>&1", IF_WIN("> NUL", "> /dev/null"), NULL)) die("Can't execute '%s'", tool_executable_name); dynstr_free(&ds_tmp); DBUG_VOID_RETURN; } /* Run query using "mysql" */ static int run_query(const char *query, DYNAMIC_STRING *ds_res, my_bool force) { int ret; File fd; char query_file_path[FN_REFLEN]; const uchar sql_log_bin[]= "SET SQL_LOG_BIN=0;"; DBUG_ENTER("run_query"); DBUG_PRINT("enter", ("query: %s", query)); if ((fd= create_temp_file(query_file_path, opt_tmpdir[0] ? opt_tmpdir : NULL, "sql", O_CREAT | O_SHARE | O_RDWR, MYF(MY_WME))) < 0) die("Failed to create temporary file for defaults"); /* Master and slave should be upgraded separately. All statements executed by mysql_upgrade will not be binlogged. 'SET SQL_LOG_BIN=0' is executed before any other statements. 
*/ if (!opt_write_binlog) { if (my_write(fd, sql_log_bin, sizeof(sql_log_bin)-1, MYF(MY_FNABP | MY_WME))) { my_close(fd, MYF(0)); my_delete(query_file_path, MYF(0)); die("Failed to write to '%s'", query_file_path); } } if (my_write(fd, (uchar*) query, strlen(query), MYF(MY_FNABP | MY_WME))) { my_close(fd, MYF(0)); my_delete(query_file_path, MYF(0)); die("Failed to write to '%s'", query_file_path); } ret= run_tool(mysql_path, ds_res, "--no-defaults", ds_args.str, "--database=mysql", "--batch", /* Turns off pager etc. */ force ? "--force": "--skip-force", ds_res ? "--silent": "", "<", query_file_path, "2>&1", NULL); my_close(fd, MYF(0)); my_delete(query_file_path, MYF(0)); DBUG_RETURN(ret); } /* Extract the value returned from result of "show variable like ..." */ static int extract_variable_from_show(DYNAMIC_STRING* ds, char* value) { char *value_start, *value_end; size_t len; /* The query returns "datadir\t<datadir>\n", skip past the tab */ if ((value_start= strchr(ds->str, '\t')) == NULL) return 1; /* Unexpected result */ value_start++; /* Don't copy the ending newline */ if ((value_end= strchr(value_start, '\n')) == NULL) return 1; /* Unexpected result */ len= (size_t) MY_MIN(FN_REFLEN, value_end-value_start); strncpy(value, value_start, len); value[len]= '\0'; return 0; } static int get_upgrade_info_file_name(char* name) { DYNAMIC_STRING ds_datadir; DBUG_ENTER("get_upgrade_info_file_name"); if (init_dynamic_string(&ds_datadir, NULL, 32, 32)) die("Out of memory"); if (run_query("show variables like 'datadir'", &ds_datadir, FALSE) || extract_variable_from_show(&ds_datadir, name)) { dynstr_free(&ds_datadir); DBUG_RETURN(1); /* Query failed */ } dynstr_free(&ds_datadir); fn_format(name, "mysql_upgrade_info", name, "", MYF(0)); DBUG_PRINT("exit", ("name: %s", name)); DBUG_RETURN(0); } /* Read the content of mysql_upgrade_info file and compare the version number form file against version number wich mysql_upgrade was compiled for NOTE This is an optimization to avoid 
running mysql_upgrade when it's already been performed for the particular version of MySQL. In case the MySQL server can't return the upgrade info file it's always better to report that the upgrade hasn't been performed. */ static int upgrade_already_done(void) { FILE *in; char upgrade_info_file[FN_REFLEN]= {0}; char buf[sizeof(MYSQL_SERVER_VERSION)+1]; char *res; if (get_upgrade_info_file_name(upgrade_info_file)) return 0; /* Could not get filename => not sure */ if (!(in= my_fopen(upgrade_info_file, O_RDONLY, MYF(0)))) return 0; /* Could not open file => not sure */ /* Read from file, don't care if it fails since it will be detected by the strncmp */ memset(buf, 0, sizeof(buf)); res= fgets(buf, sizeof(buf), in); my_fclose(in, MYF(0)); if (!res) return 0; /* Could not read from file => not sure */ return (strncmp(res, MYSQL_SERVER_VERSION, sizeof(MYSQL_SERVER_VERSION)-1)==0); } /* Write mysql_upgrade_info file in servers data dir indicating that upgrade has been done for this version NOTE This might very well fail but since it's just an optimization to run mysql_upgrade only when necessary the error can be ignored. */ static void create_mysql_upgrade_info_file(void) { FILE *out; char upgrade_info_file[FN_REFLEN]= {0}; if (get_upgrade_info_file_name(upgrade_info_file)) return; /* Could not get filename => skip */ if (!(out= my_fopen(upgrade_info_file, O_TRUNC | O_WRONLY, MYF(0)))) { fprintf(stderr, "Could not create the upgrade info file '%s' in " "the MySQL Servers datadir, errno: %d\n", upgrade_info_file, errno); return; } /* Write new version to file */ fputs(MYSQL_SERVER_VERSION, out); my_fclose(out, MYF(0)); /* Check if the upgrad_info_file was properly created/updated It's not a fatal error -> just print a message if it fails */ if (!upgrade_already_done()) fprintf(stderr, "Could not write to the upgrade info file '%s' in " "the MySQL Servers datadir, errno: %d\n", upgrade_info_file, errno); return; } /* Print connection-related arguments. 
*/
static void print_conn_args(const char *tool_name)
{
  if (conn_args.str[0])
    verbose("Running '%s' with connection arguments: %s", tool_name,
            conn_args.str);
  else
    /* NOTE(review): this message string is missing the closing quote
       after %s — left as-is (behavior-preserving pass) */
    verbose("Running '%s with default connection arguments", tool_name);
}


/*
  Check and upgrade(if necessary) all tables
  in the server using "mysqlcheck --check-upgrade .."
*/
static int run_mysqlcheck_upgrade(void)
{
  print_conn_args("mysqlcheck");
  return run_tool(mysqlcheck_path,
                  NULL, /* Send output from mysqlcheck directly to screen */
                  "--no-defaults",
                  ds_args.str,
                  "--check-upgrade",
                  "--all-databases",
                  "--skip-database=mysql",
                  "--auto-repair",
                  opt_write_binlog ? "--write-binlog" : "--skip-write-binlog",
                  NULL);
}


/*
  Fix database and table names in all non-system databases using
  "mysqlcheck --fix-db-names --fix-table-names".
*/
static int run_mysqlcheck_fixnames(void)
{
  print_conn_args("mysqlcheck");
  return run_tool(mysqlcheck_path,
                  NULL, /* Send output from mysqlcheck directly to screen */
                  "--no-defaults",
                  ds_args.str,
                  "--all-databases",
                  "--skip-database=mysql",
                  "--fix-db-names",
                  "--fix-table-names",
                  opt_write_binlog ? "--write-binlog" : "--skip-write-binlog",
                  NULL);
}


/** performs the same operation as mysqlcheck_upgrade, but on the mysql db */
static int run_mysqlcheck_mysql_db_upgrade(void)
{
  print_conn_args("mysqlcheck");
  return run_tool(mysqlcheck_path,
                  NULL, /* Send output from mysqlcheck directly to screen */
                  "--no-defaults",
                  ds_args.str,
                  "--check-upgrade",
                  "--databases",
                  "--auto-repair",
                  opt_write_binlog ? "--write-binlog" : "--skip-write-binlog",
                  "mysql",
                  NULL);
}


/** performs the same operation as mysqlcheck_fixnames, but on the mysql db */
static int run_mysqlcheck_mysql_db_fixnames(void)
{
  print_conn_args("mysqlcheck");
  return run_tool(mysqlcheck_path,
                  NULL, /* Send output from mysqlcheck directly to screen */
                  "--no-defaults",
                  ds_args.str,
                  "--databases",
                  "--fix-db-names",
                  "--fix-table-names",
                  opt_write_binlog ? "--write-binlog" : "--skip-write-binlog",
                  "mysql",
                  NULL);
}


/* Server error lines that merely indicate the tables are already current */
static const char *expected_errors[]=
{
  "ERROR 1060", /* Duplicate column name */
  "ERROR 1061", /* Duplicate key name */
  "ERROR 1054", /* Unknown column */
  0
};


/*
  Return 1 when the line is "expected" (ignorable): either it does not
  start with "ERROR" at all, or it matches one of expected_errors.
*/
static my_bool is_expected_error(const char* line)
{
  const char** error= expected_errors;
  while (*error)
  {
    /*
      Check if lines starting with ERROR
      are in the list of expected errors
    */
    if (strncmp(line, "ERROR", 5) != 0 ||
        strncmp(line, *error, strlen(*error)) == 0)
      return 1; /* Found expected error */
    error++;
  }
  return 0;
}


/* Advance past the current line; returns pointer to start of next line */
static char* get_line(char* line)
{
  while (*line && *line != '\n')
    line++;
  if (*line)
    line++;
  return line;
}


/* Print the current line to stderr */
static void print_line(char* line)
{
  while (*line && *line != '\n')
  {
    fputc(*line, stderr);
    line++;
  }
  fputc('\n', stderr);
}


/*
  Update all system tables in MySQL Server to current
  version using "mysql" to execute all the SQL commands
  compiled into the mysql_fix_privilege_tables array
*/
static int run_sql_fix_privilege_tables(void)
{
  int found_real_errors= 0;
  const char **query_ptr;
  DYNAMIC_STRING ds_script;
  DYNAMIC_STRING ds_result;
  DBUG_ENTER("run_sql_fix_privilege_tables");

  if (init_dynamic_string(&ds_script, "", 65536, 1024))
    die("Out of memory");

  if (init_dynamic_string(&ds_result, "", 512, 512))
    die("Out of memory");

  verbose("Running 'mysql_fix_privilege_tables'...");

  /*
    Individual queries can not be executed independently by invoking
    a forked mysql client, because the script uses session variables
    and prepared statements.
  */
  for ( query_ptr= &mysql_fix_privilege_tables[0];
        *query_ptr != NULL;
        query_ptr++
      )
  {
    dynstr_append(&ds_script, *query_ptr);
  }

  run_query(ds_script.str,
            &ds_result, /* Collect result */
            TRUE);

  {
    /*
      Scan each line of the result for real errors
      and ignore the expected one(s) like "Duplicate column name",
      "Unknown column" and "Duplicate key name" since they just
      indicate the system tables are already up to date
    */
    char *line= ds_result.str;
    do
    {
      if (!is_expected_error(line))
      {
        /* Something unexpected failed, dump error line to screen */
        found_real_errors++;
        print_line(line);
      }
      else
      {
        char *c;
        /*
          We process the output of the child process here.  Basically,
          if a line contains a warning, we'll print it, otherwise, we
          won't.  The first branch handles new-style tools that print
          their name, then the severity in brackets, the second branch
          handles old-style tools that just print a severity.
        */
        if ((c= strstr(line, ": ")) &&
            (c < strchr(line, ' ')) &&
            (strncmp(c + 2, "[Warning] ", 10) == 0))
          print_line(line);
        else if ((strncmp(line, "WARNING", 7) == 0) ||
                 (strncmp(line, "Warning", 7) == 0))
          print_line(line);
      }
    } while ((line= get_line(line)) && *line);
  }

  dynstr_free(&ds_result);
  dynstr_free(&ds_script);
  DBUG_RETURN(found_real_errors);
}


static const char *load_default_groups[]=
{
  "client",        /* Read settings how to connect to server */
  "mysql_upgrade", /* Read special settings for mysql_upgrade*/
  0
};


/*
  Convert the specified version string into the numeric format.
*/
static ulong STDCALL calc_server_version(char *some_version)
{
  uint major, minor, version;
  char *point= some_version, *end_point;
  /*
    NOTE(review): the strtoul() results and end_point are not validated,
    so a malformed "version" value yields an arbitrary number rather than
    an error — TODO confirm expected format guarantees from the server.
  */
  major= (uint) strtoul(point, &end_point, 10);
  point=end_point+1;
  minor= (uint) strtoul(point, &end_point, 10);
  point=end_point+1;
  version= (uint) strtoul(point, &end_point, 10);
  return (ulong) major * 10000L + (ulong)(minor * 100 + version);
}


/**
  Check if the server version matches with the server version
  mysql_upgrade was compiled with.

  @return 0 match successful
          1 failed
*/
static int check_version_match(void)
{
  DYNAMIC_STRING ds_version;
  /*
    NOTE(review): extract_variable_from_show() truncates to FN_REFLEN,
    which is larger than this buffer (NAME_CHAR_LEN + 1) — potential
    overflow for an oversized version string; verify against upstream.
  */
  char version_str[NAME_CHAR_LEN + 1];

  if (init_dynamic_string(&ds_version, NULL, NAME_CHAR_LEN, NAME_CHAR_LEN))
    die("Out of memory");

  if (run_query("show variables like 'version'", &ds_version, FALSE) ||
      extract_variable_from_show(&ds_version, version_str))
  {
    dynstr_free(&ds_version);
    return 1; /* Query failed */
  }

  dynstr_free(&ds_version);

  if (calc_server_version((char *) version_str) != MYSQL_VERSION_ID)
  {
    fprintf(stderr, "Error: Server version (%s) does not match with the "
            "version of\nthe server (%s) with which this program was built/"
            "distributed. You can\nuse --skip-version-check to skip this "
            "check.\n", version_str, MYSQL_SERVER_VERSION);
    return 1;
  }
  else
    return 0;
}


int main(int argc, char **argv)
{
  char self_name[FN_REFLEN];

  MY_INIT(argv[0]);

#if _WIN32
  if (GetModuleFileName(NULL, self_name, FN_REFLEN) == 0)
#endif
  {
    strncpy(self_name, argv[0], FN_REFLEN);
  }

  if (init_dynamic_string(&ds_args, "", 512, 256) ||
      init_dynamic_string(&conn_args, "", 512, 256))
    die("Out of memory");

  my_getopt_use_args_separator= TRUE;
  if (load_defaults("my", load_default_groups, &argc, &argv))
    die(NULL);
  my_getopt_use_args_separator= FALSE;
  defaults_argv= argv; /* Must be freed by 'free_defaults' */

  if (handle_options(&argc, &argv, my_long_options, get_one_option))
    die(NULL);
  if (debug_info_flag)
    my_end_arg= MY_CHECK_ERROR | MY_GIVE_INFO;
  if (debug_check_flag)
    my_end_arg= MY_CHECK_ERROR;

  if (tty_password)
  {
    opt_password= get_tty_password(NullS);
    /*
      add password to defaults file
      NOTE(review): ds_args is later passed on the child tools' command
      line, so the password can be exposed in the process list; consider
      a defaults file instead.
    */
    dynstr_append_os_quoted(&ds_args, "--password=", opt_password, NullS);
    dynstr_append(&ds_args, " ");
  }
  /* add user to defaults file */
  dynstr_append_os_quoted(&ds_args, "--user=", opt_user, NullS);
  dynstr_append(&ds_args, " ");

  /* Find mysql */
  find_tool(mysql_path, IF_WIN("mysql.exe", "mysql"), self_name);

  if (!opt_systables_only)
  {
    /* Find mysqlcheck */
    find_tool(mysqlcheck_path, IF_WIN("mysqlcheck.exe", "mysqlcheck"),
              self_name);
  }
  else
  {
    printf("The --upgrade-system-tables option was used, databases won't be touched.\n");
  }

  /*
    Read the mysql_upgrade_info file to check if mysql_upgrade
    already has been run for this installation of MySQL
  */
  if (!opt_force && upgrade_already_done())
  {
    printf("This installation of MySQL is already upgraded to %s, "
           "use --force if you still need to run mysql_upgrade\n",
           MYSQL_SERVER_VERSION);
    die(NULL);
  }

  if (opt_version_check && check_version_match())
    die("Upgrade failed");

  /*
    Run "mysqlcheck" and "mysql_fix_privilege_tables.sql"

    First run mysqlcheck on the system database.
    Then do the upgrade.
    And then run mysqlcheck on all tables.
  */
  if ((!opt_systables_only &&
       (run_mysqlcheck_mysql_db_fixnames() || run_mysqlcheck_mysql_db_upgrade())) ||
      run_sql_fix_privilege_tables() ||
      (!opt_systables_only &&
       (run_mysqlcheck_fixnames() || run_mysqlcheck_upgrade())))
  {
    /*
      The upgrade failed to complete in some way or another,
      significant error message should have been printed to the screen
    */
    die("Upgrade failed" );
  }
  verbose("OK");

  /* Create a file indicating upgrade has been performed */
  create_mysql_upgrade_info_file();

  free_used_memory();
  my_end(my_end_arg);
  exit(0);
}
./CrossVul/dataset_final_sorted/CWE-284/c/bad_1571_2
crossvul-cpp_data_good_5346_0
/* * IPv4 over IEEE 1394, per RFC 2734 * IPv6 over IEEE 1394, per RFC 3146 * * Copyright (C) 2009 Jay Fenlason <fenlason@redhat.com> * * based on eth1394 by Ben Collins et al */ #include <linux/bug.h> #include <linux/compiler.h> #include <linux/delay.h> #include <linux/device.h> #include <linux/ethtool.h> #include <linux/firewire.h> #include <linux/firewire-constants.h> #include <linux/highmem.h> #include <linux/in.h> #include <linux/ip.h> #include <linux/jiffies.h> #include <linux/mod_devicetable.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/mutex.h> #include <linux/netdevice.h> #include <linux/skbuff.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <asm/unaligned.h> #include <net/arp.h> #include <net/firewire.h> /* rx limits */ #define FWNET_MAX_FRAGMENTS 30 /* arbitrary, > TX queue depth */ #define FWNET_ISO_PAGE_COUNT (PAGE_SIZE < 16*1024 ? 4 : 2) /* tx limits */ #define FWNET_MAX_QUEUED_DATAGRAMS 20 /* < 64 = number of tlabels */ #define FWNET_MIN_QUEUED_DATAGRAMS 10 /* should keep AT DMA busy enough */ #define FWNET_TX_QUEUE_LEN FWNET_MAX_QUEUED_DATAGRAMS /* ? 
*/

#define IEEE1394_BROADCAST_CHANNEL	31
#define IEEE1394_ALL_NODES		(0xffc0 | 0x003f)
#define IEEE1394_MAX_PAYLOAD_S100	512
#define FWNET_NO_FIFO_ADDR		(~0ULL)

#define IANA_SPECIFIER_ID		0x00005eU
#define RFC2734_SW_VERSION		0x000001U
#define RFC3146_SW_VERSION		0x000002U

#define IEEE1394_GASP_HDR_SIZE	8

#define RFC2374_UNFRAG_HDR_SIZE	4
#define RFC2374_FRAG_HDR_SIZE	8
#define RFC2374_FRAG_OVERHEAD	4

#define RFC2374_HDR_UNFRAG	0	/* unfragmented         */
#define RFC2374_HDR_FIRSTFRAG	1	/* first fragment	*/
#define RFC2374_HDR_LASTFRAG	2	/* last fragment	*/
#define RFC2374_HDR_INTFRAG	3	/* interior fragment	*/

/* A set multicast/broadcast bit is the low bit of the first address octet */
static bool fwnet_hwaddr_is_multicast(u8 *ha)
{
	return !!(*ha & 1);
}

/* IPv4 and IPv6 encapsulation header */
struct rfc2734_header {
	u32 w0;
	u32 w1;
};

/* Field accessors for the RFC 2734 encapsulation header words */
#define fwnet_get_hdr_lf(h)		(((h)->w0 & 0xc0000000) >> 30)
#define fwnet_get_hdr_ether_type(h)	(((h)->w0 & 0x0000ffff))
#define fwnet_get_hdr_dg_size(h)	(((h)->w0 & 0x0fff0000) >> 16)
#define fwnet_get_hdr_fg_off(h)		(((h)->w0 & 0x00000fff))
#define fwnet_get_hdr_dgl(h)		(((h)->w1 & 0xffff0000) >> 16)

#define fwnet_set_hdr_lf(lf)		((lf) << 30)
#define fwnet_set_hdr_ether_type(et)	(et)
#define fwnet_set_hdr_dg_size(dgs)	((dgs) << 16)
#define fwnet_set_hdr_fg_off(fgo)	(fgo)
#define fwnet_set_hdr_dgl(dgl)		((dgl) << 16)

/* Build an unfragmented-datagram header */
static inline void fwnet_make_uf_hdr(struct rfc2734_header *hdr,
		unsigned ether_type)
{
	hdr->w0 = fwnet_set_hdr_lf(RFC2374_HDR_UNFRAG)
		  | fwnet_set_hdr_ether_type(ether_type);
}

/* Build a first-fragment header */
static inline void fwnet_make_ff_hdr(struct rfc2734_header *hdr,
		unsigned ether_type, unsigned dg_size, unsigned dgl)
{
	hdr->w0 = fwnet_set_hdr_lf(RFC2374_HDR_FIRSTFRAG)
		  | fwnet_set_hdr_dg_size(dg_size)
		  | fwnet_set_hdr_ether_type(ether_type);
	hdr->w1 = fwnet_set_hdr_dgl(dgl);
}

/* Build an interior/last-fragment header (lf selects which) */
static inline void fwnet_make_sf_hdr(struct rfc2734_header *hdr,
		unsigned lf, unsigned dg_size, unsigned fg_off, unsigned dgl)
{
	hdr->w0 = fwnet_set_hdr_lf(lf)
		  | fwnet_set_hdr_dg_size(dg_size)
		  | fwnet_set_hdr_fg_off(fg_off);
	hdr->w1 = fwnet_set_hdr_dgl(dgl);
}

/* This list keeps track of what parts of the datagram have been filled in */
struct fwnet_fragment_info {
	struct list_head fi_link;
	u16 offset;	/* byte offset of this fragment within the datagram */
	u16 len;	/* length in bytes of this (possibly merged) fragment */
};

/* One datagram being reassembled from fragments received from a peer */
struct fwnet_partial_datagram {
	struct list_head pd_link;	/* entry in fwnet_peer.pd_list */
	struct list_head fi_list;	/* received fwnet_fragment_info spans */
	struct sk_buff *skb;
	/* FIXME Why not use skb->data? */
	char *pbuf;			/* reassembly buffer inside skb */
	u16 datagram_label;
	u16 ether_type;
	u16 datagram_size;
};

static DEFINE_MUTEX(fwnet_device_mutex);
static LIST_HEAD(fwnet_device_list);

/* Per-card/net-device state */
struct fwnet_device {
	struct list_head dev_link;
	spinlock_t lock;
	enum {
		FWNET_BROADCAST_ERROR,
		FWNET_BROADCAST_RUNNING,
		FWNET_BROADCAST_STOPPED,
	} broadcast_state;
	struct fw_iso_context *broadcast_rcv_context;
	struct fw_iso_buffer broadcast_rcv_buffer;
	void **broadcast_rcv_buffer_ptrs;
	unsigned broadcast_rcv_next_ptr;
	unsigned num_broadcast_rcv_ptrs;
	unsigned rcv_buffer_size;
	/*
	 * This value is the maximum unfragmented datagram size that can be
	 * sent by the hardware.  It already has the GASP overhead and the
	 * unfragmented datagram header overhead calculated into it.
	 */
	unsigned broadcast_xmt_max_payload;
	u16 broadcast_xmt_datagramlabel;

	/*
	 * The CSR address that remote nodes must send datagrams to for us to
	 * receive them.
	 */
	struct fw_address_handler handler;
	u64 local_fifo;

	/* Number of tx datagrams that have been queued but not yet acked */
	int queued_datagrams;

	int peer_count;
	struct list_head peer_list;
	struct fw_card *card;
	struct net_device *netdev;
};

/* Per-remote-node state */
struct fwnet_peer {
	struct list_head peer_link;
	struct fwnet_device *dev;
	u64 guid;

	/* guarded by dev->lock */
	struct list_head pd_list;	/* received partial datagrams */
	unsigned pdg_size;		/* pd_list size */

	u16 datagram_label;		/* outgoing datagram label */
	u16 max_payload;	/* includes RFC2374_FRAG_HDR_SIZE overhead */
	int node_id;
	int generation;
	unsigned speed;
};

/* This is our task struct. It's used for the packet complete callback.
 */
struct fwnet_packet_task {
	struct fw_transaction transaction;
	struct rfc2734_header hdr;
	struct sk_buff *skb;
	struct fwnet_device *dev;

	int outstanding_pkts;	/* fragments still to be (re)sent */
	u64 fifo_addr;
	u16 dest_node;
	u16 max_payload;
	u8 generation;
	u8 speed;
	u8 enqueued;		/* set once handed to the TX soft-IRQ path */
};

/*
 * Get fifo address embedded in hwaddr
 */
static __u64 fwnet_hwaddr_fifo(union fwnet_hwaddr *ha)
{
	return (u64)get_unaligned_be16(&ha->uc.fifo_hi) << 32
	       | get_unaligned_be32(&ha->uc.fifo_lo);
}

/*
 * saddr == NULL means use device source address.
 * daddr == NULL means leave destination address (eg unresolved arp).
 */
static int fwnet_header_create(struct sk_buff *skb, struct net_device *net,
			unsigned short type, const void *daddr,
			const void *saddr, unsigned len)
{
	struct fwnet_header *h;

	h = (struct fwnet_header *)skb_push(skb, sizeof(*h));
	put_unaligned_be16(type, &h->h_proto);

	if (net->flags & (IFF_LOOPBACK | IFF_NOARP)) {
		memset(h->h_dest, 0, net->addr_len);

		return net->hard_header_len;
	}

	if (daddr) {
		memcpy(h->h_dest, daddr, net->addr_len);

		return net->hard_header_len;
	}

	/* negative length signals an incomplete (unresolved) header */
	return -net->hard_header_len;
}

/* Fill a hardware header cache entry; 802.3 frames are not cacheable */
static int fwnet_header_cache(const struct neighbour *neigh,
			      struct hh_cache *hh, __be16 type)
{
	struct net_device *net;
	struct fwnet_header *h;

	if (type == cpu_to_be16(ETH_P_802_3))
		return -1;
	net = neigh->dev;
	h = (struct fwnet_header *)((u8 *)hh->hh_data + HH_DATA_OFF(sizeof(*h)));
	h->h_proto = type;
	memcpy(h->h_dest, neigh->ha, net->addr_len);
	hh->hh_len = FWNET_HLEN;

	return 0;
}

/* Called by Address Resolution module to notify changes in address.
 */
static void fwnet_header_cache_update(struct hh_cache *hh,
		const struct net_device *net, const unsigned char *haddr)
{
	memcpy((u8 *)hh->hh_data + HH_DATA_OFF(FWNET_HLEN), haddr, net->addr_len);
}

/* Report our own hardware address as the source of a received skb */
static int fwnet_header_parse(const struct sk_buff *skb, unsigned char *haddr)
{
	memcpy(haddr, skb->dev->dev_addr, FWNET_ALEN);

	return FWNET_ALEN;
}

static const struct header_ops fwnet_header_ops = {
	.create         = fwnet_header_create,
	.cache		= fwnet_header_cache,
	.cache_update	= fwnet_header_cache_update,
	.parse          = fwnet_header_parse,
};

/* FIXME: is this correct for all cases? */
static bool fwnet_frag_overlap(struct fwnet_partial_datagram *pd,
			       unsigned offset, unsigned len)
{
	struct fwnet_fragment_info *fi;
	unsigned end = offset + len;

	list_for_each_entry(fi, &pd->fi_list, fi_link)
		if (offset < fi->offset + fi->len && end > fi->offset)
			return true;

	return false;
}

/* Assumes that new fragment does not overlap any existing fragments */
static struct fwnet_fragment_info *fwnet_frag_new(
	struct fwnet_partial_datagram *pd, unsigned offset, unsigned len)
{
	struct fwnet_fragment_info *fi, *fi2, *new;
	struct list_head *list;

	list = &pd->fi_list;
	list_for_each_entry(fi, &pd->fi_list, fi_link) {
		if (fi->offset + fi->len == offset) {
			/* The new fragment can be tacked on to the end */
			/* Did the new fragment plug a hole? */
			fi2 = list_entry(fi->fi_link.next,
					 struct fwnet_fragment_info, fi_link);
			if (fi->offset + fi->len == fi2->offset) {
				/* glue fragments together */
				fi->len += len + fi2->len;
				list_del(&fi2->fi_link);
				kfree(fi2);
			} else {
				fi->len += len;
			}

			return fi;
		}
		if (offset + len == fi->offset) {
			/* The new fragment can be tacked on to the beginning */
			/* Did the new fragment plug a hole?
			 */
			fi2 = list_entry(fi->fi_link.prev,
					 struct fwnet_fragment_info, fi_link);
			if (fi2->offset + fi2->len == fi->offset) {
				/* glue fragments together */
				fi2->len += fi->len + len;
				list_del(&fi->fi_link);
				kfree(fi);

				return fi2;
			}
			fi->offset = offset;
			fi->len += len;

			return fi;
		}
		if (offset > fi->offset + fi->len) {
			/* insert after fi (list is kept sorted) */
			list = &fi->fi_link;
			break;
		}
		if (offset + len < fi->offset) {
			/* insert before fi */
			list = fi->fi_link.prev;
			break;
		}
	}

	new = kmalloc(sizeof(*new), GFP_ATOMIC);
	if (!new)
		return NULL;

	new->offset = offset;
	new->len = len;
	list_add(&new->fi_link, list);

	return new;
}

/* Start reassembly of a new datagram from its first-seen fragment */
static struct fwnet_partial_datagram *fwnet_pd_new(struct net_device *net,
		struct fwnet_peer *peer, u16 datagram_label, unsigned dg_size,
		void *frag_buf, unsigned frag_off, unsigned frag_len)
{
	struct fwnet_partial_datagram *new;
	struct fwnet_fragment_info *fi;

	new = kmalloc(sizeof(*new), GFP_ATOMIC);
	if (!new)
		goto fail;

	INIT_LIST_HEAD(&new->fi_list);
	fi = fwnet_frag_new(new, frag_off, frag_len);
	if (fi == NULL)
		goto fail_w_new;

	new->datagram_label = datagram_label;
	new->datagram_size = dg_size;
	new->skb = dev_alloc_skb(dg_size + LL_RESERVED_SPACE(net));
	if (new->skb == NULL)
		goto fail_w_fi;

	skb_reserve(new->skb, LL_RESERVED_SPACE(net));
	new->pbuf = skb_put(new->skb, dg_size);
	memcpy(new->pbuf + frag_off, frag_buf, frag_len);
	list_add_tail(&new->pd_link, &peer->pd_list);

	return new;

fail_w_fi:
	kfree(fi);
fail_w_new:
	kfree(new);
fail:
	return NULL;
}

/* Find the in-progress datagram with the given label, if any */
static struct fwnet_partial_datagram *fwnet_pd_find(struct fwnet_peer *peer,
						    u16 datagram_label)
{
	struct fwnet_partial_datagram *pd;

	list_for_each_entry(pd, &peer->pd_list, pd_link)
		if (pd->datagram_label == datagram_label)
			return pd;

	return NULL;
}

/* Tear down a partial datagram and all of its fragment records */
static void fwnet_pd_delete(struct fwnet_partial_datagram *old)
{
	struct fwnet_fragment_info *fi, *n;

	list_for_each_entry_safe(fi, n, &old->fi_list, fi_link)
		kfree(fi);

	list_del(&old->pd_link);
	dev_kfree_skb_any(old->skb);
	kfree(old);
}

/* Merge one more fragment into an existing partial datagram */
static bool fwnet_pd_update(struct fwnet_peer *peer,
		struct fwnet_partial_datagram *pd,
		void *frag_buf, unsigned frag_off, unsigned frag_len)
{
	if (fwnet_frag_new(pd, frag_off, frag_len) == NULL)
		return false;

	memcpy(pd->pbuf + frag_off, frag_buf, frag_len);

	/*
	 * Move list entry to beginning of list so that oldest partial
	 * datagrams percolate to the end of the list
	 */
	list_move_tail(&pd->pd_link, &peer->pd_list);

	return true;
}

/* Complete when the (fully merged) first fragment spans the whole datagram */
static bool fwnet_pd_is_complete(struct fwnet_partial_datagram *pd)
{
	struct fwnet_fragment_info *fi;

	fi = list_entry(pd->fi_list.next, struct fwnet_fragment_info, fi_link);

	return fi->len == pd->datagram_size;
}

/* caller must hold dev->lock */
static struct fwnet_peer *fwnet_peer_find_by_guid(struct fwnet_device *dev,
						  u64 guid)
{
	struct fwnet_peer *peer;

	list_for_each_entry(peer, &dev->peer_list, peer_link)
		if (peer->guid == guid)
			return peer;

	return NULL;
}

/* caller must hold dev->lock */
static struct fwnet_peer *fwnet_peer_find_by_node_id(struct fwnet_device *dev,
						     int node_id, int generation)
{
	struct fwnet_peer *peer;

	list_for_each_entry(peer, &dev->peer_list, peer_link)
		if (peer->node_id == node_id &&
		    peer->generation == generation)
			return peer;

	return NULL;
}

/* See IEEE 1394-2008 table 6-4, table 8-8, table 16-18. */
static unsigned fwnet_max_payload(unsigned max_rec, unsigned speed)
{
	max_rec = min(max_rec, speed + 8);
	max_rec = clamp(max_rec, 8U, 11U); /* 512...4096 */

	return (1 << (max_rec + 1)) - RFC2374_FRAG_HDR_SIZE;
}

/* Dress a fully reassembled datagram as an ethernet-like frame and hand it
 * to the network stack. */
static int fwnet_finish_incoming_packet(struct net_device *net,
					struct sk_buff *skb, u16 source_node_id,
					bool is_broadcast, u16 ether_type)
{
	struct fwnet_device *dev;
	int status;
	__be64 guid;

	/* only IPv4/ARP (and IPv6 when enabled) are accepted */
	switch (ether_type) {
	case ETH_P_ARP:
	case ETH_P_IP:
#if IS_ENABLED(CONFIG_IPV6)
	case ETH_P_IPV6:
#endif
		break;
	default:
		goto err;
	}

	dev = netdev_priv(net);
	/* Write metadata, and then pass to the receive level */
	skb->dev = net;
	skb->ip_summed = CHECKSUM_NONE;

	/*
	 * Parse the encapsulation header. This actually does the job of
	 * converting to an ethernet-like pseudo frame header.
	 */
	guid = cpu_to_be64(dev->card->guid);
	if (dev_hard_header(skb, net, ether_type,
			   is_broadcast ? net->broadcast : net->dev_addr,
			   NULL, skb->len) >= 0) {
		struct fwnet_header *eth;
		u16 *rawp;
		__be16 protocol;

		skb_reset_mac_header(skb);
		skb_pull(skb, sizeof(*eth));
		eth = (struct fwnet_header *)skb_mac_header(skb);
		if (fwnet_hwaddr_is_multicast(eth->h_dest)) {
			if (memcmp(eth->h_dest, net->broadcast,
				   net->addr_len) == 0)
				skb->pkt_type = PACKET_BROADCAST;
#if 0
			else
				skb->pkt_type = PACKET_MULTICAST;
#endif
		} else {
			if (memcmp(eth->h_dest, net->dev_addr, net->addr_len))
				skb->pkt_type = PACKET_OTHERHOST;
		}
		if (ntohs(eth->h_proto) >= ETH_P_802_3_MIN) {
			protocol = eth->h_proto;
		} else {
			rawp = (u16 *)skb->data;
			if (*rawp == 0xffff)
				protocol = htons(ETH_P_802_3);
			else
				protocol = htons(ETH_P_802_2);
		}
		skb->protocol = protocol;
	}
	status = netif_rx(skb);
	if (status == NET_RX_DROP) {
		net->stats.rx_errors++;
		net->stats.rx_dropped++;
	} else {
		net->stats.rx_packets++;
		net->stats.rx_bytes += skb->len;
	}

	return 0;

 err:
	net->stats.rx_errors++;
	net->stats.rx_dropped++;

	dev_kfree_skb_any(skb);

	return -ENOENT;
}

/* Parse one received RFC 2734 encapsulated packet: either deliver it
 * immediately (unfragmented) or merge it into the per-peer reassembly
 * state under dev->lock. */
static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len,
				 int source_node_id, int generation,
				 bool is_broadcast)
{
	struct sk_buff *skb;
	struct net_device *net = dev->netdev;
	struct rfc2734_header hdr;
	unsigned lf;
	unsigned long flags;
	struct fwnet_peer *peer;
	struct fwnet_partial_datagram *pd;
	int fg_off;
	int dg_size;
	u16 datagram_label;
	int retval;
	u16 ether_type;

	/* must at least carry the unfragmented encapsulation header */
	if (len <= RFC2374_UNFRAG_HDR_SIZE)
		return 0;

	hdr.w0 = be32_to_cpu(buf[0]);
	lf = fwnet_get_hdr_lf(&hdr);
	if (lf == RFC2374_HDR_UNFRAG) {
		/*
		 * An unfragmented datagram has been received by the ieee1394
		 * bus. Build an skbuff around it so we can pass it to the
		 * high level network layer.
		 */
		ether_type = fwnet_get_hdr_ether_type(&hdr);
		buf++;
		len -= RFC2374_UNFRAG_HDR_SIZE;

		skb = dev_alloc_skb(len + LL_RESERVED_SPACE(net));
		if (unlikely(!skb)) {
			net->stats.rx_dropped++;

			return -ENOMEM;
		}
		skb_reserve(skb, LL_RESERVED_SPACE(net));
		memcpy(skb_put(skb, len), buf, len);

		return fwnet_finish_incoming_packet(net, skb, source_node_id,
						    is_broadcast, ether_type);
	}
	/* A datagram fragment has been received, now the fun begins. */

	if (len <= RFC2374_FRAG_HDR_SIZE)
		return 0;

	hdr.w1 = ntohl(buf[1]);
	buf += 2;
	len -= RFC2374_FRAG_HDR_SIZE;
	if (lf == RFC2374_HDR_FIRSTFRAG) {
		/* only the first fragment carries the ether type */
		ether_type = fwnet_get_hdr_ether_type(&hdr);
		fg_off = 0;
	} else {
		ether_type = 0;
		fg_off = fwnet_get_hdr_fg_off(&hdr);
	}
	datagram_label = fwnet_get_hdr_dgl(&hdr);
	dg_size = fwnet_get_hdr_dg_size(&hdr);	/* ??? + 1 */

	/* reject fragments that would write past the datagram buffer */
	if (fg_off + len > dg_size)
		return 0;

	spin_lock_irqsave(&dev->lock, flags);

	peer = fwnet_peer_find_by_node_id(dev, source_node_id, generation);
	if (!peer) {
		retval = -ENOENT;
		goto fail;
	}

	pd = fwnet_pd_find(peer, datagram_label);
	if (pd == NULL) {
		while (peer->pdg_size >= FWNET_MAX_FRAGMENTS) {
			/* remove the oldest */
			fwnet_pd_delete(list_first_entry(&peer->pd_list,
				struct fwnet_partial_datagram, pd_link));
			peer->pdg_size--;
		}
		pd = fwnet_pd_new(net, peer, datagram_label,
				  dg_size, buf, fg_off, len);
		if (pd == NULL) {
			retval = -ENOMEM;
			goto fail;
		}
		peer->pdg_size++;
	} else {
		if (fwnet_frag_overlap(pd, fg_off, len) ||
		    pd->datagram_size != dg_size) {
			/*
			 * Differing datagram sizes or overlapping fragments,
			 * discard old datagram and start a new one.
			 */
			fwnet_pd_delete(pd);
			pd = fwnet_pd_new(net, peer, datagram_label,
					  dg_size, buf, fg_off, len);
			if (pd == NULL) {
				peer->pdg_size--;
				retval = -ENOMEM;
				goto fail;
			}
		} else {
			if (!fwnet_pd_update(peer, pd, buf, fg_off, len)) {
				/*
				 * Couldn't save off fragment anyway
				 * so might as well obliterate the
				 * datagram now.
				 */
				fwnet_pd_delete(pd);
				peer->pdg_size--;
				retval = -ENOMEM;
				goto fail;
			}
		}
	} /* new datagram or add to existing one */

	if (lf == RFC2374_HDR_FIRSTFRAG)
		pd->ether_type = ether_type;

	if (fwnet_pd_is_complete(pd)) {
		ether_type = pd->ether_type;
		peer->pdg_size--;
		skb = skb_get(pd->skb);
		fwnet_pd_delete(pd);

		spin_unlock_irqrestore(&dev->lock, flags);

		return fwnet_finish_incoming_packet(net, skb, source_node_id,
						    false, ether_type);
	}
	/*
	 * Datagram is not complete, we're done for the
	 * moment.
	 */
	retval = 0;
 fail:
	spin_unlock_irqrestore(&dev->lock, flags);

	return retval;
}

/* Asynchronous write handler for unicast datagrams sent to our FIFO */
static void fwnet_receive_packet(struct fw_card *card, struct fw_request *r,
		int tcode, int destination, int source, int generation,
		unsigned long long offset, void *payload, size_t length,
		void *callback_data)
{
	struct fwnet_device *dev = callback_data;
	int rcode;

	if (destination == IEEE1394_ALL_NODES) {
		/* broadcast writes get no response; just drop the request */
		kfree(r);

		return;
	}

	if (offset != dev->handler.offset)
		rcode = RCODE_ADDRESS_ERROR;
	else if (tcode != TCODE_WRITE_BLOCK_REQUEST)
		rcode = RCODE_TYPE_ERROR;
	else if (fwnet_incoming_packet(dev, payload, length,
				       source, generation, false) != 0) {
		dev_err(&dev->netdev->dev, "incoming packet failure\n");
		rcode = RCODE_CONFLICT_ERROR;
	} else
		rcode = RCODE_COMPLETE;

	fw_send_response(card, r, rcode);
}

/* GASP header field helpers (big-endian on the wire) */
static int gasp_source_id(__be32 *p)
{
	return be32_to_cpu(p[0]) >> 16;
}

static u32 gasp_specifier_id(__be32 *p)
{
	return (be32_to_cpu(p[0]) & 0xffff) << 8 |
	       (be32_to_cpu(p[1]) & 0xff000000) >> 24;
}

static u32 gasp_version(__be32 *p)
{
	return be32_to_cpu(p[1]) & 0xffffff;
}

/* Isochronous completion callback for the broadcast receive context */
static void fwnet_receive_broadcast(struct fw_iso_context *context,
		u32 cycle, size_t header_length, void *header, void *data)
{
	struct fwnet_device *dev;
	struct fw_iso_packet packet;
	__be16 *hdr_ptr;
	__be32 *buf_ptr;
	int retval;
	u32 length;
	unsigned long offset;
	unsigned long flags;

	dev = data;
	hdr_ptr = header;
	length = be16_to_cpup(hdr_ptr);

	spin_lock_irqsave(&dev->lock, flags);

	/* advance the ring of receive buffer pointers */
	offset = dev->rcv_buffer_size * dev->broadcast_rcv_next_ptr;
	buf_ptr = dev->broadcast_rcv_buffer_ptrs[dev->broadcast_rcv_next_ptr++];
	if (dev->broadcast_rcv_next_ptr == dev->num_broadcast_rcv_ptrs)
		dev->broadcast_rcv_next_ptr = 0;

	spin_unlock_irqrestore(&dev->lock, flags);

	/* only accept IANA-registered RFC 2734 / RFC 3146 GASP streams */
	if (length > IEEE1394_GASP_HDR_SIZE &&
	    gasp_specifier_id(buf_ptr) == IANA_SPECIFIER_ID &&
	    (gasp_version(buf_ptr) == RFC2734_SW_VERSION
#if IS_ENABLED(CONFIG_IPV6)
	     || gasp_version(buf_ptr) == RFC3146_SW_VERSION
#endif
	    ))
		fwnet_incoming_packet(dev, buf_ptr + 2,
				      length - IEEE1394_GASP_HDR_SIZE,
				      gasp_source_id(buf_ptr),
				      context->card->generation, true);

	/* requeue the buffer for the next broadcast packet */
	packet.payload_length = dev->rcv_buffer_size;
	packet.interrupt = 1;
	packet.skip = 0;
	packet.tag = 3;
	packet.sy = 0;
	packet.header_length = IEEE1394_GASP_HDR_SIZE;

	spin_lock_irqsave(&dev->lock, flags);

	retval = fw_iso_context_queue(dev->broadcast_rcv_context, &packet,
				      &dev->broadcast_rcv_buffer, offset);

	spin_unlock_irqrestore(&dev->lock, flags);

	if (retval >= 0)
		fw_iso_context_queue_flush(dev->broadcast_rcv_context);
	else
		dev_err(&dev->netdev->dev, "requeue failed\n");
}

static struct kmem_cache *fwnet_packet_task_cache;

/* Release a transmit task and the skb it owns */
static void fwnet_free_ptask(struct fwnet_packet_task *ptask)
{
	dev_kfree_skb_any(ptask->skb);
	kmem_cache_free(fwnet_packet_task_cache, ptask);
}

/* Caller must hold dev->lock. */
static void dec_queued_datagrams(struct fwnet_device *dev)
{
	if (--dev->queued_datagrams == FWNET_MIN_QUEUED_DATAGRAMS)
		netif_wake_queue(dev->netdev);
}

static int fwnet_send_packet(struct fwnet_packet_task *ptask);

/* Transaction-complete callback: account for the sent fragment and, if
 * more fragments remain, advance the header state and send the next. */
static void fwnet_transmit_packet_done(struct fwnet_packet_task *ptask)
{
	struct fwnet_device *dev = ptask->dev;
	struct sk_buff *skb = ptask->skb;
	unsigned long flags;
	bool free;

	spin_lock_irqsave(&dev->lock, flags);

	ptask->outstanding_pkts--;

	/* Check whether we or the networking TX soft-IRQ is last user. */
	free = (ptask->outstanding_pkts == 0 && ptask->enqueued);
	if (free)
		dec_queued_datagrams(dev);

	if (ptask->outstanding_pkts == 0) {
		dev->netdev->stats.tx_packets++;
		dev->netdev->stats.tx_bytes += skb->len;
	}

	spin_unlock_irqrestore(&dev->lock, flags);

	if (ptask->outstanding_pkts > 0) {
		u16 dg_size;
		u16 fg_off;
		u16 datagram_label;
		u16 lf;

		/* Update the ptask to point to the next fragment and send it */
		lf = fwnet_get_hdr_lf(&ptask->hdr);
		switch (lf) {
		case RFC2374_HDR_LASTFRAG:
		case RFC2374_HDR_UNFRAG:
		default:
			/* can't have outstanding fragments after the last one */
			dev_err(&dev->netdev->dev,
				"outstanding packet %x lf %x, header %x,%x\n",
				ptask->outstanding_pkts, lf, ptask->hdr.w0,
				ptask->hdr.w1);
			BUG();

		case RFC2374_HDR_FIRSTFRAG:
			/* Set frag type here for future interior fragments */
			dg_size = fwnet_get_hdr_dg_size(&ptask->hdr);
			fg_off = ptask->max_payload - RFC2374_FRAG_HDR_SIZE;
			datagram_label = fwnet_get_hdr_dgl(&ptask->hdr);
			break;

		case RFC2374_HDR_INTFRAG:
			dg_size = fwnet_get_hdr_dg_size(&ptask->hdr);
			fg_off = fwnet_get_hdr_fg_off(&ptask->hdr)
				 + ptask->max_payload - RFC2374_FRAG_HDR_SIZE;
			datagram_label = fwnet_get_hdr_dgl(&ptask->hdr);
			break;
		}

		if (ptask->dest_node == IEEE1394_ALL_NODES) {
			skb_pull(skb,
				 ptask->max_payload + IEEE1394_GASP_HDR_SIZE);
		} else {
			skb_pull(skb, ptask->max_payload);
		}
		if (ptask->outstanding_pkts > 1) {
			fwnet_make_sf_hdr(&ptask->hdr, RFC2374_HDR_INTFRAG,
					  dg_size, fg_off, datagram_label);
		} else {
			fwnet_make_sf_hdr(&ptask->hdr, RFC2374_HDR_LASTFRAG,
					  dg_size, fg_off, datagram_label);
			ptask->max_payload = skb->len + RFC2374_FRAG_HDR_SIZE;
		}
		fwnet_send_packet(ptask);
	}

	if (free)
		fwnet_free_ptask(ptask);
}

/* Transaction-failed callback: abandon the datagram's remaining fragments
 * (continues past the end of this chunk) */
static void fwnet_transmit_packet_failed(struct fwnet_packet_task *ptask)
{
	struct fwnet_device *dev = ptask->dev;
	unsigned long flags;
	bool free;

	spin_lock_irqsave(&dev->lock, flags);

	/* One fragment failed; don't try to send remaining fragments. */
	ptask->outstanding_pkts = 0;

	/* Check whether we or the networking TX soft-IRQ is last user.
*/ free = ptask->enqueued; if (free) dec_queued_datagrams(dev); dev->netdev->stats.tx_dropped++; dev->netdev->stats.tx_errors++; spin_unlock_irqrestore(&dev->lock, flags); if (free) fwnet_free_ptask(ptask); } static void fwnet_write_complete(struct fw_card *card, int rcode, void *payload, size_t length, void *data) { struct fwnet_packet_task *ptask = data; static unsigned long j; static int last_rcode, errors_skipped; if (rcode == RCODE_COMPLETE) { fwnet_transmit_packet_done(ptask); } else { if (printk_timed_ratelimit(&j, 1000) || rcode != last_rcode) { dev_err(&ptask->dev->netdev->dev, "fwnet_write_complete failed: %x (skipped %d)\n", rcode, errors_skipped); errors_skipped = 0; last_rcode = rcode; } else { errors_skipped++; } fwnet_transmit_packet_failed(ptask); } } static int fwnet_send_packet(struct fwnet_packet_task *ptask) { struct fwnet_device *dev; unsigned tx_len; struct rfc2734_header *bufhdr; unsigned long flags; bool free; dev = ptask->dev; tx_len = ptask->max_payload; switch (fwnet_get_hdr_lf(&ptask->hdr)) { case RFC2374_HDR_UNFRAG: bufhdr = (struct rfc2734_header *) skb_push(ptask->skb, RFC2374_UNFRAG_HDR_SIZE); put_unaligned_be32(ptask->hdr.w0, &bufhdr->w0); break; case RFC2374_HDR_FIRSTFRAG: case RFC2374_HDR_INTFRAG: case RFC2374_HDR_LASTFRAG: bufhdr = (struct rfc2734_header *) skb_push(ptask->skb, RFC2374_FRAG_HDR_SIZE); put_unaligned_be32(ptask->hdr.w0, &bufhdr->w0); put_unaligned_be32(ptask->hdr.w1, &bufhdr->w1); break; default: BUG(); } if (ptask->dest_node == IEEE1394_ALL_NODES) { u8 *p; int generation; int node_id; unsigned int sw_version; /* ptask->generation may not have been set yet */ generation = dev->card->generation; smp_rmb(); node_id = dev->card->node_id; switch (ptask->skb->protocol) { default: sw_version = RFC2734_SW_VERSION; break; #if IS_ENABLED(CONFIG_IPV6) case htons(ETH_P_IPV6): sw_version = RFC3146_SW_VERSION; #endif } p = skb_push(ptask->skb, IEEE1394_GASP_HDR_SIZE); put_unaligned_be32(node_id << 16 | IANA_SPECIFIER_ID >> 8, 
p); put_unaligned_be32((IANA_SPECIFIER_ID & 0xff) << 24 | sw_version, &p[4]); /* We should not transmit if broadcast_channel.valid == 0. */ fw_send_request(dev->card, &ptask->transaction, TCODE_STREAM_DATA, fw_stream_packet_destination_id(3, IEEE1394_BROADCAST_CHANNEL, 0), generation, SCODE_100, 0ULL, ptask->skb->data, tx_len + 8, fwnet_write_complete, ptask); spin_lock_irqsave(&dev->lock, flags); /* If the AT tasklet already ran, we may be last user. */ free = (ptask->outstanding_pkts == 0 && !ptask->enqueued); if (!free) ptask->enqueued = true; else dec_queued_datagrams(dev); spin_unlock_irqrestore(&dev->lock, flags); goto out; } fw_send_request(dev->card, &ptask->transaction, TCODE_WRITE_BLOCK_REQUEST, ptask->dest_node, ptask->generation, ptask->speed, ptask->fifo_addr, ptask->skb->data, tx_len, fwnet_write_complete, ptask); spin_lock_irqsave(&dev->lock, flags); /* If the AT tasklet already ran, we may be last user. */ free = (ptask->outstanding_pkts == 0 && !ptask->enqueued); if (!free) ptask->enqueued = true; else dec_queued_datagrams(dev); spin_unlock_irqrestore(&dev->lock, flags); netif_trans_update(dev->netdev); out: if (free) fwnet_free_ptask(ptask); return 0; } static void fwnet_fifo_stop(struct fwnet_device *dev) { if (dev->local_fifo == FWNET_NO_FIFO_ADDR) return; fw_core_remove_address_handler(&dev->handler); dev->local_fifo = FWNET_NO_FIFO_ADDR; } static int fwnet_fifo_start(struct fwnet_device *dev) { int retval; if (dev->local_fifo != FWNET_NO_FIFO_ADDR) return 0; dev->handler.length = 4096; dev->handler.address_callback = fwnet_receive_packet; dev->handler.callback_data = dev; retval = fw_core_add_address_handler(&dev->handler, &fw_high_memory_region); if (retval < 0) return retval; dev->local_fifo = dev->handler.offset; return 0; } static void __fwnet_broadcast_stop(struct fwnet_device *dev) { unsigned u; if (dev->broadcast_state != FWNET_BROADCAST_ERROR) { for (u = 0; u < FWNET_ISO_PAGE_COUNT; u++) kunmap(dev->broadcast_rcv_buffer.pages[u]); 
fw_iso_buffer_destroy(&dev->broadcast_rcv_buffer, dev->card); } if (dev->broadcast_rcv_context) { fw_iso_context_destroy(dev->broadcast_rcv_context); dev->broadcast_rcv_context = NULL; } kfree(dev->broadcast_rcv_buffer_ptrs); dev->broadcast_rcv_buffer_ptrs = NULL; dev->broadcast_state = FWNET_BROADCAST_ERROR; } static void fwnet_broadcast_stop(struct fwnet_device *dev) { if (dev->broadcast_state == FWNET_BROADCAST_ERROR) return; fw_iso_context_stop(dev->broadcast_rcv_context); __fwnet_broadcast_stop(dev); } static int fwnet_broadcast_start(struct fwnet_device *dev) { struct fw_iso_context *context; int retval; unsigned num_packets; unsigned max_receive; struct fw_iso_packet packet; unsigned long offset; void **ptrptr; unsigned u; if (dev->broadcast_state != FWNET_BROADCAST_ERROR) return 0; max_receive = 1U << (dev->card->max_receive + 1); num_packets = (FWNET_ISO_PAGE_COUNT * PAGE_SIZE) / max_receive; ptrptr = kmalloc(sizeof(void *) * num_packets, GFP_KERNEL); if (!ptrptr) { retval = -ENOMEM; goto failed; } dev->broadcast_rcv_buffer_ptrs = ptrptr; context = fw_iso_context_create(dev->card, FW_ISO_CONTEXT_RECEIVE, IEEE1394_BROADCAST_CHANNEL, dev->card->link_speed, 8, fwnet_receive_broadcast, dev); if (IS_ERR(context)) { retval = PTR_ERR(context); goto failed; } retval = fw_iso_buffer_init(&dev->broadcast_rcv_buffer, dev->card, FWNET_ISO_PAGE_COUNT, DMA_FROM_DEVICE); if (retval < 0) goto failed; dev->broadcast_state = FWNET_BROADCAST_STOPPED; for (u = 0; u < FWNET_ISO_PAGE_COUNT; u++) { void *ptr; unsigned v; ptr = kmap(dev->broadcast_rcv_buffer.pages[u]); for (v = 0; v < num_packets / FWNET_ISO_PAGE_COUNT; v++) *ptrptr++ = (void *) ((char *)ptr + v * max_receive); } dev->broadcast_rcv_context = context; packet.payload_length = max_receive; packet.interrupt = 1; packet.skip = 0; packet.tag = 3; packet.sy = 0; packet.header_length = IEEE1394_GASP_HDR_SIZE; offset = 0; for (u = 0; u < num_packets; u++) { retval = fw_iso_context_queue(context, &packet, 
&dev->broadcast_rcv_buffer, offset); if (retval < 0) goto failed; offset += max_receive; } dev->num_broadcast_rcv_ptrs = num_packets; dev->rcv_buffer_size = max_receive; dev->broadcast_rcv_next_ptr = 0U; retval = fw_iso_context_start(context, -1, 0, FW_ISO_CONTEXT_MATCH_ALL_TAGS); /* ??? sync */ if (retval < 0) goto failed; /* FIXME: adjust it according to the min. speed of all known peers? */ dev->broadcast_xmt_max_payload = IEEE1394_MAX_PAYLOAD_S100 - IEEE1394_GASP_HDR_SIZE - RFC2374_UNFRAG_HDR_SIZE; dev->broadcast_state = FWNET_BROADCAST_RUNNING; return 0; failed: __fwnet_broadcast_stop(dev); return retval; } static void set_carrier_state(struct fwnet_device *dev) { if (dev->peer_count > 1) netif_carrier_on(dev->netdev); else netif_carrier_off(dev->netdev); } /* ifup */ static int fwnet_open(struct net_device *net) { struct fwnet_device *dev = netdev_priv(net); int ret; ret = fwnet_broadcast_start(dev); if (ret) return ret; netif_start_queue(net); spin_lock_irq(&dev->lock); set_carrier_state(dev); spin_unlock_irq(&dev->lock); return 0; } /* ifdown */ static int fwnet_stop(struct net_device *net) { struct fwnet_device *dev = netdev_priv(net); netif_stop_queue(net); fwnet_broadcast_stop(dev); return 0; } static netdev_tx_t fwnet_tx(struct sk_buff *skb, struct net_device *net) { struct fwnet_header hdr_buf; struct fwnet_device *dev = netdev_priv(net); __be16 proto; u16 dest_node; unsigned max_payload; u16 dg_size; u16 *datagram_label_ptr; struct fwnet_packet_task *ptask; struct fwnet_peer *peer; unsigned long flags; spin_lock_irqsave(&dev->lock, flags); /* Can this happen? */ if (netif_queue_stopped(dev->netdev)) { spin_unlock_irqrestore(&dev->lock, flags); return NETDEV_TX_BUSY; } ptask = kmem_cache_alloc(fwnet_packet_task_cache, GFP_ATOMIC); if (ptask == NULL) goto fail; skb = skb_share_check(skb, GFP_ATOMIC); if (!skb) goto fail; /* * Make a copy of the driver-specific header. * We might need to rebuild the header on tx failure. 
*/ memcpy(&hdr_buf, skb->data, sizeof(hdr_buf)); proto = hdr_buf.h_proto; switch (proto) { case htons(ETH_P_ARP): case htons(ETH_P_IP): #if IS_ENABLED(CONFIG_IPV6) case htons(ETH_P_IPV6): #endif break; default: goto fail; } skb_pull(skb, sizeof(hdr_buf)); dg_size = skb->len; /* * Set the transmission type for the packet. ARP packets and IP * broadcast packets are sent via GASP. */ if (fwnet_hwaddr_is_multicast(hdr_buf.h_dest)) { max_payload = dev->broadcast_xmt_max_payload; datagram_label_ptr = &dev->broadcast_xmt_datagramlabel; ptask->fifo_addr = FWNET_NO_FIFO_ADDR; ptask->generation = 0; ptask->dest_node = IEEE1394_ALL_NODES; ptask->speed = SCODE_100; } else { union fwnet_hwaddr *ha = (union fwnet_hwaddr *)hdr_buf.h_dest; __be64 guid = get_unaligned(&ha->uc.uniq_id); u8 generation; peer = fwnet_peer_find_by_guid(dev, be64_to_cpu(guid)); if (!peer) goto fail; generation = peer->generation; dest_node = peer->node_id; max_payload = peer->max_payload; datagram_label_ptr = &peer->datagram_label; ptask->fifo_addr = fwnet_hwaddr_fifo(ha); ptask->generation = generation; ptask->dest_node = dest_node; ptask->speed = peer->speed; } ptask->hdr.w0 = 0; ptask->hdr.w1 = 0; ptask->skb = skb; ptask->dev = dev; /* Does it all fit in one packet? 
*/ if (dg_size <= max_payload) { fwnet_make_uf_hdr(&ptask->hdr, ntohs(proto)); ptask->outstanding_pkts = 1; max_payload = dg_size + RFC2374_UNFRAG_HDR_SIZE; } else { u16 datagram_label; max_payload -= RFC2374_FRAG_OVERHEAD; datagram_label = (*datagram_label_ptr)++; fwnet_make_ff_hdr(&ptask->hdr, ntohs(proto), dg_size, datagram_label); ptask->outstanding_pkts = DIV_ROUND_UP(dg_size, max_payload); max_payload += RFC2374_FRAG_HDR_SIZE; } if (++dev->queued_datagrams == FWNET_MAX_QUEUED_DATAGRAMS) netif_stop_queue(dev->netdev); spin_unlock_irqrestore(&dev->lock, flags); ptask->max_payload = max_payload; ptask->enqueued = 0; fwnet_send_packet(ptask); return NETDEV_TX_OK; fail: spin_unlock_irqrestore(&dev->lock, flags); if (ptask) kmem_cache_free(fwnet_packet_task_cache, ptask); if (skb != NULL) dev_kfree_skb(skb); net->stats.tx_dropped++; net->stats.tx_errors++; /* * FIXME: According to a patch from 2003-02-26, "returning non-zero * causes serious problems" here, allegedly. Before that patch, * -ERRNO was returned which is not appropriate under Linux 2.6. * Perhaps more needs to be done? Stop the queue in serious * conditions and restart it elsewhere? 
*/ return NETDEV_TX_OK; } static int fwnet_change_mtu(struct net_device *net, int new_mtu) { if (new_mtu < 68) return -EINVAL; net->mtu = new_mtu; return 0; } static const struct ethtool_ops fwnet_ethtool_ops = { .get_link = ethtool_op_get_link, }; static const struct net_device_ops fwnet_netdev_ops = { .ndo_open = fwnet_open, .ndo_stop = fwnet_stop, .ndo_start_xmit = fwnet_tx, .ndo_change_mtu = fwnet_change_mtu, }; static void fwnet_init_dev(struct net_device *net) { net->header_ops = &fwnet_header_ops; net->netdev_ops = &fwnet_netdev_ops; net->watchdog_timeo = 2 * HZ; net->flags = IFF_BROADCAST | IFF_MULTICAST; net->features = NETIF_F_HIGHDMA; net->addr_len = FWNET_ALEN; net->hard_header_len = FWNET_HLEN; net->type = ARPHRD_IEEE1394; net->tx_queue_len = FWNET_TX_QUEUE_LEN; net->ethtool_ops = &fwnet_ethtool_ops; } /* caller must hold fwnet_device_mutex */ static struct fwnet_device *fwnet_dev_find(struct fw_card *card) { struct fwnet_device *dev; list_for_each_entry(dev, &fwnet_device_list, dev_link) if (dev->card == card) return dev; return NULL; } static int fwnet_add_peer(struct fwnet_device *dev, struct fw_unit *unit, struct fw_device *device) { struct fwnet_peer *peer; peer = kmalloc(sizeof(*peer), GFP_KERNEL); if (!peer) return -ENOMEM; dev_set_drvdata(&unit->device, peer); peer->dev = dev; peer->guid = (u64)device->config_rom[3] << 32 | device->config_rom[4]; INIT_LIST_HEAD(&peer->pd_list); peer->pdg_size = 0; peer->datagram_label = 0; peer->speed = device->max_speed; peer->max_payload = fwnet_max_payload(device->max_rec, peer->speed); peer->generation = device->generation; smp_rmb(); peer->node_id = device->node_id; spin_lock_irq(&dev->lock); list_add_tail(&peer->peer_link, &dev->peer_list); dev->peer_count++; set_carrier_state(dev); spin_unlock_irq(&dev->lock); return 0; } static int fwnet_probe(struct fw_unit *unit, const struct ieee1394_device_id *id) { struct fw_device *device = fw_parent_device(unit); struct fw_card *card = device->card; struct 
net_device *net; bool allocated_netdev = false; struct fwnet_device *dev; unsigned max_mtu; int ret; union fwnet_hwaddr *ha; mutex_lock(&fwnet_device_mutex); dev = fwnet_dev_find(card); if (dev) { net = dev->netdev; goto have_dev; } net = alloc_netdev(sizeof(*dev), "firewire%d", NET_NAME_UNKNOWN, fwnet_init_dev); if (net == NULL) { mutex_unlock(&fwnet_device_mutex); return -ENOMEM; } allocated_netdev = true; SET_NETDEV_DEV(net, card->device); dev = netdev_priv(net); spin_lock_init(&dev->lock); dev->broadcast_state = FWNET_BROADCAST_ERROR; dev->broadcast_rcv_context = NULL; dev->broadcast_xmt_max_payload = 0; dev->broadcast_xmt_datagramlabel = 0; dev->local_fifo = FWNET_NO_FIFO_ADDR; dev->queued_datagrams = 0; INIT_LIST_HEAD(&dev->peer_list); dev->card = card; dev->netdev = net; ret = fwnet_fifo_start(dev); if (ret < 0) goto out; dev->local_fifo = dev->handler.offset; /* * Use the RFC 2734 default 1500 octets or the maximum payload * as initial MTU */ max_mtu = (1 << (card->max_receive + 1)) - sizeof(struct rfc2734_header) - IEEE1394_GASP_HDR_SIZE; net->mtu = min(1500U, max_mtu); /* Set our hardware address while we're at it */ ha = (union fwnet_hwaddr *)net->dev_addr; put_unaligned_be64(card->guid, &ha->uc.uniq_id); ha->uc.max_rec = dev->card->max_receive; ha->uc.sspd = dev->card->link_speed; put_unaligned_be16(dev->local_fifo >> 32, &ha->uc.fifo_hi); put_unaligned_be32(dev->local_fifo & 0xffffffff, &ha->uc.fifo_lo); memset(net->broadcast, -1, net->addr_len); ret = register_netdev(net); if (ret) goto out; list_add_tail(&dev->dev_link, &fwnet_device_list); dev_notice(&net->dev, "IP over IEEE 1394 on card %s\n", dev_name(card->device)); have_dev: ret = fwnet_add_peer(dev, unit, device); if (ret && allocated_netdev) { unregister_netdev(net); list_del(&dev->dev_link); out: fwnet_fifo_stop(dev); free_netdev(net); } mutex_unlock(&fwnet_device_mutex); return ret; } /* * FIXME abort partially sent fragmented datagrams, * discard partially received fragmented datagrams */ 
/*
 * Bus-reset callback: refresh the peer's node ID and bus generation after
 * a FireWire bus reset so subsequent transmissions address the right node.
 */
static void fwnet_update(struct fw_unit *unit)
{
	struct fw_device *device = fw_parent_device(unit);
	struct fwnet_peer *peer = dev_get_drvdata(&unit->device);
	int generation;

	generation = device->generation;

	/* node_id and generation must be updated atomically w.r.t. readers */
	spin_lock_irq(&peer->dev->lock);
	peer->node_id = device->node_id;
	peer->generation = generation;
	spin_unlock_irq(&peer->dev->lock);
}

/*
 * Unlink @peer from @dev, drop any partially reassembled datagrams that
 * were pending for it, and free it.  Carrier state is re-evaluated because
 * it depends on the peer count.
 */
static void fwnet_remove_peer(struct fwnet_peer *peer, struct fwnet_device *dev)
{
	struct fwnet_partial_datagram *pd, *pd_next;

	spin_lock_irq(&dev->lock);
	list_del(&peer->peer_link);
	dev->peer_count--;
	set_carrier_state(dev);
	spin_unlock_irq(&dev->lock);

	/* pd_list is private to this peer now; no lock needed for teardown */
	list_for_each_entry_safe(pd, pd_next, &peer->pd_list, pd_link)
		fwnet_pd_delete(pd);

	kfree(peer);
}

/*
 * Unit removal: drop the peer for this unit; if it was the last peer, tear
 * down the whole net device.  Waits up to 5 s for in-flight datagrams to
 * drain before freeing the netdev.
 */
static void fwnet_remove(struct fw_unit *unit)
{
	struct fwnet_peer *peer = dev_get_drvdata(&unit->device);
	struct fwnet_device *dev = peer->dev;
	struct net_device *net;
	int i;

	mutex_lock(&fwnet_device_mutex);

	net = dev->netdev;

	fwnet_remove_peer(peer, dev);

	if (list_empty(&dev->peer_list)) {
		unregister_netdev(net);

		fwnet_fifo_stop(dev);

		/* best-effort drain of queued datagrams before freeing */
		for (i = 0; dev->queued_datagrams && i < 5; i++)
			ssleep(1);
		WARN_ON(dev->queued_datagrams);
		list_del(&dev->dev_link);

		free_netdev(net);
	}

	mutex_unlock(&fwnet_device_mutex);
}

/* Match units advertising IANA-registered RFC 2734 (IPv4) and, when IPv6
 * is enabled, RFC 3146 (IPv6) software versions. */
static const struct ieee1394_device_id fwnet_id_table[] = {
	{
		.match_flags = IEEE1394_MATCH_SPECIFIER_ID |
			       IEEE1394_MATCH_VERSION,
		.specifier_id = IANA_SPECIFIER_ID,
		.version = RFC2734_SW_VERSION,
	},
#if IS_ENABLED(CONFIG_IPV6)
	{
		.match_flags = IEEE1394_MATCH_SPECIFIER_ID |
			       IEEE1394_MATCH_VERSION,
		.specifier_id = IANA_SPECIFIER_ID,
		.version = RFC3146_SW_VERSION,
	},
#endif
	{ }
};

static struct fw_driver fwnet_driver = {
	.driver = {
		.owner = THIS_MODULE,
		.name = KBUILD_MODNAME,
		.bus = &fw_bus_type,
	},
	.probe = fwnet_probe,
	.update = fwnet_update,
	.remove = fwnet_remove,
	.id_table = fwnet_id_table,
};

/* Config-ROM unit directory advertising IPv4-over-1394 capability. */
static const u32 rfc2374_unit_directory_data[] = {
	0x00040000,	/* directory_length		*/
	0x1200005e,	/* unit_specifier_id: IANA	*/
	0x81000003,	/* textual descriptor offset	*/
	0x13000001,	/* unit_sw_version: RFC 2734	*/
	0x81000005,	/* textual descriptor offset	*/
	0x00030000,	/* descriptor_length		*/
	0x00000000,	/* text				*/
	0x00000000,	/* minimal ASCII, en		*/
	0x49414e41,	/* I A N A			*/
	0x00030000,	/* descriptor_length		*/
	0x00000000,	/* text				*/
	0x00000000,	/* minimal ASCII, en		*/
	0x49507634,	/* I P v 4			*/
};

static struct fw_descriptor rfc2374_unit_directory = {
	.length = ARRAY_SIZE(rfc2374_unit_directory_data),
	.key = (CSR_DIRECTORY | CSR_UNIT) << 24,
	.data = rfc2374_unit_directory_data
};

#if IS_ENABLED(CONFIG_IPV6)
/* Config-ROM unit directory advertising IPv6-over-1394 capability. */
static const u32 rfc3146_unit_directory_data[] = {
	0x00040000,	/* directory_length		*/
	0x1200005e,	/* unit_specifier_id: IANA	*/
	0x81000003,	/* textual descriptor offset	*/
	0x13000002,	/* unit_sw_version: RFC 3146	*/
	0x81000005,	/* textual descriptor offset	*/
	0x00030000,	/* descriptor_length		*/
	0x00000000,	/* text				*/
	0x00000000,	/* minimal ASCII, en		*/
	0x49414e41,	/* I A N A			*/
	0x00030000,	/* descriptor_length		*/
	0x00000000,	/* text				*/
	0x00000000,	/* minimal ASCII, en		*/
	0x49507636,	/* I P v 6			*/
};

static struct fw_descriptor rfc3146_unit_directory = {
	.length = ARRAY_SIZE(rfc3146_unit_directory_data),
	.key = (CSR_DIRECTORY | CSR_UNIT) << 24,
	.data = rfc3146_unit_directory_data
};
#endif

/*
 * Module init: publish the config-ROM unit directories, create the
 * packet-task slab cache, and register the driver.  Unwinds in reverse
 * order on failure.
 */
static int __init fwnet_init(void)
{
	int err;

	err = fw_core_add_descriptor(&rfc2374_unit_directory);
	if (err)
		return err;
#if IS_ENABLED(CONFIG_IPV6)
	err = fw_core_add_descriptor(&rfc3146_unit_directory);
	if (err)
		goto out;
#endif

	fwnet_packet_task_cache = kmem_cache_create("packet_task",
			sizeof(struct fwnet_packet_task), 0, 0, NULL);
	if (!fwnet_packet_task_cache) {
		err = -ENOMEM;
		goto out2;
	}

	err = driver_register(&fwnet_driver.driver);
	if (!err)
		return 0;

	kmem_cache_destroy(fwnet_packet_task_cache);
out2:
#if IS_ENABLED(CONFIG_IPV6)
	fw_core_remove_descriptor(&rfc3146_unit_directory);
out:
#endif
	fw_core_remove_descriptor(&rfc2374_unit_directory);

	return err;
}
module_init(fwnet_init);

/* Module exit: exact reverse of fwnet_init. */
static void __exit fwnet_cleanup(void)
{
	driver_unregister(&fwnet_driver.driver);
	kmem_cache_destroy(fwnet_packet_task_cache);
#if IS_ENABLED(CONFIG_IPV6)
	fw_core_remove_descriptor(&rfc3146_unit_directory);
#endif
	fw_core_remove_descriptor(&rfc2374_unit_directory);
}
module_exit(fwnet_cleanup);

MODULE_AUTHOR("Jay Fenlason <fenlason@redhat.com>");
MODULE_DESCRIPTION("IP over IEEE1394 as per RFC 2734/3146");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(ieee1394, fwnet_id_table);
./CrossVul/dataset_final_sorted/CWE-284/c/good_5346_0
crossvul-cpp_data_bad_4896_0
/*
 * Process version 2 NFSACL requests.
 *
 * Copyright (C) 2002-2003 Andreas Gruenbacher <agruen@suse.de>
 */

#include "nfsd.h"
/* FIXME: nfsacl.h is a broken header */
#include <linux/nfsacl.h>
#include <linux/gfp.h>
#include "cache.h"
#include "xdr3.h"
#include "vfs.h"

#define NFSDDBG_FACILITY		NFSDDBG_PROC
/* Record @st in the response and return it to the dispatcher in one step. */
#define RETURN_STATUS(st)	{ resp->status = (st); return (st); }

/*
 * NULL call.
 */
static __be32
nfsacld_proc_null(struct svc_rqst *rqstp, void *argp, void *resp)
{
	return nfs_ok;
}

/*
 * Get the Access and/or Default ACL of a file.
 */
static __be32 nfsacld_proc_getacl(struct svc_rqst * rqstp,
		struct nfsd3_getaclargs *argp, struct nfsd3_getaclres *resp)
{
	struct posix_acl *acl;
	struct inode *inode;
	svc_fh *fh;
	__be32 nfserr = 0;

	dprintk("nfsd: GETACL(2acl) %s\n", SVCFH_fmt(&argp->fh));

	fh = fh_copy(&resp->fh, &argp->fh);
	nfserr = fh_verify(rqstp, &resp->fh, 0, NFSD_MAY_NOP);
	if (nfserr)
		RETURN_STATUS(nfserr);

	inode = d_inode(fh->fh_dentry);

	/* Reject requests with mask bits outside the protocol-defined set. */
	if (argp->mask & ~NFS_ACL_MASK)
		RETURN_STATUS(nfserr_inval);
	resp->mask = argp->mask;

	nfserr = fh_getattr(fh, &resp->stat);
	if (nfserr)
		RETURN_STATUS(nfserr);

	if (resp->mask & (NFS_ACL|NFS_ACLCNT)) {
		acl = get_acl(inode, ACL_TYPE_ACCESS);
		if (acl == NULL) {
			/* Solaris returns the inode's minimum ACL. */
			acl = posix_acl_from_mode(inode->i_mode,
						  GFP_KERNEL);
		}
		if (IS_ERR(acl)) {
			nfserr = nfserrno(PTR_ERR(acl));
			goto fail;
		}
		resp->acl_access = acl;
	}
	if (resp->mask & (NFS_DFACL|NFS_DFACLCNT)) {
		/* Check how Solaris handles requests for the Default ACL
		   of a non-directory! */
		acl = get_acl(inode, ACL_TYPE_DEFAULT);
		if (IS_ERR(acl)) {
			nfserr = nfserrno(PTR_ERR(acl));
			goto fail;
		}
		resp->acl_default = acl;
	}

	/* resp->acl_{access,default} are released in
	   nfssvc_release_getacl. */
	RETURN_STATUS(0);

fail:
	/* Drop whatever ACL references were taken before the failure. */
	posix_acl_release(resp->acl_access);
	posix_acl_release(resp->acl_default);
	RETURN_STATUS(nfserr);
}

/*
 * Set the Access and/or Default ACL of a file.
 */
static __be32 nfsacld_proc_setacl(struct svc_rqst * rqstp,
		struct nfsd3_setaclargs *argp,
		struct nfsd_attrstat *resp)
{
	struct inode *inode;
	svc_fh *fh;
	__be32 nfserr = 0;
	int error;

	dprintk("nfsd: SETACL(2acl) %s\n", SVCFH_fmt(&argp->fh));

	fh = fh_copy(&resp->fh, &argp->fh);
	nfserr = fh_verify(rqstp, &resp->fh, 0, NFSD_MAY_SATTR);
	if (nfserr)
		goto out;

	inode = d_inode(fh->fh_dentry);
	if (!IS_POSIXACL(inode) || !inode->i_op->set_acl) {
		error = -EOPNOTSUPP;
		goto out_errno;
	}

	/* Take a write reference on the export before mutating the inode. */
	error = fh_want_write(fh);
	if (error)
		goto out_errno;

	error = inode->i_op->set_acl(inode, argp->acl_access, ACL_TYPE_ACCESS);
	if (error)
		goto out_drop_write;
	error = inode->i_op->set_acl(inode, argp->acl_default,
				     ACL_TYPE_DEFAULT);
	if (error)
		goto out_drop_write;

	fh_drop_write(fh);

	nfserr = fh_getattr(fh, &resp->stat);

out:
	/* argp->acl_{access,default} may have been allocated in
	   nfssvc_decode_setaclargs. */
	posix_acl_release(argp->acl_access);
	posix_acl_release(argp->acl_default);
	return nfserr;
out_drop_write:
	fh_drop_write(fh);
out_errno:
	nfserr = nfserrno(error);
	goto out;
}

/*
 * Check file attributes
 */
static __be32 nfsacld_proc_getattr(struct svc_rqst * rqstp,
		struct nfsd_fhandle *argp, struct nfsd_attrstat *resp)
{
	__be32 nfserr;
	dprintk("nfsd: GETATTR %s\n", SVCFH_fmt(&argp->fh));

	fh_copy(&resp->fh, &argp->fh);
	nfserr = fh_verify(rqstp, &resp->fh, 0, NFSD_MAY_NOP);
	if (nfserr)
		return nfserr;
	nfserr = fh_getattr(&resp->fh, &resp->stat);
	return nfserr;
}

/*
 * Check file access
 */
static __be32 nfsacld_proc_access(struct svc_rqst *rqstp, struct nfsd3_accessargs *argp,
		struct nfsd3_accessres *resp)
{
	__be32 nfserr;

	dprintk("nfsd: ACCESS(2acl) %s 0x%x\n",
			SVCFH_fmt(&argp->fh),
			argp->access);

	fh_copy(&resp->fh, &argp->fh);
	resp->access = argp->access;
	nfserr = nfsd_access(rqstp, &resp->fh, &resp->access, NULL);
	if (nfserr)
		return nfserr;
	nfserr = fh_getattr(&resp->fh, &resp->stat);
	return nfserr;
}

/*
 * XDR decode functions
 */
static int nfsaclsvc_decode_getaclargs(struct svc_rqst *rqstp, __be32 *p,
		struct nfsd3_getaclargs *argp)
{
	p = nfs2svc_decode_fh(p, &argp->fh);
	if (!p)
		return 0;
	argp->mask = ntohl(*p); p++;

	return xdr_argsize_check(rqstp, p);
}

static int nfsaclsvc_decode_setaclargs(struct svc_rqst *rqstp, __be32 *p,
		struct nfsd3_setaclargs *argp)
{
	struct kvec *head = rqstp->rq_arg.head;
	unsigned int base;
	int n;

	p = nfs2svc_decode_fh(p, &argp->fh);
	if (!p)
		return 0;
	argp->mask = ntohl(*p++);
	/* Bad mask bits or an over-long fixed part are both decode errors. */
	if (argp->mask & ~NFS_ACL_MASK ||
	    !xdr_argsize_check(rqstp, p))
		return 0;

	/* ACLs start at the current offset within the head iovec. */
	base = (char *)p - (char *)head->iov_base;
	n = nfsacl_decode(&rqstp->rq_arg, base, NULL,
			  (argp->mask & NFS_ACL) ?
			  &argp->acl_access : NULL);
	if (n > 0)
		n = nfsacl_decode(&rqstp->rq_arg, base + n, NULL,
				  (argp->mask & NFS_DFACL) ?
				  &argp->acl_default : NULL);
	return (n > 0);
}

static int nfsaclsvc_decode_fhandleargs(struct svc_rqst *rqstp, __be32 *p,
		struct nfsd_fhandle *argp)
{
	p = nfs2svc_decode_fh(p, &argp->fh);
	if (!p)
		return 0;
	return xdr_argsize_check(rqstp, p);
}

static int nfsaclsvc_decode_accessargs(struct svc_rqst *rqstp, __be32 *p,
		struct nfsd3_accessargs *argp)
{
	p = nfs2svc_decode_fh(p, &argp->fh);
	if (!p)
		return 0;
	argp->access = ntohl(*p++);

	return xdr_argsize_check(rqstp, p);
}

/*
 * XDR encode functions
 */

/*
 * There must be an encoding function for void results so svc_process
 * will work properly.
 */
static int nfsaclsvc_encode_voidres(struct svc_rqst *rqstp, __be32 *p, void *dummy)
{
	return xdr_ressize_check(rqstp, p);
}

/* GETACL */
static int nfsaclsvc_encode_getaclres(struct svc_rqst *rqstp, __be32 *p,
		struct nfsd3_getaclres *resp)
{
	struct dentry *dentry = resp->fh.fh_dentry;
	struct inode *inode;
	struct kvec *head = rqstp->rq_res.head;
	unsigned int base;
	int n;
	int w;

	/*
	 * Since this is version 2, the check for nfserr in
	 * nfsd_dispatch actually ensures the following cannot happen.
	 * However, it seems fragile to depend on that.
	 */
	if (dentry == NULL || d_really_is_negative(dentry))
		return 0;
	inode = d_inode(dentry);

	p = nfs2svc_encode_fattr(rqstp, p, &resp->fh, &resp->stat);
	*p++ = htonl(resp->mask);
	if (!xdr_ressize_check(rqstp, p))
		return 0;
	base = (char *)p - (char *)head->iov_base;

	/* Reserve response pages for the encoded ACLs; bail out if the
	   reply would need more pages than were allocated. */
	rqstp->rq_res.page_len = w = nfsacl_size(
		(resp->mask & NFS_ACL) ? resp->acl_access : NULL,
		(resp->mask & NFS_DFACL) ? resp->acl_default : NULL);
	while (w > 0) {
		if (!*(rqstp->rq_next_page++))
			return 0;
		w -= PAGE_SIZE;
	}

	n = nfsacl_encode(&rqstp->rq_res, base, inode,
			  resp->acl_access,
			  resp->mask & NFS_ACL, 0);
	if (n > 0)
		n = nfsacl_encode(&rqstp->rq_res, base + n, inode,
				  resp->acl_default,
				  resp->mask & NFS_DFACL,
				  NFS_ACL_DEFAULT);
	return (n > 0);
}

static int nfsaclsvc_encode_attrstatres(struct svc_rqst *rqstp, __be32 *p,
		struct nfsd_attrstat *resp)
{
	p = nfs2svc_encode_fattr(rqstp, p, &resp->fh, &resp->stat);
	return xdr_ressize_check(rqstp, p);
}

/* ACCESS */
static int nfsaclsvc_encode_accessres(struct svc_rqst *rqstp, __be32 *p,
		struct nfsd3_accessres *resp)
{
	p = nfs2svc_encode_fattr(rqstp, p, &resp->fh, &resp->stat);
	*p++ = htonl(resp->access);
	return xdr_ressize_check(rqstp, p);
}

/*
 * XDR release functions
 */
static int nfsaclsvc_release_getacl(struct svc_rqst *rqstp, __be32 *p,
		struct nfsd3_getaclres *resp)
{
	fh_put(&resp->fh);
	posix_acl_release(resp->acl_access);
	posix_acl_release(resp->acl_default);
	return 1;
}

static int nfsaclsvc_release_attrstat(struct svc_rqst *rqstp, __be32 *p,
		struct nfsd_attrstat *resp)
{
	fh_put(&resp->fh);
	return 1;
}

static int nfsaclsvc_release_access(struct svc_rqst *rqstp, __be32 *p,
		struct nfsd3_accessres *resp)
{
	fh_put(&resp->fh);
	return 1;
}

#define nfsaclsvc_decode_voidargs	NULL
#define nfsaclsvc_release_void		NULL
#define nfsd3_fhandleargs	nfsd_fhandle
#define nfsd3_attrstatres	nfsd_attrstat
#define nfsd3_voidres		nfsd3_voidargs
struct nfsd3_voidargs { int dummy; };

/* Build one svc_procedure entry from the nfsacld_/nfsaclsvc_ name stems. */
#define PROC(name, argt, rest, relt, cache, respsize)	\
 { (svc_procfunc) nfsacld_proc_##name,		\
   (kxdrproc_t) nfsaclsvc_decode_##argt##args,	\
   (kxdrproc_t) nfsaclsvc_encode_##rest##res,	\
   (kxdrproc_t) nfsaclsvc_release_##relt,	\
   sizeof(struct nfsd3_##argt##args),		\
   sizeof(struct nfsd3_##rest##res),		\
   0,						\
   cache,					\
   respsize,					\
 }

/* Reply-size estimates, in XDR words. */
#define ST 1		/* status*/
#define AT 21		/* attributes */
#define pAT (1+AT)	/* post attributes - conditional */
#define ACL (1+NFS_ACL_MAX_ENTRIES*3)  /* Access Control List */

static struct svc_procedure		nfsd_acl_procedures2[] = {
  PROC(null,	void,		void,		void,	  RC_NOCACHE, ST),
  PROC(getacl,	getacl,		getacl,		getacl,	  RC_NOCACHE, ST+1+2*(1+ACL)),
  PROC(setacl,	setacl,		attrstat,	attrstat, RC_NOCACHE, ST+AT),
  PROC(getattr, fhandle,	attrstat,	attrstat, RC_NOCACHE, ST+AT),
  PROC(access,	access,		access,		access,	  RC_NOCACHE, ST+AT+1),
};

struct svc_version	nfsd_acl_version2 = {
		.vs_vers	= 2,
		.vs_nproc	= 5,
		.vs_proc	= nfsd_acl_procedures2,
		.vs_dispatch	= nfsd_dispatch,
		.vs_xdrsize	= NFS3_SVC_XDRSIZE,
		.vs_hidden	= 0,
};
./CrossVul/dataset_final_sorted/CWE-284/c/bad_4896_0
crossvul-cpp_data_good_1822_0
/*
 * Derived from "arch/i386/kernel/process.c"
 * Copyright (C) 1995 Linus Torvalds
 *
 * Updated and modified by Cort Dougan (cort@cs.nmt.edu) and
 * Paul Mackerras (paulus@cs.anu.edu.au)
 *
 * PowerPC version
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/prctl.h>
#include <linux/init_task.h>
#include <linux/export.h>
#include <linux/kallsyms.h>
#include <linux/mqueue.h>
#include <linux/hardirq.h>
#include <linux/utsname.h>
#include <linux/ftrace.h>
#include <linux/kernel_stat.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/hw_breakpoint.h>
#include <linux/uaccess.h>

#include <asm/pgtable.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/time.h>
#include <asm/runlatch.h>
#include <asm/syscalls.h>
#include <asm/switch_to.h>
#include <asm/tm.h>
#include <asm/debug.h>
#ifdef CONFIG_PPC64
#include <asm/firmware.h>
#endif
#include <asm/code-patching.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>

/* Transactional Memory debug */
#ifdef TM_DEBUG_SW
#define TM_DEBUG(x...) printk(KERN_INFO x)
#else
#define TM_DEBUG(x...) do { } while(0)
#endif

extern unsigned long _get_SP(void);

#ifndef CONFIG_SMP
/*
 * Lazy FP/vector/SPE context tracking on UP: the task whose state is
 * still live in the corresponding register file, or NULL if none.
 */
struct task_struct *last_task_used_math = NULL;
struct task_struct *last_task_used_altivec = NULL;
struct task_struct *last_task_used_vsx = NULL;
struct task_struct *last_task_used_spe = NULL;
#endif

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/* Give up the FPU, remembering (via TIF_RESTORE_TM) that a transactional
 * task will need its register state restored before returning to userspace. */
void giveup_fpu_maybe_transactional(struct task_struct *tsk)
{
	/*
	 * If we are saving the current thread's registers, and the
	 * thread is in a transactional state, set the TIF_RESTORE_TM
	 * bit so that we know to restore the registers before
	 * returning to userspace.
	 */
	if (tsk == current && tsk->thread.regs &&
	    MSR_TM_ACTIVE(tsk->thread.regs->msr) &&
	    !test_thread_flag(TIF_RESTORE_TM)) {
		tsk->thread.ckpt_regs.msr = tsk->thread.regs->msr;
		set_thread_flag(TIF_RESTORE_TM);
	}

	giveup_fpu(tsk);
}

/* As giveup_fpu_maybe_transactional(), but for the Altivec/VMX unit. */
void giveup_altivec_maybe_transactional(struct task_struct *tsk)
{
	/*
	 * If we are saving the current thread's registers, and the
	 * thread is in a transactional state, set the TIF_RESTORE_TM
	 * bit so that we know to restore the registers before
	 * returning to userspace.
	 */
	if (tsk == current && tsk->thread.regs &&
	    MSR_TM_ACTIVE(tsk->thread.regs->msr) &&
	    !test_thread_flag(TIF_RESTORE_TM)) {
		tsk->thread.ckpt_regs.msr = tsk->thread.regs->msr;
		set_thread_flag(TIF_RESTORE_TM);
	}

	giveup_altivec(tsk);
}

#else
#define giveup_fpu_maybe_transactional(tsk)	giveup_fpu(tsk)
#define giveup_altivec_maybe_transactional(tsk)	giveup_altivec(tsk)
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */

#ifdef CONFIG_PPC_FPU
/*
 * Make sure the floating-point register state in the
 * thread_struct is up to date for task tsk.
 */
void flush_fp_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		/*
		 * We need to disable preemption here because if we didn't,
		 * another process could get scheduled after the regs->msr
		 * test but before we have finished saving the FP registers
		 * to the thread_struct.  That process could take over the
		 * FPU, and then when we get scheduled again we would store
		 * bogus values for the remaining FP registers.
		 */
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_FP) {
#ifdef CONFIG_SMP
			/*
			 * This should only ever be called for current or
			 * for a stopped child process.  Since we save away
			 * the FP register state on context switch on SMP,
			 * there is something wrong if a stopped child appears
			 * to still have its FP state in the CPU registers.
			 */
			BUG_ON(tsk != current);
#endif
			giveup_fpu_maybe_transactional(tsk);
		}
		preempt_enable();
	}
}
EXPORT_SYMBOL_GPL(flush_fp_to_thread);
#endif /* CONFIG_PPC_FPU */

/* Claim the FPU for use by kernel code (caller must be non-preemptible). */
void enable_kernel_fp(void)
{
	WARN_ON(preemptible());

#ifdef CONFIG_SMP
	if (current->thread.regs && (current->thread.regs->msr & MSR_FP))
		giveup_fpu_maybe_transactional(current);
	else
		giveup_fpu(NULL);	/* just enables FP for kernel */
#else
	giveup_fpu_maybe_transactional(last_task_used_math);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_fp);

#ifdef CONFIG_ALTIVEC
/* Claim the Altivec unit for use by kernel code (non-preemptible caller). */
void enable_kernel_altivec(void)
{
	WARN_ON(preemptible());

#ifdef CONFIG_SMP
	if (current->thread.regs && (current->thread.regs->msr & MSR_VEC))
		giveup_altivec_maybe_transactional(current);
	else
		giveup_altivec_notask();
#else
	giveup_altivec_maybe_transactional(last_task_used_altivec);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_altivec);

/*
 * Make sure the VMX/Altivec register state in the
 * thread_struct is up to date for task tsk.
*/
void flush_altivec_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_VEC) {
#ifdef CONFIG_SMP
			BUG_ON(tsk != current);
#endif
			giveup_altivec_maybe_transactional(tsk);
		}
		preempt_enable();
	}
}
EXPORT_SYMBOL_GPL(flush_altivec_to_thread);
#endif /* CONFIG_ALTIVEC */

#ifdef CONFIG_VSX
/* Claim the VSX unit for use by kernel code (non-preemptible caller). */
void enable_kernel_vsx(void)
{
	WARN_ON(preemptible());

#ifdef CONFIG_SMP
	if (current->thread.regs && (current->thread.regs->msr & MSR_VSX))
		giveup_vsx(current);
	else
		giveup_vsx(NULL);	/* just enable vsx for kernel - force */
#else
	giveup_vsx(last_task_used_vsx);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_vsx);

/* VSX state spans the FP and VMX register files, so give up all three. */
void giveup_vsx(struct task_struct *tsk)
{
	giveup_fpu_maybe_transactional(tsk);
	giveup_altivec_maybe_transactional(tsk);
	__giveup_vsx(tsk);
}
EXPORT_SYMBOL(giveup_vsx);

/* Make sure the VSX register state in the thread_struct is current for tsk. */
void flush_vsx_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_VSX) {
#ifdef CONFIG_SMP
			BUG_ON(tsk != current);
#endif
			giveup_vsx(tsk);
		}
		preempt_enable();
	}
}
EXPORT_SYMBOL_GPL(flush_vsx_to_thread);
#endif /* CONFIG_VSX */

#ifdef CONFIG_SPE
/* Claim the SPE unit for use by kernel code (non-preemptible caller). */
void enable_kernel_spe(void)
{
	WARN_ON(preemptible());

#ifdef CONFIG_SMP
	if (current->thread.regs && (current->thread.regs->msr & MSR_SPE))
		giveup_spe(current);
	else
		giveup_spe(NULL);	/* just enable SPE for kernel - force */
#else
	giveup_spe(last_task_used_spe);
#endif /* __SMP __ */
}
EXPORT_SYMBOL(enable_kernel_spe);

/* Make sure the SPE register state (incl. SPEFSCR) is saved for tsk. */
void flush_spe_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_SPE) {
#ifdef CONFIG_SMP
			BUG_ON(tsk != current);
#endif
			tsk->thread.spefscr = mfspr(SPRN_SPEFSCR);
			giveup_spe(tsk);
		}
		preempt_enable();
	}
}
#endif /* CONFIG_SPE */

#ifndef CONFIG_SMP
/*
 * If we are doing lazy switching of CPU state (FP, altivec or SPE),
 * and the current task has some state, discard it.
 */
void discard_lazy_cpu_state(void)
{
	preempt_disable();
	if (last_task_used_math == current)
		last_task_used_math = NULL;
#ifdef CONFIG_ALTIVEC
	if (last_task_used_altivec == current)
		last_task_used_altivec = NULL;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
	if (last_task_used_vsx == current)
		last_task_used_vsx = NULL;
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
	if (last_task_used_spe == current)
		last_task_used_spe = NULL;
#endif
	preempt_enable();
}
#endif /* CONFIG_SMP */

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
/* Deliver SIGTRAP to userspace for an advanced-debug breakpoint/watchpoint
 * match, unless a debugger consumes the event via notify_die(). */
void do_send_trap(struct pt_regs *regs, unsigned long address,
		  unsigned long error_code, int signal_code, int breakpt)
{
	siginfo_t info;

	current->thread.trap_nr = signal_code;
	if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
			11, SIGSEGV) == NOTIFY_STOP)
		return;

	/* Deliver the signal to userspace */
	info.si_signo = SIGTRAP;
	info.si_errno = breakpt;	/* breakpoint or watchpoint id */
	info.si_code = signal_code;
	info.si_addr = (void __user *)address;
	force_sig_info(SIGTRAP, &info, current);
}
#else	/* !CONFIG_PPC_ADV_DEBUG_REGS */
/* Deliver SIGTRAP for a DABR/DAWR data breakpoint hit; the breakpoint is
 * disabled first so the faulting access can make progress. */
void do_break (struct pt_regs *regs, unsigned long address,
		    unsigned long error_code)
{
	siginfo_t info;

	current->thread.trap_nr = TRAP_HWBKPT;
	if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
			11, SIGSEGV) == NOTIFY_STOP)
		return;

	if (debugger_break_match(regs))
		return;

	/* Clear the breakpoint */
	hw_breakpoint_disable();

	/* Deliver the signal to userspace */
	info.si_signo = SIGTRAP;
	info.si_errno = 0;
	info.si_code = TRAP_HWBKPT;
	info.si_addr = (void __user *)address;
	force_sig_info(SIGTRAP, &info, current);
}
#endif	/* CONFIG_PPC_ADV_DEBUG_REGS */

/* Per-CPU copy of the breakpoint currently programmed into the hardware. */
static DEFINE_PER_CPU(struct arch_hw_breakpoint, current_brk);

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
/*
 * Set the debug registers back to their default "safe" values.
 */
static void set_debug_reg_defaults(struct thread_struct *thread)
{
	thread->debug.iac1 = thread->debug.iac2 = 0;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
	thread->debug.iac3 = thread->debug.iac4 = 0;
#endif
	thread->debug.dac1 = thread->debug.dac2 = 0;
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
	thread->debug.dvc1 = thread->debug.dvc2 = 0;
#endif
	thread->debug.dbcr0 = 0;
#ifdef CONFIG_BOOKE
	/*
	 * Force User/Supervisor bits to b11 (user-only MSR[PR]=1)
	 */
	thread->debug.dbcr1 = DBCR1_IAC1US | DBCR1_IAC2US |
			DBCR1_IAC3US | DBCR1_IAC4US;
	/*
	 * Force Data Address Compare User/Supervisor bits to be User-only
	 * (0b11 MSR[PR]=1) and set all other bits in DBCR2 register to be 0.
	 */
	thread->debug.dbcr2 = DBCR2_DAC1US | DBCR2_DAC2US;
#else
	thread->debug.dbcr1 = 0;
#endif
}

/* Load the given debug-register set into the hardware SPRs. */
static void prime_debug_regs(struct debug_reg *debug)
{
	/*
	 * We could have inherited MSR_DE from userspace, since
	 * it doesn't get cleared on exception entry.  Make sure
	 * MSR_DE is clear before we enable any debug events.
	 */
	mtmsr(mfmsr() & ~MSR_DE);

	mtspr(SPRN_IAC1, debug->iac1);
	mtspr(SPRN_IAC2, debug->iac2);
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
	mtspr(SPRN_IAC3, debug->iac3);
	mtspr(SPRN_IAC4, debug->iac4);
#endif
	mtspr(SPRN_DAC1, debug->dac1);
	mtspr(SPRN_DAC2, debug->dac2);
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
	mtspr(SPRN_DVC1, debug->dvc1);
	mtspr(SPRN_DVC2, debug->dvc2);
#endif
	mtspr(SPRN_DBCR0, debug->dbcr0);
	mtspr(SPRN_DBCR1, debug->dbcr1);
#ifdef CONFIG_BOOKE
	mtspr(SPRN_DBCR2, debug->dbcr2);
#endif
}
/*
 * Unless neither the old or new thread are making use of the
 * debug registers, set the debug registers from the values
 * stored in the new thread.
*/
void switch_booke_debug_regs(struct debug_reg *new_debug)
{
	if ((current->thread.debug.dbcr0 & DBCR0_IDM)
		|| (new_debug->dbcr0 & DBCR0_IDM))
			prime_debug_regs(new_debug);
}
EXPORT_SYMBOL_GPL(switch_booke_debug_regs);
#else	/* !CONFIG_PPC_ADV_DEBUG_REGS */
#ifndef CONFIG_HAVE_HW_BREAKPOINT
/* Clear any data breakpoint for this thread and the hardware. */
static void set_debug_reg_defaults(struct thread_struct *thread)
{
	thread->hw_brk.address = 0;
	thread->hw_brk.type = 0;
	set_breakpoint(&thread->hw_brk);
}
#endif /* !CONFIG_HAVE_HW_BREAKPOINT */
#endif	/* CONFIG_PPC_ADV_DEBUG_REGS */

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
{
	mtspr(SPRN_DAC1, dabr);
#ifdef CONFIG_PPC_47x
	isync();
#endif
	return 0;
}
#elif defined(CONFIG_PPC_BOOK3S)
static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
{
	mtspr(SPRN_DABR, dabr);
	if (cpu_has_feature(CPU_FTR_DABRX))
		mtspr(SPRN_DABRX, dabrx);
	return 0;
}
#else
static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
{
	return -EINVAL;
}
#endif

/* Translate an arch_hw_breakpoint into DABR/DABRX values and install them,
 * going through the platform hook if one is provided. */
static inline int set_dabr(struct arch_hw_breakpoint *brk)
{
	unsigned long dabr, dabrx;

	dabr = brk->address | (brk->type & HW_BRK_TYPE_DABR);
	dabrx = ((brk->type >> 3) & 0x7);

	if (ppc_md.set_dabr)
		return ppc_md.set_dabr(dabr, dabrx);

	return __set_dabr(dabr, dabrx);
}

/* Translate an arch_hw_breakpoint into DAWR/DAWRX values and install them. */
static inline int set_dawr(struct arch_hw_breakpoint *brk)
{
	unsigned long dawr, dawrx, mrd;

	dawr = brk->address;

	dawrx = (brk->type & (HW_BRK_TYPE_READ | HW_BRK_TYPE_WRITE)) \
	                           << (63 - 58); //* read/write bits */
	dawrx |= ((brk->type & (HW_BRK_TYPE_TRANSLATE)) >> 2) \
	                           << (63 - 59); //* translate */
	dawrx |= (brk->type & (HW_BRK_TYPE_PRIV_ALL)) \
	                           >> 3; //* PRIM bits */
	/* dawr length is stored in field MDR bits 48:53.  Matches range in
	   doublewords (64 bits) biased by -1 eg. 0b000000=1DW and
	   0b111111=64DW.
	   brk->len is in bytes.
	   This aligns up to double word size, shifts and does the bias.
	*/
	mrd = ((brk->len + 7) >> 3) - 1;
	dawrx |= (mrd & 0x3f) << (63 - 53);

	if (ppc_md.set_dawr)
		return ppc_md.set_dawr(dawr, dawrx);
	mtspr(SPRN_DAWR, dawr);
	mtspr(SPRN_DAWRX, dawrx);
	return 0;
}

/* Record brk as this CPU's current breakpoint and program the hardware,
 * using DAWR when the CPU has it, DABR otherwise. */
void __set_breakpoint(struct arch_hw_breakpoint *brk)
{
	memcpy(this_cpu_ptr(&current_brk), brk, sizeof(*brk));

	if (cpu_has_feature(CPU_FTR_DAWR))
		set_dawr(brk);
	else
		set_dabr(brk);
}

/* Preemption-safe wrapper around __set_breakpoint(). */
void set_breakpoint(struct arch_hw_breakpoint *brk)
{
	preempt_disable();
	__set_breakpoint(brk);
	preempt_enable();
}

#ifdef CONFIG_PPC64
DEFINE_PER_CPU(struct cpu_usage, cpu_usage_array);
#endif

/* Compare two breakpoints field by field. */
static inline bool hw_brk_match(struct arch_hw_breakpoint *a,
			      struct arch_hw_breakpoint *b)
{
	if (a->address != b->address)
		return false;
	if (a->type != b->type)
		return false;
	if (a->len != b->len)
		return false;
	return true;
}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
static void tm_reclaim_thread(struct thread_struct *thr,
			      struct thread_info *ti, uint8_t cause)
{
	unsigned long msr_diff = 0;

	/*
	 * If FP/VSX registers have been already saved to the
	 * thread_struct, move them to the transact_fp array.
	 * We clear the TIF_RESTORE_TM bit since after the reclaim
	 * the thread will no longer be transactional.
	 */
	if (test_ti_thread_flag(ti, TIF_RESTORE_TM)) {
		msr_diff = thr->ckpt_regs.msr & ~thr->regs->msr;
		if (msr_diff & MSR_FP)
			memcpy(&thr->transact_fp, &thr->fp_state,
			       sizeof(struct thread_fp_state));
		if (msr_diff & MSR_VEC)
			memcpy(&thr->transact_vr, &thr->vr_state,
			       sizeof(struct thread_vr_state));
		clear_ti_thread_flag(ti, TIF_RESTORE_TM);
		msr_diff &= MSR_FP | MSR_VEC | MSR_VSX | MSR_FE0 | MSR_FE1;
	}

	/*
	 * Use the current MSR TM suspended bit to track if we have
	 * checkpointed state outstanding.
	 * On signal delivery, we'd normally reclaim the checkpointed
	 * state to obtain stack pointer (see:get_tm_stackpointer()).
	 * This will then directly return to userspace without going
	 * through __switch_to(). However, if the stack frame is bad,
	 * we need to exit this thread which calls __switch_to() which
	 * will again attempt to reclaim the already saved tm state.
	 * Hence we need to check that we've not already reclaimed
	 * this state.
	 * We do this using the current MSR, rather tracking it in
	 * some specific thread_struct bit, as it has the additional
	 * benefit of checking for a potential TM bad thing exception.
	 */
	if (!MSR_TM_SUSPENDED(mfmsr()))
		return;

	tm_reclaim(thr, thr->regs->msr, cause);

	/* Having done the reclaim, we now have the checkpointed
	 * FP/VSX values in the registers.  These might be valid
	 * even if we have previously called enable_kernel_fp() or
	 * flush_fp_to_thread(), so update thr->regs->msr to
	 * indicate their current validity.
	 */
	thr->regs->msr |= msr_diff;
}

/* Reclaim the current thread's transactional state (enabling TM first). */
void tm_reclaim_current(uint8_t cause)
{
	tm_enable();
	tm_reclaim_thread(&current->thread, current_thread_info(), cause);
}

static inline void tm_reclaim_task(struct task_struct *tsk)
{
	/* We have to work out if we're switching from/to a task that's in the
	 * middle of a transaction.
	 *
	 * In switching we need to maintain a 2nd register state as
	 * oldtask->thread.ckpt_regs.  We tm_reclaim(oldproc); this saves the
	 * checkpointed (tbegin) state in ckpt_regs and saves the transactional
	 * (current) FPRs into oldtask->thread.transact_fpr[].
	 *
	 * We also context switch (save) TFHAR/TEXASR/TFIAR in here.
	 */
	struct thread_struct *thr = &tsk->thread;

	if (!thr->regs)
		return;

	if (!MSR_TM_ACTIVE(thr->regs->msr))
		goto out_and_saveregs;

	/* Stash the original thread MSR, as giveup_fpu et al will
	 * modify it.  We hold onto it to see whether the task used
	 * FP & vector regs.  If the TIF_RESTORE_TM flag is set,
	 * ckpt_regs.msr is already set.
	 */
	if (!test_ti_thread_flag(task_thread_info(tsk), TIF_RESTORE_TM))
		thr->ckpt_regs.msr = thr->regs->msr;

	TM_DEBUG("--- tm_reclaim on pid %d (NIP=%lx, "
		 "ccr=%lx, msr=%lx, trap=%lx)\n",
		 tsk->pid, thr->regs->nip,
		 thr->regs->ccr, thr->regs->msr,
		 thr->regs->trap);

	tm_reclaim_thread(thr, task_thread_info(tsk), TM_CAUSE_RESCHED);

	TM_DEBUG("--- tm_reclaim on pid %d complete\n",
		 tsk->pid);

out_and_saveregs:
	/* Always save the regs here, even if a transaction's not active.
	 * This context-switches a thread's TM info SPRs.  We do it here to
	 * be consistent with the restore path (in recheckpoint) which
	 * cannot happen later in _switch().
	 */
	tm_save_sprs(thr);
}

extern void __tm_recheckpoint(struct thread_struct *thread,
				unsigned long orig_msr);

void tm_recheckpoint(struct thread_struct *thread,
		     unsigned long orig_msr)
{
	unsigned long flags;

	/* We really can't be interrupted here as the TEXASR registers can't
	 * change and later in the trecheckpoint code, we have a userspace R1.
	 * So let's hard disable over this region.
	 */
	local_irq_save(flags);
	hard_irq_disable();

	/* The TM SPRs are restored here, so that TEXASR.FS can be set
	 * before the trecheckpoint and no explosion occurs.
	 */
	tm_restore_sprs(thread);

	__tm_recheckpoint(thread, orig_msr);

	local_irq_restore(flags);
}

static inline void tm_recheckpoint_new_task(struct task_struct *new)
{
	unsigned long msr;

	if (!cpu_has_feature(CPU_FTR_TM))
		return;

	/* Recheckpoint the registers of the thread we're about to switch to.
	 *
	 * If the task was using FP, we non-lazily reload both the original and
	 * the speculative FP register states.  This is because the kernel
	 * doesn't see if/when a TM rollback occurs, so if we take an FP
	 * unavailable later, we are unable to determine which set of FP regs
	 * need to be restored.
	 */
	if (!new->thread.regs)
		return;

	if (!MSR_TM_ACTIVE(new->thread.regs->msr)){
		tm_restore_sprs(&new->thread);
		return;
	}
	msr = new->thread.ckpt_regs.msr;
	/* Recheckpoint to restore original checkpointed register state. */
	TM_DEBUG("*** tm_recheckpoint of pid %d "
		 "(new->msr 0x%lx, new->origmsr 0x%lx)\n",
		 new->pid, new->thread.regs->msr, msr);

	/* This loads the checkpointed FP/VEC state, if used */
	tm_recheckpoint(&new->thread, msr);

	/* This loads the speculative FP/VEC state, if used */
	if (msr & MSR_FP) {
		do_load_up_transact_fpu(&new->thread);
		new->thread.regs->msr |=
			(MSR_FP | new->thread.fpexc_mode);
	}
#ifdef CONFIG_ALTIVEC
	if (msr & MSR_VEC) {
		do_load_up_transact_altivec(&new->thread);
		new->thread.regs->msr |= MSR_VEC;
	}
#endif
	/* We may as well turn on VSX too since all the state is restored now */
	if (msr & MSR_VSX)
		new->thread.regs->msr |= MSR_VSX;

	TM_DEBUG("*** tm_recheckpoint of pid %d complete "
		 "(kernel msr 0x%lx)\n",
		 new->pid, mfmsr());
}

/* Reclaim the outgoing task's transactional state on context switch. */
static inline void __switch_to_tm(struct task_struct *prev)
{
	if (cpu_has_feature(CPU_FTR_TM)) {
		tm_enable();
		tm_reclaim_task(prev);
	}
}

/*
 * This is called if we are on the way out to userspace and the
 * TIF_RESTORE_TM flag is set.  It checks if we need to reload
 * FP and/or vector state and does so if necessary.
 * If userspace is inside a transaction (whether active or
 * suspended) and FP/VMX/VSX instructions have ever been enabled
 * inside that transaction, then we have to keep them enabled
 * and keep the FP/VMX/VSX state loaded while ever the transaction
 * continues.  The reason is that if we didn't, and subsequently
 * got a FP/VMX/VSX unavailable interrupt inside a transaction,
 * we don't know whether it's the same transaction, and thus we
 * don't know which of the checkpointed state and the transactional
 * state to use.
*/
void restore_tm_state(struct pt_regs *regs)
{
	unsigned long msr_diff;

	clear_thread_flag(TIF_RESTORE_TM);
	if (!MSR_TM_ACTIVE(regs->msr))
		return;

	msr_diff = current->thread.ckpt_regs.msr & ~regs->msr;
	msr_diff &= MSR_FP | MSR_VEC | MSR_VSX;
	if (msr_diff & MSR_FP) {
		fp_enable();
		load_fp_state(&current->thread.fp_state);
		regs->msr |= current->thread.fpexc_mode;
	}
	if (msr_diff & MSR_VEC) {
		vec_enable();
		load_vr_state(&current->thread.vr_state);
	}
	regs->msr |= msr_diff;
}

#else
#define tm_recheckpoint_new_task(new)
#define __switch_to_tm(prev)
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */

struct task_struct *__switch_to(struct task_struct *prev,
	struct task_struct *new)
{
	struct thread_struct *new_thread, *old_thread;
	struct task_struct *last;
#ifdef CONFIG_PPC_BOOK3S_64
	struct ppc64_tlb_batch *batch;
#endif

	WARN_ON(!irqs_disabled());

	/* Back up the TAR and DSCR across context switches.
	 * Note that the TAR is not available for use in the kernel.  (To
	 * provide this, the TAR should be backed up/restored on exception
	 * entry/exit instead, and be in pt_regs.  FIXME, this should be in
	 * pt_regs anyway (for debug).)
	 * Save the TAR and DSCR here before we do treclaim/trecheckpoint as
	 * these will change them.
	 */
	save_early_sprs(&prev->thread);

	__switch_to_tm(prev);

#ifdef CONFIG_SMP
	/* avoid complexity of lazy save/restore of fpu
	 * by just saving it every time we switch out if
	 * this task used the fpu during the last quantum.
	 *
	 * If it tries to use the fpu again, it'll trap and
	 * reload its fp regs.  So we don't have to do a restore
	 * every switch, just a save.
	 *  -- Cort
	 */
	if (prev->thread.regs && (prev->thread.regs->msr & MSR_FP))
		giveup_fpu(prev);
#ifdef CONFIG_ALTIVEC
	/*
	 * If the previous thread used altivec in the last quantum
	 * (thus changing altivec regs) then save them.
	 * We used to check the VRSAVE register but not all apps
	 * set it, so we don't rely on it now (and in fact we need
	 * to save & restore VSCR even if VRSAVE == 0).  -- paulus
	 *
	 * On SMP we always save/restore altivec regs just to avoid the
	 * complexity of changing processors.
	 *  -- Cort
	 */
	if (prev->thread.regs && (prev->thread.regs->msr & MSR_VEC))
		giveup_altivec(prev);
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
	if (prev->thread.regs && (prev->thread.regs->msr & MSR_VSX))
		/* VMX and FPU registers are already save here */
		__giveup_vsx(prev);
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
	/*
	 * If the previous thread used spe in the last quantum
	 * (thus changing spe regs) then save them.
	 *
	 * On SMP we always save/restore spe regs just to avoid the
	 * complexity of changing processors.
	 */
	if ((prev->thread.regs && (prev->thread.regs->msr & MSR_SPE)))
		giveup_spe(prev);
#endif /* CONFIG_SPE */

#else  /* CONFIG_SMP */
#ifdef CONFIG_ALTIVEC
	/* Avoid the trap.  On smp this never happens since
	 * we don't set last_task_used_altivec -- Cort
	 */
	if (new->thread.regs && last_task_used_altivec == new)
		new->thread.regs->msr |= MSR_VEC;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
	if (new->thread.regs && last_task_used_vsx == new)
		new->thread.regs->msr |= MSR_VSX;
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
	/* Avoid the trap.  On smp this never happens since
	 * we don't set last_task_used_spe
	 */
	if (new->thread.regs && last_task_used_spe == new)
		new->thread.regs->msr |= MSR_SPE;
#endif /* CONFIG_SPE */

#endif /* CONFIG_SMP */

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
	switch_booke_debug_regs(&new->thread.debug);
#else
/*
 * For PPC_BOOK3S_64, we use the hw-breakpoint interfaces that would
 * schedule DABR
 */
#ifndef CONFIG_HAVE_HW_BREAKPOINT
	if (unlikely(!hw_brk_match(this_cpu_ptr(&current_brk), &new->thread.hw_brk)))
		__set_breakpoint(&new->thread.hw_brk);
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
#endif

	new_thread = &new->thread;
	old_thread = &current->thread;

#ifdef CONFIG_PPC64
	/*
	 * Collect processor utilization data per process
	 */
	if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
		struct cpu_usage *cu = this_cpu_ptr(&cpu_usage_array);
		long unsigned start_tb, current_tb;
		start_tb = old_thread->start_tb;
		cu->current_tb = current_tb = mfspr(SPRN_PURR);
		old_thread->accum_tb += (current_tb - start_tb);
		new_thread->start_tb = current_tb;
	}
#endif /* CONFIG_PPC64 */

#ifdef CONFIG_PPC_BOOK3S_64
	batch = this_cpu_ptr(&ppc64_tlb_batch);
	if (batch->active) {
		current_thread_info()->local_flags |= _TLF_LAZY_MMU;
		if (batch->index)
			__flush_tlb_pending(batch);
		batch->active = 0;
	}
#endif /* CONFIG_PPC_BOOK3S_64 */

	/*
	 * We can't take a PMU exception inside _switch() since there is a
	 * window where the kernel stack SLB and the kernel stack are out
	 * of sync. Hard disable here.
	 */
	hard_irq_disable();

	tm_recheckpoint_new_task(new);

	last = _switch(old_thread, new_thread);

#ifdef CONFIG_PPC_BOOK3S_64
	if (current_thread_info()->local_flags & _TLF_LAZY_MMU) {
		current_thread_info()->local_flags &= ~_TLF_LAZY_MMU;
		batch = this_cpu_ptr(&ppc64_tlb_batch);
		batch->active = 1;
	}
#endif /* CONFIG_PPC_BOOK3S_64 */

	return last;
}

static int instructions_to_print = 16;

/* Dump the instructions around regs->nip for an oops/debug report. */
static void show_instructions(struct pt_regs *regs)
{
	int i;
	unsigned long pc = regs->nip - (instructions_to_print * 3 / 4 *
			sizeof(int));

	printk("Instruction dump:");

	for (i = 0; i < instructions_to_print; i++) {
		int instr;

		if (!(i % 8))
			printk("\n");

#if !defined(CONFIG_BOOKE)
		/* If executing with the IMMU off, adjust pc rather
		 * than print XXXXXXXX.
		 */
		if (!(regs->msr & MSR_IR))
			pc = (unsigned long)phys_to_virt(pc);
#endif

		if (!__kernel_text_address(pc) ||
		     probe_kernel_address((unsigned int __user *)pc, instr)) {
			printk(KERN_CONT "XXXXXXXX ");
		} else {
			if (regs->nip == pc)
				printk(KERN_CONT "<%08x> ", instr);
			else
				printk(KERN_CONT "%08x ", instr);
		}

		pc += sizeof(int);
	}

	printk("\n");
}

/* MSR bit names used by printbits() when dumping registers. */
static struct regbit {
	unsigned long bit;
	const char *name;
} msr_bits[] = {
#if defined(CONFIG_PPC64) && !defined(CONFIG_BOOKE)
	{MSR_SF,	"SF"},
	{MSR_HV,	"HV"},
#endif
	{MSR_VEC,	"VEC"},
	{MSR_VSX,	"VSX"},
#ifdef CONFIG_BOOKE
	{MSR_CE,	"CE"},
#endif
	{MSR_EE,	"EE"},
	{MSR_PR,	"PR"},
	{MSR_FP,	"FP"},
	{MSR_ME,	"ME"},
#ifdef CONFIG_BOOKE
	{MSR_DE,	"DE"},
#else
	{MSR_SE,	"SE"},
	{MSR_BE,	"BE"},
#endif
	{MSR_IR,	"IR"},
	{MSR_DR,	"DR"},
	{MSR_PMM,	"PMM"},
#ifndef CONFIG_BOOKE
	{MSR_RI,	"RI"},
	{MSR_LE,	"LE"},
#endif
	{0,		NULL}
};

/* Print the names of the bits set in val, comma-separated in <...>. */
static void printbits(unsigned long val, struct regbit *bits)
{
	const char *sep = "";

	printk("<");
	for (; bits->bit; ++bits)
		if (val & bits->bit) {
			printk("%s%s", sep, bits->name);
			sep = ",";
		}
	printk(">");
}

#ifdef CONFIG_PPC64
#define REG		"%016lx"
#define REGS_PER_LINE	4
#define LAST_VOLATILE	13
#else
#define REG		"%08lx"
#define REGS_PER_LINE	8
#define LAST_VOLATILE	12
#endif

void show_regs(struct pt_regs * regs)
{
	int i, trap;

	show_regs_print_info(KERN_DEFAULT);

	printk("NIP: "REG" LR: "REG" CTR: "REG"\n",
	       regs->nip, regs->link, regs->ctr);
	printk("REGS: %p TRAP: %04lx %s (%s)\n",
	       regs, regs->trap, print_tainted(), init_utsname()->release);
	printk("MSR: "REG" ", regs->msr);
	printbits(regs->msr, msr_bits);
	printk(" CR: %08lx XER: %08lx\n", regs->ccr, regs->xer);
	trap = TRAP(regs);
	if ((regs->trap != 0xc00) && cpu_has_feature(CPU_FTR_CFAR))
		printk("CFAR: "REG" ", regs->orig_gpr3);
	if (trap == 0x200 || trap == 0x300 || trap == 0x600)
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
		printk("DEAR: "REG" ESR: "REG" ", regs->dar, regs->dsisr);
#else
		printk("DAR: "REG" DSISR: %08lx ", regs->dar, regs->dsisr);
#endif
#ifdef CONFIG_PPC64
	printk("SOFTE: %ld ", regs->softe);
#endif
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	if (MSR_TM_ACTIVE(regs->msr))
		printk("\nPACATMSCRATCH: %016llx ", get_paca()->tm_scratch);
#endif

	for (i = 0; i < 32; i++) {
		if ((i % REGS_PER_LINE) == 0)
			printk("\nGPR%02d: ", i);
		printk(REG " ", regs->gpr[i]);
		if (i == LAST_VOLATILE && !FULL_REGS(regs))
			break;
	}
	printk("\n");
#ifdef CONFIG_KALLSYMS
	/*
	 * Lookup NIP late so we have the best chance of getting the
	 * above info out without failing
	 */
	printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
	printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
#endif
	show_stack(current, (unsigned long *) regs->gpr[1]);
	if (!user_mode(regs))
		show_instructions(regs);
}

void exit_thread(void)
{
	discard_lazy_cpu_state();
}

void flush_thread(void)
{
	discard_lazy_cpu_state();

#ifdef CONFIG_HAVE_HW_BREAKPOINT
	flush_ptrace_hw_breakpoint(current);
#else /* CONFIG_HAVE_HW_BREAKPOINT */
	set_debug_reg_defaults(&current->thread);
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
}

void release_thread(struct task_struct *t)
{
}

/*
 * this gets called so that we can store coprocessor state into memory and
 * copy the current task into the new thread.
 */
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	flush_fp_to_thread(src);
	flush_altivec_to_thread(src);
	flush_vsx_to_thread(src);
	flush_spe_to_thread(src);
	/*
	 * Flush TM state out so we can copy it.  __switch_to_tm() does this
	 * flush but it removes the checkpointed state from the current CPU and
	 * transitions the CPU out of TM mode.  Hence we need to call
	 * tm_recheckpoint_new_task() (on the same task) to restore the
	 * checkpointed state back and the TM mode.
	 */
	__switch_to_tm(src);
	tm_recheckpoint_new_task(src);

	*dst = *src;

	clear_task_ebb(dst);

	return 0;
}

/* Compute and store the VSID covering the new task's kernel stack
 * (64-bit hash MMU only; no-op elsewhere). */
static void setup_ksp_vsid(struct task_struct *p, unsigned long sp)
{
#ifdef CONFIG_PPC_STD_MMU_64
	unsigned long sp_vsid;
	unsigned long llp = mmu_psize_defs[mmu_linear_psize].sllp;

	if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
		sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_1T)
			<< SLB_VSID_SHIFT_1T;
	else
		sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_256M)
			<< SLB_VSID_SHIFT;
	sp_vsid |= SLB_VSID_KERNEL | llp;
	p->thread.ksp_vsid = sp_vsid;
#endif
}

/*
 * Copy a thread..
*/ /* * Copy architecture-specific thread state */ int copy_thread(unsigned long clone_flags, unsigned long usp, unsigned long kthread_arg, struct task_struct *p) { struct pt_regs *childregs, *kregs; extern void ret_from_fork(void); extern void ret_from_kernel_thread(void); void (*f)(void); unsigned long sp = (unsigned long)task_stack_page(p) + THREAD_SIZE; /* Copy registers */ sp -= sizeof(struct pt_regs); childregs = (struct pt_regs *) sp; if (unlikely(p->flags & PF_KTHREAD)) { /* kernel thread */ struct thread_info *ti = (void *)task_stack_page(p); memset(childregs, 0, sizeof(struct pt_regs)); childregs->gpr[1] = sp + sizeof(struct pt_regs); /* function */ if (usp) childregs->gpr[14] = ppc_function_entry((void *)usp); #ifdef CONFIG_PPC64 clear_tsk_thread_flag(p, TIF_32BIT); childregs->softe = 1; #endif childregs->gpr[15] = kthread_arg; p->thread.regs = NULL; /* no user register state */ ti->flags |= _TIF_RESTOREALL; f = ret_from_kernel_thread; } else { /* user thread */ struct pt_regs *regs = current_pt_regs(); CHECK_FULL_REGS(regs); *childregs = *regs; if (usp) childregs->gpr[1] = usp; p->thread.regs = childregs; childregs->gpr[3] = 0; /* Result from fork() */ if (clone_flags & CLONE_SETTLS) { #ifdef CONFIG_PPC64 if (!is_32bit_task()) childregs->gpr[13] = childregs->gpr[6]; else #endif childregs->gpr[2] = childregs->gpr[6]; } f = ret_from_fork; } sp -= STACK_FRAME_OVERHEAD; /* * The way this works is that at some point in the future * some task will call _switch to switch to the new task. * That will pop off the stack frame created below and start * the new task running at ret_from_fork. The new task will * do some house keeping and then return from the fork or clone * system call, using the stack frame created above. 
*/ ((unsigned long *)sp)[0] = 0; sp -= sizeof(struct pt_regs); kregs = (struct pt_regs *) sp; sp -= STACK_FRAME_OVERHEAD; p->thread.ksp = sp; #ifdef CONFIG_PPC32 p->thread.ksp_limit = (unsigned long)task_stack_page(p) + _ALIGN_UP(sizeof(struct thread_info), 16); #endif #ifdef CONFIG_HAVE_HW_BREAKPOINT p->thread.ptrace_bps[0] = NULL; #endif p->thread.fp_save_area = NULL; #ifdef CONFIG_ALTIVEC p->thread.vr_save_area = NULL; #endif setup_ksp_vsid(p, sp); #ifdef CONFIG_PPC64 if (cpu_has_feature(CPU_FTR_DSCR)) { p->thread.dscr_inherit = current->thread.dscr_inherit; p->thread.dscr = current->thread.dscr; } if (cpu_has_feature(CPU_FTR_HAS_PPR)) p->thread.ppr = INIT_PPR; #endif kregs->nip = ppc_function_entry(f); return 0; } /* * Set up a thread for executing a new program */ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp) { #ifdef CONFIG_PPC64 unsigned long load_addr = regs->gpr[2]; /* saved by ELF_PLAT_INIT */ #endif /* * If we exec out of a kernel thread then thread.regs will not be * set. Do it now. */ if (!current->thread.regs) { struct pt_regs *regs = task_stack_page(current) + THREAD_SIZE; current->thread.regs = regs - 1; } memset(regs->gpr, 0, sizeof(regs->gpr)); regs->ctr = 0; regs->link = 0; regs->xer = 0; regs->ccr = 0; regs->gpr[1] = sp; /* * We have just cleared all the nonvolatile GPRs, so make * FULL_REGS(regs) return true. This is necessary to allow * ptrace to examine the thread immediately after exec. */ regs->trap &= ~1UL; #ifdef CONFIG_PPC32 regs->mq = 0; regs->nip = start; regs->msr = MSR_USER; #else if (!is_32bit_task()) { unsigned long entry; if (is_elf2_task()) { /* Look ma, no function descriptors! */ entry = start; /* * Ulrich says: * The latest iteration of the ABI requires that when * calling a function (at its global entry point), * the caller must ensure r12 holds the entry point * address (so that the function can quickly * establish addressability). 
*/ regs->gpr[12] = start; /* Make sure that's restored on entry to userspace. */ set_thread_flag(TIF_RESTOREALL); } else { unsigned long toc; /* start is a relocated pointer to the function * descriptor for the elf _start routine. The first * entry in the function descriptor is the entry * address of _start and the second entry is the TOC * value we need to use. */ __get_user(entry, (unsigned long __user *)start); __get_user(toc, (unsigned long __user *)start+1); /* Check whether the e_entry function descriptor entries * need to be relocated before we can use them. */ if (load_addr != 0) { entry += load_addr; toc += load_addr; } regs->gpr[2] = toc; } regs->nip = entry; regs->msr = MSR_USER64; } else { regs->nip = start; regs->gpr[2] = 0; regs->msr = MSR_USER32; } #endif discard_lazy_cpu_state(); #ifdef CONFIG_VSX current->thread.used_vsr = 0; #endif memset(&current->thread.fp_state, 0, sizeof(current->thread.fp_state)); current->thread.fp_save_area = NULL; #ifdef CONFIG_ALTIVEC memset(&current->thread.vr_state, 0, sizeof(current->thread.vr_state)); current->thread.vr_state.vscr.u[3] = 0x00010000; /* Java mode disabled */ current->thread.vr_save_area = NULL; current->thread.vrsave = 0; current->thread.used_vr = 0; #endif /* CONFIG_ALTIVEC */ #ifdef CONFIG_SPE memset(current->thread.evr, 0, sizeof(current->thread.evr)); current->thread.acc = 0; current->thread.spefscr = 0; current->thread.used_spe = 0; #endif /* CONFIG_SPE */ #ifdef CONFIG_PPC_TRANSACTIONAL_MEM if (cpu_has_feature(CPU_FTR_TM)) regs->msr |= MSR_TM; current->thread.tm_tfhar = 0; current->thread.tm_texasr = 0; current->thread.tm_tfiar = 0; #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */ } EXPORT_SYMBOL(start_thread); #define PR_FP_ALL_EXCEPT (PR_FP_EXC_DIV | PR_FP_EXC_OVF | PR_FP_EXC_UND \ | PR_FP_EXC_RES | PR_FP_EXC_INV) int set_fpexc_mode(struct task_struct *tsk, unsigned int val) { struct pt_regs *regs = tsk->thread.regs; /* This is a bit hairy. 
If we are an SPE enabled processor * (have embedded fp) we store the IEEE exception enable flags in * fpexc_mode. fpexc_mode is also used for setting FP exception * mode (asyn, precise, disabled) for 'Classic' FP. */ if (val & PR_FP_EXC_SW_ENABLE) { #ifdef CONFIG_SPE if (cpu_has_feature(CPU_FTR_SPE)) { /* * When the sticky exception bits are set * directly by userspace, it must call prctl * with PR_GET_FPEXC (with PR_FP_EXC_SW_ENABLE * in the existing prctl settings) or * PR_SET_FPEXC (with PR_FP_EXC_SW_ENABLE in * the bits being set). <fenv.h> functions * saving and restoring the whole * floating-point environment need to do so * anyway to restore the prctl settings from * the saved environment. */ tsk->thread.spefscr_last = mfspr(SPRN_SPEFSCR); tsk->thread.fpexc_mode = val & (PR_FP_EXC_SW_ENABLE | PR_FP_ALL_EXCEPT); return 0; } else { return -EINVAL; } #else return -EINVAL; #endif } /* on a CONFIG_SPE this does not hurt us. The bits that * __pack_fe01 use do not overlap with bits used for * PR_FP_EXC_SW_ENABLE. Additionally, the MSR[FE0,FE1] bits * on CONFIG_SPE implementations are reserved so writing to * them does not change anything */ if (val > PR_FP_EXC_PRECISE) return -EINVAL; tsk->thread.fpexc_mode = __pack_fe01(val); if (regs != NULL && (regs->msr & MSR_FP) != 0) regs->msr = (regs->msr & ~(MSR_FE0|MSR_FE1)) | tsk->thread.fpexc_mode; return 0; } int get_fpexc_mode(struct task_struct *tsk, unsigned long adr) { unsigned int val; if (tsk->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE) #ifdef CONFIG_SPE if (cpu_has_feature(CPU_FTR_SPE)) { /* * When the sticky exception bits are set * directly by userspace, it must call prctl * with PR_GET_FPEXC (with PR_FP_EXC_SW_ENABLE * in the existing prctl settings) or * PR_SET_FPEXC (with PR_FP_EXC_SW_ENABLE in * the bits being set). <fenv.h> functions * saving and restoring the whole * floating-point environment need to do so * anyway to restore the prctl settings from * the saved environment. 
 */
		tsk->thread.spefscr_last = mfspr(SPRN_SPEFSCR);
		val = tsk->thread.fpexc_mode;
	}
	else
		return -EINVAL;
#else
	return -EINVAL;
#endif
	else
		/* Classic FP: translate the packed MSR[FE0,FE1] encoding back
		 * to the PR_FP_EXC_* value userspace originally passed in. */
		val = __unpack_fe01(tsk->thread.fpexc_mode);
	return put_user(val, (unsigned int __user *) adr);
}

/*
 * set_endian - prctl(PR_SET_ENDIAN) backend: select the user-mode
 * endianness for @tsk by toggling MSR_LE in its saved user registers.
 *
 * Returns 0 on success, or -EINVAL if the requested mode is not
 * supported by this CPU (CPU_FTR_REAL_LE / CPU_FTR_PPC_LE), if the
 * task has no user register state, or if @val is not a recognised
 * PR_ENDIAN_* value.
 */
int set_endian(struct task_struct *tsk, unsigned int val)
{
	struct pt_regs *regs = tsk->thread.regs;

	/* Reject endian modes the hardware cannot actually run. */
	if ((val == PR_ENDIAN_LITTLE && !cpu_has_feature(CPU_FTR_REAL_LE)) ||
	    (val == PR_ENDIAN_PPC_LITTLE && !cpu_has_feature(CPU_FTR_PPC_LE)))
		return -EINVAL;

	/* Kernel threads have no user register frame to modify. */
	if (regs == NULL)
		return -EINVAL;

	if (val == PR_ENDIAN_BIG)
		regs->msr &= ~MSR_LE;
	else if (val == PR_ENDIAN_LITTLE || val == PR_ENDIAN_PPC_LITTLE)
		regs->msr |= MSR_LE;
	else
		return -EINVAL;

	return 0;
}

/*
 * get_endian - prctl(PR_GET_ENDIAN) backend: report the current
 * user-mode endianness of @tsk (derived from MSR_LE) to the
 * user-space address @adr.
 *
 * Returns the result of put_user(), or -EINVAL if the CPU supports
 * no little-endian mode at all or the task has no user registers.
 */
int get_endian(struct task_struct *tsk, unsigned long adr)
{
	struct pt_regs *regs = tsk->thread.regs;
	unsigned int val;

	if (!cpu_has_feature(CPU_FTR_PPC_LE) &&
	    !cpu_has_feature(CPU_FTR_REAL_LE))
		return -EINVAL;

	if (regs == NULL)
		return -EINVAL;

	if (regs->msr & MSR_LE) {
		/* Distinguish true LE from the older "PPC little-endian"
		 * byte-swizzle mode by the CPU feature that backs it. */
		if (cpu_has_feature(CPU_FTR_REAL_LE))
			val = PR_ENDIAN_LITTLE;
		else
			val = PR_ENDIAN_PPC_LITTLE;
	}
	else
		val = PR_ENDIAN_BIG;

	return put_user(val, (unsigned int __user *)adr);
}

/* prctl(PR_SET_UNALIGN) backend: store the unaligned-access control
 * word for @tsk; the value is consumed elsewhere (alignment fixup). */
int set_unalign_ctl(struct task_struct *tsk, unsigned int val)
{
	tsk->thread.align_ctl = val;
	return 0;
}

/* prctl(PR_GET_UNALIGN) backend: copy the unaligned-access control
 * word of @tsk out to user space at @adr. */
int get_unalign_ctl(struct task_struct *tsk, unsigned long adr)
{
	return put_user(tsk->thread.align_ctl, (unsigned int __user *)adr);
}

/*
 * valid_irq_stack - report whether @sp (needing @nbytes of frame)
 * lies within one of @p's per-CPU hard- or soft-IRQ stacks.
 */
static inline int valid_irq_stack(unsigned long sp, struct task_struct *p,
				  unsigned long nbytes)
{
	unsigned long stack_page;
	unsigned long cpu = task_cpu(p);

	/*
	 * Avoid crashing if the stack has overflowed and corrupted
	 * task_cpu(p), which is in the thread_info struct.
*/ if (cpu < NR_CPUS && cpu_possible(cpu)) { stack_page = (unsigned long) hardirq_ctx[cpu]; if (sp >= stack_page + sizeof(struct thread_struct) && sp <= stack_page + THREAD_SIZE - nbytes) return 1; stack_page = (unsigned long) softirq_ctx[cpu]; if (sp >= stack_page + sizeof(struct thread_struct) && sp <= stack_page + THREAD_SIZE - nbytes) return 1; } return 0; } int validate_sp(unsigned long sp, struct task_struct *p, unsigned long nbytes) { unsigned long stack_page = (unsigned long)task_stack_page(p); if (sp >= stack_page + sizeof(struct thread_struct) && sp <= stack_page + THREAD_SIZE - nbytes) return 1; return valid_irq_stack(sp, p, nbytes); } EXPORT_SYMBOL(validate_sp); unsigned long get_wchan(struct task_struct *p) { unsigned long ip, sp; int count = 0; if (!p || p == current || p->state == TASK_RUNNING) return 0; sp = p->thread.ksp; if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD)) return 0; do { sp = *(unsigned long *)sp; if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD)) return 0; if (count > 0) { ip = ((unsigned long *)sp)[STACK_FRAME_LR_SAVE]; if (!in_sched_functions(ip)) return ip; } } while (count++ < 16); return 0; } static int kstack_depth_to_print = CONFIG_PRINT_STACK_DEPTH; void show_stack(struct task_struct *tsk, unsigned long *stack) { unsigned long sp, ip, lr, newsp; int count = 0; int firstframe = 1; #ifdef CONFIG_FUNCTION_GRAPH_TRACER int curr_frame = current->curr_ret_stack; extern void return_to_handler(void); unsigned long rth = (unsigned long)return_to_handler; #endif sp = (unsigned long) stack; if (tsk == NULL) tsk = current; if (sp == 0) { if (tsk == current) sp = current_stack_pointer(); else sp = tsk->thread.ksp; } lr = 0; printk("Call Trace:\n"); do { if (!validate_sp(sp, tsk, STACK_FRAME_OVERHEAD)) return; stack = (unsigned long *) sp; newsp = stack[0]; ip = stack[STACK_FRAME_LR_SAVE]; if (!firstframe || ip != lr) { printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip); #ifdef CONFIG_FUNCTION_GRAPH_TRACER if ((ip == rth) && curr_frame >= 0) { 
printk(" (%pS)", (void *)current->ret_stack[curr_frame].ret); curr_frame--; } #endif if (firstframe) printk(" (unreliable)"); printk("\n"); } firstframe = 0; /* * See if this is an exception frame. * We look for the "regshere" marker in the current frame. */ if (validate_sp(sp, tsk, STACK_INT_FRAME_SIZE) && stack[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) { struct pt_regs *regs = (struct pt_regs *) (sp + STACK_FRAME_OVERHEAD); lr = regs->link; printk("--- interrupt: %lx at %pS\n LR = %pS\n", regs->trap, (void *)regs->nip, (void *)lr); firstframe = 1; } sp = newsp; } while (count++ < kstack_depth_to_print); } #ifdef CONFIG_PPC64 /* Called with hard IRQs off */ void notrace __ppc64_runlatch_on(void) { struct thread_info *ti = current_thread_info(); unsigned long ctrl; ctrl = mfspr(SPRN_CTRLF); ctrl |= CTRL_RUNLATCH; mtspr(SPRN_CTRLT, ctrl); ti->local_flags |= _TLF_RUNLATCH; } /* Called with hard IRQs off */ void notrace __ppc64_runlatch_off(void) { struct thread_info *ti = current_thread_info(); unsigned long ctrl; ti->local_flags &= ~_TLF_RUNLATCH; ctrl = mfspr(SPRN_CTRLF); ctrl &= ~CTRL_RUNLATCH; mtspr(SPRN_CTRLT, ctrl); } #endif /* CONFIG_PPC64 */ unsigned long arch_align_stack(unsigned long sp) { if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space) sp -= get_random_int() & ~PAGE_MASK; return sp & ~0xf; } static inline unsigned long brk_rnd(void) { unsigned long rnd = 0; /* 8MB for 32bit, 1GB for 64bit */ if (is_32bit_task()) rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT))); else rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT))); return rnd << PAGE_SHIFT; } unsigned long arch_randomize_brk(struct mm_struct *mm) { unsigned long base = mm->brk; unsigned long ret; #ifdef CONFIG_PPC_STD_MMU_64 /* * If we are using 1TB segments and we are allowed to randomise * the heap, we can put it above 1TB so it is backed by a 1TB * segment. 
   Otherwise the heap will be in the bottom 1TB
	 * which always uses 256MB segments and this may result in a
	 * performance penalty.
	 */
	if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
		base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
#endif

	/* Randomise upward from the (possibly raised) base, page-aligned. */
	ret = PAGE_ALIGN(base + brk_rnd());

	/* Never hand back a break below the current one. */
	if (ret < mm->brk)
		return mm->brk;

	return ret;
}
./CrossVul/dataset_final_sorted/CWE-284/c/good_1822_0
crossvul-cpp_data_good_5075_1
/* Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* _ _ * _ __ ___ ___ __| | ___ ___| | mod_ssl * | '_ ` _ \ / _ \ / _` | / __/ __| | Apache Interface to OpenSSL * | | | | | | (_) | (_| | \__ \__ \ | * |_| |_| |_|\___/ \__,_|___|___/___/_| * |_____| * ssl_engine_kernel.c * The SSL engine kernel */ /* ``It took me fifteen years to discover I had no talent for programming, but I couldn't give it up because by that time I was too famous.'' -- Unknown */ #include "ssl_private.h" #include "mod_ssl.h" #include "util_md5.h" #include "scoreboard.h" static void ssl_configure_env(request_rec *r, SSLConnRec *sslconn); #ifdef HAVE_TLSEXT static int ssl_find_vhost(void *servername, conn_rec *c, server_rec *s); #endif #define SWITCH_STATUS_LINE "HTTP/1.1 101 Switching Protocols" #define UPGRADE_HEADER "Upgrade: TLS/1.0, HTTP/1.1" #define CONNECTION_HEADER "Connection: Upgrade" /* Perform an upgrade-to-TLS for the given request, per RFC 2817. 
 */
static apr_status_t upgrade_connection(request_rec *r)
{
    struct conn_rec *conn = r->connection;
    apr_bucket_brigade *bb;
    SSLConnRec *sslconn;
    apr_status_t rv;
    SSL *ssl;

    ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, r, APLOGNO(02028)
                  "upgrading connection to TLS");

    /* Send the "101 Switching Protocols" interim response in the clear,
     * before any TLS machinery is attached to the connection. */
    bb = apr_brigade_create(r->pool, conn->bucket_alloc);

    rv = ap_fputs(conn->output_filters, bb, SWITCH_STATUS_LINE CRLF
                  UPGRADE_HEADER CRLF CONNECTION_HEADER CRLF CRLF);
    if (rv == APR_SUCCESS) {
        /* Flush so the client sees the 101 before we expect a ClientHello. */
        APR_BRIGADE_INSERT_TAIL(bb,
                                apr_bucket_flush_create(conn->bucket_alloc));
        rv = ap_pass_brigade(conn->output_filters, bb);
    }

    if (rv) {
        ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(02029)
                      "failed to send 101 interim response for connection "
                      "upgrade");
        return rv;
    }

    /* Attach the SSL filters/state to this connection, then handshake. */
    ssl_init_ssl_connection(conn, r);

    sslconn = myConnConfig(conn);
    ssl = sslconn->ssl;

    /* Perform initial SSL handshake. */
    SSL_set_accept_state(ssl);
    SSL_do_handshake(ssl);

    if (!SSL_is_init_finished(ssl)) {
        ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(02030)
                      "TLS upgrade handshake failed");
        ssl_log_ssl_error(SSLLOG_MARK, APLOG_ERR, r->server);

        return APR_ECONNABORTED;
    }

    return APR_SUCCESS;
}

/* Perform a speculative (and non-blocking) read from the connection
 * filters for the given request, to determine whether there is any
 * pending data to read.  Return non-zero if there is, else zero.
 */
static int has_buffered_data(request_rec *r)
{
    apr_bucket_brigade *bb;
    apr_off_t len;
    apr_status_t rv;
    int result;

    bb = apr_brigade_create(r->pool, r->connection->bucket_alloc);

    /* AP_MODE_SPECULATIVE + APR_NONBLOCK_READ: peek without consuming
     * and without blocking if nothing is pending. */
    rv = ap_get_brigade(r->connection->input_filters, bb, AP_MODE_SPECULATIVE,
                        APR_NONBLOCK_READ, 1);
    result = rv == APR_SUCCESS
        && apr_brigade_length(bb, 1, &len) == APR_SUCCESS
        && len > 0;

    apr_brigade_destroy(bb);

    return result;
}

/* Return 1 iff @s1 and @s2 contain the same *set* of strings,
 * ignoring order.  NULL-safe: both NULL (or same pointer) is equal;
 * one NULL, or differing element counts, is not. */
static int ap_array_same_str_set(apr_array_header_t *s1,
                                 apr_array_header_t *s2)
{
    int i;
    const char *c;

    if (s1 == s2) {
        return 1;
    }
    else if (!s1 || !s2 || (s1->nelts != s2->nelts)) {
        return 0;
    }

    for (i = 0; i < s1->nelts; i++) {
        c = APR_ARRAY_IDX(s1, i, const char *);
        if (!c || !ap_array_str_contains(s2, c)) {
            return 0;
        }
    }
    return 1;
}

/* Return 1 iff the two server public-key/certificate configurations
 * are equivalent (same CA name path/file and the same sets of
 * certificate and key files); 0 otherwise or if either is NULL. */
static int ssl_pk_server_compatible(modssl_pk_server_t *pks1,
                                    modssl_pk_server_t *pks2)
{
    if (!pks1 || !pks2) {
        return 0;
    }
    /* both have the same certificates? */
    if ((pks1->ca_name_path != pks2->ca_name_path) &&
        (!pks1->ca_name_path || !pks2->ca_name_path ||
         strcmp(pks1->ca_name_path, pks2->ca_name_path))) {
        return 0;
    }
    if ((pks1->ca_name_file != pks2->ca_name_file) &&
        (!pks1->ca_name_file || !pks2->ca_name_file ||
         strcmp(pks1->ca_name_file, pks2->ca_name_file))) {
        return 0;
    }
    if (!ap_array_same_str_set(pks1->cert_files, pks2->cert_files)
        || !ap_array_same_str_set(pks1->key_files, pks2->key_files)) {
        return 0;
    }
    return 1;
}

/* Return 1 iff two client-auth contexts are equivalent: same verify
 * depth/mode, same CA cert path/file and same cipher suite string. */
static int ssl_auth_compatible(modssl_auth_ctx_t *a1,
                               modssl_auth_ctx_t *a2)
{
    if (!a1 || !a2) {
        return 0;
    }
    /* both have the same verification */
    if ((a1->verify_depth != a2->verify_depth)
        || (a1->verify_mode != a2->verify_mode)) {
        return 0;
    }
    /* both have the same ca path/file */
    if ((a1->ca_cert_path != a2->ca_cert_path) &&
        (!a1->ca_cert_path || !a2->ca_cert_path ||
         strcmp(a1->ca_cert_path, a2->ca_cert_path))) {
        return 0;
    }
    if ((a1->ca_cert_file != a2->ca_cert_file) &&
        (!a1->ca_cert_file || !a2->ca_cert_file ||
         strcmp(a1->ca_cert_file, a2->ca_cert_file))) {
        return 0;
    }
    /* both have the same ca cipher suite
       string */
    if ((a1->cipher_suite != a2->cipher_suite) &&
        (!a1->cipher_suite || !a2->cipher_suite ||
         strcmp(a1->cipher_suite, a2->cipher_suite))) {
        return 0;
    }
    return 1;
}

/* Return 1 iff two mod_ssl contexts are equivalent: same protocol
 * mask, compatible auth settings and compatible server key/cert
 * configuration; 0 otherwise or if either is NULL. */
static int ssl_ctx_compatible(modssl_ctx_t *ctx1,
                              modssl_ctx_t *ctx2)
{
    if (!ctx1 || !ctx2 ||
        (ctx1->protocol != ctx2->protocol) ||
        !ssl_auth_compatible(&ctx1->auth, &ctx2->auth) ||
        !ssl_pk_server_compatible(ctx1->pks, ctx2->pks)) {
        return 0;
    }
    return 1;
}

/* Return 1 iff the SSL configuration of virtual hosts @s1 and @s2 is
 * equivalent, i.e. a request for one may safely be served under a
 * handshake done for the other. */
static int ssl_server_compatible(server_rec *s1, server_rec *s2)
{
    SSLSrvConfigRec *sc1 = s1? mySrvConfig(s1) : NULL;
    SSLSrvConfigRec *sc2 = s2? mySrvConfig(s2) : NULL;

    /* both use the same TLS protocol? */
    if (!sc1 || !sc2 ||
        !ssl_ctx_compatible(sc1->server, sc2->server)) {
        return 0;
    }

    return 1;
}

/*
 *  Post Read Request Handler
 */
int ssl_hook_ReadReq(request_rec *r)
{
    SSLSrvConfigRec *sc = mySrvConfig(r->server);
    SSLConnRec *sslconn;
    const char *upgrade;
#ifdef HAVE_TLSEXT
    const char *servername;
#endif
    SSL *ssl;

    /* Perform TLS upgrade here if "SSLEngine optional" is configured,
     * SSL is not already set up for this connection, and the client
     * has sent a suitable Upgrade header. */
    if (sc->enabled == SSL_ENABLED_OPTIONAL && !myConnConfig(r->connection)
        && (upgrade = apr_table_get(r->headers_in, "Upgrade")) != NULL
        && ap_find_token(r->pool, upgrade, "TLS/1.0")) {
        if (upgrade_connection(r)) {
            return AP_FILTER_ERROR;
        }
    }

    /* If we are on a slave connection, we do not expect to have an
     * SSLConnRec, but our master connection might. */
    sslconn = myConnConfig(r->connection);
    if (!(sslconn && sslconn->ssl) && r->connection->master) {
        sslconn = myConnConfig(r->connection->master);
    }

    /* If "SSLEngine optional" is configured, this is not an SSL
     * connection, and this isn't a subrequest, send an Upgrade
     * response header.  Note this must happen before map_to_storage
     * and OPTIONS * request processing is completed.
*/ if (sc->enabled == SSL_ENABLED_OPTIONAL && !(sslconn && sslconn->ssl) && !r->main) { apr_table_setn(r->headers_out, "Upgrade", "TLS/1.0, HTTP/1.1"); apr_table_mergen(r->headers_out, "Connection", "upgrade"); } if (!sslconn) { return DECLINED; } if (sslconn->non_ssl_request == NON_SSL_SET_ERROR_MSG) { apr_table_setn(r->notes, "error-notes", "Reason: You're speaking plain HTTP to an SSL-enabled " "server port.<br />\n Instead use the HTTPS scheme to " "access this URL, please.<br />\n"); /* Now that we have caught this error, forget it. we are done * with using SSL on this request. */ sslconn->non_ssl_request = NON_SSL_OK; return HTTP_BAD_REQUEST; } /* * Get the SSL connection structure and perform the * delayed interlinking from SSL back to request_rec */ ssl = sslconn->ssl; if (!ssl) { return DECLINED; } #ifdef HAVE_TLSEXT /* * Perform SNI checks only on the initial request. In particular, * if these checks detect a problem, the checks shouldn't return an * error again when processing an ErrorDocument redirect for the * original problem. */ if (r->proxyreq != PROXYREQ_PROXY && ap_is_initial_req(r)) { server_rec *handshakeserver = sslconn->server; SSLSrvConfigRec *hssc = mySrvConfig(handshakeserver); if ((servername = SSL_get_servername(ssl, TLSEXT_NAMETYPE_host_name))) { /* * The SNI extension supplied a hostname. 
So don't accept requests * with either no hostname or a hostname that selected a different * virtual host than the one used for the handshake, causing * different SSL parameters to be applied, such as SSLProtocol, * SSLCACertificateFile/Path and SSLCADNRequestFile/Path which * cannot be renegotiated (SSLCA* due to current limitations in * OpenSSL, see: * http://mail-archives.apache.org/mod_mbox/httpd-dev/200806.mbox/%3C48592955.2090303@velox.ch%3E * and * http://mail-archives.apache.org/mod_mbox/httpd-dev/201312.mbox/%3CCAKQ1sVNpOrdiBm-UPw1hEdSN7YQXRRjeaT-MCWbW_7mN%3DuFiOw%40mail.gmail.com%3E * ) */ if (!r->hostname) { ap_log_error(APLOG_MARK, APLOG_ERR, 0, r->server, APLOGNO(02031) "Hostname %s provided via SNI, but no hostname" " provided in HTTP request", servername); return HTTP_BAD_REQUEST; } if (r->server != handshakeserver && !ssl_server_compatible(sslconn->server, r->server)) { /* * The request does not select the virtual host that was * selected by the SNI and its SSL parameters are different */ ap_log_error(APLOG_MARK, APLOG_ERR, 0, r->server, APLOGNO(02032) "Hostname %s provided via SNI and hostname %s provided" " via HTTP have no compatible SSL setup", servername, r->hostname); return HTTP_MISDIRECTED_REQUEST; } } else if (((sc->strict_sni_vhost_check == SSL_ENABLED_TRUE) || hssc->strict_sni_vhost_check == SSL_ENABLED_TRUE) && r->connection->vhost_lookup_data) { /* * We are using a name based configuration here, but no hostname was * provided via SNI. Don't allow that if are requested to do strict * checking. Check whether this strict checking was set up either in the * server config we used for handshaking or in our current server. * This should avoid insecure configuration by accident. 
*/ ap_log_error(APLOG_MARK, APLOG_ERR, 0, r->server, APLOGNO(02033) "No hostname was provided via SNI for a name based" " virtual host"); apr_table_setn(r->notes, "error-notes", "Reason: The client software did not provide a " "hostname using Server Name Indication (SNI), " "which is required to access this server.<br />\n"); return HTTP_FORBIDDEN; } } #endif modssl_set_app_data2(ssl, r); /* * Log information about incoming HTTPS requests */ if (APLOGrinfo(r) && ap_is_initial_req(r)) { ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(02034) "%s HTTPS request received for child %ld (server %s)", (r->connection->keepalives <= 0 ? "Initial (No.1)" : apr_psprintf(r->pool, "Subsequent (No.%d)", r->connection->keepalives+1)), r->connection->id, ssl_util_vhostid(r->pool, r->server)); } /* SetEnvIf ssl-*-shutdown flags can only be per-server, * so they won't change across keepalive requests */ if (sslconn->shutdown_type == SSL_SHUTDOWN_TYPE_UNSET) { ssl_configure_env(r, sslconn); } return DECLINED; } /* * Move SetEnvIf information from request_rec to conn_rec/BUFF * to allow the close connection handler to use them. */ static void ssl_configure_env(request_rec *r, SSLConnRec *sslconn) { int i; const apr_array_header_t *arr = apr_table_elts(r->subprocess_env); const apr_table_entry_t *elts = (const apr_table_entry_t *)arr->elts; sslconn->shutdown_type = SSL_SHUTDOWN_TYPE_STANDARD; for (i = 0; i < arr->nelts; i++) { const char *key = elts[i].key; switch (*key) { case 's': /* being case-sensitive here. 
* and not checking for the -shutdown since these are the only * SetEnvIf "flags" we support */ if (!strncmp(key+1, "sl-", 3)) { key += 4; if (!strncmp(key, "unclean", 7)) { sslconn->shutdown_type = SSL_SHUTDOWN_TYPE_UNCLEAN; } else if (!strncmp(key, "accurate", 8)) { sslconn->shutdown_type = SSL_SHUTDOWN_TYPE_ACCURATE; } return; /* should only ever be one ssl-*-shutdown */ } break; } } } /* * Access Handler */ int ssl_hook_Access(request_rec *r) { SSLDirConfigRec *dc = myDirConfig(r); SSLSrvConfigRec *sc = mySrvConfig(r->server); SSLConnRec *sslconn = myConnConfig(r->connection); SSL *ssl = sslconn ? sslconn->ssl : NULL; server_rec *handshakeserver = sslconn ? sslconn->server : NULL; SSLSrvConfigRec *hssc = handshakeserver? mySrvConfig(handshakeserver) : NULL; SSL_CTX *ctx = NULL; apr_array_header_t *requires; ssl_require_t *ssl_requires; int ok, i; BOOL renegotiate = FALSE, renegotiate_quick = FALSE; X509 *cert; X509 *peercert; X509_STORE *cert_store = NULL; X509_STORE_CTX *cert_store_ctx; STACK_OF(SSL_CIPHER) *cipher_list_old = NULL, *cipher_list = NULL; const SSL_CIPHER *cipher = NULL; int depth, verify_old, verify, n, is_slave = 0; const char *ncipher_suite; /* On a slave connection, we do not expect to have an SSLConnRec, but * our master connection might have one. */ if (!(sslconn && ssl) && r->connection->master) { sslconn = myConnConfig(r->connection->master); ssl = sslconn ? sslconn->ssl : NULL; handshakeserver = sslconn ? sslconn->server : NULL; hssc = handshakeserver? mySrvConfig(handshakeserver) : NULL; is_slave = 1; } if (ssl) { /* * We should have handshaken here (on handshakeserver), * otherwise we are being redirected (ErrorDocument) from * a renegotiation failure below. The access is still * forbidden in the latter case, let ap_die() handle * this recursive (same) error. 
*/ if (!SSL_is_init_finished(ssl)) { return HTTP_FORBIDDEN; } ctx = SSL_get_SSL_CTX(ssl); } /* * Support for SSLRequireSSL directive */ if (dc->bSSLRequired && !ssl) { if ((sc->enabled == SSL_ENABLED_OPTIONAL) && !is_slave) { /* This vhost was configured for optional SSL, just tell the * client that we need to upgrade. */ apr_table_setn(r->err_headers_out, "Upgrade", "TLS/1.0, HTTP/1.1"); apr_table_setn(r->err_headers_out, "Connection", "Upgrade"); return HTTP_UPGRADE_REQUIRED; } ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(02219) "access to %s failed, reason: %s", r->filename, "SSL connection required"); /* remember forbidden access for strict require option */ apr_table_setn(r->notes, "ssl-access-forbidden", "1"); return HTTP_FORBIDDEN; } /* * Check to see whether SSL is in use; if it's not, then no * further access control checks are relevant. (the test for * sc->enabled is probably strictly unnecessary) */ if (sc->enabled == SSL_ENABLED_FALSE || !ssl) { return DECLINED; } #ifdef HAVE_SRP /* * Support for per-directory reconfigured SSL connection parameters * * We do not force any renegotiation if the user is already authenticated * via SRP. * */ if (SSL_get_srp_username(ssl)) { return DECLINED; } #endif /* * Support for per-directory reconfigured SSL connection parameters. * * This is implemented by forcing an SSL renegotiation with the * reconfigured parameter suite. But Apache's internal API processing * makes our life very hard here, because when internal sub-requests occur * we nevertheless should avoid multiple unnecessary SSL handshakes (they * require extra network I/O and especially time to perform). * * But the optimization for filtering out the unnecessary handshakes isn't * obvious and trivial. Especially because while Apache is in its * sub-request processing the client could force additional handshakes, * too. And these take place perhaps without our notice. 
So the only * possibility is to explicitly _ask_ OpenSSL whether the renegotiation * has to be performed or not. It has to performed when some parameters * which were previously known (by us) are not those we've now * reconfigured (as known by OpenSSL) or (in optimized way) at least when * the reconfigured parameter suite is stronger (more restrictions) than * the currently active one. */ /* * Override of SSLCipherSuite * * We provide two options here: * * o The paranoid and default approach where we force a renegotiation when * the cipher suite changed in _any_ way (which is straight-forward but * often forces renegotiations too often and is perhaps not what the * user actually wanted). * * o The optimized and still secure way where we force a renegotiation * only if the currently active cipher is no longer contained in the * reconfigured/new cipher suite. Any other changes are not important * because it's the servers choice to select a cipher from the ones the * client supports. So as long as the current cipher is still in the new * cipher suite we're happy. Because we can assume we would have * selected it again even when other (better) ciphers exists now in the * new cipher suite. This approach is fine because the user explicitly * has to enable this via ``SSLOptions +OptRenegotiate''. So we do no * implicit optimizations. */ ncipher_suite = (dc->szCipherSuite? dc->szCipherSuite : (r->server != handshakeserver)? sc->server->auth.cipher_suite : NULL); if (ncipher_suite && (!sslconn->cipher_suite || strcmp(ncipher_suite, sslconn->cipher_suite))) { /* remember old state */ if (dc->nOptions & SSL_OPT_OPTRENEGOTIATE) { cipher = SSL_get_current_cipher(ssl); } else { cipher_list_old = (STACK_OF(SSL_CIPHER) *)SSL_get_ciphers(ssl); if (cipher_list_old) { cipher_list_old = sk_SSL_CIPHER_dup(cipher_list_old); } } /* configure new state */ if (is_slave) { /* TODO: this categorically fails changed cipher suite settings * on slave connections. 
We could do better by * - create a new SSL* from our SSL_CTX and set cipher suite there, * and retrieve ciphers, free afterwards * Modifying the SSL on a slave connection is no good. */ apr_table_setn(r->notes, "ssl-renegotiate-forbidden", "cipher-suite"); return HTTP_FORBIDDEN; } if (!SSL_set_cipher_list(ssl, ncipher_suite)) { ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r, APLOGNO(02253) "Unable to reconfigure (per-directory) " "permitted SSL ciphers"); ssl_log_ssl_error(SSLLOG_MARK, APLOG_ERR, r->server); if (cipher_list_old) { sk_SSL_CIPHER_free(cipher_list_old); } return HTTP_FORBIDDEN; } /* determine whether a renegotiation has to be forced */ cipher_list = (STACK_OF(SSL_CIPHER) *)SSL_get_ciphers(ssl); if (dc->nOptions & SSL_OPT_OPTRENEGOTIATE) { /* optimized way */ if ((!cipher && cipher_list) || (cipher && !cipher_list)) { renegotiate = TRUE; } else if (cipher && cipher_list && (sk_SSL_CIPHER_find(cipher_list, cipher) < 0)) { renegotiate = TRUE; } } else { /* paranoid way */ if ((!cipher_list_old && cipher_list) || (cipher_list_old && !cipher_list)) { renegotiate = TRUE; } else if (cipher_list_old && cipher_list) { for (n = 0; !renegotiate && (n < sk_SSL_CIPHER_num(cipher_list)); n++) { const SSL_CIPHER *value = sk_SSL_CIPHER_value(cipher_list, n); if (sk_SSL_CIPHER_find(cipher_list_old, value) < 0) { renegotiate = TRUE; } } for (n = 0; !renegotiate && (n < sk_SSL_CIPHER_num(cipher_list_old)); n++) { const SSL_CIPHER *value = sk_SSL_CIPHER_value(cipher_list_old, n); if (sk_SSL_CIPHER_find(cipher_list, value) < 0) { renegotiate = TRUE; } } } } /* cleanup */ if (cipher_list_old) { sk_SSL_CIPHER_free(cipher_list_old); } if (renegotiate) { if (is_slave) { /* The request causes renegotiation on a slave connection. * This is not allowed since we might have concurrent requests * on this connection. 
*/ apr_table_setn(r->notes, "ssl-renegotiate-forbidden", "cipher-suite"); return HTTP_FORBIDDEN; } #ifdef SSL_OP_CIPHER_SERVER_PREFERENCE if (sc->cipher_server_pref == TRUE) { SSL_set_options(ssl, SSL_OP_CIPHER_SERVER_PREFERENCE); } #endif /* tracing */ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(02220) "Reconfigured cipher suite will force renegotiation"); } } /* * override of SSLVerifyClient * * We force a renegotiation if the reconfigured/new verify type is * stronger than the currently active verify type. * * The order is: none << optional_no_ca << optional << require * * Additionally the following optimization is possible here: When the * currently active verify type is "none" but a client certificate is * already known/present, it's enough to manually force a client * verification but at least skip the I/O-intensive renegotiation * handshake. */ if ((dc->nVerifyClient != SSL_CVERIFY_UNSET) || (sc->server->auth.verify_mode != SSL_CVERIFY_UNSET)) { /* remember old state */ verify_old = SSL_get_verify_mode(ssl); /* configure new state */ verify = SSL_VERIFY_NONE; if ((dc->nVerifyClient == SSL_CVERIFY_REQUIRE) || (sc->server->auth.verify_mode == SSL_CVERIFY_REQUIRE)) { verify |= SSL_VERIFY_PEER_STRICT; } if ((dc->nVerifyClient == SSL_CVERIFY_OPTIONAL) || (dc->nVerifyClient == SSL_CVERIFY_OPTIONAL_NO_CA) || (sc->server->auth.verify_mode == SSL_CVERIFY_OPTIONAL) || (sc->server->auth.verify_mode == SSL_CVERIFY_OPTIONAL_NO_CA)) { verify |= SSL_VERIFY_PEER; } /* TODO: this seems premature since we do not know if there * are any changes required. 
*/ SSL_set_verify(ssl, verify, ssl_callback_SSLVerify); SSL_set_verify_result(ssl, X509_V_OK); /* determine whether we've to force a renegotiation */ if (!renegotiate && verify != verify_old) { if (((verify_old == SSL_VERIFY_NONE) && (verify != SSL_VERIFY_NONE)) || (!(verify_old & SSL_VERIFY_PEER) && (verify & SSL_VERIFY_PEER)) || (!(verify_old & SSL_VERIFY_FAIL_IF_NO_PEER_CERT) && (verify & SSL_VERIFY_FAIL_IF_NO_PEER_CERT))) { renegotiate = TRUE; if (is_slave) { /* The request causes renegotiation on a slave connection. * This is not allowed since we might have concurrent requests * on this connection. */ apr_table_setn(r->notes, "ssl-renegotiate-forbidden", "verify-client"); SSL_set_verify(ssl, verify_old, ssl_callback_SSLVerify); return HTTP_FORBIDDEN; } /* optimization */ if ((dc->nOptions & SSL_OPT_OPTRENEGOTIATE) && (verify_old == SSL_VERIFY_NONE) && ((peercert = SSL_get_peer_certificate(ssl)) != NULL)) { renegotiate_quick = TRUE; X509_free(peercert); } ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(02255) "Changed client verification type will force " "%srenegotiation", renegotiate_quick ? "quick " : ""); } else if (verify != SSL_VERIFY_NONE) { /* * override of SSLVerifyDepth * * The depth checks are handled by us manually inside the * verify callback function and not by OpenSSL internally * (and our function is aware of both the per-server and * per-directory contexts). So we cannot ask OpenSSL about * the currently verify depth. Instead we remember it in our * SSLConnRec attached to the SSL* of OpenSSL. We've to force * the renegotiation if the reconfigured/new verify depth is * less than the currently active/remembered verify depth * (because this means more restriction on the certificate * chain). */ n = (sslconn->verify_depth != UNSET) ? sslconn->verify_depth : hssc->server->auth.verify_depth; /* determine the new depth */ sslconn->verify_depth = (dc->nVerifyDepth != UNSET) ? 
dc->nVerifyDepth : sc->server->auth.verify_depth; if (sslconn->verify_depth < n) { renegotiate = TRUE; ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(02254) "Reduced client verification depth will " "force renegotiation"); } } } /* If we're handling a request for a vhost other than the default one, * then we need to make sure that client authentication is properly * enforced. For clients supplying an SNI extension, the peer * certificate verification has happened in the handshake already * (and r->server == handshakeserver). For non-SNI requests, * an additional check is needed here. If client authentication * is configured as mandatory, then we can only proceed if the * CA list doesn't have to be changed (OpenSSL doesn't provide * an option to change the list for an existing session). */ if ((r->server != handshakeserver) && renegotiate && ((verify & SSL_VERIFY_PEER) || (verify & SSL_VERIFY_FAIL_IF_NO_PEER_CERT))) { #define MODSSL_CFG_CA_NE(f, sc1, sc2) \ (sc1->server->auth.f && \ (!sc2->server->auth.f || \ strNE(sc1->server->auth.f, sc2->server->auth.f))) if (MODSSL_CFG_CA_NE(ca_cert_file, sc, hssc) || MODSSL_CFG_CA_NE(ca_cert_path, sc, hssc)) { if (verify & SSL_VERIFY_FAIL_IF_NO_PEER_CERT) { ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, r, APLOGNO(02256) "Non-default virtual host with SSLVerify set to " "'require' and VirtualHost-specific CA certificate " "list is only available to clients with TLS server " "name indication (SNI) support"); SSL_set_verify(ssl, verify_old, NULL); return HTTP_FORBIDDEN; } else /* let it pass, possibly with an "incorrect" peer cert, * so make sure the SSL_CLIENT_VERIFY environment variable * will indicate partial success only, later on. */ sslconn->verify_info = "GENEROUS"; } } } /* If a renegotiation is now required for this location, and the * request includes a message body (and the client has not * requested a "100 Continue" response), then the client will be * streaming the request body over the wire already. 
In that * case, it is not possible to stop and perform a new SSL * handshake immediately; once the SSL library moves to the * "accept" state, it will reject the SSL packets which the client * is sending for the request body. * * To allow authentication to complete in this auth hook, the * solution used here is to fill a (bounded) buffer with the * request body, and then to reinject that request body later. */ if (renegotiate && !renegotiate_quick && (apr_table_get(r->headers_in, "transfer-encoding") || (apr_table_get(r->headers_in, "content-length") && strcmp(apr_table_get(r->headers_in, "content-length"), "0"))) && !r->expecting_100) { int rv; apr_size_t rsize; rsize = dc->nRenegBufferSize == UNSET ? DEFAULT_RENEG_BUFFER_SIZE : dc->nRenegBufferSize; if (rsize > 0) { /* Fill the I/O buffer with the request body if possible. */ rv = ssl_io_buffer_fill(r, rsize); } else { /* If the reneg buffer size is set to zero, just fail. */ rv = HTTP_REQUEST_ENTITY_TOO_LARGE; } if (rv) { ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(02257) "could not buffer message body to allow " "SSL renegotiation to proceed"); return rv; } } /* * now do the renegotiation if anything was actually reconfigured */ if (renegotiate) { /* * Now we force the SSL renegotiation by sending the Hello Request * message to the client. Here we have to do a workaround: Actually * OpenSSL returns immediately after sending the Hello Request (the * intent AFAIK is because the SSL/TLS protocol says it's not a must * that the client replies to a Hello Request). But because we insist * on a reply (anything else is an error for us) we have to go to the * ACCEPT state manually. Using SSL_set_accept_state() doesn't work * here because it resets too much of the connection. So we set the * state explicitly and continue the handshake manually. 
*/ ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, r, APLOGNO(02221) "Requesting connection re-negotiation"); if (renegotiate_quick) { STACK_OF(X509) *cert_stack; /* perform just a manual re-verification of the peer */ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(02258) "Performing quick renegotiation: " "just re-verifying the peer"); cert_stack = (STACK_OF(X509) *)SSL_get_peer_cert_chain(ssl); cert = SSL_get_peer_certificate(ssl); if (!cert_stack && cert) { /* client cert is in the session cache, but there is * no chain, since ssl3_get_client_certificate() * sk_X509_shift-ed the peer cert out of the chain. * we put it back here for the purpose of quick_renegotiation. */ cert_stack = sk_X509_new_null(); sk_X509_push(cert_stack, cert); } if (!cert_stack || (sk_X509_num(cert_stack) == 0)) { ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(02222) "Cannot find peer certificate chain"); return HTTP_FORBIDDEN; } if (!(cert_store || (cert_store = SSL_CTX_get_cert_store(ctx)))) { ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(02223) "Cannot find certificate storage"); return HTTP_FORBIDDEN; } if (!cert) { cert = sk_X509_value(cert_stack, 0); } cert_store_ctx = X509_STORE_CTX_new(); X509_STORE_CTX_init(cert_store_ctx, cert_store, cert, cert_stack); depth = SSL_get_verify_depth(ssl); if (depth >= 0) { X509_STORE_CTX_set_depth(cert_store_ctx, depth); } X509_STORE_CTX_set_ex_data(cert_store_ctx, SSL_get_ex_data_X509_STORE_CTX_idx(), (char *)ssl); if (!X509_verify_cert(cert_store_ctx)) { ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(02224) "Re-negotiation verification step failed"); ssl_log_ssl_error(SSLLOG_MARK, APLOG_ERR, r->server); } SSL_set_verify_result(ssl, X509_STORE_CTX_get_error(cert_store_ctx)); X509_STORE_CTX_cleanup(cert_store_ctx); X509_STORE_CTX_free(cert_store_ctx); if (cert_stack != SSL_get_peer_cert_chain(ssl)) { /* we created this ourselves, so free it */ sk_X509_pop_free(cert_stack, X509_free); } } else { char peekbuf[1]; const char 
*reneg_support; request_rec *id = r->main ? r->main : r; /* Additional mitigation for CVE-2009-3555: At this point, * before renegotiating, an (entire) request has been read * from the connection. An attacker may have sent further * data to "prefix" any subsequent request by the victim's * client after the renegotiation; this data may already * have been read and buffered. Forcing a connection * closure after the response ensures such data will be * discarded. Legimately pipelined HTTP requests will be * retried anyway with this approach. */ if (has_buffered_data(r)) { ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(02259) "insecure SSL re-negotiation required, but " "a pipelined request is present; keepalive " "disabled"); r->connection->keepalive = AP_CONN_CLOSE; } #if defined(SSL_get_secure_renegotiation_support) reneg_support = SSL_get_secure_renegotiation_support(ssl) ? "client does" : "client does not"; #else reneg_support = "server does not"; #endif /* Perform a full renegotiation. */ ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, APLOGNO(02260) "Performing full renegotiation: complete handshake " "protocol (%s support secure renegotiation)", reneg_support); SSL_set_session_id_context(ssl, (unsigned char *)&id, sizeof(id)); /* Toggle the renegotiation state to allow the new * handshake to proceed. */ sslconn->reneg_state = RENEG_ALLOW; SSL_renegotiate(ssl); SSL_do_handshake(ssl); if (!SSL_is_init_finished(ssl)) { ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(02225) "Re-negotiation request failed"); ssl_log_ssl_error(SSLLOG_MARK, APLOG_ERR, r->server); r->connection->keepalive = AP_CONN_CLOSE; return HTTP_FORBIDDEN; } ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, r, APLOGNO(02226) "Awaiting re-negotiation handshake"); /* XXX: Should replace setting state with SSL_renegotiate(ssl); * However, this causes failures in perl-framework currently, * perhaps pre-test if we have already negotiated? */ /* Need to trigger renegotiation handshake by reading. 
* Peeking 0 bytes actually works. * See: http://marc.info/?t=145493359200002&r=1&w=2 */ SSL_peek(ssl, peekbuf, 0); sslconn->reneg_state = RENEG_REJECT; if (!SSL_is_init_finished(ssl)) { ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(02261) "Re-negotiation handshake failed"); ssl_log_ssl_error(SSLLOG_MARK, APLOG_ERR, r->server); r->connection->keepalive = AP_CONN_CLOSE; return HTTP_FORBIDDEN; } /* Full renegotiation successfull, we now have handshaken with * this server's parameters. */ sslconn->server = r->server; } /* * Remember the peer certificate's DN */ if ((cert = SSL_get_peer_certificate(ssl))) { if (sslconn->client_cert) { X509_free(sslconn->client_cert); } sslconn->client_cert = cert; sslconn->client_dn = NULL; } /* * Finally check for acceptable renegotiation results */ if ((dc->nVerifyClient != SSL_CVERIFY_NONE) || (sc->server->auth.verify_mode != SSL_CVERIFY_NONE)) { BOOL do_verify = ((dc->nVerifyClient == SSL_CVERIFY_REQUIRE) || (sc->server->auth.verify_mode == SSL_CVERIFY_REQUIRE)); if (do_verify && (SSL_get_verify_result(ssl) != X509_V_OK)) { ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(02262) "Re-negotiation handshake failed: " "Client verification failed"); return HTTP_FORBIDDEN; } if (do_verify) { if ((peercert = SSL_get_peer_certificate(ssl)) == NULL) { ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(02263) "Re-negotiation handshake failed: " "Client certificate missing"); return HTTP_FORBIDDEN; } X509_free(peercert); } } /* * Also check that SSLCipherSuite has been enforced as expected. 
*/ if (cipher_list) { cipher = SSL_get_current_cipher(ssl); if (sk_SSL_CIPHER_find(cipher_list, cipher) < 0) { ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(02264) "SSL cipher suite not renegotiated: " "access to %s denied using cipher %s", r->filename, SSL_CIPHER_get_name(cipher)); return HTTP_FORBIDDEN; } } /* remember any new cipher suite used in renegotiation */ if (ncipher_suite) { sslconn->cipher_suite = ncipher_suite; } } /* If we're trying to have the user name set from a client * certificate then we need to set it here. This should be safe as * the user name probably isn't important from an auth checking point * of view as the certificate supplied acts in that capacity. * However, if FakeAuth is being used then this isn't the case so * we need to postpone setting the username until later. */ if ((dc->nOptions & SSL_OPT_FAKEBASICAUTH) == 0 && dc->szUserName) { char *val = ssl_var_lookup(r->pool, r->server, r->connection, r, (char *)dc->szUserName); if (val && val[0]) r->user = val; else ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r, APLOGNO(02227) "Failed to set r->user to '%s'", dc->szUserName); } /* * Check SSLRequire boolean expressions */ requires = dc->aRequirement; ssl_requires = (ssl_require_t *)requires->elts; for (i = 0; i < requires->nelts; i++) { ssl_require_t *req = &ssl_requires[i]; const char *errstring; ok = ap_expr_exec(r, req->mpExpr, &errstring); if (ok < 0) { ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(02265) "access to %s failed, reason: Failed to execute " "SSL requirement expression: %s", r->filename, errstring); /* remember forbidden access for strict require option */ apr_table_setn(r->notes, "ssl-access-forbidden", "1"); return HTTP_FORBIDDEN; } if (ok != 1) { ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, r, APLOGNO(02266) "Access to %s denied for %s " "(requirement expression not fulfilled)", r->filename, r->useragent_ip); ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, r, APLOGNO(02228) "Failed expression: %s", req->cpExpr); 
ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(02229)
                          "access to %s failed, reason: %s", r->filename,
                          "SSL requirement expression not fulfilled");

            /* remember forbidden access for strict require option */
            apr_table_setn(r->notes, "ssl-access-forbidden", "1");

            return HTTP_FORBIDDEN;
        }
    }

    /*
     * Else access is granted from our point of view (except vendor
     * handlers override). But we have to return DECLINED here instead
     * of OK, because mod_auth and other modules still might want to
     * deny access.
     */
    return DECLINED;
}

/*
 * Authentication Handler:
 * Fake a Basic authentication from the X509 client certificate.
 *
 * This must be run fairly early on to prevent a real authentication from
 * occurring, in particular it must be run before anything else that
 * authenticates a user. This means that the Module statement for this
 * module should be LAST in the Configuration file.
 */
int ssl_hook_UserCheck(request_rec *r)
{
    SSLConnRec *sslconn = myConnConfig(r->connection);
    SSLSrvConfigRec *sc = mySrvConfig(r->server);
    SSLDirConfigRec *dc = myDirConfig(r);
    char *user;
    const char *auth_line, *username, *password;

    /*
     * Additionally forbid access (again)
     * when strict require option is used.
     */
    if ((dc->nOptions & SSL_OPT_STRICTREQUIRE) &&
        (apr_table_get(r->notes, "ssl-access-forbidden")))
    {
        return HTTP_FORBIDDEN;
    }

    /*
     * We decline when we are in a subrequest. The Authorization header
     * would already be present if it was added in the main request.
     */
    if (!ap_is_initial_req(r)) {
        return DECLINED;
    }

    /*
     * Make sure the user is not able to fake the client certificate
     * based authentication by just entering an X.509 Subject DN
     * ("/XX=YYY/XX=YYY/..") as the username and "password" as the
     * password.
*/
    if ((auth_line = apr_table_get(r->headers_in, "Authorization"))) {
        if (strcEQ(ap_getword(r->pool, &auth_line, ' '), "Basic")) {
            /* skip optional whitespace between the scheme and credentials */
            while ((*auth_line == ' ') || (*auth_line == '\t')) {
                auth_line++;
            }

            auth_line = ap_pbase64decode(r->pool, auth_line);
            username = ap_getword_nulls(r->pool, &auth_line, ':');
            password = auth_line;

            /* a username that looks like a DN together with the fixed fake
             * password is the signature of a FakeBasicAuth spoofing attempt */
            if ((username[0] == '/') && strEQ(password, "password")) {
                ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, APLOGNO(02035)
                              "Encountered FakeBasicAuth spoof: %s", username);
                return HTTP_FORBIDDEN;
            }
        }
    }

    /*
     * We decline operation in various situations...
     * - SSLOptions +FakeBasicAuth not configured
     * - r->user already authenticated
     * - ssl not enabled
     * - client did not present a certificate
     */
    if (!((sc->enabled == SSL_ENABLED_TRUE || sc->enabled == SSL_ENABLED_OPTIONAL)
          && sslconn && sslconn->ssl && sslconn->client_cert)
        || !(dc->nOptions & SSL_OPT_FAKEBASICAUTH) || r->user)
    {
        return DECLINED;
    }

    /* lazily compute the one-line DN of the client certificate subject */
    if (!sslconn->client_dn) {
        X509_NAME *name = X509_get_subject_name(sslconn->client_cert);
        char *cp = X509_NAME_oneline(name, NULL, 0);
        sslconn->client_dn = apr_pstrdup(r->connection->pool, cp);
        OPENSSL_free(cp);
    }

    /* use SSLUserName if defined, otherwise use the full client DN */
    if (dc->szUserName) {
        user = ssl_var_lookup(r->pool, r->server, r->connection,
                              r, (char *)dc->szUserName);
        if (!user || !user[0]) {
            ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r, APLOGNO(02434)
                          "Failed to set FakeBasicAuth username to '%s', did not exist in certificate",
                          dc->szUserName);
            return DECLINED;
        }
    }
    else {
        user = (char *)sslconn->client_dn;
    }

    /*
     * Fake a password - which one would be immaterial, as, it seems, an empty
     * password in the users file would match ALL incoming passwords, if only
     * we were using the standard crypt library routine. Unfortunately, OpenSSL
     * "fixes" a "bug" in crypt and thus prevents blank passwords from
     * working.  (IMHO what they really fix is a bug in the users of the code
     * - failing to program correctly for shadow passwords).
We need,
     * therefore, to provide a password. This password can be matched by
     * adding the string "xxj31ZMTZzkVA" as the password in the user file.
     * This is just the crypted variant of the word "password" ;-)
     */
    auth_line = apr_pstrcat(r->pool, "Basic ",
                            ap_pbase64encode(r->pool,
                                             apr_pstrcat(r->pool, user,
                                                         ":password", NULL)),
                            NULL);
    apr_table_setn(r->headers_in, "Authorization", auth_line);

    ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, r, APLOGNO(02036)
                  "Faking HTTP Basic Auth header: \"Authorization: %s\"",
                  auth_line);

    return DECLINED;
}

/* authorization phase */
int ssl_hook_Auth(request_rec *r)
{
    SSLDirConfigRec *dc = myDirConfig(r);

    /*
     * Additionally forbid access (again)
     * when strict require option is used.
     */
    if ((dc->nOptions & SSL_OPT_STRICTREQUIRE) &&
        (apr_table_get(r->notes, "ssl-access-forbidden")))
    {
        return HTTP_FORBIDDEN;
    }

    return DECLINED;
}

/*
 *  Fixup Handler
 */

/* SSL_* environment variables exported for SSLOptions +StdEnvVars */
static const char *const ssl_hook_Fixup_vars[] = {
    "SSL_VERSION_INTERFACE",
    "SSL_VERSION_LIBRARY",
    "SSL_PROTOCOL",
    "SSL_SECURE_RENEG",
    "SSL_COMPRESS_METHOD",
    "SSL_CIPHER",
    "SSL_CIPHER_EXPORT",
    "SSL_CIPHER_USEKEYSIZE",
    "SSL_CIPHER_ALGKEYSIZE",
    "SSL_CLIENT_VERIFY",
    "SSL_CLIENT_M_VERSION",
    "SSL_CLIENT_M_SERIAL",
    "SSL_CLIENT_V_START",
    "SSL_CLIENT_V_END",
    "SSL_CLIENT_V_REMAIN",
    "SSL_CLIENT_S_DN",
    "SSL_CLIENT_I_DN",
    "SSL_CLIENT_A_KEY",
    "SSL_CLIENT_A_SIG",
    "SSL_CLIENT_CERT_RFC4523_CEA",
    "SSL_SERVER_M_VERSION",
    "SSL_SERVER_M_SERIAL",
    "SSL_SERVER_V_START",
    "SSL_SERVER_V_END",
    "SSL_SERVER_S_DN",
    "SSL_SERVER_I_DN",
    "SSL_SERVER_A_KEY",
    "SSL_SERVER_A_SIG",
    "SSL_SESSION_ID",
    "SSL_SESSION_RESUMED",
#ifdef HAVE_SRP
    "SSL_SRP_USER",
    "SSL_SRP_USERINFO",
#endif
    NULL
};

int ssl_hook_Fixup(request_rec *r)
{
    SSLConnRec *sslconn = myConnConfig(r->connection);
    SSLSrvConfigRec *sc = mySrvConfig(r->server);
    SSLDirConfigRec *dc = myDirConfig(r);
    apr_table_t *env = r->subprocess_env;
    char *var, *val = "";
#ifdef HAVE_TLSEXT
    const char *servername;
#endif
    STACK_OF(X509) *peer_certs;
    SSL *ssl;
    int i;

    /* no SSL state on this connection: fall back to the master
     * connection's state (slave connections, e.g. HTTP/2 streams) */
    if (!(sslconn &&
sslconn->ssl) && r->connection->master) {
        sslconn = myConnConfig(r->connection->master);
    }

    /*
     * Check to see if SSL is on
     */
    if (!(((sc->enabled == SSL_ENABLED_TRUE) || (sc->enabled == SSL_ENABLED_OPTIONAL))
          && sslconn && (ssl = sslconn->ssl))) {
        return DECLINED;
    }

    /*
     * Annotate the SSI/CGI environment with standard SSL information
     */
    /* the always present HTTPS (=HTTP over SSL) flag! */
    apr_table_setn(env, "HTTPS", "on");

#ifdef HAVE_TLSEXT
    /* add content of SNI TLS extension (if supplied with ClientHello) */
    if ((servername = SSL_get_servername(ssl, TLSEXT_NAMETYPE_host_name))) {
        apr_table_set(env, "SSL_TLS_SNI", servername);
    }
#endif

    /* standard SSL environment variables */
    if (dc->nOptions & SSL_OPT_STDENVVARS) {
        modssl_var_extract_dns(env, ssl, r->pool);
        modssl_var_extract_san_entries(env, ssl, r->pool);

        for (i = 0; ssl_hook_Fixup_vars[i]; i++) {
            var = (char *)ssl_hook_Fixup_vars[i];
            val = ssl_var_lookup(r->pool, r->server, r->connection, r, var);
            if (!strIsEmpty(val)) {
                apr_table_setn(env, var, val);
            }
        }
    }

    /*
     * On-demand bloat up the SSI/CGI environment with certificate data
     */
    if (dc->nOptions & SSL_OPT_EXPORTCERTDATA) {
        val = ssl_var_lookup(r->pool, r->server, r->connection,
                             r, "SSL_SERVER_CERT");
        apr_table_setn(env, "SSL_SERVER_CERT", val);

        val = ssl_var_lookup(r->pool, r->server, r->connection,
                             r, "SSL_CLIENT_CERT");
        apr_table_setn(env, "SSL_CLIENT_CERT", val);

        if ((peer_certs = (STACK_OF(X509) *)SSL_get_peer_cert_chain(ssl))) {
            for (i = 0; i < sk_X509_num(peer_certs); i++) {
                var = apr_psprintf(r->pool, "SSL_CLIENT_CERT_CHAIN_%d", i);
                val = ssl_var_lookup(r->pool, r->server, r->connection,
                                     r, var);
                if (val) {
                    apr_table_setn(env, var, val);
                }
            }
        }
    }

#ifdef SSL_get_secure_renegotiation_support
    apr_table_setn(r->notes, "ssl-secure-reneg",
                   SSL_get_secure_renegotiation_support(ssl) ?
"1" : "0");
#endif

    return DECLINED;
}

/*  _________________________________________________________________
**
**  Authz providers for use with mod_authz_core
**  _________________________________________________________________
*/

/* "Require ssl": granted iff the connection carries SSL state */
static authz_status ssl_authz_require_ssl_check(request_rec *r,
                                                const char *require_line,
                                                const void *parsed)
{
    SSLConnRec *sslconn = myConnConfig(r->connection);
    SSL *ssl = sslconn ? sslconn->ssl : NULL;

    if (ssl)
        return AUTHZ_GRANTED;
    else
        return AUTHZ_DENIED;
}

static const char *ssl_authz_require_ssl_parse(cmd_parms *cmd,
                                               const char *require_line,
                                               const void **parsed)
{
    if (require_line && require_line[0])
        return "'Require ssl' does not take arguments";

    return NULL;
}

const authz_provider ssl_authz_provider_require_ssl =
{
    &ssl_authz_require_ssl_check,
    &ssl_authz_require_ssl_parse,
};

/* "Require ssl-verify-client": granted iff the peer certificate
 * verified cleanly (no recorded verify error/info override) */
static authz_status ssl_authz_verify_client_check(request_rec *r,
                                                  const char *require_line,
                                                  const void *parsed)
{
    SSLConnRec *sslconn = myConnConfig(r->connection);
    SSL *ssl = sslconn ?
sslconn->ssl : NULL;

    if (!ssl)
        return AUTHZ_DENIED;

    if (sslconn->verify_error == NULL &&
        sslconn->verify_info == NULL &&
        SSL_get_verify_result(ssl) == X509_V_OK)
    {
        X509 *xs = SSL_get_peer_certificate(ssl);

        if (xs) {
            /* drop the reference taken by SSL_get_peer_certificate() */
            X509_free(xs);
            return AUTHZ_GRANTED;
        }
        else {
            /* xs is NULL here; X509_free(NULL) is a harmless no-op */
            X509_free(xs);
        }
    }

    return AUTHZ_DENIED;
}

static const char *ssl_authz_verify_client_parse(cmd_parms *cmd,
                                                 const char *require_line,
                                                 const void **parsed)
{
    if (require_line && require_line[0])
        return "'Require ssl-verify-client' does not take arguments";

    return NULL;
}

const authz_provider ssl_authz_provider_verify_client =
{
    &ssl_authz_verify_client_check,
    &ssl_authz_verify_client_parse,
};

/*  _________________________________________________________________
**
**  OpenSSL Callback Functions
**  _________________________________________________________________
*/

/*
 * Hand out standard DH parameters, based on the authentication strength
 */
DH *ssl_callback_TmpDH(SSL *ssl, int export, int keylen)
{
    conn_rec *c = (conn_rec *)SSL_get_app_data(ssl);
    EVP_PKEY *pkey;
    int type;

#ifdef SSL_CERT_SET_SERVER
    /*
     * When multiple certs/keys are configured for the SSL_CTX: make sure
     * that we get the private key which is indeed used for the current
     * SSL connection (available in OpenSSL 1.0.2 or later only)
     */
    SSL_set_current_cert(ssl, SSL_CERT_SET_SERVER);
#endif
    pkey = SSL_get_privatekey(ssl);

#if OPENSSL_VERSION_NUMBER < 0x10100000L
    type = pkey ? EVP_PKEY_type(pkey->type) : EVP_PKEY_NONE;
#else
    type = pkey ? EVP_PKEY_base_id(pkey) : EVP_PKEY_NONE;
#endif

    /*
     * OpenSSL will call us with either keylen == 512 or keylen == 1024
     * (see the definition of SSL_EXPORT_PKEYLENGTH in ssl_locl.h).
     * Adjust the DH parameter length according to the size of the
     * RSA/DSA private key used for the current connection, and always
     * use at least 1024-bit parameters.
     * Note: This may cause interoperability issues with implementations
     * which limit their DH support to 1024 bit - e.g. Java 7 and earlier.
     * In this case, SSLCertificateFile can be used to specify fixed
     * 1024-bit DH parameters (with the effect that OpenSSL skips this
     * callback).
     */
    if ((type == EVP_PKEY_RSA) || (type == EVP_PKEY_DSA)) {
        keylen = EVP_PKEY_bits(pkey);
    }

    ap_log_cerror(APLOG_MARK, APLOG_TRACE2, 0, c,
                  "handing out built-in DH parameters for %d-bit authenticated connection", keylen);

    return modssl_get_dh_params(keylen);
}

/*
 * This OpenSSL callback function is called when OpenSSL
 * does client authentication and verifies the certificate chain.
 */
int ssl_callback_SSLVerify(int ok, X509_STORE_CTX *ctx)
{
    /* Get Apache context back through OpenSSL context */
    SSL *ssl = X509_STORE_CTX_get_ex_data(ctx,
                                          SSL_get_ex_data_X509_STORE_CTX_idx());
    conn_rec *conn = (conn_rec *)SSL_get_app_data(ssl);
    request_rec *r = (request_rec *)modssl_get_app_data2(ssl);
    server_rec *s = r ? r->server : mySrvFromConn(conn);
    SSLSrvConfigRec *sc = mySrvConfig(s);
    SSLConnRec *sslconn = myConnConfig(conn);
    /* per-directory config is only available while a request is active */
    SSLDirConfigRec *dc = r ? myDirConfig(r) : sslconn->dc;
    modssl_ctx_t *mctx = myCtxConfig(sslconn, sc);
    int crl_check_mode = mctx->crl_check_mask & ~SSL_CRLCHECK_FLAGS;

    /* Get verify ingredients */
    int errnum = X509_STORE_CTX_get_error(ctx);
    int errdepth = X509_STORE_CTX_get_error_depth(ctx);
    int depth, verify;

    /*
     * Log verification information
     */
    ssl_log_cxerror(SSLLOG_MARK, APLOG_DEBUG, 0, conn,
                    X509_STORE_CTX_get_current_cert(ctx), APLOGNO(02275)
                    "Certificate Verification, depth %d, "
                    "CRL checking mode: %s (%x)", errdepth,
                    crl_check_mode == SSL_CRLCHECK_CHAIN ? "chain" :
                    crl_check_mode == SSL_CRLCHECK_LEAF ? "leaf" : "none",
                    mctx->crl_check_mask);

    /*
     * Check for optionally acceptable non-verifiable issuer situation
     */
    if (dc && (dc->nVerifyClient != SSL_CVERIFY_UNSET)) {
        verify = dc->nVerifyClient;
    }
    else {
        verify = mctx->auth.verify_mode;
    }

    if (verify == SSL_CVERIFY_NONE) {
        /*
         * SSLProxyVerify is either not configured or set to "none".
* (this callback doesn't happen in the server context if SSLVerify
         * is not configured or set to "none")
         */
        return TRUE;
    }

    if (ssl_verify_error_is_optional(errnum) &&
        (verify == SSL_CVERIFY_OPTIONAL_NO_CA))
    {
        ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, conn, APLOGNO(02037)
                      "Certificate Verification: Verifiable Issuer is "
                      "configured as optional, therefore we're accepting "
                      "the certificate");

        sslconn->verify_info = "GENEROUS";
        ok = TRUE;
    }

    /*
     * Expired certificates vs. "expired" CRLs: by default, OpenSSL
     * turns X509_V_ERR_CRL_HAS_EXPIRED into a "certificate_expired(45)"
     * SSL alert, but that's not really the message we should convey to the
     * peer (at the very least, it's confusing, and in many cases, it's also
     * inaccurate, as the certificate itself may very well not have expired
     * yet). We set the X509_STORE_CTX error to something which OpenSSL's
     * s3_both.c:ssl_verify_alarm_type() maps to SSL_AD_CERTIFICATE_UNKNOWN,
     * i.e. the peer will receive a "certificate_unknown(46)" alert.
     * We do not touch errnum, though, so that later on we will still log
     * the "real" error, as returned by OpenSSL.
     */
    if (!ok && errnum == X509_V_ERR_CRL_HAS_EXPIRED) {
        X509_STORE_CTX_set_error(ctx, -1);
    }

    /* "no CRL for this cert" is tolerated when the admin opted in */
    if (!ok && errnum == X509_V_ERR_UNABLE_TO_GET_CRL
            && (mctx->crl_check_mask & SSL_CRLCHECK_NO_CRL_FOR_CERT_OK)) {
        ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, conn,
                      "Certificate Verification: Temporary error (%d): %s: "
                      "optional therefore we're accepting the certificate",
                      errnum, X509_verify_cert_error_string(errnum));
        X509_STORE_CTX_set_error(ctx, X509_V_OK);
        errnum = X509_V_OK;
        ok = TRUE;
    }

#ifndef OPENSSL_NO_OCSP
    /*
     * Perform OCSP-based revocation checks
     */
    if (ok && sc->server->ocsp_enabled == TRUE) {
        /* If there was an optional verification error, it's not
         * possible to perform OCSP validation since the issuer may be
         * missing/untrusted.  Fail in that case.
         */
        if (ssl_verify_error_is_optional(errnum)) {
            X509_STORE_CTX_set_error(ctx, X509_V_ERR_APPLICATION_VERIFICATION);
            errnum = X509_V_ERR_APPLICATION_VERIFICATION;
            ap_log_cerror(APLOG_MARK, APLOG_ERR, 0, conn, APLOGNO(02038)
                          "cannot perform OCSP validation for cert "
                          "if issuer has not been verified "
                          "(optional_no_ca configured)");
            ok = FALSE;
        }
        else {
            ok = modssl_verify_ocsp(ctx, sc, s, conn, conn->pool);
            if (!ok) {
                errnum = X509_STORE_CTX_get_error(ctx);
            }
        }
    }
#endif

    /*
     * If we already know it's not ok, log the real reason
     */
    if (!ok) {
        if (APLOGcinfo(conn)) {
            ssl_log_cxerror(SSLLOG_MARK, APLOG_INFO, 0, conn,
                            X509_STORE_CTX_get_current_cert(ctx), APLOGNO(02276)
                            "Certificate Verification: Error (%d): %s",
                            errnum, X509_verify_cert_error_string(errnum));
        }
        else {
            ap_log_cerror(APLOG_MARK, APLOG_ERR, 0, conn, APLOGNO(02039)
                          "Certificate Verification: Error (%d): %s",
                          errnum, X509_verify_cert_error_string(errnum));
        }

        /* invalidate any previously cached client certificate/DN */
        if (sslconn->client_cert) {
            X509_free(sslconn->client_cert);
            sslconn->client_cert = NULL;
        }
        sslconn->client_dn = NULL;
        sslconn->verify_error = X509_verify_cert_error_string(errnum);
    }

    /*
     * Finally check the depth of the certificate verification
     */
    if (dc && (dc->nVerifyDepth != UNSET)) {
        depth = dc->nVerifyDepth;
    }
    else {
        depth = mctx->auth.verify_depth;
    }

    if (errdepth > depth) {
        ap_log_cerror(APLOG_MARK, APLOG_ERR, 0, conn, APLOGNO(02040)
                      "Certificate Verification: Certificate Chain too long "
                      "(chain has %d certificates, but maximum allowed are "
                      "only %d)",
                      errdepth, depth);

        errnum = X509_V_ERR_CERT_CHAIN_TOO_LONG;
        sslconn->verify_error = X509_verify_cert_error_string(errnum);

        ok = FALSE;
    }

    /*
     * And finally signal OpenSSL the (perhaps changed) state
     */
    return ok;
}

#define SSLPROXY_CERT_CB_LOG_FMT \
   "Proxy client certificate callback: (%s) "

static void modssl_proxy_info_log(conn_rec *c,
                                  X509_INFO *info,
                                  const char *msg)
{
    ssl_log_cxerror(SSLLOG_MARK, APLOG_DEBUG, 0, c, info->x509, APLOGNO(02277)
                    SSLPROXY_CERT_CB_LOG_FMT "%s, sending",
(mySrvConfigFromConn(c))->vhost_id, msg);
}

/*
 * caller will decrement the cert and key reference
 * so we need to increment here to prevent them from
 * being freed.
 */
#if OPENSSL_VERSION_NUMBER < 0x10100000L
#define modssl_set_cert_info(info, cert, pkey) \
    *cert = info->x509; \
    CRYPTO_add(&(*cert)->references, +1, CRYPTO_LOCK_X509); \
    *pkey = info->x_pkey->dec_pkey; \
    CRYPTO_add(&(*pkey)->references, +1, CRYPTO_LOCK_X509_PKEY)
#else
#define modssl_set_cert_info(info, cert, pkey) \
    *cert = info->x509; \
    X509_up_ref(*cert); \
    *pkey = info->x_pkey->dec_pkey; \
    EVP_PKEY_up_ref(*pkey);
#endif

/* Select a client certificate/key for an outgoing proxy connection,
 * preferring one whose issuer matches the remote server's CA list. */
int ssl_callback_proxy_cert(SSL *ssl, X509 **x509, EVP_PKEY **pkey)
{
    conn_rec *c = (conn_rec *)SSL_get_app_data(ssl);
    server_rec *s = mySrvFromConn(c);
    SSLSrvConfigRec *sc = mySrvConfig(s);
    SSLDirConfigRec *dc = myDirConfigFromConn(c);
    X509_NAME *ca_name, *issuer, *ca_issuer;
    X509_INFO *info;
    X509 *ca_cert;
    STACK_OF(X509_NAME) *ca_list;
    STACK_OF(X509_INFO) *certs;
    STACK_OF(X509) *ca_certs;
    STACK_OF(X509) **ca_cert_chains;
    int i, j, k;

    ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(02267)
                 SSLPROXY_CERT_CB_LOG_FMT "entered", sc->vhost_id);

    certs = (dc && dc->proxy) ? dc->proxy->pkp->certs : NULL;

    if (!certs || (sk_X509_INFO_num(certs) <= 0)) {
        ap_log_error(APLOG_MARK, APLOG_WARNING, 0, s, APLOGNO(02268)
                     SSLPROXY_CERT_CB_LOG_FMT
                     "downstream server wanted client certificate "
                     "but none are configured", sc->vhost_id);
        return FALSE;
    }

    ca_list = SSL_get_client_CA_list(ssl);

    if (!ca_list || (sk_X509_NAME_num(ca_list) <= 0)) {
        /*
         * downstream server didn't send us a list of acceptable CA certs,
         * so we send the first client cert in the list.
         */
        info = sk_X509_INFO_value(certs, 0);

        modssl_proxy_info_log(c, info, APLOGNO(02278) "no acceptable CA list");

        modssl_set_cert_info(info, x509, pkey);

        return TRUE;
    }

    ca_cert_chains = dc->proxy->pkp->ca_certs;

    for (i = 0; i < sk_X509_NAME_num(ca_list); i++) {
        ca_name = sk_X509_NAME_value(ca_list, i);

        for (j = 0; j < sk_X509_INFO_num(certs); j++) {
            info = sk_X509_INFO_value(certs, j);
            issuer = X509_get_issuer_name(info->x509);

            /* Search certs (by issuer name) one by one*/
            if (X509_NAME_cmp(issuer, ca_name) == 0) {
                modssl_proxy_info_log(c, info, APLOGNO(02279)
                                      "found acceptable cert");

                modssl_set_cert_info(info, x509, pkey);

                return TRUE;
            }

            if (ca_cert_chains) {
                /*
                 * Failed to find direct issuer - search intermediates
                 * (by issuer name), if provided.
                 */
                ca_certs = ca_cert_chains[j];

                for (k = 0; k < sk_X509_num(ca_certs); k++) {
                    ca_cert = sk_X509_value(ca_certs, k);
                    ca_issuer = X509_get_issuer_name(ca_cert);

                    if(X509_NAME_cmp(ca_issuer, ca_name) == 0 ) {
                        modssl_proxy_info_log(c, info, APLOGNO(02280)
                                              "found acceptable cert by intermediate CA");

                        modssl_set_cert_info(info, x509, pkey);

                        return TRUE;
                    }
                } /* end loop through chained certs */
            }
        } /* end loop through available certs */
    }

    ap_log_error(APLOG_MARK, APLOG_DEBUG, 0, s, APLOGNO(02269)
                 SSLPROXY_CERT_CB_LOG_FMT "no client certificate found!?",
                 sc->vhost_id);

    return FALSE;
}

/* Emit a debug-level trace line for an inter-process session-cache
 * operation; silently does nothing unless debug logging is enabled. */
static void ssl_session_log(server_rec *s,
                            const char *request,
                            IDCONST unsigned char *id,
                            unsigned int idlen,
                            const char *status,
                            const char *result,
                            long timeout)
{
    char buf[MODSSL_SESSION_ID_STRING_LEN];
    char timeout_str[56] = {'\0'};

    if (!APLOGdebug(s)) {
        return;
    }

    if (timeout) {
        apr_snprintf(timeout_str, sizeof(timeout_str),
                     "timeout=%lds ", timeout);
    }

    ap_log_error(APLOG_MARK, APLOG_TRACE2, 0, s,
                 "Inter-Process Session Cache: "
                 "request=%s status=%s id=%s %s(session %s)",
                 request, status,
                 modssl_SSL_SESSION_id2sz(id, idlen, buf, sizeof(buf)),
                 timeout_str, result);
}

/*
 * This callback function is executed by OpenSSL whenever a new SSL_SESSION
is
 * added to the internal OpenSSL session cache. We use this hook to spread the
 * SSL_SESSION also to the inter-process disk-cache to share it with our
 * other Apache pre-forked server processes.
 */
int ssl_callback_NewSessionCacheEntry(SSL *ssl, SSL_SESSION *session)
{
    /* Get Apache context back through OpenSSL context */
    conn_rec *conn = (conn_rec *)SSL_get_app_data(ssl);
    server_rec *s = mySrvFromConn(conn);
    SSLSrvConfigRec *sc = mySrvConfig(s);
    long timeout = sc->session_cache_timeout;
    BOOL rc;
    IDCONST unsigned char *id;
    unsigned int idlen;

    /*
     * Set the timeout also for the internal OpenSSL cache, because this way
     * our inter-process cache is consulted only when it's really necessary.
     */
    SSL_set_timeout(session, timeout);

    /*
     * Store the SSL_SESSION in the inter-process cache with the
     * same expire time, so it expires automatically there, too.
     */
#ifdef OPENSSL_NO_SSL_INTERN
    id = (unsigned char *)SSL_SESSION_get_id(session, &idlen);
#else
    id = session->session_id;
    idlen = session->session_id_length;
#endif

    rc = ssl_scache_store(s, id, idlen,
                          apr_time_from_sec(SSL_SESSION_get_time(session)
                                            + timeout),
                          session, conn->pool);

    ssl_session_log(s, "SET", id, idlen,
                    rc == TRUE ? "OK" : "BAD",
                    "caching", timeout);

    /*
     * return 0 which means to OpenSSL that the session is still
     * valid and was not freed by us with SSL_SESSION_free().
     */
    return 0;
}

/*
 * This callback function is executed by OpenSSL whenever a
 * SSL_SESSION is looked up in the internal OpenSSL cache and it
 * was not found. We use this to lookup the SSL_SESSION in the
 * inter-process disk-cache where it was perhaps stored by one
 * of our other Apache pre-forked server processes.
 */
SSL_SESSION *ssl_callback_GetSessionCacheEntry(SSL *ssl,
                                               IDCONST unsigned char *id,
                                               int idlen, int *do_copy)
{
    /* Get Apache context back through OpenSSL context */
    conn_rec *conn = (conn_rec *)SSL_get_app_data(ssl);
    server_rec *s = mySrvFromConn(conn);
    SSL_SESSION *session;

    /*
     * Try to retrieve the SSL_SESSION from the inter-process cache
     */
    session = ssl_scache_retrieve(s, id, idlen, conn->pool);

    ssl_session_log(s, "GET", id, idlen,
                    session ? "FOUND" : "MISSED",
                    session ? "reuse" : "renewal", 0);

    /*
     * Return NULL or the retrieved SSL_SESSION. But indicate (by
     * setting do_copy to 0) that the reference count on the
     * SSL_SESSION should not be incremented by the SSL library,
     * because we will no longer hold a reference to it ourself.
     */
    *do_copy = 0;

    return session;
}

/*
 * This callback function is executed by OpenSSL whenever a
 * SSL_SESSION is removed from the internal OpenSSL cache.
 * We use this to remove the SSL_SESSION in the inter-process
 * disk-cache, too.
 */
void ssl_callback_DelSessionCacheEntry(SSL_CTX *ctx, SSL_SESSION *session)
{
    server_rec *s;
    SSLSrvConfigRec *sc;
    IDCONST unsigned char *id;
    unsigned int idlen;

    /*
     * Get Apache context back through OpenSSL context
     */
    if (!(s = (server_rec *)SSL_CTX_get_app_data(ctx))) {
        return; /* on server shutdown Apache is already gone */
    }
    sc = mySrvConfig(s);

    /*
     * Remove the SSL_SESSION from the inter-process cache
     */
#ifdef OPENSSL_NO_SSL_INTERN
    id = (unsigned char *)SSL_SESSION_get_id(session, &idlen);
#else
    id = session->session_id;
    idlen = session->session_id_length;
#endif

    /* TODO: Do we need a temp pool here, or are we always shutting down? */
    ssl_scache_remove(s, id, idlen, sc->mc->pPool);

    ssl_session_log(s, "REM", id, idlen,
                    "OK", "dead", 0);

    return;
}

/* Dump debugging info trace to the log file.
*/
static void log_tracing_state(const SSL *ssl, conn_rec *c,
                              server_rec *s, int where, int rc)
{
    /*
     * create the various trace messages; each branch maps one SSL_CB_*
     * state-info flag to a TRACE3-level log line.
     */
    if (where & SSL_CB_HANDSHAKE_START) {
        ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, c,
                      "%s: Handshake: start", MODSSL_LIBRARY_NAME);
    }
    else if (where & SSL_CB_HANDSHAKE_DONE) {
        ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, c,
                      "%s: Handshake: done", MODSSL_LIBRARY_NAME);
    }
    else if (where & SSL_CB_LOOP) {
        ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, c,
                      "%s: Loop: %s",
                      MODSSL_LIBRARY_NAME, SSL_state_string_long(ssl));
    }
    else if (where & SSL_CB_READ) {
        ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, c,
                      "%s: Read: %s",
                      MODSSL_LIBRARY_NAME, SSL_state_string_long(ssl));
    }
    else if (where & SSL_CB_WRITE) {
        ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, c,
                      "%s: Write: %s",
                      MODSSL_LIBRARY_NAME, SSL_state_string_long(ssl));
    }
    else if (where & SSL_CB_ALERT) {
        /* For alerts, rc encodes the alert type/description. */
        char *str = (where & SSL_CB_READ) ? "read" : "write";
        ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, c,
                      "%s: Alert: %s:%s:%s",
                      MODSSL_LIBRARY_NAME, str,
                      SSL_alert_type_string_long(rc),
                      SSL_alert_desc_string_long(rc));
    }
    else if (where & SSL_CB_EXIT) {
        /* rc == 0: hard failure; rc < 0: error (possibly retryable). */
        if (rc == 0) {
            ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, c,
                          "%s: Exit: failed in %s",
                          MODSSL_LIBRARY_NAME, SSL_state_string_long(ssl));
        }
        else if (rc < 0) {
            ap_log_cerror(APLOG_MARK, APLOG_TRACE3, 0, c,
                          "%s: Exit: error in %s",
                          MODSSL_LIBRARY_NAME, SSL_state_string_long(ssl));
        }
    }

    /*
     * Because SSL renegotiations can happen at any time (not only after
     * SSL_accept()), the best way to log the current connection details is
     * right after a finished handshake.
     */
    if (where & SSL_CB_HANDSHAKE_DONE) {
        ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, c, APLOGNO(02041)
                      "Protocol: %s, Cipher: %s (%s/%s bits)",
                      ssl_var_lookup(NULL, s, c, NULL, "SSL_PROTOCOL"),
                      ssl_var_lookup(NULL, s, c, NULL, "SSL_CIPHER"),
                      ssl_var_lookup(NULL, s, c, NULL, "SSL_CIPHER_USEKEYSIZE"),
                      ssl_var_lookup(NULL, s, c, NULL, "SSL_CIPHER_ALGKEYSIZE"));
    }
}

/*
 * This callback function is executed while OpenSSL processes the SSL
 * handshake and does SSL record layer stuff.  It's used to trap
 * client-initiated renegotiations, and for dumping everything to the
 * log.
 */
void ssl_callback_Info(const SSL *ssl, int where, int rc)
{
    conn_rec *c;
    server_rec *s;
    SSLConnRec *scr;

    /* Retrieve the conn_rec and the associated SSLConnRec. */
    if ((c = (conn_rec *)SSL_get_app_data((SSL *)ssl)) == NULL) {
        return;
    }

    if ((scr = myConnConfig(c)) == NULL) {
        return;
    }

    /* If the reneg state is to reject renegotiations, check the SSL
     * state machine and move to ABORT if a Client Hello is being
     * read. */
    if (!scr->is_proxy &&
        (where & SSL_CB_HANDSHAKE_START) &&
        scr->reneg_state == RENEG_REJECT) {
        /* Mark for abort; the I/O layer is expected to act on this flag
         * (RENEG_ABORT) — enforcement happens outside this callback. */
        scr->reneg_state = RENEG_ABORT;
        ap_log_cerror(APLOG_MARK, APLOG_ERR, 0, c, APLOGNO(02042)
                      "rejecting client initiated renegotiation");
    }
    /* If the first handshake is complete, change state to reject any
     * subsequent client-initiated renegotiation. */
    else if ((where & SSL_CB_HANDSHAKE_DONE) && scr->reneg_state == RENEG_INIT) {
        scr->reneg_state = RENEG_REJECT;
    }

    /* Emit the detailed state trace only when debug logging is enabled. */
    s = mySrvFromConn(c);
    if (s && APLOGdebug(s)) {
        log_tracing_state(ssl, c, s, where, rc);
    }
}

#ifdef HAVE_TLSEXT
/*
 * This function sets the virtual host from an extended
 * client hello with a server name indication extension ("SNI", cf. RFC 6066).
*/
static apr_status_t init_vhost(conn_rec *c, SSL *ssl)
{
    const char *servername;

    if (c) {
        SSLConnRec *sslcon = myConnConfig(c);

        if (sslcon->server != c->base_server) {
            /* already found the vhost */
            return APR_SUCCESS;
        }

        servername = SSL_get_servername(ssl, TLSEXT_NAMETYPE_host_name);
        if (servername) {
            /* ssl_find_vhost is invoked per candidate vhost and performs
             * the actual SSL_CTX switch when a name matches. */
            if (ap_vhost_iterate_given_conn(c, ssl_find_vhost,
                                            (void *)servername)) {
                ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, c, APLOGNO(02043)
                              "SSL virtual host for servername %s found",
                              servername);

                return APR_SUCCESS;
            }
            else {
                ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, c, APLOGNO(02044)
                              "No matching SSL virtual host for servername "
                              "%s found (using default/first virtual host)",
                              servername);
                /*
                 * RFC 6066 section 3 says "It is NOT RECOMMENDED to send
                 * a warning-level unrecognized_name(112) alert, because
                 * the client's behavior in response to warning-level alerts
                 * is unpredictable."
                 *
                 * To maintain backwards compatibility in mod_ssl, we
                 * no longer send any alert (neither warning- nor fatal-level),
                 * i.e. we take the second action suggested in RFC 6066:
                 * "If the server understood the ClientHello extension but
                 * does not recognize the server name, the server SHOULD take
                 * one of two actions: either abort the handshake by sending
                 * a fatal-level unrecognized_name(112) alert or continue
                 * the handshake."
                 */
            }
        }
        else {
            ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, c, APLOGNO(02645)
                          "Server name not provided via TLS extension "
                          "(using default/first virtual host)");
        }
    }

    return APR_NOTFOUND;
}

/*
 * This callback function is executed when OpenSSL encounters an extended
 * client hello with a server name indication extension ("SNI", cf. RFC 6066).
 */
int ssl_callback_ServerNameIndication(SSL *ssl, int *al, modssl_ctx_t *mctx)
{
    conn_rec *c = (conn_rec *)SSL_get_app_data(ssl);
    apr_status_t status = init_vhost(c, ssl);

    /* NOACK (not an error) when no matching vhost was found. */
    return (status == APR_SUCCESS)?
        SSL_TLSEXT_ERR_OK : SSL_TLSEXT_ERR_NOACK;
}

/*
 * Find a (name-based) SSL virtual host where either the ServerName
 * or one of the ServerAliases matches the supplied name (to be used
 * with ap_vhost_iterate_given_conn())
 */
static int ssl_find_vhost(void *servername, conn_rec *c, server_rec *s)
{
    SSLSrvConfigRec *sc;
    SSL *ssl;
    BOOL found;
    SSLConnRec *sslcon;

    found = ssl_util_vhost_matches(servername, s);

    /* set SSL_CTX (if matched) */
    sslcon = myConnConfig(c);
    if (found && (ssl = sslcon->ssl) &&
        (sc = mySrvConfig(s))) {
        SSL_CTX *ctx = SSL_set_SSL_CTX(ssl, sc->server->ssl_ctx);
        /*
         * SSL_set_SSL_CTX() only deals with the server cert,
         * so we need to duplicate a few additional settings
         * from the ctx by hand
         */
        SSL_set_options(ssl, SSL_CTX_get_options(ctx));
        if ((SSL_get_verify_mode(ssl) == SSL_VERIFY_NONE) ||
            (SSL_num_renegotiations(ssl) == 0)) {
            /*
             * Only initialize the verification settings from the ctx
             * if they are not yet set, or if we're called when a new
             * SSL connection is set up (num_renegotiations == 0).
             * Otherwise, we would possibly reset a per-directory
             * configuration which was put into effect by ssl_hook_Access.
             */
            SSL_set_verify(ssl, SSL_CTX_get_verify_mode(ctx),
                           SSL_CTX_get_verify_callback(ctx));
        }

        /*
         * Adjust the session id context. ssl_init_ssl_connection()
         * always picks the configuration of the first vhost when
         * calling SSL_new(), but we want to tie the session to the
         * vhost we have just switched to. Again, we have to make sure
         * that we're not overwriting a session id context which was
         * possibly set in ssl_hook_Access(), before triggering
         * a renegotiation.
         */
        if (SSL_num_renegotiations(ssl) == 0) {
            /* Session id context is the MD5 of the vhost id (hex form,
             * hence APR_MD5_DIGESTSIZE*2 bytes). */
            unsigned char *sid_ctx =
                (unsigned char *)ap_md5_binary(c->pool,
                                               (unsigned char *)sc->vhost_id,
                                               sc->vhost_id_len);
            SSL_set_session_id_context(ssl, sid_ctx, APR_MD5_DIGESTSIZE*2);
        }

        /*
         * Save the found server into our SSLConnRec for later
         * retrieval
         */
        sslcon->server = s;
        sslcon->cipher_suite = sc->server->auth.cipher_suite;

        ap_update_child_status_from_server(c->sbh, SERVER_BUSY_READ, c, s);
        /*
         * There is one special filter callback, which is set
         * very early depending on the base_server's log level.
         * If this is not the first vhost we're now selecting
         * (and the first vhost doesn't use APLOG_TRACE4), then
         * we need to set that callback here.
         */
        if (APLOGtrace4(s)) {
            BIO *rbio = SSL_get_rbio(ssl),
                *wbio = SSL_get_wbio(ssl);
            BIO_set_callback(rbio, ssl_io_data_cb);
            BIO_set_callback_arg(rbio, (void *)ssl);
            if (wbio && wbio != rbio) {
                BIO_set_callback(wbio, ssl_io_data_cb);
                BIO_set_callback_arg(wbio, (void *)ssl);
            }
        }

        return 1;
    }

    return 0;
}
#endif /* HAVE_TLSEXT */

#ifdef HAVE_TLS_SESSION_TICKETS
/*
 * This callback function is executed when OpenSSL needs a key for encrypting/
 * decrypting a TLS session ticket (RFC 5077) and a ticket key file has been
 * configured through SSLSessionTicketKeyFile.
*/ int ssl_callback_SessionTicket(SSL *ssl, unsigned char *keyname, unsigned char *iv, EVP_CIPHER_CTX *cipher_ctx, HMAC_CTX *hctx, int mode) { conn_rec *c = (conn_rec *)SSL_get_app_data(ssl); server_rec *s = mySrvFromConn(c); SSLSrvConfigRec *sc = mySrvConfig(s); SSLConnRec *sslconn = myConnConfig(c); modssl_ctx_t *mctx = myCtxConfig(sslconn, sc); modssl_ticket_key_t *ticket_key = mctx->ticket_key; if (mode == 1) { /* * OpenSSL is asking for a key for encrypting a ticket, * see s3_srvr.c:ssl3_send_newsession_ticket() */ if (ticket_key == NULL) { /* should never happen, but better safe than sorry */ return -1; } memcpy(keyname, ticket_key->key_name, 16); RAND_bytes(iv, EVP_MAX_IV_LENGTH); EVP_EncryptInit_ex(cipher_ctx, EVP_aes_128_cbc(), NULL, ticket_key->aes_key, iv); HMAC_Init_ex(hctx, ticket_key->hmac_secret, 16, tlsext_tick_md(), NULL); ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, c, APLOGNO(02289) "TLS session ticket key for %s successfully set, " "creating new session ticket", sc->vhost_id); return 1; } else if (mode == 0) { /* * OpenSSL is asking for the decryption key, * see t1_lib.c:tls_decrypt_ticket() */ /* check key name */ if (ticket_key == NULL || memcmp(keyname, ticket_key->key_name, 16)) { return 0; } EVP_DecryptInit_ex(cipher_ctx, EVP_aes_128_cbc(), NULL, ticket_key->aes_key, iv); HMAC_Init_ex(hctx, ticket_key->hmac_secret, 16, tlsext_tick_md(), NULL); ap_log_cerror(APLOG_MARK, APLOG_DEBUG, 0, c, APLOGNO(02290) "TLS session ticket key for %s successfully set, " "decrypting existing session ticket", sc->vhost_id); return 1; } /* OpenSSL is not expected to call us with modes other than 1 or 0 */ return -1; } #endif /* HAVE_TLS_SESSION_TICKETS */ #ifdef HAVE_TLS_ALPN /* * This callback function is executed when the TLS Application-Layer * Protocol Negotiation Extension (ALPN, RFC 7301) is triggered by the Client * Hello, giving a list of desired protocol names (in descending preference) * to the server. 
* The callback has to select a protocol name or return an error if none of * the clients preferences is supported. * The selected protocol does not have to be on the client list, according * to RFC 7301, so no checks are performed. * The client protocol list is serialized as length byte followed by ASCII * characters (not null-terminated), followed by the next protocol name. */ int ssl_callback_alpn_select(SSL *ssl, const unsigned char **out, unsigned char *outlen, const unsigned char *in, unsigned int inlen, void *arg) { conn_rec *c = (conn_rec*)SSL_get_app_data(ssl); SSLConnRec *sslconn = myConnConfig(c); apr_array_header_t *client_protos; const char *proposed; size_t len; int i; /* If the connection object is not available, * then there's nothing for us to do. */ if (c == NULL) { return SSL_TLSEXT_ERR_OK; } if (inlen == 0) { /* someone tries to trick us? */ ap_log_cerror(APLOG_MARK, APLOG_ERR, 0, c, APLOGNO(02837) "ALPN client protocol list empty"); return SSL_TLSEXT_ERR_ALERT_FATAL; } client_protos = apr_array_make(c->pool, 0, sizeof(char *)); for (i = 0; i < inlen; /**/) { unsigned int plen = in[i++]; if (plen + i > inlen) { /* someone tries to trick us? */ ap_log_cerror(APLOG_MARK, APLOG_ERR, 0, c, APLOGNO(02838) "ALPN protocol identifier too long"); return SSL_TLSEXT_ERR_ALERT_FATAL; } APR_ARRAY_PUSH(client_protos, char *) = apr_pstrndup(c->pool, (const char *)in+i, plen); i += plen; } /* The order the callbacks are invoked from TLS extensions is, unfortunately * not defined and older openssl versions do call ALPN selection before * they callback the SNI. We need to make sure that we know which vhost * we are dealing with so we respect the correct protocols. 
*/ init_vhost(c, ssl); proposed = ap_select_protocol(c, NULL, sslconn->server, client_protos); if (!proposed) { proposed = ap_get_protocol(c); } len = strlen(proposed); if (len > 255) { ap_log_cerror(APLOG_MARK, APLOG_ERR, 0, c, APLOGNO(02840) "ALPN negotiated protocol name too long"); return SSL_TLSEXT_ERR_ALERT_FATAL; } *out = (const unsigned char *)proposed; *outlen = (unsigned char)len; if (strcmp(proposed, ap_get_protocol(c))) { apr_status_t status; status = ap_switch_protocol(c, NULL, sslconn->server, proposed); if (status != APR_SUCCESS) { ap_log_cerror(APLOG_MARK, APLOG_ERR, status, c, APLOGNO(02908) "protocol switch to '%s' failed", proposed); return SSL_TLSEXT_ERR_ALERT_FATAL; } } return SSL_TLSEXT_ERR_OK; } #endif /* HAVE_TLS_ALPN */ #ifdef HAVE_SRP int ssl_callback_SRPServerParams(SSL *ssl, int *ad, void *arg) { modssl_ctx_t *mctx = (modssl_ctx_t *)arg; char *username = SSL_get_srp_username(ssl); SRP_user_pwd *u; if (username == NULL #if OPENSSL_VERSION_NUMBER < 0x10100000L || (u = SRP_VBASE_get_by_user(mctx->srp_vbase, username)) == NULL) { #else || (u = SRP_VBASE_get1_by_user(mctx->srp_vbase, username)) == NULL) { #endif *ad = SSL_AD_UNKNOWN_PSK_IDENTITY; return SSL3_AL_FATAL; } if (SSL_set_srp_server_param(ssl, u->N, u->g, u->s, u->v, u->info) < 0) { #if OPENSSL_VERSION_NUMBER >= 0x10100000L SRP_user_pwd_free(u); #endif *ad = SSL_AD_INTERNAL_ERROR; return SSL3_AL_FATAL; } /* reset all other options */ #if OPENSSL_VERSION_NUMBER >= 0x10100000L SRP_user_pwd_free(u); #endif SSL_set_verify(ssl, SSL_VERIFY_NONE, ssl_callback_SSLVerify); return SSL_ERROR_NONE; } #endif /* HAVE_SRP */
./CrossVul/dataset_final_sorted/CWE-284/c/good_5075_1
crossvul-cpp_data_good_5349_2
/*
 *	TCP over IPv6
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on:
 *	linux/net/ipv4/tcp.c
 *	linux/net/ipv4/tcp_input.c
 *	linux/net/ipv4/tcp_output.c
 *
 *	Fixes:
 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 *	YOSHIFUJI Hideaki @USAGI:	convert /proc/net/tcp6 to seq_file.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#include <linux/bottom_half.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/jiffies.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/init.h>
#include <linux/jhash.h>
#include <linux/ipsec.h>
#include <linux/times.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/ipv6.h>
#include <linux/icmpv6.h>
#include <linux/random.h>

#include <net/tcp.h>
#include <net/ndisc.h>
#include <net/inet6_hashtables.h>
#include <net/inet6_connection_sock.h>
#include <net/ipv6.h>
#include <net/transp_v6.h>
#include <net/addrconf.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <net/inet_ecn.h>
#include <net/protocol.h>
#include <net/xfrm.h>
#include <net/snmp.h>
#include <net/dsfield.h>
#include <net/timewait_sock.h>
#include <net/inet_common.h>
#include <net/secure_seq.h>
#include <net/busy_poll.h>

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <crypto/hash.h>
#include <linux/scatterlist.h>

static void	tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb);
static void	tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
				      struct request_sock *req);

static int	tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);

static const struct inet_connection_sock_af_ops ipv6_mapped;
static const struct inet_connection_sock_af_ops ipv6_specific;
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
#else
/* Stub so callers need no #ifdef when MD5SIG support is compiled out. */
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
						   const struct in6_addr *addr)
{
	return NULL;
}
#endif

/* Cache the skb's route on the socket (with a safe dst refcount bump)
 * together with the incoming ifindex and the route cookie used later
 * to detect a stale dst.
 */
static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);

	if (dst && dst_hold_safe(dst)) {
		const struct rt6_info *rt = (const struct rt6_info *)dst;

		sk->sk_rx_dst = dst;
		inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
		inet6_sk(sk)->rx_dst_cookie = rt6_get_cookie(rt);
	}
}

/* Derive the initial sequence number from the 4-tuple of the incoming skb. */
static __u32 tcp_v6_init_sequence(const struct sk_buff *skb)
{
	return secure_tcpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
					    ipv6_hdr(skb)->saddr.s6_addr32,
					    tcp_hdr(skb)->dest,
					    tcp_hdr(skb)->source);
}

static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
			  int addr_len)
{
	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct in6_addr *saddr = NULL, *final_p, final;
	struct ipv6_txoptions *opt;
	struct flowi6 fl6;
	struct dst_entry *dst;
	int addr_type;
	int err;

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	if (usin->sin6_family != AF_INET6)
		return -EAFNOSUPPORT;

	memset(&fl6, 0, sizeof(fl6));

	if (np->sndflow) {
		fl6.flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
		IP6_ECN_flow_init(fl6.flowlabel);
		if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
			struct ip6_flowlabel *flowlabel;
			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
			if (!flowlabel)
				return -EINVAL;
			fl6_sock_release(flowlabel);
		}
	}

	/*
	 *	connect() to INADDR_ANY means loopback (BSD'ism).
	 */

	if (ipv6_addr_any(&usin->sin6_addr))
		usin->sin6_addr.s6_addr[15] = 0x1;

	addr_type = ipv6_addr_type(&usin->sin6_addr);

	if (addr_type & IPV6_ADDR_MULTICAST)
		return -ENETUNREACH;

	if (addr_type&IPV6_ADDR_LINKLOCAL) {
		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    usin->sin6_scope_id) {
			/* If interface is set while binding, indices
			 * must coincide.
			 */
			if (sk->sk_bound_dev_if &&
			    sk->sk_bound_dev_if != usin->sin6_scope_id)
				return -EINVAL;

			sk->sk_bound_dev_if = usin->sin6_scope_id;
		}

		/* Connect to link-local address requires an interface */
		if (!sk->sk_bound_dev_if)
			return -EINVAL;
	}

	/* Reset timestamp state when reconnecting to a different peer. */
	if (tp->rx_opt.ts_recent_stamp &&
	    !ipv6_addr_equal(&sk->sk_v6_daddr, &usin->sin6_addr)) {
		tp->rx_opt.ts_recent = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		tp->write_seq = 0;
	}

	sk->sk_v6_daddr = usin->sin6_addr;
	np->flow_label = fl6.flowlabel;

	/*
	 *	TCP over IPv4
	 */

	if (addr_type == IPV6_ADDR_MAPPED) {
		u32 exthdrlen = icsk->icsk_ext_hdr_len;
		struct sockaddr_in sin;

		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");

		if (__ipv6_only_sock(sk))
			return -ENETUNREACH;

		sin.sin_family = AF_INET;
		sin.sin_port = usin->sin6_port;
		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

		/* Switch the socket to the IPv4 af_ops; roll back on failure. */
		icsk->icsk_af_ops = &ipv6_mapped;
		sk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		tp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));

		if (err) {
			icsk->icsk_ext_hdr_len = exthdrlen;
			icsk->icsk_af_ops = &ipv6_specific;
			sk->sk_backlog_rcv = tcp_v6_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
			tp->af_specific = &tcp_sock_ipv6_specific;
#endif
			goto failure;
		}
		np->saddr = sk->sk_v6_rcv_saddr;

		return err;
	}

	if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr))
		saddr = &sk->sk_v6_rcv_saddr;

	fl6.flowi6_proto = IPPROTO_TCP;
	fl6.daddr = sk->sk_v6_daddr;
	fl6.saddr = saddr ? *saddr : np->saddr;
	fl6.flowi6_oif = sk->sk_bound_dev_if;
	fl6.flowi6_mark = sk->sk_mark;
	fl6.fl6_dport = usin->sin6_port;
	fl6.fl6_sport = inet->inet_sport;

	opt = rcu_dereference_protected(np->opt, lockdep_sock_is_held(sk));
	final_p = fl6_update_dst(&fl6, opt, &final);

	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));

	dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		goto failure;
	}

	if (!saddr) {
		saddr = &fl6.saddr;
		sk->sk_v6_rcv_saddr = *saddr;
	}

	/* set the source address */
	np->saddr = *saddr;
	inet->inet_rcv_saddr = LOOPBACK4_IPV6;

	sk->sk_gso_type = SKB_GSO_TCPV6;
	ip6_dst_store(sk, dst, NULL, NULL);

	if (tcp_death_row.sysctl_tw_recycle &&
	    !tp->rx_opt.ts_recent_stamp &&
	    ipv6_addr_equal(&fl6.daddr, &sk->sk_v6_daddr))
		tcp_fetch_timewait_stamp(sk, dst);

	icsk->icsk_ext_hdr_len = 0;
	if (opt)
		icsk->icsk_ext_hdr_len = opt->opt_flen +
					 opt->opt_nflen;

	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);

	inet->inet_dport = usin->sin6_port;

	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet6_hash_connect(&tcp_death_row, sk);
	if (err)
		goto late_failure;

	sk_set_txhash(sk);

	if (!tp->write_seq && likely(!tp->repair))
		tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
							     sk->sk_v6_daddr.s6_addr32,
							     inet->inet_sport,
							     inet->inet_dport);

	err = tcp_connect(sk);
	if (err)
		goto late_failure;

	return 0;

late_failure:
	tcp_set_state(sk, TCP_CLOSE);
	__sk_dst_reset(sk);
failure:
	inet->inet_dport = 0;
	sk->sk_route_caps = 0;
	return err;
}

/* Shrink the cached MSS after a path-MTU reduction and retransmit. */
static void tcp_v6_mtu_reduced(struct sock *sk)
{
	struct dst_entry *dst;

	if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
		return;

	dst = inet6_csk_update_pmtu(sk, tcp_sk(sk)->mtu_info);
	if (!dst)
		return;

	if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
		tcp_sync_mss(sk, dst_mtu(dst));
		tcp_simple_retransmit(sk);
	}
}

/* ICMPv6 error handler for TCP: dispatch PMTU, redirect and hard errors
 * to the socket found from the embedded header.
 */
static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		u8 type, u8 code, int offset, __be32 info)
{
	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
	const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
	struct net *net = dev_net(skb->dev);
	struct request_sock *fastopen;
	struct ipv6_pinfo *np;
	struct tcp_sock *tp;
	__u32 seq, snd_una;
	struct sock *sk;
	bool fatal;
	int err;

	sk = __inet6_lookup_established(net, &tcp_hashinfo,
					&hdr->daddr, th->dest,
					&hdr->saddr, ntohs(th->source),
					skb->dev->ifindex);

	if (!sk) {
		__ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
				  ICMP6_MIB_INERRORS);
		return;
	}

	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}
	seq = ntohl(th->seq);
	fatal = icmpv6_err_convert(type, code, &err);
	if (sk->sk_state == TCP_NEW_SYN_RECV)
		return tcp_req_err(sk, seq, fatal);

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
		__NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == TCP_CLOSE)
		goto out;

	/* Drop ICMP errors whose hop limit is below the configured minimum. */
	if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
		__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
		goto out;
	}

	tp = tcp_sk(sk);
	/* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */
	fastopen = tp->fastopen_rsk;
	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, snd_una, tp->snd_nxt)) {
		__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	np = inet6_sk(sk);

	if (type == NDISC_REDIRECT) {
		struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);

		if (dst)
			dst->ops->redirect(dst, sk, skb);
		goto out;
	}

	if (type == ICMPV6_PKT_TOOBIG) {
		/* We are not interested in TCP_LISTEN and open_requests
		 * (SYN-ACKs send out by Linux are always <576bytes so
		 * they should go through unfragmented).
		 */
		if (sk->sk_state == TCP_LISTEN)
			goto out;

		if (!ip6_sk_accept_pmtu(sk))
			goto out;

		tp->mtu_info = ntohl(info);
		if (!sock_owned_by_user(sk))
			tcp_v6_mtu_reduced(sk);
		else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED,
					   &tp->tsq_flags))
			sock_hold(sk);
		goto out;
	}


	/* Might be for an request_sock */
	switch (sk->sk_state) {
	case TCP_SYN_SENT:
	case TCP_SYN_RECV:
		/* Only in fast or simultaneous open. If a fast open socket is
		 * is already accepted it is treated as a connected one below.
		 */
		if (fastopen && !fastopen->sk)
			break;

		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;
			sk->sk_error_report(sk);	/* Wake people up to see the error (see connect in sock.c) */

			tcp_done(sk);
		} else
			sk->sk_err_soft = err;
		goto out;
	}

	if (!sock_owned_by_user(sk) && np->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else
		sk->sk_err_soft = err;

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}


/* Build and transmit a SYN-ACK for the given request; routes first if no
 * dst was supplied. Returns a net_xmit_* style error code.
 */
static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
			      struct flowi *fl,
			      struct request_sock *req,
			      struct tcp_fastopen_cookie *foc,
			      enum tcp_synack_type synack_type)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6_txoptions *opt;
	struct flowi6 *fl6 = &fl->u.ip6;
	struct sk_buff *skb;
	int err = -ENOMEM;

	/* First, grab a route. */
	if (!dst && (dst = inet6_csk_route_req(sk, fl6, req,
					       IPPROTO_TCP)) == NULL)
		goto done;

	skb = tcp_make_synack(sk, dst, req, foc, synack_type);

	if (skb) {
		__tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr,
				    &ireq->ir_v6_rmt_addr);

		fl6->daddr = ireq->ir_v6_rmt_addr;
		if (np->repflow && ireq->pktopts)
			fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));

		rcu_read_lock();
		opt = ireq->ipv6_opt;
		if (!opt)
			opt = rcu_dereference(np->opt);
		err = ip6_xmit(sk, skb, fl6, opt, np->tclass);
		rcu_read_unlock();
		err = net_xmit_eval(err);
	}

done:
	return err;
}


/* Release per-request IPv6 options and any pended packet options. */
static void tcp_v6_reqsk_destructor(struct request_sock *req)
{
	kfree(inet_rsk(req)->ipv6_opt);
	kfree_skb(inet_rsk(req)->pktopts);
}

#ifdef CONFIG_TCP_MD5SIG
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
						   const struct in6_addr *addr)
{
	return tcp_md5_do_lookup(sk, (union tcp_md5_addr *)addr, AF_INET6);
}

static struct tcp_md5sig_key *tcp_v6_md5_lookup(const struct sock *sk,
						const struct sock *addr_sk)
{
	return tcp_v6_md5_do_lookup(sk, &addr_sk->sk_v6_daddr);
}

/* setsockopt(TCP_MD5SIG) handler: add or delete (keylen == 0) a key,
 * treating v4-mapped addresses as AF_INET keys.
 */
static int tcp_v6_parse_md5_keys(struct sock *sk, char __user *optval,
				 int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin6->sin6_family != AF_INET6)
		return -EINVAL;

	if (!cmd.tcpm_keylen) {
		if (ipv6_addr_v4mapped(&sin6->sin6_addr))
			return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
					      AF_INET);
		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
				      AF_INET6);
	}

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	if (ipv6_addr_v4mapped(&sin6->sin6_addr))
		return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
				      AF_INET, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);

	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
			      AF_INET6, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
}

/* Feed the RFC2460 pseudo-header plus a zero-checksum copy of the TCP
 * header into the MD5 hash state.
 */
static int tcp_v6_md5_hash_headers(struct tcp_md5sig_pool *hp,
				   const struct in6_addr *daddr,
				   const struct in6_addr *saddr,
				   const struct tcphdr *th, int nbytes)
{
	struct tcp6_pseudohdr *bp;
	struct scatterlist sg;
	struct tcphdr *_th;

	bp = hp->scratch;
	/* 1. TCP pseudo-header (RFC2460) */
	bp->saddr = *saddr;
	bp->daddr = *daddr;
	bp->protocol = cpu_to_be32(IPPROTO_TCP);
	bp->len = cpu_to_be32(nbytes);

	_th = (struct tcphdr *)(bp + 1);
	memcpy(_th, th, sizeof(*th));
	_th->check = 0;

	sg_init_one(&sg, bp, sizeof(*bp) + sizeof(*th));
	ahash_request_set_crypt(hp->md5_req, &sg, NULL,
				sizeof(*bp) + sizeof(*th));
	return crypto_ahash_update(hp->md5_req);
}

/* Compute the TCP-MD5 digest over addresses + TCP header only (no payload);
 * returns 0 on success, 1 on failure (md5_hash zeroed).
 */
static int tcp_v6_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
			       const struct in6_addr *daddr, struct in6_addr *saddr,
			       const struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct ahash_request *req;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	req = hp->md5_req;

	if (crypto_ahash_init(req))
		goto clear_hash;
	if (tcp_v6_md5_hash_headers(hp, daddr, saddr, th, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	ahash_request_set_crypt(req, NULL, md5_hash, 0);
	if (crypto_ahash_final(req))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}

/* Compute the TCP-MD5 digest over headers + skb payload; addresses come
 * from the socket when available, else from the skb's IPv6 header.
 */
static int tcp_v6_md5_hash_skb(char *md5_hash,
			       const struct tcp_md5sig_key *key,
			       const struct sock *sk,
			       const struct sk_buff *skb)
{
	const struct in6_addr *saddr, *daddr;
	struct tcp_md5sig_pool *hp;
	struct ahash_request *req;
	const struct tcphdr *th = tcp_hdr(skb);

	if (sk) { /* valid for establish/request sockets */
		saddr = &sk->sk_v6_rcv_saddr;
		daddr = &sk->sk_v6_daddr;
	} else {
		const struct ipv6hdr *ip6h = ipv6_hdr(skb);
		saddr = &ip6h->saddr;
		daddr = &ip6h->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	req = hp->md5_req;

	if (crypto_ahash_init(req))
		goto clear_hash;

	if (tcp_v6_md5_hash_headers(hp, daddr, saddr, th, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	ahash_request_set_crypt(req, NULL, md5_hash, 0);
	if (crypto_ahash_final(req))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}

#endif

/* Verify an inbound segment's TCP-MD5 option against the expected key;
 * returns true when the segment must be dropped.
 */
static bool tcp_v6_inbound_md5_hash(const struct sock *sk,
				    const struct sk_buff *skb)
{
#ifdef CONFIG_TCP_MD5SIG
	const __u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
	const struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	u8 newhash[16];

	hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return false;

	if (hash_expected && !hash_location) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return true;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return true;
	}

	/* check the signature */
	genhash = tcp_v6_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5FAILURE);
		net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
				     genhash ? "failed" : "mismatch",
				     &ip6h->saddr, ntohs(th->source),
				     &ip6h->daddr, ntohs(th->dest));
		return true;
	}
#endif
	return false;
}

/* Initialize the IPv6 parts of a request sock from the incoming SYN. */
static void tcp_v6_init_req(struct request_sock *req,
			    const struct sock *sk_listener,
			    struct sk_buff *skb)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	const struct ipv6_pinfo *np = inet6_sk(sk_listener);

	ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
	ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;

	/* So that link locals have meaning */
	if (!sk_listener->sk_bound_dev_if &&
	    ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
		ireq->ir_iif = tcp_v6_iif(skb);

	/* Keep the SYN skb around when packet options must be replayed. */
	if (!TCP_SKB_CB(skb)->tcp_tw_isn &&
	    (ipv6_opt_accepted(sk_listener, skb, &TCP_SKB_CB(skb)->header.h6) ||
	     np->rxopt.bits.rxinfo ||
	     np->rxopt.bits.rxoinfo || np->rxopt.bits.rxhlim ||
	     np->rxopt.bits.rxohlim || np->repflow)) {
		atomic_inc(&skb->users);
		ireq->pktopts = skb;
	}
}

static struct dst_entry *tcp_v6_route_req(const struct sock *sk,
					  struct flowi *fl,
					  const struct request_sock *req,
					  bool *strict)
{
	if (strict)
		*strict = true;
	return inet6_csk_route_req(sk, &fl->u.ip6, req, IPPROTO_TCP);
}

struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
	.family		=	AF_INET6,
	.obj_size	=	sizeof(struct tcp6_request_sock),
	.rtx_syn_ack	=	tcp_rtx_synack,
	.send_ack	=	tcp_v6_reqsk_send_ack,
	.destructor	=	tcp_v6_reqsk_destructor,
	.send_reset	=	tcp_v6_send_reset,
	.syn_ack_timeout =	tcp_syn_ack_timeout,
};

static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
	.mss_clamp	=	IPV6_MIN_MTU - sizeof(struct tcphdr) -
				sizeof(struct ipv6hdr),
#ifdef CONFIG_TCP_MD5SIG
	.req_md5_lookup	=	tcp_v6_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
#endif
	.init_req	=	tcp_v6_init_req,
#ifdef CONFIG_SYN_COOKIES
	.cookie_init_seq =	cookie_v6_init_sequence,
#endif
	.route_req	=	tcp_v6_route_req,
	.init_seq	=	tcp_v6_init_sequence,
	.send_synack	=	tcp_v6_send_synack,
};

static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32 seq,
				 u32 ack, u32 win, u32 tsval, u32 tsecr,
				 int oif,
struct tcp_md5sig_key *key, int rst, u8 tclass, __be32 label) { const struct tcphdr *th = tcp_hdr(skb); struct tcphdr *t1; struct sk_buff *buff; struct flowi6 fl6; struct net *net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev); struct sock *ctl_sk = net->ipv6.tcp_sk; unsigned int tot_len = sizeof(struct tcphdr); struct dst_entry *dst; __be32 *topt; if (tsecr) tot_len += TCPOLEN_TSTAMP_ALIGNED; #ifdef CONFIG_TCP_MD5SIG if (key) tot_len += TCPOLEN_MD5SIG_ALIGNED; #endif buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len, GFP_ATOMIC); if (!buff) return; skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len); t1 = (struct tcphdr *) skb_push(buff, tot_len); skb_reset_transport_header(buff); /* Swap the send and the receive. */ memset(t1, 0, sizeof(*t1)); t1->dest = th->source; t1->source = th->dest; t1->doff = tot_len / 4; t1->seq = htonl(seq); t1->ack_seq = htonl(ack); t1->ack = !rst || !th->ack; t1->rst = rst; t1->window = htons(win); topt = (__be32 *)(t1 + 1); if (tsecr) { *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) | (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP); *topt++ = htonl(tsval); *topt++ = htonl(tsecr); } #ifdef CONFIG_TCP_MD5SIG if (key) { *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) | (TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG); tcp_v6_md5_hash_hdr((__u8 *)topt, key, &ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr, t1); } #endif memset(&fl6, 0, sizeof(fl6)); fl6.daddr = ipv6_hdr(skb)->saddr; fl6.saddr = ipv6_hdr(skb)->daddr; fl6.flowlabel = label; buff->ip_summed = CHECKSUM_PARTIAL; buff->csum = 0; __tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr); fl6.flowi6_proto = IPPROTO_TCP; if (rt6_need_strict(&fl6.daddr) && !oif) fl6.flowi6_oif = tcp_v6_iif(skb); else { if (!oif && netif_index_is_l3_master(net, skb->skb_iif)) oif = skb->skb_iif; fl6.flowi6_oif = oif; } fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark); fl6.fl6_dport = t1->dest; fl6.fl6_sport = t1->source; security_skb_classify_flow(skb, flowi6_to_flowi(&fl6)); /* 
Pass a socket to ip6_dst_lookup either it is for RST * Underlying function will use this to retrieve the network * namespace */ dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL); if (!IS_ERR(dst)) { skb_dst_set(buff, dst); ip6_xmit(ctl_sk, buff, &fl6, NULL, tclass); TCP_INC_STATS(net, TCP_MIB_OUTSEGS); if (rst) TCP_INC_STATS(net, TCP_MIB_OUTRSTS); return; } kfree_skb(buff); } static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb) { const struct tcphdr *th = tcp_hdr(skb); u32 seq = 0, ack_seq = 0; struct tcp_md5sig_key *key = NULL; #ifdef CONFIG_TCP_MD5SIG const __u8 *hash_location = NULL; struct ipv6hdr *ipv6h = ipv6_hdr(skb); unsigned char newhash[16]; int genhash; struct sock *sk1 = NULL; #endif int oif; if (th->rst) return; /* If sk not NULL, it means we did a successful lookup and incoming * route had to be correct. prequeue might have dropped our dst. */ if (!sk && !ipv6_unicast_destination(skb)) return; #ifdef CONFIG_TCP_MD5SIG rcu_read_lock(); hash_location = tcp_parse_md5sig_option(th); if (sk && sk_fullsock(sk)) { key = tcp_v6_md5_do_lookup(sk, &ipv6h->saddr); } else if (hash_location) { /* * active side is lost. Try to find listening socket through * source port, and then find md5 key through listening socket. * we are not loose security here: * Incoming packet is checked with md5 hash with finding key, * no RST generated if md5 hash doesn't match. */ sk1 = inet6_lookup_listener(dev_net(skb_dst(skb)->dev), &tcp_hashinfo, NULL, 0, &ipv6h->saddr, th->source, &ipv6h->daddr, ntohs(th->source), tcp_v6_iif(skb)); if (!sk1) goto out; key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr); if (!key) goto out; genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, skb); if (genhash || memcmp(hash_location, newhash, 16) != 0) goto out; } #endif if (th->ack) seq = ntohl(th->ack_seq); else ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len - (th->doff << 2); oif = sk ? 
sk->sk_bound_dev_if : 0; tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, oif, key, 1, 0, 0); #ifdef CONFIG_TCP_MD5SIG out: rcu_read_unlock(); #endif } static void tcp_v6_send_ack(const struct sock *sk, struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 tsval, u32 tsecr, int oif, struct tcp_md5sig_key *key, u8 tclass, __be32 label) { tcp_v6_send_response(sk, skb, seq, ack, win, tsval, tsecr, oif, key, 0, tclass, label); } static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb) { struct inet_timewait_sock *tw = inet_twsk(sk); struct tcp_timewait_sock *tcptw = tcp_twsk(sk); tcp_v6_send_ack(sk, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt, tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale, tcp_time_stamp + tcptw->tw_ts_offset, tcptw->tw_ts_recent, tw->tw_bound_dev_if, tcp_twsk_md5_key(tcptw), tw->tw_tclass, cpu_to_be32(tw->tw_flowlabel)); inet_twsk_put(tw); } static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb, struct request_sock *req) { /* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV * sk->sk_state == TCP_SYN_RECV -> for Fast Open. */ /* RFC 7323 2.3 * The window field (SEG.WND) of every outgoing segment, with the * exception of <SYN> segments, MUST be right-shifted by * Rcv.Wind.Shift bits: */ tcp_v6_send_ack(sk, skb, (sk->sk_state == TCP_LISTEN) ? 
tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt, tcp_rsk(req)->rcv_nxt, req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale, tcp_time_stamp, req->ts_recent, sk->sk_bound_dev_if, tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr), 0, 0); } static struct sock *tcp_v6_cookie_check(struct sock *sk, struct sk_buff *skb) { #ifdef CONFIG_SYN_COOKIES const struct tcphdr *th = tcp_hdr(skb); if (!th->syn) sk = cookie_v6_check(sk, skb); #endif return sk; } static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb) { if (skb->protocol == htons(ETH_P_IP)) return tcp_v4_conn_request(sk, skb); if (!ipv6_unicast_destination(skb)) goto drop; return tcp_conn_request(&tcp6_request_sock_ops, &tcp_request_sock_ipv6_ops, sk, skb); drop: tcp_listendrop(sk); return 0; /* don't send reset */ } static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *skb, struct request_sock *req, struct dst_entry *dst, struct request_sock *req_unhash, bool *own_req) { struct inet_request_sock *ireq; struct ipv6_pinfo *newnp; const struct ipv6_pinfo *np = inet6_sk(sk); struct ipv6_txoptions *opt; struct tcp6_sock *newtcp6sk; struct inet_sock *newinet; struct tcp_sock *newtp; struct sock *newsk; #ifdef CONFIG_TCP_MD5SIG struct tcp_md5sig_key *key; #endif struct flowi6 fl6; if (skb->protocol == htons(ETH_P_IP)) { /* * v6 mapped */ newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst, req_unhash, own_req); if (!newsk) return NULL; newtcp6sk = (struct tcp6_sock *)newsk; inet_sk(newsk)->pinet6 = &newtcp6sk->inet6; newinet = inet_sk(newsk); newnp = inet6_sk(newsk); newtp = tcp_sk(newsk); memcpy(newnp, np, sizeof(struct ipv6_pinfo)); newnp->saddr = newsk->sk_v6_rcv_saddr; inet_csk(newsk)->icsk_af_ops = &ipv6_mapped; newsk->sk_backlog_rcv = tcp_v4_do_rcv; #ifdef CONFIG_TCP_MD5SIG newtp->af_specific = &tcp_sock_ipv6_mapped_specific; #endif newnp->ipv6_ac_list = NULL; newnp->ipv6_fl_list = NULL; newnp->pktoptions = NULL; newnp->opt = NULL; newnp->mcast_oif = tcp_v6_iif(skb); newnp->mcast_hops = 
ipv6_hdr(skb)->hop_limit; newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb)); if (np->repflow) newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb)); /* * No need to charge this sock to the relevant IPv6 refcnt debug socks count * here, tcp_create_openreq_child now does this for us, see the comment in * that function for the gory details. -acme */ /* It is tricky place. Until this moment IPv4 tcp worked with IPv6 icsk.icsk_af_ops. Sync it now. */ tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie); return newsk; } ireq = inet_rsk(req); if (sk_acceptq_is_full(sk)) goto out_overflow; if (!dst) { dst = inet6_csk_route_req(sk, &fl6, req, IPPROTO_TCP); if (!dst) goto out; } newsk = tcp_create_openreq_child(sk, req, skb); if (!newsk) goto out_nonewsk; /* * No need to charge this sock to the relevant IPv6 refcnt debug socks * count here, tcp_create_openreq_child now does this for us, see the * comment in that function for the gory details. -acme */ newsk->sk_gso_type = SKB_GSO_TCPV6; ip6_dst_store(newsk, dst, NULL, NULL); inet6_sk_rx_dst_set(newsk, skb); newtcp6sk = (struct tcp6_sock *)newsk; inet_sk(newsk)->pinet6 = &newtcp6sk->inet6; newtp = tcp_sk(newsk); newinet = inet_sk(newsk); newnp = inet6_sk(newsk); memcpy(newnp, np, sizeof(struct ipv6_pinfo)); newsk->sk_v6_daddr = ireq->ir_v6_rmt_addr; newnp->saddr = ireq->ir_v6_loc_addr; newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr; newsk->sk_bound_dev_if = ireq->ir_iif; /* Now IPv6 options... First: no IPv4 options. 
*/ newinet->inet_opt = NULL; newnp->ipv6_ac_list = NULL; newnp->ipv6_fl_list = NULL; /* Clone RX bits */ newnp->rxopt.all = np->rxopt.all; newnp->pktoptions = NULL; newnp->opt = NULL; newnp->mcast_oif = tcp_v6_iif(skb); newnp->mcast_hops = ipv6_hdr(skb)->hop_limit; newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb)); if (np->repflow) newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb)); /* Clone native IPv6 options from listening socket (if any) Yes, keeping reference count would be much more clever, but we make one more one thing there: reattach optmem to newsk. */ opt = ireq->ipv6_opt; if (!opt) opt = rcu_dereference(np->opt); if (opt) { opt = ipv6_dup_options(newsk, opt); RCU_INIT_POINTER(newnp->opt, opt); } inet_csk(newsk)->icsk_ext_hdr_len = 0; if (opt) inet_csk(newsk)->icsk_ext_hdr_len = opt->opt_nflen + opt->opt_flen; tcp_ca_openreq_child(newsk, dst); tcp_sync_mss(newsk, dst_mtu(dst)); newtp->advmss = dst_metric_advmss(dst); if (tcp_sk(sk)->rx_opt.user_mss && tcp_sk(sk)->rx_opt.user_mss < newtp->advmss) newtp->advmss = tcp_sk(sk)->rx_opt.user_mss; tcp_initialize_rcv_mss(newsk); newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6; newinet->inet_rcv_saddr = LOOPBACK4_IPV6; #ifdef CONFIG_TCP_MD5SIG /* Copy over the MD5 key from the original socket */ key = tcp_v6_md5_do_lookup(sk, &newsk->sk_v6_daddr); if (key) { /* We're using one, so create a matching key * on the newsk structure. If we fail to get * memory, then we end up not copying the key * across. Shucks. 
*/ tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newsk->sk_v6_daddr, AF_INET6, key->key, key->keylen, sk_gfp_mask(sk, GFP_ATOMIC)); } #endif if (__inet_inherit_port(sk, newsk) < 0) { inet_csk_prepare_forced_close(newsk); tcp_done(newsk); goto out; } *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash)); if (*own_req) { tcp_move_syn(newtp, req); /* Clone pktoptions received with SYN, if we own the req */ if (ireq->pktopts) { newnp->pktoptions = skb_clone(ireq->pktopts, sk_gfp_mask(sk, GFP_ATOMIC)); consume_skb(ireq->pktopts); ireq->pktopts = NULL; if (newnp->pktoptions) skb_set_owner_r(newnp->pktoptions, newsk); } } return newsk; out_overflow: __NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS); out_nonewsk: dst_release(dst); out: tcp_listendrop(sk); return NULL; } static void tcp_v6_restore_cb(struct sk_buff *skb) { /* We need to move header back to the beginning if xfrm6_policy_check() * and tcp_v6_fill_cb() are going to be called again. * ip6_datagram_recv_specific_ctl() also expects IP6CB to be there. */ memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6, sizeof(struct inet6_skb_parm)); } /* The socket must have it's spinlock held when we get * here, unless it is a TCP_LISTEN socket. * * We have a potential double-lock case here, so even when * doing backlog processing we use the BH locking scheme. * This is because we cannot sleep with the original spinlock * held. */ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb) { struct ipv6_pinfo *np = inet6_sk(sk); struct tcp_sock *tp; struct sk_buff *opt_skb = NULL; /* Imagine: socket is IPv6. IPv4 packet arrives, goes to IPv4 receive handler and backlogged. From backlog it always goes here. Kerboom... Fortunately, tcp_rcv_established and rcv_established handle them correctly, but it is not case with tcp_v6_hnd_req and tcp_v6_send_reset(). 
--ANK */ if (skb->protocol == htons(ETH_P_IP)) return tcp_v4_do_rcv(sk, skb); if (tcp_filter(sk, skb)) goto discard; /* * socket locking is here for SMP purposes as backlog rcv * is currently called with bh processing disabled. */ /* Do Stevens' IPV6_PKTOPTIONS. Yes, guys, it is the only place in our code, where we may make it not affecting IPv4. The rest of code is protocol independent, and I do not like idea to uglify IPv4. Actually, all the idea behind IPV6_PKTOPTIONS looks not very well thought. For now we latch options, received in the last packet, enqueued by tcp. Feel free to propose better solution. --ANK (980728) */ if (np->rxopt.all) opt_skb = skb_clone(skb, sk_gfp_mask(sk, GFP_ATOMIC)); if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */ struct dst_entry *dst = sk->sk_rx_dst; sock_rps_save_rxhash(sk, skb); sk_mark_napi_id(sk, skb); if (dst) { if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif || dst->ops->check(dst, np->rx_dst_cookie) == NULL) { dst_release(dst); sk->sk_rx_dst = NULL; } } tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len); if (opt_skb) goto ipv6_pktoptions; return 0; } if (tcp_checksum_complete(skb)) goto csum_err; if (sk->sk_state == TCP_LISTEN) { struct sock *nsk = tcp_v6_cookie_check(sk, skb); if (!nsk) goto discard; if (nsk != sk) { sock_rps_save_rxhash(nsk, skb); sk_mark_napi_id(nsk, skb); if (tcp_child_process(sk, nsk, skb)) goto reset; if (opt_skb) __kfree_skb(opt_skb); return 0; } } else sock_rps_save_rxhash(sk, skb); if (tcp_rcv_state_process(sk, skb)) goto reset; if (opt_skb) goto ipv6_pktoptions; return 0; reset: tcp_v6_send_reset(sk, skb); discard: if (opt_skb) __kfree_skb(opt_skb); kfree_skb(skb); return 0; csum_err: TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS); TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS); goto discard; ipv6_pktoptions: /* Do you ask, what is it? 1. skb was enqueued by tcp. 2. skb is added to tail of read queue, rather than out of order. 3. socket is not in passive state. 4. 
Finally, it really contains options, which user wants to receive. */ tp = tcp_sk(sk); if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt && !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) { if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo) np->mcast_oif = tcp_v6_iif(opt_skb); if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit; if (np->rxopt.bits.rxflow || np->rxopt.bits.rxtclass) np->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(opt_skb)); if (np->repflow) np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb)); if (ipv6_opt_accepted(sk, opt_skb, &TCP_SKB_CB(opt_skb)->header.h6)) { skb_set_owner_r(opt_skb, sk); tcp_v6_restore_cb(opt_skb); opt_skb = xchg(&np->pktoptions, opt_skb); } else { __kfree_skb(opt_skb); opt_skb = xchg(&np->pktoptions, NULL); } } kfree_skb(opt_skb); return 0; } static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr, const struct tcphdr *th) { /* This is tricky: we move IP6CB at its correct location into * TCP_SKB_CB(). It must be done after xfrm6_policy_check(), because * _decode_session6() uses IP6CB(). * barrier() makes sure compiler won't play aliasing games. */ memmove(&TCP_SKB_CB(skb)->header.h6, IP6CB(skb), sizeof(struct inet6_skb_parm)); barrier(); TCP_SKB_CB(skb)->seq = ntohl(th->seq); TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin + skb->len - th->doff*4); TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq); TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th); TCP_SKB_CB(skb)->tcp_tw_isn = 0; TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr); TCP_SKB_CB(skb)->sacked = 0; } static int tcp_v6_rcv(struct sk_buff *skb) { const struct tcphdr *th; const struct ipv6hdr *hdr; bool refcounted; struct sock *sk; int ret; struct net *net = dev_net(skb->dev); if (skb->pkt_type != PACKET_HOST) goto discard_it; /* * Count it even if it's bad. 
*/ __TCP_INC_STATS(net, TCP_MIB_INSEGS); if (!pskb_may_pull(skb, sizeof(struct tcphdr))) goto discard_it; th = (const struct tcphdr *)skb->data; if (unlikely(th->doff < sizeof(struct tcphdr)/4)) goto bad_packet; if (!pskb_may_pull(skb, th->doff*4)) goto discard_it; if (skb_checksum_init(skb, IPPROTO_TCP, ip6_compute_pseudo)) goto csum_error; th = (const struct tcphdr *)skb->data; hdr = ipv6_hdr(skb); lookup: sk = __inet6_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th), th->source, th->dest, inet6_iif(skb), &refcounted); if (!sk) goto no_tcp_socket; process: if (sk->sk_state == TCP_TIME_WAIT) goto do_time_wait; if (sk->sk_state == TCP_NEW_SYN_RECV) { struct request_sock *req = inet_reqsk(sk); struct sock *nsk; sk = req->rsk_listener; tcp_v6_fill_cb(skb, hdr, th); if (tcp_v6_inbound_md5_hash(sk, skb)) { sk_drops_add(sk, skb); reqsk_put(req); goto discard_it; } if (unlikely(sk->sk_state != TCP_LISTEN)) { inet_csk_reqsk_queue_drop_and_put(sk, req); goto lookup; } sock_hold(sk); refcounted = true; nsk = tcp_check_req(sk, skb, req, false); if (!nsk) { reqsk_put(req); goto discard_and_relse; } if (nsk == sk) { reqsk_put(req); tcp_v6_restore_cb(skb); } else if (tcp_child_process(sk, nsk, skb)) { tcp_v6_send_reset(nsk, skb); goto discard_and_relse; } else { sock_put(sk); return 0; } } if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) { __NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP); goto discard_and_relse; } if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) goto discard_and_relse; tcp_v6_fill_cb(skb, hdr, th); if (tcp_v6_inbound_md5_hash(sk, skb)) goto discard_and_relse; if (tcp_filter(sk, skb)) goto discard_and_relse; th = (const struct tcphdr *)skb->data; hdr = ipv6_hdr(skb); skb->dev = NULL; if (sk->sk_state == TCP_LISTEN) { ret = tcp_v6_do_rcv(sk, skb); goto put_and_return; } sk_incoming_cpu_update(sk); bh_lock_sock_nested(sk); tcp_segs_in(tcp_sk(sk), skb); ret = 0; if (!sock_owned_by_user(sk)) { if (!tcp_prequeue(sk, skb)) ret = tcp_v6_do_rcv(sk, skb); } else if 
(tcp_add_backlog(sk, skb)) { goto discard_and_relse; } bh_unlock_sock(sk); put_and_return: if (refcounted) sock_put(sk); return ret ? -1 : 0; no_tcp_socket: if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) goto discard_it; tcp_v6_fill_cb(skb, hdr, th); if (tcp_checksum_complete(skb)) { csum_error: __TCP_INC_STATS(net, TCP_MIB_CSUMERRORS); bad_packet: __TCP_INC_STATS(net, TCP_MIB_INERRS); } else { tcp_v6_send_reset(NULL, skb); } discard_it: kfree_skb(skb); return 0; discard_and_relse: sk_drops_add(sk, skb); if (refcounted) sock_put(sk); goto discard_it; do_time_wait: if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) { inet_twsk_put(inet_twsk(sk)); goto discard_it; } tcp_v6_fill_cb(skb, hdr, th); if (tcp_checksum_complete(skb)) { inet_twsk_put(inet_twsk(sk)); goto csum_error; } switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) { case TCP_TW_SYN: { struct sock *sk2; sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo, skb, __tcp_hdrlen(th), &ipv6_hdr(skb)->saddr, th->source, &ipv6_hdr(skb)->daddr, ntohs(th->dest), tcp_v6_iif(skb)); if (sk2) { struct inet_timewait_sock *tw = inet_twsk(sk); inet_twsk_deschedule_put(tw); sk = sk2; tcp_v6_restore_cb(skb); refcounted = false; goto process; } /* Fall through to ACK */ } case TCP_TW_ACK: tcp_v6_timewait_ack(sk, skb); break; case TCP_TW_RST: tcp_v6_restore_cb(skb); tcp_v6_send_reset(sk, skb); inet_twsk_deschedule_put(inet_twsk(sk)); goto discard_it; case TCP_TW_SUCCESS: ; } goto discard_it; } static void tcp_v6_early_demux(struct sk_buff *skb) { const struct ipv6hdr *hdr; const struct tcphdr *th; struct sock *sk; if (skb->pkt_type != PACKET_HOST) return; if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr))) return; hdr = ipv6_hdr(skb); th = tcp_hdr(skb); if (th->doff < sizeof(struct tcphdr) / 4) return; /* Note : We use inet6_iif() here, not tcp_v6_iif() */ sk = __inet6_lookup_established(dev_net(skb->dev), &tcp_hashinfo, &hdr->saddr, th->source, &hdr->daddr, ntohs(th->dest), 
inet6_iif(skb)); if (sk) { skb->sk = sk; skb->destructor = sock_edemux; if (sk_fullsock(sk)) { struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst); if (dst) dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie); if (dst && inet_sk(sk)->rx_dst_ifindex == skb->skb_iif) skb_dst_set_noref(skb, dst); } } } static struct timewait_sock_ops tcp6_timewait_sock_ops = { .twsk_obj_size = sizeof(struct tcp6_timewait_sock), .twsk_unique = tcp_twsk_unique, .twsk_destructor = tcp_twsk_destructor, }; static const struct inet_connection_sock_af_ops ipv6_specific = { .queue_xmit = inet6_csk_xmit, .send_check = tcp_v6_send_check, .rebuild_header = inet6_sk_rebuild_header, .sk_rx_dst_set = inet6_sk_rx_dst_set, .conn_request = tcp_v6_conn_request, .syn_recv_sock = tcp_v6_syn_recv_sock, .net_header_len = sizeof(struct ipv6hdr), .net_frag_header_len = sizeof(struct frag_hdr), .setsockopt = ipv6_setsockopt, .getsockopt = ipv6_getsockopt, .addr2sockaddr = inet6_csk_addr2sockaddr, .sockaddr_len = sizeof(struct sockaddr_in6), .bind_conflict = inet6_csk_bind_conflict, #ifdef CONFIG_COMPAT .compat_setsockopt = compat_ipv6_setsockopt, .compat_getsockopt = compat_ipv6_getsockopt, #endif .mtu_reduced = tcp_v6_mtu_reduced, }; #ifdef CONFIG_TCP_MD5SIG static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = { .md5_lookup = tcp_v6_md5_lookup, .calc_md5_hash = tcp_v6_md5_hash_skb, .md5_parse = tcp_v6_parse_md5_keys, }; #endif /* * TCP over IPv4 via INET6 API */ static const struct inet_connection_sock_af_ops ipv6_mapped = { .queue_xmit = ip_queue_xmit, .send_check = tcp_v4_send_check, .rebuild_header = inet_sk_rebuild_header, .sk_rx_dst_set = inet_sk_rx_dst_set, .conn_request = tcp_v6_conn_request, .syn_recv_sock = tcp_v6_syn_recv_sock, .net_header_len = sizeof(struct iphdr), .setsockopt = ipv6_setsockopt, .getsockopt = ipv6_getsockopt, .addr2sockaddr = inet6_csk_addr2sockaddr, .sockaddr_len = sizeof(struct sockaddr_in6), .bind_conflict = inet6_csk_bind_conflict, #ifdef CONFIG_COMPAT .compat_setsockopt = 
compat_ipv6_setsockopt, .compat_getsockopt = compat_ipv6_getsockopt, #endif .mtu_reduced = tcp_v4_mtu_reduced, }; #ifdef CONFIG_TCP_MD5SIG static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = { .md5_lookup = tcp_v4_md5_lookup, .calc_md5_hash = tcp_v4_md5_hash_skb, .md5_parse = tcp_v6_parse_md5_keys, }; #endif /* NOTE: A lot of things set to zero explicitly by call to * sk_alloc() so need not be done here. */ static int tcp_v6_init_sock(struct sock *sk) { struct inet_connection_sock *icsk = inet_csk(sk); tcp_init_sock(sk); icsk->icsk_af_ops = &ipv6_specific; #ifdef CONFIG_TCP_MD5SIG tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific; #endif return 0; } static void tcp_v6_destroy_sock(struct sock *sk) { tcp_v4_destroy_sock(sk); inet6_destroy_sock(sk); } #ifdef CONFIG_PROC_FS /* Proc filesystem TCPv6 sock list dumping. */ static void get_openreq6(struct seq_file *seq, const struct request_sock *req, int i) { long ttd = req->rsk_timer.expires - jiffies; const struct in6_addr *src = &inet_rsk(req)->ir_v6_loc_addr; const struct in6_addr *dest = &inet_rsk(req)->ir_v6_rmt_addr; if (ttd < 0) ttd = 0; seq_printf(seq, "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X " "%02X %08X:%08X %02X:%08lX %08X %5u %8d %d %d %pK\n", i, src->s6_addr32[0], src->s6_addr32[1], src->s6_addr32[2], src->s6_addr32[3], inet_rsk(req)->ir_num, dest->s6_addr32[0], dest->s6_addr32[1], dest->s6_addr32[2], dest->s6_addr32[3], ntohs(inet_rsk(req)->ir_rmt_port), TCP_SYN_RECV, 0, 0, /* could print option size, but that is af dependent. 
*/ 1, /* timers active (only the expire timer) */ jiffies_to_clock_t(ttd), req->num_timeout, from_kuid_munged(seq_user_ns(seq), sock_i_uid(req->rsk_listener)), 0, /* non standard timer */ 0, /* open_requests have no inode */ 0, req); } static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i) { const struct in6_addr *dest, *src; __u16 destp, srcp; int timer_active; unsigned long timer_expires; const struct inet_sock *inet = inet_sk(sp); const struct tcp_sock *tp = tcp_sk(sp); const struct inet_connection_sock *icsk = inet_csk(sp); const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq; int rx_queue; int state; dest = &sp->sk_v6_daddr; src = &sp->sk_v6_rcv_saddr; destp = ntohs(inet->inet_dport); srcp = ntohs(inet->inet_sport); if (icsk->icsk_pending == ICSK_TIME_RETRANS || icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS || icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) { timer_active = 1; timer_expires = icsk->icsk_timeout; } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) { timer_active = 4; timer_expires = icsk->icsk_timeout; } else if (timer_pending(&sp->sk_timer)) { timer_active = 2; timer_expires = sp->sk_timer.expires; } else { timer_active = 0; timer_expires = jiffies; } state = sk_state_load(sp); if (state == TCP_LISTEN) rx_queue = sp->sk_ack_backlog; else /* Because we don't lock the socket, * we might find a transient negative value. 
*/ rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0); seq_printf(seq, "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X " "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %lu %lu %u %u %d\n", i, src->s6_addr32[0], src->s6_addr32[1], src->s6_addr32[2], src->s6_addr32[3], srcp, dest->s6_addr32[0], dest->s6_addr32[1], dest->s6_addr32[2], dest->s6_addr32[3], destp, state, tp->write_seq - tp->snd_una, rx_queue, timer_active, jiffies_delta_to_clock_t(timer_expires - jiffies), icsk->icsk_retransmits, from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)), icsk->icsk_probes_out, sock_i_ino(sp), atomic_read(&sp->sk_refcnt), sp, jiffies_to_clock_t(icsk->icsk_rto), jiffies_to_clock_t(icsk->icsk_ack.ato), (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong, tp->snd_cwnd, state == TCP_LISTEN ? fastopenq->max_qlen : (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh) ); } static void get_timewait6_sock(struct seq_file *seq, struct inet_timewait_sock *tw, int i) { long delta = tw->tw_timer.expires - jiffies; const struct in6_addr *dest, *src; __u16 destp, srcp; dest = &tw->tw_v6_daddr; src = &tw->tw_v6_rcv_saddr; destp = ntohs(tw->tw_dport); srcp = ntohs(tw->tw_sport); seq_printf(seq, "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X " "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n", i, src->s6_addr32[0], src->s6_addr32[1], src->s6_addr32[2], src->s6_addr32[3], srcp, dest->s6_addr32[0], dest->s6_addr32[1], dest->s6_addr32[2], dest->s6_addr32[3], destp, tw->tw_substate, 0, 0, 3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0, atomic_read(&tw->tw_refcnt), tw); } static int tcp6_seq_show(struct seq_file *seq, void *v) { struct tcp_iter_state *st; struct sock *sk = v; if (v == SEQ_START_TOKEN) { seq_puts(seq, " sl " "local_address " "remote_address " "st tx_queue rx_queue tr tm->when retrnsmt" " uid timeout inode\n"); goto out; } st = seq->private; if (sk->sk_state == TCP_TIME_WAIT) get_timewait6_sock(seq, v, st->num); else if (sk->sk_state == TCP_NEW_SYN_RECV) 
get_openreq6(seq, v, st->num); else get_tcp6_sock(seq, v, st->num); out: return 0; } static const struct file_operations tcp6_afinfo_seq_fops = { .owner = THIS_MODULE, .open = tcp_seq_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release_net }; static struct tcp_seq_afinfo tcp6_seq_afinfo = { .name = "tcp6", .family = AF_INET6, .seq_fops = &tcp6_afinfo_seq_fops, .seq_ops = { .show = tcp6_seq_show, }, }; int __net_init tcp6_proc_init(struct net *net) { return tcp_proc_register(net, &tcp6_seq_afinfo); } void tcp6_proc_exit(struct net *net) { tcp_proc_unregister(net, &tcp6_seq_afinfo); } #endif struct proto tcpv6_prot = { .name = "TCPv6", .owner = THIS_MODULE, .close = tcp_close, .connect = tcp_v6_connect, .disconnect = tcp_disconnect, .accept = inet_csk_accept, .ioctl = tcp_ioctl, .init = tcp_v6_init_sock, .destroy = tcp_v6_destroy_sock, .shutdown = tcp_shutdown, .setsockopt = tcp_setsockopt, .getsockopt = tcp_getsockopt, .recvmsg = tcp_recvmsg, .sendmsg = tcp_sendmsg, .sendpage = tcp_sendpage, .backlog_rcv = tcp_v6_do_rcv, .release_cb = tcp_release_cb, .hash = inet6_hash, .unhash = inet_unhash, .get_port = inet_csk_get_port, .enter_memory_pressure = tcp_enter_memory_pressure, .stream_memory_free = tcp_stream_memory_free, .sockets_allocated = &tcp_sockets_allocated, .memory_allocated = &tcp_memory_allocated, .memory_pressure = &tcp_memory_pressure, .orphan_count = &tcp_orphan_count, .sysctl_mem = sysctl_tcp_mem, .sysctl_wmem = sysctl_tcp_wmem, .sysctl_rmem = sysctl_tcp_rmem, .max_header = MAX_TCP_HEADER, .obj_size = sizeof(struct tcp6_sock), .slab_flags = SLAB_DESTROY_BY_RCU, .twsk_prot = &tcp6_timewait_sock_ops, .rsk_prot = &tcp6_request_sock_ops, .h.hashinfo = &tcp_hashinfo, .no_autobind = true, #ifdef CONFIG_COMPAT .compat_setsockopt = compat_tcp_setsockopt, .compat_getsockopt = compat_tcp_getsockopt, #endif .diag_destroy = tcp_abort, }; static const struct inet6_protocol tcpv6_protocol = { .early_demux = tcp_v6_early_demux, .handler = tcp_v6_rcv, 
.err_handler = tcp_v6_err, .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL, }; static struct inet_protosw tcpv6_protosw = { .type = SOCK_STREAM, .protocol = IPPROTO_TCP, .prot = &tcpv6_prot, .ops = &inet6_stream_ops, .flags = INET_PROTOSW_PERMANENT | INET_PROTOSW_ICSK, }; static int __net_init tcpv6_net_init(struct net *net) { return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6, SOCK_RAW, IPPROTO_TCP, net); } static void __net_exit tcpv6_net_exit(struct net *net) { inet_ctl_sock_destroy(net->ipv6.tcp_sk); } static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list) { inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET6); } static struct pernet_operations tcpv6_net_ops = { .init = tcpv6_net_init, .exit = tcpv6_net_exit, .exit_batch = tcpv6_net_exit_batch, }; int __init tcpv6_init(void) { int ret; ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP); if (ret) goto out; /* register inet6 protocol */ ret = inet6_register_protosw(&tcpv6_protosw); if (ret) goto out_tcpv6_protocol; ret = register_pernet_subsys(&tcpv6_net_ops); if (ret) goto out_tcpv6_protosw; out: return ret; out_tcpv6_protosw: inet6_unregister_protosw(&tcpv6_protosw); out_tcpv6_protocol: inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP); goto out; } void tcpv6_exit(void) { unregister_pernet_subsys(&tcpv6_net_ops); inet6_unregister_protosw(&tcpv6_protosw); inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP); }
./CrossVul/dataset_final_sorted/CWE-284/c/good_5349_2
crossvul-cpp_data_bad_4453_0
// SPDX-License-Identifier: GPL-2.0 #include <linux/types.h> #include <linux/tty.h> #include <linux/tty_flip.h> #include <linux/slab.h> #include "speakup.h" #include "spk_types.h" #include "spk_priv.h" struct spk_ldisc_data { char buf; struct completion completion; bool buf_free; }; static struct spk_synth *spk_ttyio_synth; static struct tty_struct *speakup_tty; /* mutex to protect against speakup_tty disappearing from underneath us while * we are using it. this can happen when the device physically unplugged, * while in use. it also serialises access to speakup_tty. */ static DEFINE_MUTEX(speakup_tty_mutex); static int ser_to_dev(int ser, dev_t *dev_no) { if (ser < 0 || ser > (255 - 64)) { pr_err("speakup: Invalid ser param. Must be between 0 and 191 inclusive.\n"); return -EINVAL; } *dev_no = MKDEV(4, (64 + ser)); return 0; } static int get_dev_to_use(struct spk_synth *synth, dev_t *dev_no) { /* use ser only when dev is not specified */ if (strcmp(synth->dev_name, SYNTH_DEFAULT_DEV) || synth->ser == SYNTH_DEFAULT_SER) return tty_dev_name_to_number(synth->dev_name, dev_no); return ser_to_dev(synth->ser, dev_no); } static int spk_ttyio_ldisc_open(struct tty_struct *tty) { struct spk_ldisc_data *ldisc_data; if (!tty->ops->write) return -EOPNOTSUPP; speakup_tty = tty; ldisc_data = kmalloc(sizeof(*ldisc_data), GFP_KERNEL); if (!ldisc_data) return -ENOMEM; init_completion(&ldisc_data->completion); ldisc_data->buf_free = true; speakup_tty->disc_data = ldisc_data; return 0; } static void spk_ttyio_ldisc_close(struct tty_struct *tty) { mutex_lock(&speakup_tty_mutex); kfree(speakup_tty->disc_data); speakup_tty = NULL; mutex_unlock(&speakup_tty_mutex); } static int spk_ttyio_receive_buf2(struct tty_struct *tty, const unsigned char *cp, char *fp, int count) { struct spk_ldisc_data *ldisc_data = tty->disc_data; if (spk_ttyio_synth->read_buff_add) { int i; for (i = 0; i < count; i++) spk_ttyio_synth->read_buff_add(cp[i]); return count; } if (!ldisc_data->buf_free) /* ttyio_in 
will tty_schedule_flip */ return 0; /* Make sure the consumer has read buf before we have seen * buf_free == true and overwrite buf */ mb(); ldisc_data->buf = cp[0]; ldisc_data->buf_free = false; complete(&ldisc_data->completion); return 1; } static struct tty_ldisc_ops spk_ttyio_ldisc_ops = { .owner = THIS_MODULE, .magic = TTY_LDISC_MAGIC, .name = "speakup_ldisc", .open = spk_ttyio_ldisc_open, .close = spk_ttyio_ldisc_close, .receive_buf2 = spk_ttyio_receive_buf2, }; static int spk_ttyio_out(struct spk_synth *in_synth, const char ch); static int spk_ttyio_out_unicode(struct spk_synth *in_synth, u16 ch); static void spk_ttyio_send_xchar(char ch); static void spk_ttyio_tiocmset(unsigned int set, unsigned int clear); static unsigned char spk_ttyio_in(void); static unsigned char spk_ttyio_in_nowait(void); static void spk_ttyio_flush_buffer(void); static int spk_ttyio_wait_for_xmitr(struct spk_synth *in_synth); struct spk_io_ops spk_ttyio_ops = { .synth_out = spk_ttyio_out, .synth_out_unicode = spk_ttyio_out_unicode, .send_xchar = spk_ttyio_send_xchar, .tiocmset = spk_ttyio_tiocmset, .synth_in = spk_ttyio_in, .synth_in_nowait = spk_ttyio_in_nowait, .flush_buffer = spk_ttyio_flush_buffer, .wait_for_xmitr = spk_ttyio_wait_for_xmitr, }; EXPORT_SYMBOL_GPL(spk_ttyio_ops); static inline void get_termios(struct tty_struct *tty, struct ktermios *out_termios) { down_read(&tty->termios_rwsem); *out_termios = tty->termios; up_read(&tty->termios_rwsem); } static int spk_ttyio_initialise_ldisc(struct spk_synth *synth) { int ret = 0; struct tty_struct *tty; struct ktermios tmp_termios; dev_t dev; ret = get_dev_to_use(synth, &dev); if (ret) return ret; tty = tty_kopen(dev); if (IS_ERR(tty)) return PTR_ERR(tty); if (tty->ops->open) ret = tty->ops->open(tty, NULL); else ret = -ENODEV; if (ret) { tty_unlock(tty); return ret; } clear_bit(TTY_HUPPED, &tty->flags); /* ensure hardware flow control is enabled */ get_termios(tty, &tmp_termios); if (!(tmp_termios.c_cflag & CRTSCTS)) { 
tmp_termios.c_cflag |= CRTSCTS; tty_set_termios(tty, &tmp_termios); /* * check c_cflag to see if it's updated as tty_set_termios * may not return error even when no tty bits are * changed by the request. */ get_termios(tty, &tmp_termios); if (!(tmp_termios.c_cflag & CRTSCTS)) pr_warn("speakup: Failed to set hardware flow control\n"); } tty_unlock(tty); ret = tty_set_ldisc(tty, N_SPEAKUP); if (ret) pr_err("speakup: Failed to set N_SPEAKUP on tty\n"); return ret; } void spk_ttyio_register_ldisc(void) { if (tty_register_ldisc(N_SPEAKUP, &spk_ttyio_ldisc_ops)) pr_warn("speakup: Error registering line discipline. Most synths won't work.\n"); } void spk_ttyio_unregister_ldisc(void) { if (tty_unregister_ldisc(N_SPEAKUP)) pr_warn("speakup: Couldn't unregister ldisc\n"); } static int spk_ttyio_out(struct spk_synth *in_synth, const char ch) { mutex_lock(&speakup_tty_mutex); if (in_synth->alive && speakup_tty && speakup_tty->ops->write) { int ret = speakup_tty->ops->write(speakup_tty, &ch, 1); mutex_unlock(&speakup_tty_mutex); if (ret == 0) /* No room */ return 0; if (ret < 0) { pr_warn("%s: I/O error, deactivating speakup\n", in_synth->long_name); /* No synth any more, so nobody will restart TTYs, * and we thus need to do it ourselves. 
Now that there * is no synth we can let application flood anyway */ in_synth->alive = 0; speakup_start_ttys(); return 0; } return 1; } mutex_unlock(&speakup_tty_mutex); return 0; } static int spk_ttyio_out_unicode(struct spk_synth *in_synth, u16 ch) { int ret; if (ch < 0x80) { ret = spk_ttyio_out(in_synth, ch); } else if (ch < 0x800) { ret = spk_ttyio_out(in_synth, 0xc0 | (ch >> 6)); ret &= spk_ttyio_out(in_synth, 0x80 | (ch & 0x3f)); } else { ret = spk_ttyio_out(in_synth, 0xe0 | (ch >> 12)); ret &= spk_ttyio_out(in_synth, 0x80 | ((ch >> 6) & 0x3f)); ret &= spk_ttyio_out(in_synth, 0x80 | (ch & 0x3f)); } return ret; } static int check_tty(struct tty_struct *tty) { if (!tty) { pr_warn("%s: I/O error, deactivating speakup\n", spk_ttyio_synth->long_name); /* No synth any more, so nobody will restart TTYs, and we thus * need to do it ourselves. Now that there is no synth we can * let application flood anyway */ spk_ttyio_synth->alive = 0; speakup_start_ttys(); return 1; } return 0; } static void spk_ttyio_send_xchar(char ch) { mutex_lock(&speakup_tty_mutex); if (check_tty(speakup_tty)) { mutex_unlock(&speakup_tty_mutex); return; } if (speakup_tty->ops->send_xchar) speakup_tty->ops->send_xchar(speakup_tty, ch); mutex_unlock(&speakup_tty_mutex); } static void spk_ttyio_tiocmset(unsigned int set, unsigned int clear) { mutex_lock(&speakup_tty_mutex); if (check_tty(speakup_tty)) { mutex_unlock(&speakup_tty_mutex); return; } if (speakup_tty->ops->tiocmset) speakup_tty->ops->tiocmset(speakup_tty, set, clear); mutex_unlock(&speakup_tty_mutex); } static int spk_ttyio_wait_for_xmitr(struct spk_synth *in_synth) { return 1; } static unsigned char ttyio_in(int timeout) { struct spk_ldisc_data *ldisc_data = speakup_tty->disc_data; char rv; if (wait_for_completion_timeout(&ldisc_data->completion, usecs_to_jiffies(timeout)) == 0) { if (timeout) pr_warn("spk_ttyio: timeout (%d) while waiting for input\n", timeout); return 0xff; } rv = ldisc_data->buf; /* Make sure we have read buf 
before we set buf_free to let * the producer overwrite it */ mb(); ldisc_data->buf_free = true; /* Let TTY push more characters */ tty_schedule_flip(speakup_tty->port); return rv; } static unsigned char spk_ttyio_in(void) { return ttyio_in(SPK_SYNTH_TIMEOUT); } static unsigned char spk_ttyio_in_nowait(void) { u8 rv = ttyio_in(0); return (rv == 0xff) ? 0 : rv; } static void spk_ttyio_flush_buffer(void) { mutex_lock(&speakup_tty_mutex); if (check_tty(speakup_tty)) { mutex_unlock(&speakup_tty_mutex); return; } if (speakup_tty->ops->flush_buffer) speakup_tty->ops->flush_buffer(speakup_tty); mutex_unlock(&speakup_tty_mutex); } int spk_ttyio_synth_probe(struct spk_synth *synth) { int rv = spk_ttyio_initialise_ldisc(synth); if (rv) return rv; synth->alive = 1; spk_ttyio_synth = synth; return 0; } EXPORT_SYMBOL_GPL(spk_ttyio_synth_probe); void spk_ttyio_release(void) { if (!speakup_tty) return; tty_lock(speakup_tty); if (speakup_tty->ops->close) speakup_tty->ops->close(speakup_tty, NULL); tty_ldisc_flush(speakup_tty); tty_unlock(speakup_tty); tty_kclose(speakup_tty); } EXPORT_SYMBOL_GPL(spk_ttyio_release); const char *spk_ttyio_synth_immediate(struct spk_synth *synth, const char *buff) { u_char ch; while ((ch = *buff)) { if (ch == '\n') ch = synth->procspeech; if (tty_write_room(speakup_tty) < 1 || !synth->io_ops->synth_out(synth, ch)) return buff; buff++; } return NULL; } EXPORT_SYMBOL_GPL(spk_ttyio_synth_immediate);
./CrossVul/dataset_final_sorted/CWE-763/c/bad_4453_0
crossvul-cpp_data_good_4453_0
// SPDX-License-Identifier: GPL-2.0 #include <linux/types.h> #include <linux/tty.h> #include <linux/tty_flip.h> #include <linux/slab.h> #include "speakup.h" #include "spk_types.h" #include "spk_priv.h" struct spk_ldisc_data { char buf; struct completion completion; bool buf_free; }; static struct spk_synth *spk_ttyio_synth; static struct tty_struct *speakup_tty; /* mutex to protect against speakup_tty disappearing from underneath us while * we are using it. this can happen when the device physically unplugged, * while in use. it also serialises access to speakup_tty. */ static DEFINE_MUTEX(speakup_tty_mutex); static int ser_to_dev(int ser, dev_t *dev_no) { if (ser < 0 || ser > (255 - 64)) { pr_err("speakup: Invalid ser param. Must be between 0 and 191 inclusive.\n"); return -EINVAL; } *dev_no = MKDEV(4, (64 + ser)); return 0; } static int get_dev_to_use(struct spk_synth *synth, dev_t *dev_no) { /* use ser only when dev is not specified */ if (strcmp(synth->dev_name, SYNTH_DEFAULT_DEV) || synth->ser == SYNTH_DEFAULT_SER) return tty_dev_name_to_number(synth->dev_name, dev_no); return ser_to_dev(synth->ser, dev_no); } static int spk_ttyio_ldisc_open(struct tty_struct *tty) { struct spk_ldisc_data *ldisc_data; if (!tty->ops->write) return -EOPNOTSUPP; mutex_lock(&speakup_tty_mutex); if (speakup_tty) { mutex_unlock(&speakup_tty_mutex); return -EBUSY; } speakup_tty = tty; ldisc_data = kmalloc(sizeof(*ldisc_data), GFP_KERNEL); if (!ldisc_data) { speakup_tty = NULL; mutex_unlock(&speakup_tty_mutex); return -ENOMEM; } init_completion(&ldisc_data->completion); ldisc_data->buf_free = true; speakup_tty->disc_data = ldisc_data; mutex_unlock(&speakup_tty_mutex); return 0; } static void spk_ttyio_ldisc_close(struct tty_struct *tty) { mutex_lock(&speakup_tty_mutex); kfree(speakup_tty->disc_data); speakup_tty = NULL; mutex_unlock(&speakup_tty_mutex); } static int spk_ttyio_receive_buf2(struct tty_struct *tty, const unsigned char *cp, char *fp, int count) { struct spk_ldisc_data 
*ldisc_data = tty->disc_data; if (spk_ttyio_synth->read_buff_add) { int i; for (i = 0; i < count; i++) spk_ttyio_synth->read_buff_add(cp[i]); return count; } if (!ldisc_data->buf_free) /* ttyio_in will tty_schedule_flip */ return 0; /* Make sure the consumer has read buf before we have seen * buf_free == true and overwrite buf */ mb(); ldisc_data->buf = cp[0]; ldisc_data->buf_free = false; complete(&ldisc_data->completion); return 1; } static struct tty_ldisc_ops spk_ttyio_ldisc_ops = { .owner = THIS_MODULE, .magic = TTY_LDISC_MAGIC, .name = "speakup_ldisc", .open = spk_ttyio_ldisc_open, .close = spk_ttyio_ldisc_close, .receive_buf2 = spk_ttyio_receive_buf2, }; static int spk_ttyio_out(struct spk_synth *in_synth, const char ch); static int spk_ttyio_out_unicode(struct spk_synth *in_synth, u16 ch); static void spk_ttyio_send_xchar(char ch); static void spk_ttyio_tiocmset(unsigned int set, unsigned int clear); static unsigned char spk_ttyio_in(void); static unsigned char spk_ttyio_in_nowait(void); static void spk_ttyio_flush_buffer(void); static int spk_ttyio_wait_for_xmitr(struct spk_synth *in_synth); struct spk_io_ops spk_ttyio_ops = { .synth_out = spk_ttyio_out, .synth_out_unicode = spk_ttyio_out_unicode, .send_xchar = spk_ttyio_send_xchar, .tiocmset = spk_ttyio_tiocmset, .synth_in = spk_ttyio_in, .synth_in_nowait = spk_ttyio_in_nowait, .flush_buffer = spk_ttyio_flush_buffer, .wait_for_xmitr = spk_ttyio_wait_for_xmitr, }; EXPORT_SYMBOL_GPL(spk_ttyio_ops); static inline void get_termios(struct tty_struct *tty, struct ktermios *out_termios) { down_read(&tty->termios_rwsem); *out_termios = tty->termios; up_read(&tty->termios_rwsem); } static int spk_ttyio_initialise_ldisc(struct spk_synth *synth) { int ret = 0; struct tty_struct *tty; struct ktermios tmp_termios; dev_t dev; ret = get_dev_to_use(synth, &dev); if (ret) return ret; tty = tty_kopen(dev); if (IS_ERR(tty)) return PTR_ERR(tty); if (tty->ops->open) ret = tty->ops->open(tty, NULL); else ret = -ENODEV; if 
(ret) { tty_unlock(tty); return ret; } clear_bit(TTY_HUPPED, &tty->flags); /* ensure hardware flow control is enabled */ get_termios(tty, &tmp_termios); if (!(tmp_termios.c_cflag & CRTSCTS)) { tmp_termios.c_cflag |= CRTSCTS; tty_set_termios(tty, &tmp_termios); /* * check c_cflag to see if it's updated as tty_set_termios * may not return error even when no tty bits are * changed by the request. */ get_termios(tty, &tmp_termios); if (!(tmp_termios.c_cflag & CRTSCTS)) pr_warn("speakup: Failed to set hardware flow control\n"); } tty_unlock(tty); ret = tty_set_ldisc(tty, N_SPEAKUP); if (ret) pr_err("speakup: Failed to set N_SPEAKUP on tty\n"); return ret; } void spk_ttyio_register_ldisc(void) { if (tty_register_ldisc(N_SPEAKUP, &spk_ttyio_ldisc_ops)) pr_warn("speakup: Error registering line discipline. Most synths won't work.\n"); } void spk_ttyio_unregister_ldisc(void) { if (tty_unregister_ldisc(N_SPEAKUP)) pr_warn("speakup: Couldn't unregister ldisc\n"); } static int spk_ttyio_out(struct spk_synth *in_synth, const char ch) { mutex_lock(&speakup_tty_mutex); if (in_synth->alive && speakup_tty && speakup_tty->ops->write) { int ret = speakup_tty->ops->write(speakup_tty, &ch, 1); mutex_unlock(&speakup_tty_mutex); if (ret == 0) /* No room */ return 0; if (ret < 0) { pr_warn("%s: I/O error, deactivating speakup\n", in_synth->long_name); /* No synth any more, so nobody will restart TTYs, * and we thus need to do it ourselves. 
Now that there * is no synth we can let application flood anyway */ in_synth->alive = 0; speakup_start_ttys(); return 0; } return 1; } mutex_unlock(&speakup_tty_mutex); return 0; } static int spk_ttyio_out_unicode(struct spk_synth *in_synth, u16 ch) { int ret; if (ch < 0x80) { ret = spk_ttyio_out(in_synth, ch); } else if (ch < 0x800) { ret = spk_ttyio_out(in_synth, 0xc0 | (ch >> 6)); ret &= spk_ttyio_out(in_synth, 0x80 | (ch & 0x3f)); } else { ret = spk_ttyio_out(in_synth, 0xe0 | (ch >> 12)); ret &= spk_ttyio_out(in_synth, 0x80 | ((ch >> 6) & 0x3f)); ret &= spk_ttyio_out(in_synth, 0x80 | (ch & 0x3f)); } return ret; } static int check_tty(struct tty_struct *tty) { if (!tty) { pr_warn("%s: I/O error, deactivating speakup\n", spk_ttyio_synth->long_name); /* No synth any more, so nobody will restart TTYs, and we thus * need to do it ourselves. Now that there is no synth we can * let application flood anyway */ spk_ttyio_synth->alive = 0; speakup_start_ttys(); return 1; } return 0; } static void spk_ttyio_send_xchar(char ch) { mutex_lock(&speakup_tty_mutex); if (check_tty(speakup_tty)) { mutex_unlock(&speakup_tty_mutex); return; } if (speakup_tty->ops->send_xchar) speakup_tty->ops->send_xchar(speakup_tty, ch); mutex_unlock(&speakup_tty_mutex); } static void spk_ttyio_tiocmset(unsigned int set, unsigned int clear) { mutex_lock(&speakup_tty_mutex); if (check_tty(speakup_tty)) { mutex_unlock(&speakup_tty_mutex); return; } if (speakup_tty->ops->tiocmset) speakup_tty->ops->tiocmset(speakup_tty, set, clear); mutex_unlock(&speakup_tty_mutex); } static int spk_ttyio_wait_for_xmitr(struct spk_synth *in_synth) { return 1; } static unsigned char ttyio_in(int timeout) { struct spk_ldisc_data *ldisc_data = speakup_tty->disc_data; char rv; if (wait_for_completion_timeout(&ldisc_data->completion, usecs_to_jiffies(timeout)) == 0) { if (timeout) pr_warn("spk_ttyio: timeout (%d) while waiting for input\n", timeout); return 0xff; } rv = ldisc_data->buf; /* Make sure we have read buf 
before we set buf_free to let * the producer overwrite it */ mb(); ldisc_data->buf_free = true; /* Let TTY push more characters */ tty_schedule_flip(speakup_tty->port); return rv; } static unsigned char spk_ttyio_in(void) { return ttyio_in(SPK_SYNTH_TIMEOUT); } static unsigned char spk_ttyio_in_nowait(void) { u8 rv = ttyio_in(0); return (rv == 0xff) ? 0 : rv; } static void spk_ttyio_flush_buffer(void) { mutex_lock(&speakup_tty_mutex); if (check_tty(speakup_tty)) { mutex_unlock(&speakup_tty_mutex); return; } if (speakup_tty->ops->flush_buffer) speakup_tty->ops->flush_buffer(speakup_tty); mutex_unlock(&speakup_tty_mutex); } int spk_ttyio_synth_probe(struct spk_synth *synth) { int rv = spk_ttyio_initialise_ldisc(synth); if (rv) return rv; synth->alive = 1; spk_ttyio_synth = synth; return 0; } EXPORT_SYMBOL_GPL(spk_ttyio_synth_probe); void spk_ttyio_release(void) { if (!speakup_tty) return; tty_lock(speakup_tty); if (speakup_tty->ops->close) speakup_tty->ops->close(speakup_tty, NULL); tty_ldisc_flush(speakup_tty); tty_unlock(speakup_tty); tty_kclose(speakup_tty); } EXPORT_SYMBOL_GPL(spk_ttyio_release); const char *spk_ttyio_synth_immediate(struct spk_synth *synth, const char *buff) { u_char ch; while ((ch = *buff)) { if (ch == '\n') ch = synth->procspeech; if (tty_write_room(speakup_tty) < 1 || !synth->io_ops->synth_out(synth, ch)) return buff; buff++; } return NULL; } EXPORT_SYMBOL_GPL(spk_ttyio_synth_immediate);
./CrossVul/dataset_final_sorted/CWE-763/c/good_4453_0
crossvul-cpp_data_bad_4273_0
/* ** $Id: lgc.c $ ** Garbage Collector ** See Copyright Notice in lua.h */ #define lgc_c #define LUA_CORE #include "lprefix.h" #include <stdio.h> #include <string.h> #include "lua.h" #include "ldebug.h" #include "ldo.h" #include "lfunc.h" #include "lgc.h" #include "lmem.h" #include "lobject.h" #include "lstate.h" #include "lstring.h" #include "ltable.h" #include "ltm.h" /* ** Maximum number of elements to sweep in each single step. ** (Large enough to dissipate fixed overheads but small enough ** to allow small steps for the collector.) */ #define GCSWEEPMAX 100 /* ** Maximum number of finalizers to call in each single step. */ #define GCFINMAX 10 /* ** Cost of calling one finalizer. */ #define GCFINALIZECOST 50 /* ** The equivalent, in bytes, of one unit of "work" (visiting a slot, ** sweeping an object, etc.) */ #define WORK2MEM sizeof(TValue) /* ** macro to adjust 'pause': 'pause' is actually used like ** 'pause / PAUSEADJ' (value chosen by tests) */ #define PAUSEADJ 100 /* mask to erase all color bits (plus gen. related stuff) */ #define maskcolors (~(bitmask(BLACKBIT) | WHITEBITS | AGEBITS)) /* macro to erase all color bits then sets only the current white bit */ #define makewhite(g,x) \ (x->marked = cast_byte((x->marked & maskcolors) | luaC_white(g))) #define white2gray(x) resetbits(x->marked, WHITEBITS) #define black2gray(x) resetbit(x->marked, BLACKBIT) #define valiswhite(x) (iscollectable(x) && iswhite(gcvalue(x))) #define keyiswhite(n) (keyiscollectable(n) && iswhite(gckey(n))) #define checkconsistency(obj) \ lua_longassert(!iscollectable(obj) || righttt(obj)) /* ** Protected access to objects in values */ #define gcvalueN(o) (iscollectable(o) ? 
gcvalue(o) : NULL) #define markvalue(g,o) { checkconsistency(o); \ if (valiswhite(o)) reallymarkobject(g,gcvalue(o)); } #define markkey(g, n) { if keyiswhite(n) reallymarkobject(g,gckey(n)); } #define markobject(g,t) { if (iswhite(t)) reallymarkobject(g, obj2gco(t)); } /* ** mark an object that can be NULL (either because it is really optional, ** or it was stripped as debug info, or inside an uncompleted structure) */ #define markobjectN(g,t) { if (t) markobject(g,t); } static void reallymarkobject (global_State *g, GCObject *o); static lu_mem atomic (lua_State *L); static void entersweep (lua_State *L); /* ** {====================================================== ** Generic functions ** ======================================================= */ /* ** one after last element in a hash array */ #define gnodelast(h) gnode(h, cast_sizet(sizenode(h))) static GCObject **getgclist (GCObject *o) { switch (o->tt) { case LUA_VTABLE: return &gco2t(o)->gclist; case LUA_VLCL: return &gco2lcl(o)->gclist; case LUA_VCCL: return &gco2ccl(o)->gclist; case LUA_VTHREAD: return &gco2th(o)->gclist; case LUA_VPROTO: return &gco2p(o)->gclist; case LUA_VUSERDATA: { Udata *u = gco2u(o); lua_assert(u->nuvalue > 0); return &u->gclist; } default: lua_assert(0); return 0; } } /* ** Link a collectable object 'o' with a known type into list pointed by 'p'. */ #define linkgclist(o,p) ((o)->gclist = (p), (p) = obj2gco(o)) /* ** Link a generic collectable object 'o' into list pointed by 'p'. */ #define linkobjgclist(o,p) (*getgclist(o) = (p), (p) = obj2gco(o)) /* ** Clear keys for empty entries in tables. If entry is empty ** and its key is not marked, mark its entry as dead. This allows the ** collection of the key, but keeps its entry in the table (its removal ** could break a chain). The main feature of a dead key is that it must ** be different from any other value, to do not disturb searches. 
** Other places never manipulate dead keys, because its associated empty ** value is enough to signal that the entry is logically empty. */ static void clearkey (Node *n) { lua_assert(isempty(gval(n))); if (keyiswhite(n)) setdeadkey(n); /* unused and unmarked key; remove it */ } /* ** tells whether a key or value can be cleared from a weak ** table. Non-collectable objects are never removed from weak ** tables. Strings behave as 'values', so are never removed too. for ** other objects: if really collected, cannot keep them; for objects ** being finalized, keep them in keys, but not in values */ static int iscleared (global_State *g, const GCObject *o) { if (o == NULL) return 0; /* non-collectable value */ else if (novariant(o->tt) == LUA_TSTRING) { markobject(g, o); /* strings are 'values', so are never weak */ return 0; } else return iswhite(o); } /* ** barrier that moves collector forward, that is, mark the white object ** 'v' being pointed by the black object 'o'. (If in sweep phase, clear ** the black object to white [sweep it] to avoid other barrier calls for ** this same object.) In the generational mode, 'v' must also become ** old, if 'o' is old; however, it cannot be changed directly to OLD, ** because it may still point to non-old objects. So, it is marked as ** OLD0. In the next cycle it will become OLD1, and in the next it ** will finally become OLD (regular old). */ void luaC_barrier_ (lua_State *L, GCObject *o, GCObject *v) { global_State *g = G(L); lua_assert(isblack(o) && iswhite(v) && !isdead(g, v) && !isdead(g, o)); if (keepinvariant(g)) { /* must keep invariant? */ reallymarkobject(g, v); /* restore invariant */ if (isold(o)) { lua_assert(!isold(v)); /* white object could not be old */ setage(v, G_OLD0); /* restore generational invariant */ } } else { /* sweep phase */ lua_assert(issweepphase(g)); makewhite(g, o); /* mark main obj. 
as white to avoid other barriers */ } } /* ** barrier that moves collector backward, that is, mark the black object ** pointing to a white object as gray again. */ void luaC_barrierback_ (lua_State *L, GCObject *o) { global_State *g = G(L); lua_assert(isblack(o) && !isdead(g, o)); lua_assert(g->gckind != KGC_GEN || (isold(o) && getage(o) != G_TOUCHED1)); if (getage(o) != G_TOUCHED2) /* not already in gray list? */ linkobjgclist(o, g->grayagain); /* link it in 'grayagain' */ black2gray(o); /* make object gray (again) */ setage(o, G_TOUCHED1); /* touched in current cycle */ } void luaC_fix (lua_State *L, GCObject *o) { global_State *g = G(L); lua_assert(g->allgc == o); /* object must be 1st in 'allgc' list! */ white2gray(o); /* they will be gray forever */ setage(o, G_OLD); /* and old forever */ g->allgc = o->next; /* remove object from 'allgc' list */ o->next = g->fixedgc; /* link it to 'fixedgc' list */ g->fixedgc = o; } /* ** create a new collectable object (with given type and size) and link ** it to 'allgc' list. */ GCObject *luaC_newobj (lua_State *L, int tt, size_t sz) { global_State *g = G(L); GCObject *o = cast(GCObject *, luaM_newobject(L, novariant(tt), sz)); o->marked = luaC_white(g); o->tt = tt; o->next = g->allgc; g->allgc = o; return o; } /* }====================================================== */ /* ** {====================================================== ** Mark functions ** ======================================================= */ /* ** Mark an object. Userdata, strings, and closed upvalues are visited ** and turned black here. Other objects are marked gray and added ** to appropriate list to be visited (and turned black) later. (Open ** upvalues are already linked in 'headuv' list. They are kept gray ** to avoid barriers, as their values will be revisited by the thread.) 
*/ static void reallymarkobject (global_State *g, GCObject *o) { white2gray(o); switch (o->tt) { case LUA_VSHRSTR: case LUA_VLNGSTR: { gray2black(o); break; } case LUA_VUPVAL: { UpVal *uv = gco2upv(o); if (!upisopen(uv)) /* open upvalues are kept gray */ gray2black(o); markvalue(g, uv->v); /* mark its content */ break; } case LUA_VUSERDATA: { Udata *u = gco2u(o); if (u->nuvalue == 0) { /* no user values? */ markobjectN(g, u->metatable); /* mark its metatable */ gray2black(o); /* nothing else to mark */ break; } /* else... */ } /* FALLTHROUGH */ case LUA_VLCL: case LUA_VCCL: case LUA_VTABLE: case LUA_VTHREAD: case LUA_VPROTO: { linkobjgclist(o, g->gray); break; } default: lua_assert(0); break; } } /* ** mark metamethods for basic types */ static void markmt (global_State *g) { int i; for (i=0; i < LUA_NUMTAGS; i++) markobjectN(g, g->mt[i]); } /* ** mark all objects in list of being-finalized */ static lu_mem markbeingfnz (global_State *g) { GCObject *o; lu_mem count = 0; for (o = g->tobefnz; o != NULL; o = o->next) { count++; markobject(g, o); } return count; } /* ** Mark all values stored in marked open upvalues from non-marked threads. ** (Values from marked threads were already marked when traversing the ** thread.) Remove from the list threads that no longer have upvalues and ** not-marked threads. */ static int remarkupvals (global_State *g) { lua_State *thread; lua_State **p = &g->twups; int work = 0; while ((thread = *p) != NULL) { work++; lua_assert(!isblack(thread)); /* threads are never black */ if (isgray(thread) && thread->openupval != NULL) p = &thread->twups; /* keep marked thread with upvalues in the list */ else { /* thread is not marked or without upvalues */ UpVal *uv; *p = thread->twups; /* remove thread from the list */ thread->twups = thread; /* mark that it is out of list */ for (uv = thread->openupval; uv != NULL; uv = uv->u.open.next) { work++; if (!iswhite(uv)) /* upvalue already visited? 
*/ markvalue(g, uv->v); /* mark its value */ } } } return work; } /* ** mark root set and reset all gray lists, to start a new collection */ static void restartcollection (global_State *g) { g->gray = g->grayagain = NULL; g->weak = g->allweak = g->ephemeron = NULL; markobject(g, g->mainthread); markvalue(g, &g->l_registry); markmt(g); markbeingfnz(g); /* mark any finalizing object left from previous cycle */ } /* }====================================================== */ /* ** {====================================================== ** Traverse functions ** ======================================================= */ /* ** Traverse a table with weak values and link it to proper list. During ** propagate phase, keep it in 'grayagain' list, to be revisited in the ** atomic phase. In the atomic phase, if table has any white value, ** put it in 'weak' list, to be cleared. */ static void traverseweakvalue (global_State *g, Table *h) { Node *n, *limit = gnodelast(h); /* if there is array part, assume it may have white values (it is not worth traversing it now just to check) */ int hasclears = (h->alimit > 0); for (n = gnode(h, 0); n < limit; n++) { /* traverse hash part */ if (isempty(gval(n))) /* entry is empty? */ clearkey(n); /* clear its key */ else { lua_assert(!keyisnil(n)); markkey(g, n); if (!hasclears && iscleared(g, gcvalueN(gval(n)))) /* a white value? */ hasclears = 1; /* table will have to be cleared */ } } if (g->gcstate == GCSatomic && hasclears) linkgclist(h, g->weak); /* has to be cleared later */ else linkgclist(h, g->grayagain); /* must retraverse it in atomic phase */ } /* ** Traverse an ephemeron table and link it to proper list. Returns true ** iff any object was marked during this traversal (which implies that ** convergence has to continue). During propagation phase, keep table ** in 'grayagain' list, to be visited again in the atomic phase. 
In ** the atomic phase, if table has any white->white entry, it has to ** be revisited during ephemeron convergence (as that key may turn ** black). Otherwise, if it has any white key, table has to be cleared ** (in the atomic phase). In generational mode, it (like all visited ** tables) must be kept in some gray list for post-processing. */ static int traverseephemeron (global_State *g, Table *h, int inv) { int marked = 0; /* true if an object is marked in this traversal */ int hasclears = 0; /* true if table has white keys */ int hasww = 0; /* true if table has entry "white-key -> white-value" */ unsigned int i; unsigned int asize = luaH_realasize(h); unsigned int nsize = sizenode(h); /* traverse array part */ for (i = 0; i < asize; i++) { if (valiswhite(&h->array[i])) { marked = 1; reallymarkobject(g, gcvalue(&h->array[i])); } } /* traverse hash part; if 'inv', traverse descending (see 'convergeephemerons') */ for (i = 0; i < nsize; i++) { Node *n = inv ? gnode(h, nsize - 1 - i) : gnode(h, i); if (isempty(gval(n))) /* entry is empty? */ clearkey(n); /* clear its key */ else if (iscleared(g, gckeyN(n))) { /* key is not marked (yet)? */ hasclears = 1; /* table must be cleared */ if (valiswhite(gval(n))) /* value not marked yet? */ hasww = 1; /* white-white entry */ } else if (valiswhite(gval(n))) { /* value not marked yet? */ marked = 1; reallymarkobject(g, gcvalue(gval(n))); /* mark it now */ } } /* link table into proper list */ if (g->gcstate == GCSpropagate) linkgclist(h, g->grayagain); /* must retraverse it in atomic phase */ else if (hasww) /* table has white->white entries? */ linkgclist(h, g->ephemeron); /* have to propagate again */ else if (hasclears) /* table has white keys? 
*/ linkgclist(h, g->allweak); /* may have to clean white keys */ else if (g->gckind == KGC_GEN) linkgclist(h, g->grayagain); /* keep it in some list */ else gray2black(h); return marked; } static void traversestrongtable (global_State *g, Table *h) { Node *n, *limit = gnodelast(h); unsigned int i; unsigned int asize = luaH_realasize(h); for (i = 0; i < asize; i++) /* traverse array part */ markvalue(g, &h->array[i]); for (n = gnode(h, 0); n < limit; n++) { /* traverse hash part */ if (isempty(gval(n))) /* entry is empty? */ clearkey(n); /* clear its key */ else { lua_assert(!keyisnil(n)); markkey(g, n); markvalue(g, gval(n)); } } if (g->gckind == KGC_GEN) { linkgclist(h, g->grayagain); /* keep it in some gray list */ black2gray(h); } } static lu_mem traversetable (global_State *g, Table *h) { const char *weakkey, *weakvalue; const TValue *mode = gfasttm(g, h->metatable, TM_MODE); markobjectN(g, h->metatable); if (mode && ttisstring(mode) && /* is there a weak mode? */ (cast_void(weakkey = strchr(svalue(mode), 'k')), cast_void(weakvalue = strchr(svalue(mode), 'v')), (weakkey || weakvalue))) { /* is really weak? */ black2gray(h); /* keep table gray */ if (!weakkey) /* strong keys? */ traverseweakvalue(g, h); else if (!weakvalue) /* strong values? */ traverseephemeron(g, h, 0); else /* all weak */ linkgclist(h, g->allweak); /* nothing to traverse now */ } else /* not weak */ traversestrongtable(g, h); return 1 + h->alimit + 2 * allocsizenode(h); } static int traverseudata (global_State *g, Udata *u) { int i; markobjectN(g, u->metatable); /* mark its metatable */ for (i = 0; i < u->nuvalue; i++) markvalue(g, &u->uv[i].uv); if (g->gckind == KGC_GEN) { linkgclist(u, g->grayagain); /* keep it in some gray list */ black2gray(u); } return 1 + u->nuvalue; } /* ** Traverse a prototype. 
(While a prototype is being build, its ** arrays can be larger than needed; the extra slots are filled with ** NULL, so the use of 'markobjectN') */ static int traverseproto (global_State *g, Proto *f) { int i; markobjectN(g, f->source); for (i = 0; i < f->sizek; i++) /* mark literals */ markvalue(g, &f->k[i]); for (i = 0; i < f->sizeupvalues; i++) /* mark upvalue names */ markobjectN(g, f->upvalues[i].name); for (i = 0; i < f->sizep; i++) /* mark nested protos */ markobjectN(g, f->p[i]); for (i = 0; i < f->sizelocvars; i++) /* mark local-variable names */ markobjectN(g, f->locvars[i].varname); return 1 + f->sizek + f->sizeupvalues + f->sizep + f->sizelocvars; } static int traverseCclosure (global_State *g, CClosure *cl) { int i; for (i = 0; i < cl->nupvalues; i++) /* mark its upvalues */ markvalue(g, &cl->upvalue[i]); return 1 + cl->nupvalues; } /* ** Traverse a Lua closure, marking its prototype and its upvalues. ** (Both can be NULL while closure is being created.) */ static int traverseLclosure (global_State *g, LClosure *cl) { int i; markobjectN(g, cl->p); /* mark its prototype */ for (i = 0; i < cl->nupvalues; i++) { /* visit its upvalues */ UpVal *uv = cl->upvals[i]; markobjectN(g, uv); /* mark upvalue */ } return 1 + cl->nupvalues; } /* ** Traverse a thread, marking the elements in the stack up to its top ** and cleaning the rest of the stack in the final traversal. ** That ensures that the entire stack have valid (non-dead) objects. */ static int traversethread (global_State *g, lua_State *th) { UpVal *uv; StkId o = th->stack; if (o == NULL) return 1; /* stack not completely built yet */ lua_assert(g->gcstate == GCSatomic || th->openupval == NULL || isintwups(th)); for (; o < th->top; o++) /* mark live elements in the stack */ markvalue(g, s2v(o)); for (uv = th->openupval; uv != NULL; uv = uv->u.open.next) markobject(g, uv); /* open upvalues cannot be collected */ if (g->gcstate == GCSatomic) { /* final traversal? 
*/ StkId lim = th->stack + th->stacksize; /* real end of stack */ for (; o < lim; o++) /* clear not-marked stack slice */ setnilvalue(s2v(o)); /* 'remarkupvals' may have removed thread from 'twups' list */ if (!isintwups(th) && th->openupval != NULL) { th->twups = g->twups; /* link it back to the list */ g->twups = th; } } else if (!g->gcemergency) luaD_shrinkstack(th); /* do not change stack in emergency cycle */ return 1 + th->stacksize; } /* ** traverse one gray object, turning it to black (except for threads, ** which are always gray). */ static lu_mem propagatemark (global_State *g) { GCObject *o = g->gray; gray2black(o); g->gray = *getgclist(o); /* remove from 'gray' list */ switch (o->tt) { case LUA_VTABLE: return traversetable(g, gco2t(o)); case LUA_VUSERDATA: return traverseudata(g, gco2u(o)); case LUA_VLCL: return traverseLclosure(g, gco2lcl(o)); case LUA_VCCL: return traverseCclosure(g, gco2ccl(o)); case LUA_VPROTO: return traverseproto(g, gco2p(o)); case LUA_VTHREAD: { lua_State *th = gco2th(o); linkgclist(th, g->grayagain); /* insert into 'grayagain' list */ black2gray(o); return traversethread(g, th); } default: lua_assert(0); return 0; } } static lu_mem propagateall (global_State *g) { lu_mem tot = 0; while (g->gray) tot += propagatemark(g); return tot; } /* ** Traverse all ephemeron tables propagating marks from keys to values. ** Repeat until it converges, that is, nothing new is marked. 'dir' ** inverts the direction of the traversals, trying to speed up ** convergence on chains in the same table. ** */ static void convergeephemerons (global_State *g) { int changed; int dir = 0; do { GCObject *w; GCObject *next = g->ephemeron; /* get ephemeron list */ g->ephemeron = NULL; /* tables may return to this list when traversed */ changed = 0; while ((w = next) != NULL) { /* for each ephemeron table */ next = gco2t(w)->gclist; /* list is rebuilt during loop */ if (traverseephemeron(g, gco2t(w), dir)) { /* marked some value? 
*/ propagateall(g); /* propagate changes */ changed = 1; /* will have to revisit all ephemeron tables */ } } dir = !dir; /* invert direction next time */ } while (changed); /* repeat until no more changes */ } /* }====================================================== */ /* ** {====================================================== ** Sweep Functions ** ======================================================= */ /* ** clear entries with unmarked keys from all weaktables in list 'l' */ static void clearbykeys (global_State *g, GCObject *l) { for (; l; l = gco2t(l)->gclist) { Table *h = gco2t(l); Node *limit = gnodelast(h); Node *n; for (n = gnode(h, 0); n < limit; n++) { if (iscleared(g, gckeyN(n))) /* unmarked key? */ setempty(gval(n)); /* remove entry */ if (isempty(gval(n))) /* is entry empty? */ clearkey(n); /* clear its key */ } } } /* ** clear entries with unmarked values from all weaktables in list 'l' up ** to element 'f' */ static void clearbyvalues (global_State *g, GCObject *l, GCObject *f) { for (; l != f; l = gco2t(l)->gclist) { Table *h = gco2t(l); Node *n, *limit = gnodelast(h); unsigned int i; unsigned int asize = luaH_realasize(h); for (i = 0; i < asize; i++) { TValue *o = &h->array[i]; if (iscleared(g, gcvalueN(o))) /* value was collected? */ setempty(o); /* remove entry */ } for (n = gnode(h, 0); n < limit; n++) { if (iscleared(g, gcvalueN(gval(n)))) /* unmarked value? */ setempty(gval(n)); /* remove entry */ if (isempty(gval(n))) /* is entry empty? 
*/ clearkey(n); /* clear its key */ } } } static void freeupval (lua_State *L, UpVal *uv) { if (upisopen(uv)) luaF_unlinkupval(uv); luaM_free(L, uv); } static void freeobj (lua_State *L, GCObject *o) { switch (o->tt) { case LUA_VPROTO: luaF_freeproto(L, gco2p(o)); break; case LUA_VUPVAL: freeupval(L, gco2upv(o)); break; case LUA_VLCL: luaM_freemem(L, o, sizeLclosure(gco2lcl(o)->nupvalues)); break; case LUA_VCCL: luaM_freemem(L, o, sizeCclosure(gco2ccl(o)->nupvalues)); break; case LUA_VTABLE: luaH_free(L, gco2t(o)); break; case LUA_VTHREAD: luaE_freethread(L, gco2th(o)); break; case LUA_VUSERDATA: { Udata *u = gco2u(o); luaM_freemem(L, o, sizeudata(u->nuvalue, u->len)); break; } case LUA_VSHRSTR: luaS_remove(L, gco2ts(o)); /* remove it from hash table */ luaM_freemem(L, o, sizelstring(gco2ts(o)->shrlen)); break; case LUA_VLNGSTR: luaM_freemem(L, o, sizelstring(gco2ts(o)->u.lnglen)); break; default: lua_assert(0); } } /* ** sweep at most 'countin' elements from a list of GCObjects erasing dead ** objects, where a dead object is one marked with the old (non current) ** white; change all non-dead objects back to white, preparing for next ** collection cycle. Return where to continue the traversal or NULL if ** list is finished. ('*countout' gets the number of elements traversed.) */ static GCObject **sweeplist (lua_State *L, GCObject **p, int countin, int *countout) { global_State *g = G(L); int ow = otherwhite(g); int i; int white = luaC_white(g); /* current white */ for (i = 0; *p != NULL && i < countin; i++) { GCObject *curr = *p; int marked = curr->marked; if (isdeadm(ow, marked)) { /* is 'curr' dead? */ *p = curr->next; /* remove 'curr' from list */ freeobj(L, curr); /* erase 'curr' */ } else { /* change mark to 'white' */ curr->marked = cast_byte((marked & maskcolors) | white); p = &curr->next; /* go to next element */ } } if (countout) *countout = i; /* number of elements traversed */ return (*p == NULL) ? 
NULL : p; } /* ** sweep a list until a live object (or end of list) */ static GCObject **sweeptolive (lua_State *L, GCObject **p) { GCObject **old = p; do { p = sweeplist(L, p, 1, NULL); } while (p == old); return p; } /* }====================================================== */ /* ** {====================================================== ** Finalization ** ======================================================= */ /* ** If possible, shrink string table. */ static void checkSizes (lua_State *L, global_State *g) { if (!g->gcemergency) { if (g->strt.nuse < g->strt.size / 4) { /* string table too big? */ l_mem olddebt = g->GCdebt; luaS_resize(L, g->strt.size / 2); g->GCestimate += g->GCdebt - olddebt; /* correct estimate */ } } } /* ** Get the next udata to be finalized from the 'tobefnz' list, and ** link it back into the 'allgc' list. */ static GCObject *udata2finalize (global_State *g) { GCObject *o = g->tobefnz; /* get first element */ lua_assert(tofinalize(o)); g->tobefnz = o->next; /* remove it from 'tobefnz' list */ o->next = g->allgc; /* return it to 'allgc' list */ g->allgc = o; resetbit(o->marked, FINALIZEDBIT); /* object is "normal" again */ if (issweepphase(g)) makewhite(g, o); /* "sweep" object */ return o; } static void dothecall (lua_State *L, void *ud) { UNUSED(ud); luaD_callnoyield(L, L->top - 2, 0); } static void GCTM (lua_State *L) { global_State *g = G(L); const TValue *tm; TValue v; lua_assert(!g->gcemergency); setgcovalue(L, &v, udata2finalize(g)); tm = luaT_gettmbyobj(L, &v, TM_GC); if (!notm(tm)) { /* is there a finalizer? */ int status; lu_byte oldah = L->allowhook; int running = g->gcrunning; L->allowhook = 0; /* stop debug hooks during GC metamethod */ g->gcrunning = 0; /* avoid GC steps */ setobj2s(L, L->top++, tm); /* push finalizer... */ setobj2s(L, L->top++, &v); /* ... 
and its argument */ L->ci->callstatus |= CIST_FIN; /* will run a finalizer */ status = luaD_pcall(L, dothecall, NULL, savestack(L, L->top - 2), 0); L->ci->callstatus &= ~CIST_FIN; /* not running a finalizer anymore */ L->allowhook = oldah; /* restore hooks */ g->gcrunning = running; /* restore state */ if (unlikely(status != LUA_OK)) { /* error while running __gc? */ luaE_warnerror(L, "__gc metamethod"); L->top--; /* pops error object */ } } } /* ** Call a few finalizers */ static int runafewfinalizers (lua_State *L, int n) { global_State *g = G(L); int i; for (i = 0; i < n && g->tobefnz; i++) GCTM(L); /* call one finalizer */ return i; } /* ** call all pending finalizers */ static void callallpendingfinalizers (lua_State *L) { global_State *g = G(L); while (g->tobefnz) GCTM(L); } /* ** find last 'next' field in list 'p' list (to add elements in its end) */ static GCObject **findlast (GCObject **p) { while (*p != NULL) p = &(*p)->next; return p; } /* ** Move all unreachable objects (or 'all' objects) that need ** finalization from list 'finobj' to list 'tobefnz' (to be finalized). ** (Note that objects after 'finobjold' cannot be white, so they ** don't need to be traversed. In incremental mode, 'finobjold' is NULL, ** so the whole list is traversed.) */ static void separatetobefnz (global_State *g, int all) { GCObject *curr; GCObject **p = &g->finobj; GCObject **lastnext = findlast(&g->tobefnz); while ((curr = *p) != g->finobjold) { /* traverse all finalizable objects */ lua_assert(tofinalize(curr)); if (!(iswhite(curr) || all)) /* not being collected? */ p = &curr->next; /* don't bother with it */ else { if (curr == g->finobjsur) /* removing 'finobjsur'? 
*/ g->finobjsur = curr->next; /* correct it */ *p = curr->next; /* remove 'curr' from 'finobj' list */ curr->next = *lastnext; /* link at the end of 'tobefnz' list */ *lastnext = curr; lastnext = &curr->next; } } } /* ** if object 'o' has a finalizer, remove it from 'allgc' list (must ** search the list to find it) and link it in 'finobj' list. */ void luaC_checkfinalizer (lua_State *L, GCObject *o, Table *mt) { global_State *g = G(L); if (tofinalize(o) || /* obj. is already marked... */ gfasttm(g, mt, TM_GC) == NULL) /* or has no finalizer? */ return; /* nothing to be done */ else { /* move 'o' to 'finobj' list */ GCObject **p; if (issweepphase(g)) { makewhite(g, o); /* "sweep" object 'o' */ if (g->sweepgc == &o->next) /* should not remove 'sweepgc' object */ g->sweepgc = sweeptolive(L, g->sweepgc); /* change 'sweepgc' */ } else { /* correct pointers into 'allgc' list, if needed */ if (o == g->survival) g->survival = o->next; if (o == g->old) g->old = o->next; if (o == g->reallyold) g->reallyold = o->next; } /* search for pointer pointing to 'o' */ for (p = &g->allgc; *p != o; p = &(*p)->next) { /* empty */ } *p = o->next; /* remove 'o' from 'allgc' list */ o->next = g->finobj; /* link it in 'finobj' list */ g->finobj = o; l_setbit(o->marked, FINALIZEDBIT); /* mark it as such */ } } /* }====================================================== */ /* ** {====================================================== ** Generational Collector ** ======================================================= */ static void setpause (global_State *g); /* mask to erase all color bits, not changing gen-related stuff */ #define maskgencolors (~(bitmask(BLACKBIT) | WHITEBITS)) /* ** Sweep a list of objects, deleting dead ones and turning ** the non dead to old (without changing their colors). */ static void sweep2old (lua_State *L, GCObject **p) { GCObject *curr; while ((curr = *p) != NULL) { if (iswhite(curr)) { /* is 'curr' dead? 
*/ lua_assert(isdead(G(L), curr)); *p = curr->next; /* remove 'curr' from list */ freeobj(L, curr); /* erase 'curr' */ } else { /* all surviving objects become old */ setage(curr, G_OLD); p = &curr->next; /* go to next element */ } } } /* ** Sweep for generational mode. Delete dead objects. (Because the ** collection is not incremental, there are no "new white" objects ** during the sweep. So, any white object must be dead.) For ** non-dead objects, advance their ages and clear the color of ** new objects. (Old objects keep their colors.) */ static GCObject **sweepgen (lua_State *L, global_State *g, GCObject **p, GCObject *limit) { static const lu_byte nextage[] = { G_SURVIVAL, /* from G_NEW */ G_OLD1, /* from G_SURVIVAL */ G_OLD1, /* from G_OLD0 */ G_OLD, /* from G_OLD1 */ G_OLD, /* from G_OLD (do not change) */ G_TOUCHED1, /* from G_TOUCHED1 (do not change) */ G_TOUCHED2 /* from G_TOUCHED2 (do not change) */ }; int white = luaC_white(g); GCObject *curr; while ((curr = *p) != limit) { if (iswhite(curr)) { /* is 'curr' dead? */ lua_assert(!isold(curr) && isdead(g, curr)); *p = curr->next; /* remove 'curr' from list */ freeobj(L, curr); /* erase 'curr' */ } else { /* correct mark and age */ if (getage(curr) == G_NEW) curr->marked = cast_byte((curr->marked & maskgencolors) | white); setage(curr, nextage[getage(curr)]); p = &curr->next; /* go to next element */ } } return p; } /* ** Traverse a list making all its elements white and clearing their ** age. */ static void whitelist (global_State *g, GCObject *p) { int white = luaC_white(g); for (; p != NULL; p = p->next) p->marked = cast_byte((p->marked & maskcolors) | white); } /* ** Correct a list of gray objects. ** Because this correction is done after sweeping, young objects might ** be turned white and still be in the list. They are only removed. ** For tables and userdata, advance 'touched1' to 'touched2'; 'touched2' ** objects become regular old and are removed from the list. 
** For threads, just remove white ones from the list. */ static GCObject **correctgraylist (GCObject **p) { GCObject *curr; while ((curr = *p) != NULL) { switch (curr->tt) { case LUA_VTABLE: case LUA_VUSERDATA: { GCObject **next = getgclist(curr); if (getage(curr) == G_TOUCHED1) { /* touched in this cycle? */ lua_assert(isgray(curr)); gray2black(curr); /* make it black, for next barrier */ changeage(curr, G_TOUCHED1, G_TOUCHED2); p = next; /* go to next element */ } else { /* not touched in this cycle */ if (!iswhite(curr)) { /* not white? */ lua_assert(isold(curr)); if (getage(curr) == G_TOUCHED2) /* advance from G_TOUCHED2... */ changeage(curr, G_TOUCHED2, G_OLD); /* ... to G_OLD */ gray2black(curr); /* make it black */ } /* else, object is white: just remove it from this list */ *p = *next; /* remove 'curr' from gray list */ } break; } case LUA_VTHREAD: { lua_State *th = gco2th(curr); lua_assert(!isblack(th)); if (iswhite(th)) /* new object? */ *p = th->gclist; /* remove from gray list */ else /* old threads remain gray */ p = &th->gclist; /* go to next element */ break; } default: lua_assert(0); /* nothing more could be gray here */ } } return p; } /* ** Correct all gray lists, coalescing them into 'grayagain'. */ static void correctgraylists (global_State *g) { GCObject **list = correctgraylist(&g->grayagain); *list = g->weak; g->weak = NULL; list = correctgraylist(list); *list = g->allweak; g->allweak = NULL; list = correctgraylist(list); *list = g->ephemeron; g->ephemeron = NULL; correctgraylist(list); } /* ** Mark 'OLD1' objects when starting a new young collection. ** Gray objects are already in some gray list, and so will be visited ** in the atomic step. 
*/ static void markold (global_State *g, GCObject *from, GCObject *to) { GCObject *p; for (p = from; p != to; p = p->next) { if (getage(p) == G_OLD1) { lua_assert(!iswhite(p)); if (isblack(p)) { black2gray(p); /* should be '2white', but gray works too */ reallymarkobject(g, p); } } } } /* ** Finish a young-generation collection. */ static void finishgencycle (lua_State *L, global_State *g) { correctgraylists(g); checkSizes(L, g); g->gcstate = GCSpropagate; /* skip restart */ if (!g->gcemergency) callallpendingfinalizers(L); } /* ** Does a young collection. First, mark 'OLD1' objects. Then does the ** atomic step. Then, sweep all lists and advance pointers. Finally, ** finish the collection. */ static void youngcollection (lua_State *L, global_State *g) { GCObject **psurvival; /* to point to first non-dead survival object */ lua_assert(g->gcstate == GCSpropagate); markold(g, g->allgc, g->reallyold); markold(g, g->finobj, g->finobjrold); atomic(L); /* sweep nursery and get a pointer to its last live element */ psurvival = sweepgen(L, g, &g->allgc, g->survival); /* sweep 'survival' and 'old' */ sweepgen(L, g, psurvival, g->reallyold); g->reallyold = g->old; g->old = *psurvival; /* 'survival' survivals are old now */ g->survival = g->allgc; /* all news are survivals */ /* repeat for 'finobj' lists */ psurvival = sweepgen(L, g, &g->finobj, g->finobjsur); /* sweep 'survival' and 'old' */ sweepgen(L, g, psurvival, g->finobjrold); g->finobjrold = g->finobjold; g->finobjold = *psurvival; /* 'survival' survivals are old now */ g->finobjsur = g->finobj; /* all news are survivals */ sweepgen(L, g, &g->tobefnz, NULL); finishgencycle(L, g); } static void atomic2gen (lua_State *L, global_State *g) { /* sweep all elements making them old */ sweep2old(L, &g->allgc); /* everything alive now is old */ g->reallyold = g->old = g->survival = g->allgc; /* repeat for 'finobj' lists */ sweep2old(L, &g->finobj); g->finobjrold = g->finobjold = g->finobjsur = g->finobj; sweep2old(L, 
&g->tobefnz); g->gckind = KGC_GEN; g->lastatomic = 0; g->GCestimate = gettotalbytes(g); /* base for memory control */ finishgencycle(L, g); } /* ** Enter generational mode. Must go until the end of an atomic cycle ** to ensure that all threads and weak tables are in the gray lists. ** Then, turn all objects into old and finishes the collection. */ static lu_mem entergen (lua_State *L, global_State *g) { lu_mem numobjs; luaC_runtilstate(L, bitmask(GCSpause)); /* prepare to start a new cycle */ luaC_runtilstate(L, bitmask(GCSpropagate)); /* start new cycle */ numobjs = atomic(L); /* propagates all and then do the atomic stuff */ atomic2gen(L, g); return numobjs; } /* ** Enter incremental mode. Turn all objects white, make all ** intermediate lists point to NULL (to avoid invalid pointers), ** and go to the pause state. */ static void enterinc (global_State *g) { whitelist(g, g->allgc); g->reallyold = g->old = g->survival = NULL; whitelist(g, g->finobj); whitelist(g, g->tobefnz); g->finobjrold = g->finobjold = g->finobjsur = NULL; g->gcstate = GCSpause; g->gckind = KGC_INC; g->lastatomic = 0; } /* ** Change collector mode to 'newmode'. */ void luaC_changemode (lua_State *L, int newmode) { global_State *g = G(L); if (newmode != g->gckind) { if (newmode == KGC_GEN) /* entering generational mode? */ entergen(L, g); else enterinc(g); /* entering incremental mode */ } g->lastatomic = 0; } /* ** Does a full collection in generational mode. */ static lu_mem fullgen (lua_State *L, global_State *g) { enterinc(g); return entergen(L, g); } /* ** Set debt for the next minor collection, which will happen when ** memory grows 'genminormul'%. */ static void setminordebt (global_State *g) { luaE_setdebt(g, -(cast(l_mem, (gettotalbytes(g) / 100)) * g->genminormul)); } /* ** Does a major collection after last collection was a "bad collection". ** ** When the program is building a big structure, it allocates lots of ** memory but generates very little garbage. 
In those scenarios, ** the generational mode just wastes time doing small collections, and ** major collections are frequently what we call a "bad collection", a ** collection that frees too few objects. To avoid the cost of switching ** between generational mode and the incremental mode needed for full ** (major) collections, the collector tries to stay in incremental mode ** after a bad collection, and to switch back to generational mode only ** after a "good" collection (one that traverses less than 9/8 objects ** of the previous one). ** The collector must choose whether to stay in incremental mode or to ** switch back to generational mode before sweeping. At this point, it ** does not know the real memory in use, so it cannot use memory to ** decide whether to return to generational mode. Instead, it uses the ** number of objects traversed (returned by 'atomic') as a proxy. The ** field 'g->lastatomic' keeps this count from the last collection. ** ('g->lastatomic != 0' also means that the last collection was bad.) */ static void stepgenfull (lua_State *L, global_State *g) { lu_mem newatomic; /* count of traversed objects */ lu_mem lastatomic = g->lastatomic; /* count from last collection */ if (g->gckind == KGC_GEN) /* still in generational mode? */ enterinc(g); /* enter incremental mode */ luaC_runtilstate(L, bitmask(GCSpropagate)); /* start new cycle */ newatomic = atomic(L); /* mark everybody */ if (newatomic < lastatomic + (lastatomic >> 3)) { /* good collection? */ atomic2gen(L, g); /* return to generational mode */ setminordebt(g); } else { /* another bad collection; stay in incremental mode */ g->GCestimate = gettotalbytes(g); /* first estimate */; entersweep(L); luaC_runtilstate(L, bitmask(GCSpause)); /* finish collection */ setpause(g); g->lastatomic = newatomic; } } /* ** Does a generational "step". ** Usually, this means doing a minor collection and setting the debt to ** make another collection when memory grows 'genminormul'% larger. 
** ** However, there are exceptions. If memory grows 'genmajormul'% ** larger than it was at the end of the last major collection (kept ** in 'g->GCestimate'), the function does a major collection. At the ** end, it checks whether the major collection was able to free a ** decent amount of memory (at least half the growth in memory since ** previous major collection). If so, the collector keeps its state, ** and the next collection will probably be minor again. Otherwise, ** we have what we call a "bad collection". In that case, set the field ** 'g->lastatomic' to signal that fact, so that the next collection will ** go to 'stepgenfull'. ** ** 'GCdebt <= 0' means an explicit call to GC step with "size" zero; ** in that case, do a minor collection. */ static void genstep (lua_State *L, global_State *g) { if (g->lastatomic != 0) /* last collection was a bad one? */ stepgenfull(L, g); /* do a full step */ else { lu_mem majorbase = g->GCestimate; /* memory after last major collection */ lu_mem majorinc = (majorbase / 100) * getgcparam(g->genmajormul); if (g->GCdebt > 0 && gettotalbytes(g) > majorbase + majorinc) { lu_mem numobjs = fullgen(L, g); /* do a major collection */ if (gettotalbytes(g) < majorbase + (majorinc / 2)) { /* collected at least half of memory growth since last major collection; keep doing minor collections */ setminordebt(g); } else { /* bad collection */ g->lastatomic = numobjs; /* signal that last collection was bad */ setpause(g); /* do a long wait for next (major) collection */ } } else { /* regular case; do a minor collection */ youngcollection(L, g); setminordebt(g); g->GCestimate = majorbase; /* preserve base value */ } } lua_assert(isdecGCmodegen(g)); } /* }====================================================== */ /* ** {====================================================== ** GC control ** ======================================================= */ /* ** Set the "time" to wait before starting a new GC cycle; cycle will ** start when memory 
use hits the threshold of ('estimate' * pause / ** PAUSEADJ). (Division by 'estimate' should be OK: it cannot be zero, ** because Lua cannot even start with less than PAUSEADJ bytes). */ static void setpause (global_State *g) { l_mem threshold, debt; int pause = getgcparam(g->gcpause); l_mem estimate = g->GCestimate / PAUSEADJ; /* adjust 'estimate' */ lua_assert(estimate > 0); threshold = (pause < MAX_LMEM / estimate) /* overflow? */ ? estimate * pause /* no overflow */ : MAX_LMEM; /* overflow; truncate to maximum */ debt = gettotalbytes(g) - threshold; if (debt > 0) debt = 0; luaE_setdebt(g, debt); } /* ** Enter first sweep phase. ** The call to 'sweeptolive' makes the pointer point to an object ** inside the list (instead of to the header), so that the real sweep do ** not need to skip objects created between "now" and the start of the ** real sweep. */ static void entersweep (lua_State *L) { global_State *g = G(L); g->gcstate = GCSswpallgc; lua_assert(g->sweepgc == NULL); g->sweepgc = sweeptolive(L, &g->allgc); } /* ** Delete all objects in list 'p' until (but not including) object ** 'limit'. */ static void deletelist (lua_State *L, GCObject *p, GCObject *limit) { while (p != limit) { GCObject *next = p->next; freeobj(L, p); p = next; } } /* ** Call all finalizers of the objects in the given Lua state, and ** then free all objects, except for the main thread. 
*/ void luaC_freeallobjects (lua_State *L) { global_State *g = G(L); luaC_changemode(L, KGC_INC); separatetobefnz(g, 1); /* separate all objects with finalizers */ lua_assert(g->finobj == NULL); callallpendingfinalizers(L); deletelist(L, g->allgc, obj2gco(g->mainthread)); deletelist(L, g->finobj, NULL); deletelist(L, g->fixedgc, NULL); /* collect fixed objects */ lua_assert(g->strt.nuse == 0); } static lu_mem atomic (lua_State *L) { global_State *g = G(L); lu_mem work = 0; GCObject *origweak, *origall; GCObject *grayagain = g->grayagain; /* save original list */ g->grayagain = NULL; lua_assert(g->ephemeron == NULL && g->weak == NULL); lua_assert(!iswhite(g->mainthread)); g->gcstate = GCSatomic; markobject(g, L); /* mark running thread */ /* registry and global metatables may be changed by API */ markvalue(g, &g->l_registry); markmt(g); /* mark global metatables */ work += propagateall(g); /* empties 'gray' list */ /* remark occasional upvalues of (maybe) dead threads */ work += remarkupvals(g); work += propagateall(g); /* propagate changes */ g->gray = grayagain; work += propagateall(g); /* traverse 'grayagain' list */ convergeephemerons(g); /* at this point, all strongly accessible objects are marked. */ /* Clear values from weak tables, before checking finalizers */ clearbyvalues(g, g->weak, NULL); clearbyvalues(g, g->allweak, NULL); origweak = g->weak; origall = g->allweak; separatetobefnz(g, 0); /* separate objects to be finalized */ work += markbeingfnz(g); /* mark objects that will be finalized */ work += propagateall(g); /* remark, to propagate 'resurrection' */ convergeephemerons(g); /* at this point, all resurrected objects are marked. 
*/ /* remove dead objects from weak tables */ clearbykeys(g, g->ephemeron); /* clear keys from all ephemeron tables */ clearbykeys(g, g->allweak); /* clear keys from all 'allweak' tables */ /* clear values from resurrected weak tables */ clearbyvalues(g, g->weak, origweak); clearbyvalues(g, g->allweak, origall); luaS_clearcache(g); g->currentwhite = cast_byte(otherwhite(g)); /* flip current white */ lua_assert(g->gray == NULL); return work; /* estimate of slots marked by 'atomic' */ } static int sweepstep (lua_State *L, global_State *g, int nextstate, GCObject **nextlist) { if (g->sweepgc) { l_mem olddebt = g->GCdebt; int count; g->sweepgc = sweeplist(L, g->sweepgc, GCSWEEPMAX, &count); g->GCestimate += g->GCdebt - olddebt; /* update estimate */ return count; } else { /* enter next state */ g->gcstate = nextstate; g->sweepgc = nextlist; return 0; /* no work done */ } } static lu_mem singlestep (lua_State *L) { global_State *g = G(L); switch (g->gcstate) { case GCSpause: { restartcollection(g); g->gcstate = GCSpropagate; return 1; } case GCSpropagate: { if (g->gray == NULL) { /* no more gray objects? 
*/ g->gcstate = GCSenteratomic; /* finish propagate phase */ return 0; } else return propagatemark(g); /* traverse one gray object */ } case GCSenteratomic: { lu_mem work = atomic(L); /* work is what was traversed by 'atomic' */ entersweep(L); g->GCestimate = gettotalbytes(g); /* first estimate */; return work; } case GCSswpallgc: { /* sweep "regular" objects */ return sweepstep(L, g, GCSswpfinobj, &g->finobj); } case GCSswpfinobj: { /* sweep objects with finalizers */ return sweepstep(L, g, GCSswptobefnz, &g->tobefnz); } case GCSswptobefnz: { /* sweep objects to be finalized */ return sweepstep(L, g, GCSswpend, NULL); } case GCSswpend: { /* finish sweeps */ checkSizes(L, g); g->gcstate = GCScallfin; return 0; } case GCScallfin: { /* call remaining finalizers */ if (g->tobefnz && !g->gcemergency) { int n = runafewfinalizers(L, GCFINMAX); return n * GCFINALIZECOST; } else { /* emergency mode or no more finalizers */ g->gcstate = GCSpause; /* finish collection */ return 0; } } default: lua_assert(0); return 0; } } /* ** advances the garbage collector until it reaches a state allowed ** by 'statemask' */ void luaC_runtilstate (lua_State *L, int statesmask) { global_State *g = G(L); while (!testbit(statesmask, g->gcstate)) singlestep(L); } /* ** Performs a basic incremental step. The debt and step size are ** converted from bytes to "units of work"; then the function loops ** running single steps until adding that many units of work or ** finishing a cycle (pause state). Finally, it sets the debt that ** controls when next step will be performed. */ static void incstep (lua_State *L, global_State *g) { int stepmul = (getgcparam(g->gcstepmul) | 1); /* avoid division by 0 */ l_mem debt = (g->GCdebt / WORK2MEM) * stepmul; l_mem stepsize = (g->gcstepsize <= log2maxs(l_mem)) ? 
((cast(l_mem, 1) << g->gcstepsize) / WORK2MEM) * stepmul : MAX_LMEM; /* overflow; keep maximum value */ do { /* repeat until pause or enough "credit" (negative debt) */ lu_mem work = singlestep(L); /* perform one single step */ debt -= work; } while (debt > -stepsize && g->gcstate != GCSpause); if (g->gcstate == GCSpause) setpause(g); /* pause until next cycle */ else { debt = (debt / stepmul) * WORK2MEM; /* convert 'work units' to bytes */ luaE_setdebt(g, debt); } } /* ** performs a basic GC step if collector is running */ void luaC_step (lua_State *L) { global_State *g = G(L); lua_assert(!g->gcemergency); if (g->gcrunning) { /* running? */ if(isdecGCmodegen(g)) genstep(L, g); else incstep(L, g); } } /* ** Perform a full collection in incremental mode. ** Before running the collection, check 'keepinvariant'; if it is true, ** there may be some objects marked as black, so the collector has ** to sweep all objects to turn them back to white (as white has not ** changed, nothing will be collected). */ static void fullinc (lua_State *L, global_State *g) { if (keepinvariant(g)) /* black objects? */ entersweep(L); /* sweep everything to turn them back to white */ /* finish any pending sweep phase to start a new cycle */ luaC_runtilstate(L, bitmask(GCSpause)); luaC_runtilstate(L, bitmask(GCScallfin)); /* run up to finalizers */ /* estimate must be correct after a full GC cycle */ lua_assert(g->GCestimate == gettotalbytes(g)); luaC_runtilstate(L, bitmask(GCSpause)); /* finish collection */ setpause(g); } /* ** Performs a full GC cycle; if 'isemergency', set a flag to avoid ** some operations which could change the interpreter state in some ** unexpected ways (running finalizers and shrinking some structures). 
*/ void luaC_fullgc (lua_State *L, int isemergency) { global_State *g = G(L); lua_assert(!g->gcemergency); g->gcemergency = isemergency; /* set flag */ if (g->gckind == KGC_INC) fullinc(L, g); else fullgen(L, g); g->gcemergency = 0; } /* }====================================================== */
./CrossVul/dataset_final_sorted/CWE-763/c/bad_4273_0
crossvul-cpp_data_good_2997_0
/* * pcrypt - Parallel crypto wrapper. * * Copyright (C) 2009 secunet Security Networks AG * Copyright (C) 2009 Steffen Klassert <steffen.klassert@secunet.com> * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. */ #include <crypto/algapi.h> #include <crypto/internal/aead.h> #include <linux/atomic.h> #include <linux/err.h> #include <linux/init.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/notifier.h> #include <linux/kobject.h> #include <linux/cpu.h> #include <crypto/pcrypt.h> struct padata_pcrypt { struct padata_instance *pinst; struct workqueue_struct *wq; /* * Cpumask for callback CPUs. It should be * equal to serial cpumask of corresponding padata instance, * so it is updated when padata notifies us about serial * cpumask change. * * cb_cpumask is protected by RCU. This fact prevents us from * using cpumask_var_t directly because the actual type of * cpumsak_var_t depends on kernel configuration(particularly on * CONFIG_CPUMASK_OFFSTACK macro). Depending on the configuration * cpumask_var_t may be either a pointer to the struct cpumask * or a variable allocated on the stack. Thus we can not safely use * cpumask_var_t with RCU operations such as rcu_assign_pointer or * rcu_dereference. So cpumask_var_t is wrapped with struct * pcrypt_cpumask which makes possible to use it with RCU. 
*/ struct pcrypt_cpumask { cpumask_var_t mask; } *cb_cpumask; struct notifier_block nblock; }; static struct padata_pcrypt pencrypt; static struct padata_pcrypt pdecrypt; static struct kset *pcrypt_kset; struct pcrypt_instance_ctx { struct crypto_aead_spawn spawn; atomic_t tfm_count; }; struct pcrypt_aead_ctx { struct crypto_aead *child; unsigned int cb_cpu; }; static int pcrypt_do_parallel(struct padata_priv *padata, unsigned int *cb_cpu, struct padata_pcrypt *pcrypt) { unsigned int cpu_index, cpu, i; struct pcrypt_cpumask *cpumask; cpu = *cb_cpu; rcu_read_lock_bh(); cpumask = rcu_dereference_bh(pcrypt->cb_cpumask); if (cpumask_test_cpu(cpu, cpumask->mask)) goto out; if (!cpumask_weight(cpumask->mask)) goto out; cpu_index = cpu % cpumask_weight(cpumask->mask); cpu = cpumask_first(cpumask->mask); for (i = 0; i < cpu_index; i++) cpu = cpumask_next(cpu, cpumask->mask); *cb_cpu = cpu; out: rcu_read_unlock_bh(); return padata_do_parallel(pcrypt->pinst, padata, cpu); } static int pcrypt_aead_setkey(struct crypto_aead *parent, const u8 *key, unsigned int keylen) { struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(parent); return crypto_aead_setkey(ctx->child, key, keylen); } static int pcrypt_aead_setauthsize(struct crypto_aead *parent, unsigned int authsize) { struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(parent); return crypto_aead_setauthsize(ctx->child, authsize); } static void pcrypt_aead_serial(struct padata_priv *padata) { struct pcrypt_request *preq = pcrypt_padata_request(padata); struct aead_request *req = pcrypt_request_ctx(preq); aead_request_complete(req->base.data, padata->info); } static void pcrypt_aead_done(struct crypto_async_request *areq, int err) { struct aead_request *req = areq->data; struct pcrypt_request *preq = aead_request_ctx(req); struct padata_priv *padata = pcrypt_request_padata(preq); padata->info = err; req->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; padata_do_serial(padata); } static void pcrypt_aead_enc(struct padata_priv *padata) { struct 
pcrypt_request *preq = pcrypt_padata_request(padata); struct aead_request *req = pcrypt_request_ctx(preq); padata->info = crypto_aead_encrypt(req); if (padata->info == -EINPROGRESS) return; padata_do_serial(padata); } static int pcrypt_aead_encrypt(struct aead_request *req) { int err; struct pcrypt_request *preq = aead_request_ctx(req); struct aead_request *creq = pcrypt_request_ctx(preq); struct padata_priv *padata = pcrypt_request_padata(preq); struct crypto_aead *aead = crypto_aead_reqtfm(req); struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(aead); u32 flags = aead_request_flags(req); memset(padata, 0, sizeof(struct padata_priv)); padata->parallel = pcrypt_aead_enc; padata->serial = pcrypt_aead_serial; aead_request_set_tfm(creq, ctx->child); aead_request_set_callback(creq, flags & ~CRYPTO_TFM_REQ_MAY_SLEEP, pcrypt_aead_done, req); aead_request_set_crypt(creq, req->src, req->dst, req->cryptlen, req->iv); aead_request_set_ad(creq, req->assoclen); err = pcrypt_do_parallel(padata, &ctx->cb_cpu, &pencrypt); if (!err) return -EINPROGRESS; return err; } static void pcrypt_aead_dec(struct padata_priv *padata) { struct pcrypt_request *preq = pcrypt_padata_request(padata); struct aead_request *req = pcrypt_request_ctx(preq); padata->info = crypto_aead_decrypt(req); if (padata->info == -EINPROGRESS) return; padata_do_serial(padata); } static int pcrypt_aead_decrypt(struct aead_request *req) { int err; struct pcrypt_request *preq = aead_request_ctx(req); struct aead_request *creq = pcrypt_request_ctx(preq); struct padata_priv *padata = pcrypt_request_padata(preq); struct crypto_aead *aead = crypto_aead_reqtfm(req); struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(aead); u32 flags = aead_request_flags(req); memset(padata, 0, sizeof(struct padata_priv)); padata->parallel = pcrypt_aead_dec; padata->serial = pcrypt_aead_serial; aead_request_set_tfm(creq, ctx->child); aead_request_set_callback(creq, flags & ~CRYPTO_TFM_REQ_MAY_SLEEP, pcrypt_aead_done, req); 
aead_request_set_crypt(creq, req->src, req->dst, req->cryptlen, req->iv); aead_request_set_ad(creq, req->assoclen); err = pcrypt_do_parallel(padata, &ctx->cb_cpu, &pdecrypt); if (!err) return -EINPROGRESS; return err; } static int pcrypt_aead_init_tfm(struct crypto_aead *tfm) { int cpu, cpu_index; struct aead_instance *inst = aead_alg_instance(tfm); struct pcrypt_instance_ctx *ictx = aead_instance_ctx(inst); struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(tfm); struct crypto_aead *cipher; cpu_index = (unsigned int)atomic_inc_return(&ictx->tfm_count) % cpumask_weight(cpu_online_mask); ctx->cb_cpu = cpumask_first(cpu_online_mask); for (cpu = 0; cpu < cpu_index; cpu++) ctx->cb_cpu = cpumask_next(ctx->cb_cpu, cpu_online_mask); cipher = crypto_spawn_aead(&ictx->spawn); if (IS_ERR(cipher)) return PTR_ERR(cipher); ctx->child = cipher; crypto_aead_set_reqsize(tfm, sizeof(struct pcrypt_request) + sizeof(struct aead_request) + crypto_aead_reqsize(cipher)); return 0; } static void pcrypt_aead_exit_tfm(struct crypto_aead *tfm) { struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(tfm); crypto_free_aead(ctx->child); } static void pcrypt_free(struct aead_instance *inst) { struct pcrypt_instance_ctx *ctx = aead_instance_ctx(inst); crypto_drop_aead(&ctx->spawn); kfree(inst); } static int pcrypt_init_instance(struct crypto_instance *inst, struct crypto_alg *alg) { if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, "pcrypt(%s)", alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME) return -ENAMETOOLONG; memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME); inst->alg.cra_priority = alg->cra_priority + 100; inst->alg.cra_blocksize = alg->cra_blocksize; inst->alg.cra_alignmask = alg->cra_alignmask; return 0; } static int pcrypt_create_aead(struct crypto_template *tmpl, struct rtattr **tb, u32 type, u32 mask) { struct pcrypt_instance_ctx *ctx; struct crypto_attr_type *algt; struct aead_instance *inst; struct aead_alg *alg; const char *name; int err; algt = crypto_get_attr_type(tb); 
if (IS_ERR(algt)) return PTR_ERR(algt); name = crypto_attr_alg_name(tb[1]); if (IS_ERR(name)) return PTR_ERR(name); inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL); if (!inst) return -ENOMEM; ctx = aead_instance_ctx(inst); crypto_set_aead_spawn(&ctx->spawn, aead_crypto_instance(inst)); err = crypto_grab_aead(&ctx->spawn, name, 0, 0); if (err) goto out_free_inst; alg = crypto_spawn_aead_alg(&ctx->spawn); err = pcrypt_init_instance(aead_crypto_instance(inst), &alg->base); if (err) goto out_drop_aead; inst->alg.base.cra_flags = CRYPTO_ALG_ASYNC; inst->alg.ivsize = crypto_aead_alg_ivsize(alg); inst->alg.maxauthsize = crypto_aead_alg_maxauthsize(alg); inst->alg.base.cra_ctxsize = sizeof(struct pcrypt_aead_ctx); inst->alg.init = pcrypt_aead_init_tfm; inst->alg.exit = pcrypt_aead_exit_tfm; inst->alg.setkey = pcrypt_aead_setkey; inst->alg.setauthsize = pcrypt_aead_setauthsize; inst->alg.encrypt = pcrypt_aead_encrypt; inst->alg.decrypt = pcrypt_aead_decrypt; inst->free = pcrypt_free; err = aead_register_instance(tmpl, inst); if (err) goto out_drop_aead; out: return err; out_drop_aead: crypto_drop_aead(&ctx->spawn); out_free_inst: kfree(inst); goto out; } static int pcrypt_create(struct crypto_template *tmpl, struct rtattr **tb) { struct crypto_attr_type *algt; algt = crypto_get_attr_type(tb); if (IS_ERR(algt)) return PTR_ERR(algt); switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) { case CRYPTO_ALG_TYPE_AEAD: return pcrypt_create_aead(tmpl, tb, algt->type, algt->mask); } return -EINVAL; } static int pcrypt_cpumask_change_notify(struct notifier_block *self, unsigned long val, void *data) { struct padata_pcrypt *pcrypt; struct pcrypt_cpumask *new_mask, *old_mask; struct padata_cpumask *cpumask = (struct padata_cpumask *)data; if (!(val & PADATA_CPU_SERIAL)) return 0; pcrypt = container_of(self, struct padata_pcrypt, nblock); new_mask = kmalloc(sizeof(*new_mask), GFP_KERNEL); if (!new_mask) return -ENOMEM; if (!alloc_cpumask_var(&new_mask->mask, GFP_KERNEL)) { 
kfree(new_mask); return -ENOMEM; } old_mask = pcrypt->cb_cpumask; cpumask_copy(new_mask->mask, cpumask->cbcpu); rcu_assign_pointer(pcrypt->cb_cpumask, new_mask); synchronize_rcu_bh(); free_cpumask_var(old_mask->mask); kfree(old_mask); return 0; } static int pcrypt_sysfs_add(struct padata_instance *pinst, const char *name) { int ret; pinst->kobj.kset = pcrypt_kset; ret = kobject_add(&pinst->kobj, NULL, name); if (!ret) kobject_uevent(&pinst->kobj, KOBJ_ADD); return ret; } static int pcrypt_init_padata(struct padata_pcrypt *pcrypt, const char *name) { int ret = -ENOMEM; struct pcrypt_cpumask *mask; get_online_cpus(); pcrypt->wq = alloc_workqueue("%s", WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE, 1, name); if (!pcrypt->wq) goto err; pcrypt->pinst = padata_alloc_possible(pcrypt->wq); if (!pcrypt->pinst) goto err_destroy_workqueue; mask = kmalloc(sizeof(*mask), GFP_KERNEL); if (!mask) goto err_free_padata; if (!alloc_cpumask_var(&mask->mask, GFP_KERNEL)) { kfree(mask); goto err_free_padata; } cpumask_and(mask->mask, cpu_possible_mask, cpu_online_mask); rcu_assign_pointer(pcrypt->cb_cpumask, mask); pcrypt->nblock.notifier_call = pcrypt_cpumask_change_notify; ret = padata_register_cpumask_notifier(pcrypt->pinst, &pcrypt->nblock); if (ret) goto err_free_cpumask; ret = pcrypt_sysfs_add(pcrypt->pinst, name); if (ret) goto err_unregister_notifier; put_online_cpus(); return ret; err_unregister_notifier: padata_unregister_cpumask_notifier(pcrypt->pinst, &pcrypt->nblock); err_free_cpumask: free_cpumask_var(mask->mask); kfree(mask); err_free_padata: padata_free(pcrypt->pinst); err_destroy_workqueue: destroy_workqueue(pcrypt->wq); err: put_online_cpus(); return ret; } static void pcrypt_fini_padata(struct padata_pcrypt *pcrypt) { free_cpumask_var(pcrypt->cb_cpumask->mask); kfree(pcrypt->cb_cpumask); padata_stop(pcrypt->pinst); padata_unregister_cpumask_notifier(pcrypt->pinst, &pcrypt->nblock); destroy_workqueue(pcrypt->wq); padata_free(pcrypt->pinst); } static struct crypto_template 
pcrypt_tmpl = { .name = "pcrypt", .create = pcrypt_create, .module = THIS_MODULE, }; static int __init pcrypt_init(void) { int err = -ENOMEM; pcrypt_kset = kset_create_and_add("pcrypt", NULL, kernel_kobj); if (!pcrypt_kset) goto err; err = pcrypt_init_padata(&pencrypt, "pencrypt"); if (err) goto err_unreg_kset; err = pcrypt_init_padata(&pdecrypt, "pdecrypt"); if (err) goto err_deinit_pencrypt; padata_start(pencrypt.pinst); padata_start(pdecrypt.pinst); return crypto_register_template(&pcrypt_tmpl); err_deinit_pencrypt: pcrypt_fini_padata(&pencrypt); err_unreg_kset: kset_unregister(pcrypt_kset); err: return err; } static void __exit pcrypt_exit(void) { pcrypt_fini_padata(&pencrypt); pcrypt_fini_padata(&pdecrypt); kset_unregister(pcrypt_kset); crypto_unregister_template(&pcrypt_tmpl); } module_init(pcrypt_init); module_exit(pcrypt_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Steffen Klassert <steffen.klassert@secunet.com>"); MODULE_DESCRIPTION("Parallel crypto wrapper"); MODULE_ALIAS_CRYPTO("pcrypt");
./CrossVul/dataset_final_sorted/CWE-763/c/good_2997_0
crossvul-cpp_data_bad_2997_0
/* * pcrypt - Parallel crypto wrapper. * * Copyright (C) 2009 secunet Security Networks AG * Copyright (C) 2009 Steffen Klassert <steffen.klassert@secunet.com> * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. */ #include <crypto/algapi.h> #include <crypto/internal/aead.h> #include <linux/atomic.h> #include <linux/err.h> #include <linux/init.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/notifier.h> #include <linux/kobject.h> #include <linux/cpu.h> #include <crypto/pcrypt.h> struct padata_pcrypt { struct padata_instance *pinst; struct workqueue_struct *wq; /* * Cpumask for callback CPUs. It should be * equal to serial cpumask of corresponding padata instance, * so it is updated when padata notifies us about serial * cpumask change. * * cb_cpumask is protected by RCU. This fact prevents us from * using cpumask_var_t directly because the actual type of * cpumsak_var_t depends on kernel configuration(particularly on * CONFIG_CPUMASK_OFFSTACK macro). Depending on the configuration * cpumask_var_t may be either a pointer to the struct cpumask * or a variable allocated on the stack. Thus we can not safely use * cpumask_var_t with RCU operations such as rcu_assign_pointer or * rcu_dereference. So cpumask_var_t is wrapped with struct * pcrypt_cpumask which makes possible to use it with RCU. 
*/ struct pcrypt_cpumask { cpumask_var_t mask; } *cb_cpumask; struct notifier_block nblock; }; static struct padata_pcrypt pencrypt; static struct padata_pcrypt pdecrypt; static struct kset *pcrypt_kset; struct pcrypt_instance_ctx { struct crypto_aead_spawn spawn; atomic_t tfm_count; }; struct pcrypt_aead_ctx { struct crypto_aead *child; unsigned int cb_cpu; }; static int pcrypt_do_parallel(struct padata_priv *padata, unsigned int *cb_cpu, struct padata_pcrypt *pcrypt) { unsigned int cpu_index, cpu, i; struct pcrypt_cpumask *cpumask; cpu = *cb_cpu; rcu_read_lock_bh(); cpumask = rcu_dereference_bh(pcrypt->cb_cpumask); if (cpumask_test_cpu(cpu, cpumask->mask)) goto out; if (!cpumask_weight(cpumask->mask)) goto out; cpu_index = cpu % cpumask_weight(cpumask->mask); cpu = cpumask_first(cpumask->mask); for (i = 0; i < cpu_index; i++) cpu = cpumask_next(cpu, cpumask->mask); *cb_cpu = cpu; out: rcu_read_unlock_bh(); return padata_do_parallel(pcrypt->pinst, padata, cpu); } static int pcrypt_aead_setkey(struct crypto_aead *parent, const u8 *key, unsigned int keylen) { struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(parent); return crypto_aead_setkey(ctx->child, key, keylen); } static int pcrypt_aead_setauthsize(struct crypto_aead *parent, unsigned int authsize) { struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(parent); return crypto_aead_setauthsize(ctx->child, authsize); } static void pcrypt_aead_serial(struct padata_priv *padata) { struct pcrypt_request *preq = pcrypt_padata_request(padata); struct aead_request *req = pcrypt_request_ctx(preq); aead_request_complete(req->base.data, padata->info); } static void pcrypt_aead_done(struct crypto_async_request *areq, int err) { struct aead_request *req = areq->data; struct pcrypt_request *preq = aead_request_ctx(req); struct padata_priv *padata = pcrypt_request_padata(preq); padata->info = err; req->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; padata_do_serial(padata); } static void pcrypt_aead_enc(struct padata_priv *padata) { struct 
pcrypt_request *preq = pcrypt_padata_request(padata); struct aead_request *req = pcrypt_request_ctx(preq); padata->info = crypto_aead_encrypt(req); if (padata->info == -EINPROGRESS) return; padata_do_serial(padata); } static int pcrypt_aead_encrypt(struct aead_request *req) { int err; struct pcrypt_request *preq = aead_request_ctx(req); struct aead_request *creq = pcrypt_request_ctx(preq); struct padata_priv *padata = pcrypt_request_padata(preq); struct crypto_aead *aead = crypto_aead_reqtfm(req); struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(aead); u32 flags = aead_request_flags(req); memset(padata, 0, sizeof(struct padata_priv)); padata->parallel = pcrypt_aead_enc; padata->serial = pcrypt_aead_serial; aead_request_set_tfm(creq, ctx->child); aead_request_set_callback(creq, flags & ~CRYPTO_TFM_REQ_MAY_SLEEP, pcrypt_aead_done, req); aead_request_set_crypt(creq, req->src, req->dst, req->cryptlen, req->iv); aead_request_set_ad(creq, req->assoclen); err = pcrypt_do_parallel(padata, &ctx->cb_cpu, &pencrypt); if (!err) return -EINPROGRESS; return err; } static void pcrypt_aead_dec(struct padata_priv *padata) { struct pcrypt_request *preq = pcrypt_padata_request(padata); struct aead_request *req = pcrypt_request_ctx(preq); padata->info = crypto_aead_decrypt(req); if (padata->info == -EINPROGRESS) return; padata_do_serial(padata); } static int pcrypt_aead_decrypt(struct aead_request *req) { int err; struct pcrypt_request *preq = aead_request_ctx(req); struct aead_request *creq = pcrypt_request_ctx(preq); struct padata_priv *padata = pcrypt_request_padata(preq); struct crypto_aead *aead = crypto_aead_reqtfm(req); struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(aead); u32 flags = aead_request_flags(req); memset(padata, 0, sizeof(struct padata_priv)); padata->parallel = pcrypt_aead_dec; padata->serial = pcrypt_aead_serial; aead_request_set_tfm(creq, ctx->child); aead_request_set_callback(creq, flags & ~CRYPTO_TFM_REQ_MAY_SLEEP, pcrypt_aead_done, req); 
aead_request_set_crypt(creq, req->src, req->dst, req->cryptlen, req->iv); aead_request_set_ad(creq, req->assoclen); err = pcrypt_do_parallel(padata, &ctx->cb_cpu, &pdecrypt); if (!err) return -EINPROGRESS; return err; } static int pcrypt_aead_init_tfm(struct crypto_aead *tfm) { int cpu, cpu_index; struct aead_instance *inst = aead_alg_instance(tfm); struct pcrypt_instance_ctx *ictx = aead_instance_ctx(inst); struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(tfm); struct crypto_aead *cipher; cpu_index = (unsigned int)atomic_inc_return(&ictx->tfm_count) % cpumask_weight(cpu_online_mask); ctx->cb_cpu = cpumask_first(cpu_online_mask); for (cpu = 0; cpu < cpu_index; cpu++) ctx->cb_cpu = cpumask_next(ctx->cb_cpu, cpu_online_mask); cipher = crypto_spawn_aead(&ictx->spawn); if (IS_ERR(cipher)) return PTR_ERR(cipher); ctx->child = cipher; crypto_aead_set_reqsize(tfm, sizeof(struct pcrypt_request) + sizeof(struct aead_request) + crypto_aead_reqsize(cipher)); return 0; } static void pcrypt_aead_exit_tfm(struct crypto_aead *tfm) { struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(tfm); crypto_free_aead(ctx->child); } static int pcrypt_init_instance(struct crypto_instance *inst, struct crypto_alg *alg) { if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME, "pcrypt(%s)", alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME) return -ENAMETOOLONG; memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME); inst->alg.cra_priority = alg->cra_priority + 100; inst->alg.cra_blocksize = alg->cra_blocksize; inst->alg.cra_alignmask = alg->cra_alignmask; return 0; } static int pcrypt_create_aead(struct crypto_template *tmpl, struct rtattr **tb, u32 type, u32 mask) { struct pcrypt_instance_ctx *ctx; struct crypto_attr_type *algt; struct aead_instance *inst; struct aead_alg *alg; const char *name; int err; algt = crypto_get_attr_type(tb); if (IS_ERR(algt)) return PTR_ERR(algt); name = crypto_attr_alg_name(tb[1]); if (IS_ERR(name)) return PTR_ERR(name); inst = kzalloc(sizeof(*inst) + 
sizeof(*ctx), GFP_KERNEL); if (!inst) return -ENOMEM; ctx = aead_instance_ctx(inst); crypto_set_aead_spawn(&ctx->spawn, aead_crypto_instance(inst)); err = crypto_grab_aead(&ctx->spawn, name, 0, 0); if (err) goto out_free_inst; alg = crypto_spawn_aead_alg(&ctx->spawn); err = pcrypt_init_instance(aead_crypto_instance(inst), &alg->base); if (err) goto out_drop_aead; inst->alg.base.cra_flags = CRYPTO_ALG_ASYNC; inst->alg.ivsize = crypto_aead_alg_ivsize(alg); inst->alg.maxauthsize = crypto_aead_alg_maxauthsize(alg); inst->alg.base.cra_ctxsize = sizeof(struct pcrypt_aead_ctx); inst->alg.init = pcrypt_aead_init_tfm; inst->alg.exit = pcrypt_aead_exit_tfm; inst->alg.setkey = pcrypt_aead_setkey; inst->alg.setauthsize = pcrypt_aead_setauthsize; inst->alg.encrypt = pcrypt_aead_encrypt; inst->alg.decrypt = pcrypt_aead_decrypt; err = aead_register_instance(tmpl, inst); if (err) goto out_drop_aead; out: return err; out_drop_aead: crypto_drop_aead(&ctx->spawn); out_free_inst: kfree(inst); goto out; } static int pcrypt_create(struct crypto_template *tmpl, struct rtattr **tb) { struct crypto_attr_type *algt; algt = crypto_get_attr_type(tb); if (IS_ERR(algt)) return PTR_ERR(algt); switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) { case CRYPTO_ALG_TYPE_AEAD: return pcrypt_create_aead(tmpl, tb, algt->type, algt->mask); } return -EINVAL; } static void pcrypt_free(struct crypto_instance *inst) { struct pcrypt_instance_ctx *ctx = crypto_instance_ctx(inst); crypto_drop_aead(&ctx->spawn); kfree(inst); } static int pcrypt_cpumask_change_notify(struct notifier_block *self, unsigned long val, void *data) { struct padata_pcrypt *pcrypt; struct pcrypt_cpumask *new_mask, *old_mask; struct padata_cpumask *cpumask = (struct padata_cpumask *)data; if (!(val & PADATA_CPU_SERIAL)) return 0; pcrypt = container_of(self, struct padata_pcrypt, nblock); new_mask = kmalloc(sizeof(*new_mask), GFP_KERNEL); if (!new_mask) return -ENOMEM; if (!alloc_cpumask_var(&new_mask->mask, GFP_KERNEL)) { 
kfree(new_mask); return -ENOMEM; } old_mask = pcrypt->cb_cpumask; cpumask_copy(new_mask->mask, cpumask->cbcpu); rcu_assign_pointer(pcrypt->cb_cpumask, new_mask); synchronize_rcu_bh(); free_cpumask_var(old_mask->mask); kfree(old_mask); return 0; } static int pcrypt_sysfs_add(struct padata_instance *pinst, const char *name) { int ret; pinst->kobj.kset = pcrypt_kset; ret = kobject_add(&pinst->kobj, NULL, name); if (!ret) kobject_uevent(&pinst->kobj, KOBJ_ADD); return ret; } static int pcrypt_init_padata(struct padata_pcrypt *pcrypt, const char *name) { int ret = -ENOMEM; struct pcrypt_cpumask *mask; get_online_cpus(); pcrypt->wq = alloc_workqueue("%s", WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE, 1, name); if (!pcrypt->wq) goto err; pcrypt->pinst = padata_alloc_possible(pcrypt->wq); if (!pcrypt->pinst) goto err_destroy_workqueue; mask = kmalloc(sizeof(*mask), GFP_KERNEL); if (!mask) goto err_free_padata; if (!alloc_cpumask_var(&mask->mask, GFP_KERNEL)) { kfree(mask); goto err_free_padata; } cpumask_and(mask->mask, cpu_possible_mask, cpu_online_mask); rcu_assign_pointer(pcrypt->cb_cpumask, mask); pcrypt->nblock.notifier_call = pcrypt_cpumask_change_notify; ret = padata_register_cpumask_notifier(pcrypt->pinst, &pcrypt->nblock); if (ret) goto err_free_cpumask; ret = pcrypt_sysfs_add(pcrypt->pinst, name); if (ret) goto err_unregister_notifier; put_online_cpus(); return ret; err_unregister_notifier: padata_unregister_cpumask_notifier(pcrypt->pinst, &pcrypt->nblock); err_free_cpumask: free_cpumask_var(mask->mask); kfree(mask); err_free_padata: padata_free(pcrypt->pinst); err_destroy_workqueue: destroy_workqueue(pcrypt->wq); err: put_online_cpus(); return ret; } static void pcrypt_fini_padata(struct padata_pcrypt *pcrypt) { free_cpumask_var(pcrypt->cb_cpumask->mask); kfree(pcrypt->cb_cpumask); padata_stop(pcrypt->pinst); padata_unregister_cpumask_notifier(pcrypt->pinst, &pcrypt->nblock); destroy_workqueue(pcrypt->wq); padata_free(pcrypt->pinst); } static struct crypto_template 
pcrypt_tmpl = { .name = "pcrypt", .create = pcrypt_create, .free = pcrypt_free, .module = THIS_MODULE, }; static int __init pcrypt_init(void) { int err = -ENOMEM; pcrypt_kset = kset_create_and_add("pcrypt", NULL, kernel_kobj); if (!pcrypt_kset) goto err; err = pcrypt_init_padata(&pencrypt, "pencrypt"); if (err) goto err_unreg_kset; err = pcrypt_init_padata(&pdecrypt, "pdecrypt"); if (err) goto err_deinit_pencrypt; padata_start(pencrypt.pinst); padata_start(pdecrypt.pinst); return crypto_register_template(&pcrypt_tmpl); err_deinit_pencrypt: pcrypt_fini_padata(&pencrypt); err_unreg_kset: kset_unregister(pcrypt_kset); err: return err; } static void __exit pcrypt_exit(void) { pcrypt_fini_padata(&pencrypt); pcrypt_fini_padata(&pdecrypt); kset_unregister(pcrypt_kset); crypto_unregister_template(&pcrypt_tmpl); } module_init(pcrypt_init); module_exit(pcrypt_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Steffen Klassert <steffen.klassert@secunet.com>"); MODULE_DESCRIPTION("Parallel crypto wrapper"); MODULE_ALIAS_CRYPTO("pcrypt");
./CrossVul/dataset_final_sorted/CWE-763/c/bad_2997_0
crossvul-cpp_data_good_4273_0
/* ** $Id: lgc.c $ ** Garbage Collector ** See Copyright Notice in lua.h */ #define lgc_c #define LUA_CORE #include "lprefix.h" #include <stdio.h> #include <string.h> #include "lua.h" #include "ldebug.h" #include "ldo.h" #include "lfunc.h" #include "lgc.h" #include "lmem.h" #include "lobject.h" #include "lstate.h" #include "lstring.h" #include "ltable.h" #include "ltm.h" /* ** Maximum number of elements to sweep in each single step. ** (Large enough to dissipate fixed overheads but small enough ** to allow small steps for the collector.) */ #define GCSWEEPMAX 100 /* ** Maximum number of finalizers to call in each single step. */ #define GCFINMAX 10 /* ** Cost of calling one finalizer. */ #define GCFINALIZECOST 50 /* ** The equivalent, in bytes, of one unit of "work" (visiting a slot, ** sweeping an object, etc.) */ #define WORK2MEM sizeof(TValue) /* ** macro to adjust 'pause': 'pause' is actually used like ** 'pause / PAUSEADJ' (value chosen by tests) */ #define PAUSEADJ 100 /* mask to erase all color bits (plus gen. related stuff) */ #define maskcolors (~(bitmask(BLACKBIT) | WHITEBITS | AGEBITS)) /* macro to erase all color bits then sets only the current white bit */ #define makewhite(g,x) \ (x->marked = cast_byte((x->marked & maskcolors) | luaC_white(g))) #define white2gray(x) resetbits(x->marked, WHITEBITS) #define black2gray(x) resetbit(x->marked, BLACKBIT) #define valiswhite(x) (iscollectable(x) && iswhite(gcvalue(x))) #define keyiswhite(n) (keyiscollectable(n) && iswhite(gckey(n))) #define checkconsistency(obj) \ lua_longassert(!iscollectable(obj) || righttt(obj)) /* ** Protected access to objects in values */ #define gcvalueN(o) (iscollectable(o) ? 
gcvalue(o) : NULL) #define markvalue(g,o) { checkconsistency(o); \ if (valiswhite(o)) reallymarkobject(g,gcvalue(o)); } #define markkey(g, n) { if keyiswhite(n) reallymarkobject(g,gckey(n)); } #define markobject(g,t) { if (iswhite(t)) reallymarkobject(g, obj2gco(t)); } /* ** mark an object that can be NULL (either because it is really optional, ** or it was stripped as debug info, or inside an uncompleted structure) */ #define markobjectN(g,t) { if (t) markobject(g,t); } static void reallymarkobject (global_State *g, GCObject *o); static lu_mem atomic (lua_State *L); static void entersweep (lua_State *L); /* ** {====================================================== ** Generic functions ** ======================================================= */ /* ** one after last element in a hash array */ #define gnodelast(h) gnode(h, cast_sizet(sizenode(h))) static GCObject **getgclist (GCObject *o) { switch (o->tt) { case LUA_VTABLE: return &gco2t(o)->gclist; case LUA_VLCL: return &gco2lcl(o)->gclist; case LUA_VCCL: return &gco2ccl(o)->gclist; case LUA_VTHREAD: return &gco2th(o)->gclist; case LUA_VPROTO: return &gco2p(o)->gclist; case LUA_VUSERDATA: { Udata *u = gco2u(o); lua_assert(u->nuvalue > 0); return &u->gclist; } default: lua_assert(0); return 0; } } /* ** Link a collectable object 'o' with a known type into list pointed by 'p'. */ #define linkgclist(o,p) ((o)->gclist = (p), (p) = obj2gco(o)) /* ** Link a generic collectable object 'o' into list pointed by 'p'. */ #define linkobjgclist(o,p) (*getgclist(o) = (p), (p) = obj2gco(o)) /* ** Clear keys for empty entries in tables. If entry is empty ** and its key is not marked, mark its entry as dead. This allows the ** collection of the key, but keeps its entry in the table (its removal ** could break a chain). The main feature of a dead key is that it must ** be different from any other value, to do not disturb searches. 
** Other places never manipulate dead keys, because its associated empty ** value is enough to signal that the entry is logically empty. */ static void clearkey (Node *n) { lua_assert(isempty(gval(n))); if (keyiswhite(n)) setdeadkey(n); /* unused and unmarked key; remove it */ } /* ** tells whether a key or value can be cleared from a weak ** table. Non-collectable objects are never removed from weak ** tables. Strings behave as 'values', so are never removed too. for ** other objects: if really collected, cannot keep them; for objects ** being finalized, keep them in keys, but not in values */ static int iscleared (global_State *g, const GCObject *o) { if (o == NULL) return 0; /* non-collectable value */ else if (novariant(o->tt) == LUA_TSTRING) { markobject(g, o); /* strings are 'values', so are never weak */ return 0; } else return iswhite(o); } /* ** Barrier that moves collector forward, that is, marks the white object ** 'v' being pointed by the black object 'o'. In the generational ** mode, 'v' must also become old, if 'o' is old; however, it cannot ** be changed directly to OLD, because it may still point to non-old ** objects. So, it is marked as OLD0. In the next cycle it will become ** OLD1, and in the next it will finally become OLD (regular old). By ** then, any object it points to will also be old. If called in the ** incremental sweep phase, it clears the black object to white (sweep ** it) to avoid other barrier calls for this same object. (That cannot ** be done is generational mode, as its sweep does not distinguish ** whites from deads.) */ void luaC_barrier_ (lua_State *L, GCObject *o, GCObject *v) { global_State *g = G(L); lua_assert(isblack(o) && iswhite(v) && !isdead(g, v) && !isdead(g, o)); if (keepinvariant(g)) { /* must keep invariant? 
*/ reallymarkobject(g, v); /* restore invariant */ if (isold(o)) { lua_assert(!isold(v)); /* white object could not be old */ setage(v, G_OLD0); /* restore generational invariant */ } } else { /* sweep phase */ lua_assert(issweepphase(g)); if (g->gckind == KGC_INC) /* incremental mode? */ makewhite(g, o); /* mark 'o' as white to avoid other barriers */ } } /* ** barrier that moves collector backward, that is, mark the black object ** pointing to a white object as gray again. */ void luaC_barrierback_ (lua_State *L, GCObject *o) { global_State *g = G(L); lua_assert(isblack(o) && !isdead(g, o)); lua_assert(g->gckind != KGC_GEN || (isold(o) && getage(o) != G_TOUCHED1)); if (getage(o) != G_TOUCHED2) /* not already in gray list? */ linkobjgclist(o, g->grayagain); /* link it in 'grayagain' */ black2gray(o); /* make object gray (again) */ setage(o, G_TOUCHED1); /* touched in current cycle */ } void luaC_fix (lua_State *L, GCObject *o) { global_State *g = G(L); lua_assert(g->allgc == o); /* object must be 1st in 'allgc' list! */ white2gray(o); /* they will be gray forever */ setage(o, G_OLD); /* and old forever */ g->allgc = o->next; /* remove object from 'allgc' list */ o->next = g->fixedgc; /* link it to 'fixedgc' list */ g->fixedgc = o; } /* ** create a new collectable object (with given type and size) and link ** it to 'allgc' list. */ GCObject *luaC_newobj (lua_State *L, int tt, size_t sz) { global_State *g = G(L); GCObject *o = cast(GCObject *, luaM_newobject(L, novariant(tt), sz)); o->marked = luaC_white(g); o->tt = tt; o->next = g->allgc; g->allgc = o; return o; } /* }====================================================== */ /* ** {====================================================== ** Mark functions ** ======================================================= */ /* ** Mark an object. Userdata, strings, and closed upvalues are visited ** and turned black here. Other objects are marked gray and added ** to appropriate list to be visited (and turned black) later. 
(Open
** upvalues are already linked in 'headuv' list. They are kept gray
** to avoid barriers, as their values will be revisited by the thread.)
*/
static void reallymarkobject (global_State *g, GCObject *o) {
  white2gray(o);
  switch (o->tt) {
    case LUA_VSHRSTR:
    case LUA_VLNGSTR: {
      gray2black(o);  /* strings have no references: black immediately */
      break;
    }
    case LUA_VUPVAL: {
      UpVal *uv = gco2upv(o);
      if (!upisopen(uv))  /* open upvalues are kept gray */
        gray2black(o);
      markvalue(g, uv->v);  /* mark its content */
      break;
    }
    case LUA_VUSERDATA: {
      Udata *u = gco2u(o);
      if (u->nuvalue == 0) {  /* no user values? */
        markobjectN(g, u->metatable);  /* mark its metatable */
        gray2black(o);  /* nothing else to mark */
        break;
      }
      /* else: userdata with user values is traversed like a table... */
    }  /* FALLTHROUGH */
    case LUA_VLCL: case LUA_VCCL: case LUA_VTABLE:
    case LUA_VTHREAD: case LUA_VPROTO: {
      /* objects with references: stay gray, traversed later via 'gray' list */
      linkobjgclist(o, g->gray);
      break;
    }
    default: lua_assert(0); break;  /* only collectable types reach here */
  }
}


/*
** Mark metamethod tables for basic types (the 'g->mt' array; entries
** may be NULL, hence 'markobjectN').
*/
static void markmt (global_State *g) {
  int i;
  for (i=0; i < LUA_NUMTAGS; i++)
    markobjectN(g, g->mt[i]);
}


/*
** Mark all objects in the 'tobefnz' list (objects waiting to be
** finalized); returns how many objects were in the list.
*/
static lu_mem markbeingfnz (global_State *g) {
  GCObject *o;
  lu_mem count = 0;
  for (o = g->tobefnz; o != NULL; o = o->next) {
    count++;
    markobject(g, o);
  }
  return count;
}


/*
** For each non-marked thread, simulates a barrier between each open
** upvalue and its value. (If the thread is collected, the value will be
** assigned to the upvalue, but then it can be too late for the barrier
** to act. The "barrier" does not need to check colors: A non-marked
** thread must be young; upvalues cannot be older than their threads; so
** any visited upvalue must be young too.) Also removes the thread from
** the list, as it was already visited. Removes also threads with no
** upvalues, as they have nothing to be checked. (If the thread gets an
** upvalue later, it will be linked in the list again.)
*/ static int remarkupvals (global_State *g) { lua_State *thread; lua_State **p = &g->twups; int work = 0; while ((thread = *p) != NULL) { work++; lua_assert(!isblack(thread)); /* threads are never black */ if (isgray(thread) && thread->openupval != NULL) p = &thread->twups; /* keep marked thread with upvalues in the list */ else { /* thread is not marked or without upvalues */ UpVal *uv; lua_assert(!isold(thread) || thread->openupval == NULL); *p = thread->twups; /* remove thread from the list */ thread->twups = thread; /* mark that it is out of list */ for (uv = thread->openupval; uv != NULL; uv = uv->u.open.next) { lua_assert(getage(uv) <= getage(thread)); work++; if (!iswhite(uv)) /* upvalue already visited? */ markvalue(g, uv->v); /* mark its value */ } } } return work; } /* ** mark root set and reset all gray lists, to start a new collection */ static void restartcollection (global_State *g) { g->gray = g->grayagain = NULL; g->weak = g->allweak = g->ephemeron = NULL; markobject(g, g->mainthread); markvalue(g, &g->l_registry); markmt(g); markbeingfnz(g); /* mark any finalizing object left from previous cycle */ } /* }====================================================== */ /* ** {====================================================== ** Traverse functions ** ======================================================= */ /* ** Traverse a table with weak values and link it to proper list. During ** propagate phase, keep it in 'grayagain' list, to be revisited in the ** atomic phase. In the atomic phase, if table has any white value, ** put it in 'weak' list, to be cleared. */ static void traverseweakvalue (global_State *g, Table *h) { Node *n, *limit = gnodelast(h); /* if there is array part, assume it may have white values (it is not worth traversing it now just to check) */ int hasclears = (h->alimit > 0); for (n = gnode(h, 0); n < limit; n++) { /* traverse hash part */ if (isempty(gval(n))) /* entry is empty? 
*/ clearkey(n); /* clear its key */ else { lua_assert(!keyisnil(n)); markkey(g, n); if (!hasclears && iscleared(g, gcvalueN(gval(n)))) /* a white value? */ hasclears = 1; /* table will have to be cleared */ } } if (g->gcstate == GCSatomic && hasclears) linkgclist(h, g->weak); /* has to be cleared later */ else linkgclist(h, g->grayagain); /* must retraverse it in atomic phase */ } /* ** Traverse an ephemeron table and link it to proper list. Returns true ** iff any object was marked during this traversal (which implies that ** convergence has to continue). During propagation phase, keep table ** in 'grayagain' list, to be visited again in the atomic phase. In ** the atomic phase, if table has any white->white entry, it has to ** be revisited during ephemeron convergence (as that key may turn ** black). Otherwise, if it has any white key, table has to be cleared ** (in the atomic phase). In generational mode, it (like all visited ** tables) must be kept in some gray list for post-processing. */ static int traverseephemeron (global_State *g, Table *h, int inv) { int marked = 0; /* true if an object is marked in this traversal */ int hasclears = 0; /* true if table has white keys */ int hasww = 0; /* true if table has entry "white-key -> white-value" */ unsigned int i; unsigned int asize = luaH_realasize(h); unsigned int nsize = sizenode(h); /* traverse array part */ for (i = 0; i < asize; i++) { if (valiswhite(&h->array[i])) { marked = 1; reallymarkobject(g, gcvalue(&h->array[i])); } } /* traverse hash part; if 'inv', traverse descending (see 'convergeephemerons') */ for (i = 0; i < nsize; i++) { Node *n = inv ? gnode(h, nsize - 1 - i) : gnode(h, i); if (isempty(gval(n))) /* entry is empty? */ clearkey(n); /* clear its key */ else if (iscleared(g, gckeyN(n))) { /* key is not marked (yet)? */ hasclears = 1; /* table must be cleared */ if (valiswhite(gval(n))) /* value not marked yet? 
*/ hasww = 1; /* white-white entry */ } else if (valiswhite(gval(n))) { /* value not marked yet? */ marked = 1; reallymarkobject(g, gcvalue(gval(n))); /* mark it now */ } } /* link table into proper list */ if (g->gcstate == GCSpropagate) linkgclist(h, g->grayagain); /* must retraverse it in atomic phase */ else if (hasww) /* table has white->white entries? */ linkgclist(h, g->ephemeron); /* have to propagate again */ else if (hasclears) /* table has white keys? */ linkgclist(h, g->allweak); /* may have to clean white keys */ else if (g->gckind == KGC_GEN) linkgclist(h, g->grayagain); /* keep it in some list */ else gray2black(h); return marked; } static void traversestrongtable (global_State *g, Table *h) { Node *n, *limit = gnodelast(h); unsigned int i; unsigned int asize = luaH_realasize(h); for (i = 0; i < asize; i++) /* traverse array part */ markvalue(g, &h->array[i]); for (n = gnode(h, 0); n < limit; n++) { /* traverse hash part */ if (isempty(gval(n))) /* entry is empty? */ clearkey(n); /* clear its key */ else { lua_assert(!keyisnil(n)); markkey(g, n); markvalue(g, gval(n)); } } if (g->gckind == KGC_GEN) { linkgclist(h, g->grayagain); /* keep it in some gray list */ black2gray(h); } } static lu_mem traversetable (global_State *g, Table *h) { const char *weakkey, *weakvalue; const TValue *mode = gfasttm(g, h->metatable, TM_MODE); markobjectN(g, h->metatable); if (mode && ttisstring(mode) && /* is there a weak mode? */ (cast_void(weakkey = strchr(svalue(mode), 'k')), cast_void(weakvalue = strchr(svalue(mode), 'v')), (weakkey || weakvalue))) { /* is really weak? */ black2gray(h); /* keep table gray */ if (!weakkey) /* strong keys? */ traverseweakvalue(g, h); else if (!weakvalue) /* strong values? 
*/ traverseephemeron(g, h, 0); else /* all weak */ linkgclist(h, g->allweak); /* nothing to traverse now */ } else /* not weak */ traversestrongtable(g, h); return 1 + h->alimit + 2 * allocsizenode(h); } static int traverseudata (global_State *g, Udata *u) { int i; markobjectN(g, u->metatable); /* mark its metatable */ for (i = 0; i < u->nuvalue; i++) markvalue(g, &u->uv[i].uv); if (g->gckind == KGC_GEN) { linkgclist(u, g->grayagain); /* keep it in some gray list */ black2gray(u); } return 1 + u->nuvalue; } /* ** Traverse a prototype. (While a prototype is being build, its ** arrays can be larger than needed; the extra slots are filled with ** NULL, so the use of 'markobjectN') */ static int traverseproto (global_State *g, Proto *f) { int i; markobjectN(g, f->source); for (i = 0; i < f->sizek; i++) /* mark literals */ markvalue(g, &f->k[i]); for (i = 0; i < f->sizeupvalues; i++) /* mark upvalue names */ markobjectN(g, f->upvalues[i].name); for (i = 0; i < f->sizep; i++) /* mark nested protos */ markobjectN(g, f->p[i]); for (i = 0; i < f->sizelocvars; i++) /* mark local-variable names */ markobjectN(g, f->locvars[i].varname); return 1 + f->sizek + f->sizeupvalues + f->sizep + f->sizelocvars; } static int traverseCclosure (global_State *g, CClosure *cl) { int i; for (i = 0; i < cl->nupvalues; i++) /* mark its upvalues */ markvalue(g, &cl->upvalue[i]); return 1 + cl->nupvalues; } /* ** Traverse a Lua closure, marking its prototype and its upvalues. ** (Both can be NULL while closure is being created.) */ static int traverseLclosure (global_State *g, LClosure *cl) { int i; markobjectN(g, cl->p); /* mark its prototype */ for (i = 0; i < cl->nupvalues; i++) { /* visit its upvalues */ UpVal *uv = cl->upvals[i]; markobjectN(g, uv); /* mark upvalue */ } return 1 + cl->nupvalues; } /* ** Traverse a thread, marking the elements in the stack up to its top ** and cleaning the rest of the stack in the final traversal. 
** That ensures that the entire stack have valid (non-dead) objects.
*/
static int traversethread (global_State *g, lua_State *th) {
  UpVal *uv;
  StkId o = th->stack;
  if (o == NULL)
    return 1;  /* stack not completely built yet */
  lua_assert(g->gcstate == GCSatomic ||
             th->openupval == NULL || isintwups(th));
  for (; o < th->top; o++)  /* mark live elements in the stack */
    markvalue(g, s2v(o));
  for (uv = th->openupval; uv != NULL; uv = uv->u.open.next)
    markobject(g, uv);  /* open upvalues cannot be collected */
  if (g->gcstate == GCSatomic) {  /* final traversal? */
    StkId lim = th->stack + th->stacksize;  /* real end of stack */
    for (; o < lim; o++)  /* clear not-marked stack slice */
      setnilvalue(s2v(o));
    /* 'remarkupvals' may have removed thread from 'twups' list */
    if (!isintwups(th) && th->openupval != NULL) {
      th->twups = g->twups;  /* link it back to the list */
      g->twups = th;
    }
  }
  else if (!g->gcemergency)
    luaD_shrinkstack(th);  /* do not change stack in emergency cycle */
  return 1 + th->stacksize;  /* work estimate */
}


/*
** traverse one gray object, turning it to black (except for threads,
** which are always gray). Returns an estimate of the amount of work
** done (number of slots traversed).
*/
static lu_mem propagatemark (global_State *g) {
  GCObject *o = g->gray;
  gray2black(o);
  g->gray = *getgclist(o);  /* remove from 'gray' list */
  switch (o->tt) {
    case LUA_VTABLE: return traversetable(g, gco2t(o));
    case LUA_VUSERDATA: return traverseudata(g, gco2u(o));
    case LUA_VLCL: return traverseLclosure(g, gco2lcl(o));
    case LUA_VCCL: return traverseCclosure(g, gco2ccl(o));
    case LUA_VPROTO: return traverseproto(g, gco2p(o));
    case LUA_VTHREAD: {  /* threads go back to 'grayagain' and stay gray */
      lua_State *th = gco2th(o);
      linkgclist(th, g->grayagain);  /* insert into 'grayagain' list */
      black2gray(o);
      return traversethread(g, th);
    }
    default: lua_assert(0); return 0;
  }
}


/*
** Propagate marks until the 'gray' list is empty; returns total work.
*/
static lu_mem propagateall (global_State *g) {
  lu_mem tot = 0;
  while (g->gray)
    tot += propagatemark(g);
  return tot;
}


/*
** Traverse all ephemeron tables propagating marks from keys to values.
** Repeat until it converges, that is, nothing new is marked.
'dir' ** inverts the direction of the traversals, trying to speed up ** convergence on chains in the same table. ** */ static void convergeephemerons (global_State *g) { int changed; int dir = 0; do { GCObject *w; GCObject *next = g->ephemeron; /* get ephemeron list */ g->ephemeron = NULL; /* tables may return to this list when traversed */ changed = 0; while ((w = next) != NULL) { /* for each ephemeron table */ next = gco2t(w)->gclist; /* list is rebuilt during loop */ if (traverseephemeron(g, gco2t(w), dir)) { /* marked some value? */ propagateall(g); /* propagate changes */ changed = 1; /* will have to revisit all ephemeron tables */ } } dir = !dir; /* invert direction next time */ } while (changed); /* repeat until no more changes */ } /* }====================================================== */ /* ** {====================================================== ** Sweep Functions ** ======================================================= */ /* ** clear entries with unmarked keys from all weaktables in list 'l' */ static void clearbykeys (global_State *g, GCObject *l) { for (; l; l = gco2t(l)->gclist) { Table *h = gco2t(l); Node *limit = gnodelast(h); Node *n; for (n = gnode(h, 0); n < limit; n++) { if (iscleared(g, gckeyN(n))) /* unmarked key? */ setempty(gval(n)); /* remove entry */ if (isempty(gval(n))) /* is entry empty? */ clearkey(n); /* clear its key */ } } } /* ** clear entries with unmarked values from all weaktables in list 'l' up ** to element 'f' */ static void clearbyvalues (global_State *g, GCObject *l, GCObject *f) { for (; l != f; l = gco2t(l)->gclist) { Table *h = gco2t(l); Node *n, *limit = gnodelast(h); unsigned int i; unsigned int asize = luaH_realasize(h); for (i = 0; i < asize; i++) { TValue *o = &h->array[i]; if (iscleared(g, gcvalueN(o))) /* value was collected? */ setempty(o); /* remove entry */ } for (n = gnode(h, 0); n < limit; n++) { if (iscleared(g, gcvalueN(gval(n)))) /* unmarked value? 
*/
      setempty(gval(n));  /* remove entry */
      if (isempty(gval(n)))  /* is entry empty? */
        clearkey(n);  /* clear its key */
    }
  }
}


/*
** Free an upvalue; open upvalues must first be unlinked from their
** thread's open-upvalue list.
*/
static void freeupval (lua_State *L, UpVal *uv) {
  if (upisopen(uv))
    luaF_unlinkupval(uv);
  luaM_free(L, uv);
}


/*
** Release the memory of a collectable object, dispatching on its tag.
*/
static void freeobj (lua_State *L, GCObject *o) {
  switch (o->tt) {
    case LUA_VPROTO:
      luaF_freeproto(L, gco2p(o));
      break;
    case LUA_VUPVAL:
      freeupval(L, gco2upv(o));
      break;
    case LUA_VLCL:
      luaM_freemem(L, o, sizeLclosure(gco2lcl(o)->nupvalues));
      break;
    case LUA_VCCL:
      luaM_freemem(L, o, sizeCclosure(gco2ccl(o)->nupvalues));
      break;
    case LUA_VTABLE:
      luaH_free(L, gco2t(o));
      break;
    case LUA_VTHREAD:
      luaE_freethread(L, gco2th(o));
      break;
    case LUA_VUSERDATA: {
      Udata *u = gco2u(o);
      luaM_freemem(L, o, sizeudata(u->nuvalue, u->len));
      break;
    }
    case LUA_VSHRSTR:
      luaS_remove(L, gco2ts(o));  /* remove it from hash table */
      luaM_freemem(L, o, sizelstring(gco2ts(o)->shrlen));
      break;
    case LUA_VLNGSTR:
      luaM_freemem(L, o, sizelstring(gco2ts(o)->u.lnglen));
      break;
    default: lua_assert(0);
  }
}


/*
** sweep at most 'countin' elements from a list of GCObjects erasing dead
** objects, where a dead object is one marked with the old (non current)
** white; change all non-dead objects back to white, preparing for next
** collection cycle. Return where to continue the traversal or NULL if
** list is finished. ('*countout' gets the number of elements traversed.)
*/
static GCObject **sweeplist (lua_State *L, GCObject **p, int countin,
                             int *countout) {
  global_State *g = G(L);
  int ow = otherwhite(g);  /* the "dead" white of this cycle */
  int i;
  int white = luaC_white(g);  /* current white */
  for (i = 0; *p != NULL && i < countin; i++) {
    GCObject *curr = *p;
    int marked = curr->marked;
    if (isdeadm(ow, marked)) {  /* is 'curr' dead?
*/ *p = curr->next; /* remove 'curr' from list */ freeobj(L, curr); /* erase 'curr' */ } else { /* change mark to 'white' */ curr->marked = cast_byte((marked & maskcolors) | white); p = &curr->next; /* go to next element */ } } if (countout) *countout = i; /* number of elements traversed */ return (*p == NULL) ? NULL : p; } /* ** sweep a list until a live object (or end of list) */ static GCObject **sweeptolive (lua_State *L, GCObject **p) { GCObject **old = p; do { p = sweeplist(L, p, 1, NULL); } while (p == old); return p; } /* }====================================================== */ /* ** {====================================================== ** Finalization ** ======================================================= */ /* ** If possible, shrink string table. */ static void checkSizes (lua_State *L, global_State *g) { if (!g->gcemergency) { if (g->strt.nuse < g->strt.size / 4) { /* string table too big? */ l_mem olddebt = g->GCdebt; luaS_resize(L, g->strt.size / 2); g->GCestimate += g->GCdebt - olddebt; /* correct estimate */ } } } /* ** Get the next udata to be finalized from the 'tobefnz' list, and ** link it back into the 'allgc' list. */ static GCObject *udata2finalize (global_State *g) { GCObject *o = g->tobefnz; /* get first element */ lua_assert(tofinalize(o)); g->tobefnz = o->next; /* remove it from 'tobefnz' list */ o->next = g->allgc; /* return it to 'allgc' list */ g->allgc = o; resetbit(o->marked, FINALIZEDBIT); /* object is "normal" again */ if (issweepphase(g)) makewhite(g, o); /* "sweep" object */ return o; } static void dothecall (lua_State *L, void *ud) { UNUSED(ud); luaD_callnoyield(L, L->top - 2, 0); } static void GCTM (lua_State *L) { global_State *g = G(L); const TValue *tm; TValue v; lua_assert(!g->gcemergency); setgcovalue(L, &v, udata2finalize(g)); tm = luaT_gettmbyobj(L, &v, TM_GC); if (!notm(tm)) { /* is there a finalizer? 
*/ int status; lu_byte oldah = L->allowhook; int running = g->gcrunning; L->allowhook = 0; /* stop debug hooks during GC metamethod */ g->gcrunning = 0; /* avoid GC steps */ setobj2s(L, L->top++, tm); /* push finalizer... */ setobj2s(L, L->top++, &v); /* ... and its argument */ L->ci->callstatus |= CIST_FIN; /* will run a finalizer */ status = luaD_pcall(L, dothecall, NULL, savestack(L, L->top - 2), 0); L->ci->callstatus &= ~CIST_FIN; /* not running a finalizer anymore */ L->allowhook = oldah; /* restore hooks */ g->gcrunning = running; /* restore state */ if (unlikely(status != LUA_OK)) { /* error while running __gc? */ luaE_warnerror(L, "__gc metamethod"); L->top--; /* pops error object */ } } } /* ** Call a few finalizers */ static int runafewfinalizers (lua_State *L, int n) { global_State *g = G(L); int i; for (i = 0; i < n && g->tobefnz; i++) GCTM(L); /* call one finalizer */ return i; } /* ** call all pending finalizers */ static void callallpendingfinalizers (lua_State *L) { global_State *g = G(L); while (g->tobefnz) GCTM(L); } /* ** find last 'next' field in list 'p' list (to add elements in its end) */ static GCObject **findlast (GCObject **p) { while (*p != NULL) p = &(*p)->next; return p; } /* ** Move all unreachable objects (or 'all' objects) that need ** finalization from list 'finobj' to list 'tobefnz' (to be finalized). ** (Note that objects after 'finobjold' cannot be white, so they ** don't need to be traversed. In incremental mode, 'finobjold' is NULL, ** so the whole list is traversed.) */ static void separatetobefnz (global_State *g, int all) { GCObject *curr; GCObject **p = &g->finobj; GCObject **lastnext = findlast(&g->tobefnz); while ((curr = *p) != g->finobjold) { /* traverse all finalizable objects */ lua_assert(tofinalize(curr)); if (!(iswhite(curr) || all)) /* not being collected? */ p = &curr->next; /* don't bother with it */ else { if (curr == g->finobjsur) /* removing 'finobjsur'? 
*/ g->finobjsur = curr->next; /* correct it */ *p = curr->next; /* remove 'curr' from 'finobj' list */ curr->next = *lastnext; /* link at the end of 'tobefnz' list */ *lastnext = curr; lastnext = &curr->next; } } } /* ** if object 'o' has a finalizer, remove it from 'allgc' list (must ** search the list to find it) and link it in 'finobj' list. */ void luaC_checkfinalizer (lua_State *L, GCObject *o, Table *mt) { global_State *g = G(L); if (tofinalize(o) || /* obj. is already marked... */ gfasttm(g, mt, TM_GC) == NULL) /* or has no finalizer? */ return; /* nothing to be done */ else { /* move 'o' to 'finobj' list */ GCObject **p; if (issweepphase(g)) { makewhite(g, o); /* "sweep" object 'o' */ if (g->sweepgc == &o->next) /* should not remove 'sweepgc' object */ g->sweepgc = sweeptolive(L, g->sweepgc); /* change 'sweepgc' */ } else { /* correct pointers into 'allgc' list, if needed */ if (o == g->survival) g->survival = o->next; if (o == g->old) g->old = o->next; if (o == g->reallyold) g->reallyold = o->next; } /* search for pointer pointing to 'o' */ for (p = &g->allgc; *p != o; p = &(*p)->next) { /* empty */ } *p = o->next; /* remove 'o' from 'allgc' list */ o->next = g->finobj; /* link it in 'finobj' list */ g->finobj = o; l_setbit(o->marked, FINALIZEDBIT); /* mark it as such */ } } /* }====================================================== */ /* ** {====================================================== ** Generational Collector ** ======================================================= */ static void setpause (global_State *g); /* mask to erase all color bits, not changing gen-related stuff */ #define maskgencolors (~(bitmask(BLACKBIT) | WHITEBITS)) /* ** Sweep a list of objects, deleting dead ones and turning ** the non dead to old (without changing their colors). */ static void sweep2old (lua_State *L, GCObject **p) { GCObject *curr; while ((curr = *p) != NULL) { if (iswhite(curr)) { /* is 'curr' dead? 
*/ lua_assert(isdead(G(L), curr)); *p = curr->next; /* remove 'curr' from list */ freeobj(L, curr); /* erase 'curr' */ } else { /* all surviving objects become old */ setage(curr, G_OLD); p = &curr->next; /* go to next element */ } } } /* ** Sweep for generational mode. Delete dead objects. (Because the ** collection is not incremental, there are no "new white" objects ** during the sweep. So, any white object must be dead.) For ** non-dead objects, advance their ages and clear the color of ** new objects. (Old objects keep their colors.) ** The ages of G_TOUCHED1 and G_TOUCHED2 objects will advance ** in 'correctgraylist'. (That function will also remove objects ** turned white here from any gray list.) */ static GCObject **sweepgen (lua_State *L, global_State *g, GCObject **p, GCObject *limit) { static const lu_byte nextage[] = { G_SURVIVAL, /* from G_NEW */ G_OLD1, /* from G_SURVIVAL */ G_OLD1, /* from G_OLD0 */ G_OLD, /* from G_OLD1 */ G_OLD, /* from G_OLD (do not change) */ G_TOUCHED1, /* from G_TOUCHED1 (do not change) */ G_TOUCHED2 /* from G_TOUCHED2 (do not change) */ }; int white = luaC_white(g); GCObject *curr; while ((curr = *p) != limit) { if (iswhite(curr)) { /* is 'curr' dead? */ lua_assert(!isold(curr) && isdead(g, curr)); *p = curr->next; /* remove 'curr' from list */ freeobj(L, curr); /* erase 'curr' */ } else { /* correct mark and age */ if (getage(curr) == G_NEW) curr->marked = cast_byte((curr->marked & maskgencolors) | white); setage(curr, nextage[getage(curr)]); p = &curr->next; /* go to next element */ } } return p; } /* ** Traverse a list making all its elements white and clearing their ** age. */ static void whitelist (global_State *g, GCObject *p) { int white = luaC_white(g); for (; p != NULL; p = p->next) p->marked = cast_byte((p->marked & maskcolors) | white); } /* ** Correct a list of gray objects. ** Because this correction is done after sweeping, young objects might ** be turned white and still be in the list. They are only removed. 
** For tables and userdata, advance 'touched1' to 'touched2'; 'touched2'
** objects become regular old and are removed from the list.
** For threads, just remove white ones from the list.
*/
static GCObject **correctgraylist (GCObject **p) {
  GCObject *curr;
  while ((curr = *p) != NULL) {
    switch (curr->tt) {
      case LUA_VTABLE: case LUA_VUSERDATA: {
        GCObject **next = getgclist(curr);
        if (getage(curr) == G_TOUCHED1) {  /* touched in this cycle? */
          lua_assert(isgray(curr));
          gray2black(curr);  /* make it black, for next barrier */
          changeage(curr, G_TOUCHED1, G_TOUCHED2);
          p = next;  /* keep it in the list and go to next element */
        }
        else {  /* everything else is removed */
          /* white objects are simply removed */
          if (!iswhite(curr)) {  /* not white? */
            lua_assert(isold(curr));
            if (getage(curr) == G_TOUCHED2)  /* advance from G_TOUCHED2... */
              changeage(curr, G_TOUCHED2, G_OLD);  /* ... to G_OLD */
            gray2black(curr);  /* make it black */
          }
          *p = *next;  /* remove 'curr' from gray list */
        }
        break;
      }
      case LUA_VTHREAD: {
        lua_State *th = gco2th(curr);
        lua_assert(!isblack(th));  /* threads are never black */
        if (iswhite(th))  /* new object? */
          *p = th->gclist;  /* remove from gray list */
        else  /* old threads remain gray */
          p = &th->gclist;  /* go to next element */
        break;
      }
      default: lua_assert(0);  /* nothing more could be gray here */
    }
  }
  return p;  /* pointer to the list's final 'next' slot */
}


/*
** Correct all gray lists, coalescing them into 'grayagain'. Each call
** to 'correctgraylist' returns the tail slot of the (corrected) list,
** where the next list is chained before being corrected itself.
*/
static void correctgraylists (global_State *g) {
  GCObject **list = correctgraylist(&g->grayagain);
  *list = g->weak; g->weak = NULL;
  list = correctgraylist(list);
  *list = g->allweak; g->allweak = NULL;
  list = correctgraylist(list);
  *list = g->ephemeron; g->ephemeron = NULL;
  correctgraylist(list);
}


/*
** Mark 'OLD1' objects when starting a new young collection.
** Gray objects are already in some gray list, and so will be visited
** in the atomic step.
*/
static void markold (global_State *g, GCObject *from, GCObject *to) {
  GCObject *p;
  for (p = from; p != to; p = p->next) {
    if (getage(p) == G_OLD1) {  /* object became old in the last cycle? */
      lua_assert(!iswhite(p));
      if (isblack(p)) {
        black2gray(p);  /* should be '2white', but gray works too */
        reallymarkobject(g, p);
      }
    }
  }
}


/*
** Finish a young-generation collection: fix the gray lists, maybe
** shrink the string table, and run pending finalizers (unless this
** is an emergency collection).
*/
static void finishgencycle (lua_State *L, global_State *g) {
  correctgraylists(g);
  checkSizes(L, g);
  g->gcstate = GCSpropagate;  /* skip restart */
  if (!g->gcemergency)
    callallpendingfinalizers(L);
}


/*
** Does a young collection. First, mark 'OLD1' objects. Then does the
** atomic step. Then, sweep all lists and advance pointers. Finally,
** finish the collection.
*/
static void youngcollection (lua_State *L, global_State *g) {
  GCObject **psurvival;  /* to point to first non-dead survival object */
  lua_assert(g->gcstate == GCSpropagate);
  markold(g, g->allgc, g->reallyold);
  markold(g, g->finobj, g->finobjrold);
  atomic(L);

  /* sweep nursery and get a pointer to its last live element */
  g->gcstate = GCSswpallgc;
  psurvival = sweepgen(L, g, &g->allgc, g->survival);
  /* sweep 'survival' and 'old' */
  sweepgen(L, g, psurvival, g->reallyold);
  g->reallyold = g->old;
  g->old = *psurvival;  /* 'survival' survivals are old now */
  g->survival = g->allgc;  /* all news are survivals */

  /* repeat for 'finobj' lists */
  psurvival = sweepgen(L, g, &g->finobj, g->finobjsur);
  /* sweep 'survival' and 'old' */
  sweepgen(L, g, psurvival, g->finobjrold);
  g->finobjrold = g->finobjold;
  g->finobjold = *psurvival;  /* 'survival' survivals are old now */
  g->finobjsur = g->finobj;  /* all news are survivals */

  sweepgen(L, g, &g->tobefnz, NULL);
  finishgencycle(L, g);
}


/*
** Turn the state left by an atomic step into a starting point for the
** generational collector: sweep all lists making every surviving
** object old.
*/
static void atomic2gen (lua_State *L, global_State *g) {
  /* sweep all elements making them old */
  g->gcstate = GCSswpallgc;
  sweep2old(L, &g->allgc);
  /* everything alive now is old */
  g->reallyold = g->old = g->survival = g->allgc;

  /* repeat for 'finobj' lists */
  sweep2old(L, &g->finobj);
  g->finobjrold =
g->finobjold = g->finobjsur = g->finobj; sweep2old(L, &g->tobefnz); g->gckind = KGC_GEN; g->lastatomic = 0; g->GCestimate = gettotalbytes(g); /* base for memory control */ finishgencycle(L, g); } /* ** Enter generational mode. Must go until the end of an atomic cycle ** to ensure that all threads and weak tables are in the gray lists. ** Then, turn all objects into old and finishes the collection. */ static lu_mem entergen (lua_State *L, global_State *g) { lu_mem numobjs; luaC_runtilstate(L, bitmask(GCSpause)); /* prepare to start a new cycle */ luaC_runtilstate(L, bitmask(GCSpropagate)); /* start new cycle */ numobjs = atomic(L); /* propagates all and then do the atomic stuff */ atomic2gen(L, g); return numobjs; } /* ** Enter incremental mode. Turn all objects white, make all ** intermediate lists point to NULL (to avoid invalid pointers), ** and go to the pause state. */ static void enterinc (global_State *g) { whitelist(g, g->allgc); g->reallyold = g->old = g->survival = NULL; whitelist(g, g->finobj); whitelist(g, g->tobefnz); g->finobjrold = g->finobjold = g->finobjsur = NULL; g->gcstate = GCSpause; g->gckind = KGC_INC; g->lastatomic = 0; } /* ** Change collector mode to 'newmode'. */ void luaC_changemode (lua_State *L, int newmode) { global_State *g = G(L); if (newmode != g->gckind) { if (newmode == KGC_GEN) /* entering generational mode? */ entergen(L, g); else enterinc(g); /* entering incremental mode */ } g->lastatomic = 0; } /* ** Does a full collection in generational mode. */ static lu_mem fullgen (lua_State *L, global_State *g) { enterinc(g); return entergen(L, g); } /* ** Set debt for the next minor collection, which will happen when ** memory grows 'genminormul'%. */ static void setminordebt (global_State *g) { luaE_setdebt(g, -(cast(l_mem, (gettotalbytes(g) / 100)) * g->genminormul)); } /* ** Does a major collection after last collection was a "bad collection". 
** ** When the program is building a big structure, it allocates lots of ** memory but generates very little garbage. In those scenarios, ** the generational mode just wastes time doing small collections, and ** major collections are frequently what we call a "bad collection", a ** collection that frees too few objects. To avoid the cost of switching ** between generational mode and the incremental mode needed for full ** (major) collections, the collector tries to stay in incremental mode ** after a bad collection, and to switch back to generational mode only ** after a "good" collection (one that traverses less than 9/8 objects ** of the previous one). ** The collector must choose whether to stay in incremental mode or to ** switch back to generational mode before sweeping. At this point, it ** does not know the real memory in use, so it cannot use memory to ** decide whether to return to generational mode. Instead, it uses the ** number of objects traversed (returned by 'atomic') as a proxy. The ** field 'g->lastatomic' keeps this count from the last collection. ** ('g->lastatomic != 0' also means that the last collection was bad.) */ static void stepgenfull (lua_State *L, global_State *g) { lu_mem newatomic; /* count of traversed objects */ lu_mem lastatomic = g->lastatomic; /* count from last collection */ if (g->gckind == KGC_GEN) /* still in generational mode? */ enterinc(g); /* enter incremental mode */ luaC_runtilstate(L, bitmask(GCSpropagate)); /* start new cycle */ newatomic = atomic(L); /* mark everybody */ if (newatomic < lastatomic + (lastatomic >> 3)) { /* good collection? */ atomic2gen(L, g); /* return to generational mode */ setminordebt(g); } else { /* another bad collection; stay in incremental mode */ g->GCestimate = gettotalbytes(g); /* first estimate */; entersweep(L); luaC_runtilstate(L, bitmask(GCSpause)); /* finish collection */ setpause(g); g->lastatomic = newatomic; } } /* ** Does a generational "step". 
** Usually, this means doing a minor collection and setting the debt to ** make another collection when memory grows 'genminormul'% larger. ** ** However, there are exceptions. If memory grows 'genmajormul'% ** larger than it was at the end of the last major collection (kept ** in 'g->GCestimate'), the function does a major collection. At the ** end, it checks whether the major collection was able to free a ** decent amount of memory (at least half the growth in memory since ** previous major collection). If so, the collector keeps its state, ** and the next collection will probably be minor again. Otherwise, ** we have what we call a "bad collection". In that case, set the field ** 'g->lastatomic' to signal that fact, so that the next collection will ** go to 'stepgenfull'. ** ** 'GCdebt <= 0' means an explicit call to GC step with "size" zero; ** in that case, do a minor collection. */ static void genstep (lua_State *L, global_State *g) { if (g->lastatomic != 0) /* last collection was a bad one? 
*/ stepgenfull(L, g); /* do a full step */ else { lu_mem majorbase = g->GCestimate; /* memory after last major collection */ lu_mem majorinc = (majorbase / 100) * getgcparam(g->genmajormul); if (g->GCdebt > 0 && gettotalbytes(g) > majorbase + majorinc) { lu_mem numobjs = fullgen(L, g); /* do a major collection */ if (gettotalbytes(g) < majorbase + (majorinc / 2)) { /* collected at least half of memory growth since last major collection; keep doing minor collections */ setminordebt(g); } else { /* bad collection */ g->lastatomic = numobjs; /* signal that last collection was bad */ setpause(g); /* do a long wait for next (major) collection */ } } else { /* regular case; do a minor collection */ youngcollection(L, g); setminordebt(g); g->GCestimate = majorbase; /* preserve base value */ } } lua_assert(isdecGCmodegen(g)); } /* }====================================================== */ /* ** {====================================================== ** GC control ** ======================================================= */ /* ** Set the "time" to wait before starting a new GC cycle; cycle will ** start when memory use hits the threshold of ('estimate' * pause / ** PAUSEADJ). (Division by 'estimate' should be OK: it cannot be zero, ** because Lua cannot even start with less than PAUSEADJ bytes). */ static void setpause (global_State *g) { l_mem threshold, debt; int pause = getgcparam(g->gcpause); l_mem estimate = g->GCestimate / PAUSEADJ; /* adjust 'estimate' */ lua_assert(estimate > 0); threshold = (pause < MAX_LMEM / estimate) /* overflow? */ ? estimate * pause /* no overflow */ : MAX_LMEM; /* overflow; truncate to maximum */ debt = gettotalbytes(g) - threshold; if (debt > 0) debt = 0; luaE_setdebt(g, debt); } /* ** Enter first sweep phase. ** The call to 'sweeptolive' makes the pointer point to an object ** inside the list (instead of to the header), so that the real sweep do ** not need to skip objects created between "now" and the start of the ** real sweep. 
*/
static void entersweep (lua_State *L) {
  global_State *g = G(L);
  g->gcstate = GCSswpallgc;
  lua_assert(g->sweepgc == NULL);
  g->sweepgc = sweeptolive(L, &g->allgc);
}


/*
** Delete all objects in list 'p' until (but not including) object
** 'limit'.
*/
static void deletelist (lua_State *L, GCObject *p, GCObject *limit) {
  while (p != limit) {
    GCObject *next = p->next;
    freeobj(L, p);
    p = next;
  }
}


/*
** Call all finalizers of the objects in the given Lua state, and
** then free all objects, except for the main thread.
*/
void luaC_freeallobjects (lua_State *L) {
  global_State *g = G(L);
  luaC_changemode(L, KGC_INC);
  separatetobefnz(g, 1);  /* separate all objects with finalizers */
  lua_assert(g->finobj == NULL);
  callallpendingfinalizers(L);
  deletelist(L, g->allgc, obj2gco(g->mainthread));
  deletelist(L, g->finobj, NULL);
  deletelist(L, g->fixedgc, NULL);  /* collect fixed objects */
  lua_assert(g->strt.nuse == 0);
}


/*
** Atomic phase of an incremental collection: finishes the mark phase
** (without interleaving with the mutator), handles weak tables and
** finalizers, and flips the current white.  Returns an estimate of
** the marking work done.
*/
static lu_mem atomic (lua_State *L) {
  global_State *g = G(L);
  lu_mem work = 0;
  GCObject *origweak, *origall;
  GCObject *grayagain = g->grayagain;  /* save original list */
  g->grayagain = NULL;
  lua_assert(g->ephemeron == NULL && g->weak == NULL);
  lua_assert(!iswhite(g->mainthread));
  g->gcstate = GCSatomic;
  markobject(g, L);  /* mark running thread */
  /* registry and global metatables may be changed by API */
  markvalue(g, &g->l_registry);
  markmt(g);  /* mark global metatables */
  work += propagateall(g);  /* empties 'gray' list */
  /* remark occasional upvalues of (maybe) dead threads */
  work += remarkupvals(g);
  work += propagateall(g);  /* propagate changes */
  g->gray = grayagain;
  work += propagateall(g);  /* traverse 'grayagain' list */
  convergeephemerons(g);
  /* at this point, all strongly accessible objects are marked. */
  /* Clear values from weak tables, before checking finalizers */
  clearbyvalues(g, g->weak, NULL);
  clearbyvalues(g, g->allweak, NULL);
  origweak = g->weak; origall = g->allweak;
  separatetobefnz(g, 0);  /* separate objects to be finalized */
  work += markbeingfnz(g);  /* mark objects that will be finalized */
  work += propagateall(g);  /* remark, to propagate 'resurrection' */
  convergeephemerons(g);
  /* at this point, all resurrected objects are marked. */
  /* remove dead objects from weak tables */
  clearbykeys(g, g->ephemeron);  /* clear keys from all ephemeron tables */
  clearbykeys(g, g->allweak);  /* clear keys from all 'allweak' tables */
  /* clear values from resurrected weak tables */
  clearbyvalues(g, g->weak, origweak);
  clearbyvalues(g, g->allweak, origall);
  luaS_clearcache(g);
  g->currentwhite = cast_byte(otherwhite(g));  /* flip current white */
  lua_assert(g->gray == NULL);
  return work;  /* estimate of slots marked by 'atomic' */
}


/*
** Run one sweep step over the current sweep list; when the list is
** exhausted, advance the collector to 'nextstate' with 'nextlist' as
** the next list to be swept.  Returns the number of objects swept
** (as reported by 'sweeplist'), or 0 when only changing state.
*/
static int sweepstep (lua_State *L, global_State *g,
                      int nextstate, GCObject **nextlist) {
  if (g->sweepgc) {
    l_mem olddebt = g->GCdebt;
    int count;
    g->sweepgc = sweeplist(L, g->sweepgc, GCSWEEPMAX, &count);
    g->GCestimate += g->GCdebt - olddebt;  /* update estimate */
    return count;
  }
  else {  /* enter next state */
    g->gcstate = nextstate;
    g->sweepgc = nextlist;
    return 0;  /* no work done */
  }
}


/*
** Perform one step of the incremental collection state machine;
** returns an estimate of the amount of work done in that step.
*/
static lu_mem singlestep (lua_State *L) {
  global_State *g = G(L);
  switch (g->gcstate) {
    case GCSpause: {
      restartcollection(g);
      g->gcstate = GCSpropagate;
      return 1;
    }
    case GCSpropagate: {
      if (g->gray == NULL) {  /* no more gray objects? */
        g->gcstate = GCSenteratomic;  /* finish propagate phase */
        return 0;
      }
      else
        return propagatemark(g);  /* traverse one gray object */
    }
    case GCSenteratomic: {
      lu_mem work = atomic(L);  /* work is what was traversed by 'atomic' */
      entersweep(L);
      g->GCestimate = gettotalbytes(g);  /* first estimate */;
      return work;
    }
    case GCSswpallgc: {  /* sweep "regular" objects */
      return sweepstep(L, g, GCSswpfinobj, &g->finobj);
    }
    case GCSswpfinobj: {  /* sweep objects with finalizers */
      return sweepstep(L, g, GCSswptobefnz, &g->tobefnz);
    }
    case GCSswptobefnz: {  /* sweep objects to be finalized */
      return sweepstep(L, g, GCSswpend, NULL);
    }
    case GCSswpend: {  /* finish sweeps */
      checkSizes(L, g);
      g->gcstate = GCScallfin;
      return 0;
    }
    case GCScallfin: {  /* call remaining finalizers */
      if (g->tobefnz && !g->gcemergency) {
        int n = runafewfinalizers(L, GCFINMAX);
        return n * GCFINALIZECOST;
      }
      else {  /* emergency mode or no more finalizers */
        g->gcstate = GCSpause;  /* finish collection */
        return 0;
      }
    }
    default: lua_assert(0); return 0;
  }
}


/*
** advances the garbage collector until it reaches a state allowed
** by 'statemask'
*/
void luaC_runtilstate (lua_State *L, int statesmask) {
  global_State *g = G(L);
  while (!testbit(statesmask, g->gcstate))
    singlestep(L);
}


/*
** Performs a basic incremental step. The debt and step size are
** converted from bytes to "units of work"; then the function loops
** running single steps until adding that many units of work or
** finishing a cycle (pause state). Finally, it sets the debt that
** controls when next step will be performed.
*/
static void incstep (lua_State *L, global_State *g) {
  int stepmul = (getgcparam(g->gcstepmul) | 1);  /* avoid division by 0 */
  l_mem debt = (g->GCdebt / WORK2MEM) * stepmul;
  l_mem stepsize = (g->gcstepsize <= log2maxs(l_mem))
                 ? ((cast(l_mem, 1) << g->gcstepsize) / WORK2MEM) * stepmul
                 : MAX_LMEM;  /* overflow; keep maximum value */
  do {  /* repeat until pause or enough "credit" (negative debt) */
    lu_mem work = singlestep(L);  /* perform one single step */
    debt -= work;
  } while (debt > -stepsize && g->gcstate != GCSpause);
  if (g->gcstate == GCSpause)
    setpause(g);  /* pause until next cycle */
  else {
    debt = (debt / stepmul) * WORK2MEM;  /* convert 'work units' to bytes */
    luaE_setdebt(g, debt);
  }
}


/*
** performs a basic GC step if collector is running
*/
void luaC_step (lua_State *L) {
  global_State *g = G(L);
  lua_assert(!g->gcemergency);
  if (g->gcrunning) {  /* running? */
    if(isdecGCmodegen(g))
      genstep(L, g);
    else
      incstep(L, g);
  }
}


/*
** Perform a full collection in incremental mode.
** Before running the collection, check 'keepinvariant'; if it is true,
** there may be some objects marked as black, so the collector has
** to sweep all objects to turn them back to white (as white has not
** changed, nothing will be collected).
*/
static void fullinc (lua_State *L, global_State *g) {
  if (keepinvariant(g))  /* black objects? */
    entersweep(L);  /* sweep everything to turn them back to white */
  /* finish any pending sweep phase to start a new cycle */
  luaC_runtilstate(L, bitmask(GCSpause));
  luaC_runtilstate(L, bitmask(GCScallfin));  /* run up to finalizers */
  /* estimate must be correct after a full GC cycle */
  lua_assert(g->GCestimate == gettotalbytes(g));
  luaC_runtilstate(L, bitmask(GCSpause));  /* finish collection */
  setpause(g);
}


/*
** Performs a full GC cycle; if 'isemergency', set a flag to avoid
** some operations which could change the interpreter state in some
** unexpected ways (running finalizers and shrinking some structures).
*/
void luaC_fullgc (lua_State *L, int isemergency) {
  global_State *g = G(L);
  lua_assert(!g->gcemergency);
  g->gcemergency = isemergency;  /* set flag */
  if (g->gckind == KGC_INC)
    fullinc(L, g);
  else
    fullgen(L, g);
  g->gcemergency = 0;
}

/* }====================================================== */
./CrossVul/dataset_final_sorted/CWE-763/c/good_4273_0
crossvul-cpp_data_good_724_0
/* Copyright (C) 2007-2017 Open Information Security Foundation
 *
 * You can copy, redistribute or modify this Program under the terms of
 * the GNU General Public License version 2 as published by the Free
 * Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */

/**
 * \file
 *
 * \author Victor Julien <victor@inliniac.net>
 *
 * Basic detection engine
 */

#include "suricata-common.h"
#include "suricata.h"
#include "conf.h"
#include "decode.h"
#include "flow.h"
#include "stream-tcp.h"
#include "app-layer.h"
#include "app-layer-parser.h"
#include "detect.h"
#include "detect-engine.h"
#include "detect-engine-profile.h"
#include "detect-engine-alert.h"
#include "detect-engine-siggroup.h"
#include "detect-engine-address.h"
#include "detect-engine-proto.h"
#include "detect-engine-port.h"
#include "detect-engine-mpm.h"
#include "detect-engine-iponly.h"
#include "detect-engine-threshold.h"
#include "detect-engine-prefilter.h"
#include "detect-engine-state.h"
#include "detect-engine-analyzer.h"
#include "detect-engine-filedata.h"
#include "detect-engine-payload.h"
#include "detect-engine-event.h"
#include "detect-engine-hcbd.h"
#include "detect-engine-hsbd.h"
#include "detect-filestore.h"
#include "detect-flowvar.h"
#include "detect-replace.h"
#include "util-validate.h"
#include "util-detect.h"

/* Per-packet scratch state shared between the stages of a detection run.
 * Built once by DetectRunSetup and threaded through the other stages. */
typedef struct DetectRunScratchpad {
    const AppProto alproto;          /* app-layer protocol of the flow (or ALPROTO_UNKNOWN) */
    const uint8_t flow_flags; /* flow/state flags: STREAM_* */
    const bool app_decoder_events;   /* flow's parser has decoder events pending */
    const SigGroupHead *sgh;         /* rule group selected for this packet */
    SignatureMask pkt_mask;          /* prefilter mask built from the packet */
} DetectRunScratchpad;

/* prototypes */
static DetectRunScratchpad DetectRunSetup(const DetectEngineCtx *de_ctx,
        DetectEngineThreadCtx *det_ctx, Packet * const p, Flow * const pflow);
static void DetectRunInspectIPOnly(ThreadVars *tv, const DetectEngineCtx *de_ctx,
        DetectEngineThreadCtx *det_ctx, Flow * const pflow, Packet * const p);
static inline void DetectRunGetRuleGroup(const DetectEngineCtx *de_ctx,
        Packet * const p, Flow * const pflow, DetectRunScratchpad *scratch);
static inline void DetectRunPrefilterPkt(ThreadVars *tv,
        DetectEngineCtx *de_ctx, DetectEngineThreadCtx *det_ctx, Packet *p,
        DetectRunScratchpad *scratch);
static inline void DetectRulePacketRules(ThreadVars * const tv,
        DetectEngineCtx * const de_ctx, DetectEngineThreadCtx * const det_ctx,
        Packet * const p, Flow * const pflow,
        const DetectRunScratchpad *scratch);
static void DetectRunTx(ThreadVars *tv, DetectEngineCtx *de_ctx,
        DetectEngineThreadCtx *det_ctx, Packet *p, Flow *f,
        DetectRunScratchpad *scratch);
static inline void DetectRunPostRules(ThreadVars *tv, DetectEngineCtx *de_ctx,
        DetectEngineThreadCtx *det_ctx, Packet * const p, Flow * const pflow,
        DetectRunScratchpad *scratch);
static void DetectRunCleanup(DetectEngineThreadCtx *det_ctx,
        Packet *p, Flow * const pflow);

/** \internal
 *  \brief Main per-packet detection pipeline: setup, IP-only, rule-group
 *  lookup, prefilter, per-packet rules, tx/state rules, post-rule work,
 *  and cleanup.
 */
static void DetectRun(ThreadVars *th_v,
        DetectEngineCtx *de_ctx, DetectEngineThreadCtx *det_ctx,
        Packet *p)
{
    SCEnter();
    SCLogDebug("pcap_cnt %"PRIu64, p->pcap_cnt);

    /* bail early if packet should not be inspected */
    if (p->flags & PKT_NOPACKET_INSPECTION) {
        /* nothing to do */
        SCReturn;
    }

    /* Load the Packet's flow early, even though it might not be needed.
     * Mark as a constant pointer, although the flow itself can change. */
    Flow * const pflow = p->flow;

    DetectRunScratchpad scratch = DetectRunSetup(de_ctx, det_ctx, p, pflow);

    /* run the IPonly engine */
    DetectRunInspectIPOnly(th_v, de_ctx, det_ctx, pflow, p);

    /* get our rule group */
    DetectRunGetRuleGroup(de_ctx, p, pflow, &scratch);
    /* if we didn't get a sig group head, we
     * have nothing to do.... */
    if (scratch.sgh == NULL) {
        SCLogDebug("no sgh for this packet, nothing to match against");
        goto end;
    }

    /* run the prefilters for packets */
    DetectRunPrefilterPkt(th_v, de_ctx, det_ctx, p, &scratch);

    PACKET_PROFILING_DETECT_START(p, PROF_DETECT_RULES);
    /* inspect the rules against the packet */
    DetectRulePacketRules(th_v, de_ctx, det_ctx, p, pflow, &scratch);
    PACKET_PROFILING_DETECT_END(p, PROF_DETECT_RULES);

    /* run tx/state inspection */
    if (pflow && pflow->alstate) {
        PACKET_PROFILING_DETECT_START(p, PROF_DETECT_TX);
        DetectRunTx(th_v, de_ctx, det_ctx, p, pflow, &scratch);
        PACKET_PROFILING_DETECT_END(p, PROF_DETECT_TX);
    }

end:
    /* NOTE: post-rule work and cleanup run even when no sgh was found */
    DetectRunPostRules(th_v, de_ctx, det_ctx, p, pflow, &scratch);

    DetectRunCleanup(det_ctx, p, pflow);
    SCReturn;
}

/** \internal
 *  \brief Run a matched signature's post-match keyword list, then
 *  replace-execution and (if flagged) filestore post-match handling.
 */
static void DetectRunPostMatch(ThreadVars *tv,
                               DetectEngineThreadCtx *det_ctx, Packet *p,
                               const Signature *s)
{
    /* run the packet match functions */
    const SigMatchData *smd = s->sm_arrays[DETECT_SM_LIST_POSTMATCH];
    if (smd != NULL) {
        KEYWORD_PROFILING_SET_LIST(det_ctx, DETECT_SM_LIST_POSTMATCH);

        SCLogDebug("running match functions, sm %p", smd);

        while (1) {
            KEYWORD_PROFILING_START;
            /* post-match return values are intentionally ignored */
            (void)sigmatch_table[smd->type].Match(tv, det_ctx, p, s, smd->ctx);
            KEYWORD_PROFILING_END(det_ctx, smd->type, 1);
            if (smd->is_last)
                break;
            smd++;
        }
    }

    DetectReplaceExecute(p, det_ctx);

    if (s->flags & SIG_FLAG_FILESTORE)
        DetectFilestorePostMatch(tv, det_ctx, p, s);

    return;
}

/**
 *  \brief Get the SigGroupHead for a packet.
 *
 *  \param de_ctx detection engine context
 *  \param p packet
 *
 *  \retval sgh the SigGroupHead or NULL if non applies to the packet
 */
const SigGroupHead *SigMatchSignaturesGetSgh(const DetectEngineCtx *de_ctx,
        const Packet *p)
{
    SCEnter();

    int f;
    SigGroupHead *sgh = NULL;

    /* if the packet proto is 0 (not set), we're inspecting it against
     * the decoder events sgh we have. */
    if (p->proto == 0 && p->events.cnt > 0) {
        SCReturnPtr(de_ctx->decoder_event_sgh, "SigGroupHead");
    } else if (p->proto == 0) {
        if (!(PKT_IS_IPV4(p) || PKT_IS_IPV6(p))) {
            /* not IP, so nothing to do */
            SCReturnPtr(NULL, "SigGroupHead");
        }
    }

    /* select the flow_gh: 0 = toclient, 1 = toserver */
    if (p->flowflags & FLOW_PKT_TOCLIENT)
        f = 0;
    else
        f = 1;

    int proto = IP_GET_IPPROTO(p);
    if (proto == IPPROTO_TCP) {
        DetectPort *list = de_ctx->flow_gh[f].tcp;
        SCLogDebug("tcp toserver %p, tcp toclient %p: going to use %p",
                de_ctx->flow_gh[1].tcp, de_ctx->flow_gh[0].tcp, de_ctx->flow_gh[f].tcp);
        /* toserver lookups key on dst port, toclient on src port */
        uint16_t port = f ? p->dp : p->sp;
        SCLogDebug("tcp port %u -> %u:%u", port, p->sp, p->dp);
        DetectPort *sghport = DetectPortLookupGroup(list, port);
        if (sghport != NULL)
            sgh = sghport->sh;
        SCLogDebug("TCP list %p, port %u, direction %s, sghport %p, sgh %p",
                list, port, f ? "toserver" : "toclient", sghport, sgh);
    } else if (proto == IPPROTO_UDP) {
        DetectPort *list = de_ctx->flow_gh[f].udp;
        uint16_t port = f ? p->dp : p->sp;
        DetectPort *sghport = DetectPortLookupGroup(list, port);
        if (sghport != NULL)
            sgh = sghport->sh;
        SCLogDebug("UDP list %p, port %u, direction %s, sghport %p, sgh %p",
                list, port, f ? "toserver" : "toclient", sghport, sgh);
    } else {
        sgh = de_ctx->flow_gh[f].sgh[proto];
    }

    SCReturnPtr(sgh, "SigGroupHead");
}

/** \internal
 *  \brief Merge the (sorted) mpm-prefiltered rule id list and the
 *  (sorted) non-prefiltered rule id list into det_ctx->match_array,
 *  deduplicating ids. An id present on BOTH lists is a negated mpm
 *  pattern and is dropped from the result.
 */
static inline void
DetectPrefilterMergeSort(DetectEngineCtx *de_ctx,
                         DetectEngineThreadCtx *det_ctx)
{
    SigIntId mpm, nonmpm;
    det_ctx->match_array_cnt = 0;
    SigIntId *mpm_ptr = det_ctx->pmq.rule_id_array;
    SigIntId *nonmpm_ptr = det_ctx->non_pf_id_array;
    uint32_t m_cnt = det_ctx->pmq.rule_id_array_cnt;
    uint32_t n_cnt = det_ctx->non_pf_id_cnt;
    SigIntId *final_ptr;
    uint32_t final_cnt;
    SigIntId id;
    SigIntId previous_id = (SigIntId)-1;
    Signature **sig_array = de_ctx->sig_array;
    Signature **match_array = det_ctx->match_array;
    Signature *s;

    SCLogDebug("PMQ rule id array count %d", det_ctx->pmq.rule_id_array_cnt);

    /* Load first values. */
    if (likely(m_cnt)) {
        mpm = *mpm_ptr;
    } else {
        /* mpm list is empty */
        final_ptr = nonmpm_ptr;
        final_cnt = n_cnt;
        goto final;
    }
    if (likely(n_cnt)) {
        nonmpm = *nonmpm_ptr;
    } else {
        /* non-mpm list is empty. */
        final_ptr = mpm_ptr;
        final_cnt = m_cnt;
        goto final;
    }
    while (1) {
        if (mpm < nonmpm) {
            /* Take from mpm list */
            id = mpm;

            s = sig_array[id];
            /* As the mpm list can contain duplicates, check for that here. */
            if (likely(id != previous_id)) {
                *match_array++ = s;
                previous_id = id;
            }
            if (unlikely(--m_cnt == 0)) {
                /* mpm list is now empty */
                final_ptr = nonmpm_ptr;
                final_cnt = n_cnt;
                goto final;
            }
            mpm_ptr++;
            mpm = *mpm_ptr;
        } else if (mpm > nonmpm) {
            id = nonmpm;

            s = sig_array[id];
            /* As the mpm list can contain duplicates, check for that here. */
            if (likely(id != previous_id)) {
                *match_array++ = s;
                previous_id = id;
            }
            if (unlikely(--n_cnt == 0)) {
                final_ptr = mpm_ptr;
                final_cnt = m_cnt;
                goto final;
            }
            nonmpm_ptr++;
            nonmpm = *nonmpm_ptr;

        } else { /* implied mpm == nonmpm */
            /* special case: if on both lists, it's a negated mpm pattern */

            /* mpm list may have dups, so skip past them here */
            while (--m_cnt != 0) {
                mpm_ptr++;
                mpm = *mpm_ptr;
                if (mpm != nonmpm)
                    break;
            }
            /* if mpm is done, update nonmpm_ptrs and jump to final */
            if (unlikely(m_cnt == 0)) {
                n_cnt--;

                /* mpm list is now empty */
                final_ptr = ++nonmpm_ptr;
                final_cnt = n_cnt;
                goto final;
            }
            /* otherwise, if nonmpm is done jump to final for mpm
             * mpm ptrs alrady updated */
            if (unlikely(--n_cnt == 0)) {
                final_ptr = mpm_ptr;
                final_cnt = m_cnt;
                goto final;
            }

            /* not at end of the lists, update nonmpm. Mpm already
             * updated in while loop above. */
            nonmpm_ptr++;
            nonmpm = *nonmpm_ptr;
        }
    }

 final: /* Only one list remaining. Just walk that list. */
    while (final_cnt-- > 0) {
        id = *final_ptr++;
        s = sig_array[id];

        /* As the mpm list can contain duplicates, check for that here. */
        if (likely(id != previous_id)) {
            *match_array++ = s;
            previous_id = id;
        }
    }

    det_ctx->match_array_cnt = match_array - det_ctx->match_array;
    BUG_ON((det_ctx->pmq.rule_id_array_cnt + det_ctx->non_pf_id_cnt) < det_ctx->match_array_cnt);
}

/** \internal
 *  \brief Filter the thread's non-prefilter rule store by the packet's
 *  signature mask and app-layer protocol, writing surviving rule ids
 *  into det_ctx->non_pf_id_array.
 */
static inline void
DetectPrefilterBuildNonPrefilterList(DetectEngineThreadCtx *det_ctx,
                                     SignatureMask mask, uint8_t alproto)
{
    uint32_t x = 0;
    for (x = 0; x < det_ctx->non_pf_store_cnt; x++) {
        /* only if the mask matches this rule can possibly match,
         * so build the non_mpm array only for match candidates */
        const SignatureMask rule_mask = det_ctx->non_pf_store_ptr[x].mask;
        const uint8_t rule_alproto = det_ctx->non_pf_store_ptr[x].alproto;
        if ((rule_mask & mask) == rule_mask &&
                (rule_alproto == 0 || rule_alproto == alproto)) {
            det_ctx->non_pf_id_array[det_ctx->non_pf_id_cnt++] = det_ctx->non_pf_store_ptr[x].id;
        }
    }
}

/** \internal
 *  \brief select non-mpm list
 *  Based on the packet properties, select the non-mpm list to use
 *  \todo move non_pf_store* into scratchpad
 */
static inline void
DetectPrefilterSetNonPrefilterList(const Packet *p, DetectEngineThreadCtx *det_ctx, DetectRunScratchpad *scratch)
{
    /* TCP SYN packets get a dedicated non-prefilter store */
    if ((p->proto == IPPROTO_TCP) && (p->tcph != NULL) && (p->tcph->th_flags & TH_SYN)) {
        det_ctx->non_pf_store_ptr = scratch->sgh->non_pf_syn_store_array;
        det_ctx->non_pf_store_cnt = scratch->sgh->non_pf_syn_store_cnt;
    } else {
        det_ctx->non_pf_store_ptr = scratch->sgh->non_pf_other_store_array;
        det_ctx->non_pf_store_cnt = scratch->sgh->non_pf_other_store_cnt;
    }
    SCLogDebug("sgh non_pf ptr %p cnt %u (syn %p/%u, other %p/%u)",
            det_ctx->non_pf_store_ptr, det_ctx->non_pf_store_cnt,
            scratch->sgh->non_pf_syn_store_array, scratch->sgh->non_pf_syn_store_cnt,
            scratch->sgh->non_pf_other_store_array, scratch->sgh->non_pf_other_store_cnt);
}

/** \internal
 *  \brief update flow's file tracking flags based on the detection engine
 */
static inline void
DetectPostInspectFileFlagsUpdate(Flow *pflow, const SigGroupHead *sgh, uint8_t direction)
{
    /* see if this sgh requires us to consider file storing */
    if (!FileForceFilestore() && (sgh == NULL ||
                sgh->filestore_cnt == 0)) {
        FileDisableStoring(pflow, direction);
    }
#ifdef HAVE_MAGIC
    /* see if this sgh requires us to consider file magic */
    if (!FileForceMagic() && (sgh == NULL ||
                !(sgh->flags & SIG_GROUP_HEAD_HAVEFILEMAGIC))) {
        SCLogDebug("disabling magic for flow");
        FileDisableMagic(pflow, direction);
    }
#endif
    /* see if this sgh requires us to consider file md5 */
    if (!FileForceMd5() && (sgh == NULL ||
                !(sgh->flags & SIG_GROUP_HEAD_HAVEFILEMD5))) {
        SCLogDebug("disabling md5 for flow");
        FileDisableMd5(pflow, direction);
    }
    /* see if this sgh requires us to consider file sha1 */
    if (!FileForceSha1() && (sgh == NULL ||
                !(sgh->flags & SIG_GROUP_HEAD_HAVEFILESHA1))) {
        SCLogDebug("disabling sha1 for flow");
        FileDisableSha1(pflow, direction);
    }
    /* see if this sgh requires us to consider file sha256 */
    if (!FileForceSha256() && (sgh == NULL ||
                !(sgh->flags & SIG_GROUP_HEAD_HAVEFILESHA256))) {
        SCLogDebug("disabling sha256 for flow");
        FileDisableSha256(pflow, direction);
    }
    /* see if this sgh requires us to consider filesize */
    if (sgh == NULL || !(sgh->flags & SIG_GROUP_HEAD_HAVEFILESIZE)) {
        SCLogDebug("disabling filesize for flow");
        FileDisableFilesize(pflow, direction);
    }
}

/** \internal
 *  \brief On the first packet in a direction, cache the sgh on the flow,
 *  disable raw stream reassembly when no rule needs it, and update the
 *  flow's file tracking flags.
 */
static inline void
DetectRunPostGetFirstRuleGroup(const Packet *p, Flow *pflow, const SigGroupHead *sgh)
{
    if ((p->flowflags & FLOW_PKT_TOSERVER) && !(pflow->flags & FLOW_SGH_TOSERVER)) {
        /* first time we see this toserver sgh, store it */
        pflow->sgh_toserver = sgh;
        pflow->flags |= FLOW_SGH_TOSERVER;

        if (p->proto == IPPROTO_TCP && (sgh == NULL || !(sgh->flags & SIG_GROUP_HEAD_HAVERAWSTREAM))) {
            if (pflow->protoctx != NULL) {
                TcpSession *ssn = pflow->protoctx;
                SCLogDebug("STREAMTCP_STREAM_FLAG_DISABLE_RAW ssn.client");
                ssn->client.flags |= STREAMTCP_STREAM_FLAG_DISABLE_RAW;
            }
        }

        DetectPostInspectFileFlagsUpdate(pflow,
                pflow->sgh_toserver, STREAM_TOSERVER);

    } else if ((p->flowflags & FLOW_PKT_TOCLIENT) && !(pflow->flags & FLOW_SGH_TOCLIENT)) {
        pflow->sgh_toclient = sgh;
        pflow->flags |= FLOW_SGH_TOCLIENT;

        if (p->proto == IPPROTO_TCP && (sgh == NULL || !(sgh->flags & SIG_GROUP_HEAD_HAVERAWSTREAM))) {
            if (pflow->protoctx != NULL) {
                TcpSession *ssn = pflow->protoctx;
                SCLogDebug("STREAMTCP_STREAM_FLAG_DISABLE_RAW ssn.server");
                ssn->server.flags |= STREAMTCP_STREAM_FLAG_DISABLE_RAW;
            }
        }

        DetectPostInspectFileFlagsUpdate(pflow,
                pflow->sgh_toclient, STREAM_TOCLIENT);
    }
}

/** \internal
 *  \brief Resolve the rule group for this packet: reuse the flow-cached
 *  sgh when valid, otherwise look it up and (except for ICMP errors)
 *  cache it on the flow.
 */
static inline void DetectRunGetRuleGroup(
    const DetectEngineCtx *de_ctx,
    Packet * const p, Flow * const pflow,
    DetectRunScratchpad *scratch)
{
    const SigGroupHead *sgh = NULL;

    if (pflow) {
        bool use_flow_sgh = false;
        /* Get the stored sgh from the flow (if any). Make sure we're not using
         * the sgh for icmp error packets part of the same stream. */
        if (IP_GET_IPPROTO(p) == pflow->proto) { /* filter out icmp */
            PACKET_PROFILING_DETECT_START(p, PROF_DETECT_GETSGH);
            if ((p->flowflags & FLOW_PKT_TOSERVER) && (pflow->flags & FLOW_SGH_TOSERVER)) {
                sgh = pflow->sgh_toserver;
                SCLogDebug("sgh = pflow->sgh_toserver; => %p", sgh);
                use_flow_sgh = true;
            } else if ((p->flowflags & FLOW_PKT_TOCLIENT) && (pflow->flags & FLOW_SGH_TOCLIENT)) {
                sgh = pflow->sgh_toclient;
                SCLogDebug("sgh = pflow->sgh_toclient; => %p", sgh);
                use_flow_sgh = true;
            }
            PACKET_PROFILING_DETECT_END(p, PROF_DETECT_GETSGH);
        }

        if (!(use_flow_sgh)) {
            PACKET_PROFILING_DETECT_START(p, PROF_DETECT_GETSGH);
            sgh = SigMatchSignaturesGetSgh(de_ctx, p);
            PACKET_PROFILING_DETECT_END(p, PROF_DETECT_GETSGH);

            /* HACK: prevent the wrong sgh (or NULL) from being stored in the
             * flow's sgh pointers */
            if (PKT_IS_ICMPV4(p) && ICMPV4_DEST_UNREACH_IS_VALID(p)) {
                ; /* no-op */
            } else {
                /* store the found sgh (or NULL) in the flow to save us
                 * from looking it up again for the next packet.
                 * Also run other tasks */
                DetectRunPostGetFirstRuleGroup(p, pflow, sgh);
            }
        }
    } else { /* p->flags & PKT_HAS_FLOW */
        /* no flow */

        PACKET_PROFILING_DETECT_START(p, PROF_DETECT_GETSGH);
        sgh = SigMatchSignaturesGetSgh(de_ctx, p);
        PACKET_PROFILING_DETECT_END(p, PROF_DETECT_GETSGH);
    }

    scratch->sgh = sgh;
}

/** \internal
 *  \brief Run the IP-only engine for directions not yet inspected on the
 *  flow; re-apply a previously decided drop action otherwise.
 */
static void DetectRunInspectIPOnly(ThreadVars *tv, const DetectEngineCtx *de_ctx,
        DetectEngineThreadCtx *det_ctx, Flow * const pflow, Packet * const p)
{
    if (pflow) {
        /* set the iponly stuff */
        if (pflow->flags & FLOW_TOCLIENT_IPONLY_SET)
            p->flowflags |= FLOW_PKT_TOCLIENT_IPONLY_SET;
        if (pflow->flags & FLOW_TOSERVER_IPONLY_SET)
            p->flowflags |= FLOW_PKT_TOSERVER_IPONLY_SET;

        if (((p->flowflags & FLOW_PKT_TOSERVER) && !(p->flowflags & FLOW_PKT_TOSERVER_IPONLY_SET)) ||
            ((p->flowflags & FLOW_PKT_TOCLIENT) && !(p->flowflags & FLOW_PKT_TOCLIENT_IPONLY_SET)))
        {
            SCLogDebug("testing against \"ip-only\" signatures");

            PACKET_PROFILING_DETECT_START(p, PROF_DETECT_IPONLY);
            IPOnlyMatchPacket(tv, de_ctx, det_ctx, &de_ctx->io_ctx,
                              &det_ctx->io_ctx, p);
            PACKET_PROFILING_DETECT_END(p, PROF_DETECT_IPONLY);

            /* save in the flow that we scanned this direction... */
            FlowSetIPOnlyFlag(pflow, p->flowflags & FLOW_PKT_TOSERVER ? 1 : 0);
        } else if (((p->flowflags & FLOW_PKT_TOSERVER) &&
                   (pflow->flags & FLOW_TOSERVER_IPONLY_SET)) ||
                   ((p->flowflags & FLOW_PKT_TOCLIENT) &&
                   (pflow->flags & FLOW_TOCLIENT_IPONLY_SET)))
        {
            /* If we have a drop from IP only module,
             * we will drop the rest of the flow packets
             * This will apply only to inline/IPS */
            if (pflow->flags & FLOW_ACTION_DROP) {
                PACKET_DROP(p);
            }
        }
    } else { /* p->flags & PKT_HAS_FLOW */
        /* no flow */

        /* Even without flow we should match the packet src/dst */
        PACKET_PROFILING_DETECT_START(p, PROF_DETECT_IPONLY);
        IPOnlyMatchPacket(tv, de_ctx, det_ctx, &de_ctx->io_ctx,
                          &det_ctx->io_ctx, p);
        PACKET_PROFILING_DETECT_END(p, PROF_DETECT_IPONLY);
    }
}

/* returns 0 if no match, 1 if match */
static inline int DetectRunInspectRuleHeader(
    const Packet *p,
    const Flow *f,
    const Signature *s,
    const uint32_t sflags,
    const uint8_t s_proto_flags)
{
    /* check if this signature has a requirement for flowvars of some type
     * and if so, if we actually have any in the flow. If not, the sig
     * can't match and we skip it. */
    if ((p->flags & PKT_HAS_FLOW) && (sflags & SIG_FLAG_REQUIRE_FLOWVAR)) {
        DEBUG_VALIDATE_BUG_ON(f == NULL);

        int m  = f->flowvar ? 1 : 0;

        /* no flowvars? skip this sig */
        if (m == 0) {
            SCLogDebug("skipping sig as the flow has no flowvars and sig "
                    "has SIG_FLAG_REQUIRE_FLOWVAR flag set.");
            return 0;
        }
    }

    if ((s_proto_flags & DETECT_PROTO_IPV4) && !PKT_IS_IPV4(p)) {
        SCLogDebug("ip version didn't match");
        return 0;
    }
    if ((s_proto_flags & DETECT_PROTO_IPV6) && !PKT_IS_IPV6(p)) {
        SCLogDebug("ip version didn't match");
        return 0;
    }

    if (DetectProtoContainsProto(&s->proto, IP_GET_IPPROTO(p)) == 0) {
        SCLogDebug("proto didn't match");
        return 0;
    }

    /* check the source & dst port in the sig */
    if (p->proto == IPPROTO_TCP || p->proto == IPPROTO_UDP || p->proto == IPPROTO_SCTP) {
        if (!(sflags & SIG_FLAG_DP_ANY)) {
            if (p->flags & PKT_IS_FRAGMENT)
                return 0;
            DetectPort *dport = DetectPortLookupGroup(s->dp,p->dp);
            if (dport == NULL) {
                SCLogDebug("dport didn't match.");
                return 0;
            }
        }
        if (!(sflags & SIG_FLAG_SP_ANY)) {
            if (p->flags & PKT_IS_FRAGMENT)
                return 0;
            DetectPort *sport = DetectPortLookupGroup(s->sp,p->sp);
            if (sport == NULL) {
                SCLogDebug("sport didn't match.");
                return 0;
            }
        }
    } else if ((sflags & (SIG_FLAG_DP_ANY|SIG_FLAG_SP_ANY)) != (SIG_FLAG_DP_ANY|SIG_FLAG_SP_ANY)) {
        SCLogDebug("port-less protocol and sig needs ports");
        return 0;
    }

    /* check the destination address */
    if (!(sflags & SIG_FLAG_DST_ANY)) {
        if (PKT_IS_IPV4(p)) {
            if (DetectAddressMatchIPv4(s->addr_dst_match4, s->addr_dst_match4_cnt, &p->dst) == 0)
                return 0;
        } else if (PKT_IS_IPV6(p)) {
            if (DetectAddressMatchIPv6(s->addr_dst_match6, s->addr_dst_match6_cnt, &p->dst) == 0)
                return 0;
        }
    }
    /* check the source address */
    if (!(sflags & SIG_FLAG_SRC_ANY)) {
        if (PKT_IS_IPV4(p)) {
            if (DetectAddressMatchIPv4(s->addr_src_match4, s->addr_src_match4_cnt, &p->src) == 0)
                return 0;
        } else if (PKT_IS_IPV6(p)) {
            if (DetectAddressMatchIPv6(s->addr_src_match6, s->addr_src_match6_cnt, &p->src) == 0)
                return 0;
        }
    }

    return 1;
}

/* returns 0 if no match, 1 if match */
static inline int DetectRunInspectRulePacketMatches(
    ThreadVars *tv,
    DetectEngineThreadCtx *det_ctx,
    Packet *p,
    const Flow *f,
    const Signature *s)
{
    /* run the packet match functions */
    if (s->sm_arrays[DETECT_SM_LIST_MATCH] != NULL) {
        KEYWORD_PROFILING_SET_LIST(det_ctx, DETECT_SM_LIST_MATCH);
        SigMatchData *smd = s->sm_arrays[DETECT_SM_LIST_MATCH];

        SCLogDebug("running match functions, sm %p", smd);
        if (smd != NULL) {
            while (1) {
                KEYWORD_PROFILING_START;
                if (sigmatch_table[smd->type].Match(tv, det_ctx, p, s, smd->ctx) <= 0) {
                    KEYWORD_PROFILING_END(det_ctx, smd->type, 0);
                    SCLogDebug("no match");
                    return 0;
                }
                KEYWORD_PROFILING_END(det_ctx, smd->type, 1);
                if (smd->is_last) {
                    SCLogDebug("match and is_last");
                    break;
                }
                smd++;
            }
        }
    }
    return 1;
}

/** \internal
 *  \brief run packet/stream prefilter engines
 */
static inline void DetectRunPrefilterPkt(
    ThreadVars *tv,
    DetectEngineCtx *de_ctx,
    DetectEngineThreadCtx *det_ctx,
    Packet *p,
    DetectRunScratchpad *scratch
)
{
    DetectPrefilterSetNonPrefilterList(p, det_ctx, scratch);

    /* create our prefilter mask */
    PacketCreateMask(p, &scratch->pkt_mask, scratch->alproto, scratch->app_decoder_events);

    /* build and prefilter non_pf list against the mask of the packet */
    PACKET_PROFILING_DETECT_START(p, PROF_DETECT_NONMPMLIST);
    det_ctx->non_pf_id_cnt = 0;
    if (likely(det_ctx->non_pf_store_cnt > 0)) {
        DetectPrefilterBuildNonPrefilterList(det_ctx, scratch->pkt_mask, scratch->alproto);
    }
    PACKET_PROFILING_DETECT_END(p, PROF_DETECT_NONMPMLIST);

    /* run the prefilter engines */
    Prefilter(det_ctx, scratch->sgh, p, scratch->flow_flags);
    /* create match list if we have non-pf and/or pf */
    if (det_ctx->non_pf_store_cnt || det_ctx->pmq.rule_id_array_cnt) {
        PACKET_PROFILING_DETECT_START(p, PROF_DETECT_PF_SORT2);
        DetectPrefilterMergeSort(de_ctx, det_ctx);
        PACKET_PROFILING_DETECT_END(p, PROF_DETECT_PF_SORT2);
    }

#ifdef PROFILING
    if (tv) {
        StatsAddUI64(tv, det_ctx->counter_mpm_list,
                             (uint64_t)det_ctx->pmq.rule_id_array_cnt);
        StatsAddUI64(tv, det_ctx->counter_nonmpm_list,
                             (uint64_t)det_ctx->non_pf_store_cnt);
        /* non mpm sigs after mask prefilter */
        StatsAddUI64(tv, det_ctx->counter_fnonmpm_list,
                             (uint64_t)det_ctx->non_pf_id_cnt);
    }
#endif
}

/** \internal
 *  \brief Inspect the merged candidate list against the packet: apply
 *  mask/dsize/alproto/header/payload filters per rule, run match and
 *  post-match lists, and append alerts (or apply actions for noalert
 *  rules). Rules with app_inspect are deferred to DetectRunTx.
 */
static inline void DetectRulePacketRules(
    ThreadVars * const tv,
    DetectEngineCtx * const de_ctx,
    DetectEngineThreadCtx * const det_ctx,
    Packet * const p,
    Flow * const pflow,
    const DetectRunScratchpad *scratch
)
{
    const Signature *s = NULL;
    const Signature *next_s = NULL;

    /* inspect the sigs against the packet */
    /* Prefetch the next signature. */
    SigIntId match_cnt = det_ctx->match_array_cnt;
#ifdef PROFILING
    if (tv) {
        StatsAddUI64(tv, det_ctx->counter_match_list,
                             (uint64_t)match_cnt);
    }
#endif
    Signature **match_array = det_ctx->match_array;

    SGH_PROFILING_RECORD(det_ctx, scratch->sgh);
#ifdef PROFILING
#ifdef HAVE_LIBJANSSON
    if (match_cnt >= de_ctx->profile_match_logging_threshold)
        RulesDumpMatchArray(det_ctx, scratch->sgh, p);
#endif
#endif

    uint32_t sflags, next_sflags = 0;
    if (match_cnt) {
        next_s = *match_array++;
        next_sflags = next_s->flags;
    }
    while (match_cnt--) {
        RULE_PROFILING_START(p);
        uint8_t alert_flags = 0;
        bool state_alert = false;
#ifdef PROFILING
        bool smatch = false; /* signature match */
#endif
        s = next_s;
        sflags = next_sflags;
        if (match_cnt) {
            next_s = *match_array++;
            next_sflags = next_s->flags;
        }
        const uint8_t s_proto_flags = s->proto.flags;

        SCLogDebug("inspecting signature id %"PRIu32"", s->id);

        if (s->app_inspect != NULL) {
            goto next; // handle sig in DetectRunTx
        }

        /* don't run mask check for stateful rules.
         * There we depend on prefilter */
        if ((s->mask & scratch->pkt_mask) != s->mask) {
            SCLogDebug("mask mismatch %x & %x != %x", s->mask, scratch->pkt_mask, s->mask);
            goto next;
        }

        if (unlikely(sflags & SIG_FLAG_DSIZE)) {
            if (likely(p->payload_len < s->dsize_low || p->payload_len > s->dsize_high)) {
                SCLogDebug("kicked out as p->payload_len %u, dsize low %u, hi %u",
                        p->payload_len, s->dsize_low, s->dsize_high);
                goto next;
            }
        }

        /* if the sig has alproto and the session as well they should match */
        if (likely(sflags & SIG_FLAG_APPLAYER)) {
            if (s->alproto != ALPROTO_UNKNOWN && s->alproto != scratch->alproto) {
                if (s->alproto == ALPROTO_DCERPC) {
                    /* DCERPC rules also apply to SMB-carried DCERPC */
                    if (scratch->alproto != ALPROTO_SMB && scratch->alproto != ALPROTO_SMB2) {
                        SCLogDebug("DCERPC sig, alproto not SMB or SMB2");
                        goto next;
                    }
                } else {
                    SCLogDebug("alproto mismatch");
                    goto next;
                }
            }
        }

        if (DetectRunInspectRuleHeader(p, pflow, s, sflags, s_proto_flags) == 0) {
            goto next;
        }

        /* Check the payload keywords. If we are a MPM sig and we've made
         * to here, we've had at least one of the patterns match */
        if (s->sm_arrays[DETECT_SM_LIST_PMATCH] != NULL) {
            KEYWORD_PROFILING_SET_LIST(det_ctx, DETECT_SM_LIST_PMATCH);
            /* if we have stream msgs, inspect against those first,
             * but not for a "dsize" signature */
            if (sflags & SIG_FLAG_REQUIRE_STREAM) {
                int pmatch = 0;
                if (p->flags & PKT_DETECT_HAS_STREAMDATA) {
                    pmatch = DetectEngineInspectStreamPayload(de_ctx, det_ctx, s, pflow, p);
                    if (pmatch) {
                        det_ctx->flags |= DETECT_ENGINE_THREAD_CTX_STREAM_CONTENT_MATCH;
                        /* Tell the engine that this reassembled stream can drop the
                         * rest of the pkts with no further inspection */
                        if (s->action & ACTION_DROP)
                            alert_flags |= PACKET_ALERT_FLAG_DROP_FLOW;

                        alert_flags |= PACKET_ALERT_FLAG_STREAM_MATCH;
                    }
                }
                /* no match? then inspect packet payload */
                if (pmatch == 0) {
                    SCLogDebug("no match in stream, fall back to packet payload");

                    /* skip if we don't have to inspect the packet and segment was
                     * added to stream */
                    if (!(sflags & SIG_FLAG_REQUIRE_PACKET) && (p->flags & PKT_STREAM_ADD)) {
                        goto next;
                    }
                    if (DetectEngineInspectPacketPayload(de_ctx, det_ctx, s, pflow, p) != 1) {
                        goto next;
                    }
                }
            } else {
                if (DetectEngineInspectPacketPayload(de_ctx, det_ctx, s, pflow, p) != 1) {
                    goto next;
                }
            }
        }

        if (DetectRunInspectRulePacketMatches(tv, det_ctx, p, pflow, s) == 0)
            goto next;

#ifdef PROFILING
        smatch = true;
#endif
        DetectRunPostMatch(tv, det_ctx, p, s);

        if (!(sflags & SIG_FLAG_NOALERT)) {
            /* stateful sigs call PacketAlertAppend from DeStateDetectStartDetection */
            if (!state_alert)
                PacketAlertAppend(det_ctx, s, p, 0, alert_flags);
        } else {
            /* apply actions even if not alerting */
            DetectSignatureApplyActions(p, s, alert_flags);
        }
next:
        DetectVarProcessList(det_ctx, pflow, p);
        DetectReplaceFree(det_ctx);
        RULE_PROFILING_END(det_ctx, s, smatch, p);

        det_ctx->flags = 0;
        continue;
    }
}

/** \internal
 *  \brief Per-packet setup: reset per-packet thread state, derive the
 *  flow/stream flags, handle live rule-swap resets on the flow, and
 *  fetch the app-layer protocol. Returns the populated scratchpad.
 */
static DetectRunScratchpad DetectRunSetup(
    const DetectEngineCtx *de_ctx,
    DetectEngineThreadCtx *det_ctx,
    Packet * const p, Flow * const pflow)
{
    AppProto alproto = ALPROTO_UNKNOWN;
    uint8_t flow_flags = 0; /* flow/state flags */
    bool app_decoder_events = false;

    PACKET_PROFILING_DETECT_START(p, PROF_DETECT_SETUP);

#ifdef UNITTESTS
    p->alerts.cnt = 0;
#endif
    det_ctx->ticker++;
    det_ctx->filestore_cnt = 0;
    det_ctx->base64_decoded_len = 0;
    det_ctx->raw_stream_progress = 0;

#ifdef DEBUG
    if (p->flags & PKT_STREAM_ADD) {
        det_ctx->pkt_stream_add_cnt++;
    }
#endif

    /* grab the protocol state we will detect on */
    if (p->flags & PKT_HAS_FLOW) {
        DEBUG_VALIDATE_BUG_ON(pflow == NULL);

        if (p->flowflags & FLOW_PKT_TOSERVER) {
            flow_flags = STREAM_TOSERVER;
            SCLogDebug("flag STREAM_TOSERVER set");
        } else if (p->flowflags & FLOW_PKT_TOCLIENT) {
            flow_flags = STREAM_TOCLIENT;
            SCLogDebug("flag STREAM_TOCLIENT set");
        }
        SCLogDebug("p->flowflags 0x%02x", p->flowflags);

        if (p->flags & PKT_STREAM_EOF) {
            flow_flags |= STREAM_EOF;
            SCLogDebug("STREAM_EOF set");
        }

        /* store tenant_id in the flow so that we can use it
         * for creating pseudo packets */
        if (p->tenant_id > 0 && pflow->tenant_id == 0) {
            pflow->tenant_id = p->tenant_id;
        }

        /* live ruleswap check for flow updates */
        if (pflow->de_ctx_version == 0) {
            /* first time this flow is inspected, set id */
            pflow->de_ctx_version = de_ctx->version;
        } else if (pflow->de_ctx_version != de_ctx->version) {
            /* first time we inspect flow with this de_ctx, reset */
            pflow->flags &= ~FLOW_SGH_TOSERVER;
            pflow->flags &= ~FLOW_SGH_TOCLIENT;
            pflow->sgh_toserver = NULL;
            pflow->sgh_toclient = NULL;

            pflow->de_ctx_version = de_ctx->version;
            GenericVarFree(pflow->flowvar);
            pflow->flowvar = NULL;

            DetectEngineStateResetTxs(pflow);
        }

        /* Retrieve the app layer state and protocol and the tcp reassembled
         * stream chunks. */
        if ((p->proto == IPPROTO_TCP && (p->flags & PKT_STREAM_EST)) ||
                (p->proto == IPPROTO_UDP) ||
                (p->proto == IPPROTO_SCTP && (p->flowflags & FLOW_PKT_ESTABLISHED)))
        {
            /* update flow flags with knowledge on disruptions */
            flow_flags = FlowGetDisruptionFlags(pflow, flow_flags);
            alproto = FlowGetAppProtocol(pflow);
            if (p->proto == IPPROTO_TCP && pflow->protoctx &&
                    StreamReassembleRawHasDataReady(pflow->protoctx, p)) {
                p->flags |= PKT_DETECT_HAS_STREAMDATA;
                flow_flags |= STREAM_FLUSH;
            }
            SCLogDebug("alproto %u", alproto);
        } else {
            SCLogDebug("packet doesn't have established flag set (proto %d)", p->proto);
        }

        app_decoder_events = AppLayerParserHasDecoderEvents(pflow->alparser);
    }

    DetectRunScratchpad pad = { alproto, flow_flags, app_decoder_events, NULL, 0 };
    PACKET_PROFILING_DETECT_END(p, PROF_DETECT_SETUP);
    return pad;
}

static inline void DetectRunPostRules(
    ThreadVars *tv,
    DetectEngineCtx *de_ctx,
    DetectEngineThreadCtx *det_ctx,
    Packet * const p,
    Flow * const pflow,
    DetectRunScratchpad *scratch)
{
    /* see if we need to increment the inspect_id and reset the de_state */
    if
(pflow && pflow->alstate && AppLayerParserProtocolSupportsTxs(p->proto, scratch->alproto)) { PACKET_PROFILING_DETECT_START(p, PROF_DETECT_TX_UPDATE); DeStateUpdateInspectTransactionId(pflow, scratch->flow_flags, (scratch->sgh == NULL)); PACKET_PROFILING_DETECT_END(p, PROF_DETECT_TX_UPDATE); } /* so now let's iterate the alerts and remove the ones after a pass rule * matched (if any). This is done inside PacketAlertFinalize() */ /* PR: installed "tag" keywords are handled after the threshold inspection */ PACKET_PROFILING_DETECT_START(p, PROF_DETECT_ALERT); PacketAlertFinalize(de_ctx, det_ctx, p); if (p->alerts.cnt > 0) { StatsAddUI64(tv, det_ctx->counter_alerts, (uint64_t)p->alerts.cnt); } PACKET_PROFILING_DETECT_END(p, PROF_DETECT_ALERT); } static void DetectRunCleanup(DetectEngineThreadCtx *det_ctx, Packet *p, Flow * const pflow) { PACKET_PROFILING_DETECT_START(p, PROF_DETECT_CLEANUP); /* cleanup pkt specific part of the patternmatcher */ PacketPatternCleanup(det_ctx); if (pflow != NULL) { /* update inspected tracker for raw reassembly */ if (p->proto == IPPROTO_TCP && pflow->protoctx != NULL && (p->flags & PKT_STREAM_EST)) { StreamReassembleRawUpdateProgress(pflow->protoctx, p, det_ctx->raw_stream_progress); DetectEngineCleanHCBDBuffers(det_ctx); } } PACKET_PROFILING_DETECT_END(p, PROF_DETECT_CLEANUP); SCReturn; } void RuleMatchCandidateTxArrayInit(DetectEngineThreadCtx *det_ctx, uint32_t size) { DEBUG_VALIDATE_BUG_ON(det_ctx->tx_candidates); det_ctx->tx_candidates = SCCalloc(size, sizeof(RuleMatchCandidateTx)); if (det_ctx->tx_candidates == NULL) { FatalError(SC_ERR_MEM_ALLOC, "failed to allocate %"PRIu64" bytes", (uint64_t)(size * sizeof(RuleMatchCandidateTx))); } det_ctx->tx_candidates_size = size; SCLogDebug("array initialized to %u elements (%"PRIu64" bytes)", size, (uint64_t)(size * sizeof(RuleMatchCandidateTx))); } void RuleMatchCandidateTxArrayFree(DetectEngineThreadCtx *det_ctx) { SCFree(det_ctx->tx_candidates); det_ctx->tx_candidates_size = 0; } /* if 
size >= cur_space */ static inline bool RuleMatchCandidateTxArrayHasSpace(const DetectEngineThreadCtx *det_ctx, const uint32_t need) { if (det_ctx->tx_candidates_size >= need) return 1; return 0; } /* realloc */ static int RuleMatchCandidateTxArrayExpand(DetectEngineThreadCtx *det_ctx, const uint32_t needed) { const uint32_t old_size = det_ctx->tx_candidates_size; uint32_t new_size = needed; void *ptmp = SCRealloc(det_ctx->tx_candidates, (new_size * sizeof(RuleMatchCandidateTx))); if (ptmp == NULL) { FatalError(SC_ERR_MEM_ALLOC, "failed to expand to %"PRIu64" bytes", (uint64_t)(new_size * sizeof(RuleMatchCandidateTx))); // TODO can this be handled more gracefully? } det_ctx->tx_candidates = ptmp; det_ctx->tx_candidates_size = new_size; SCLogDebug("array expanded from %u to %u elements (%"PRIu64" bytes -> %"PRIu64" bytes)", old_size, new_size, (uint64_t)(old_size * sizeof(RuleMatchCandidateTx)), (uint64_t)(new_size * sizeof(RuleMatchCandidateTx))); (void)old_size; return 1; } /* TODO maybe let one with flags win if equal? */ static int DetectRunTxSortHelper(const void *a, const void *b) { const RuleMatchCandidateTx *s0 = a; const RuleMatchCandidateTx *s1 = b; if (s1->id == s0->id) return 0; else return s0->id > s1->id ? -1 : 1; } #if 0 #define TRACE_SID_TXS(sid,txs,...) \ do { \ char _trace_buf[2048]; \ snprintf(_trace_buf, sizeof(_trace_buf), __VA_ARGS__); \ SCLogNotice("%p/%"PRIu64"/%u: %s", txs->tx_ptr, txs->tx_id, sid, _trace_buf); \ } while(0) #else #define TRACE_SID_TXS(sid,txs,...) #endif /** \internal * \brief inspect a rule against a transaction * * Inspect a rule. New detection or continued stateful * detection. * * \param stored_flags pointer to stored flags or NULL. * If stored_flags is set it means we're continueing * inspection from an earlier run. 
* * \retval bool true sig matched, false didn't match */ static bool DetectRunTxInspectRule(ThreadVars *tv, DetectEngineCtx *de_ctx, DetectEngineThreadCtx *det_ctx, Packet *p, Flow *f, const uint8_t in_flow_flags, // direction, EOF, etc void *alstate, DetectTransaction *tx, const Signature *s, uint32_t *stored_flags, RuleMatchCandidateTx *can, DetectRunScratchpad *scratch) { uint8_t flow_flags = in_flow_flags; const int direction = (flow_flags & STREAM_TOSERVER) ? 0 : 1; uint32_t inspect_flags = stored_flags ? *stored_flags : 0; int total_matches = 0; int file_no_match = 0; bool retval = false; bool mpm_before_progress = false; // is mpm engine before progress? bool mpm_in_progress = false; // is mpm engine in a buffer we will revisit? /* see if we want to pass on the FLUSH flag */ if ((s->flags & SIG_FLAG_FLUSH) == 0) flow_flags &=~ STREAM_FLUSH; TRACE_SID_TXS(s->id, tx, "starting %s", direction ? "toclient" : "toserver"); /* for a new inspection we inspect pkt header and packet matches */ if (likely(stored_flags == NULL)) { TRACE_SID_TXS(s->id, tx, "first inspect, run packet matches"); if (DetectRunInspectRuleHeader(p, f, s, s->flags, s->proto.flags) == 0) { TRACE_SID_TXS(s->id, tx, "DetectRunInspectRuleHeader() no match"); return false; } if (DetectRunInspectRulePacketMatches(tv, det_ctx, p, f, s) == 0) { TRACE_SID_TXS(s->id, tx, "DetectRunInspectRulePacketMatches no match"); return false; } /* stream mpm and negated mpm sigs can end up here with wrong proto */ if (!(f->alproto == s->alproto || s->alproto == ALPROTO_UNKNOWN)) { TRACE_SID_TXS(s->id, tx, "alproto mismatch"); return false; } } const DetectEngineAppInspectionEngine *engine = s->app_inspect; while (engine != NULL) { // TODO could be do {} while as s->app_inspect cannot be null TRACE_SID_TXS(s->id, tx, "engine %p inspect_flags %x", engine, inspect_flags); if (!(inspect_flags & BIT_U32(engine->id)) && direction == engine->dir) { const bool skip_engine = (engine->alproto != 0 && engine->alproto != 
f->alproto); /* special case: file_data on 'alert tcp' will have engines * in the list that are not for us. */ if (unlikely(skip_engine)) { engine = engine->next; continue; } /* engines are sorted per progress, except that the one with * mpm/prefilter enabled is first */ if (tx->tx_progress < engine->progress) { SCLogDebug("tx progress %d < engine progress %d", tx->tx_progress, engine->progress); break; } if (engine->mpm) { if (tx->tx_progress > engine->progress) { mpm_before_progress = true; } else if (tx->tx_progress == engine->progress) { mpm_in_progress = true; } } /* run callback: but bypass stream callback if we can */ int match; if (unlikely(engine->stream && can->stream_stored)) { match = can->stream_result; TRACE_SID_TXS(s->id, tx, "stream skipped, stored result %d used instead", match); } else { KEYWORD_PROFILING_SET_LIST(det_ctx, engine->sm_list); if (engine->Callback) { match = engine->Callback(tv, de_ctx, det_ctx, s, engine->smd, f, flow_flags, alstate, tx->tx_ptr, tx->tx_id); } else { BUG_ON(engine->v2.Callback == NULL); match = engine->v2.Callback(de_ctx, det_ctx, engine, s, f, flow_flags, alstate, tx->tx_ptr, tx->tx_id); } TRACE_SID_TXS(s->id, tx, "engine %p match %d", engine, match); if (engine->stream) { can->stream_stored = true; can->stream_result = match; TRACE_SID_TXS(s->id, tx, "stream ran, store result %d for next tx (if any)", match); } } if (match == DETECT_ENGINE_INSPECT_SIG_MATCH) { inspect_flags |= BIT_U32(engine->id); engine = engine->next; total_matches++; continue; } else if (match == DETECT_ENGINE_INSPECT_SIG_MATCH_MORE_FILES) { /* if the file engine matched, but indicated more * files are still in progress, we don't set inspect * flags as these would end inspection for this tx */ engine = engine->next; total_matches++; continue; } else if (match == DETECT_ENGINE_INSPECT_SIG_CANT_MATCH) { inspect_flags |= DE_STATE_FLAG_SIG_CANT_MATCH; inspect_flags |= BIT_U32(engine->id); } else if (match == 
DETECT_ENGINE_INSPECT_SIG_CANT_MATCH_FILESTORE) { inspect_flags |= DE_STATE_FLAG_SIG_CANT_MATCH; inspect_flags |= BIT_U32(engine->id); file_no_match = 1; } /* implied DETECT_ENGINE_INSPECT_SIG_NO_MATCH */ if (engine->mpm && mpm_before_progress) { inspect_flags |= DE_STATE_FLAG_SIG_CANT_MATCH; inspect_flags |= BIT_U32(engine->id); } break; } engine = engine->next; } TRACE_SID_TXS(s->id, tx, "inspect_flags %x, total_matches %u, engine %p", inspect_flags, total_matches, engine); if (engine == NULL && total_matches) { inspect_flags |= DE_STATE_FLAG_FULL_INSPECT; TRACE_SID_TXS(s->id, tx, "MATCH"); retval = true; } if (stored_flags) { *stored_flags = inspect_flags; TRACE_SID_TXS(s->id, tx, "continue inspect flags %08x", inspect_flags); } else { // store... or? If tx is done we might not want to come back to this tx // also... if mpmid tracking is enabled, we won't do a sig again for this tx... TRACE_SID_TXS(s->id, tx, "start inspect flags %08x", inspect_flags); if (inspect_flags & DE_STATE_FLAG_SIG_CANT_MATCH) { if (file_no_match) { /* if we have a mismatch on a file sig, we need to keep state. * We may get another file on the same tx (for http and smtp * at least), so for a new file we need to re-eval the sig. * Thoughts / TODO: * - not for some protos that have 1 file per tx (e.g. nfs) * - maybe we only need this for file sigs that mix with * other matches? E.g. 'POST + filename', is different than * just 'filename'. 
*/ DetectRunStoreStateTx(scratch->sgh, f, tx->tx_ptr, tx->tx_id, s, inspect_flags, flow_flags, file_no_match); } } else if ((inspect_flags & DE_STATE_FLAG_FULL_INSPECT) && mpm_before_progress) { TRACE_SID_TXS(s->id, tx, "no need to store match sig, " "mpm won't trigger for it anymore"); if (inspect_flags & DE_STATE_FLAG_FILE_INSPECT) { TRACE_SID_TXS(s->id, tx, "except that for new files, " "we may have to revisit anyway"); DetectRunStoreStateTx(scratch->sgh, f, tx->tx_ptr, tx->tx_id, s, inspect_flags, flow_flags, file_no_match); } } else if ((inspect_flags & DE_STATE_FLAG_FULL_INSPECT) == 0 && mpm_in_progress) { TRACE_SID_TXS(s->id, tx, "no need to store no-match sig, " "mpm will revisit it"); } else { TRACE_SID_TXS(s->id, tx, "storing state: flags %08x", inspect_flags); DetectRunStoreStateTx(scratch->sgh, f, tx->tx_ptr, tx->tx_id, s, inspect_flags, flow_flags, file_no_match); } } return retval; } /** \internal * \brief get a DetectTransaction object * \retval struct filled with relevant info or all nulls/0s */ static DetectTransaction GetDetectTx(const uint8_t ipproto, const AppProto alproto, void *alstate, const uint64_t tx_id, void *tx_ptr, const int tx_end_state, const uint8_t flow_flags) { const uint64_t detect_flags = AppLayerParserGetTxDetectFlags(ipproto, alproto, tx_ptr, flow_flags); if (detect_flags & APP_LAYER_TX_INSPECTED_FLAG) { SCLogDebug("%"PRIu64" tx already fully inspected for %s. Flags %016"PRIx64, tx_id, flow_flags & STREAM_TOSERVER ? "toserver" : "toclient", detect_flags); DetectTransaction no_tx = { NULL, 0, NULL, 0, 0, 0, 0, 0, }; return no_tx; } const int tx_progress = AppLayerParserGetStateProgress(ipproto, alproto, tx_ptr, flow_flags); const int dir_int = (flow_flags & STREAM_TOSERVER) ? 0 : 1; DetectEngineState *tx_de_state = AppLayerParserGetTxDetectState(ipproto, alproto, tx_ptr); DetectEngineStateDirection *tx_dir_state = tx_de_state ? 
&tx_de_state->dir_state[dir_int] : NULL; uint64_t prefilter_flags = detect_flags & APP_LAYER_TX_PREFILTER_MASK; DetectTransaction tx = { .tx_ptr = tx_ptr, .tx_id = tx_id, .de_state = tx_dir_state, .detect_flags = detect_flags, .prefilter_flags = prefilter_flags, .prefilter_flags_orig = prefilter_flags, .tx_progress = tx_progress, .tx_end_state = tx_end_state, }; return tx; } static void DetectRunTx(ThreadVars *tv, DetectEngineCtx *de_ctx, DetectEngineThreadCtx *det_ctx, Packet *p, Flow *f, DetectRunScratchpad *scratch) { const uint8_t flow_flags = scratch->flow_flags; const SigGroupHead * const sgh = scratch->sgh; void * const alstate = f->alstate; const uint8_t ipproto = f->proto; const AppProto alproto = f->alproto; const uint64_t total_txs = AppLayerParserGetTxCnt(f, alstate); uint64_t tx_id_min = AppLayerParserGetTransactionInspectId(f->alparser, flow_flags); const int tx_end_state = AppLayerParserGetStateProgressCompletionStatus(alproto, flow_flags); AppLayerGetTxIteratorFunc IterFunc = AppLayerGetTxIterator(ipproto, alproto); AppLayerGetTxIterState state; memset(&state, 0, sizeof(state)); while (1) { AppLayerGetTxIterTuple ires = IterFunc(ipproto, alproto, alstate, tx_id_min, total_txs, &state); if (ires.tx_ptr == NULL) break; DetectTransaction tx = GetDetectTx(ipproto, alproto, alstate, ires.tx_id, ires.tx_ptr, tx_end_state, flow_flags); if (tx.tx_ptr == NULL) { SCLogDebug("%p/%"PRIu64" no transaction to inspect", tx.tx_ptr, tx_id_min); tx_id_min++; // next (if any) run look for +1 goto next; } tx_id_min = tx.tx_id + 1; // next look for cur + 1 uint32_t array_idx = 0; uint32_t total_rules = det_ctx->match_array_cnt; total_rules += (tx.de_state ? 
tx.de_state->cnt : 0); /* run prefilter engines and merge results into a candidates array */ if (sgh->tx_engines) { PACKET_PROFILING_DETECT_START(p, PROF_DETECT_PF_TX); DetectRunPrefilterTx(det_ctx, sgh, p, ipproto, flow_flags, alproto, alstate, &tx); PACKET_PROFILING_DETECT_END(p, PROF_DETECT_PF_TX); SCLogDebug("%p/%"PRIu64" rules added from prefilter: %u candidates", tx.tx_ptr, tx.tx_id, det_ctx->pmq.rule_id_array_cnt); total_rules += det_ctx->pmq.rule_id_array_cnt; if (!(RuleMatchCandidateTxArrayHasSpace(det_ctx, total_rules))) { RuleMatchCandidateTxArrayExpand(det_ctx, total_rules); } for (uint32_t i = 0; i < det_ctx->pmq.rule_id_array_cnt; i++) { const Signature *s = de_ctx->sig_array[det_ctx->pmq.rule_id_array[i]]; const SigIntId id = s->num; det_ctx->tx_candidates[array_idx].s = s; det_ctx->tx_candidates[array_idx].id = id; det_ctx->tx_candidates[array_idx].flags = NULL; det_ctx->tx_candidates[array_idx].stream_reset = 0; array_idx++; } } else { if (!(RuleMatchCandidateTxArrayHasSpace(det_ctx, total_rules))) { RuleMatchCandidateTxArrayExpand(det_ctx, total_rules); } } /* merge 'state' rules from the regular prefilter */ uint32_t x = array_idx; for (uint32_t i = 0; i < det_ctx->match_array_cnt; i++) { const Signature *s = det_ctx->match_array[i]; if (s->app_inspect != NULL) { const SigIntId id = s->num; det_ctx->tx_candidates[array_idx].s = s; det_ctx->tx_candidates[array_idx].id = id; det_ctx->tx_candidates[array_idx].flags = NULL; det_ctx->tx_candidates[array_idx].stream_reset = 0; array_idx++; SCLogDebug("%p/%"PRIu64" rule %u (%u) added from 'match' list", tx.tx_ptr, tx.tx_id, s->id, id); } } SCLogDebug("%p/%"PRIu64" rules added from 'match' list: %u", tx.tx_ptr, tx.tx_id, array_idx - x); (void)x; /* merge stored state into results */ if (tx.de_state != NULL) { const uint32_t old = array_idx; /* if tx.de_state->flags has 'new file' set and sig below has * 'file inspected' flag, reset the file part of the state */ const bool have_new_file = 
(tx.de_state->flags & DETECT_ENGINE_STATE_FLAG_FILE_NEW); if (have_new_file) { SCLogDebug("%p/%"PRIu64" destate: need to consider new file", tx.tx_ptr, tx.tx_id); tx.de_state->flags &= ~DETECT_ENGINE_STATE_FLAG_FILE_NEW; } SigIntId state_cnt = 0; DeStateStore *tx_store = tx.de_state->head; for (; tx_store != NULL; tx_store = tx_store->next) { SCLogDebug("tx_store %p", tx_store); SigIntId store_cnt = 0; for (store_cnt = 0; store_cnt < DE_STATE_CHUNK_SIZE && state_cnt < tx.de_state->cnt; store_cnt++, state_cnt++) { DeStateStoreItem *item = &tx_store->store[store_cnt]; SCLogDebug("rule id %u, inspect_flags %u", item->sid, item->flags); if (have_new_file && (item->flags & DE_STATE_FLAG_FILE_INSPECT)) { /* remove part of the state. File inspect engine will now * be able to run again */ item->flags &= ~(DE_STATE_FLAG_SIG_CANT_MATCH|DE_STATE_FLAG_FULL_INSPECT|DE_STATE_FLAG_FILE_INSPECT); SCLogDebug("rule id %u, post file reset inspect_flags %u", item->sid, item->flags); } det_ctx->tx_candidates[array_idx].s = de_ctx->sig_array[item->sid]; det_ctx->tx_candidates[array_idx].id = item->sid; det_ctx->tx_candidates[array_idx].flags = &item->flags; det_ctx->tx_candidates[array_idx].stream_reset = 0; array_idx++; } } if (old && old != array_idx) { qsort(det_ctx->tx_candidates, array_idx, sizeof(RuleMatchCandidateTx), DetectRunTxSortHelper); SCLogDebug("%p/%"PRIu64" rules added from 'continue' list: %u", tx.tx_ptr, tx.tx_id, array_idx - old); } } det_ctx->tx_id = tx.tx_id; det_ctx->tx_id_set = 1; det_ctx->p = p; /* run rules: inspect the match candidates */ for (uint32_t i = 0; i < array_idx; i++) { RuleMatchCandidateTx *can = &det_ctx->tx_candidates[i]; const Signature *s = det_ctx->tx_candidates[i].s; uint32_t *inspect_flags = det_ctx->tx_candidates[i].flags; /* deduplicate: rules_array is sorted, but not deduplicated: * both mpm and stored state could give us the same sid. * As they are back to back in that case we can check for it * here. We select the stored state one. 
*/ if ((i + 1) < array_idx) { if (det_ctx->tx_candidates[i].s == det_ctx->tx_candidates[i+1].s) { if (det_ctx->tx_candidates[i].flags != NULL) { i++; SCLogDebug("%p/%"PRIu64" inspecting SKIP NEXT: sid %u (%u), flags %08x", tx.tx_ptr, tx.tx_id, s->id, s->num, inspect_flags ? *inspect_flags : 0); } else if (det_ctx->tx_candidates[i+1].flags != NULL) { SCLogDebug("%p/%"PRIu64" inspecting SKIP CURRENT: sid %u (%u), flags %08x", tx.tx_ptr, tx.tx_id, s->id, s->num, inspect_flags ? *inspect_flags : 0); continue; } else { // if it's all the same, inspect the current one and skip next. i++; SCLogDebug("%p/%"PRIu64" inspecting SKIP NEXT: sid %u (%u), flags %08x", tx.tx_ptr, tx.tx_id, s->id, s->num, inspect_flags ? *inspect_flags : 0); } } } SCLogDebug("%p/%"PRIu64" inspecting: sid %u (%u), flags %08x", tx.tx_ptr, tx.tx_id, s->id, s->num, inspect_flags ? *inspect_flags : 0); if (inspect_flags) { if (*inspect_flags & (DE_STATE_FLAG_FULL_INSPECT|DE_STATE_FLAG_SIG_CANT_MATCH)) { SCLogDebug("%p/%"PRIu64" inspecting: sid %u (%u), flags %08x ALREADY COMPLETE", tx.tx_ptr, tx.tx_id, s->id, s->num, *inspect_flags); continue; } } if (inspect_flags) { /* continue previous inspection */ SCLogDebug("%p/%"PRIu64" Continueing sid %u", tx.tx_ptr, tx.tx_id, s->id); } else { /* start new inspection */ SCLogDebug("%p/%"PRIu64" Start sid %u", tx.tx_ptr, tx.tx_id, s->id); } /* call individual rule inspection */ RULE_PROFILING_START(p); const int r = DetectRunTxInspectRule(tv, de_ctx, det_ctx, p, f, flow_flags, alstate, &tx, s, inspect_flags, can, scratch); if (r == 1) { /* match */ DetectRunPostMatch(tv, det_ctx, p, s); uint8_t alert_flags = (PACKET_ALERT_FLAG_STATE_MATCH|PACKET_ALERT_FLAG_TX); if (s->action & ACTION_DROP) alert_flags |= PACKET_ALERT_FLAG_DROP_FLOW; SCLogDebug("%p/%"PRIu64" sig %u (%u) matched", tx.tx_ptr, tx.tx_id, s->id, s->num); if (!(s->flags & SIG_FLAG_NOALERT)) { PacketAlertAppend(det_ctx, s, p, tx.tx_id, alert_flags); } else { DetectSignatureApplyActions(p, s, 
alert_flags); } } DetectVarProcessList(det_ctx, p->flow, p); RULE_PROFILING_END(det_ctx, s, r, p); } det_ctx->tx_id = 0; det_ctx->tx_id_set = 0; det_ctx->p = NULL; /* see if we have any updated state to store in the tx */ uint64_t new_detect_flags = 0; /* this side of the tx is done */ if (tx.tx_progress >= tx.tx_end_state) { new_detect_flags |= APP_LAYER_TX_INSPECTED_FLAG; SCLogDebug("%p/%"PRIu64" tx is done for direction %s. Flag %016"PRIx64, tx.tx_ptr, tx.tx_id, flow_flags & STREAM_TOSERVER ? "toserver" : "toclient", new_detect_flags); } if (tx.prefilter_flags != tx.prefilter_flags_orig) { new_detect_flags |= tx.prefilter_flags; SCLogDebug("%p/%"PRIu64" updated prefilter flags %016"PRIx64" " "(was: %016"PRIx64") for direction %s. Flag %016"PRIx64, tx.tx_ptr, tx.tx_id, tx.prefilter_flags, tx.prefilter_flags_orig, flow_flags & STREAM_TOSERVER ? "toserver" : "toclient", new_detect_flags); } if (new_detect_flags != 0 && (new_detect_flags | tx.detect_flags) != tx.detect_flags) { new_detect_flags |= tx.detect_flags; SCLogDebug("%p/%"PRIu64" Storing new flags %016"PRIx64" (was %016"PRIx64")", tx.tx_ptr, tx.tx_id, new_detect_flags, tx.detect_flags); AppLayerParserSetTxDetectFlags(ipproto, alproto, tx.tx_ptr, flow_flags, new_detect_flags); } next: InspectionBufferClean(det_ctx); if (!ires.has_next) break; } } /** \brief Apply action(s) and Set 'drop' sig info, * if applicable */ void DetectSignatureApplyActions(Packet *p, const Signature *s, const uint8_t alert_flags) { PACKET_UPDATE_ACTION(p, s->action); if (s->action & ACTION_DROP) { if (p->alerts.drop.action == 0) { p->alerts.drop.num = s->num; p->alerts.drop.action = s->action; p->alerts.drop.s = (Signature *)s; } } else if (s->action & ACTION_PASS) { /* if an stream/app-layer match we enforce the pass for the flow */ if ((p->flow != NULL) && (alert_flags & (PACKET_ALERT_FLAG_STATE_MATCH|PACKET_ALERT_FLAG_STREAM_MATCH))) { FlowSetNoPacketInspectionFlag(p->flow); } } } static DetectEngineThreadCtx 
*GetTenantById(HashTable *h, uint32_t id) { /* technically we need to pass a DetectEngineThreadCtx struct with the * tentant_id member. But as that member is the first in the struct, we * can use the id directly. */ return HashTableLookup(h, &id, 0); } static void DetectFlow(ThreadVars *tv, DetectEngineCtx *de_ctx, DetectEngineThreadCtx *det_ctx, Packet *p) { if (p->flags & PKT_NOPACKET_INSPECTION) { /* hack: if we are in pass the entire flow mode, we need to still * update the inspect_id forward. So test for the condition here, * and call the update code if necessary. */ const int pass = ((p->flow->flags & FLOW_NOPACKET_INSPECTION)); const AppProto alproto = FlowGetAppProtocol(p->flow); if (pass && AppLayerParserProtocolSupportsTxs(p->proto, alproto)) { uint8_t flags; if (p->flowflags & FLOW_PKT_TOSERVER) { flags = STREAM_TOSERVER; } else { flags = STREAM_TOCLIENT; } flags = FlowGetDisruptionFlags(p->flow, flags); DeStateUpdateInspectTransactionId(p->flow, flags, true); } SCLogDebug("p->pcap %"PRIu64": no detection on packet, " "PKT_NOPACKET_INSPECTION is set", p->pcap_cnt); return; } /* see if the packet matches one or more of the sigs */ (void)DetectRun(tv, de_ctx, det_ctx, p); } static void DetectNoFlow(ThreadVars *tv, DetectEngineCtx *de_ctx, DetectEngineThreadCtx *det_ctx, Packet *p) { /* No need to perform any detection on this packet, if the the given flag is set.*/ if ((p->flags & PKT_NOPACKET_INSPECTION) || (PACKET_TEST_ACTION(p, ACTION_DROP))) { return; } /* see if the packet matches one or more of the sigs */ DetectRun(tv, de_ctx, det_ctx, p); return; } /** \brief Detection engine thread wrapper. 
* \param tv thread vars * \param p packet to inspect * \param data thread specific data * \param pq packet queue * \retval TM_ECODE_FAILED error * \retval TM_ECODE_OK ok */ TmEcode Detect(ThreadVars *tv, Packet *p, void *data, PacketQueue *pq, PacketQueue *postpq) { DEBUG_VALIDATE_PACKET(p); DetectEngineCtx *de_ctx = NULL; DetectEngineThreadCtx *det_ctx = (DetectEngineThreadCtx *)data; if (det_ctx == NULL) { printf("ERROR: Detect has no thread ctx\n"); goto error; } if (unlikely(SC_ATOMIC_GET(det_ctx->so_far_used_by_detect) == 0)) { (void)SC_ATOMIC_SET(det_ctx->so_far_used_by_detect, 1); SCLogDebug("Detect Engine using new det_ctx - %p", det_ctx); } /* if in MT mode _and_ we have tenants registered, use * MT logic. */ if (det_ctx->mt_det_ctxs_cnt > 0 && det_ctx->TenantGetId != NULL) { uint32_t tenant_id = p->tenant_id; if (tenant_id == 0) tenant_id = det_ctx->TenantGetId(det_ctx, p); if (tenant_id > 0 && tenant_id < det_ctx->mt_det_ctxs_cnt) { p->tenant_id = tenant_id; det_ctx = GetTenantById(det_ctx->mt_det_ctxs_hash, tenant_id); if (det_ctx == NULL) return TM_ECODE_OK; de_ctx = det_ctx->de_ctx; if (de_ctx == NULL) return TM_ECODE_OK; if (unlikely(SC_ATOMIC_GET(det_ctx->so_far_used_by_detect) == 0)) { (void)SC_ATOMIC_SET(det_ctx->so_far_used_by_detect, 1); SCLogDebug("MT de_ctx %p det_ctx %p (tenant %u)", de_ctx, det_ctx, tenant_id); } } else { /* use default if no tenants are registered for this packet */ de_ctx = det_ctx->de_ctx; } } else { de_ctx = det_ctx->de_ctx; } if (p->flow) { DetectFlow(tv, de_ctx, det_ctx, p); } else { DetectNoFlow(tv, de_ctx, det_ctx, p); } return TM_ECODE_OK; error: return TM_ECODE_FAILED; } /** \brief disable file features we don't need * Called if we have no detection engine. 
*/ void DisableDetectFlowFileFlags(Flow *f) { DetectPostInspectFileFlagsUpdate(f, NULL /* no sgh */, STREAM_TOSERVER); DetectPostInspectFileFlagsUpdate(f, NULL /* no sgh */, STREAM_TOCLIENT); } #ifdef UNITTESTS /** * \brief wrapper for old tests */ void SigMatchSignatures(ThreadVars *th_v, DetectEngineCtx *de_ctx, DetectEngineThreadCtx *det_ctx, Packet *p) { DetectRun(th_v, de_ctx, det_ctx, p); } #endif /* * TESTS */ #ifdef UNITTESTS #include "tests/detect.c" #endif
./CrossVul/dataset_final_sorted/CWE-347/c/good_724_0
crossvul-cpp_data_bad_315_0
/* * interfaces to the secrets.c library functions in libopenswan. * for now, just stupid wrappers! * * Copyright (C) 1998-2001 D. Hugh Redelmeier. * Copyright (C) 2003-2015 Michael Richardson <mcr@xelerance.com> * Copyright (C) 2003-2010 Paul Wouters <paul@xelerance.com> * Copyright (C) 2008 David McCullough <david_mccullough@securecomputing.com> * Copyright (C) 2009 Avesh Agarwal <avagarwa@redhat.com> * Copyright (C) 2010 Tuomo Soini <tis@foobar.fi> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. See <http://www.fsf.org/copyleft/gpl.txt>. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * Modifications to use OCF interface written by * Daniel Djamaludin <danield@cyberguard.com> * Copyright (C) 2004-2005 Intel Corporation. * */ #include <stddef.h> #include <stdlib.h> #include <stdio.h> #include <string.h> #include <ctype.h> #include <unistd.h> #include <errno.h> #include <time.h> #include <sys/socket.h> #include <netinet/in.h> #include <arpa/inet.h> #include <resolv.h> #include <glob.h> #ifndef GLOB_ABORTED # define GLOB_ABORTED GLOB_ABEND /* fix for old versions */ #endif #include <openswan.h> #include <openswan/ipsec_policy.h> #include "sysdep.h" #include "constants.h" #include "pluto/defs.h" #include "id.h" #include "x509.h" #include "pgp.h" #include "certs.h" #ifdef XAUTH_USEPAM #include <security/pam_appl.h> #endif #include "oswlog.h" #include "mpzfuncs.h" #include "oswcrypto.h" #include "pluto/keys.h" /* * compute an RSA signature with PKCS#1 padding: Note that this assumes that any DER encoding is * **INCLUDED** as part of the hash_val/hash_len. 
*/ void sign_hash(const struct private_key_stuff *pks , const u_char *hash_val, size_t hash_len , u_char *sig_val, size_t sig_len) { chunk_t ch; mpz_t t1; size_t padlen; u_char *p = sig_val; const struct RSA_private_key *k = &pks->u.RSA_private_key; DBG(DBG_CONTROL | DBG_CRYPT, DBG_log("signing hash with RSA Key *%s", pks->pub->u.rsa.keyid) ); /* PKCS#1 v1.5 8.1 encryption-block formatting */ *p++ = 0x00; *p++ = 0x01; /* BT (block type) 01 */ padlen = sig_len - 3 - hash_len; memset(p, 0xFF, padlen); p += padlen; *p++ = 0x00; memcpy(p, hash_val, hash_len); passert(p + hash_len - sig_val == (ptrdiff_t)sig_len); /* PKCS#1 v1.5 8.2 octet-string-to-integer conversion */ n_to_mpz(t1, sig_val, sig_len); /* (could skip leading 0x00) */ /* PKCS#1 v1.5 8.3 RSA computation y = x^c mod n * Better described in PKCS#1 v2.0 5.1 RSADP. * There are two methods, depending on the form of the private key. * We use the one based on the Chinese Remainder Theorem. */ oswcrypto.rsa_mod_exp_crt(t1, t1, &k->p, &k->dP, &k->q, &k->dQ, &k->qInv); /* PKCS#1 v1.5 8.4 integer-to-octet-string conversion */ ch = mpz_to_n(t1, sig_len); memcpy(sig_val, ch.ptr, sig_len); pfree(ch.ptr); mpz_clear(t1); } /* * verify an RSA signature with PKCS#1 padding. * psig, which must be non-NULL, is set to where the decoded signature is * s, is some working area which is of size "s_max_octets" * hash_len is expected result size. * sig_val is actual signature blob. 
* */ err_t verify_signed_hash(const struct RSA_public_key *k , u_char *s, unsigned int s_max_octets , u_char **psig , size_t hash_len , const u_char *sig_val, size_t sig_len) { unsigned int padlen; /* actual exponentiation; see PKCS#1 v2.0 5.1 */ { chunk_t temp_s; MP_INT c; n_to_mpz(&c, sig_val, sig_len); oswcrypto.mod_exp(&c, &c, &k->e, &k->n); temp_s = mpz_to_n(&c, sig_len); /* back to octets */ if(s_max_octets < sig_len) { return "2""exponentiation failed; too many octets"; } memcpy(s, temp_s.ptr, sig_len); pfree(temp_s.ptr); mpz_clear(&c); } /* check signature contents */ /* verify padding (not including any DER digest info! */ padlen = sig_len - 3 - hash_len; /* now check padding */ DBG(DBG_CRYPT, DBG_dump("verify_sh decrypted SIG1:", s, sig_len)); DBG(DBG_CRYPT, DBG_log("pad_len calculated: %d hash_len: %d", padlen, (int)hash_len)); /* skip padding */ if(s[0] != 0x00 || s[1] != 0x01 || s[padlen+2] != 0x00) { return "3""SIG padding does not check out"; } s += padlen + 3; (*psig) = s; /* return SUCCESS */ return NULL; } /* * Local Variables: * c-basic-offset:4 * c-style: pluto * End: */
./CrossVul/dataset_final_sorted/CWE-347/c/bad_315_0
crossvul-cpp_data_good_315_0
/* * interfaces to the secrets.c library functions in libopenswan. * for now, just stupid wrappers! * * Copyright (C) 1998-2001 D. Hugh Redelmeier. * Copyright (C) 2003-2015 Michael Richardson <mcr@xelerance.com> * Copyright (C) 2003-2010 Paul Wouters <paul@xelerance.com> * Copyright (C) 2008 David McCullough <david_mccullough@securecomputing.com> * Copyright (C) 2009 Avesh Agarwal <avagarwa@redhat.com> * Copyright (C) 2010 Tuomo Soini <tis@foobar.fi> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. See <http://www.fsf.org/copyleft/gpl.txt>. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * Modifications to use OCF interface written by * Daniel Djamaludin <danield@cyberguard.com> * Copyright (C) 2004-2005 Intel Corporation. * */ #include <stddef.h> #include <stdlib.h> #include <stdio.h> #include <string.h> #include <ctype.h> #include <unistd.h> #include <errno.h> #include <time.h> #include <sys/socket.h> #include <netinet/in.h> #include <arpa/inet.h> #include <resolv.h> #include <glob.h> #ifndef GLOB_ABORTED # define GLOB_ABORTED GLOB_ABEND /* fix for old versions */ #endif #include <openswan.h> #include <openswan/ipsec_policy.h> #include "sysdep.h" #include "constants.h" #include "pluto/defs.h" #include "id.h" #include "x509.h" #include "pgp.h" #include "certs.h" #ifdef XAUTH_USEPAM #include <security/pam_appl.h> #endif #include "oswlog.h" #include "mpzfuncs.h" #include "oswcrypto.h" #include "pluto/keys.h" /* * compute an RSA signature with PKCS#1 padding: Note that this assumes that any DER encoding is * **INCLUDED** as part of the hash_val/hash_len. 
*/ void sign_hash(const struct private_key_stuff *pks , const u_char *hash_val, size_t hash_len , u_char *sig_val, size_t sig_len) { chunk_t ch; mpz_t t1; size_t padlen; u_char *p = sig_val; const struct RSA_private_key *k = &pks->u.RSA_private_key; DBG(DBG_CONTROL | DBG_CRYPT, DBG_log("signing hash with RSA Key *%s", pks->pub->u.rsa.keyid) ); /* PKCS#1 v1.5 8.1 encryption-block formatting */ *p++ = 0x00; *p++ = 0x01; /* BT (block type) 01 */ padlen = sig_len - 3 - hash_len; memset(p, 0xFF, padlen); p += padlen; *p++ = 0x00; memcpy(p, hash_val, hash_len); passert(p + hash_len - sig_val == (ptrdiff_t)sig_len); /* PKCS#1 v1.5 8.2 octet-string-to-integer conversion */ n_to_mpz(t1, sig_val, sig_len); /* (could skip leading 0x00) */ /* PKCS#1 v1.5 8.3 RSA computation y = x^c mod n * Better described in PKCS#1 v2.0 5.1 RSADP. * There are two methods, depending on the form of the private key. * We use the one based on the Chinese Remainder Theorem. */ oswcrypto.rsa_mod_exp_crt(t1, t1, &k->p, &k->dP, &k->q, &k->dQ, &k->qInv); /* PKCS#1 v1.5 8.4 integer-to-octet-string conversion */ ch = mpz_to_n(t1, sig_len); memcpy(sig_val, ch.ptr, sig_len); pfree(ch.ptr); mpz_clear(t1); } /* * verify an RSA signature with PKCS#1 padding. * psig, which must be non-NULL, is set to where the decoded signature is * s, is some working area which is of size "s_max_octets" * hash_len is expected result size. * sig_val is actual signature blob. 
* */ err_t verify_signed_hash(const struct RSA_public_key *k , u_char *s, unsigned int s_max_octets , u_char **psig , size_t hash_len , const u_char *sig_val, size_t sig_len) { unsigned int padlen; /* actual exponentiation; see PKCS#1 v2.0 5.1 */ { chunk_t temp_s; MP_INT c; n_to_mpz(&c, sig_val, sig_len); oswcrypto.mod_exp(&c, &c, &k->e, &k->n); temp_s = mpz_to_n(&c, sig_len); /* back to octets */ if(s_max_octets < sig_len) { return "2""exponentiation failed; too many octets"; } memcpy(s, temp_s.ptr, sig_len); pfree(temp_s.ptr); mpz_clear(&c); } /* check signature contents */ /* verify padding (not including any DER digest info! */ padlen = sig_len - 3 - hash_len; /* now check padding */ DBG(DBG_CRYPT, DBG_dump("verify_sh decrypted SIG1:", s, sig_len)); DBG(DBG_CRYPT, DBG_log("pad_len calculated: %d hash_len: %d", padlen, (int)hash_len)); /* skip padding */ if(s[0] != 0x00 || s[1] != 0x01 || s[padlen+2] != 0x00) { return "3""SIG padding does not check out"; } /* signature starts after ASN wrapped padding [00,01,FF..FF,00] */ (*psig) = s + padlen + 3; /* verify padding contents */ { const u_char *p; size_t cnt_ffs = 0; for (p = s+2; p < s+padlen+2; p++) if (*p == 0xFF) cnt_ffs ++; if (cnt_ffs != padlen) return "4" "invalid Padding String"; } /* return SUCCESS */ return NULL; } /* * Local Variables: * c-basic-offset:4 * c-style: pluto * End: */
./CrossVul/dataset_final_sorted/CWE-347/c/good_315_0
crossvul-cpp_data_bad_327_1
/* * Copyright (c) 2007-2017, Cameron Rich * * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * Neither the name of the axTLS project nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /** * @file x509.c * * Certificate processing. 
*/ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <time.h> #include <sys/time.h> #include "os_port.h" #include "crypto_misc.h" #ifdef CONFIG_SSL_CERT_VERIFICATION static int x509_v3_subject_alt_name(const uint8_t *cert, int offset, X509_CTX *x509_ctx); static int x509_v3_basic_constraints(const uint8_t *cert, int offset, X509_CTX *x509_ctx); static int x509_v3_key_usage(const uint8_t *cert, int offset, X509_CTX *x509_ctx); /** * Retrieve the signature from a certificate. */ static const uint8_t *get_signature(const uint8_t *asn1_sig, int *len) { int offset = 0; const uint8_t *ptr = NULL; if (asn1_next_obj(asn1_sig, &offset, ASN1_SEQUENCE) < 0 || asn1_skip_obj(asn1_sig, &offset, ASN1_SEQUENCE)) goto end_get_sig; if (asn1_sig[offset++] != ASN1_OCTET_STRING) goto end_get_sig; *len = get_asn1_length(asn1_sig, &offset); ptr = &asn1_sig[offset]; /* all ok */ end_get_sig: return ptr; } #endif /** * Construct a new x509 object. * @return 0 if ok. < 0 if there was a problem. 
*/ int x509_new(const uint8_t *cert, int *len, X509_CTX **ctx) { int begin_tbs, end_tbs, begin_spki, end_spki; int ret = X509_NOT_OK, offset = 0, cert_size = 0; int version = 0; X509_CTX *x509_ctx; #ifdef CONFIG_SSL_CERT_VERIFICATION /* only care if doing verification */ BI_CTX *bi_ctx; #endif *ctx = (X509_CTX *)calloc(1, sizeof(X509_CTX)); x509_ctx = *ctx; /* get the certificate size */ asn1_skip_obj(cert, &cert_size, ASN1_SEQUENCE); if (asn1_next_obj(cert, &offset, ASN1_SEQUENCE) < 0) goto end_cert; begin_tbs = offset; /* start of the tbs */ end_tbs = begin_tbs; /* work out the end of the tbs */ asn1_skip_obj(cert, &end_tbs, ASN1_SEQUENCE); if (asn1_next_obj(cert, &offset, ASN1_SEQUENCE) < 0) goto end_cert; /* optional version */ if (cert[offset] == ASN1_EXPLICIT_TAG && asn1_version(cert, &offset, &version) == X509_NOT_OK) goto end_cert; if (asn1_skip_obj(cert, &offset, ASN1_INTEGER) || /* serial number */ asn1_next_obj(cert, &offset, ASN1_SEQUENCE) < 0) goto end_cert; /* make sure the signature is ok */ if (asn1_signature_type(cert, &offset, x509_ctx)) { ret = X509_VFY_ERROR_UNSUPPORTED_DIGEST; goto end_cert; } if (asn1_name(cert, &offset, x509_ctx->ca_cert_dn) || asn1_validity(cert, &offset, x509_ctx) || asn1_name(cert, &offset, x509_ctx->cert_dn)) { goto end_cert; } begin_spki = offset; if (asn1_public_key(cert, &offset, x509_ctx)) goto end_cert; end_spki = offset; x509_ctx->fingerprint = malloc(SHA1_SIZE); SHA1_CTX sha_fp_ctx; SHA1_Init(&sha_fp_ctx); SHA1_Update(&sha_fp_ctx, &cert[0], cert_size); SHA1_Final(x509_ctx->fingerprint, &sha_fp_ctx); x509_ctx->spki_sha256 = malloc(SHA256_SIZE); SHA256_CTX spki_hash_ctx; SHA256_Init(&spki_hash_ctx); SHA256_Update(&spki_hash_ctx, &cert[begin_spki], end_spki-begin_spki); SHA256_Final(x509_ctx->spki_sha256, &spki_hash_ctx); #ifdef CONFIG_SSL_CERT_VERIFICATION /* only care if doing verification */ bi_ctx = x509_ctx->rsa_ctx->bi_ctx; /* use the appropriate signature algorithm */ switch (x509_ctx->sig_type) { case 
SIG_TYPE_MD5: { MD5_CTX md5_ctx; uint8_t md5_dgst[MD5_SIZE]; MD5_Init(&md5_ctx); MD5_Update(&md5_ctx, &cert[begin_tbs], end_tbs-begin_tbs); MD5_Final(md5_dgst, &md5_ctx); x509_ctx->digest = bi_import(bi_ctx, md5_dgst, MD5_SIZE); } break; case SIG_TYPE_SHA1: { SHA1_CTX sha_ctx; uint8_t sha_dgst[SHA1_SIZE]; SHA1_Init(&sha_ctx); SHA1_Update(&sha_ctx, &cert[begin_tbs], end_tbs-begin_tbs); SHA1_Final(sha_dgst, &sha_ctx); x509_ctx->digest = bi_import(bi_ctx, sha_dgst, SHA1_SIZE); } break; case SIG_TYPE_SHA256: { SHA256_CTX sha256_ctx; uint8_t sha256_dgst[SHA256_SIZE]; SHA256_Init(&sha256_ctx); SHA256_Update(&sha256_ctx, &cert[begin_tbs], end_tbs-begin_tbs); SHA256_Final(sha256_dgst, &sha256_ctx); x509_ctx->digest = bi_import(bi_ctx, sha256_dgst, SHA256_SIZE); } break; case SIG_TYPE_SHA384: { SHA384_CTX sha384_ctx; uint8_t sha384_dgst[SHA384_SIZE]; SHA384_Init(&sha384_ctx); SHA384_Update(&sha384_ctx, &cert[begin_tbs], end_tbs-begin_tbs); SHA384_Final(sha384_dgst, &sha384_ctx); x509_ctx->digest = bi_import(bi_ctx, sha384_dgst, SHA384_SIZE); } break; case SIG_TYPE_SHA512: { SHA512_CTX sha512_ctx; uint8_t sha512_dgst[SHA512_SIZE]; SHA512_Init(&sha512_ctx); SHA512_Update(&sha512_ctx, &cert[begin_tbs], end_tbs-begin_tbs); SHA512_Final(sha512_dgst, &sha512_ctx); x509_ctx->digest = bi_import(bi_ctx, sha512_dgst, SHA512_SIZE); } break; } if (version == 2 && asn1_next_obj(cert, &offset, ASN1_V3_DATA) > 0) { x509_v3_subject_alt_name(cert, offset, x509_ctx); x509_v3_basic_constraints(cert, offset, x509_ctx); x509_v3_key_usage(cert, offset, x509_ctx); } offset = end_tbs; /* skip the rest of v3 data */ if (asn1_skip_obj(cert, &offset, ASN1_SEQUENCE) || asn1_signature(cert, &offset, x509_ctx)) goto end_cert; /* Saves a few bytes of memory */ bi_clear_cache(bi_ctx); #endif ret = X509_OK; end_cert: if (len) { *len = cert_size; } if (ret) { #ifdef CONFIG_SSL_FULL_MODE char buff[64]; printf("Error: Invalid X509 ASN.1 file (%s)\n", x509_display_error(ret, buff)); #endif x509_free(x509_ctx); 
*ctx = NULL; } return ret; } #ifdef CONFIG_SSL_CERT_VERIFICATION /* only care if doing verification */ static int x509_v3_subject_alt_name(const uint8_t *cert, int offset, X509_CTX *x509_ctx) { if ((offset = asn1_is_subject_alt_name(cert, offset)) > 0) { x509_ctx->subject_alt_name_present = true; x509_ctx->subject_alt_name_is_critical = asn1_is_critical_ext(cert, &offset); if (asn1_next_obj(cert, &offset, ASN1_OCTET_STRING) > 0) { int altlen; if ((altlen = asn1_next_obj(cert, &offset, ASN1_SEQUENCE)) > 0) { int endalt = offset + altlen; int totalnames = 0; while (offset < endalt) { int type = cert[offset++]; int dnslen = get_asn1_length(cert, &offset); if (type == ASN1_CONTEXT_DNSNAME) { x509_ctx->subject_alt_dnsnames = (char**) realloc(x509_ctx->subject_alt_dnsnames, (totalnames + 2) * sizeof(char*)); x509_ctx->subject_alt_dnsnames[totalnames] = (char*)malloc(dnslen + 1); x509_ctx->subject_alt_dnsnames[totalnames+1] = NULL; memcpy(x509_ctx->subject_alt_dnsnames[totalnames], cert + offset, dnslen); x509_ctx->subject_alt_dnsnames[totalnames][dnslen] = 0; totalnames++; } offset += dnslen; } } } } return X509_OK; } /** * Basic constraints - see https://tools.ietf.org/html/rfc5280#page-39 */ static int x509_v3_basic_constraints(const uint8_t *cert, int offset, X509_CTX *x509_ctx) { int ret = X509_OK; int lenSeq = 0; if ((offset = asn1_is_basic_constraints(cert, offset)) == 0) goto end_contraints; x509_ctx->basic_constraint_present = true; x509_ctx->basic_constraint_is_critical = asn1_is_critical_ext(cert, &offset); /* Assign Defaults in case not specified basic_constraint_cA will already by zero by virtue of the calloc */ x509_ctx->basic_constraint_cA = 0; /* basic_constraint_pathLenConstraint is unlimited by default. 
10000 is just a large number (limits.h is not already included) */ x509_ctx->basic_constraint_pathLenConstraint = 10000; if ((asn1_next_obj(cert, &offset, ASN1_OCTET_STRING) < 0) || ((lenSeq = asn1_next_obj(cert, &offset, ASN1_SEQUENCE)) < 0)) { ret = X509_NOT_OK; } /* If the Sequence Length is greater than zero, continue with the basic_constraint_cA */ if ((lenSeq>0)&&(asn1_get_bool(cert, &offset, &x509_ctx->basic_constraint_cA) < 0)) { ret = X509_NOT_OK; } /* If the Sequence Length is greater than 3, it has more content than the basic_constraint_cA bool, so grab the pathLenConstraint */ if ((lenSeq>3) && (asn1_get_int(cert, &offset, &x509_ctx->basic_constraint_pathLenConstraint) < 0)) { ret = X509_NOT_OK; } end_contraints: return ret; } /* * Key usage - see https://tools.ietf.org/html/rfc5280#section-4.2.1.3 */ static int x509_v3_key_usage(const uint8_t *cert, int offset, X509_CTX *x509_ctx) { int ret = X509_OK; if ((offset = asn1_is_key_usage(cert, offset)) == 0) goto end_key_usage; x509_ctx->key_usage_present = true; x509_ctx->key_usage_is_critical = asn1_is_critical_ext(cert, &offset); if (asn1_next_obj(cert, &offset, ASN1_OCTET_STRING) < 0 || asn1_get_bit_string_as_int(cert, &offset, &x509_ctx->key_usage)) { ret = X509_NOT_OK; } end_key_usage: return ret; } #endif /** * Free an X.509 object's resources. 
*/ void x509_free(X509_CTX *x509_ctx) { X509_CTX *next; int i; if (x509_ctx == NULL) /* if already null, then don't bother */ return; for (i = 0; i < X509_NUM_DN_TYPES; i++) { free(x509_ctx->ca_cert_dn[i]); free(x509_ctx->cert_dn[i]); } free(x509_ctx->signature); #ifdef CONFIG_SSL_CERT_VERIFICATION if (x509_ctx->digest) { bi_free(x509_ctx->rsa_ctx->bi_ctx, x509_ctx->digest); } if (x509_ctx->fingerprint) { free(x509_ctx->fingerprint); } if (x509_ctx->spki_sha256) { free(x509_ctx->spki_sha256); } if (x509_ctx->subject_alt_dnsnames) { for (i = 0; x509_ctx->subject_alt_dnsnames[i]; ++i) free(x509_ctx->subject_alt_dnsnames[i]); free(x509_ctx->subject_alt_dnsnames); } #endif RSA_free(x509_ctx->rsa_ctx); next = x509_ctx->next; free(x509_ctx); x509_free(next); /* clear the chain */ } #ifdef CONFIG_SSL_CERT_VERIFICATION /** * Take a signature and decrypt it. */ static bigint *sig_verify(BI_CTX *ctx, const uint8_t *sig, int sig_len, bigint *modulus, bigint *pub_exp) { int i, size; bigint *decrypted_bi, *dat_bi; bigint *bir = NULL; uint8_t *block = (uint8_t *)malloc(sig_len); /* decrypt */ dat_bi = bi_import(ctx, sig, sig_len); ctx->mod_offset = BIGINT_M_OFFSET; /* convert to a normal block */ decrypted_bi = bi_mod_power2(ctx, dat_bi, modulus, pub_exp); bi_export(ctx, decrypted_bi, block, sig_len); ctx->mod_offset = BIGINT_M_OFFSET; i = 10; /* start at the first possible non-padded byte */ while (block[i++] && i < sig_len); size = sig_len - i; /* get only the bit we want */ if (size > 0) { int len; const uint8_t *sig_ptr = get_signature(&block[i], &len); if (sig_ptr) { bir = bi_import(ctx, sig_ptr, len); } } free(block); /* save a few bytes of memory */ bi_clear_cache(ctx); return bir; } /** * Do some basic checks on the certificate chain. * * Certificate verification consists of a number of checks: * - The date of the certificate is after the start date. * - The date of the certificate is before the finish date. * - A root certificate exists in the certificate store. 
* - That the certificate(s) are not self-signed. * - The certificate chain is valid. * - The signature of the certificate is valid. * - Basic constraints */ int x509_verify(const CA_CERT_CTX *ca_cert_ctx, const X509_CTX *cert, int *pathLenConstraint) { int ret = X509_OK, i = 0; bigint *cert_sig; X509_CTX *next_cert = NULL; BI_CTX *ctx = NULL; bigint *mod = NULL, *expn = NULL; int match_ca_cert = 0; struct timeval tv; uint8_t is_self_signed = 0; if (cert == NULL) { ret = X509_VFY_ERROR_NO_TRUSTED_CERT; goto end_verify; } /* a self-signed certificate that is not in the CA store - use this to check the signature */ if (asn1_compare_dn(cert->ca_cert_dn, cert->cert_dn) == 0) { is_self_signed = 1; ctx = cert->rsa_ctx->bi_ctx; mod = cert->rsa_ctx->m; expn = cert->rsa_ctx->e; } gettimeofday(&tv, NULL); /* check the not before date */ if (tv.tv_sec < cert->not_before) { ret = X509_VFY_ERROR_NOT_YET_VALID; goto end_verify; } /* check the not after date */ if (tv.tv_sec > cert->not_after) { ret = X509_VFY_ERROR_EXPIRED; goto end_verify; } if (cert->basic_constraint_present) { /* If the cA boolean is not asserted, then the keyCertSign bit in the key usage extension MUST NOT be asserted. */ if (!cert->basic_constraint_cA && IS_SET_KEY_USAGE_FLAG(cert, KEY_USAGE_KEY_CERT_SIGN)) { ret = X509_VFY_ERROR_BASIC_CONSTRAINT; goto end_verify; } /* The pathLenConstraint field is meaningful only if the cA boolean is asserted and the key usage extension, if present, asserts the keyCertSign bit. In this case, it gives the maximum number of non-self-issued intermediate certificates that may follow this certificate in a valid certification path. 
*/ if (cert->basic_constraint_cA && (!cert->key_usage_present || IS_SET_KEY_USAGE_FLAG(cert, KEY_USAGE_KEY_CERT_SIGN)) && (cert->basic_constraint_pathLenConstraint+1) < *pathLenConstraint) { ret = X509_VFY_ERROR_BASIC_CONSTRAINT; goto end_verify; } } next_cert = cert->next; /* last cert in the chain - look for a trusted cert */ if (next_cert == NULL) { if (ca_cert_ctx != NULL) { /* go thru the CA store */ while (i < CONFIG_X509_MAX_CA_CERTS && ca_cert_ctx->cert[i]) { /* the extension is present but the cA boolean is not asserted, then the certified public key MUST NOT be used to verify certificate signatures. */ if (cert->basic_constraint_present && !ca_cert_ctx->cert[i]->basic_constraint_cA) continue; if (asn1_compare_dn(cert->ca_cert_dn, ca_cert_ctx->cert[i]->cert_dn) == 0) { /* use this CA certificate for signature verification */ match_ca_cert = true; ctx = ca_cert_ctx->cert[i]->rsa_ctx->bi_ctx; mod = ca_cert_ctx->cert[i]->rsa_ctx->m; expn = ca_cert_ctx->cert[i]->rsa_ctx->e; break; } i++; } } /* couldn't find a trusted cert (& let self-signed errors be returned) */ if (!match_ca_cert && !is_self_signed) { ret = X509_VFY_ERROR_NO_TRUSTED_CERT; goto end_verify; } } else if (asn1_compare_dn(cert->ca_cert_dn, next_cert->cert_dn) != 0) { /* check the chain */ ret = X509_VFY_ERROR_INVALID_CHAIN; goto end_verify; } else /* use the next certificate in the chain for signature verify */ { ctx = next_cert->rsa_ctx->bi_ctx; mod = next_cert->rsa_ctx->m; expn = next_cert->rsa_ctx->e; } /* cert is self signed */ if (!match_ca_cert && is_self_signed) { ret = X509_VFY_ERROR_SELF_SIGNED; goto end_verify; } /* check the signature */ cert_sig = sig_verify(ctx, cert->signature, cert->sig_len, bi_clone(ctx, mod), bi_clone(ctx, expn)); if (cert_sig && cert->digest) { if (bi_compare(cert_sig, cert->digest) != 0) ret = X509_VFY_ERROR_BAD_SIGNATURE; bi_free(ctx, cert_sig); } else { ret = X509_VFY_ERROR_BAD_SIGNATURE; } bi_clear_cache(ctx); if (ret) goto end_verify; /* go down the 
certificate chain using recursion. */ if (next_cert != NULL) { (*pathLenConstraint)++; /* don't include last certificate */ ret = x509_verify(ca_cert_ctx, next_cert, pathLenConstraint); } end_verify: return ret; } #endif #if defined (CONFIG_SSL_FULL_MODE) /** * Used for diagnostics. */ void x509_print(const X509_CTX *cert, CA_CERT_CTX *ca_cert_ctx) { if (cert == NULL) return; char not_part_of_cert[30]; strcpy_P(not_part_of_cert, "<Not Part Of Certificate>"); char critical[16]; strcpy_P(critical, "critical, "); printf("=== CERTIFICATE ISSUED TO ===\n"); printf("Common Name (CN):\t\t"); printf("%s\n", cert->cert_dn[X509_COMMON_NAME] ? cert->cert_dn[X509_COMMON_NAME] : not_part_of_cert); printf("Organization (O):\t\t"); printf("%s\n", cert->cert_dn[X509_ORGANIZATION] ? cert->cert_dn[X509_ORGANIZATION] : not_part_of_cert); if (cert->cert_dn[X509_ORGANIZATIONAL_UNIT]) { printf("Organizational Unit (OU):\t"); printf("%s\n", cert->cert_dn[X509_ORGANIZATIONAL_UNIT]); } if (cert->cert_dn[X509_LOCATION]) { printf("Location (L):\t\t\t"); printf("%s\n", cert->cert_dn[X509_LOCATION]); } if (cert->cert_dn[X509_COUNTRY]) { printf("Country (C):\t\t\t"); printf("%s\n", cert->cert_dn[X509_COUNTRY]); } if (cert->cert_dn[X509_STATE]) { printf("State (ST):\t\t\t"); printf("%s\n", cert->cert_dn[X509_STATE]); } if (cert->basic_constraint_present) { printf("Basic Constraints:\t\t%sCA:%s, pathlen:%d\n", cert->basic_constraint_is_critical ? critical : "", cert->basic_constraint_cA? "TRUE" : "FALSE", cert->basic_constraint_pathLenConstraint); } if (cert->key_usage_present) { printf("Key Usage:\t\t\t%s", cert->key_usage_is_critical ? 
critical : ""); bool has_started = false; if (IS_SET_KEY_USAGE_FLAG(cert, KEY_USAGE_DIGITAL_SIGNATURE)) { printf("Digital Signature"); has_started = true; } if (IS_SET_KEY_USAGE_FLAG(cert, KEY_USAGE_NON_REPUDIATION)) { if (has_started) printf(", "); printf("Non Repudiation"); has_started = true; } if (IS_SET_KEY_USAGE_FLAG(cert, KEY_USAGE_KEY_ENCIPHERMENT)) { if (has_started) printf(", "); printf("Key Encipherment"); has_started = true; } if (IS_SET_KEY_USAGE_FLAG(cert, KEY_USAGE_DATA_ENCIPHERMENT)) { if (has_started) printf(", "); printf("Data Encipherment"); has_started = true; } if (IS_SET_KEY_USAGE_FLAG(cert, KEY_USAGE_KEY_AGREEMENT)) { if (has_started) printf(", "); printf("Key Agreement"); has_started = true; } if (IS_SET_KEY_USAGE_FLAG(cert, KEY_USAGE_KEY_CERT_SIGN)) { if (has_started) printf(", "); printf("Key Cert Sign"); has_started = true; } if (IS_SET_KEY_USAGE_FLAG(cert, KEY_USAGE_CRL_SIGN)) { if (has_started) printf(", "); printf("CRL Sign"); has_started = true; } if (IS_SET_KEY_USAGE_FLAG(cert, KEY_USAGE_ENCIPHER_ONLY)) { if (has_started) printf(", "); printf("Encipher Only"); has_started = true; } if (IS_SET_KEY_USAGE_FLAG(cert, KEY_USAGE_DECIPHER_ONLY)) { if (has_started) printf(", "); printf("Decipher Only"); has_started = true; } printf("\n"); } if (cert->subject_alt_name_present) { printf("Subject Alt Name:\t\t%s", cert->subject_alt_name_is_critical ? critical : ""); if (cert->subject_alt_dnsnames) { int i = 0; while (cert->subject_alt_dnsnames[i]) printf("%s ", cert->subject_alt_dnsnames[i++]); } printf("\n"); } printf("=== CERTIFICATE ISSUED BY ===\n"); printf("Common Name (CN):\t\t"); printf("%s\n", cert->ca_cert_dn[X509_COMMON_NAME] ? cert->ca_cert_dn[X509_COMMON_NAME] : not_part_of_cert); printf("Organization (O):\t\t"); printf("%s\n", cert->ca_cert_dn[X509_ORGANIZATION] ? 
cert->ca_cert_dn[X509_ORGANIZATION] : not_part_of_cert); if (cert->ca_cert_dn[X509_ORGANIZATIONAL_UNIT]) { printf("Organizational Unit (OU):\t"); printf("%s\n", cert->ca_cert_dn[X509_ORGANIZATIONAL_UNIT]); } if (cert->ca_cert_dn[X509_LOCATION]) { printf("Location (L):\t\t\t"); printf("%s\n", cert->ca_cert_dn[X509_LOCATION]); } if (cert->ca_cert_dn[X509_COUNTRY]) { printf("Country (C):\t\t\t"); printf("%s\n", cert->ca_cert_dn[X509_COUNTRY]); } if (cert->ca_cert_dn[X509_STATE]) { printf("State (ST):\t\t\t"); printf("%s\n", cert->ca_cert_dn[X509_STATE]); } printf("Not Before:\t\t\t%s", ctime(&cert->not_before)); printf("Not After:\t\t\t%s", ctime(&cert->not_after)); printf("RSA bitsize:\t\t\t%d\n", cert->rsa_ctx->num_octets*8); printf("Sig Type:\t\t\t"); switch (cert->sig_type) { case SIG_TYPE_MD5: printf("MD5\n"); break; case SIG_TYPE_SHA1: printf("SHA1\n"); break; case SIG_TYPE_SHA256: printf("SHA256\n"); break; case SIG_TYPE_SHA384: printf("SHA384\n"); break; case SIG_TYPE_SHA512: printf("SHA512\n"); break; default: printf("Unrecognized: %d\n", cert->sig_type); break; } if (ca_cert_ctx) { int pathLenConstraint = 0; char buff[64]; printf("Verify:\t\t\t\t%s\n", x509_display_error(x509_verify(ca_cert_ctx, cert, &pathLenConstraint), buff)); } #if 0 print_blob("Signature", cert->signature, cert->sig_len); bi_print("Modulus", cert->rsa_ctx->m); bi_print("Pub Exp", cert->rsa_ctx->e); #endif if (ca_cert_ctx) { x509_print(cert->next, ca_cert_ctx); } TTY_FLUSH(); } const char * x509_display_error(int error, char *buff) { switch (error) { case X509_OK: strcpy_P(buff, "Certificate verify successful"); return buff; case X509_NOT_OK: strcpy_P(buff, "X509 not ok"); return buff; case X509_VFY_ERROR_NO_TRUSTED_CERT: strcpy_P(buff, "No trusted cert is available"); return buff; case X509_VFY_ERROR_BAD_SIGNATURE: strcpy_P(buff, "Bad signature"); return buff; case X509_VFY_ERROR_NOT_YET_VALID: strcpy_P(buff, "Cert is not yet valid"); return buff; case X509_VFY_ERROR_EXPIRED: 
strcpy_P(buff, "Cert has expired"); return buff; case X509_VFY_ERROR_SELF_SIGNED: strcpy_P(buff, "Cert is self-signed"); return buff; case X509_VFY_ERROR_INVALID_CHAIN: strcpy_P(buff, "Chain is invalid (check order of certs)"); return buff; case X509_VFY_ERROR_UNSUPPORTED_DIGEST: strcpy_P(buff, "Unsupported digest"); return buff; case X509_INVALID_PRIV_KEY: strcpy_P(buff, "Invalid private key"); return buff; case X509_VFY_ERROR_BASIC_CONSTRAINT: strcpy_P(buff, "Basic constraint invalid"); return buff; default: strcpy_P(buff, "Unknown"); return buff; } } #endif /* CONFIG_SSL_FULL_MODE */
./CrossVul/dataset_final_sorted/CWE-347/c/bad_327_1
crossvul-cpp_data_bad_3998_0
#include "curveMath.h" #include <string.h> int pointZZ_pEqual(const PointZZ_p * op1, const PointZZ_p * op2) { // check x coords if(mpz_cmp(op1->x, op2->x) != 0) { return 0; } // check y coords if(mpz_cmp(op1->y, op2->y) != 0) { return 0; } return 1; } void pointZZ_pDouble(PointZZ_p * rop, const PointZZ_p * op, const CurveZZ_p * curve) { mpz_t numer, denom, lambda; mpz_inits(numer, denom, lambda, NULL); // calculate lambda mpz_mul(numer, op->x, op->x); mpz_mul_ui(numer, numer, 3); mpz_add(numer, numer, curve->a); mpz_mul_ui(denom, op->y, 2); mpz_invert(denom, denom, curve->p); // TODO check status mpz_mul(lambda, numer, denom); mpz_mod(lambda, lambda, curve->p); // calculate resulting x coord mpz_mul(rop->x, lambda, lambda); mpz_sub(rop->x, rop->x, op->x); mpz_sub(rop->x, rop->x, op->x); mpz_mod(rop->x, rop->x, curve->p); //calculate resulting y coord mpz_sub(rop->y, op->x, rop->x); mpz_mul(rop->y, lambda, rop->y); mpz_sub(rop->y, rop->y, op->y); mpz_mod(rop->y, rop->y, curve->p); mpz_clears(numer, denom, lambda, NULL); } void pointZZ_pAdd(PointZZ_p * rop, const PointZZ_p * op1, const PointZZ_p * op2, const CurveZZ_p * curve) { mpz_t xdiff, ydiff, lambda; mpz_inits(xdiff, ydiff, lambda, NULL); // calculate lambda mpz_sub(ydiff, op2->y, op1->y); mpz_sub(xdiff, op2->x, op1->x); mpz_invert(xdiff, xdiff, curve->p); // TODO check status mpz_mul(lambda, ydiff, xdiff); mpz_mod(lambda, lambda, curve->p); // calculate resulting x coord mpz_mul(rop->x, lambda, lambda); mpz_sub(rop->x, rop->x, op1->x); mpz_sub(rop->x, rop->x, op2->x); mpz_mod(rop->x, rop->x, curve->p); //calculate resulting y coord mpz_sub(rop->y, op1->x, rop->x); mpz_mul(rop->y, lambda, rop->y); mpz_sub(rop->y, rop->y, op1->y); mpz_mod(rop->y, rop->y, curve->p); mpz_clears(xdiff, ydiff, lambda, NULL); } void pointZZ_pMul(PointZZ_p * rop, const PointZZ_p * point, const mpz_t scalar, const CurveZZ_p * curve) { PointZZ_p R0, R1, tmp; mpz_inits(R1.x, R1.y, tmp.x, tmp.y, NULL); mpz_init_set(R0.x, point->x); 
mpz_init_set(R0.y, point->y); pointZZ_pDouble(&R1, point, curve); int dbits = mpz_sizeinbase(scalar, 2), i; for(i = dbits - 2; i >= 0; i--) { if(mpz_tstbit(scalar, i)) { mpz_set(tmp.x, R0.x); mpz_set(tmp.y, R0.y); pointZZ_pAdd(&R0, &R1, &tmp, curve); mpz_set(tmp.x, R1.x); mpz_set(tmp.y, R1.y); pointZZ_pDouble(&R1, &tmp, curve); } else { mpz_set(tmp.x, R1.x); mpz_set(tmp.y, R1.y); pointZZ_pAdd(&R1, &R0, &tmp, curve); mpz_set(tmp.x, R0.x); mpz_set(tmp.y, R0.y); pointZZ_pDouble(&R0, &tmp, curve); } } mpz_init_set(rop->x, R0.x); mpz_init_set(rop->y, R0.y); mpz_clears(R0.x, R0.y, R1.x, R1.y, tmp.x, tmp.y, NULL); } void pointZZ_pShamirsTrick(PointZZ_p * rop, const PointZZ_p * point1, const mpz_t scalar1, const PointZZ_p * point2, const mpz_t scalar2, const CurveZZ_p * curve) { PointZZ_p sum, tmp; mpz_inits(sum.x, sum.y, tmp.x, tmp.y, NULL); pointZZ_pAdd(&sum, point1, point2, curve); int scalar1Bits = mpz_sizeinbase(scalar1, 2); int scalar2Bits = mpz_sizeinbase(scalar2, 2); int l = (scalar1Bits > scalar2Bits ? 
scalar1Bits : scalar2Bits) - 1; if(mpz_tstbit(scalar1, l) && mpz_tstbit(scalar2, l)) { mpz_set(rop->x, sum.x); mpz_set(rop->y, sum.y); } else if(mpz_tstbit(scalar1, l)) { mpz_set(rop->x, point1->x); mpz_set(rop->y, point1->y); } else if(mpz_tstbit(scalar2, l)) { mpz_set(rop->x, point2->x); mpz_set(rop->y, point2->y); } for(l = l - 1; l >= 0; l--) { mpz_set(tmp.x, rop->x); mpz_set(tmp.y, rop->y); pointZZ_pDouble(rop, &tmp, curve); mpz_set(tmp.x, rop->x); mpz_set(tmp.y, rop->y); if(mpz_tstbit(scalar1, l) && mpz_tstbit(scalar2, l)) { pointZZ_pAdd(rop, &tmp, &sum, curve); } else if(mpz_tstbit(scalar1, l)) { pointZZ_pAdd(rop, &tmp, point1, curve); } else if(mpz_tstbit(scalar2, l)) { pointZZ_pAdd(rop, &tmp, point2, curve); } } mpz_clears(sum.x, sum.y, tmp.x, tmp.y, NULL); } /****************************************************************************** PYTHON BINDINGS ******************************************************************************/ static PyObject * curvemath_mul(PyObject *self, PyObject *args) { char * x, * y, * d, * p, * a, * b, * q, * gx, * gy; if (!PyArg_ParseTuple(args, "sssssssss", &x, &y, &d, &p, &a, &b, &q, &gx, &gy)) { return NULL; } PointZZ_p result; mpz_t scalar; mpz_init_set_str(scalar, d, 10); CurveZZ_p * curve = buildCurveZZ_p(p, a, b, q, gx, gy, 10);; PointZZ_p * point = buildPointZZ_p(x, y, 10); pointZZ_pMul(&result, point, scalar, curve); destroyPointZZ_p(point); destroyCurveZZ_p(curve); char * resultX = mpz_get_str(NULL, 10, result.x); char * resultY = mpz_get_str(NULL, 10, result.y); mpz_clears(result.x, result.y, scalar, NULL); PyObject * ret = Py_BuildValue("ss", resultX, resultY); free(resultX); free(resultY); return ret; } static PyObject * curvemath_add(PyObject *self, PyObject *args) { char * px, * py, * qx, * qy, * p, * a, * b, * q, * gx, * gy; if (!PyArg_ParseTuple(args, "ssssssssss", &px, &py, &qx, &qy, &p, &a, &b, &q, &gx, &gy)) { return NULL; } PointZZ_p result; mpz_inits(result.x, result.y, NULL); CurveZZ_p * curve = 
buildCurveZZ_p(p, a, b, q, gx, gy, 10);; PointZZ_p * P = buildPointZZ_p(px, py, 10); PointZZ_p * Q = buildPointZZ_p(qx, qy, 10); if(pointZZ_pEqual(P, Q)) { pointZZ_pDouble(&result, P, curve); } else { pointZZ_pAdd(&result, P, Q, curve); } destroyPointZZ_p(P); destroyPointZZ_p(Q); destroyCurveZZ_p(curve); char * resultX = mpz_get_str(NULL, 10, result.x); char * resultY = mpz_get_str(NULL, 10, result.y); mpz_clears(result.x, result.y, NULL); PyObject * ret = Py_BuildValue("ss", resultX, resultY); free(resultX); free(resultY); return ret; } static PyMethodDef curvemath__methods__[] = { {"mul", curvemath_mul, METH_VARARGS, "Multiply a curve point by an integer scalar."}, {"add", curvemath_add, METH_VARARGS, "Add two points on a curve."}, {NULL, NULL, 0, NULL} /* Sentinel */ }; #if PY_MAJOR_VERSION >= 3 static struct PyModuleDef moduledef = { PyModuleDef_HEAD_INIT, "curvemath", /* m_name */ NULL, /* m_doc */ -1, /* m_size */ curvemath__methods__, /* m_methods */ NULL, /* m_reload */ NULL, /* m_traverse */ NULL, /* m_clear */ NULL, /* m_free */ }; PyMODINIT_FUNC PyInit_curvemath(void) { PyObject * m = PyModule_Create(&moduledef); return m; } #else PyMODINIT_FUNC initcurvemath(void) { Py_InitModule("curvemath", curvemath__methods__); } #endif
./CrossVul/dataset_final_sorted/CWE-347/c/bad_3998_0
crossvul-cpp_data_good_327_1
/* * Copyright (c) 2007-2017, Cameron Rich * * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * Neither the name of the axTLS project nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /** * @file x509.c * * Certificate processing. 
*/ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <time.h> #include <sys/time.h> #include "os_port.h" #include "crypto_misc.h" #ifdef CONFIG_SSL_CERT_VERIFICATION static int x509_v3_subject_alt_name(const uint8_t *cert, int offset, X509_CTX *x509_ctx); static int x509_v3_basic_constraints(const uint8_t *cert, int offset, X509_CTX *x509_ctx); static int x509_v3_key_usage(const uint8_t *cert, int offset, X509_CTX *x509_ctx); #endif /** * Construct a new x509 object. * @return 0 if ok. < 0 if there was a problem. */ int x509_new(const uint8_t *cert, int *len, X509_CTX **ctx) { int begin_tbs, end_tbs, begin_spki, end_spki; int ret = X509_NOT_OK, offset = 0, cert_size = 0; int version = 0; X509_CTX *x509_ctx; #ifdef CONFIG_SSL_CERT_VERIFICATION /* only care if doing verification */ BI_CTX *bi_ctx; #endif *ctx = (X509_CTX *)calloc(1, sizeof(X509_CTX)); x509_ctx = *ctx; /* get the certificate size */ asn1_skip_obj(cert, &cert_size, ASN1_SEQUENCE); if (asn1_next_obj(cert, &offset, ASN1_SEQUENCE) < 0) goto end_cert; begin_tbs = offset; /* start of the tbs */ end_tbs = begin_tbs; /* work out the end of the tbs */ asn1_skip_obj(cert, &end_tbs, ASN1_SEQUENCE); if (asn1_next_obj(cert, &offset, ASN1_SEQUENCE) < 0) goto end_cert; /* optional version */ if (cert[offset] == ASN1_EXPLICIT_TAG && asn1_version(cert, &offset, &version) == X509_NOT_OK) goto end_cert; if (asn1_skip_obj(cert, &offset, ASN1_INTEGER) || /* serial number */ asn1_next_obj(cert, &offset, ASN1_SEQUENCE) < 0) goto end_cert; /* make sure the signature is ok */ if (asn1_signature_type(cert, &offset, x509_ctx)) { ret = X509_VFY_ERROR_UNSUPPORTED_DIGEST; goto end_cert; } if (asn1_name(cert, &offset, x509_ctx->ca_cert_dn) || asn1_validity(cert, &offset, x509_ctx) || asn1_name(cert, &offset, x509_ctx->cert_dn)) { goto end_cert; } begin_spki = offset; if (asn1_public_key(cert, &offset, x509_ctx)) goto end_cert; end_spki = offset; x509_ctx->fingerprint = malloc(SHA1_SIZE); SHA1_CTX sha_fp_ctx; 
SHA1_Init(&sha_fp_ctx); SHA1_Update(&sha_fp_ctx, &cert[0], cert_size); SHA1_Final(x509_ctx->fingerprint, &sha_fp_ctx); x509_ctx->spki_sha256 = malloc(SHA256_SIZE); SHA256_CTX spki_hash_ctx; SHA256_Init(&spki_hash_ctx); SHA256_Update(&spki_hash_ctx, &cert[begin_spki], end_spki-begin_spki); SHA256_Final(x509_ctx->spki_sha256, &spki_hash_ctx); #ifdef CONFIG_SSL_CERT_VERIFICATION /* only care if doing verification */ bi_ctx = x509_ctx->rsa_ctx->bi_ctx; /* use the appropriate signature algorithm */ switch (x509_ctx->sig_type) { case SIG_TYPE_MD5: { MD5_CTX md5_ctx; uint8_t md5_dgst[MD5_SIZE]; MD5_Init(&md5_ctx); MD5_Update(&md5_ctx, &cert[begin_tbs], end_tbs-begin_tbs); MD5_Final(md5_dgst, &md5_ctx); x509_ctx->digest = bi_import(bi_ctx, md5_dgst, MD5_SIZE); } break; case SIG_TYPE_SHA1: { SHA1_CTX sha_ctx; uint8_t sha_dgst[SHA1_SIZE]; SHA1_Init(&sha_ctx); SHA1_Update(&sha_ctx, &cert[begin_tbs], end_tbs-begin_tbs); SHA1_Final(sha_dgst, &sha_ctx); x509_ctx->digest = bi_import(bi_ctx, sha_dgst, SHA1_SIZE); } break; case SIG_TYPE_SHA256: { SHA256_CTX sha256_ctx; uint8_t sha256_dgst[SHA256_SIZE]; SHA256_Init(&sha256_ctx); SHA256_Update(&sha256_ctx, &cert[begin_tbs], end_tbs-begin_tbs); SHA256_Final(sha256_dgst, &sha256_ctx); x509_ctx->digest = bi_import(bi_ctx, sha256_dgst, SHA256_SIZE); } break; case SIG_TYPE_SHA384: { SHA384_CTX sha384_ctx; uint8_t sha384_dgst[SHA384_SIZE]; SHA384_Init(&sha384_ctx); SHA384_Update(&sha384_ctx, &cert[begin_tbs], end_tbs-begin_tbs); SHA384_Final(sha384_dgst, &sha384_ctx); x509_ctx->digest = bi_import(bi_ctx, sha384_dgst, SHA384_SIZE); } break; case SIG_TYPE_SHA512: { SHA512_CTX sha512_ctx; uint8_t sha512_dgst[SHA512_SIZE]; SHA512_Init(&sha512_ctx); SHA512_Update(&sha512_ctx, &cert[begin_tbs], end_tbs-begin_tbs); SHA512_Final(sha512_dgst, &sha512_ctx); x509_ctx->digest = bi_import(bi_ctx, sha512_dgst, SHA512_SIZE); } break; } if (version == 2 && asn1_next_obj(cert, &offset, ASN1_V3_DATA) > 0) { x509_v3_subject_alt_name(cert, offset, x509_ctx); 
x509_v3_basic_constraints(cert, offset, x509_ctx); x509_v3_key_usage(cert, offset, x509_ctx); } offset = end_tbs; /* skip the rest of v3 data */ if (asn1_skip_obj(cert, &offset, ASN1_SEQUENCE) || asn1_signature(cert, &offset, x509_ctx)) goto end_cert; /* Saves a few bytes of memory */ bi_clear_cache(bi_ctx); #endif ret = X509_OK; end_cert: if (len) { *len = cert_size; } if (ret) { #ifdef CONFIG_SSL_FULL_MODE char buff[64]; printf("Error: Invalid X509 ASN.1 file (%s)\n", x509_display_error(ret, buff)); #endif x509_free(x509_ctx); *ctx = NULL; } return ret; } #ifdef CONFIG_SSL_CERT_VERIFICATION /* only care if doing verification */ static int x509_v3_subject_alt_name(const uint8_t *cert, int offset, X509_CTX *x509_ctx) { if ((offset = asn1_is_subject_alt_name(cert, offset)) > 0) { x509_ctx->subject_alt_name_present = true; x509_ctx->subject_alt_name_is_critical = asn1_is_critical_ext(cert, &offset); if (asn1_next_obj(cert, &offset, ASN1_OCTET_STRING) > 0) { int altlen; if ((altlen = asn1_next_obj(cert, &offset, ASN1_SEQUENCE)) > 0) { int endalt = offset + altlen; int totalnames = 0; while (offset < endalt) { int type = cert[offset++]; int dnslen = get_asn1_length(cert, &offset); if (type == ASN1_CONTEXT_DNSNAME) { x509_ctx->subject_alt_dnsnames = (char**) realloc(x509_ctx->subject_alt_dnsnames, (totalnames + 2) * sizeof(char*)); x509_ctx->subject_alt_dnsnames[totalnames] = (char*)malloc(dnslen + 1); x509_ctx->subject_alt_dnsnames[totalnames+1] = NULL; memcpy(x509_ctx->subject_alt_dnsnames[totalnames], cert + offset, dnslen); x509_ctx->subject_alt_dnsnames[totalnames][dnslen] = 0; totalnames++; } offset += dnslen; } } } } return X509_OK; } /** * Basic constraints - see https://tools.ietf.org/html/rfc5280#page-39 */ static int x509_v3_basic_constraints(const uint8_t *cert, int offset, X509_CTX *x509_ctx) { int ret = X509_OK; int lenSeq = 0; if ((offset = asn1_is_basic_constraints(cert, offset)) == 0) goto end_contraints; x509_ctx->basic_constraint_present = true; 
x509_ctx->basic_constraint_is_critical = asn1_is_critical_ext(cert, &offset); /* Assign Defaults in case not specified basic_constraint_cA will already by zero by virtue of the calloc */ x509_ctx->basic_constraint_cA = 0; /* basic_constraint_pathLenConstraint is unlimited by default. 10000 is just a large number (limits.h is not already included) */ x509_ctx->basic_constraint_pathLenConstraint = 10000; if ((asn1_next_obj(cert, &offset, ASN1_OCTET_STRING) < 0) || ((lenSeq = asn1_next_obj(cert, &offset, ASN1_SEQUENCE)) < 0)) { ret = X509_NOT_OK; } /* If the Sequence Length is greater than zero, continue with the basic_constraint_cA */ if ((lenSeq>0)&&(asn1_get_bool(cert, &offset, &x509_ctx->basic_constraint_cA) < 0)) { ret = X509_NOT_OK; } /* If the Sequence Length is greater than 3, it has more content than the basic_constraint_cA bool, so grab the pathLenConstraint */ if ((lenSeq>3) && (asn1_get_int(cert, &offset, &x509_ctx->basic_constraint_pathLenConstraint) < 0)) { ret = X509_NOT_OK; } end_contraints: return ret; } /* * Key usage - see https://tools.ietf.org/html/rfc5280#section-4.2.1.3 */ static int x509_v3_key_usage(const uint8_t *cert, int offset, X509_CTX *x509_ctx) { int ret = X509_OK; if ((offset = asn1_is_key_usage(cert, offset)) == 0) goto end_key_usage; x509_ctx->key_usage_present = true; x509_ctx->key_usage_is_critical = asn1_is_critical_ext(cert, &offset); if (asn1_next_obj(cert, &offset, ASN1_OCTET_STRING) < 0 || asn1_get_bit_string_as_int(cert, &offset, &x509_ctx->key_usage)) { ret = X509_NOT_OK; } end_key_usage: return ret; } #endif /** * Free an X.509 object's resources. 
*/ void x509_free(X509_CTX *x509_ctx) { X509_CTX *next; int i; if (x509_ctx == NULL) /* if already null, then don't bother */ return; for (i = 0; i < X509_NUM_DN_TYPES; i++) { free(x509_ctx->ca_cert_dn[i]); free(x509_ctx->cert_dn[i]); } free(x509_ctx->signature); #ifdef CONFIG_SSL_CERT_VERIFICATION if (x509_ctx->digest) { bi_free(x509_ctx->rsa_ctx->bi_ctx, x509_ctx->digest); } if (x509_ctx->fingerprint) { free(x509_ctx->fingerprint); } if (x509_ctx->spki_sha256) { free(x509_ctx->spki_sha256); } if (x509_ctx->subject_alt_dnsnames) { for (i = 0; x509_ctx->subject_alt_dnsnames[i]; ++i) free(x509_ctx->subject_alt_dnsnames[i]); free(x509_ctx->subject_alt_dnsnames); } #endif RSA_free(x509_ctx->rsa_ctx); next = x509_ctx->next; free(x509_ctx); x509_free(next); /* clear the chain */ } #ifdef CONFIG_SSL_CERT_VERIFICATION static const uint8_t sig_prefix_md5[] PROGMEM = {0x30, 0x20, 0x30, 0x0C, 0x06, 0x08, 0x2A, 0x86, 0x48, 0x86, 0xF7, 0x0D, 0x02, 0x05, 0x05, 0x00, 0x04, 0x10}; static const uint8_t sig_prefix_sha1[] PROGMEM = {0x30, 0x21, 0x30, 0x09, 0x06, 0x05, 0x2b, 0x0E, 0x03, 0x02, 0x1A, 0x05, 0x00, 0x04, 0x14}; static const uint8_t sig_prefix_sha256[] PROGMEM = {0x30, 0x31, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x01, 0x05, 0x00, 0x04, 0x20}; static const uint8_t sig_prefix_sha384[] PROGMEM = {0x30, 0x41, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x02, 0x05, 0x00, 0x04, 0x30}; static const uint8_t sig_prefix_sha512[] PROGMEM = {0x30, 0x51, 0x30, 0x0d, 0x06, 0x09, 0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x03, 0x05, 0x00, 0x04, 0x40}; /** * Take a signature and decrypt it. 
*/ static bigint *sig_verify(BI_CTX *ctx, const uint8_t *sig, int sig_len, uint8_t sig_type, bigint *modulus, bigint *pub_exp) { int i; bigint *decrypted_bi, *dat_bi; bigint *bir = NULL; uint8_t *block = (uint8_t *)malloc(sig_len); const uint8_t *sig_prefix = NULL; uint8_t sig_prefix_size = 0, hash_len = 0; /* adjust our expections */ switch (sig_type) { case SIG_TYPE_MD5: sig_prefix = sig_prefix_md5; sig_prefix_size = sizeof(sig_prefix_md5); break; case SIG_TYPE_SHA1: sig_prefix = sig_prefix_sha1; sig_prefix_size = sizeof(sig_prefix_sha1); break; case SIG_TYPE_SHA256: sig_prefix = sig_prefix_sha256; sig_prefix_size = sizeof(sig_prefix_sha256); break; case SIG_TYPE_SHA384: sig_prefix = sig_prefix_sha384; sig_prefix_size = sizeof(sig_prefix_sha384); break; case SIG_TYPE_SHA512: sig_prefix = sig_prefix_sha512; sig_prefix_size = sizeof(sig_prefix_sha512); break; } if (sig_prefix) hash_len = sig_prefix[sig_prefix_size - 1]; /* check length (#A) */ if (sig_len < 2 + 8 + 1 + sig_prefix_size + hash_len) goto err; /* decrypt */ dat_bi = bi_import(ctx, sig, sig_len); ctx->mod_offset = BIGINT_M_OFFSET; /* convert to a normal block */ decrypted_bi = bi_mod_power2(ctx, dat_bi, modulus, pub_exp); bi_export(ctx, decrypted_bi, block, sig_len); ctx->mod_offset = BIGINT_M_OFFSET; /* check the first 2 bytes */ if (block[0] != 0 || block[1] != 1) goto err; /* check the padding */ i = 2; /* start at the first padding byte */ while (i < sig_len - 1 - sig_prefix_size - hash_len) { /* together with (#A), we require at least 8 bytes of padding */ if (block[i++] != 0xFF) goto err; } /* check end of padding */ if (block[i++] != 0) goto err; /* check the ASN.1 metadata */ if (memcmp_P(block+i, sig_prefix, sig_prefix_size)) goto err; /* now we can get the hash we need */ bir = bi_import(ctx, block + i + sig_prefix_size, hash_len); err: free(block); /* save a few bytes of memory */ bi_clear_cache(ctx); return bir; } /** * Do some basic checks on the certificate chain. 
* * Certificate verification consists of a number of checks: * - The date of the certificate is after the start date. * - The date of the certificate is before the finish date. * - A root certificate exists in the certificate store. * - That the certificate(s) are not self-signed. * - The certificate chain is valid. * - The signature of the certificate is valid. * - Basic constraints */ int x509_verify(const CA_CERT_CTX *ca_cert_ctx, const X509_CTX *cert, int *pathLenConstraint) { int ret = X509_OK, i = 0; bigint *cert_sig; X509_CTX *next_cert = NULL; BI_CTX *ctx = NULL; bigint *mod = NULL, *expn = NULL; int match_ca_cert = 0; struct timeval tv; uint8_t is_self_signed = 0; if (cert == NULL) { ret = X509_VFY_ERROR_NO_TRUSTED_CERT; goto end_verify; } /* a self-signed certificate that is not in the CA store - use this to check the signature */ if (asn1_compare_dn(cert->ca_cert_dn, cert->cert_dn) == 0) { is_self_signed = 1; ctx = cert->rsa_ctx->bi_ctx; mod = cert->rsa_ctx->m; expn = cert->rsa_ctx->e; } gettimeofday(&tv, NULL); /* check the not before date */ if (tv.tv_sec < cert->not_before) { ret = X509_VFY_ERROR_NOT_YET_VALID; goto end_verify; } /* check the not after date */ if (tv.tv_sec > cert->not_after) { ret = X509_VFY_ERROR_EXPIRED; goto end_verify; } if (cert->basic_constraint_present) { /* If the cA boolean is not asserted, then the keyCertSign bit in the key usage extension MUST NOT be asserted. */ if (!cert->basic_constraint_cA && IS_SET_KEY_USAGE_FLAG(cert, KEY_USAGE_KEY_CERT_SIGN)) { ret = X509_VFY_ERROR_BASIC_CONSTRAINT; goto end_verify; } /* The pathLenConstraint field is meaningful only if the cA boolean is asserted and the key usage extension, if present, asserts the keyCertSign bit. In this case, it gives the maximum number of non-self-issued intermediate certificates that may follow this certificate in a valid certification path. 
*/ if (cert->basic_constraint_cA && (!cert->key_usage_present || IS_SET_KEY_USAGE_FLAG(cert, KEY_USAGE_KEY_CERT_SIGN)) && (cert->basic_constraint_pathLenConstraint+1) < *pathLenConstraint) { ret = X509_VFY_ERROR_BASIC_CONSTRAINT; goto end_verify; } } next_cert = cert->next; /* last cert in the chain - look for a trusted cert */ if (next_cert == NULL) { if (ca_cert_ctx != NULL) { /* go thru the CA store */ while (i < CONFIG_X509_MAX_CA_CERTS && ca_cert_ctx->cert[i]) { /* the extension is present but the cA boolean is not asserted, then the certified public key MUST NOT be used to verify certificate signatures. */ if (cert->basic_constraint_present && !ca_cert_ctx->cert[i]->basic_constraint_cA) continue; if (asn1_compare_dn(cert->ca_cert_dn, ca_cert_ctx->cert[i]->cert_dn) == 0) { /* use this CA certificate for signature verification */ match_ca_cert = true; ctx = ca_cert_ctx->cert[i]->rsa_ctx->bi_ctx; mod = ca_cert_ctx->cert[i]->rsa_ctx->m; expn = ca_cert_ctx->cert[i]->rsa_ctx->e; break; } i++; } } /* couldn't find a trusted cert (& let self-signed errors be returned) */ if (!match_ca_cert && !is_self_signed) { ret = X509_VFY_ERROR_NO_TRUSTED_CERT; goto end_verify; } } else if (asn1_compare_dn(cert->ca_cert_dn, next_cert->cert_dn) != 0) { /* check the chain */ ret = X509_VFY_ERROR_INVALID_CHAIN; goto end_verify; } else /* use the next certificate in the chain for signature verify */ { ctx = next_cert->rsa_ctx->bi_ctx; mod = next_cert->rsa_ctx->m; expn = next_cert->rsa_ctx->e; } /* cert is self signed */ if (!match_ca_cert && is_self_signed) { ret = X509_VFY_ERROR_SELF_SIGNED; goto end_verify; } /* check the signature */ cert_sig = sig_verify(ctx, cert->signature, cert->sig_len, cert->sig_type, bi_clone(ctx, mod), bi_clone(ctx, expn)); if (cert_sig && cert->digest) { if (bi_compare(cert_sig, cert->digest) != 0) ret = X509_VFY_ERROR_BAD_SIGNATURE; bi_free(ctx, cert_sig); } else { ret = X509_VFY_ERROR_BAD_SIGNATURE; } bi_clear_cache(ctx); if (ret) goto end_verify; /* go 
down the certificate chain using recursion. */ if (next_cert != NULL) { (*pathLenConstraint)++; /* don't include last certificate */ ret = x509_verify(ca_cert_ctx, next_cert, pathLenConstraint); } end_verify: return ret; } #endif #if defined (CONFIG_SSL_FULL_MODE) /** * Used for diagnostics. */ void x509_print(const X509_CTX *cert, CA_CERT_CTX *ca_cert_ctx) { if (cert == NULL) return; char not_part_of_cert[30]; strcpy_P(not_part_of_cert, "<Not Part Of Certificate>"); char critical[16]; strcpy_P(critical, "critical, "); printf("=== CERTIFICATE ISSUED TO ===\n"); printf("Common Name (CN):\t\t"); printf("%s\n", cert->cert_dn[X509_COMMON_NAME] ? cert->cert_dn[X509_COMMON_NAME] : not_part_of_cert); printf("Organization (O):\t\t"); printf("%s\n", cert->cert_dn[X509_ORGANIZATION] ? cert->cert_dn[X509_ORGANIZATION] : not_part_of_cert); if (cert->cert_dn[X509_ORGANIZATIONAL_UNIT]) { printf("Organizational Unit (OU):\t"); printf("%s\n", cert->cert_dn[X509_ORGANIZATIONAL_UNIT]); } if (cert->cert_dn[X509_LOCATION]) { printf("Location (L):\t\t\t"); printf("%s\n", cert->cert_dn[X509_LOCATION]); } if (cert->cert_dn[X509_COUNTRY]) { printf("Country (C):\t\t\t"); printf("%s\n", cert->cert_dn[X509_COUNTRY]); } if (cert->cert_dn[X509_STATE]) { printf("State (ST):\t\t\t"); printf("%s\n", cert->cert_dn[X509_STATE]); } if (cert->basic_constraint_present) { printf("Basic Constraints:\t\t%sCA:%s, pathlen:%d\n", cert->basic_constraint_is_critical ? critical : "", cert->basic_constraint_cA? "TRUE" : "FALSE", cert->basic_constraint_pathLenConstraint); } if (cert->key_usage_present) { printf("Key Usage:\t\t\t%s", cert->key_usage_is_critical ? 
critical : ""); bool has_started = false; if (IS_SET_KEY_USAGE_FLAG(cert, KEY_USAGE_DIGITAL_SIGNATURE)) { printf("Digital Signature"); has_started = true; } if (IS_SET_KEY_USAGE_FLAG(cert, KEY_USAGE_NON_REPUDIATION)) { if (has_started) printf(", "); printf("Non Repudiation"); has_started = true; } if (IS_SET_KEY_USAGE_FLAG(cert, KEY_USAGE_KEY_ENCIPHERMENT)) { if (has_started) printf(", "); printf("Key Encipherment"); has_started = true; } if (IS_SET_KEY_USAGE_FLAG(cert, KEY_USAGE_DATA_ENCIPHERMENT)) { if (has_started) printf(", "); printf("Data Encipherment"); has_started = true; } if (IS_SET_KEY_USAGE_FLAG(cert, KEY_USAGE_KEY_AGREEMENT)) { if (has_started) printf(", "); printf("Key Agreement"); has_started = true; } if (IS_SET_KEY_USAGE_FLAG(cert, KEY_USAGE_KEY_CERT_SIGN)) { if (has_started) printf(", "); printf("Key Cert Sign"); has_started = true; } if (IS_SET_KEY_USAGE_FLAG(cert, KEY_USAGE_CRL_SIGN)) { if (has_started) printf(", "); printf("CRL Sign"); has_started = true; } if (IS_SET_KEY_USAGE_FLAG(cert, KEY_USAGE_ENCIPHER_ONLY)) { if (has_started) printf(", "); printf("Encipher Only"); has_started = true; } if (IS_SET_KEY_USAGE_FLAG(cert, KEY_USAGE_DECIPHER_ONLY)) { if (has_started) printf(", "); printf("Decipher Only"); has_started = true; } printf("\n"); } if (cert->subject_alt_name_present) { printf("Subject Alt Name:\t\t%s", cert->subject_alt_name_is_critical ? critical : ""); if (cert->subject_alt_dnsnames) { int i = 0; while (cert->subject_alt_dnsnames[i]) printf("%s ", cert->subject_alt_dnsnames[i++]); } printf("\n"); } printf("=== CERTIFICATE ISSUED BY ===\n"); printf("Common Name (CN):\t\t"); printf("%s\n", cert->ca_cert_dn[X509_COMMON_NAME] ? cert->ca_cert_dn[X509_COMMON_NAME] : not_part_of_cert); printf("Organization (O):\t\t"); printf("%s\n", cert->ca_cert_dn[X509_ORGANIZATION] ? 
cert->ca_cert_dn[X509_ORGANIZATION] : not_part_of_cert); if (cert->ca_cert_dn[X509_ORGANIZATIONAL_UNIT]) { printf("Organizational Unit (OU):\t"); printf("%s\n", cert->ca_cert_dn[X509_ORGANIZATIONAL_UNIT]); } if (cert->ca_cert_dn[X509_LOCATION]) { printf("Location (L):\t\t\t"); printf("%s\n", cert->ca_cert_dn[X509_LOCATION]); } if (cert->ca_cert_dn[X509_COUNTRY]) { printf("Country (C):\t\t\t"); printf("%s\n", cert->ca_cert_dn[X509_COUNTRY]); } if (cert->ca_cert_dn[X509_STATE]) { printf("State (ST):\t\t\t"); printf("%s\n", cert->ca_cert_dn[X509_STATE]); } printf("Not Before:\t\t\t%s", ctime(&cert->not_before)); printf("Not After:\t\t\t%s", ctime(&cert->not_after)); printf("RSA bitsize:\t\t\t%d\n", cert->rsa_ctx->num_octets*8); printf("Sig Type:\t\t\t"); switch (cert->sig_type) { case SIG_TYPE_MD5: printf("MD5\n"); break; case SIG_TYPE_SHA1: printf("SHA1\n"); break; case SIG_TYPE_SHA256: printf("SHA256\n"); break; case SIG_TYPE_SHA384: printf("SHA384\n"); break; case SIG_TYPE_SHA512: printf("SHA512\n"); break; default: printf("Unrecognized: %d\n", cert->sig_type); break; } if (ca_cert_ctx) { int pathLenConstraint = 0; char buff[64]; printf("Verify:\t\t\t\t%s\n", x509_display_error(x509_verify(ca_cert_ctx, cert, &pathLenConstraint), buff)); } #if 0 print_blob("Signature", cert->signature, cert->sig_len); bi_print("Modulus", cert->rsa_ctx->m); bi_print("Pub Exp", cert->rsa_ctx->e); #endif if (ca_cert_ctx) { x509_print(cert->next, ca_cert_ctx); } TTY_FLUSH(); } const char * x509_display_error(int error, char *buff) { switch (error) { case X509_OK: strcpy_P(buff, "Certificate verify successful"); return buff; case X509_NOT_OK: strcpy_P(buff, "X509 not ok"); return buff; case X509_VFY_ERROR_NO_TRUSTED_CERT: strcpy_P(buff, "No trusted cert is available"); return buff; case X509_VFY_ERROR_BAD_SIGNATURE: strcpy_P(buff, "Bad signature"); return buff; case X509_VFY_ERROR_NOT_YET_VALID: strcpy_P(buff, "Cert is not yet valid"); return buff; case X509_VFY_ERROR_EXPIRED: 
strcpy_P(buff, "Cert has expired"); return buff; case X509_VFY_ERROR_SELF_SIGNED: strcpy_P(buff, "Cert is self-signed"); return buff; case X509_VFY_ERROR_INVALID_CHAIN: strcpy_P(buff, "Chain is invalid (check order of certs)"); return buff; case X509_VFY_ERROR_UNSUPPORTED_DIGEST: strcpy_P(buff, "Unsupported digest"); return buff; case X509_INVALID_PRIV_KEY: strcpy_P(buff, "Invalid private key"); return buff; case X509_VFY_ERROR_BASIC_CONSTRAINT: strcpy_P(buff, "Basic constraint invalid"); return buff; default: strcpy_P(buff, "Unknown"); return buff; } } #endif /* CONFIG_SSL_FULL_MODE */
./CrossVul/dataset_final_sorted/CWE-347/c/good_327_1
crossvul-cpp_data_good_3998_0
#include "curveMath.h"
#include <string.h>

/**
 * Compare two affine points for coordinate equality.
 *
 * @return 1 if both x and y coordinates are equal, 0 otherwise.
 */
int pointZZ_pEqual(const PointZZ_p * op1, const PointZZ_p * op2) {
    // check x coords
    if(mpz_cmp(op1->x, op2->x) != 0) {
        return 0;
    }
    // check y coords
    if(mpz_cmp(op1->y, op2->y) != 0) {
        return 0;
    }

    return 1;
}

/**
 * The identity element (point at infinity) is represented by the
 * sentinel coordinates (0, 0), which is not a valid affine point on the
 * curves used here.
 *
 * @return 1 if op is the identity sentinel, 0 otherwise.
 */
int pointZZ_pIsIdentityElement(const PointZZ_p * op) {
    return mpz_cmp_ui(op->x, 0) == 0 && mpz_cmp_ui(op->y, 0) == 0 ? 1 : 0;
}

/** Set op to the identity sentinel (0, 0). op's coords must be initialized. */
void pointZZ_pSetToIdentityElement(PointZZ_p * op) {
    mpz_set_ui(op->x, 0);
    mpz_set_ui(op->y, 0);
}

/**
 * Point doubling in affine coordinates: rop = 2 * op (mod curve->p).
 * rop's coordinates must already be initialized; rop may alias op's values
 * only through the local temporaries used below.
 */
void pointZZ_pDouble(PointZZ_p * rop, const PointZZ_p * op, const CurveZZ_p * curve) {
    /* 2 * identity == identity */
    if(pointZZ_pIsIdentityElement(op)) {
        return pointZZ_pSetToIdentityElement(rop);
    }

    mpz_t numer, denom, lambda;
    mpz_inits(numer, denom, lambda, NULL);

    // calculate lambda = (3*x^2 + a) / (2*y) mod p
    mpz_mul(numer, op->x, op->x);
    mpz_mul_ui(numer, numer, 3);
    mpz_add(numer, numer, curve->a);
    mpz_mul_ui(denom, op->y, 2);
    mpz_invert(denom, denom, curve->p);  // TODO check status
    mpz_mul(lambda, numer, denom);
    mpz_mod(lambda, lambda, curve->p);

    // calculate resulting x coord: lambda^2 - 2x
    mpz_mul(rop->x, lambda, lambda);
    mpz_sub(rop->x, rop->x, op->x);
    mpz_sub(rop->x, rop->x, op->x);
    mpz_mod(rop->x, rop->x, curve->p);

    // calculate resulting y coord: lambda*(x - x') - y
    mpz_sub(rop->y, op->x, rop->x);
    mpz_mul(rop->y, lambda, rop->y);
    mpz_sub(rop->y, rop->y, op->y);
    mpz_mod(rop->y, rop->y, curve->p);

    mpz_clears(numer, denom, lambda, NULL);
}

/**
 * Point addition in affine coordinates: rop = op1 + op2 (mod curve->p).
 * Handles the identity element, doubling (op1 == op2), and inverse points
 * (op1 + (-op1) == identity).  rop's coordinates must be initialized.
 */
void pointZZ_pAdd(PointZZ_p * rop, const PointZZ_p * op1, const PointZZ_p * op2, const CurveZZ_p * curve) {
    // handle identity element cases
    if(pointZZ_pIsIdentityElement(op1) && pointZZ_pIsIdentityElement(op2)) {
        return pointZZ_pSetToIdentityElement(rop);
    }
    else if(pointZZ_pIsIdentityElement(op1)) {
        mpz_set(rop->x, op2->x);
        mpz_set(rop->y, op2->y);
        return;
    }
    else if(pointZZ_pIsIdentityElement(op2)) {
        mpz_set(rop->x, op1->x);
        mpz_set(rop->y, op1->y);
        return;
    }

    // use doubling algorithm if points are equal
    if(pointZZ_pEqual(op1, op2)) {
        pointZZ_pDouble(rop, op1, curve);
        return;
    }

    // check if points sum to identity element (op2 == -op1)
    mpz_t negy;
    mpz_init(negy);
    mpz_sub(negy, curve->p, op2->y);
    if(mpz_cmp(op1->x, op2->x) == 0 && mpz_cmp(op1->y, negy) == 0) {
        mpz_clear(negy);
        return pointZZ_pSetToIdentityElement(rop);
    }

    mpz_t xdiff, ydiff, lambda;
    mpz_inits(xdiff, ydiff, lambda, NULL);

    // calculate lambda = (y2 - y1) / (x2 - x1) mod p
    mpz_sub(ydiff, op2->y, op1->y);
    mpz_sub(xdiff, op2->x, op1->x);
    mpz_invert(xdiff, xdiff, curve->p);  // TODO check status
    mpz_mul(lambda, ydiff, xdiff);
    mpz_mod(lambda, lambda, curve->p);

    // calculate resulting x coord: lambda^2 - x1 - x2
    mpz_mul(rop->x, lambda, lambda);
    mpz_sub(rop->x, rop->x, op1->x);
    mpz_sub(rop->x, rop->x, op2->x);
    mpz_mod(rop->x, rop->x, curve->p);

    // calculate resulting y coord: lambda*(x1 - x') - y1
    mpz_sub(rop->y, op1->x, rop->x);
    mpz_mul(rop->y, lambda, rop->y);
    mpz_sub(rop->y, rop->y, op1->y);
    mpz_mod(rop->y, rop->y, curve->p);

    mpz_clears(negy, xdiff, ydiff, lambda, NULL);
}

/**
 * Scalar multiplication via the Montgomery ladder: rop = scalar * point.
 *
 * Contract (unchanged from the original): rop's coordinates must be
 * UNinitialized on entry — this function initializes them on every path.
 *
 * BUG FIX: the original only initialized rop at the end of the
 * non-identity path (mpz_init_set), so the identity-element early-out
 * called mpz_set_ui on uninitialized mpz_t's — undefined behavior in GMP.
 * rop is now initialized up front before any branch can write to it.
 */
void pointZZ_pMul(PointZZ_p * rop, const PointZZ_p * point, const mpz_t scalar, const CurveZZ_p * curve) {
    /* initialize the result coordinates before any write to rop */
    mpz_inits(rop->x, rop->y, NULL);

    // handle the identity element: scalar * identity == identity
    if(pointZZ_pIsIdentityElement(point)) {
        return pointZZ_pSetToIdentityElement(rop);
    }

    PointZZ_p R0, R1, tmp;
    mpz_inits(R1.x, R1.y, tmp.x, tmp.y, NULL);
    mpz_init_set(R0.x, point->x);
    mpz_init_set(R0.y, point->y);
    pointZZ_pDouble(&R1, point, curve);

    /* Montgomery ladder over the scalar's bits, MSB-1 downwards.
     * NOTE(review): scalar is assumed to be >= 1 (for scalar == 0,
     * mpz_sizeinbase still reports 1 bit and the point itself would be
     * returned) — confirm callers never pass 0. */
    int dbits = mpz_sizeinbase(scalar, 2), i;
    for(i = dbits - 2; i >= 0; i--) {
        if(mpz_tstbit(scalar, i)) {
            mpz_set(tmp.x, R0.x);
            mpz_set(tmp.y, R0.y);
            pointZZ_pAdd(&R0, &R1, &tmp, curve);
            mpz_set(tmp.x, R1.x);
            mpz_set(tmp.y, R1.y);
            pointZZ_pDouble(&R1, &tmp, curve);
        }
        else {
            mpz_set(tmp.x, R1.x);
            mpz_set(tmp.y, R1.y);
            pointZZ_pAdd(&R1, &R0, &tmp, curve);
            mpz_set(tmp.x, R0.x);
            mpz_set(tmp.y, R0.y);
            pointZZ_pDouble(&R0, &tmp, curve);
        }
    }

    /* rop was initialized above, so plain mpz_set suffices here
     * (the original used mpz_init_set at this point) */
    mpz_set(rop->x, R0.x);
    mpz_set(rop->y, R0.y);
    mpz_clears(R0.x, R0.y, R1.x, R1.y, tmp.x, tmp.y, NULL);
}

/**
 * Shamir's trick: rop = scalar1 * point1 + scalar2 * point2 in a single
 * double-and-add pass.
 *
 * NOTE(review): unlike pointZZ_pMul, this writes to rop with mpz_set, so
 * rop's coordinates must already be initialized by the caller — confirm
 * at call sites outside this file.
 */
void pointZZ_pShamirsTrick(PointZZ_p * rop, const PointZZ_p * point1, const mpz_t scalar1,
    const PointZZ_p * point2, const mpz_t scalar2, const CurveZZ_p * curve) {
    PointZZ_p sum, tmp;
    mpz_inits(sum.x, sum.y, tmp.x, tmp.y, NULL);
    pointZZ_pAdd(&sum, point1, point2, curve);

    /* l = index of the highest bit of the longer scalar; that bit of the
     * longer scalar is always set, so one of the branches below fires */
    int scalar1Bits = mpz_sizeinbase(scalar1, 2);
    int scalar2Bits = mpz_sizeinbase(scalar2, 2);
    int l = (scalar1Bits > scalar2Bits ? scalar1Bits : scalar2Bits) - 1;

    if(mpz_tstbit(scalar1, l) && mpz_tstbit(scalar2, l)) {
        mpz_set(rop->x, sum.x);
        mpz_set(rop->y, sum.y);
    }
    else if(mpz_tstbit(scalar1, l)) {
        mpz_set(rop->x, point1->x);
        mpz_set(rop->y, point1->y);
    }
    else if(mpz_tstbit(scalar2, l)) {
        mpz_set(rop->x, point2->x);
        mpz_set(rop->y, point2->y);
    }

    for(l = l - 1; l >= 0; l--) {
        mpz_set(tmp.x, rop->x);
        mpz_set(tmp.y, rop->y);
        pointZZ_pDouble(rop, &tmp, curve);

        mpz_set(tmp.x, rop->x);
        mpz_set(tmp.y, rop->y);
        if(mpz_tstbit(scalar1, l) && mpz_tstbit(scalar2, l)) {
            pointZZ_pAdd(rop, &tmp, &sum, curve);
        }
        else if(mpz_tstbit(scalar1, l)) {
            pointZZ_pAdd(rop, &tmp, point1, curve);
        }
        else if(mpz_tstbit(scalar2, l)) {
            pointZZ_pAdd(rop, &tmp, point2, curve);
        }
    }

    mpz_clears(sum.x, sum.y, tmp.x, tmp.y, NULL);
}


/******************************************************************************
 PYTHON BINDINGS
 ******************************************************************************/
/**
 * curvemath.mul(x, y, d, p, a, b, q, gx, gy) -> (x', y')
 * All arguments and results are base-10 decimal strings.
 * Computes d * (x, y) on the given curve.
 */
static PyObject * curvemath_mul(PyObject *self, PyObject *args) {
    char * x, * y, * d, * p, * a, * b, * q, * gx, * gy;

    if (!PyArg_ParseTuple(args, "sssssssss", &x, &y, &d, &p, &a, &b, &q, &gx, &gy)) {
        return NULL;
    }

    /* pointZZ_pMul initializes result's coordinates itself */
    PointZZ_p result;
    mpz_t scalar;
    mpz_init_set_str(scalar, d, 10);

    CurveZZ_p * curve = buildCurveZZ_p(p, a, b, q, gx, gy, 10);
    PointZZ_p * point = buildPointZZ_p(x, y, 10);

    pointZZ_pMul(&result, point, scalar, curve);
    destroyPointZZ_p(point);
    destroyCurveZZ_p(curve);

    char * resultX = mpz_get_str(NULL, 10, result.x);
    char * resultY = mpz_get_str(NULL, 10, result.y);
    mpz_clears(result.x, result.y, scalar, NULL);

    PyObject * ret = Py_BuildValue("ss", resultX, resultY);
    free(resultX);
    free(resultY);
    return ret;
}

/**
 * curvemath.add(px, py, qx, qy, p, a, b, q, gx, gy) -> (x', y')
 * All arguments and results are base-10 decimal strings.
 * Computes (px, py) + (qx, qy) on the given curve.
 */
static PyObject * curvemath_add(PyObject *self, PyObject *args) {
    char * px, * py, * qx, * qy, * p, * a, * b, * q, * gx, * gy;

    if (!PyArg_ParseTuple(args, "ssssssssss", &px, &py, &qx, &qy, &p, &a, &b, &q, &gx, &gy)) {
        return NULL;
    }

    PointZZ_p result;
    mpz_inits(result.x, result.y, NULL);
    CurveZZ_p * curve = buildCurveZZ_p(p, a, b, q, gx, gy, 10);
    PointZZ_p * P = buildPointZZ_p(px, py, 10);
    PointZZ_p * Q = buildPointZZ_p(qx, qy, 10);

    if(pointZZ_pEqual(P, Q)) {
        pointZZ_pDouble(&result, P, curve);
    }
    else {
        pointZZ_pAdd(&result, P, Q, curve);
    }

    destroyPointZZ_p(P);
    destroyPointZZ_p(Q);
    destroyCurveZZ_p(curve);

    char * resultX = mpz_get_str(NULL, 10, result.x);
    char * resultY = mpz_get_str(NULL, 10, result.y);
    mpz_clears(result.x, result.y, NULL);

    PyObject * ret = Py_BuildValue("ss", resultX, resultY);
    free(resultX);
    free(resultY);
    return ret;
}


static PyMethodDef curvemath__methods__[] = {
    {"mul", curvemath_mul, METH_VARARGS, "Multiply a curve point by an integer scalar."},
    {"add", curvemath_add, METH_VARARGS, "Add two points on a curve."},
    {NULL, NULL, 0, NULL} /* Sentinel */
};


#if PY_MAJOR_VERSION >= 3
static struct PyModuleDef moduledef = {
    PyModuleDef_HEAD_INIT,
    "curvemath",            /* m_name */
    NULL,                   /* m_doc */
    -1,                     /* m_size */
    curvemath__methods__,   /* m_methods */
    NULL,                   /* m_reload */
    NULL,                   /* m_traverse */
    NULL,                   /* m_clear */
    NULL,                   /* m_free */
};

PyMODINIT_FUNC PyInit_curvemath(void) {
    PyObject * m = PyModule_Create(&moduledef);
    return m;
}
#else
PyMODINIT_FUNC initcurvemath(void) {
    Py_InitModule("curvemath", curvemath__methods__);
}
#endif
./CrossVul/dataset_final_sorted/CWE-347/c/good_3998_0
crossvul-cpp_data_bad_724_0
/* Copyright (C) 2007-2017 Open Information Security Foundation
 *
 * You can copy, redistribute or modify this Program under the terms of
 * the GNU General Public License version 2 as published by the Free
 * Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */

/**
 * \file
 *
 * \author Victor Julien <victor@inliniac.net>
 *
 * Basic detection engine
 */

#include "suricata-common.h"
#include "suricata.h"
#include "conf.h"
#include "decode.h"
#include "flow.h"
#include "stream-tcp.h"
#include "app-layer.h"
#include "app-layer-parser.h"

#include "detect.h"
#include "detect-engine.h"
#include "detect-engine-profile.h"

#include "detect-engine-alert.h"
#include "detect-engine-siggroup.h"
#include "detect-engine-address.h"
#include "detect-engine-proto.h"
#include "detect-engine-port.h"
#include "detect-engine-mpm.h"
#include "detect-engine-iponly.h"
#include "detect-engine-threshold.h"
#include "detect-engine-prefilter.h"
#include "detect-engine-state.h"
#include "detect-engine-analyzer.h"
#include "detect-engine-filedata.h"
#include "detect-engine-payload.h"
#include "detect-engine-event.h"
#include "detect-engine-hcbd.h"
#include "detect-engine-hsbd.h"

#include "detect-filestore.h"
#include "detect-flowvar.h"
#include "detect-replace.h"

#include "util-validate.h"
#include "util-detect.h"

/* Per-packet working state shared between the stages of a detection run.
 * Built once by DetectRunSetup and threaded through the pipeline. */
typedef struct DetectRunScratchpad {
    const AppProto alproto;
    const uint8_t flow_flags; /* flow/state flags: STREAM_* */
    const bool app_decoder_events;
    const SigGroupHead *sgh;
    SignatureMask pkt_mask;
} DetectRunScratchpad;

/* prototypes */
static DetectRunScratchpad DetectRunSetup(const DetectEngineCtx *de_ctx,
        DetectEngineThreadCtx *det_ctx, Packet * const p, Flow * const pflow);
static void DetectRunInspectIPOnly(ThreadVars *tv, const DetectEngineCtx *de_ctx,
        DetectEngineThreadCtx *det_ctx, Flow * const pflow, Packet * const p);
static inline void DetectRunGetRuleGroup(const DetectEngineCtx *de_ctx,
        Packet * const p, Flow * const pflow, DetectRunScratchpad *scratch);
static inline void DetectRunPrefilterPkt(ThreadVars *tv,
        DetectEngineCtx *de_ctx, DetectEngineThreadCtx *det_ctx, Packet *p,
        DetectRunScratchpad *scratch);
static inline void DetectRulePacketRules(ThreadVars * const tv,
        DetectEngineCtx * const de_ctx, DetectEngineThreadCtx * const det_ctx,
        Packet * const p, Flow * const pflow, const DetectRunScratchpad *scratch);
static void DetectRunTx(ThreadVars *tv, DetectEngineCtx *de_ctx,
        DetectEngineThreadCtx *det_ctx, Packet *p, Flow *f,
        DetectRunScratchpad *scratch);
static inline void DetectRunPostRules(ThreadVars *tv, DetectEngineCtx *de_ctx,
        DetectEngineThreadCtx *det_ctx, Packet * const p, Flow * const pflow,
        DetectRunScratchpad *scratch);
static void DetectRunCleanup(DetectEngineThreadCtx *det_ctx,
        Packet *p, Flow * const pflow);

/** \internal
 *  \brief Top-level per-packet detection pipeline: setup, IP-only rules,
 *  rule-group lookup, packet prefilters, packet rules, tx/state rules,
 *  then post-rule handling and cleanup.
 */
static void DetectRun(ThreadVars *th_v,
        DetectEngineCtx *de_ctx, DetectEngineThreadCtx *det_ctx,
        Packet *p)
{
    SCEnter();
    SCLogDebug("pcap_cnt %"PRIu64, p->pcap_cnt);

    /* bail early if packet should not be inspected */
    if (p->flags & PKT_NOPACKET_INSPECTION) {
        /* nothing to do */
        SCReturn;
    }

    /* Load the Packet's flow early, even though it might not be needed.
     * Mark as a constant pointer, although the flow itself can change. */
    Flow * const pflow = p->flow;

    DetectRunScratchpad scratch = DetectRunSetup(de_ctx, det_ctx, p, pflow);

    /* run the IPonly engine */
    DetectRunInspectIPOnly(th_v, de_ctx, det_ctx, pflow, p);

    /* get our rule group */
    DetectRunGetRuleGroup(de_ctx, p, pflow, &scratch);
    /* if we didn't get a sig group head, we
     * have nothing to do.... */
    if (scratch.sgh == NULL) {
        SCLogDebug("no sgh for this packet, nothing to match against");
        goto end;
    }

    /* run the prefilters for packets */
    DetectRunPrefilterPkt(th_v, de_ctx, det_ctx, p, &scratch);

    PACKET_PROFILING_DETECT_START(p, PROF_DETECT_RULES);
    /* inspect the rules against the packet */
    DetectRulePacketRules(th_v, de_ctx, det_ctx, p, pflow, &scratch);
    PACKET_PROFILING_DETECT_END(p, PROF_DETECT_RULES);

    /* run tx/state inspection */
    if (pflow && pflow->alstate) {
        PACKET_PROFILING_DETECT_START(p, PROF_DETECT_TX);
        DetectRunTx(th_v, de_ctx, det_ctx, p, pflow, &scratch);
        PACKET_PROFILING_DETECT_END(p, PROF_DETECT_TX);
    }

end:
    DetectRunPostRules(th_v, de_ctx, det_ctx, p, pflow, &scratch);

    DetectRunCleanup(det_ctx, p, pflow);
    SCReturn;
}

/* Run a matched signature's post-match keyword list, content replacement,
 * and filestore handling. */
static void DetectRunPostMatch(ThreadVars *tv,
                               DetectEngineThreadCtx *det_ctx, Packet *p,
                               const Signature *s)
{
    /* run the packet match functions */
    const SigMatchData *smd = s->sm_arrays[DETECT_SM_LIST_POSTMATCH];
    if (smd != NULL) {
        KEYWORD_PROFILING_SET_LIST(det_ctx, DETECT_SM_LIST_POSTMATCH);

        SCLogDebug("running match functions, sm %p", smd);

        while (1) {
            KEYWORD_PROFILING_START;
            (void)sigmatch_table[smd->type].Match(tv, det_ctx, p, s, smd->ctx);
            KEYWORD_PROFILING_END(det_ctx, smd->type, 1);
            if (smd->is_last)
                break;
            smd++;
        }
    }

    DetectReplaceExecute(p, det_ctx);

    if (s->flags & SIG_FLAG_FILESTORE)
        DetectFilestorePostMatch(tv, det_ctx, p, s);

    return;
}

/**
 *  \brief Get the SigGroupHead for a packet.
 *
 *  \param de_ctx detection engine context
 *  \param p packet
 *
 *  \retval sgh the SigGroupHead or NULL if non applies to the packet
 */
const SigGroupHead *SigMatchSignaturesGetSgh(const DetectEngineCtx *de_ctx,
        const Packet *p)
{
    SCEnter();

    int f;
    SigGroupHead *sgh = NULL;

    /* if the packet proto is 0 (not set), we're inspecting it against
     * the decoder events sgh we have. */
    if (p->proto == 0 && p->events.cnt > 0) {
        SCReturnPtr(de_ctx->decoder_event_sgh, "SigGroupHead");
    } else if (p->proto == 0) {
        if (!(PKT_IS_IPV4(p) || PKT_IS_IPV6(p))) {
            /* not IP, so nothing to do */
            SCReturnPtr(NULL, "SigGroupHead");
        }
    }

    /* select the flow_gh: 0 = toclient, 1 = toserver */
    if (p->flowflags & FLOW_PKT_TOCLIENT)
        f = 0;
    else
        f = 1;

    int proto = IP_GET_IPPROTO(p);
    if (proto == IPPROTO_TCP) {
        DetectPort *list = de_ctx->flow_gh[f].tcp;
        SCLogDebug("tcp toserver %p, tcp toclient %p: going to use %p",
                de_ctx->flow_gh[1].tcp, de_ctx->flow_gh[0].tcp,
                de_ctx->flow_gh[f].tcp);
        uint16_t port = f ? p->dp : p->sp;
        SCLogDebug("tcp port %u -> %u:%u", port, p->sp, p->dp);
        DetectPort *sghport = DetectPortLookupGroup(list, port);
        if (sghport != NULL)
            sgh = sghport->sh;
        SCLogDebug("TCP list %p, port %u, direction %s, sghport %p, sgh %p",
                list, port, f ? "toserver" : "toclient", sghport, sgh);
    } else if (proto == IPPROTO_UDP) {
        DetectPort *list = de_ctx->flow_gh[f].udp;
        uint16_t port = f ? p->dp : p->sp;
        DetectPort *sghport = DetectPortLookupGroup(list, port);
        if (sghport != NULL)
            sgh = sghport->sh;
        SCLogDebug("UDP list %p, port %u, direction %s, sghport %p, sgh %p",
                list, port, f ? "toserver" : "toclient", sghport, sgh);
    } else {
        sgh = de_ctx->flow_gh[f].sgh[proto];
    }

    SCReturnPtr(sgh, "SigGroupHead");
}

/* Merge the (sorted, possibly duplicated) mpm match-id list with the sorted
 * non-prefilter id list into det_ctx->match_array, deduplicating as it goes.
 * An id present on BOTH lists is a negated mpm pattern and is dropped. */
static inline void DetectPrefilterMergeSort(DetectEngineCtx *de_ctx,
                                            DetectEngineThreadCtx *det_ctx)
{
    SigIntId mpm, nonmpm;
    det_ctx->match_array_cnt = 0;
    SigIntId *mpm_ptr = det_ctx->pmq.rule_id_array;
    SigIntId *nonmpm_ptr = det_ctx->non_pf_id_array;
    uint32_t m_cnt = det_ctx->pmq.rule_id_array_cnt;
    uint32_t n_cnt = det_ctx->non_pf_id_cnt;
    SigIntId *final_ptr;
    uint32_t final_cnt;
    SigIntId id;
    SigIntId previous_id = (SigIntId)-1;
    Signature **sig_array = de_ctx->sig_array;
    Signature **match_array = det_ctx->match_array;
    Signature *s;

    SCLogDebug("PMQ rule id array count %d", det_ctx->pmq.rule_id_array_cnt);

    /* Load first values. */
    if (likely(m_cnt)) {
        mpm = *mpm_ptr;
    } else {
        /* mpm list is empty */
        final_ptr = nonmpm_ptr;
        final_cnt = n_cnt;
        goto final;
    }
    if (likely(n_cnt)) {
        nonmpm = *nonmpm_ptr;
    } else {
        /* non-mpm list is empty. */
        final_ptr = mpm_ptr;
        final_cnt = m_cnt;
        goto final;
    }
    while (1) {
        if (mpm < nonmpm) {
            /* Take from mpm list */
            id = mpm;

            s = sig_array[id];
            /* As the mpm list can contain duplicates, check for that here. */
            if (likely(id != previous_id)) {
                *match_array++ = s;
                previous_id = id;
            }
            if (unlikely(--m_cnt == 0)) {
                /* mpm list is now empty */
                final_ptr = nonmpm_ptr;
                final_cnt = n_cnt;
                goto final;
            }
            mpm_ptr++;
            mpm = *mpm_ptr;
        } else if (mpm > nonmpm) {
            id = nonmpm;

            s = sig_array[id];
            /* As the mpm list can contain duplicates, check for that here. */
            if (likely(id != previous_id)) {
                *match_array++ = s;
                previous_id = id;
            }
            if (unlikely(--n_cnt == 0)) {
                final_ptr = mpm_ptr;
                final_cnt = m_cnt;
                goto final;
            }
            nonmpm_ptr++;
            nonmpm = *nonmpm_ptr;

        } else { /* implied mpm == nonmpm */
            /* special case: if on both lists, it's a negated mpm pattern */

            /* mpm list may have dups, so skip past them here */
            while (--m_cnt != 0) {
                mpm_ptr++;
                mpm = *mpm_ptr;
                if (mpm != nonmpm)
                    break;
            }
            /* if mpm is done, update nonmpm_ptrs and jump to final */
            if (unlikely(m_cnt == 0)) {
                n_cnt--;

                /* mpm list is now empty */
                final_ptr = ++nonmpm_ptr;
                final_cnt = n_cnt;
                goto final;
            }
            /* otherwise, if nonmpm is done jump to final for mpm
             * mpm ptrs alrady updated */
            if (unlikely(--n_cnt == 0)) {
                final_ptr = mpm_ptr;
                final_cnt = m_cnt;
                goto final;
            }

            /* not at end of the lists, update nonmpm. Mpm already
             * updated in while loop above. */
            nonmpm_ptr++;
            nonmpm = *nonmpm_ptr;
        }
    }

final: /* Only one list remaining. Just walk that list. */
    while (final_cnt-- > 0) {
        id = *final_ptr++;
        s = sig_array[id];
        /* As the mpm list can contain duplicates, check for that here. */
        if (likely(id != previous_id)) {
            *match_array++ = s;
            previous_id = id;
        }
    }

    det_ctx->match_array_cnt = match_array - det_ctx->match_array;

    BUG_ON((det_ctx->pmq.rule_id_array_cnt + det_ctx->non_pf_id_cnt) < det_ctx->match_array_cnt);
}

/* Filter the selected non-prefilter store down to candidate rule ids whose
 * signature mask and app protocol are compatible with this packet. */
static inline void
DetectPrefilterBuildNonPrefilterList(DetectEngineThreadCtx *det_ctx,
                                     SignatureMask mask, uint8_t alproto)
{
    uint32_t x = 0;
    for (x = 0; x < det_ctx->non_pf_store_cnt; x++) {
        /* only if the mask matches this rule can possibly match,
         * so build the non_mpm array only for match candidates */
        const SignatureMask rule_mask = det_ctx->non_pf_store_ptr[x].mask;
        const uint8_t rule_alproto = det_ctx->non_pf_store_ptr[x].alproto;
        if ((rule_mask & mask) == rule_mask &&
            (rule_alproto == 0 || rule_alproto == alproto)) {
            det_ctx->non_pf_id_array[det_ctx->non_pf_id_cnt++] = det_ctx->non_pf_store_ptr[x].id;
        }
    }
}

/** \internal
 *  \brief select non-mpm list
 *  Based on the packet properties, select the non-mpm list to use
 *  \todo move non_pf_store* into scratchpad */
static inline void
DetectPrefilterSetNonPrefilterList(const Packet *p, DetectEngineThreadCtx *det_ctx, DetectRunScratchpad *scratch)
{
    /* TCP SYN packets get the dedicated syn store, everything else the
     * generic one */
    if ((p->proto == IPPROTO_TCP) && (p->tcph != NULL) && (p->tcph->th_flags & TH_SYN)) {
        det_ctx->non_pf_store_ptr = scratch->sgh->non_pf_syn_store_array;
        det_ctx->non_pf_store_cnt = scratch->sgh->non_pf_syn_store_cnt;
    } else {
        det_ctx->non_pf_store_ptr = scratch->sgh->non_pf_other_store_array;
        det_ctx->non_pf_store_cnt = scratch->sgh->non_pf_other_store_cnt;
    }
    SCLogDebug("sgh non_pf ptr %p cnt %u (syn %p/%u, other %p/%u)",
            det_ctx->non_pf_store_ptr, det_ctx->non_pf_store_cnt,
            scratch->sgh->non_pf_syn_store_array, scratch->sgh->non_pf_syn_store_cnt,
            scratch->sgh->non_pf_other_store_array, scratch->sgh->non_pf_other_store_cnt);
}

/** \internal
 *  \brief update flow's file tracking flags based on the detection engine
 */
static inline void
DetectPostInspectFileFlagsUpdate(Flow *pflow, const SigGroupHead *sgh, uint8_t direction)
{
    /* see if this sgh requires us to consider file storing */
    if (!FileForceFilestore() && (sgh == NULL || sgh->filestore_cnt == 0)) {
        FileDisableStoring(pflow, direction);
    }
#ifdef HAVE_MAGIC
    /* see if this sgh requires us to consider file magic */
    if (!FileForceMagic() && (sgh == NULL ||
                !(sgh->flags & SIG_GROUP_HEAD_HAVEFILEMAGIC)))
    {
        SCLogDebug("disabling magic for flow");
        FileDisableMagic(pflow, direction);
    }
#endif
    /* see if this sgh requires us to consider file md5 */
    if (!FileForceMd5() && (sgh == NULL ||
                !(sgh->flags & SIG_GROUP_HEAD_HAVEFILEMD5)))
    {
        SCLogDebug("disabling md5 for flow");
        FileDisableMd5(pflow, direction);
    }

    /* see if this sgh requires us to consider file sha1 */
    if (!FileForceSha1() && (sgh == NULL ||
                !(sgh->flags & SIG_GROUP_HEAD_HAVEFILESHA1)))
    {
        SCLogDebug("disabling sha1 for flow");
        FileDisableSha1(pflow, direction);
    }

    /* see if this sgh requires us to consider file sha256 */
    if (!FileForceSha256() && (sgh == NULL ||
                !(sgh->flags & SIG_GROUP_HEAD_HAVEFILESHA256)))
    {
        SCLogDebug("disabling sha256 for flow");
        FileDisableSha256(pflow, direction);
    }

    /* see if this sgh requires us to consider filesize */
    if (sgh == NULL || !(sgh->flags & SIG_GROUP_HEAD_HAVEFILESIZE))
    {
        SCLogDebug("disabling filesize for flow");
        FileDisableFilesize(pflow, direction);
    }
}

/* First time a direction's rule group is resolved for a flow: cache it on
 * the flow, optionally disable raw stream reassembly, and update the flow's
 * file tracking flags. */
static inline void
DetectRunPostGetFirstRuleGroup(const Packet *p, Flow *pflow, const SigGroupHead *sgh)
{
    if ((p->flowflags & FLOW_PKT_TOSERVER) && !(pflow->flags & FLOW_SGH_TOSERVER)) {
        /* first time we see this toserver sgh, store it */
        pflow->sgh_toserver = sgh;
        pflow->flags |= FLOW_SGH_TOSERVER;

        if (p->proto == IPPROTO_TCP && (sgh == NULL || !(sgh->flags & SIG_GROUP_HEAD_HAVERAWSTREAM))) {
            if (pflow->protoctx != NULL) {
                TcpSession *ssn = pflow->protoctx;
                SCLogDebug("STREAMTCP_STREAM_FLAG_DISABLE_RAW ssn.client");
                ssn->client.flags |= STREAMTCP_STREAM_FLAG_DISABLE_RAW;
            }
        }

        DetectPostInspectFileFlagsUpdate(pflow,
                pflow->sgh_toserver, STREAM_TOSERVER);

    } else if ((p->flowflags & FLOW_PKT_TOCLIENT) && !(pflow->flags & FLOW_SGH_TOCLIENT)) {
        pflow->sgh_toclient = sgh;
        pflow->flags |= FLOW_SGH_TOCLIENT;

        if (p->proto == IPPROTO_TCP && (sgh == NULL || !(sgh->flags & SIG_GROUP_HEAD_HAVERAWSTREAM))) {
            if (pflow->protoctx != NULL) {
                TcpSession *ssn = pflow->protoctx;
                SCLogDebug("STREAMTCP_STREAM_FLAG_DISABLE_RAW ssn.server");
                ssn->server.flags |= STREAMTCP_STREAM_FLAG_DISABLE_RAW;
            }
        }

        DetectPostInspectFileFlagsUpdate(pflow,
                pflow->sgh_toclient, STREAM_TOCLIENT);
    }
}

/* Resolve the rule group (sgh) for this packet, preferring the one cached
 * on the flow for the packet's direction. */
static inline void DetectRunGetRuleGroup(
    const DetectEngineCtx *de_ctx,
    Packet * const p, Flow * const pflow,
    DetectRunScratchpad *scratch)
{
    const SigGroupHead *sgh = NULL;

    if (pflow) {
        bool use_flow_sgh = false;
        /* Get the stored sgh from the flow (if any). Make sure we're not using
         * the sgh for icmp error packets part of the same stream. */
        if (IP_GET_IPPROTO(p) == pflow->proto) { /* filter out icmp */
            PACKET_PROFILING_DETECT_START(p, PROF_DETECT_GETSGH);
            if ((p->flowflags & FLOW_PKT_TOSERVER) && (pflow->flags & FLOW_SGH_TOSERVER)) {
                sgh = pflow->sgh_toserver;
                SCLogDebug("sgh = pflow->sgh_toserver; => %p", sgh);
                use_flow_sgh = true;
            } else if ((p->flowflags & FLOW_PKT_TOCLIENT) && (pflow->flags & FLOW_SGH_TOCLIENT)) {
                sgh = pflow->sgh_toclient;
                SCLogDebug("sgh = pflow->sgh_toclient; => %p", sgh);
                use_flow_sgh = true;
            }
            PACKET_PROFILING_DETECT_END(p, PROF_DETECT_GETSGH);
        }

        if (!(use_flow_sgh)) {
            PACKET_PROFILING_DETECT_START(p, PROF_DETECT_GETSGH);
            sgh = SigMatchSignaturesGetSgh(de_ctx, p);
            PACKET_PROFILING_DETECT_END(p, PROF_DETECT_GETSGH);

            /* HACK: prevent the wrong sgh (or NULL) from being stored in the
             * flow's sgh pointers */
            if (PKT_IS_ICMPV4(p) && ICMPV4_DEST_UNREACH_IS_VALID(p)) {
                ; /* no-op */
            } else {
                /* store the found sgh (or NULL) in the flow to save us
                 * from looking it up again for the next packet.
                 * Also run other tasks */
                DetectRunPostGetFirstRuleGroup(p, pflow, sgh);
            }
        }
    } else { /* p->flags & PKT_HAS_FLOW */
        /* no flow */

        PACKET_PROFILING_DETECT_START(p, PROF_DETECT_GETSGH);
        sgh = SigMatchSignaturesGetSgh(de_ctx, p);
        PACKET_PROFILING_DETECT_END(p, PROF_DETECT_GETSGH);
    }

    scratch->sgh = sgh;
}

/* Run the IP-only engine once per direction per flow (or on every flowless
 * packet), and apply a previously established IP-only drop verdict. */
static void DetectRunInspectIPOnly(ThreadVars *tv, const DetectEngineCtx *de_ctx,
        DetectEngineThreadCtx *det_ctx,
        Flow * const pflow, Packet * const p)
{
    if (pflow) {
        /* set the iponly stuff */
        if (pflow->flags & FLOW_TOCLIENT_IPONLY_SET)
            p->flowflags |= FLOW_PKT_TOCLIENT_IPONLY_SET;
        if (pflow->flags & FLOW_TOSERVER_IPONLY_SET)
            p->flowflags |= FLOW_PKT_TOSERVER_IPONLY_SET;

        if (((p->flowflags & FLOW_PKT_TOSERVER) && !(p->flowflags & FLOW_PKT_TOSERVER_IPONLY_SET)) ||
            ((p->flowflags & FLOW_PKT_TOCLIENT) && !(p->flowflags & FLOW_PKT_TOCLIENT_IPONLY_SET)))
        {
            SCLogDebug("testing against \"ip-only\" signatures");

            PACKET_PROFILING_DETECT_START(p, PROF_DETECT_IPONLY);
            IPOnlyMatchPacket(tv, de_ctx, det_ctx, &de_ctx->io_ctx,
                              &det_ctx->io_ctx, p);
            PACKET_PROFILING_DETECT_END(p, PROF_DETECT_IPONLY);

            /* save in the flow that we scanned this direction... */
            FlowSetIPOnlyFlag(pflow, p->flowflags & FLOW_PKT_TOSERVER ? 1 : 0);

        } else if (((p->flowflags & FLOW_PKT_TOSERVER) &&
                   (pflow->flags & FLOW_TOSERVER_IPONLY_SET)) ||
                   ((p->flowflags & FLOW_PKT_TOCLIENT) &&
                   (pflow->flags & FLOW_TOCLIENT_IPONLY_SET)))
        {
            /* If we have a drop from IP only module,
             * we will drop the rest of the flow packets
             * This will apply only to inline/IPS */
            if (pflow->flags & FLOW_ACTION_DROP) {
                PACKET_DROP(p);
            }
        }
    } else { /* p->flags & PKT_HAS_FLOW */
        /* no flow */

        /* Even without flow we should match the packet src/dst */
        PACKET_PROFILING_DETECT_START(p, PROF_DETECT_IPONLY);
        IPOnlyMatchPacket(tv, de_ctx, det_ctx, &de_ctx->io_ctx,
                          &det_ctx->io_ctx, p);
        PACKET_PROFILING_DETECT_END(p, PROF_DETECT_IPONLY);
    }
}

/* returns 0 if no match, 1 if match */
static inline int DetectRunInspectRuleHeader(
    const Packet *p,
    const Flow *f,
    const Signature *s,
    const uint32_t sflags,
    const uint8_t s_proto_flags)
{
    /* check if this signature has a requirement for flowvars of some type
     * and if so, if we actually have any in the flow. If not, the sig
     * can't match and we skip it. */
    if ((p->flags & PKT_HAS_FLOW) && (sflags & SIG_FLAG_REQUIRE_FLOWVAR)) {
        DEBUG_VALIDATE_BUG_ON(f == NULL);

        int m  = f->flowvar ? 1 : 0;

        /* no flowvars? skip this sig */
        if (m == 0) {
            SCLogDebug("skipping sig as the flow has no flowvars and sig "
                    "has SIG_FLAG_REQUIRE_FLOWVAR flag set.");
            return 0;
        }
    }

    if ((s_proto_flags & DETECT_PROTO_IPV4) && !PKT_IS_IPV4(p)) {
        SCLogDebug("ip version didn't match");
        return 0;
    }
    if ((s_proto_flags & DETECT_PROTO_IPV6) && !PKT_IS_IPV6(p)) {
        SCLogDebug("ip version didn't match");
        return 0;
    }

    if (DetectProtoContainsProto(&s->proto, IP_GET_IPPROTO(p)) == 0) {
        SCLogDebug("proto didn't match");
        return 0;
    }

    /* check the source & dst port in the sig */
    if (p->proto == IPPROTO_TCP || p->proto == IPPROTO_UDP || p->proto == IPPROTO_SCTP) {
        if (!(sflags & SIG_FLAG_DP_ANY)) {
            if (p->flags & PKT_IS_FRAGMENT)
                return 0;
            DetectPort *dport = DetectPortLookupGroup(s->dp,p->dp);
            if (dport == NULL) {
                SCLogDebug("dport didn't match.");
                return 0;
            }
        }
        if (!(sflags & SIG_FLAG_SP_ANY)) {
            if (p->flags & PKT_IS_FRAGMENT)
                return 0;
            DetectPort *sport = DetectPortLookupGroup(s->sp,p->sp);
            if (sport == NULL) {
                SCLogDebug("sport didn't match.");
                return 0;
            }
        }
    } else if ((sflags & (SIG_FLAG_DP_ANY|SIG_FLAG_SP_ANY)) != (SIG_FLAG_DP_ANY|SIG_FLAG_SP_ANY)) {
        SCLogDebug("port-less protocol and sig needs ports");
        return 0;
    }

    /* check the destination address */
    if (!(sflags & SIG_FLAG_DST_ANY)) {
        if (PKT_IS_IPV4(p)) {
            if (DetectAddressMatchIPv4(s->addr_dst_match4, s->addr_dst_match4_cnt, &p->dst) == 0)
                return 0;
        } else if (PKT_IS_IPV6(p)) {
            if (DetectAddressMatchIPv6(s->addr_dst_match6, s->addr_dst_match6_cnt, &p->dst) == 0)
                return 0;
        }
    }
    /* check the source address */
    if (!(sflags & SIG_FLAG_SRC_ANY)) {
        if (PKT_IS_IPV4(p)) {
            if (DetectAddressMatchIPv4(s->addr_src_match4, s->addr_src_match4_cnt, &p->src) == 0)
                return 0;
        } else if (PKT_IS_IPV6(p)) {
            if (DetectAddressMatchIPv6(s->addr_src_match6, s->addr_src_match6_cnt, &p->src) == 0)
                return 0;
        }
    }

    return 1;
}

/* returns 0 if no match, 1 if match */
static inline int DetectRunInspectRulePacketMatches(
    ThreadVars *tv,
    DetectEngineThreadCtx *det_ctx,
    Packet *p,
    const Flow *f,
    const Signature *s)
{
    /* run the packet match functions */
    if (s->sm_arrays[DETECT_SM_LIST_MATCH] != NULL) {
        KEYWORD_PROFILING_SET_LIST(det_ctx, DETECT_SM_LIST_MATCH);
        SigMatchData *smd = s->sm_arrays[DETECT_SM_LIST_MATCH];

        SCLogDebug("running match functions, sm %p", smd);
        if (smd != NULL) {
            while (1) {
                KEYWORD_PROFILING_START;
                if (sigmatch_table[smd->type].Match(tv, det_ctx, p, s, smd->ctx) <= 0) {
                    KEYWORD_PROFILING_END(det_ctx, smd->type, 0);
                    SCLogDebug("no match");
                    return 0;
                }
                KEYWORD_PROFILING_END(det_ctx, smd->type, 1);
                if (smd->is_last) {
                    SCLogDebug("match and is_last");
                    break;
                }
                smd++;
            }
        }
    }
    return 1;
}

/** \internal
 *  \brief run packet/stream prefilter engines
 */
static inline void DetectRunPrefilterPkt(
    ThreadVars *tv,
    DetectEngineCtx *de_ctx,
    DetectEngineThreadCtx *det_ctx,
    Packet *p,
    DetectRunScratchpad *scratch
)
{
    DetectPrefilterSetNonPrefilterList(p, det_ctx, scratch);

    /* create our prefilter mask */
    PacketCreateMask(p, &scratch->pkt_mask, scratch->alproto, scratch->app_decoder_events);

    /* build and prefilter non_pf list against the mask of the packet */
    PACKET_PROFILING_DETECT_START(p, PROF_DETECT_NONMPMLIST);
    det_ctx->non_pf_id_cnt = 0;
    if (likely(det_ctx->non_pf_store_cnt > 0)) {
        DetectPrefilterBuildNonPrefilterList(det_ctx, scratch->pkt_mask, scratch->alproto);
    }
    PACKET_PROFILING_DETECT_END(p, PROF_DETECT_NONMPMLIST);

    /* run the prefilter engines */
    Prefilter(det_ctx, scratch->sgh, p, scratch->flow_flags);
    /* create match list if we have non-pf and/or pf */
    if (det_ctx->non_pf_store_cnt || det_ctx->pmq.rule_id_array_cnt) {
        PACKET_PROFILING_DETECT_START(p, PROF_DETECT_PF_SORT2);
        DetectPrefilterMergeSort(de_ctx, det_ctx);
        PACKET_PROFILING_DETECT_END(p, PROF_DETECT_PF_SORT2);
    }

#ifdef PROFILING
    if (tv) {
        StatsAddUI64(tv, det_ctx->counter_mpm_list,
                             (uint64_t)det_ctx->pmq.rule_id_array_cnt);
        StatsAddUI64(tv, det_ctx->counter_nonmpm_list,
                             (uint64_t)det_ctx->non_pf_store_cnt);
        /* non mpm sigs after mask prefilter */
        StatsAddUI64(tv, det_ctx->counter_fnonmpm_list,
                             (uint64_t)det_ctx->non_pf_id_cnt);
    }
#endif
}

/* Walk the merged candidate list and fully evaluate each packet-level rule:
 * mask/dsize/alproto gates, header match, payload match, packet keywords,
 * then post-match and alerting. Stateful (app_inspect) sigs are deferred to
 * DetectRunTx. */
static inline void DetectRulePacketRules(
    ThreadVars * const tv,
    DetectEngineCtx * const de_ctx,
    DetectEngineThreadCtx * const det_ctx,
    Packet * const p,
    Flow * const pflow,
    const DetectRunScratchpad *scratch
)
{
    const Signature *s = NULL;
    const Signature *next_s = NULL;

    /* inspect the sigs against the packet */
    /* Prefetch the next signature. */
    SigIntId match_cnt = det_ctx->match_array_cnt;
#ifdef PROFILING
    if (tv) {
        StatsAddUI64(tv, det_ctx->counter_match_list,
                             (uint64_t)match_cnt);
    }
#endif
    Signature **match_array = det_ctx->match_array;

    SGH_PROFILING_RECORD(det_ctx, scratch->sgh);
#ifdef PROFILING
#ifdef HAVE_LIBJANSSON
    if (match_cnt >= de_ctx->profile_match_logging_threshold)
        RulesDumpMatchArray(det_ctx, scratch->sgh, p);
#endif
#endif

    uint32_t sflags, next_sflags = 0;
    if (match_cnt) {
        next_s = *match_array++;
        next_sflags = next_s->flags;
    }
    while (match_cnt--) {
        RULE_PROFILING_START(p);
        uint8_t alert_flags = 0;
        bool state_alert = false;
#ifdef PROFILING
        bool smatch = false; /* signature match */
#endif
        s = next_s;
        sflags = next_sflags;
        if (match_cnt) {
            next_s = *match_array++;
            next_sflags = next_s->flags;
        }
        const uint8_t s_proto_flags = s->proto.flags;

        SCLogDebug("inspecting signature id %"PRIu32"", s->id);

        if (s->app_inspect != NULL) {
            goto next; // handle sig in DetectRunTx
        }

        /* don't run mask check for stateful rules.
         * There we depend on prefilter */
        if ((s->mask & scratch->pkt_mask) != s->mask) {
            SCLogDebug("mask mismatch %x & %x != %x", s->mask, scratch->pkt_mask, s->mask);
            goto next;
        }

        if (unlikely(sflags & SIG_FLAG_DSIZE)) {
            if (likely(p->payload_len < s->dsize_low || p->payload_len > s->dsize_high)) {
                SCLogDebug("kicked out as p->payload_len %u, dsize low %u, hi %u",
                           p->payload_len, s->dsize_low, s->dsize_high);
                goto next;
            }
        }

        /* if the sig has alproto and the session as well they should match */
        if (likely(sflags & SIG_FLAG_APPLAYER)) {
            if (s->alproto != ALPROTO_UNKNOWN && s->alproto != scratch->alproto) {
                if (s->alproto == ALPROTO_DCERPC) {
                    if (scratch->alproto != ALPROTO_SMB && scratch->alproto != ALPROTO_SMB2) {
                        SCLogDebug("DCERPC sig, alproto not SMB or SMB2");
                        goto next;
                    }
                } else {
                    SCLogDebug("alproto mismatch");
                    goto next;
                }
            }
        }

        if (DetectRunInspectRuleHeader(p, pflow, s, sflags, s_proto_flags) == 0) {
            goto next;
        }

        /* Check the payload keywords. If we are a MPM sig and we've made
         * to here, we've had at least one of the patterns match */
        if (s->sm_arrays[DETECT_SM_LIST_PMATCH] != NULL) {
            KEYWORD_PROFILING_SET_LIST(det_ctx, DETECT_SM_LIST_PMATCH);
            /* if we have stream msgs, inspect against those first,
             * but not for a "dsize" signature */
            if (sflags & SIG_FLAG_REQUIRE_STREAM) {
                int pmatch = 0;
                if (p->flags & PKT_DETECT_HAS_STREAMDATA) {
                    pmatch = DetectEngineInspectStreamPayload(de_ctx, det_ctx, s, pflow, p);
                    if (pmatch) {
                        det_ctx->flags |= DETECT_ENGINE_THREAD_CTX_STREAM_CONTENT_MATCH;
                        /* Tell the engine that this reassembled stream can drop the
                         * rest of the pkts with no further inspection */
                        if (s->action & ACTION_DROP)
                            alert_flags |= PACKET_ALERT_FLAG_DROP_FLOW;

                        alert_flags |= PACKET_ALERT_FLAG_STREAM_MATCH;
                    }
                }
                /* no match? then inspect packet payload */
                if (pmatch == 0) {
                    SCLogDebug("no match in stream, fall back to packet payload");

                    /* skip if we don't have to inspect the packet and segment was
                     * added to stream */
                    if (!(sflags & SIG_FLAG_REQUIRE_PACKET) && (p->flags & PKT_STREAM_ADD)) {
                        goto next;
                    }
                    if (DetectEngineInspectPacketPayload(de_ctx, det_ctx, s, pflow, p) != 1) {
                        goto next;
                    }
                }
            } else {
                if (DetectEngineInspectPacketPayload(de_ctx, det_ctx, s, pflow, p) != 1) {
                    goto next;
                }
            }
        }

        if (DetectRunInspectRulePacketMatches(tv, det_ctx, p, pflow, s) == 0)
            goto next;

#ifdef PROFILING
        smatch = true;
#endif

        DetectRunPostMatch(tv, det_ctx, p, s);

        if (!(sflags & SIG_FLAG_NOALERT)) {
            /* stateful sigs call PacketAlertAppend from DeStateDetectStartDetection */
            if (!state_alert)
                PacketAlertAppend(det_ctx, s, p, 0, alert_flags);
        } else {
            /* apply actions even if not alerting */
            DetectSignatureApplyActions(p, s, alert_flags);
        }
next:
        DetectVarProcessList(det_ctx, pflow, p);
        DetectReplaceFree(det_ctx);
        RULE_PROFILING_END(det_ctx, s, smatch, p);

        det_ctx->flags = 0;
        continue;
    }
}

/* Build the per-packet scratchpad: reset per-packet thread state, derive
 * direction/EOF stream flags, handle live rule reloads on the flow, and
 * resolve the app protocol and raw stream readiness. */
static DetectRunScratchpad DetectRunSetup(
    const DetectEngineCtx *de_ctx,
    DetectEngineThreadCtx *det_ctx,
    Packet * const p, Flow * const pflow)
{
    AppProto alproto = ALPROTO_UNKNOWN;
    uint8_t flow_flags = 0; /* flow/state flags */
    bool app_decoder_events = false;

    PACKET_PROFILING_DETECT_START(p, PROF_DETECT_SETUP);

#ifdef UNITTESTS
    p->alerts.cnt = 0;
#endif
    det_ctx->ticker++;
    det_ctx->filestore_cnt = 0;
    det_ctx->base64_decoded_len = 0;
    det_ctx->raw_stream_progress = 0;

#ifdef DEBUG
    if (p->flags & PKT_STREAM_ADD) {
        det_ctx->pkt_stream_add_cnt++;
    }
#endif

    /* grab the protocol state we will detect on */
    if (p->flags & PKT_HAS_FLOW) {
        DEBUG_VALIDATE_BUG_ON(pflow == NULL);

        if (p->flowflags & FLOW_PKT_TOSERVER) {
            flow_flags = STREAM_TOSERVER;
            SCLogDebug("flag STREAM_TOSERVER set");
        } else if (p->flowflags & FLOW_PKT_TOCLIENT) {
            flow_flags = STREAM_TOCLIENT;
            SCLogDebug("flag STREAM_TOCLIENT set");
        }
        SCLogDebug("p->flowflags 0x%02x", p->flowflags);

        if (p->flags & PKT_STREAM_EOF) {
            flow_flags |= STREAM_EOF;
            SCLogDebug("STREAM_EOF set");
        }

        /* store tenant_id in the flow so that we can use it
         * for creating pseudo packets */
        if (p->tenant_id > 0 && pflow->tenant_id == 0) {
            pflow->tenant_id = p->tenant_id;
        }

        /* live ruleswap check for flow updates */
        if (pflow->de_ctx_version == 0) {
            /* first time this flow is inspected, set id */
            pflow->de_ctx_version = de_ctx->version;
        } else if (pflow->de_ctx_version != de_ctx->version) {
            /* first time we inspect flow with this de_ctx, reset */
            pflow->flags &= ~FLOW_SGH_TOSERVER;
            pflow->flags &= ~FLOW_SGH_TOCLIENT;
            pflow->sgh_toserver = NULL;
            pflow->sgh_toclient = NULL;

            pflow->de_ctx_version = de_ctx->version;
            GenericVarFree(pflow->flowvar);
            pflow->flowvar = NULL;

            DetectEngineStateResetTxs(pflow);
        }

        /* Retrieve the app layer state and protocol and the tcp reassembled
         * stream chunks. */
        if ((p->proto == IPPROTO_TCP && (p->flags & PKT_STREAM_EST)) ||
                (p->proto == IPPROTO_UDP) ||
                (p->proto == IPPROTO_SCTP && (p->flowflags & FLOW_PKT_ESTABLISHED)))
        {
            /* update flow flags with knowledge on disruptions */
            flow_flags = FlowGetDisruptionFlags(pflow, flow_flags);
            alproto = FlowGetAppProtocol(pflow);
            if (p->proto == IPPROTO_TCP && pflow->protoctx &&
                    StreamReassembleRawHasDataReady(pflow->protoctx, p)) {
                p->flags |= PKT_DETECT_HAS_STREAMDATA;
                flow_flags |= STREAM_FLUSH;
            }
            SCLogDebug("alproto %u", alproto);
        } else {
            SCLogDebug("packet doesn't have established flag set (proto %d)", p->proto);
        }

        app_decoder_events = AppLayerParserHasDecoderEvents(pflow->alparser);
    }

    DetectRunScratchpad pad = { alproto, flow_flags, app_decoder_events, NULL, 0 };
    PACKET_PROFILING_DETECT_END(p, PROF_DETECT_SETUP);
    return pad;
}

static inline void DetectRunPostRules(
    ThreadVars *tv,
    DetectEngineCtx *de_ctx,
    DetectEngineThreadCtx *det_ctx,
    Packet * const p,
    Flow * const pflow,
    DetectRunScratchpad *scratch)
{
    /* see if we need to increment the inspect_id and reset the de_state */
    if
(pflow && pflow->alstate && AppLayerParserProtocolSupportsTxs(p->proto, scratch->alproto)) { PACKET_PROFILING_DETECT_START(p, PROF_DETECT_TX_UPDATE); DeStateUpdateInspectTransactionId(pflow, scratch->flow_flags, (scratch->sgh == NULL)); PACKET_PROFILING_DETECT_END(p, PROF_DETECT_TX_UPDATE); } /* so now let's iterate the alerts and remove the ones after a pass rule * matched (if any). This is done inside PacketAlertFinalize() */ /* PR: installed "tag" keywords are handled after the threshold inspection */ PACKET_PROFILING_DETECT_START(p, PROF_DETECT_ALERT); PacketAlertFinalize(de_ctx, det_ctx, p); if (p->alerts.cnt > 0) { StatsAddUI64(tv, det_ctx->counter_alerts, (uint64_t)p->alerts.cnt); } PACKET_PROFILING_DETECT_END(p, PROF_DETECT_ALERT); } static void DetectRunCleanup(DetectEngineThreadCtx *det_ctx, Packet *p, Flow * const pflow) { PACKET_PROFILING_DETECT_START(p, PROF_DETECT_CLEANUP); /* cleanup pkt specific part of the patternmatcher */ PacketPatternCleanup(det_ctx); if (pflow != NULL) { /* update inspected tracker for raw reassembly */ if (p->proto == IPPROTO_TCP && pflow->protoctx != NULL) { StreamReassembleRawUpdateProgress(pflow->protoctx, p, det_ctx->raw_stream_progress); DetectEngineCleanHCBDBuffers(det_ctx); } } PACKET_PROFILING_DETECT_END(p, PROF_DETECT_CLEANUP); SCReturn; } void RuleMatchCandidateTxArrayInit(DetectEngineThreadCtx *det_ctx, uint32_t size) { DEBUG_VALIDATE_BUG_ON(det_ctx->tx_candidates); det_ctx->tx_candidates = SCCalloc(size, sizeof(RuleMatchCandidateTx)); if (det_ctx->tx_candidates == NULL) { FatalError(SC_ERR_MEM_ALLOC, "failed to allocate %"PRIu64" bytes", (uint64_t)(size * sizeof(RuleMatchCandidateTx))); } det_ctx->tx_candidates_size = size; SCLogDebug("array initialized to %u elements (%"PRIu64" bytes)", size, (uint64_t)(size * sizeof(RuleMatchCandidateTx))); } void RuleMatchCandidateTxArrayFree(DetectEngineThreadCtx *det_ctx) { SCFree(det_ctx->tx_candidates); det_ctx->tx_candidates_size = 0; } /* if size >= cur_space */ static 
inline bool RuleMatchCandidateTxArrayHasSpace(const DetectEngineThreadCtx *det_ctx, const uint32_t need) { if (det_ctx->tx_candidates_size >= need) return 1; return 0; } /* realloc */ static int RuleMatchCandidateTxArrayExpand(DetectEngineThreadCtx *det_ctx, const uint32_t needed) { const uint32_t old_size = det_ctx->tx_candidates_size; uint32_t new_size = needed; void *ptmp = SCRealloc(det_ctx->tx_candidates, (new_size * sizeof(RuleMatchCandidateTx))); if (ptmp == NULL) { FatalError(SC_ERR_MEM_ALLOC, "failed to expand to %"PRIu64" bytes", (uint64_t)(new_size * sizeof(RuleMatchCandidateTx))); // TODO can this be handled more gracefully? } det_ctx->tx_candidates = ptmp; det_ctx->tx_candidates_size = new_size; SCLogDebug("array expanded from %u to %u elements (%"PRIu64" bytes -> %"PRIu64" bytes)", old_size, new_size, (uint64_t)(old_size * sizeof(RuleMatchCandidateTx)), (uint64_t)(new_size * sizeof(RuleMatchCandidateTx))); (void)old_size; return 1; } /* TODO maybe let one with flags win if equal? */ static int DetectRunTxSortHelper(const void *a, const void *b) { const RuleMatchCandidateTx *s0 = a; const RuleMatchCandidateTx *s1 = b; if (s1->id == s0->id) return 0; else return s0->id > s1->id ? -1 : 1; } #if 0 #define TRACE_SID_TXS(sid,txs,...) \ do { \ char _trace_buf[2048]; \ snprintf(_trace_buf, sizeof(_trace_buf), __VA_ARGS__); \ SCLogNotice("%p/%"PRIu64"/%u: %s", txs->tx_ptr, txs->tx_id, sid, _trace_buf); \ } while(0) #else #define TRACE_SID_TXS(sid,txs,...) #endif /** \internal * \brief inspect a rule against a transaction * * Inspect a rule. New detection or continued stateful * detection. * * \param stored_flags pointer to stored flags or NULL. * If stored_flags is set it means we're continueing * inspection from an earlier run. 
* * \retval bool true sig matched, false didn't match */ static bool DetectRunTxInspectRule(ThreadVars *tv, DetectEngineCtx *de_ctx, DetectEngineThreadCtx *det_ctx, Packet *p, Flow *f, const uint8_t in_flow_flags, // direction, EOF, etc void *alstate, DetectTransaction *tx, const Signature *s, uint32_t *stored_flags, RuleMatchCandidateTx *can, DetectRunScratchpad *scratch) { uint8_t flow_flags = in_flow_flags; const int direction = (flow_flags & STREAM_TOSERVER) ? 0 : 1; uint32_t inspect_flags = stored_flags ? *stored_flags : 0; int total_matches = 0; int file_no_match = 0; bool retval = false; bool mpm_before_progress = false; // is mpm engine before progress? bool mpm_in_progress = false; // is mpm engine in a buffer we will revisit? /* see if we want to pass on the FLUSH flag */ if ((s->flags & SIG_FLAG_FLUSH) == 0) flow_flags &=~ STREAM_FLUSH; TRACE_SID_TXS(s->id, tx, "starting %s", direction ? "toclient" : "toserver"); /* for a new inspection we inspect pkt header and packet matches */ if (likely(stored_flags == NULL)) { TRACE_SID_TXS(s->id, tx, "first inspect, run packet matches"); if (DetectRunInspectRuleHeader(p, f, s, s->flags, s->proto.flags) == 0) { TRACE_SID_TXS(s->id, tx, "DetectRunInspectRuleHeader() no match"); return false; } if (DetectRunInspectRulePacketMatches(tv, det_ctx, p, f, s) == 0) { TRACE_SID_TXS(s->id, tx, "DetectRunInspectRulePacketMatches no match"); return false; } /* stream mpm and negated mpm sigs can end up here with wrong proto */ if (!(f->alproto == s->alproto || s->alproto == ALPROTO_UNKNOWN)) { TRACE_SID_TXS(s->id, tx, "alproto mismatch"); return false; } } const DetectEngineAppInspectionEngine *engine = s->app_inspect; while (engine != NULL) { // TODO could be do {} while as s->app_inspect cannot be null TRACE_SID_TXS(s->id, tx, "engine %p inspect_flags %x", engine, inspect_flags); if (!(inspect_flags & BIT_U32(engine->id)) && direction == engine->dir) { const bool skip_engine = (engine->alproto != 0 && engine->alproto != 
f->alproto); /* special case: file_data on 'alert tcp' will have engines * in the list that are not for us. */ if (unlikely(skip_engine)) { engine = engine->next; continue; } /* engines are sorted per progress, except that the one with * mpm/prefilter enabled is first */ if (tx->tx_progress < engine->progress) { SCLogDebug("tx progress %d < engine progress %d", tx->tx_progress, engine->progress); break; } if (engine->mpm) { if (tx->tx_progress > engine->progress) { mpm_before_progress = true; } else if (tx->tx_progress == engine->progress) { mpm_in_progress = true; } } /* run callback: but bypass stream callback if we can */ int match; if (unlikely(engine->stream && can->stream_stored)) { match = can->stream_result; TRACE_SID_TXS(s->id, tx, "stream skipped, stored result %d used instead", match); } else { KEYWORD_PROFILING_SET_LIST(det_ctx, engine->sm_list); if (engine->Callback) { match = engine->Callback(tv, de_ctx, det_ctx, s, engine->smd, f, flow_flags, alstate, tx->tx_ptr, tx->tx_id); } else { BUG_ON(engine->v2.Callback == NULL); match = engine->v2.Callback(de_ctx, det_ctx, engine, s, f, flow_flags, alstate, tx->tx_ptr, tx->tx_id); } TRACE_SID_TXS(s->id, tx, "engine %p match %d", engine, match); if (engine->stream) { can->stream_stored = true; can->stream_result = match; TRACE_SID_TXS(s->id, tx, "stream ran, store result %d for next tx (if any)", match); } } if (match == DETECT_ENGINE_INSPECT_SIG_MATCH) { inspect_flags |= BIT_U32(engine->id); engine = engine->next; total_matches++; continue; } else if (match == DETECT_ENGINE_INSPECT_SIG_MATCH_MORE_FILES) { /* if the file engine matched, but indicated more * files are still in progress, we don't set inspect * flags as these would end inspection for this tx */ engine = engine->next; total_matches++; continue; } else if (match == DETECT_ENGINE_INSPECT_SIG_CANT_MATCH) { inspect_flags |= DE_STATE_FLAG_SIG_CANT_MATCH; inspect_flags |= BIT_U32(engine->id); } else if (match == 
DETECT_ENGINE_INSPECT_SIG_CANT_MATCH_FILESTORE) { inspect_flags |= DE_STATE_FLAG_SIG_CANT_MATCH; inspect_flags |= BIT_U32(engine->id); file_no_match = 1; } /* implied DETECT_ENGINE_INSPECT_SIG_NO_MATCH */ if (engine->mpm && mpm_before_progress) { inspect_flags |= DE_STATE_FLAG_SIG_CANT_MATCH; inspect_flags |= BIT_U32(engine->id); } break; } engine = engine->next; } TRACE_SID_TXS(s->id, tx, "inspect_flags %x, total_matches %u, engine %p", inspect_flags, total_matches, engine); if (engine == NULL && total_matches) { inspect_flags |= DE_STATE_FLAG_FULL_INSPECT; TRACE_SID_TXS(s->id, tx, "MATCH"); retval = true; } if (stored_flags) { *stored_flags = inspect_flags; TRACE_SID_TXS(s->id, tx, "continue inspect flags %08x", inspect_flags); } else { // store... or? If tx is done we might not want to come back to this tx // also... if mpmid tracking is enabled, we won't do a sig again for this tx... TRACE_SID_TXS(s->id, tx, "start inspect flags %08x", inspect_flags); if (inspect_flags & DE_STATE_FLAG_SIG_CANT_MATCH) { if (file_no_match) { /* if we have a mismatch on a file sig, we need to keep state. * We may get another file on the same tx (for http and smtp * at least), so for a new file we need to re-eval the sig. * Thoughts / TODO: * - not for some protos that have 1 file per tx (e.g. nfs) * - maybe we only need this for file sigs that mix with * other matches? E.g. 'POST + filename', is different than * just 'filename'. 
*/ DetectRunStoreStateTx(scratch->sgh, f, tx->tx_ptr, tx->tx_id, s, inspect_flags, flow_flags, file_no_match); } } else if ((inspect_flags & DE_STATE_FLAG_FULL_INSPECT) && mpm_before_progress) { TRACE_SID_TXS(s->id, tx, "no need to store match sig, " "mpm won't trigger for it anymore"); if (inspect_flags & DE_STATE_FLAG_FILE_INSPECT) { TRACE_SID_TXS(s->id, tx, "except that for new files, " "we may have to revisit anyway"); DetectRunStoreStateTx(scratch->sgh, f, tx->tx_ptr, tx->tx_id, s, inspect_flags, flow_flags, file_no_match); } } else if ((inspect_flags & DE_STATE_FLAG_FULL_INSPECT) == 0 && mpm_in_progress) { TRACE_SID_TXS(s->id, tx, "no need to store no-match sig, " "mpm will revisit it"); } else { TRACE_SID_TXS(s->id, tx, "storing state: flags %08x", inspect_flags); DetectRunStoreStateTx(scratch->sgh, f, tx->tx_ptr, tx->tx_id, s, inspect_flags, flow_flags, file_no_match); } } return retval; } /** \internal * \brief get a DetectTransaction object * \retval struct filled with relevant info or all nulls/0s */ static DetectTransaction GetDetectTx(const uint8_t ipproto, const AppProto alproto, void *alstate, const uint64_t tx_id, void *tx_ptr, const int tx_end_state, const uint8_t flow_flags) { const uint64_t detect_flags = AppLayerParserGetTxDetectFlags(ipproto, alproto, tx_ptr, flow_flags); if (detect_flags & APP_LAYER_TX_INSPECTED_FLAG) { SCLogDebug("%"PRIu64" tx already fully inspected for %s. Flags %016"PRIx64, tx_id, flow_flags & STREAM_TOSERVER ? "toserver" : "toclient", detect_flags); DetectTransaction no_tx = { NULL, 0, NULL, 0, 0, 0, 0, 0, }; return no_tx; } const int tx_progress = AppLayerParserGetStateProgress(ipproto, alproto, tx_ptr, flow_flags); const int dir_int = (flow_flags & STREAM_TOSERVER) ? 0 : 1; DetectEngineState *tx_de_state = AppLayerParserGetTxDetectState(ipproto, alproto, tx_ptr); DetectEngineStateDirection *tx_dir_state = tx_de_state ? 
&tx_de_state->dir_state[dir_int] : NULL; uint64_t prefilter_flags = detect_flags & APP_LAYER_TX_PREFILTER_MASK; DetectTransaction tx = { .tx_ptr = tx_ptr, .tx_id = tx_id, .de_state = tx_dir_state, .detect_flags = detect_flags, .prefilter_flags = prefilter_flags, .prefilter_flags_orig = prefilter_flags, .tx_progress = tx_progress, .tx_end_state = tx_end_state, }; return tx; } static void DetectRunTx(ThreadVars *tv, DetectEngineCtx *de_ctx, DetectEngineThreadCtx *det_ctx, Packet *p, Flow *f, DetectRunScratchpad *scratch) { const uint8_t flow_flags = scratch->flow_flags; const SigGroupHead * const sgh = scratch->sgh; void * const alstate = f->alstate; const uint8_t ipproto = f->proto; const AppProto alproto = f->alproto; const uint64_t total_txs = AppLayerParserGetTxCnt(f, alstate); uint64_t tx_id_min = AppLayerParserGetTransactionInspectId(f->alparser, flow_flags); const int tx_end_state = AppLayerParserGetStateProgressCompletionStatus(alproto, flow_flags); AppLayerGetTxIteratorFunc IterFunc = AppLayerGetTxIterator(ipproto, alproto); AppLayerGetTxIterState state; memset(&state, 0, sizeof(state)); while (1) { AppLayerGetTxIterTuple ires = IterFunc(ipproto, alproto, alstate, tx_id_min, total_txs, &state); if (ires.tx_ptr == NULL) break; DetectTransaction tx = GetDetectTx(ipproto, alproto, alstate, ires.tx_id, ires.tx_ptr, tx_end_state, flow_flags); if (tx.tx_ptr == NULL) { SCLogDebug("%p/%"PRIu64" no transaction to inspect", tx.tx_ptr, tx_id_min); tx_id_min++; // next (if any) run look for +1 goto next; } tx_id_min = tx.tx_id + 1; // next look for cur + 1 uint32_t array_idx = 0; uint32_t total_rules = det_ctx->match_array_cnt; total_rules += (tx.de_state ? 
tx.de_state->cnt : 0); /* run prefilter engines and merge results into a candidates array */ if (sgh->tx_engines) { PACKET_PROFILING_DETECT_START(p, PROF_DETECT_PF_TX); DetectRunPrefilterTx(det_ctx, sgh, p, ipproto, flow_flags, alproto, alstate, &tx); PACKET_PROFILING_DETECT_END(p, PROF_DETECT_PF_TX); SCLogDebug("%p/%"PRIu64" rules added from prefilter: %u candidates", tx.tx_ptr, tx.tx_id, det_ctx->pmq.rule_id_array_cnt); total_rules += det_ctx->pmq.rule_id_array_cnt; if (!(RuleMatchCandidateTxArrayHasSpace(det_ctx, total_rules))) { RuleMatchCandidateTxArrayExpand(det_ctx, total_rules); } for (uint32_t i = 0; i < det_ctx->pmq.rule_id_array_cnt; i++) { const Signature *s = de_ctx->sig_array[det_ctx->pmq.rule_id_array[i]]; const SigIntId id = s->num; det_ctx->tx_candidates[array_idx].s = s; det_ctx->tx_candidates[array_idx].id = id; det_ctx->tx_candidates[array_idx].flags = NULL; det_ctx->tx_candidates[array_idx].stream_reset = 0; array_idx++; } } else { if (!(RuleMatchCandidateTxArrayHasSpace(det_ctx, total_rules))) { RuleMatchCandidateTxArrayExpand(det_ctx, total_rules); } } /* merge 'state' rules from the regular prefilter */ uint32_t x = array_idx; for (uint32_t i = 0; i < det_ctx->match_array_cnt; i++) { const Signature *s = det_ctx->match_array[i]; if (s->app_inspect != NULL) { const SigIntId id = s->num; det_ctx->tx_candidates[array_idx].s = s; det_ctx->tx_candidates[array_idx].id = id; det_ctx->tx_candidates[array_idx].flags = NULL; det_ctx->tx_candidates[array_idx].stream_reset = 0; array_idx++; SCLogDebug("%p/%"PRIu64" rule %u (%u) added from 'match' list", tx.tx_ptr, tx.tx_id, s->id, id); } } SCLogDebug("%p/%"PRIu64" rules added from 'match' list: %u", tx.tx_ptr, tx.tx_id, array_idx - x); (void)x; /* merge stored state into results */ if (tx.de_state != NULL) { const uint32_t old = array_idx; /* if tx.de_state->flags has 'new file' set and sig below has * 'file inspected' flag, reset the file part of the state */ const bool have_new_file = 
(tx.de_state->flags & DETECT_ENGINE_STATE_FLAG_FILE_NEW); if (have_new_file) { SCLogDebug("%p/%"PRIu64" destate: need to consider new file", tx.tx_ptr, tx.tx_id); tx.de_state->flags &= ~DETECT_ENGINE_STATE_FLAG_FILE_NEW; } SigIntId state_cnt = 0; DeStateStore *tx_store = tx.de_state->head; for (; tx_store != NULL; tx_store = tx_store->next) { SCLogDebug("tx_store %p", tx_store); SigIntId store_cnt = 0; for (store_cnt = 0; store_cnt < DE_STATE_CHUNK_SIZE && state_cnt < tx.de_state->cnt; store_cnt++, state_cnt++) { DeStateStoreItem *item = &tx_store->store[store_cnt]; SCLogDebug("rule id %u, inspect_flags %u", item->sid, item->flags); if (have_new_file && (item->flags & DE_STATE_FLAG_FILE_INSPECT)) { /* remove part of the state. File inspect engine will now * be able to run again */ item->flags &= ~(DE_STATE_FLAG_SIG_CANT_MATCH|DE_STATE_FLAG_FULL_INSPECT|DE_STATE_FLAG_FILE_INSPECT); SCLogDebug("rule id %u, post file reset inspect_flags %u", item->sid, item->flags); } det_ctx->tx_candidates[array_idx].s = de_ctx->sig_array[item->sid]; det_ctx->tx_candidates[array_idx].id = item->sid; det_ctx->tx_candidates[array_idx].flags = &item->flags; det_ctx->tx_candidates[array_idx].stream_reset = 0; array_idx++; } } if (old && old != array_idx) { qsort(det_ctx->tx_candidates, array_idx, sizeof(RuleMatchCandidateTx), DetectRunTxSortHelper); SCLogDebug("%p/%"PRIu64" rules added from 'continue' list: %u", tx.tx_ptr, tx.tx_id, array_idx - old); } } det_ctx->tx_id = tx.tx_id; det_ctx->tx_id_set = 1; det_ctx->p = p; /* run rules: inspect the match candidates */ for (uint32_t i = 0; i < array_idx; i++) { RuleMatchCandidateTx *can = &det_ctx->tx_candidates[i]; const Signature *s = det_ctx->tx_candidates[i].s; uint32_t *inspect_flags = det_ctx->tx_candidates[i].flags; /* deduplicate: rules_array is sorted, but not deduplicated: * both mpm and stored state could give us the same sid. * As they are back to back in that case we can check for it * here. We select the stored state one. 
*/ if ((i + 1) < array_idx) { if (det_ctx->tx_candidates[i].s == det_ctx->tx_candidates[i+1].s) { if (det_ctx->tx_candidates[i].flags != NULL) { i++; SCLogDebug("%p/%"PRIu64" inspecting SKIP NEXT: sid %u (%u), flags %08x", tx.tx_ptr, tx.tx_id, s->id, s->num, inspect_flags ? *inspect_flags : 0); } else if (det_ctx->tx_candidates[i+1].flags != NULL) { SCLogDebug("%p/%"PRIu64" inspecting SKIP CURRENT: sid %u (%u), flags %08x", tx.tx_ptr, tx.tx_id, s->id, s->num, inspect_flags ? *inspect_flags : 0); continue; } else { // if it's all the same, inspect the current one and skip next. i++; SCLogDebug("%p/%"PRIu64" inspecting SKIP NEXT: sid %u (%u), flags %08x", tx.tx_ptr, tx.tx_id, s->id, s->num, inspect_flags ? *inspect_flags : 0); } } } SCLogDebug("%p/%"PRIu64" inspecting: sid %u (%u), flags %08x", tx.tx_ptr, tx.tx_id, s->id, s->num, inspect_flags ? *inspect_flags : 0); if (inspect_flags) { if (*inspect_flags & (DE_STATE_FLAG_FULL_INSPECT|DE_STATE_FLAG_SIG_CANT_MATCH)) { SCLogDebug("%p/%"PRIu64" inspecting: sid %u (%u), flags %08x ALREADY COMPLETE", tx.tx_ptr, tx.tx_id, s->id, s->num, *inspect_flags); continue; } } if (inspect_flags) { /* continue previous inspection */ SCLogDebug("%p/%"PRIu64" Continueing sid %u", tx.tx_ptr, tx.tx_id, s->id); } else { /* start new inspection */ SCLogDebug("%p/%"PRIu64" Start sid %u", tx.tx_ptr, tx.tx_id, s->id); } /* call individual rule inspection */ RULE_PROFILING_START(p); const int r = DetectRunTxInspectRule(tv, de_ctx, det_ctx, p, f, flow_flags, alstate, &tx, s, inspect_flags, can, scratch); if (r == 1) { /* match */ DetectRunPostMatch(tv, det_ctx, p, s); uint8_t alert_flags = (PACKET_ALERT_FLAG_STATE_MATCH|PACKET_ALERT_FLAG_TX); if (s->action & ACTION_DROP) alert_flags |= PACKET_ALERT_FLAG_DROP_FLOW; SCLogDebug("%p/%"PRIu64" sig %u (%u) matched", tx.tx_ptr, tx.tx_id, s->id, s->num); if (!(s->flags & SIG_FLAG_NOALERT)) { PacketAlertAppend(det_ctx, s, p, tx.tx_id, alert_flags); } else { DetectSignatureApplyActions(p, s, 
alert_flags); } } DetectVarProcessList(det_ctx, p->flow, p); RULE_PROFILING_END(det_ctx, s, r, p); } det_ctx->tx_id = 0; det_ctx->tx_id_set = 0; det_ctx->p = NULL; /* see if we have any updated state to store in the tx */ uint64_t new_detect_flags = 0; /* this side of the tx is done */ if (tx.tx_progress >= tx.tx_end_state) { new_detect_flags |= APP_LAYER_TX_INSPECTED_FLAG; SCLogDebug("%p/%"PRIu64" tx is done for direction %s. Flag %016"PRIx64, tx.tx_ptr, tx.tx_id, flow_flags & STREAM_TOSERVER ? "toserver" : "toclient", new_detect_flags); } if (tx.prefilter_flags != tx.prefilter_flags_orig) { new_detect_flags |= tx.prefilter_flags; SCLogDebug("%p/%"PRIu64" updated prefilter flags %016"PRIx64" " "(was: %016"PRIx64") for direction %s. Flag %016"PRIx64, tx.tx_ptr, tx.tx_id, tx.prefilter_flags, tx.prefilter_flags_orig, flow_flags & STREAM_TOSERVER ? "toserver" : "toclient", new_detect_flags); } if (new_detect_flags != 0 && (new_detect_flags | tx.detect_flags) != tx.detect_flags) { new_detect_flags |= tx.detect_flags; SCLogDebug("%p/%"PRIu64" Storing new flags %016"PRIx64" (was %016"PRIx64")", tx.tx_ptr, tx.tx_id, new_detect_flags, tx.detect_flags); AppLayerParserSetTxDetectFlags(ipproto, alproto, tx.tx_ptr, flow_flags, new_detect_flags); } next: InspectionBufferClean(det_ctx); if (!ires.has_next) break; } } /** \brief Apply action(s) and Set 'drop' sig info, * if applicable */ void DetectSignatureApplyActions(Packet *p, const Signature *s, const uint8_t alert_flags) { PACKET_UPDATE_ACTION(p, s->action); if (s->action & ACTION_DROP) { if (p->alerts.drop.action == 0) { p->alerts.drop.num = s->num; p->alerts.drop.action = s->action; p->alerts.drop.s = (Signature *)s; } } else if (s->action & ACTION_PASS) { /* if an stream/app-layer match we enforce the pass for the flow */ if ((p->flow != NULL) && (alert_flags & (PACKET_ALERT_FLAG_STATE_MATCH|PACKET_ALERT_FLAG_STREAM_MATCH))) { FlowSetNoPacketInspectionFlag(p->flow); } } } static DetectEngineThreadCtx 
*GetTenantById(HashTable *h, uint32_t id) { /* technically we need to pass a DetectEngineThreadCtx struct with the * tentant_id member. But as that member is the first in the struct, we * can use the id directly. */ return HashTableLookup(h, &id, 0); } static void DetectFlow(ThreadVars *tv, DetectEngineCtx *de_ctx, DetectEngineThreadCtx *det_ctx, Packet *p) { if (p->flags & PKT_NOPACKET_INSPECTION) { /* hack: if we are in pass the entire flow mode, we need to still * update the inspect_id forward. So test for the condition here, * and call the update code if necessary. */ const int pass = ((p->flow->flags & FLOW_NOPACKET_INSPECTION)); const AppProto alproto = FlowGetAppProtocol(p->flow); if (pass && AppLayerParserProtocolSupportsTxs(p->proto, alproto)) { uint8_t flags; if (p->flowflags & FLOW_PKT_TOSERVER) { flags = STREAM_TOSERVER; } else { flags = STREAM_TOCLIENT; } flags = FlowGetDisruptionFlags(p->flow, flags); DeStateUpdateInspectTransactionId(p->flow, flags, true); } SCLogDebug("p->pcap %"PRIu64": no detection on packet, " "PKT_NOPACKET_INSPECTION is set", p->pcap_cnt); return; } /* see if the packet matches one or more of the sigs */ (void)DetectRun(tv, de_ctx, det_ctx, p); } static void DetectNoFlow(ThreadVars *tv, DetectEngineCtx *de_ctx, DetectEngineThreadCtx *det_ctx, Packet *p) { /* No need to perform any detection on this packet, if the the given flag is set.*/ if ((p->flags & PKT_NOPACKET_INSPECTION) || (PACKET_TEST_ACTION(p, ACTION_DROP))) { return; } /* see if the packet matches one or more of the sigs */ DetectRun(tv, de_ctx, det_ctx, p); return; } /** \brief Detection engine thread wrapper. 
* \param tv thread vars * \param p packet to inspect * \param data thread specific data * \param pq packet queue * \retval TM_ECODE_FAILED error * \retval TM_ECODE_OK ok */ TmEcode Detect(ThreadVars *tv, Packet *p, void *data, PacketQueue *pq, PacketQueue *postpq) { DEBUG_VALIDATE_PACKET(p); DetectEngineCtx *de_ctx = NULL; DetectEngineThreadCtx *det_ctx = (DetectEngineThreadCtx *)data; if (det_ctx == NULL) { printf("ERROR: Detect has no thread ctx\n"); goto error; } if (unlikely(SC_ATOMIC_GET(det_ctx->so_far_used_by_detect) == 0)) { (void)SC_ATOMIC_SET(det_ctx->so_far_used_by_detect, 1); SCLogDebug("Detect Engine using new det_ctx - %p", det_ctx); } /* if in MT mode _and_ we have tenants registered, use * MT logic. */ if (det_ctx->mt_det_ctxs_cnt > 0 && det_ctx->TenantGetId != NULL) { uint32_t tenant_id = p->tenant_id; if (tenant_id == 0) tenant_id = det_ctx->TenantGetId(det_ctx, p); if (tenant_id > 0 && tenant_id < det_ctx->mt_det_ctxs_cnt) { p->tenant_id = tenant_id; det_ctx = GetTenantById(det_ctx->mt_det_ctxs_hash, tenant_id); if (det_ctx == NULL) return TM_ECODE_OK; de_ctx = det_ctx->de_ctx; if (de_ctx == NULL) return TM_ECODE_OK; if (unlikely(SC_ATOMIC_GET(det_ctx->so_far_used_by_detect) == 0)) { (void)SC_ATOMIC_SET(det_ctx->so_far_used_by_detect, 1); SCLogDebug("MT de_ctx %p det_ctx %p (tenant %u)", de_ctx, det_ctx, tenant_id); } } else { /* use default if no tenants are registered for this packet */ de_ctx = det_ctx->de_ctx; } } else { de_ctx = det_ctx->de_ctx; } if (p->flow) { DetectFlow(tv, de_ctx, det_ctx, p); } else { DetectNoFlow(tv, de_ctx, det_ctx, p); } return TM_ECODE_OK; error: return TM_ECODE_FAILED; } /** \brief disable file features we don't need * Called if we have no detection engine. 
*/ void DisableDetectFlowFileFlags(Flow *f) { DetectPostInspectFileFlagsUpdate(f, NULL /* no sgh */, STREAM_TOSERVER); DetectPostInspectFileFlagsUpdate(f, NULL /* no sgh */, STREAM_TOCLIENT); } #ifdef UNITTESTS /** * \brief wrapper for old tests */ void SigMatchSignatures(ThreadVars *th_v, DetectEngineCtx *de_ctx, DetectEngineThreadCtx *det_ctx, Packet *p) { DetectRun(th_v, de_ctx, det_ctx, p); } #endif /* * TESTS */ #ifdef UNITTESTS #include "tests/detect.c" #endif
./CrossVul/dataset_final_sorted/CWE-347/c/bad_724_0
crossvul-cpp_data_bad_4255_1
/*
 * Copyright (c) Facebook, Inc. and its affiliates.
 *
 * This source code is licensed under the MIT license found in the
 * LICENSE file in the root directory of this source tree.
 */

#include "hermes/VM/JSObject.h"

#include "hermes/VM/BuildMetadata.h"
#include "hermes/VM/Callable.h"
#include "hermes/VM/HostModel.h"
#include "hermes/VM/InternalProperty.h"
#include "hermes/VM/JSArray.h"
#include "hermes/VM/JSDate.h"
#include "hermes/VM/JSProxy.h"
#include "hermes/VM/Operations.h"
#include "hermes/VM/StringView.h"
#include "llvh/ADT/SmallSet.h"

namespace hermes {
namespace vm {

/// Static vtable for plain JSObject cells: GC/heap-snapshot hooks plus the
/// seven indexed-storage operations that subclasses may override.
ObjectVTable JSObject::vt{
    VTable(
        CellKind::ObjectKind,
        cellSize<JSObject>(),
        nullptr,
        nullptr,
        nullptr,
        nullptr,
        nullptr,
        nullptr, // externalMemorySize
        VTable::HeapSnapshotMetadata{HeapSnapshot::NodeType::Object,
                                     JSObject::_snapshotNameImpl,
                                     JSObject::_snapshotAddEdgesImpl,
                                     nullptr,
                                     JSObject::_snapshotAddLocationsImpl}),
    JSObject::_getOwnIndexedRangeImpl,
    JSObject::_haveOwnIndexedImpl,
    JSObject::_getOwnIndexedPropertyFlagsImpl,
    JSObject::_getOwnIndexedImpl,
    JSObject::_setOwnIndexedImpl,
    JSObject::_deleteOwnIndexedImpl,
    JSObject::_checkAllOwnIndexedImpl,
};

/// Register JSObject's GC-visible fields (parent, class, property storage,
/// and the non-overlapping direct property slots) with the metadata builder.
void ObjectBuildMeta(const GCCell *cell, Metadata::Builder &mb) {
  // This call is just for debugging and consistency purposes.
  mb.addJSObjectOverlapSlots(JSObject::numOverlapSlots<JSObject>());
  const auto *self = static_cast<const JSObject *>(cell);
  mb.addField("parent", &self->parent_);
  mb.addField("class", &self->clazz_);
  mb.addField("propStorage", &self->propStorage_);

  // Declare the direct properties.
  static const char *directPropName[JSObject::DIRECT_PROPERTY_SLOTS] = {
      "directProp0", "directProp1", "directProp2", "directProp3"};
  for (unsigned i = mb.getJSObjectOverlapSlots();
       i < JSObject::DIRECT_PROPERTY_SLOTS;
       ++i) {
    mb.addField(directPropName[i], self->directProps() + i);
  }
}

#ifdef HERMESVM_SERIALIZE
/// Serialize the JSObject-owned state of \p cell: flags, parent/class
/// relocations, optional inline ArrayStorage, and the direct property slots
/// past \p overlapSlots.
void JSObject::serializeObjectImpl(
    Serializer &s,
    const GCCell *cell,
    unsigned overlapSlots) {
  auto *self = vmcast<const JSObject>(cell);
  s.writeData(&self->flags_, sizeof(ObjectFlags));
  s.writeRelocation(self->parent_.get(s.getRuntime()));
  s.writeRelocation(self->clazz_.get(s.getRuntime()));

  // propStorage_ : GCPointer<PropStorage> is also ArrayStorage. Serialize
  // *propStorage_ with this JSObject.
  bool hasArray = (bool)self->propStorage_;
  s.writeInt<uint8_t>(hasArray);
  if (hasArray) {
    ArrayStorage::serializeArrayStorage(
        s, self->propStorage_.get(s.getRuntime()));
  }

  // Record the number of overlap slots, so that the deserialization code
  // doesn't need to keep track of it.
  s.writeInt<uint8_t>(overlapSlots);
  for (size_t i = overlapSlots; i < JSObject::DIRECT_PROPERTY_SLOTS; i++) {
    s.writeHermesValue(self->directProps()[i]);
  }
}

/// Serializer entry point for a plain JSObject cell.
void ObjectSerialize(Serializer &s, const GCCell *cell) {
  JSObject::serializeObjectImpl(s, cell, JSObject::numOverlapSlots<JSObject>());
  s.endObject(cell);
}

/// Deserializer entry point: allocate a fixed-size cell and reconstruct the
/// JSObject in place via the deserializing constructor.
void ObjectDeserialize(Deserializer &d, CellKind kind) {
  assert(kind == CellKind::ObjectKind && "Expected JSObject");
  void *mem = d.getRuntime()->alloc</*fixedSize*/ true>(cellSize<JSObject>());
  auto *obj = new (mem) JSObject(d, &JSObject::vt.base);
  d.endObject(obj);
}

/// Deserializing constructor: mirror of serializeObjectImpl above.
JSObject::JSObject(Deserializer &d, const VTable *vtp)
    : GCCell(&d.getRuntime()->getHeap(), vtp) {
  d.readData(&flags_, sizeof(ObjectFlags));
  d.readRelocation(&parent_, RelocationKind::GCPointer);
  d.readRelocation(&clazz_, RelocationKind::GCPointer);
  if (d.readInt<uint8_t>()) {
    propStorage_.set(
        d.getRuntime(),
        ArrayStorage::deserializeArrayStorage(d),
        &d.getRuntime()->getHeap());
  }
  // Read back the overlap-slot count recorded at serialization time.
  auto overlapSlots =
      d.readInt<uint8_t>();
  // (continues JSObject::JSObject(Deserializer&, const VTable*)) restore the
  // non-overlapping direct property slots.
  for (size_t i = overlapSlots; i < JSObject::DIRECT_PROPERTY_SLOTS; i++) {
    d.readHermesValue(&directProps()[i]);
  }
}
#endif

/// Create a plain object whose prototype is \p parentHandle, using the
/// cached hidden class for that prototype.
PseudoHandle<JSObject> JSObject::create(
    Runtime *runtime,
    Handle<JSObject> parentHandle) {
  JSObjectAlloc<JSObject> mem{runtime};
  return mem.initToPseudoHandle(new (mem) JSObject(
      runtime,
      &vt.base,
      *parentHandle,
      runtime->getHiddenClassForPrototypeRaw(
          *parentHandle,
          numOverlapSlots<JSObject>() + ANONYMOUS_PROPERTY_SLOTS),
      GCPointerBase::NoBarriers()));
}

/// Create a plain object with Object.prototype as its prototype.
PseudoHandle<JSObject> JSObject::create(Runtime *runtime) {
  JSObjectAlloc<JSObject> mem{runtime};
  JSObject *objProto = runtime->objectPrototypeRawPtr;
  return mem.initToPseudoHandle(new (mem) JSObject(
      runtime,
      &vt.base,
      objProto,
      runtime->getHiddenClassForPrototypeRaw(
          objProto, numOverlapSlots<JSObject>() + ANONYMOUS_PROPERTY_SLOTS),
      GCPointerBase::NoBarriers()));
}

/// Create a plain object and pre-allocate out-of-line property storage for
/// \p propertyCount named properties.
PseudoHandle<JSObject> JSObject::create(
    Runtime *runtime,
    unsigned propertyCount) {
  JSObjectAlloc<JSObject> mem{runtime};
  JSObject *objProto = runtime->objectPrototypeRawPtr;
  auto self = mem.initToPseudoHandle(new (mem) JSObject(
      runtime,
      &vt.base,
      objProto,
      runtime->getHiddenClassForPrototypeRaw(
          objProto, numOverlapSlots<JSObject>() + ANONYMOUS_PROPERTY_SLOTS),
      GCPointerBase::NoBarriers()));
  return runtime->ignoreAllocationFailure(
      JSObject::allocatePropStorage(std::move(self), runtime, propertyCount));
}

/// Create an object initialized with the given hidden class (storage sized
/// for the class's property count).
PseudoHandle<JSObject> JSObject::create(
    Runtime *runtime,
    Handle<HiddenClass> clazz) {
  auto obj = JSObject::create(runtime, clazz->getNumProperties());
  obj->clazz_.set(runtime, *clazz, &runtime->getHeap());
  // If the hidden class has index like property, we need to clear the fast path
  // flag.
  if (LLVM_UNLIKELY(obj->clazz_.get(runtime)->getHasIndexLikeProperties()))
    obj->flags_.fastIndexProperties = false;
  return obj;
}

/// Force a lazy (placeholder) object into its fully-initialized form by
/// defining its lazy properties. Only Callables can be lazy.
void JSObject::initializeLazyObject(
    Runtime *runtime,
    Handle<JSObject> lazyObject) {
  assert(lazyObject->flags_.lazyObject && "object must be lazy");
  // object is now assumed to be a regular object.
  lazyObject->flags_.lazyObject = 0;

  // only functions can be lazy.
  assert(vmisa<Callable>(lazyObject.get()) && "unexpected lazy object");
  Callable::defineLazyProperties(Handle<Callable>::vmcast(lazyObject), runtime);
}

/// Return the object's unique ID, lazily assigning one from the runtime's
/// global counter the first time it is requested. Zero means "unassigned",
/// so a wrapped-to-zero value is nudged away from zero.
ObjectID JSObject::getObjectID(JSObject *self, Runtime *runtime) {
  if (LLVM_LIKELY(self->flags_.objectID))
    return self->flags_.objectID;

  // Object ID does not yet exist, get next unique global ID..
  self->flags_.objectID = runtime->generateNextObjectID();
  // Make sure it is not zero.
  if (LLVM_UNLIKELY(!self->flags_.objectID))
    --self->flags_.objectID;
  return self->flags_.objectID;
}

/// ES [[GetPrototypeOf]]: the stored parent for ordinary objects, or the
/// proxy trap for Proxy objects.
CallResult<PseudoHandle<JSObject>> JSObject::getPrototypeOf(
    PseudoHandle<JSObject> selfHandle,
    Runtime *runtime) {
  if (LLVM_LIKELY(!selfHandle->isProxyObject())) {
    return createPseudoHandle(selfHandle->getParent(runtime));
  }

  return JSProxy::getPrototypeOf(
      runtime->makeHandle(std::move(selfHandle)), runtime);
}

namespace {

/// Translate a proxy-trap boolean result through PropOpFlags: propagate
/// exceptions, and raise a TypeError with \p msg when the trap returned
/// false and ThrowOnError is requested.
CallResult<bool> proxyOpFlags(
    Runtime *runtime,
    PropOpFlags opFlags,
    const char *msg,
    CallResult<bool> res) {
  if (LLVM_UNLIKELY(res == ExecutionStatus::EXCEPTION)) {
    return ExecutionStatus::EXCEPTION;
  }
  if (!*res && opFlags.getThrowOnError()) {
    return runtime->raiseTypeError(msg);
  }
  return res;
}

} // namespace

/// ES9 9.1.2 OrdinarySetPrototypeOf (proxies delegate to the trap).
CallResult<bool> JSObject::setParent(
    JSObject *self,
    Runtime *runtime,
    JSObject *parent,
    PropOpFlags opFlags) {
  if (LLVM_UNLIKELY(self->isProxyObject())) {
    return proxyOpFlags(
        runtime,
        opFlags,
        "Object is not extensible.",
        JSProxy::setPrototypeOf(
            runtime->makeHandle(self), runtime, runtime->makeHandle(parent)));
  }
  // ES9 9.1.2
  // 4.
  if (self->parent_.get(runtime) == parent)
    return true;
  // 5.
  // (continues JSObject::setParent) step 5: a non-extensible object may not
  // have its prototype changed.
  if (!self->isExtensible()) {
    if (opFlags.getThrowOnError()) {
      return runtime->raiseTypeError("Object is not extensible.");
    } else {
      return false;
    }
  }

  // 6-8. Check for a prototype cycle.
  for (JSObject *cur = parent; cur; cur = cur->parent_.get(runtime)) {
    if (cur == self) {
      if (opFlags.getThrowOnError()) {
        return runtime->raiseTypeError("Prototype cycle detected");
      } else {
        return false;
      }
    } else if (LLVM_UNLIKELY(cur->isProxyObject())) {
      // TODO this branch should also be used for module namespace and
      // immutable prototype exotic objects.
      break;
    }
  }

  // 9.
  self->parent_.set(runtime, parent, &runtime->getHeap());
  // 10.
  return true;
}

/// Store \p valueHandle into the property slot \p newSlotIndex, growing the
/// out-of-line PropStorage if the slot does not fit in the direct slots.
/// The new slot must be the next unallocated one (appended at the end).
void JSObject::allocateNewSlotStorage(
    Handle<JSObject> selfHandle,
    Runtime *runtime,
    SlotIndex newSlotIndex,
    Handle<> valueHandle) {
  // If it is a direct property, just store the value and we are done.
  if (LLVM_LIKELY(newSlotIndex < DIRECT_PROPERTY_SLOTS)) {
    selfHandle->directProps()[newSlotIndex].set(
        *valueHandle, &runtime->getHeap());
    return;
  }

  // Make the slot index relative to the indirect storage.
  newSlotIndex -= DIRECT_PROPERTY_SLOTS;

  // Allocate a new property storage if not already allocated.
  if (LLVM_UNLIKELY(!selfHandle->propStorage_)) {
    // Allocate new storage.
    assert(newSlotIndex == 0 && "allocated slot must be at end");
    auto arrRes = runtime->ignoreAllocationFailure(
        PropStorage::create(runtime, DEFAULT_PROPERTY_CAPACITY));
    selfHandle->propStorage_.set(
        runtime, vmcast<PropStorage>(arrRes), &runtime->getHeap());
  } else if (LLVM_UNLIKELY(
                 newSlotIndex >=
                 selfHandle->propStorage_.get(runtime)->capacity())) {
    // Reallocate the existing one.
    assert(
        newSlotIndex == selfHandle->propStorage_.get(runtime)->size() &&
        "allocated slot must be at end");
    auto hnd = runtime->makeMutableHandle(selfHandle->propStorage_);
    PropStorage::resize(hnd, runtime, newSlotIndex + 1);
    selfHandle->propStorage_.set(runtime, *hnd, &runtime->getHeap());
  }

  {
    // No allocation may happen between obtaining the raw PropStorage pointer
    // and the final store, or the pointer could be invalidated by GC.
    NoAllocScope scope{runtime};
    auto *const propStorage = selfHandle->propStorage_.getNonNull(runtime);
    if (newSlotIndex >= propStorage->size()) {
      assert(
          newSlotIndex == propStorage->size() &&
          "allocated slot must be at end");
      PropStorage::resizeWithinCapacity(propStorage, runtime, newSlotIndex + 1);
    }
    // If we don't need to resize, just store it directly.
    propStorage->at(newSlotIndex).set(*valueHandle, &runtime->getHeap());
  }
}

/// Read the value of a named property found on \p propObj (anywhere on the
/// prototype chain), running its getter against \p selfHandle if it is an
/// accessor. Must not be called with proxy objects.
CallResult<PseudoHandle<>> JSObject::getNamedPropertyValue_RJS(
    Handle<JSObject> selfHandle,
    Runtime *runtime,
    Handle<JSObject> propObj,
    NamedPropertyDescriptor desc) {
  assert(
      !selfHandle->flags_.proxyObject && !propObj->flags_.proxyObject &&
      "getNamedPropertyValue_RJS cannot be used with proxy objects");

  if (LLVM_LIKELY(!desc.flags.accessor))
    return createPseudoHandle(getNamedSlotValue(propObj.get(), runtime, desc));

  auto *accessor =
      vmcast<PropertyAccessor>(getNamedSlotValue(propObj.get(), runtime, desc));
  if (!accessor->getter)
    return createPseudoHandle(HermesValue::encodeUndefinedValue());

  // Execute the accessor on this object.
  // (continues getNamedPropertyValue_RJS) invoke the getter with selfHandle
  // as the receiver.
  return accessor->getter.get(runtime)->executeCall0(
      runtime->makeHandle(accessor->getter), runtime, selfHandle);
}

/// Read the value of a computed property found on \p propObj, running its
/// getter against \p selfHandle if it is an accessor. Must not be called
/// with proxy objects.
CallResult<PseudoHandle<>> JSObject::getComputedPropertyValue_RJS(
    Handle<JSObject> selfHandle,
    Runtime *runtime,
    Handle<JSObject> propObj,
    ComputedPropertyDescriptor desc) {
  assert(
      !selfHandle->flags_.proxyObject && !propObj->flags_.proxyObject &&
      "getComputedPropertyValue_RJS cannot be used with proxy objects");

  if (LLVM_LIKELY(!desc.flags.accessor))
    return createPseudoHandle(
        getComputedSlotValue(propObj.get(), runtime, desc));

  auto *accessor = vmcast<PropertyAccessor>(
      getComputedSlotValue(propObj.get(), runtime, desc));
  if (!accessor->getter)
    return createPseudoHandle(HermesValue::encodeUndefinedValue());

  // Execute the accessor on this object.
  return accessor->getter.get(runtime)->executeCall0(
      runtime->makeHandle(accessor->getter), runtime, selfHandle);
}

/// Proxy-aware variant: empty value when there is no property object; for a
/// proxy descriptor, re-checks existence via the "has" trap before reading
/// through the "get" trap.
CallResult<PseudoHandle<>> JSObject::getComputedPropertyValue_RJS(
    Handle<JSObject> selfHandle,
    Runtime *runtime,
    Handle<JSObject> propObj,
    ComputedPropertyDescriptor desc,
    Handle<> nameValHandle) {
  if (!propObj) {
    return createPseudoHandle(HermesValue::encodeEmptyValue());
  }

  if (LLVM_LIKELY(!desc.flags.proxyObject)) {
    return JSObject::getComputedPropertyValue_RJS(
        selfHandle, runtime, propObj, desc);
  }

  CallResult<Handle<>> keyRes = toPropertyKey(runtime, nameValHandle);
  if (LLVM_UNLIKELY(keyRes == ExecutionStatus::EXCEPTION)) {
    return ExecutionStatus::EXCEPTION;
  }
  CallResult<bool> hasRes = JSProxy::hasComputed(propObj, runtime, *keyRes);
  if (LLVM_UNLIKELY(hasRes == ExecutionStatus::EXCEPTION)) {
    return ExecutionStatus::EXCEPTION;
  }
  if (!*hasRes) {
    return createPseudoHandle(HermesValue::encodeEmptyValue());
  }
  return JSProxy::getComputed(propObj, runtime, *keyRes, selfHandle);
}

/// Collect the own property keys of \p selfHandle into a JSArray, per
/// \p okFlags (symbols and/or strings, optionally non-enumerable ones).
/// Index-like keys come first in ascending order, then insertion-ordered
/// named keys, then symbols.
CallResult<Handle<JSArray>> JSObject::getOwnPropertyKeys(
    Handle<JSObject> selfHandle,
    Runtime *runtime,
    OwnKeysFlags okFlags) {
  assert(
      (okFlags.getIncludeNonSymbols() || okFlags.getIncludeSymbols()) &&
      "Can't exclude symbols and strings");
  if (LLVM_UNLIKELY(
          selfHandle->flags_.lazyObject || selfHandle->flags_.proxyObject)) {
    if (selfHandle->flags_.proxyObject) {
      CallResult<PseudoHandle<JSArray>> proxyRes =
          JSProxy::ownPropertyKeys(selfHandle, runtime, okFlags);
      if (LLVM_UNLIKELY(proxyRes == ExecutionStatus::EXCEPTION)) {
        return ExecutionStatus::EXCEPTION;
      }
      return runtime->makeHandle(std::move(*proxyRes));
    }
    assert(selfHandle->flags_.lazyObject && "descriptor flags are impossible");
    initializeLazyObject(runtime, selfHandle);
  }

  auto range = getOwnIndexedRange(selfHandle.get(), runtime);

  // Estimate the capacity of the output array. This estimate is only
  // reasonable for the non-symbol case.
  uint32_t capacity = okFlags.getIncludeNonSymbols()
      ? (selfHandle->clazz_.get(runtime)->getNumProperties() + range.second -
         range.first)
      : 0;

  auto arrayRes = JSArray::create(runtime, capacity, 0);
  if (LLVM_UNLIKELY(arrayRes == ExecutionStatus::EXCEPTION)) {
    return ExecutionStatus::EXCEPTION;
  }
  auto array = runtime->makeHandle(std::move(*arrayRes));

  // Optional array of SymbolIDs reported via host object API
  llvh::Optional<Handle<JSArray>> hostObjectSymbols;
  size_t hostObjectSymbolCount = 0;

  // If current object is a host object we need to deduplicate its properties
  llvh::SmallSet<SymbolID::RawType, 16> dedupSet;

  // Output index.
  uint32_t index = 0;

  // Avoid allocating a new handle per element.
  MutableHandle<> tmpHandle{runtime};

  // Number of indexed properties.
  uint32_t numIndexed = 0;

  // Regular properties with names that are array indexes are stashed here, if
  // encountered.
  llvh::SmallVector<uint32_t, 8> indexNames{};

  // Iterate the named properties excluding those which use Symbols.
  // (continues getOwnPropertyKeys) gather string-named keys: host object
  // names, then real indexed storage, then hidden-class properties.
  if (okFlags.getIncludeNonSymbols()) {
    // Get host object property names
    if (LLVM_UNLIKELY(selfHandle->flags_.hostObject)) {
      assert(
          range.first == range.second &&
          "Host objects cannot own indexed range");
      auto hostSymbolsRes =
          vmcast<HostObject>(selfHandle.get())->getHostPropertyNames();
      if (hostSymbolsRes == ExecutionStatus::EXCEPTION) {
        return ExecutionStatus::EXCEPTION;
      }
      if ((hostObjectSymbolCount = (**hostSymbolsRes)->getEndIndex()) != 0) {
        Handle<JSArray> hostSymbols = *hostSymbolsRes;
        hostObjectSymbols = std::move(hostSymbols);
        capacity += hostObjectSymbolCount;
      }
    }

    // Iterate the indexed properties.
    GCScopeMarkerRAII marker{runtime};
    for (auto i = range.first; i != range.second; ++i) {
      auto res = getOwnIndexedPropertyFlags(selfHandle.get(), runtime, i);
      if (!res)
        continue;

      // If specified, check whether it is enumerable.
      if (!okFlags.getIncludeNonEnumerable() && !res->enumerable)
        continue;

      tmpHandle = HermesValue::encodeDoubleValue(i);
      JSArray::setElementAt(array, runtime, index++, tmpHandle);
      marker.flush();
    }

    numIndexed = index;

    HiddenClass::forEachProperty(
        runtime->makeHandle(selfHandle->clazz_),
        runtime,
        [runtime,
         okFlags,
         array,
         hostObjectSymbolCount,
         &index,
         &indexNames,
         &tmpHandle,
         &dedupSet](SymbolID id, NamedPropertyDescriptor desc) {
          if (!isPropertyNamePrimitive(id)) {
            return;
          }

          // If specified, check whether it is enumerable.
          if (!okFlags.getIncludeNonEnumerable()) {
            if (!desc.flags.enumerable)
              return;
          }

          // Host properties might overlap with the ones recognized by the
          // hidden class. If we're dealing with a host object then keep track
          // of hidden class properties for the deduplication purposes.
          if (LLVM_UNLIKELY(hostObjectSymbolCount > 0)) {
            dedupSet.insert(id.unsafeGetRaw());
          }

          // Check if this property is an integer index. If it is, we stash it
          // away to deal with it later. This check should be fast since most
          // property names don't start with a digit.
          auto propNameAsIndex = toArrayIndex(
              runtime->getIdentifierTable().getStringView(runtime, id));
          if (LLVM_UNLIKELY(propNameAsIndex)) {
            indexNames.push_back(*propNameAsIndex);
            return;
          }

          tmpHandle = HermesValue::encodeStringValue(
              runtime->getStringPrimFromSymbolID(id));
          JSArray::setElementAt(array, runtime, index++, tmpHandle);
        });

    // Iterate over HostObject properties and append them to the array. Do not
    // append duplicates.
    if (LLVM_UNLIKELY(hostObjectSymbols)) {
      for (size_t i = 0; i < hostObjectSymbolCount; ++i) {
        assert(
            (*hostObjectSymbols)->at(runtime, i).isSymbol() &&
            "Host object needs to return array of SymbolIDs");
        marker.flush();
        SymbolID id = (*hostObjectSymbols)->at(runtime, i).getSymbol();
        if (dedupSet.count(id.unsafeGetRaw()) == 0) {
          dedupSet.insert(id.unsafeGetRaw());

          assert(
              !InternalProperty::isInternal(id) &&
              "host object returned reserved symbol");
          auto propNameAsIndex = toArrayIndex(
              runtime->getIdentifierTable().getStringView(runtime, id));
          if (LLVM_UNLIKELY(propNameAsIndex)) {
            indexNames.push_back(*propNameAsIndex);
            continue;
          }
          tmpHandle = HermesValue::encodeStringValue(
              runtime->getStringPrimFromSymbolID(id));
          JSArray::setElementAt(array, runtime, index++, tmpHandle);
        }
      }
    }
  }

  // Now iterate the named properties again, including only Symbols.
  // We could iterate only once, if we chose to ignore (and disallow)
  // own properties on HostObjects, as we do with Proxies.
  if (okFlags.getIncludeSymbols()) {
    MutableHandle<SymbolID> idHandle{runtime};
    HiddenClass::forEachProperty(
        runtime->makeHandle(selfHandle->clazz_),
        runtime,
        [runtime, okFlags, array, &index, &idHandle](
            SymbolID id, NamedPropertyDescriptor desc) {
          if (!isSymbolPrimitive(id)) {
            return;
          }
          // If specified, check whether it is enumerable.
          if (!okFlags.getIncludeNonEnumerable()) {
            if (!desc.flags.enumerable)
              return;
          }
          idHandle = id;
          JSArray::setElementAt(array, runtime, index++, idHandle);
        });
  }

  // The end (exclusive) of the named properties.
  // (continues getOwnPropertyKeys) finalize length, then merge any stashed
  // index-like named keys into the leading indexed region.
  uint32_t endNamed = index;

  // Properly set the length of the array.
  auto cr = JSArray::setLength(
      array, runtime, endNamed + indexNames.size(), PropOpFlags{});
  (void)cr;
  assert(
      cr != ExecutionStatus::EXCEPTION && *cr && "JSArray::setLength() failed");

  // If we have no index-like names, we are done.
  if (LLVM_LIKELY(indexNames.empty()))
    return array;

  // In the unlikely event that we encountered index-like names, we need to sort
  // them and merge them with the real indexed properties. Note that it is
  // guaranteed that there are no clashes.
  std::sort(indexNames.begin(), indexNames.end());

  // Also make space for the new elements by shifting all the named properties
  // to the right. First, resize the array.
  JSArray::setStorageEndIndex(array, runtime, endNamed + indexNames.size());

  // Shift the non-index property names. The region [numIndexed..endNamed) is
  // moved to [numIndexed+indexNames.size()..array->size()).
  // TODO: optimize this by implementing memcpy-like functionality in ArrayImpl.
  for (uint32_t last = endNamed, toLast = array->getEndIndex();
       last != numIndexed;) {
    --last;
    --toLast;
    tmpHandle = array->at(runtime, last);
    JSArray::setElementAt(array, runtime, toLast, tmpHandle);
  }

  // Now we need to merge the indexes in indexNames and the array
  // [0..numIndexed). We start from the end and copy the larger element from
  // either array.
  // 1+ the destination position to copy into.
  for (uint32_t toLast = numIndexed + indexNames.size(),
                indexNamesLast = indexNames.size();
       toLast != 0;) {
    if (numIndexed) {
      uint32_t a = (uint32_t)array->at(runtime, numIndexed - 1).getNumber();
      uint32_t b;

      // Take the larger of the two source tails; ties cannot occur since the
      // two sets are guaranteed disjoint.
      if (indexNamesLast && (b = indexNames[indexNamesLast - 1]) > a) {
        tmpHandle = HermesValue::encodeDoubleValue(b);
        --indexNamesLast;
      } else {
        tmpHandle = HermesValue::encodeDoubleValue(a);
        --numIndexed;
      }
    } else {
      assert(indexNamesLast && "prematurely ran out of source values");
      tmpHandle =
          HermesValue::encodeDoubleValue(indexNames[indexNamesLast - 1]);
      --indexNamesLast;
    }

    --toLast;
    JSArray::setElementAt(array, runtime, toLast, tmpHandle);
  }

  return array;
}

/// Convert a value to string unless already converted
/// \param nameValHandle [Handle<>] the value to convert
/// \param str [MutableHandle<StringPrimitive>] the string is stored
/// there. Must be initialized to null initially.
#define LAZY_TO_STRING(runtime, nameValHandle, str)       \
  do {                                                    \
    if (!str) {                                           \
      auto status = toString_RJS(runtime, nameValHandle); \
      assert(                                             \
          status != ExecutionStatus::EXCEPTION &&         \
          "toString() of primitive cannot fail");         \
      str = status->get();                                \
    }                                                     \
  } while (0)

/// Convert a value to an identifier unless already converted
/// \param nameValHandle [Handle<>] the value to convert
/// \param id [SymbolID] the identifier is stored there. Must be initialized
/// to INVALID_IDENTIFIER_ID initially.
#define LAZY_TO_IDENTIFIER(runtime, nameValHandle, id)          \
  do {                                                          \
    if (id.isInvalid()) {                                       \
      CallResult<Handle<SymbolID>> idRes =                      \
          valueToSymbolID(runtime, nameValHandle);              \
      if (LLVM_UNLIKELY(idRes == ExecutionStatus::EXCEPTION)) { \
        return ExecutionStatus::EXCEPTION;                      \
      }                                                         \
      id = **idRes;                                             \
    }                                                           \
  } while (0)

/// Convert a value to array index, if possible.
/// \param nameValHandle [Handle<>] the value to convert
/// \param str [MutableHandle<StringPrimitive>] the string is stored
/// there. Must be initialized to null initially.
/// \param arrayIndex [OptValue<uint32_t>] the array index is stored
/// there.
#define TO_ARRAY_INDEX(runtime, nameValHandle, str, arrayIndex) \
  do {                                                          \
    arrayIndex = toArrayIndexFastPath(*nameValHandle);          \
    if (!arrayIndex && !nameValHandle->isSymbol()) {            \
      LAZY_TO_STRING(runtime, nameValHandle, str);              \
      arrayIndex = toArrayIndex(runtime, str);                  \
    }                                                           \
  } while (0)

/// \return true if the flags of a new property make it suitable for indexed
/// storage. All new indexed properties are enumerable, writable and
/// configurable and have no accessors.
static bool canNewPropertyBeIndexed(DefinePropertyFlags dpf) {
  return dpf.setEnumerable && dpf.enumerable && dpf.setWritable &&
      dpf.writable && dpf.setConfigurable && dpf.configurable &&
      !dpf.setSetter && !dpf.setGetter;
}

/// Friend-style accessor shim: exposes selected private JSObject state to
/// the file-local helper below without widening the public interface.
struct JSObject::Helper {
 public:
  LLVM_ATTRIBUTE_ALWAYS_INLINE
  static ObjectFlags &flags(JSObject *self) {
    return self->flags_;
  }
  LLVM_ATTRIBUTE_ALWAYS_INLINE
  static OptValue<PropertyFlags>
  getOwnIndexedPropertyFlags(JSObject *self, Runtime *runtime, uint32_t index) {
    return JSObject::getOwnIndexedPropertyFlags(self, runtime, index);
  }
  LLVM_ATTRIBUTE_ALWAYS_INLINE
  static NamedPropertyDescriptor &castToNamedPropertyDescriptorRef(
      ComputedPropertyDescriptor &desc) {
    return desc.castToNamedPropertyDescriptorRef();
  }
};

namespace {
/// ES5.1 8.12.1.
/// A helper which takes a SymbolID which caches the conversion of
/// nameValHandle if it's needed. It should be default constructed,
/// and may or may not be set. This has been measured to be a useful
/// perf win. Note that always_inline seems to be ignored on static
/// methods, so this function has to be local to the cpp file in order
/// to be inlined for the perf win.
LLVM_ATTRIBUTE_ALWAYS_INLINE
CallResult<bool> getOwnComputedPrimitiveDescriptorImpl(
    Handle<JSObject> selfHandle,
    Runtime *runtime,
    Handle<> nameValHandle,
    JSObject::IgnoreProxy ignoreProxy,
    SymbolID &id,
    ComputedPropertyDescriptor &desc) {
  assert(
      !nameValHandle->isObject() &&
      "nameValHandle passed to "
      "getOwnComputedPrimitiveDescriptor "
      "cannot be an object");

  // Try the fast paths first if we have "fast" index properties and the
  // property name is an obvious index.
  if (auto arrayIndex = toArrayIndexFastPath(*nameValHandle)) {
    if (JSObject::Helper::flags(*selfHandle).fastIndexProperties) {
      auto res = JSObject::Helper::getOwnIndexedPropertyFlags(
          selfHandle.get(), runtime, *arrayIndex);
      if (res) {
        // This a valid array index, residing in our indexed storage.
        desc.flags = *res;
        desc.flags.indexed = 1;
        desc.slot = *arrayIndex;
        return true;
      }

      // This a valid array index, but we don't have it in our indexed storage,
      // and we don't have index-like named properties.
      return false;
    }
    if (!selfHandle->getClass(runtime)->getHasIndexLikeProperties() &&
        !selfHandle->isHostObject() && !selfHandle->isLazy() &&
        !selfHandle->isProxyObject()) {
      // Early return to handle the case where an object definitely has no
      // index-like properties. This avoids allocating a new StringPrimitive and
      // uniquing it below.
      return false;
    }
  }

  // Convert the string to a SymbolID
  LAZY_TO_IDENTIFIER(runtime, nameValHandle, id);

  // Look for a named property with this name.
  if (JSObject::getOwnNamedDescriptor(
          selfHandle,
          runtime,
          id,
          JSObject::Helper::castToNamedPropertyDescriptorRef(desc))) {
    return true;
  }
  if (LLVM_LIKELY(
          !JSObject::Helper::flags(*selfHandle).indexedStorage &&
          !selfHandle->isLazy() && !selfHandle->isProxyObject())) {
    return false;
  }
  MutableHandle<StringPrimitive> strPrim{runtime};

  // If we have indexed storage, perform potentially expensive conversions
  // to array index and check it.
  if (JSObject::Helper::flags(*selfHandle).indexedStorage) {
    // If the name is a valid integer array index, store it here.
    OptValue<uint32_t> arrayIndex;

    // Try to convert the property name to an array index.
    TO_ARRAY_INDEX(runtime, nameValHandle, strPrim, arrayIndex);

    if (arrayIndex) {
      auto res = JSObject::Helper::getOwnIndexedPropertyFlags(
          selfHandle.get(), runtime, *arrayIndex);
      if (res) {
        desc.flags = *res;
        desc.flags.indexed = 1;
        desc.slot = *arrayIndex;
        return true;
      }
    }
    return false;
  }
  if (selfHandle->isLazy()) {
    JSObject::initializeLazyObject(runtime, selfHandle);
    return JSObject::getOwnComputedPrimitiveDescriptor(
        selfHandle, runtime, nameValHandle, ignoreProxy, desc);
  }
  assert(selfHandle->isProxyObject() && "descriptor flags are impossible");
  if (ignoreProxy == JSObject::IgnoreProxy::Yes) {
    return false;
  }
  return JSProxy::getOwnProperty(
      selfHandle, runtime, nameValHandle, desc, nullptr);
}
} // namespace

/// Public wrapper over getOwnComputedPrimitiveDescriptorImpl with a fresh
/// (invalid) SymbolID cache.
CallResult<bool> JSObject::getOwnComputedPrimitiveDescriptor(
    Handle<JSObject> selfHandle,
    Runtime *runtime,
    Handle<> nameValHandle,
    JSObject::IgnoreProxy ignoreProxy,
    ComputedPropertyDescriptor &desc) {
  SymbolID id{};
  return getOwnComputedPrimitiveDescriptorImpl(
      selfHandle, runtime, nameValHandle, ignoreProxy, id, desc);
}

/// Like getOwnComputedPrimitiveDescriptor, but first coerces an object key
/// to a primitive property key (may run user code / throw).
CallResult<bool> JSObject::getOwnComputedDescriptor(
    Handle<JSObject> selfHandle,
    Runtime *runtime,
    Handle<> nameValHandle,
    ComputedPropertyDescriptor &desc) {
  auto converted = toPropertyKeyIfObject(runtime, nameValHandle);
  if (LLVM_UNLIKELY(converted == ExecutionStatus::EXCEPTION)) {
    return ExecutionStatus::EXCEPTION;
  }
  return JSObject::getOwnComputedPrimitiveDescriptor(
      selfHandle, runtime, *converted, IgnoreProxy::No, desc);
}

/// Variant that also returns the slot value (or accessor pair) through
/// \p valueOrAccessor.
CallResult<bool> JSObject::getOwnComputedDescriptor(
    Handle<JSObject> selfHandle,
    Runtime *runtime,
    Handle<> nameValHandle,
    ComputedPropertyDescriptor &desc,
    MutableHandle<> &valueOrAccessor) {
  auto converted = toPropertyKeyIfObject(runtime, nameValHandle);
  if (LLVM_UNLIKELY(converted ==
                    ExecutionStatus::EXCEPTION)) {
    return ExecutionStatus::EXCEPTION;
  }
  // The proxy is ignored here so we can avoid calling
  // JSProxy::getOwnProperty twice on proxies, since
  // getOwnComputedPrimitiveDescriptor doesn't pass back the
  // valueOrAccessor.
  CallResult<bool> res = JSObject::getOwnComputedPrimitiveDescriptor(
      selfHandle, runtime, *converted, IgnoreProxy::Yes, desc);
  if (LLVM_UNLIKELY(res == ExecutionStatus::EXCEPTION)) {
    return ExecutionStatus::EXCEPTION;
  }
  if (*res) {
    valueOrAccessor = getComputedSlotValue(selfHandle.get(), runtime, desc);
    return true;
  }
  if (LLVM_UNLIKELY(selfHandle->isProxyObject())) {
    return JSProxy::getOwnProperty(
        selfHandle, runtime, nameValHandle, desc, &valueOrAccessor);
  }
  return false;
}

/// Walk the prototype chain looking for a named property, filling \p desc
/// and returning the object that holds it (or nullptr if not found). Host
/// and proxy objects are reported via descriptor flags rather than a real
/// slot; lazy objects are initialized on the way.
JSObject *JSObject::getNamedDescriptor(
    Handle<JSObject> selfHandle,
    Runtime *runtime,
    SymbolID name,
    PropertyFlags expectedFlags,
    NamedPropertyDescriptor &desc) {
  if (findProperty(selfHandle, runtime, name, expectedFlags, desc))
    return *selfHandle;

  // Check here for host object flag.  This means that "normal" own
  // properties above win over host-defined properties, but there's no
  // cost imposed on own property lookups.  This should do what we
  // need in practice, and we can define host vs js property
  // disambiguation however we want.  This is here in order to avoid
  // impacting perf for the common case where an own property exists
  // in normal storage.
  if (LLVM_UNLIKELY(selfHandle->flags_.hostObject)) {
    desc.flags.hostObject = true;
    desc.flags.writable = true;
    return *selfHandle;
  }
  if (LLVM_UNLIKELY(selfHandle->flags_.lazyObject)) {
    assert(
        !selfHandle->flags_.proxyObject &&
        "Proxy objects should never be lazy");
    // Initialize the object and perform the lookup again.
    JSObject::initializeLazyObject(runtime, selfHandle);
    if (findProperty(selfHandle, runtime, name, expectedFlags, desc))
      return *selfHandle;
  }
  if (LLVM_UNLIKELY(selfHandle->flags_.proxyObject)) {
    desc.flags.proxyObject = true;
    return *selfHandle;
  }

  if (selfHandle->parent_) {
    MutableHandle<JSObject> mutableSelfHandle{
        runtime, selfHandle->parent_.getNonNull(runtime)};

    do {
      // Check the most common case first, at the cost of some code duplication.
      if (LLVM_LIKELY(
              !mutableSelfHandle->flags_.lazyObject &&
              !mutableSelfHandle->flags_.hostObject &&
              !mutableSelfHandle->flags_.proxyObject)) {
      findProp:
        if (findProperty(
                mutableSelfHandle,
                runtime,
                name,
                PropertyFlags::invalid(),
                desc)) {
          assert(
              !selfHandle->flags_.proxyObject &&
              "Proxy object parents should never have own properties");
          return *mutableSelfHandle;
        }
      } else if (LLVM_UNLIKELY(mutableSelfHandle->flags_.lazyObject)) {
        // Materialize the lazy parent, then retry the lookup on it.
        JSObject::initializeLazyObject(runtime, mutableSelfHandle);
        goto findProp;
      } else if (LLVM_UNLIKELY(mutableSelfHandle->flags_.hostObject)) {
        desc.flags.hostObject = true;
        desc.flags.writable = true;
        return *mutableSelfHandle;
      } else {
        assert(
            mutableSelfHandle->flags_.proxyObject &&
            "descriptor flags are impossible");
        desc.flags.proxyObject = true;
        return *mutableSelfHandle;
      }
    } while ((mutableSelfHandle = mutableSelfHandle->parent_.get(runtime)));
  }

  return nullptr;
}

/// Walk the prototype chain for a computed (primitive) property key,
/// filling \p propObj with the holding object and \p desc with the
/// descriptor. Host/proxy holders are reported via descriptor flags.
ExecutionStatus JSObject::getComputedPrimitiveDescriptor(
    Handle<JSObject> selfHandle,
    Runtime *runtime,
    Handle<> nameValHandle,
    MutableHandle<JSObject> &propObj,
    ComputedPropertyDescriptor &desc) {
  assert(
      !nameValHandle->isObject() &&
      "nameValHandle passed to "
      "getComputedPrimitiveDescriptor cannot "
      "be an object");

  propObj = selfHandle.get();

  SymbolID id{};

  GCScopeMarkerRAII marker{runtime};
  do {
    // A proxy is ignored here so we can check the bit later and
    // return it back to the caller for additional processing.
    // (continues getComputedPrimitiveDescriptor) check the current link of
    // the prototype chain; stop on a match or on host/proxy holders.
    Handle<JSObject> loopHandle = propObj;
    CallResult<bool> res = getOwnComputedPrimitiveDescriptorImpl(
        loopHandle, runtime, nameValHandle, IgnoreProxy::Yes, id, desc);
    if (LLVM_UNLIKELY(res == ExecutionStatus::EXCEPTION)) {
      return ExecutionStatus::EXCEPTION;
    }
    if (*res) {
      return ExecutionStatus::RETURNED;
    }
    if (LLVM_UNLIKELY(propObj->flags_.hostObject)) {
      desc.flags.hostObject = true;
      desc.flags.writable = true;
      return ExecutionStatus::RETURNED;
    }
    if (LLVM_UNLIKELY(propObj->flags_.proxyObject)) {
      desc.flags.proxyObject = true;
      return ExecutionStatus::RETURNED;
    }
    // This isn't a proxy, so use the faster getParent() instead of
    // getPrototypeOf.
    propObj = propObj->getParent(runtime);
    // Flush at the end of the loop to allow first iteration to be as fast as
    // possible.
    marker.flush();
  } while (propObj);
  return ExecutionStatus::RETURNED;
}

/// Like getComputedPrimitiveDescriptor, but first coerces an object key to
/// a primitive property key (may run user code / throw).
ExecutionStatus JSObject::getComputedDescriptor(
    Handle<JSObject> selfHandle,
    Runtime *runtime,
    Handle<> nameValHandle,
    MutableHandle<JSObject> &propObj,
    ComputedPropertyDescriptor &desc) {
  auto converted = toPropertyKeyIfObject(runtime, nameValHandle);
  if (LLVM_UNLIKELY(converted == ExecutionStatus::EXCEPTION)) {
    return ExecutionStatus::EXCEPTION;
  }
  return getComputedPrimitiveDescriptor(
      selfHandle, runtime, *converted, propObj, desc);
}

/// ES [[Get]] for a named property with an explicit receiver: data slots
/// are read directly (optionally populating the inline cache), accessors
/// are invoked on \p receiver, and host/proxy holders are dispatched to
/// their respective get implementations.
CallResult<PseudoHandle<>> JSObject::getNamedWithReceiver_RJS(
    Handle<JSObject> selfHandle,
    Runtime *runtime,
    SymbolID name,
    Handle<> receiver,
    PropOpFlags opFlags,
    PropertyCacheEntry *cacheEntry) {
  NamedPropertyDescriptor desc;
  // Locate the descriptor. propObj contains the object which may be anywhere
  // along the prototype chain.
  JSObject *propObj = getNamedDescriptor(selfHandle, runtime, name, desc);
  if (!propObj) {
    if (LLVM_UNLIKELY(opFlags.getMustExist())) {
      return runtime->raiseReferenceError(
          TwineChar16("Property '") +
          runtime->getIdentifierTable().getStringViewForDev(runtime, name) +
          "' doesn't exist");
    }
    return createPseudoHandle(HermesValue::encodeUndefinedValue());
  }

  if (LLVM_LIKELY(
          !desc.flags.accessor && !desc.flags.hostObject &&
          !desc.flags.proxyObject)) {
    // Populate the cache if requested.
    if (cacheEntry && !propObj->getClass(runtime)->isDictionaryNoCache()) {
      cacheEntry->clazz = propObj->getClassGCPtr().getStorageType();
      cacheEntry->slot = desc.slot;
    }
    return createPseudoHandle(getNamedSlotValue(propObj, runtime, desc));
  }

  if (desc.flags.accessor) {
    auto *accessor =
        vmcast<PropertyAccessor>(getNamedSlotValue(propObj, runtime, desc));
    if (!accessor->getter)
      return createPseudoHandle(HermesValue::encodeUndefinedValue());

    // Execute the accessor on this object.
    return Callable::executeCall0(
        runtime->makeHandle(accessor->getter), runtime, receiver);
  } else if (desc.flags.hostObject) {
    auto res = vmcast<HostObject>(propObj)->get(name);
    if (LLVM_UNLIKELY(res == ExecutionStatus::EXCEPTION)) {
      return ExecutionStatus::EXCEPTION;
    }
    return createPseudoHandle(*res);
  } else {
    assert(desc.flags.proxyObject && "descriptor flags are impossible");
    return JSProxy::getNamed(
        runtime->makeHandle(propObj), runtime, name, receiver);
  }
}

/// Get a named property, redirecting through getComputed_RJS when the name
/// is index-like and the object has indexed storage.
CallResult<PseudoHandle<>> JSObject::getNamedOrIndexed(
    Handle<JSObject> selfHandle,
    Runtime *runtime,
    SymbolID name,
    PropOpFlags opFlags) {
  if (LLVM_UNLIKELY(selfHandle->flags_.indexedStorage)) {
    // Note that getStringView can be satisfied without materializing the
    // Identifier.
const auto strView = runtime->getIdentifierTable().getStringView(runtime, name); if (auto nameAsIndex = toArrayIndex(strView)) { return getComputed_RJS( selfHandle, runtime, runtime->makeHandle(HermesValue::encodeNumberValue(*nameAsIndex))); } // Here we have indexed properties but the symbol was not index-like. // Fall through to getNamed(). } return getNamed_RJS(selfHandle, runtime, name, opFlags); } CallResult<PseudoHandle<>> JSObject::getComputedWithReceiver_RJS( Handle<JSObject> selfHandle, Runtime *runtime, Handle<> nameValHandle, Handle<> receiver) { // Try the fast-path first: no "index-like" properties and the "name" already // is a valid integer index. if (selfHandle->flags_.fastIndexProperties) { if (auto arrayIndex = toArrayIndexFastPath(*nameValHandle)) { // Do we have this value present in our array storage? If so, return it. PseudoHandle<> ourValue = createPseudoHandle( getOwnIndexed(selfHandle.get(), runtime, *arrayIndex)); if (LLVM_LIKELY(!ourValue->isEmpty())) return ourValue; } } // If nameValHandle is an object, we should convert it to string now, // because toString may have side-effect, and we want to do this only // once. auto converted = toPropertyKeyIfObject(runtime, nameValHandle); if (LLVM_UNLIKELY(converted == ExecutionStatus::EXCEPTION)) { return ExecutionStatus::EXCEPTION; } auto nameValPrimitiveHandle = *converted; ComputedPropertyDescriptor desc; // Locate the descriptor. propObj contains the object which may be anywhere // along the prototype chain. 
  MutableHandle<JSObject> propObj{runtime};
  if (LLVM_UNLIKELY(
          getComputedPrimitiveDescriptor(
              selfHandle, runtime, nameValPrimitiveHandle, propObj, desc) ==
          ExecutionStatus::EXCEPTION)) {
    return ExecutionStatus::EXCEPTION;
  }
  // Property not found anywhere on the chain: read as undefined.
  if (!propObj)
    return createPseudoHandle(HermesValue::encodeUndefinedValue());
  if (LLVM_LIKELY(
          !desc.flags.accessor && !desc.flags.hostObject &&
          !desc.flags.proxyObject))
    // Plain data property: read the slot directly.
    return createPseudoHandle(
        getComputedSlotValue(propObj.get(), runtime, desc));
  if (desc.flags.accessor) {
    auto *accessor = vmcast<PropertyAccessor>(
        getComputedSlotValue(propObj.get(), runtime, desc));
    // A setter-only accessor reads as undefined.
    if (!accessor->getter)
      return createPseudoHandle(HermesValue::encodeUndefinedValue());
    // Execute the accessor on this object.
    return accessor->getter.get(runtime)->executeCall0(
        runtime->makeHandle(accessor->getter), runtime, receiver);
  } else if (desc.flags.hostObject) {
    // LAZY_TO_IDENTIFIER converts the primitive key into a SymbolID.
    SymbolID id{};
    LAZY_TO_IDENTIFIER(runtime, nameValPrimitiveHandle, id);
    auto propRes = vmcast<HostObject>(selfHandle.get())->get(id);
    if (propRes == ExecutionStatus::EXCEPTION)
      return ExecutionStatus::EXCEPTION;
    return createPseudoHandle(*propRes);
  } else {
    assert(desc.flags.proxyObject && "descriptor flags are impossible");
    CallResult<Handle<>> key = toPropertyKey(runtime, nameValPrimitiveHandle);
    if (key == ExecutionStatus::EXCEPTION)
      return ExecutionStatus::EXCEPTION;
    return JSProxy::getComputed(propObj, runtime, *key, receiver);
  }
}

/// \return true if property \p name exists anywhere along the prototype
/// chain of \p selfHandle; a proxy found on the chain is consulted via its
/// hasNamed trap.
CallResult<bool> JSObject::hasNamed(
    Handle<JSObject> selfHandle,
    Runtime *runtime,
    SymbolID name) {
  NamedPropertyDescriptor desc;
  JSObject *propObj = getNamedDescriptor(selfHandle, runtime, name, desc);
  if (propObj == nullptr) {
    return false;
  }
  if (LLVM_UNLIKELY(desc.flags.proxyObject)) {
    return JSProxy::hasNamed(runtime->makeHandle(propObj), runtime, name);
  }
  return true;
}

/// Like hasNamed(), but first checks indexed storage when \p name is an
/// index-like symbol (e.g. "5") and the object has indexed storage.
CallResult<bool> JSObject::hasNamedOrIndexed(
    Handle<JSObject> selfHandle,
    Runtime *runtime,
    SymbolID name) {
  if (LLVM_UNLIKELY(selfHandle->flags_.indexedStorage)) {
    const auto strView =
        runtime->getIdentifierTable().getStringView(runtime, name);
    if (auto nameAsIndex = toArrayIndex(strView)) {
      if (haveOwnIndexed(selfHandle.get(), runtime, *nameAsIndex)) {
        return true;
      }
      // With fastIndexProperties there can be no index-like named
      // properties shadowing the indexed storage, so a miss is definitive.
      if (selfHandle->flags_.fastIndexProperties) {
        return false;
      }
    }
    // Here we have indexed properties but the symbol was not stored in the
    // indexedStorage.
    // Fall through to getNamed().
  }
  return hasNamed(selfHandle, runtime, name);
}

/// \return true if the computed property \p nameValHandle exists anywhere
/// along the prototype chain of \p selfHandle (the `in` operator).
CallResult<bool> JSObject::hasComputed(
    Handle<JSObject> selfHandle,
    Runtime *runtime,
    Handle<> nameValHandle) {
  // Try the fast-path first: no "index-like" properties and the "name" already
  // is a valid integer index.
  if (selfHandle->flags_.fastIndexProperties) {
    if (auto arrayIndex = toArrayIndexFastPath(*nameValHandle)) {
      // Do we have this value present in our array storage? If so, return true.
      if (haveOwnIndexed(selfHandle.get(), runtime, *arrayIndex)) {
        return true;
      }
    }
  }
  // If nameValHandle is an object, we should convert it to string now,
  // because toString may have side-effect, and we want to do this only
  // once.
  auto converted = toPropertyKeyIfObject(runtime, nameValHandle);
  if (LLVM_UNLIKELY(converted == ExecutionStatus::EXCEPTION)) {
    return ExecutionStatus::EXCEPTION;
  }
  auto nameValPrimitiveHandle = *converted;
  ComputedPropertyDescriptor desc;
  MutableHandle<JSObject> propObj{runtime};
  if (getComputedPrimitiveDescriptor(
          selfHandle, runtime, nameValPrimitiveHandle, propObj, desc) ==
      ExecutionStatus::EXCEPTION) {
    return ExecutionStatus::EXCEPTION;
  }
  if (!propObj) {
    return false;
  }
  if (LLVM_UNLIKELY(desc.flags.proxyObject)) {
    CallResult<Handle<>> key = toPropertyKey(runtime, nameValPrimitiveHandle);
    if (key == ExecutionStatus::EXCEPTION)
      return ExecutionStatus::EXCEPTION;
    return JSProxy::hasComputed(propObj, runtime, *key);
  }
  // For compatibility with polyfills we want to pretend that all HostObject
  // properties are "own" properties in 'in'. Since there is no way to check
  // for a HostObject property, we must always assume success. In practice the
  // property name would have been obtained from enumerating the properties in
  // JS code that looks something like this:
  //   for(key in hostObj) {
  //     if (key in hostObj)
  //       ...
  //   }
  return true;
}

/// Raise a TypeError describing an attempt to overwrite the read-only
/// builtin method \p name on \p selfHandle. Includes the builtin object's
/// own "name" property in the message when it exists as a string-valued
/// data property.
static ExecutionStatus raiseErrorForOverridingStaticBuiltin(
    Handle<JSObject> selfHandle,
    Runtime *runtime,
    Handle<SymbolID> name) {
  Handle<StringPrimitive> methodNameHnd =
      runtime->makeHandle(runtime->getStringPrimFromSymbolID(name.get()));
  // If the 'name' property does not exist or is an accessor, we don't display
  // the name.
  NamedPropertyDescriptor desc;
  auto *obj = JSObject::getNamedDescriptor(
      selfHandle, runtime, Predefined::getSymbolID(Predefined::name), desc);
  assert(
      !selfHandle->isProxyObject() &&
      "raiseErrorForOverridingStaticBuiltin cannot be used with proxy objects");
  if (!obj || desc.flags.accessor) {
    return runtime->raiseTypeError(
        TwineChar16("Attempting to override read-only builtin method '") +
        TwineChar16(methodNameHnd.get()) + "'");
  }
  // Display the name property of the builtin object if it is a string.
  StringPrimitive *objName = dyn_vmcast<StringPrimitive>(
      JSObject::getNamedSlotValue(selfHandle.get(), runtime, desc));
  if (!objName) {
    return runtime->raiseTypeError(
        TwineChar16("Attempting to override read-only builtin method '") +
        TwineChar16(methodNameHnd.get()) + "'");
  }
  return runtime->raiseTypeError(
      TwineChar16("Attempting to override read-only builtin method '") +
      TwineChar16(objName) + "." + TwineChar16(methodNameHnd.get()) + "'");
}

/// Write \p valueHandle to property \p name, starting at \p selfHandle and
/// walking the prototype chain, with \p receiver as the object actually
/// written to (the `this` of accessors and proxy traps). Handles accessor,
/// proxy, host-object, read-only, and internal-setter cases before falling
/// through to adding a new own property.
CallResult<bool> JSObject::putNamedWithReceiver_RJS(
    Handle<JSObject> selfHandle,
    Runtime *runtime,
    SymbolID name,
    Handle<> valueHandle,
    Handle<> receiver,
    PropOpFlags opFlags) {
  NamedPropertyDescriptor desc;
  // Look for the property in this object or along the prototype chain.
  JSObject *propObj = getNamedDescriptor(
      selfHandle,
      runtime,
      name,
      PropertyFlags::defaultNewNamedPropertyFlags(),
      desc);
  // If the property exists (or, we hit a proxy/hostobject on the way
  // up the chain)
  if (propObj) {
    // Get the simple case out of the way: If the property already
    // exists on selfHandle, is not an accessor, selfHandle and
    // receiver are the same, selfHandle is not a host
    // object/proxy/internal setter, and the property is writable,
    // just write into the same slot.
    if (LLVM_LIKELY(
            *selfHandle == propObj &&
            selfHandle.getHermesValue().getRaw() == receiver->getRaw() &&
            !desc.flags.accessor && !desc.flags.internalSetter &&
            !desc.flags.hostObject && !desc.flags.proxyObject &&
            desc.flags.writable)) {
      setNamedSlotValue(
          *selfHandle, runtime, desc, valueHandle.getHermesValue());
      return true;
    }
    if (LLVM_UNLIKELY(desc.flags.accessor)) {
      auto *accessor =
          vmcast<PropertyAccessor>(getNamedSlotValue(propObj, runtime, desc));
      // If it is a read-only accessor, fail.
      if (!accessor->setter) {
        if (opFlags.getThrowOnError()) {
          return runtime->raiseTypeError(
              TwineChar16("Cannot assign to property '") +
              runtime->getIdentifierTable().getStringViewForDev(
                  runtime, name) +
              "' which has only a getter");
        }
        return false;
      }
      // Execute the accessor on this object.
      if (accessor->setter.get(runtime)->executeCall1(
              runtime->makeHandle(accessor->setter),
              runtime,
              receiver,
              *valueHandle) == ExecutionStatus::EXCEPTION) {
        return ExecutionStatus::EXCEPTION;
      }
      return true;
    }
    if (LLVM_UNLIKELY(desc.flags.proxyObject)) {
      assert(
          !opFlags.getMustExist() &&
          "MustExist cannot be used with Proxy objects");
      CallResult<bool> setRes = JSProxy::setNamed(
          runtime->makeHandle(propObj), runtime, name, valueHandle, receiver);
      if (LLVM_UNLIKELY(setRes == ExecutionStatus::EXCEPTION)) {
        return ExecutionStatus::EXCEPTION;
      }
      if (!*setRes && opFlags.getThrowOnError()) {
        return runtime->raiseTypeError(
            TwineChar16("Proxy set returned false for property '") +
            runtime->getIdentifierTable().getStringView(runtime, name) +
            "'");
      }
      return setRes;
    }
    if (LLVM_UNLIKELY(!desc.flags.writable)) {
      // Overwriting a static builtin gets a dedicated, more descriptive
      // TypeError.
      if (desc.flags.staticBuiltin) {
        return raiseErrorForOverridingStaticBuiltin(
            selfHandle, runtime, runtime->makeHandle(name));
      }
      if (opFlags.getThrowOnError()) {
        return runtime->raiseTypeError(
            TwineChar16("Cannot assign to read-only property '") +
            runtime->getIdentifierTable().getStringViewForDev(runtime, name) +
            "'");
      }
      return false;
    }
    // Internal setters only apply when the property lives on selfHandle
    // itself.
    if (*selfHandle == propObj && desc.flags.internalSetter) {
      return internalSetter(
          selfHandle, runtime, name, desc, valueHandle, opFlags);
    }
  }
  // The property does not exist as an conventional own property on
  // this object.
  MutableHandle<JSObject> receiverHandle{runtime, *selfHandle};
  if (selfHandle.getHermesValue().getRaw() != receiver->getRaw() ||
      receiverHandle->isHostObject() || receiverHandle->isProxyObject()) {
    // Redirect the write to the actual receiver when it differs from
    // selfHandle; a non-object receiver makes the write fail.
    if (selfHandle.getHermesValue().getRaw() != receiver->getRaw()) {
      receiverHandle = dyn_vmcast<JSObject>(*receiver);
    }
    if (!receiverHandle) {
      return false;
    }
    if (getOwnNamedDescriptor(receiverHandle, runtime, name, desc)) {
      if (LLVM_UNLIKELY(desc.flags.accessor || !desc.flags.writable)) {
        return false;
      }
      assert(
          !receiverHandle->isHostObject() &&
          !receiverHandle->isProxyObject() &&
          "getOwnNamedDescriptor never sets hostObject or proxyObject flags");
      setNamedSlotValue(
          *receiverHandle, runtime, desc, valueHandle.getHermesValue());
      return true;
    }
    // Now deal with host and proxy object cases. We need to call
    // getOwnComputedPrimitiveDescriptor because it knows how to call
    // the [[getOwnProperty]] Proxy impl if needed.
    if (LLVM_UNLIKELY(
            receiverHandle->isHostObject() ||
            receiverHandle->isProxyObject())) {
      if (receiverHandle->isHostObject()) {
        return vmcast<HostObject>(receiverHandle.get())
            ->set(name, *valueHandle);
      }
      // Proxy case: probe for an existing own property, then route through
      // the defineOwnProperty trap.
      ComputedPropertyDescriptor desc;
      CallResult<bool> descDefinedRes = getOwnComputedPrimitiveDescriptor(
          receiverHandle,
          runtime,
          name.isUniqued() ? runtime->makeHandle(HermesValue::encodeStringValue(
                                 runtime->getStringPrimFromSymbolID(name)))
                           : runtime->makeHandle(name),
          IgnoreProxy::No,
          desc);
      if (LLVM_UNLIKELY(descDefinedRes == ExecutionStatus::EXCEPTION)) {
        return ExecutionStatus::EXCEPTION;
      }
      DefinePropertyFlags dpf;
      if (*descDefinedRes) {
        dpf.setValue = 1;
      } else {
        dpf = DefinePropertyFlags::getDefaultNewPropertyFlags();
      }
      return JSProxy::defineOwnProperty(
          receiverHandle, runtime, name, dpf, valueHandle, opFlags);
    }
  }
  // Does the caller require it to exist?
  if (LLVM_UNLIKELY(opFlags.getMustExist())) {
    return runtime->raiseReferenceError(
        TwineChar16("Property '") +
        runtime->getIdentifierTable().getStringViewForDev(runtime, name) +
        "' doesn't exist");
  }
  // Add a new property.
  return addOwnProperty(
      receiverHandle,
      runtime,
      name,
      DefinePropertyFlags::getDefaultNewPropertyFlags(),
      valueHandle,
      opFlags);
}

/// Write \p valueHandle to property \p name on \p selfHandle, rerouting
/// index-like names through the computed/indexed path when the object has
/// indexed storage; otherwise falls through to the named path.
CallResult<bool> JSObject::putNamedOrIndexed(
    Handle<JSObject> selfHandle,
    Runtime *runtime,
    SymbolID name,
    Handle<> valueHandle,
    PropOpFlags opFlags) {
  if (LLVM_UNLIKELY(selfHandle->flags_.indexedStorage)) {
    // Note that getStringView can be satisfied without materializing the
    // Identifier.
    const auto strView =
        runtime->getIdentifierTable().getStringView(runtime, name);
    if (auto nameAsIndex = toArrayIndex(strView)) {
      return putComputed_RJS(
          selfHandle,
          runtime,
          runtime->makeHandle(HermesValue::encodeNumberValue(*nameAsIndex)),
          valueHandle,
          opFlags);
    }
    // Here we have indexed properties but the symbol was not index-like.
    // Fall through to putNamed().
  }
  return putNamed_RJS(selfHandle, runtime, name, valueHandle, opFlags);
}

/// Write \p valueHandle to the computed property \p nameValHandle on
/// \p selfHandle, with \p receiver as the object actually written to.
/// Mirrors putNamedWithReceiver_RJS but also manages indexed storage and
/// the JSArray ".length" invariant.
CallResult<bool> JSObject::putComputedWithReceiver_RJS(
    Handle<JSObject> selfHandle,
    Runtime *runtime,
    Handle<> nameValHandle,
    Handle<> valueHandle,
    Handle<> receiver,
    PropOpFlags opFlags) {
  assert(
      !opFlags.getMustExist() &&
      "mustExist flag cannot be used with computed properties");
  // Try the fast-path first: has "index-like" properties, the "name"
  // already is a valid integer index, selfHandle and receiver are the
  // same, and it is present in storage.
  if (selfHandle->flags_.fastIndexProperties) {
    if (auto arrayIndex = toArrayIndexFastPath(*nameValHandle)) {
      if (selfHandle.getHermesValue().getRaw() == receiver->getRaw()) {
        if (haveOwnIndexed(selfHandle.get(), runtime, *arrayIndex)) {
          // Element already present: write straight into indexed storage.
          auto result =
              setOwnIndexed(selfHandle, runtime, *arrayIndex, valueHandle);
          if (LLVM_UNLIKELY(result == ExecutionStatus::EXCEPTION))
            return ExecutionStatus::EXCEPTION;
          if (LLVM_LIKELY(*result))
            return true;
          if (opFlags.getThrowOnError()) {
            // TODO: better message.
            return runtime->raiseTypeError(
                "Cannot assign to read-only property");
          }
          return false;
        }
      }
    }
  }
  // If nameValHandle is an object, we should convert it to string now,
  // because toString may have side-effect, and we want to do this only
  // once.
  auto converted = toPropertyKeyIfObject(runtime, nameValHandle);
  if (LLVM_UNLIKELY(converted == ExecutionStatus::EXCEPTION)) {
    return ExecutionStatus::EXCEPTION;
  }
  auto nameValPrimitiveHandle = *converted;
  ComputedPropertyDescriptor desc;
  // Look for the property in this object or along the prototype chain.
  MutableHandle<JSObject> propObj{runtime};
  if (LLVM_UNLIKELY(
          getComputedPrimitiveDescriptor(
              selfHandle, runtime, nameValPrimitiveHandle, propObj, desc) ==
          ExecutionStatus::EXCEPTION)) {
    return ExecutionStatus::EXCEPTION;
  }
  // If the property exists (or, we hit a proxy/hostobject on the way
  // up the chain)
  if (propObj) {
    // Get the simple case out of the way: If the property already
    // exists on selfHandle, is not an accessor, selfHandle and
    // receiver are the same, selfHandle is not a host
    // object/proxy/internal setter, and the property is writable,
    // just write into the same slot.
    if (LLVM_LIKELY(
            selfHandle == propObj &&
            selfHandle.getHermesValue().getRaw() == receiver->getRaw() &&
            !desc.flags.accessor && !desc.flags.internalSetter &&
            !desc.flags.hostObject && !desc.flags.proxyObject &&
            desc.flags.writable)) {
      if (LLVM_UNLIKELY(
              setComputedSlotValue(selfHandle, runtime, desc, valueHandle) ==
              ExecutionStatus::EXCEPTION)) {
        return ExecutionStatus::EXCEPTION;
      }
      return true;
    }
    // Is it an accessor?
    if (LLVM_UNLIKELY(desc.flags.accessor)) {
      auto *accessor = vmcast<PropertyAccessor>(
          getComputedSlotValue(propObj.get(), runtime, desc));
      // If it is a read-only accessor, fail.
      if (!accessor->setter) {
        if (opFlags.getThrowOnError()) {
          return runtime->raiseTypeErrorForValue(
              "Cannot assign to property ",
              nameValPrimitiveHandle,
              " which has only a getter");
        }
        return false;
      }
      // Execute the accessor on this object.
      if (accessor->setter.get(runtime)->executeCall1(
              runtime->makeHandle(accessor->setter),
              runtime,
              receiver,
              valueHandle.get()) == ExecutionStatus::EXCEPTION) {
        return ExecutionStatus::EXCEPTION;
      }
      return true;
    }
    if (LLVM_UNLIKELY(desc.flags.proxyObject)) {
      assert(
          !opFlags.getMustExist() &&
          "MustExist cannot be used with Proxy objects");
      CallResult<Handle<>> key =
          toPropertyKey(runtime, nameValPrimitiveHandle);
      if (key == ExecutionStatus::EXCEPTION)
        return ExecutionStatus::EXCEPTION;
      CallResult<bool> setRes =
          JSProxy::setComputed(propObj, runtime, *key, valueHandle, receiver);
      if (LLVM_UNLIKELY(setRes == ExecutionStatus::EXCEPTION)) {
        return ExecutionStatus::EXCEPTION;
      }
      if (!*setRes && opFlags.getThrowOnError()) {
        // TODO: better message.
        return runtime->raiseTypeError(
            TwineChar16("Proxy trap returned false for property"));
      }
      return setRes;
    }
    if (LLVM_UNLIKELY(!desc.flags.writable)) {
      // Overwriting a static builtin gets a dedicated, more descriptive
      // TypeError.
      if (desc.flags.staticBuiltin) {
        SymbolID id{};
        LAZY_TO_IDENTIFIER(runtime, nameValPrimitiveHandle, id);
        return raiseErrorForOverridingStaticBuiltin(
            selfHandle, runtime, runtime->makeHandle(id));
      }
      if (opFlags.getThrowOnError()) {
        return runtime->raiseTypeErrorForValue(
            "Cannot assign to read-only property ",
            nameValPrimitiveHandle,
            "");
      }
      return false;
    }
    // Internal setters only apply when the property lives on selfHandle
    // itself.
    if (selfHandle == propObj && desc.flags.internalSetter) {
      SymbolID id{};
      LAZY_TO_IDENTIFIER(runtime, nameValPrimitiveHandle, id);
      return internalSetter(
          selfHandle,
          runtime,
          id,
          desc.castToNamedPropertyDescriptorRef(),
          valueHandle,
          opFlags);
    }
  }
  // The property does not exist as an conventional own property on
  // this object.
  MutableHandle<JSObject> receiverHandle{runtime, *selfHandle};
  if (selfHandle.getHermesValue().getRaw() != receiver->getRaw() ||
      receiverHandle->isHostObject() || receiverHandle->isProxyObject()) {
    // Redirect the write to the actual receiver when it differs from
    // selfHandle; a non-object receiver makes the write fail.
    if (selfHandle.getHermesValue().getRaw() != receiver->getRaw()) {
      receiverHandle = dyn_vmcast<JSObject>(*receiver);
    }
    if (!receiverHandle) {
      return false;
    }
    CallResult<bool> descDefinedRes = getOwnComputedPrimitiveDescriptor(
        receiverHandle, runtime, nameValPrimitiveHandle, IgnoreProxy::No, desc);
    if (LLVM_UNLIKELY(descDefinedRes == ExecutionStatus::EXCEPTION)) {
      return ExecutionStatus::EXCEPTION;
    }
    DefinePropertyFlags dpf;
    if (*descDefinedRes) {
      if (LLVM_UNLIKELY(desc.flags.accessor || !desc.flags.writable)) {
        return false;
      }
      if (LLVM_LIKELY(
              !desc.flags.internalSetter && !receiverHandle->isHostObject() &&
              !receiverHandle->isProxyObject())) {
        if (LLVM_UNLIKELY(
                setComputedSlotValue(
                    receiverHandle, runtime, desc, valueHandle) ==
                ExecutionStatus::EXCEPTION)) {
          return ExecutionStatus::EXCEPTION;
        }
        return true;
      }
    }
    // Internal-setter / host-object / proxy receivers all need the key as a
    // SymbolID.
    if (LLVM_UNLIKELY(
            desc.flags.internalSetter || receiverHandle->isHostObject() ||
            receiverHandle->isProxyObject())) {
      SymbolID id{};
      LAZY_TO_IDENTIFIER(runtime, nameValPrimitiveHandle, id);
      if (desc.flags.internalSetter) {
        return internalSetter(
            receiverHandle,
            runtime,
            id,
            desc.castToNamedPropertyDescriptorRef(),
            valueHandle,
            opFlags);
      } else if (receiverHandle->isHostObject()) {
        return vmcast<HostObject>(receiverHandle.get())->set(id, *valueHandle);
      }
      assert(
          receiverHandle->isProxyObject() && "descriptor flags are impossible");
      if (*descDefinedRes) {
        dpf.setValue = 1;
      } else {
        dpf = DefinePropertyFlags::getDefaultNewPropertyFlags();
      }
      return JSProxy::defineOwnProperty(
          receiverHandle, runtime, id, dpf, valueHandle, opFlags);
    }
  }
  /// Can we add more properties?
  if (LLVM_UNLIKELY(!receiverHandle->isExtensible())) {
    if (opFlags.getThrowOnError()) {
      return runtime->raiseTypeError(
          "cannot add a new property"); // TODO: better message.
    }
    return false;
  }
  // If we have indexed storage we must check whether the property is an index,
  // and if it is, store it in indexed storage.
  if (receiverHandle->flags_.indexedStorage) {
    OptValue<uint32_t> arrayIndex;
    MutableHandle<StringPrimitive> strPrim{runtime};
    TO_ARRAY_INDEX(runtime, nameValPrimitiveHandle, strPrim, arrayIndex);
    if (arrayIndex) {
      // Check whether we need to update array's ".length" property.
      if (auto *array = dyn_vmcast<JSArray>(receiverHandle.get())) {
        if (LLVM_UNLIKELY(*arrayIndex >= JSArray::getLength(array))) {
          auto cr = putNamed_RJS(
              receiverHandle,
              runtime,
              Predefined::getSymbolID(Predefined::length),
              runtime->makeHandle(
                  HermesValue::encodeNumberValue(*arrayIndex + 1)),
              opFlags);
          if (LLVM_UNLIKELY(cr == ExecutionStatus::EXCEPTION))
            return ExecutionStatus::EXCEPTION;
          if (LLVM_UNLIKELY(!*cr))
            return false;
        }
      }
      auto result =
          setOwnIndexed(receiverHandle, runtime, *arrayIndex, valueHandle);
      if (LLVM_UNLIKELY(result == ExecutionStatus::EXCEPTION))
        return ExecutionStatus::EXCEPTION;
      if (LLVM_LIKELY(*result))
        return true;
      if (opFlags.getThrowOnError()) {
        // TODO: better message.
        return runtime->raiseTypeError("Cannot assign to read-only property");
      }
      return false;
    }
  }
  SymbolID id{};
  LAZY_TO_IDENTIFIER(runtime, nameValPrimitiveHandle, id);
  // Add a new named property.
  return addOwnProperty(
      receiverHandle,
      runtime,
      id,
      DefinePropertyFlags::getDefaultNewPropertyFlags(),
      valueHandle,
      opFlags);
}

/// Delete own property \p name from \p selfHandle. Succeeds (returns true)
/// when the property does not exist; fails on non-configurable properties.
/// Lazy objects are initialized first; proxies dispatch to deleteNamed trap.
CallResult<bool> JSObject::deleteNamed(
    Handle<JSObject> selfHandle,
    Runtime *runtime,
    SymbolID name,
    PropOpFlags opFlags) {
  assert(
      !opFlags.getMustExist() && "mustExist cannot be specified when deleting");
  // Find the property by name.
  NamedPropertyDescriptor desc;
  auto pos = findProperty(selfHandle, runtime, name, desc);
  // If the property doesn't exist in this object, return success.
  if (!pos) {
    if (LLVM_LIKELY(
            !selfHandle->flags_.lazyObject &&
            !selfHandle->flags_.proxyObject)) {
      return true;
    } else if (selfHandle->flags_.lazyObject) {
      // object is lazy, initialize and read again.
      initializeLazyObject(runtime, selfHandle);
      pos = findProperty(selfHandle, runtime, name, desc);
      if (!pos) // still not there, return true.
        return true;
    } else {
      assert(selfHandle->flags_.proxyObject && "object flags are impossible");
      return proxyOpFlags(
          runtime,
          opFlags,
          "Proxy delete returned false",
          JSProxy::deleteNamed(selfHandle, runtime, name));
    }
  }
  // If the property isn't configurable, fail.
  if (LLVM_UNLIKELY(!desc.flags.configurable)) {
    if (opFlags.getThrowOnError()) {
      return runtime->raiseTypeError(
          TwineChar16("Property '") +
          runtime->getIdentifierTable().getStringViewForDev(runtime, name) +
          "' is not configurable");
    }
    return false;
  }
  // Clear the deleted property value to prevent memory leaks.
  setNamedSlotValue(
      *selfHandle, runtime, desc, HermesValue::encodeEmptyValue());
  // Perform the actual deletion.
  // Transition the hidden class to one without this property and install it.
  auto newClazz = HiddenClass::deleteProperty(
      runtime->makeHandle(selfHandle->clazz_), runtime, *pos);
  selfHandle->clazz_.set(runtime, *newClazz, &runtime->getHeap());
  return true;
}

/// Delete a computed property \p nameValHandle from \p selfHandle.
/// Index-like names delete from indexed storage (and from the named "shadow"
/// property when one exists); other names go through the named-property
/// machinery. Proxies dispatch to the deleteComputed trap.
CallResult<bool> JSObject::deleteComputed(
    Handle<JSObject> selfHandle,
    Runtime *runtime,
    Handle<> nameValHandle,
    PropOpFlags opFlags) {
  assert(
      !opFlags.getMustExist() && "mustExist cannot be specified when deleting");
  // If nameValHandle is an object, we should convert it to string now,
  // because toString may have side-effect, and we want to do this only
  // once.
  auto converted = toPropertyKeyIfObject(runtime, nameValHandle);
  if (LLVM_UNLIKELY(converted == ExecutionStatus::EXCEPTION)) {
    return ExecutionStatus::EXCEPTION;
  }
  auto nameValPrimitiveHandle = *converted;
  // If the name is a valid integer array index, store it here.
  OptValue<uint32_t> arrayIndex;
  // If we have indexed storage, we must attempt to convert the name to array
  // index, even if the conversion is expensive.
  if (selfHandle->flags_.indexedStorage) {
    MutableHandle<StringPrimitive> strPrim{runtime};
    TO_ARRAY_INDEX(runtime, nameValPrimitiveHandle, strPrim, arrayIndex);
  }
  // Try the fast-path first: the "name" is a valid array index and we don't
  // have "index-like" named properties.
  if (arrayIndex && selfHandle->flags_.fastIndexProperties) {
    // Delete the indexed property.
    if (deleteOwnIndexed(selfHandle, runtime, *arrayIndex))
      return true;
    // Cannot delete property (for example this may be a typed array).
    if (opFlags.getThrowOnError()) {
      // TODO: better error message.
      return runtime->raiseTypeError("Cannot delete property");
    }
    return false;
  }
  // slow path, check if object is lazy before continuing.
  if (LLVM_UNLIKELY(selfHandle->flags_.lazyObject)) {
    // initialize and try again.
    initializeLazyObject(runtime, selfHandle);
    return deleteComputed(selfHandle, runtime, nameValHandle, opFlags);
  }
  // Convert the string to an SymbolID;
  SymbolID id;
  LAZY_TO_IDENTIFIER(runtime, nameValPrimitiveHandle, id);
  // Find the property by name.
  NamedPropertyDescriptor desc;
  auto pos = findProperty(selfHandle, runtime, id, desc);
  // If the property exists, make sure it is configurable.
  if (pos) {
    // If the property isn't configurable, fail.
    if (LLVM_UNLIKELY(!desc.flags.configurable)) {
      if (opFlags.getThrowOnError()) {
        // TODO: a better message.
        return runtime->raiseTypeError("Property is not configurable");
      }
      return false;
    }
  }
  // At this point we know that the named property either doesn't exist, or
  // is configurable and so can be deleted, or the object is a Proxy.
  // If it is an "index-like" property, we must also delete the "shadow" indexed
  // property in order to keep Array.length correct.
  if (arrayIndex) {
    if (!deleteOwnIndexed(selfHandle, runtime, *arrayIndex)) {
      // Cannot delete property (for example this may be a typed array).
      if (opFlags.getThrowOnError()) {
        // TODO: better error message.
        return runtime->raiseTypeError("Cannot delete property");
      }
      return false;
    }
  }
  if (pos) {
    // delete the named property (if it exists).
    // Clear the deleted property value to prevent memory leaks.
    setNamedSlotValue(
        *selfHandle, runtime, desc, HermesValue::encodeEmptyValue());
    // Remove the property descriptor.
    auto newClazz = HiddenClass::deleteProperty(
        runtime->makeHandle(selfHandle->clazz_), runtime, *pos);
    selfHandle->clazz_.set(runtime, *newClazz, &runtime->getHeap());
  } else if (LLVM_UNLIKELY(selfHandle->flags_.proxyObject)) {
    CallResult<Handle<>> key = toPropertyKey(runtime, nameValPrimitiveHandle);
    if (key == ExecutionStatus::EXCEPTION)
      return ExecutionStatus::EXCEPTION;
    return proxyOpFlags(
        runtime,
        opFlags,
        "Proxy delete returned false",
        JSProxy::deleteComputed(selfHandle, runtime, *key));
  }
  return true;
}

/// Define or redefine own property \p name on \p selfHandle per \p dpFlags,
/// with \p valueOrAccessor holding either the value or the PropertyAccessor.
/// Existing properties are updated; proxies dispatch to the trap; lazy
/// objects are initialized and retried; otherwise a new property is added.
CallResult<bool> JSObject::defineOwnProperty(
    Handle<JSObject> selfHandle,
    Runtime *runtime,
    SymbolID name,
    DefinePropertyFlags dpFlags,
    Handle<> valueOrAccessor,
    PropOpFlags opFlags) {
  assert(
      !opFlags.getMustExist() && "cannot use mustExist with defineOwnProperty");
  assert(
      !(dpFlags.setValue && dpFlags.isAccessor()) &&
      "Cannot set both value and accessor");
  assert(
      (dpFlags.setValue || dpFlags.isAccessor() ||
       valueOrAccessor.get().isUndefined()) &&
      "value must be undefined when all of setValue/setSetter/setGetter are "
      "false");
#ifndef NDEBUG
  if (dpFlags.isAccessor()) {
    assert(valueOrAccessor.get().isPointer() && "accessor must be non-empty");
    assert(
        !dpFlags.setWritable && !dpFlags.writable &&
        "writable must not be set with accessors");
  }
#endif
  // Is it an existing property.
  NamedPropertyDescriptor desc;
  auto pos = findProperty(selfHandle, runtime, name, desc);
  if (pos) {
    return updateOwnProperty(
        selfHandle,
        runtime,
        name,
        *pos,
        desc,
        dpFlags,
        valueOrAccessor,
        opFlags);
  }
  if (LLVM_UNLIKELY(
          selfHandle->flags_.lazyObject || selfHandle->flags_.proxyObject)) {
    if (selfHandle->flags_.proxyObject) {
      return JSProxy::defineOwnProperty(
          selfHandle, runtime, name, dpFlags, valueOrAccessor, opFlags);
    }
    assert(selfHandle->flags_.lazyObject && "descriptor flags are impossible");
    // if the property was not found and the object is lazy we need to
    // initialize it and try again.
JSObject::initializeLazyObject(runtime, selfHandle); return defineOwnProperty( selfHandle, runtime, name, dpFlags, valueOrAccessor, opFlags); } return addOwnProperty( selfHandle, runtime, name, dpFlags, valueOrAccessor, opFlags); } ExecutionStatus JSObject::defineNewOwnProperty( Handle<JSObject> selfHandle, Runtime *runtime, SymbolID name, PropertyFlags propertyFlags, Handle<> valueOrAccessor) { assert( !selfHandle->flags_.proxyObject && "definedNewOwnProperty cannot be used with proxy objects"); assert( !(propertyFlags.accessor && !valueOrAccessor.get().isPointer()) && "accessor must be non-empty"); assert( !(propertyFlags.accessor && propertyFlags.writable) && "writable must not be set with accessors"); assert( !HiddenClass::debugIsPropertyDefined( selfHandle->clazz_.get(runtime), runtime, name) && "new property is already defined"); return addOwnPropertyImpl( selfHandle, runtime, name, propertyFlags, valueOrAccessor); } CallResult<bool> JSObject::defineOwnComputedPrimitive( Handle<JSObject> selfHandle, Runtime *runtime, Handle<> nameValHandle, DefinePropertyFlags dpFlags, Handle<> valueOrAccessor, PropOpFlags opFlags) { assert( !nameValHandle->isObject() && "nameValHandle passed to " "defineOwnComputedPrimitive() cannot be " "an object"); assert( !opFlags.getMustExist() && "cannot use mustExist with defineOwnProperty"); assert( !(dpFlags.setValue && dpFlags.isAccessor()) && "Cannot set both value and accessor"); assert( (dpFlags.setValue || dpFlags.isAccessor() || valueOrAccessor.get().isUndefined()) && "value must be undefined when all of setValue/setSetter/setGetter are " "false"); assert( !dpFlags.enableInternalSetter && "Cannot set internalSetter on a computed property"); #ifndef NDEBUG if (dpFlags.isAccessor()) { assert(valueOrAccessor.get().isPointer() && "accessor must be non-empty"); assert( !dpFlags.setWritable && !dpFlags.writable && "writable must not be set with accessors"); } #endif // If the name is a valid integer array index, store it here. 
OptValue<uint32_t> arrayIndex; // If we have indexed storage, we must attempt to convert the name to array // index, even if the conversion is expensive. if (selfHandle->flags_.indexedStorage) { MutableHandle<StringPrimitive> strPrim{runtime}; TO_ARRAY_INDEX(runtime, nameValHandle, strPrim, arrayIndex); } SymbolID id{}; // If not storing a property with an array index name, or if we don't have // indexed storage, just pass to the named routine. if (!arrayIndex) { LAZY_TO_IDENTIFIER(runtime, nameValHandle, id); return defineOwnProperty( selfHandle, runtime, id, dpFlags, valueOrAccessor, opFlags); } // At this point we know that we have indexed storage and that the property // has an index-like name. // First check if a named property with the same name exists. if (selfHandle->clazz_.get(runtime)->getHasIndexLikeProperties()) { LAZY_TO_IDENTIFIER(runtime, nameValHandle, id); NamedPropertyDescriptor desc; auto pos = findProperty(selfHandle, runtime, id, desc); // If we found a named property, update it. if (pos) { return updateOwnProperty( selfHandle, runtime, id, *pos, desc, dpFlags, valueOrAccessor, opFlags); } } // Does an indexed property with that index exist? auto indexedPropPresent = getOwnIndexedPropertyFlags(selfHandle.get(), runtime, *arrayIndex); if (indexedPropPresent) { // The current value of the property. HermesValue curValueOrAccessor = getOwnIndexed(selfHandle.get(), runtime, *arrayIndex); auto updateStatus = checkPropertyUpdate( runtime, *indexedPropPresent, dpFlags, curValueOrAccessor, valueOrAccessor, opFlags); if (updateStatus == ExecutionStatus::EXCEPTION) return ExecutionStatus::EXCEPTION; if (updateStatus->first == PropertyUpdateStatus::failed) return false; // The property update is valid, but can the property remain an "indexed" // property, or do we need to convert it to a named property? // If the property flags didn't change, the property remains indexed. 
if (updateStatus->second == *indexedPropPresent) { // If the value doesn't change, we are done. if (updateStatus->first == PropertyUpdateStatus::done) return true; // If we successfully updated the value, we are done. auto result = setOwnIndexed(selfHandle, runtime, *arrayIndex, valueOrAccessor); if (LLVM_UNLIKELY(result == ExecutionStatus::EXCEPTION)) return ExecutionStatus::EXCEPTION; if (*result) return true; if (opFlags.getThrowOnError()) { // TODO: better error message. return runtime->raiseTypeError( "cannot change read-only property value"); } return false; } // OK, we need to convert an indexed property to a named one. // Check whether to use the supplied value, or to reuse the old one, as we // are simply reconfiguring it. MutableHandle<> value{runtime}; if (dpFlags.setValue || dpFlags.isAccessor()) { value = valueOrAccessor.get(); } else { value = curValueOrAccessor; } // Update dpFlags to match the existing property flags. dpFlags.setEnumerable = 1; dpFlags.setWritable = 1; dpFlags.setConfigurable = 1; dpFlags.enumerable = updateStatus->second.enumerable; dpFlags.writable = updateStatus->second.writable; dpFlags.configurable = updateStatus->second.configurable; // Delete the existing indexed property. if (!deleteOwnIndexed(selfHandle, runtime, *arrayIndex)) { if (opFlags.getThrowOnError()) { // TODO: better error message. return runtime->raiseTypeError("Cannot define property"); } return false; } // Add the new named property. LAZY_TO_IDENTIFIER(runtime, nameValHandle, id); return addOwnProperty(selfHandle, runtime, id, dpFlags, value, opFlags); } /// Can we add new properties? if (!selfHandle->isExtensible()) { if (opFlags.getThrowOnError()) { return runtime->raiseTypeError( "cannot add a new property"); // TODO: better message. } return false; } // This is a new property with an index-like name. // Check whether we need to update array's ".length" property. 
bool updateLength = false; if (auto arrayHandle = Handle<JSArray>::dyn_vmcast(selfHandle)) { if (LLVM_UNLIKELY(*arrayIndex >= JSArray::getLength(*arrayHandle))) { NamedPropertyDescriptor lengthDesc; bool lengthPresent = getOwnNamedDescriptor( arrayHandle, runtime, Predefined::getSymbolID(Predefined::length), lengthDesc); (void)lengthPresent; assert(lengthPresent && ".length must be present in JSArray"); if (!lengthDesc.flags.writable) { if (opFlags.getThrowOnError()) { return runtime->raiseTypeError( "Cannot assign to read-only 'length' property of array"); } return false; } updateLength = true; } } bool newIsIndexed = canNewPropertyBeIndexed(dpFlags); if (newIsIndexed) { auto result = setOwnIndexed( selfHandle, runtime, *arrayIndex, dpFlags.setValue ? valueOrAccessor : Runtime::getUndefinedValue()); if (LLVM_UNLIKELY(result == ExecutionStatus::EXCEPTION)) return ExecutionStatus::EXCEPTION; if (!*result) { if (opFlags.getThrowOnError()) { // TODO: better error message. return runtime->raiseTypeError("Cannot define property"); } return false; } } // If this is an array and we need to update ".length", do so. if (updateLength) { // This should always succeed since we are simply enlarging the length. auto res = JSArray::setLength( Handle<JSArray>::vmcast(selfHandle), runtime, *arrayIndex + 1, opFlags); (void)res; assert( res != ExecutionStatus::EXCEPTION && *res && "JSArray::setLength() failed unexpectedly"); } if (newIsIndexed) return true; // We are adding a new property with an index-like name. 
  // Tail of defineOwnComputedPrimitive(): the index-like name could not be
  // stored in indexed storage, so intern it and add a regular named property.
  LAZY_TO_IDENTIFIER(runtime, nameValHandle, id);
  return addOwnProperty(
      selfHandle, runtime, id, dpFlags, valueOrAccessor, opFlags);
}

/// ES [[DefineOwnProperty]] entry point for a computed (runtime-value) key:
/// coerces an object key to a primitive via ToPropertyKey first, then defers
/// to defineOwnComputedPrimitive() for the actual definition logic.
CallResult<bool> JSObject::defineOwnComputed(
    Handle<JSObject> selfHandle,
    Runtime *runtime,
    Handle<> nameValHandle,
    DefinePropertyFlags dpFlags,
    Handle<> valueOrAccessor,
    PropOpFlags opFlags) {
  auto converted = toPropertyKeyIfObject(runtime, nameValHandle);
  if (LLVM_UNLIKELY(converted == ExecutionStatus::EXCEPTION))
    return ExecutionStatus::EXCEPTION;
  return defineOwnComputedPrimitive(
      selfHandle, runtime, *converted, dpFlags, valueOrAccessor, opFlags);
}

/// Build a human-readable type name for this object (used by the heap
/// snapshot machinery via _snapshotNameImpl). Tries, in order: the name of
/// the "constructor" property, the VTable's default node name (suffixed with
/// "(Dictionary)" for dictionary-mode classes), and finally a name of the
/// form "Object(prop1, prop2, ...)" built from the first few property names.
/// NOTE: must not allocate on the GC heap (only the NoAlloc accessors are
/// used).
std::string JSObject::getHeuristicTypeName(GC *gc) {
  PointerBase *const base = gc->getPointerBase();
  if (auto constructorVal = tryGetNamedNoAlloc(
          this, base, Predefined::getSymbolID(Predefined::constructor))) {
    if (auto *constructor = dyn_vmcast<JSObject>(*constructorVal)) {
      auto name = constructor->getNameIfExists(base);
      // If the constructor's name doesn't exist, or it is just the object
      // constructor, attempt to find a different name.
      if (!name.empty() && name != "Object")
        return name;
    }
  }

  std::string name = getVT()->base.snapshotMetaData.defaultNameForNode(this);

  // A constructor's name was not found, check if the object is in dictionary
  // mode.
  if (getClass(base)->isDictionary()) {
    return name + "(Dictionary)";
  }

  // If it's not an Object, the CellKind is most likely good enough on its own
  if (getKind() != CellKind::ObjectKind) {
    return name;
  }

  // If the object isn't a dictionary, and it has only a few property names,
  // make the name based on those property names.
  std::vector<std::string> propertyNames;
  HiddenClass::forEachPropertyNoAlloc(
      getClass(base),
      base,
      [gc, &propertyNames](SymbolID id, NamedPropertyDescriptor) {
        if (InternalProperty::isInternal(id)) {
          // Internal properties aren't user-visible, skip them.
          return;
        }
        propertyNames.emplace_back(gc->convertSymbolToUTF8(id));
      });
  // NOTE: One option is to sort the property names before truncation, to
  // reduce the number of groups; however, by not sorting them it makes it
  // easier to spot sets of objects with the same properties but in different
  // orders, and thus find HiddenClass optimizations to make.
  // For objects with a lot of properties but aren't in dictionary mode yet,
  // keep the number displayed small.
  constexpr int kMaxPropertiesForTypeName = 5;
  bool truncated = false;
  if (propertyNames.size() > kMaxPropertiesForTypeName) {
    propertyNames.erase(
        propertyNames.begin() + kMaxPropertiesForTypeName,
        propertyNames.end());
    truncated = true;
  }
  // The final name should look like Object(a, b, c).
  if (propertyNames.empty()) {
    // Don't add parentheses for objects with no properties.
    return name;
  }
  name += "(";
  bool first = true;
  for (const auto &prop : propertyNames) {
    if (!first) {
      name += ", ";
    }
    first = false;
    name += prop;
  }
  if (truncated) {
    // No need to check for comma edge case because this only happens for
    // greater than one property.
    static_assert(
        kMaxPropertiesForTypeName >= 1,
        "Property truncation should not happen for 0 properties");
    name += ", ...";
  }
  name += ")";
  return name;
}

/// Return this object's "displayName" or "name" property as a UTF-8 string,
/// or "" if neither exists as a StringPrimitive. GC-allocation-free.
std::string JSObject::getNameIfExists(PointerBase *base) {
  // Try "displayName" first, if it is defined.
  if (auto nameVal = tryGetNamedNoAlloc(
          this, base, Predefined::getSymbolID(Predefined::displayName))) {
    if (auto *name = dyn_vmcast<StringPrimitive>(*nameVal)) {
      return converter(name);
    }
  }
  // Next, use "name" if it is defined.
  if (auto nameVal = tryGetNamedNoAlloc(
          this, base, Predefined::getSymbolID(Predefined::name))) {
    if (auto *name = dyn_vmcast<StringPrimitive>(*nameVal)) {
      return converter(name);
    }
  }
  // There is no other way to access the "name" property on an object.
  return "";
}

/// Heap-snapshot hook: the displayed node name is the heuristic type name.
std::string JSObject::_snapshotNameImpl(GCCell *cell, GC *gc) {
  auto *const self = vmcast<JSObject>(cell);
  return self->getHeuristicTypeName(gc);
}

/// Heap-snapshot hook: emit one edge per user-visible named property, plus a
/// "__proto__" edge for the parent, so the prototype chain is walkable in
/// snapshot viewers.
void JSObject::_snapshotAddEdgesImpl(GCCell *cell, GC *gc, HeapSnapshot &snap) {
  auto *const self = vmcast<JSObject>(cell);
  // Add the prototype as a property edge, so it's easy for JS developers to
  // walk the prototype chain on their own.
  if (self->parent_) {
    snap.addNamedEdge(
        HeapSnapshot::EdgeType::Property,
        // __proto__ chosen for similarity to V8.
        "__proto__",
        gc->getObjectID(self->parent_));
  }
  HiddenClass::forEachPropertyNoAlloc(
      self->clazz_.get(gc->getPointerBase()),
      gc->getPointerBase(),
      [self, gc, &snap](SymbolID id, NamedPropertyDescriptor desc) {
        if (InternalProperty::isInternal(id)) {
          // Internal properties aren't user-visible, skip them.
          return;
        }
        // Else, it's a user-visible property.
        GCHermesValue &prop =
            namedSlotRef(self, gc->getPointerBase(), desc.slot);
        const llvh::Optional<HeapSnapshot::NodeID> idForProp =
            gc->getSnapshotID(prop);
        if (!idForProp) {
          // Not a heap value (e.g. a small primitive); nothing to point at.
          return;
        }
        std::string propName = gc->convertSymbolToUTF8(id);
        // If the property name is a valid array index, display it as an
        // "element" instead of a "property". This will put square brackets
        // around the number and sort it numerically rather than
        // alphabetically.
        if (auto index = ::hermes::toArrayIndex(propName)) {
          snap.addIndexedEdge(
              HeapSnapshot::EdgeType::Element,
              index.getValue(),
              idForProp.getValue());
        } else {
          snap.addNamedEdge(
              HeapSnapshot::EdgeType::Property,
              propName,
              idForProp.getValue());
        }
      });
}

/// Heap-snapshot hook: attach a source location to this node when its
/// constructor is a user-defined JSFunction (see continuation below).
void JSObject::_snapshotAddLocationsImpl(
    GCCell *cell,
    GC *gc,
    HeapSnapshot &snap) {
  auto *const self = vmcast<JSObject>(cell);
  PointerBase *const base = gc->getPointerBase();
  // Add the location of the constructor function for this object, if that
  // constructor is a user-defined JS function.
  if (auto constructorVal = tryGetNamedNoAlloc(
          self, base, Predefined::getSymbolID(Predefined::constructor))) {
    if (constructorVal->isObject()) {
      // Only user-defined JS functions carry a source location.
      if (auto *constructor = dyn_vmcast<JSFunction>(*constructorVal)) {
        constructor->addLocationToSnapshot(snap, gc->getObjectID(self));
      }
    }
  }
}

// ---------------------------------------------------------------------------
// Default (base JSObject) implementations of the indexed-storage VTable
// hooks. A plain JSObject has no indexed storage, so these report an empty
// range / absent properties and refuse all mutations. Subclasses with real
// indexed storage (e.g. JSArray) override them.
// ---------------------------------------------------------------------------

/// Default: no indexed storage, so the owned index range is empty.
std::pair<uint32_t, uint32_t> JSObject::_getOwnIndexedRangeImpl(
    JSObject *self,
    Runtime *runtime) {
  return {0, 0};
}

/// Default: no indexed property exists at any index.
bool JSObject::_haveOwnIndexedImpl(JSObject *self, Runtime *, uint32_t) {
  return false;
}

/// Default: no flags, since no indexed property can exist.
OptValue<PropertyFlags> JSObject::_getOwnIndexedPropertyFlagsImpl(
    JSObject *self,
    Runtime *runtime,
    uint32_t) {
  return llvh::None;
}

/// Default: "empty" sentinel meaning no value stored at the index.
HermesValue JSObject::_getOwnIndexedImpl(JSObject *, Runtime *, uint32_t) {
  return HermesValue::encodeEmptyValue();
}

/// Default: setting an indexed property always fails (no storage).
CallResult<bool>
JSObject::_setOwnIndexedImpl(Handle<JSObject>, Runtime *, uint32_t, Handle<>) {
  return false;
}

/// Default: deleting an indexed property always fails (no storage).
bool JSObject::_deleteOwnIndexedImpl(Handle<JSObject>, Runtime *, uint32_t) {
  return false;
}

/// Default: vacuously true — there are no indexed properties to violate the
/// requested mode.
bool JSObject::_checkAllOwnIndexedImpl(
    JSObject * /*self*/,
    Runtime * /*runtime*/,
    ObjectVTable::CheckAllOwnIndexedMode /*mode*/) {
  return true;
}

/// Directly clear the [[Extensible]] slot. Must not be used on Proxy objects,
/// whose extensibility is managed by the proxy traps.
void JSObject::preventExtensions(JSObject *self) {
  assert(
      !self->flags_.proxyObject &&
      "[[Extensible]] slot cannot be set directly on Proxy objects");
  self->flags_.noExtend = true;
}

/// ES [[PreventExtensions]]: dispatches to the Proxy trap when applicable,
/// otherwise flips the noExtend flag and reports success.
CallResult<bool> JSObject::preventExtensions(
    Handle<JSObject> selfHandle,
    Runtime *runtime,
    PropOpFlags opFlags) {
  if (LLVM_UNLIKELY(selfHandle->isProxyObject())) {
    return JSProxy::preventExtensions(selfHandle, runtime, opFlags);
  }
  JSObject::preventExtensions(*selfHandle);
  return true;
}

/// Object.seal(): prevent extensions and make every named property
/// non-configurable by swapping in a transformed hidden class.
ExecutionStatus JSObject::seal(Handle<JSObject> selfHandle, Runtime *runtime) {
  CallResult<bool> statusRes = JSObject::preventExtensions(
      selfHandle, runtime, PropOpFlags().plusThrowOnError());
  if (LLVM_UNLIKELY(statusRes == ExecutionStatus::EXCEPTION)) {
    return ExecutionStatus::EXCEPTION;
  }
  assert(
      *statusRes && "seal preventExtensions with ThrowOnError returned false");

  // Already sealed?
  if (selfHandle->flags_.sealed)
    return ExecutionStatus::RETURNED;

  auto newClazz = HiddenClass::makeAllNonConfigurable(
      runtime->makeHandle(selfHandle->clazz_), runtime);
  selfHandle->clazz_.set(runtime, *newClazz, &runtime->getHeap());

  selfHandle->flags_.sealed = true;

  return ExecutionStatus::RETURNED;
}

/// Object.freeze(): prevent extensions and make every named property
/// read-only and non-configurable. A frozen object is also sealed.
ExecutionStatus JSObject::freeze(
    Handle<JSObject> selfHandle,
    Runtime *runtime) {
  CallResult<bool> statusRes = JSObject::preventExtensions(
      selfHandle, runtime, PropOpFlags().plusThrowOnError());
  if (LLVM_UNLIKELY(statusRes == ExecutionStatus::EXCEPTION)) {
    return ExecutionStatus::EXCEPTION;
  }
  assert(
      *statusRes &&
      "freeze preventExtensions with ThrowOnError returned false");

  // Already frozen?
  if (selfHandle->flags_.frozen)
    return ExecutionStatus::RETURNED;

  auto newClazz = HiddenClass::makeAllReadOnly(
      runtime->makeHandle(selfHandle->clazz_), runtime);
  selfHandle->clazz_.set(runtime, *newClazz, &runtime->getHeap());

  selfHandle->flags_.frozen = true;
  selfHandle->flags_.sealed = true;

  return ExecutionStatus::RETURNED;
}

/// Clear/set property flags on (optionally a subset of) the object's named
/// properties without creating hidden-class transitions; installs the new
/// hidden class produced by HiddenClass.
void JSObject::updatePropertyFlagsWithoutTransitions(
    Handle<JSObject> selfHandle,
    Runtime *runtime,
    PropertyFlags flagsToClear,
    PropertyFlags flagsToSet,
    OptValue<llvh::ArrayRef<SymbolID>> props) {
  auto newClazz = HiddenClass::updatePropertyFlagsWithoutTransitions(
      runtime->makeHandle(selfHandle->clazz_),
      runtime,
      flagsToClear,
      flagsToSet,
      props);
  selfHandle->clazz_.set(runtime, *newClazz, &runtime->getHeap());
}

/// ES [[IsExtensible]]: dispatches to the Proxy trap when applicable,
/// otherwise reads the noExtend flag.
CallResult<bool> JSObject::isExtensible(
    PseudoHandle<JSObject> self,
    Runtime *runtime) {
  if (LLVM_UNLIKELY(self->isProxyObject())) {
    return JSProxy::isExtensible(runtime->makeHandle(std::move(self)), runtime);
  }
  return self->isExtensible();
}

/// Object.isSealed(): fast paths on the sealed/noExtend flags, then verifies
/// that all named and indexed properties are non-configurable. Caches a
/// positive answer in the sealed flag (continues below).
bool JSObject::isSealed(PseudoHandle<JSObject> self, Runtime *runtime) {
  if (self->flags_.sealed)
    return true;
  if (!self->flags_.noExtend)
    return false;

  auto selfHandle = runtime->makeHandle(std::move(self));

  if (!HiddenClass::areAllNonConfigurable(
          runtime->makeHandle(selfHandle->clazz_), runtime)) {
    return false;
  }

  if (!checkAllOwnIndexed(
          *selfHandle,
          runtime,
          ObjectVTable::CheckAllOwnIndexedMode::NonConfigurable)) {
    return false;
  }

  // Now that we know we are sealed, set the flag.
  selfHandle->flags_.sealed = true;
  return true;
}

/// Object.isFrozen(): fast paths on the frozen/noExtend flags, then verifies
/// that all named and indexed properties are read-only. Caches a positive
/// answer in the frozen and sealed flags (frozen implies sealed).
bool JSObject::isFrozen(PseudoHandle<JSObject> self, Runtime *runtime) {
  if (self->flags_.frozen)
    return true;
  if (!self->flags_.noExtend)
    return false;

  auto selfHandle = runtime->makeHandle(std::move(self));

  if (!HiddenClass::areAllReadOnly(
          runtime->makeHandle(selfHandle->clazz_), runtime)) {
    return false;
  }

  if (!checkAllOwnIndexed(
          *selfHandle,
          runtime,
          ObjectVTable::CheckAllOwnIndexedMode::ReadOnly)) {
    return false;
  }

  // Now that we know we are sealed, set the flag.
  selfHandle->flags_.frozen = true;
  selfHandle->flags_.sealed = true;
  return true;
}

/// Add a brand-new named property to the object. Fails (or throws, per
/// opFlags) if the object is non-extensible, unless InternalForce is set.
/// Translates the "set*" define-flags into concrete PropertyFlags before
/// delegating to addOwnPropertyImpl().
CallResult<bool> JSObject::addOwnProperty(
    Handle<JSObject> selfHandle,
    Runtime *runtime,
    SymbolID name,
    DefinePropertyFlags dpFlags,
    Handle<> valueOrAccessor,
    PropOpFlags opFlags) {
  /// Can we add more properties?
  if (!selfHandle->isExtensible() && !opFlags.getInternalForce()) {
    if (opFlags.getThrowOnError()) {
      return runtime->raiseTypeError(
          TwineChar16("Cannot add new property '") +
          runtime->getIdentifierTable().getStringViewForDev(runtime, name) +
          "'");
    }
    return false;
  }

  PropertyFlags flags{};

  // Accessors don't set writable.
  if (dpFlags.isAccessor()) {
    dpFlags.setWritable = 0;
    flags.accessor = 1;
  }

  // Override the default flags if specified.
  if (dpFlags.setEnumerable)
    flags.enumerable = dpFlags.enumerable;
  if (dpFlags.setWritable)
    flags.writable = dpFlags.writable;
  if (dpFlags.setConfigurable)
    flags.configurable = dpFlags.configurable;
  flags.internalSetter = dpFlags.enableInternalSetter;

  if (LLVM_UNLIKELY(
          addOwnPropertyImpl(
              selfHandle, runtime, name, flags, valueOrAccessor) ==
          ExecutionStatus::EXCEPTION)) {
    return ExecutionStatus::EXCEPTION;
  }

  return true;
}

/// Low-level insertion of a named property: transitions the hidden class,
/// allocates a value slot, and stores the value/accessor. Performs no
/// extensibility or duplicate checking — callers must have validated.
ExecutionStatus JSObject::addOwnPropertyImpl(
    Handle<JSObject> selfHandle,
    Runtime *runtime,
    SymbolID name,
    PropertyFlags propertyFlags,
    Handle<> valueOrAccessor) {
  assert(
      !selfHandle->flags_.proxyObject &&
      "Internal properties cannot be added to Proxy objects");
  // Add a new property to the class.
  // TODO: if we check for OOM here in the future, we must undo the slot
  // allocation.
  auto addResult = HiddenClass::addProperty(
      runtime->makeHandle(selfHandle->clazz_), runtime, name, propertyFlags);
  if (LLVM_UNLIKELY(addResult == ExecutionStatus::EXCEPTION)) {
    return ExecutionStatus::EXCEPTION;
  }
  selfHandle->clazz_.set(runtime, *addResult->first, &runtime->getHeap());

  allocateNewSlotStorage(
      selfHandle, runtime, addResult->second, valueOrAccessor);

  // If this is an index-like property, we need to clear the fast path flags.
  if (LLVM_UNLIKELY(
          selfHandle->clazz_.getNonNull(runtime)->getHasIndexLikeProperties()))
    selfHandle->flags_.fastIndexProperties = false;

  return ExecutionStatus::RETURNED;
}

/// Reconfigure/assign an existing named property according to the define
/// flags: validates the change via checkPropertyUpdate(), updates the
/// property's flags in the hidden class if they changed, then stores the new
/// value or accessor (routing through internalSetter() when the property has
/// one, e.g. Array.length).
CallResult<bool> JSObject::updateOwnProperty(
    Handle<JSObject> selfHandle,
    Runtime *runtime,
    SymbolID name,
    HiddenClass::PropertyPos propertyPos,
    NamedPropertyDescriptor desc,
    const DefinePropertyFlags dpFlags,
    Handle<> valueOrAccessor,
    PropOpFlags opFlags) {
  auto updateStatus = checkPropertyUpdate(
      runtime,
      desc.flags,
      dpFlags,
      getNamedSlotValue(selfHandle.get(), runtime, desc),
      valueOrAccessor,
      opFlags);
  if (updateStatus == ExecutionStatus::EXCEPTION)
    return ExecutionStatus::EXCEPTION;
  if (updateStatus->first == PropertyUpdateStatus::failed)
    return false;

  // If the property flags changed, update them.
  if (updateStatus->second != desc.flags) {
    desc.flags = updateStatus->second;
    auto newClazz = HiddenClass::updateProperty(
        runtime->makeHandle(selfHandle->clazz_),
        runtime,
        propertyPos,
        desc.flags);
    selfHandle->clazz_.set(runtime, *newClazz, &runtime->getHeap());
  }

  if (updateStatus->first == PropertyUpdateStatus::done)
    return true;
  assert(
      updateStatus->first == PropertyUpdateStatus::needSet &&
      "unexpected PropertyUpdateStatus");

  if (dpFlags.setValue) {
    if (LLVM_LIKELY(!desc.flags.internalSetter))
      setNamedSlotValue(selfHandle.get(), runtime, desc, valueOrAccessor.get());
    else
      return internalSetter(
          selfHandle, runtime, name, desc, valueOrAccessor, opFlags);
  } else if (dpFlags.isAccessor()) {
    setNamedSlotValue(selfHandle.get(), runtime, desc, valueOrAccessor.get());
  } else {
    // If checkPropertyUpdate() returned needSet, but there is no value or
    // accessor, clear the value.
    setNamedSlotValue(
        selfHandle.get(), runtime, desc, HermesValue::encodeUndefinedValue());
  }

  return true;
}

/// Validate a property reconfiguration against the current flags/value,
/// following the ES5.1 8.12.9 [[DefineOwnProperty]] validation algorithm
/// (step numbers cited inline). Returns:
///   - {done, flags}    : nothing more to do (no-op or flags-only change);
///   - {needSet, flags} : caller must also store the new value/accessor;
///   - {failed, {}}     : the change is rejected (throws if ThrowOnError).
/// The returned flags are the property's post-update flags.
CallResult<std::pair<JSObject::PropertyUpdateStatus, PropertyFlags>>
JSObject::checkPropertyUpdate(
    Runtime *runtime,
    const PropertyFlags currentFlags,
    DefinePropertyFlags dpFlags,
    const HermesValue curValueOrAccessor,
    Handle<> valueOrAccessor,
    PropOpFlags opFlags) {
  // 8.12.9 [5] Return true, if every field in Desc is absent.
  if (dpFlags.isEmpty())
    return std::make_pair(PropertyUpdateStatus::done, currentFlags);

  assert(
      (!dpFlags.isAccessor() || (!dpFlags.setWritable && !dpFlags.writable)) &&
      "can't set both accessor and writable");
  assert(
      !dpFlags.enableInternalSetter &&
      "cannot change the value of internalSetter");

  // 8.12.9 [6] Return true, if every field in Desc also occurs in current and
  // the value of every field in Desc is the same value as the corresponding
  // field in current when compared using the SameValue algorithm (9.12).
  // TODO: this would probably be much more efficient with bitmasks.
  if ((!dpFlags.setEnumerable ||
       dpFlags.enumerable == currentFlags.enumerable) &&
      (!dpFlags.setConfigurable ||
       dpFlags.configurable == currentFlags.configurable)) {
    if (dpFlags.isAccessor()) {
      if (currentFlags.accessor) {
        auto *curAccessor = vmcast<PropertyAccessor>(curValueOrAccessor);
        auto *newAccessor = vmcast<PropertyAccessor>(valueOrAccessor.get());
        if ((!dpFlags.setGetter ||
             curAccessor->getter == newAccessor->getter) &&
            (!dpFlags.setSetter ||
             curAccessor->setter == newAccessor->setter)) {
          return std::make_pair(PropertyUpdateStatus::done, currentFlags);
        }
      }
    } else {
      if (!currentFlags.accessor &&
          (!dpFlags.setValue ||
           isSameValue(curValueOrAccessor, valueOrAccessor.get())) &&
          (!dpFlags.setWritable ||
           dpFlags.writable == currentFlags.writable)) {
        return std::make_pair(PropertyUpdateStatus::done, currentFlags);
      }
    }
  }

  // 8.12.9 [7]
  // If the property is not configurable, some aspects are not changeable.
  if (!currentFlags.configurable) {
    // Trying to change non-configurable to configurable?
    if (dpFlags.configurable) {
      if (opFlags.getThrowOnError()) {
        return runtime->raiseTypeError(
            "property is not configurable"); // TODO: better message.
      }
      return std::make_pair(PropertyUpdateStatus::failed, PropertyFlags{});
    }

    // Trying to change the enumerability of non-configurable property?
    if (dpFlags.setEnumerable &&
        dpFlags.enumerable != currentFlags.enumerable) {
      if (opFlags.getThrowOnError()) {
        return runtime->raiseTypeError(
            "property is not configurable"); // TODO: better message.
      }
      return std::make_pair(PropertyUpdateStatus::failed, PropertyFlags{});
    }
  }

  PropertyFlags newFlags = currentFlags;

  // 8.12.9 [8] If IsGenericDescriptor(Desc) is true, then no further
  // validation is required.
  if (!(dpFlags.setValue || dpFlags.setWritable || dpFlags.setGetter ||
        dpFlags.setSetter)) {
    // Do nothing
  }
  // 8.12.9 [9]
  // Changing between accessor and data descriptor?
  else if (currentFlags.accessor != dpFlags.isAccessor()) {
    if (!currentFlags.configurable) {
      if (opFlags.getThrowOnError()) {
        return runtime->raiseTypeError(
            "property is not configurable"); // TODO: better message.
      }
      return std::make_pair(PropertyUpdateStatus::failed, PropertyFlags{});
    }

    // If we change from accessor to data descriptor, Preserve the existing
    // values of the converted property's [[Configurable]] and [[Enumerable]]
    // attributes and set the rest of the property's attributes to their
    // default values.
    // If it's the other way around, since the accessor doesn't have the
    // [[Writable]] attribute, do nothing.
    newFlags.writable = 0;

    // If we are changing from accessor to non-accessor, we must set a new
    // value.
    if (!dpFlags.isAccessor())
      dpFlags.setValue = 1;
  }
  // 8.12.9 [10] if both are data descriptors.
  else if (!currentFlags.accessor) {
    if (!currentFlags.configurable) {
      if (!currentFlags.writable) {
        // If the current property is not writable, but the new one is.
        if (dpFlags.writable) {
          if (opFlags.getThrowOnError()) {
            return runtime->raiseTypeError(
                "property is not configurable"); // TODO: better message.
          }
          return std::make_pair(PropertyUpdateStatus::failed, PropertyFlags{});
        }

        // If we are setting a different value.
        if (dpFlags.setValue &&
            !isSameValue(curValueOrAccessor, valueOrAccessor.get())) {
          if (opFlags.getThrowOnError()) {
            return runtime->raiseTypeError(
                "property is not writable"); // TODO: better message.
          }
          return std::make_pair(PropertyUpdateStatus::failed, PropertyFlags{});
        }
      }
    }
  }
  // 8.12.9 [11] Both are accessors.
  else {
    auto *curAccessor = vmcast<PropertyAccessor>(curValueOrAccessor);
    auto *newAccessor = vmcast<PropertyAccessor>(valueOrAccessor.get());

    // If not configurable, make sure that nothing is changing.
    if (!currentFlags.configurable) {
      if ((dpFlags.setGetter && newAccessor->getter != curAccessor->getter) ||
          (dpFlags.setSetter && newAccessor->setter != curAccessor->setter)) {
        if (opFlags.getThrowOnError()) {
          return runtime->raiseTypeError(
              "property is not configurable"); // TODO: better message.
        }
        return std::make_pair(PropertyUpdateStatus::failed, PropertyFlags{});
      }
    }

    // If not setting the getter or the setter, re-use the current one.
    if (!dpFlags.setGetter)
      newAccessor->getter.set(
          runtime, curAccessor->getter, &runtime->getHeap());
    if (!dpFlags.setSetter)
      newAccessor->setter.set(
          runtime, curAccessor->setter, &runtime->getHeap());
  }

  // 8.12.9 [12] For each attribute field of Desc that is present, set the
  // correspondingly named attribute of the property named P of object O to the
  // value of the field.
  if (dpFlags.setEnumerable)
    newFlags.enumerable = dpFlags.enumerable;
  if (dpFlags.setWritable)
    newFlags.writable = dpFlags.writable;
  if (dpFlags.setConfigurable)
    newFlags.configurable = dpFlags.configurable;

  // Decide whether the caller still needs to store a value/accessor.
  if (dpFlags.setValue)
    newFlags.accessor = false;
  else if (dpFlags.isAccessor())
    newFlags.accessor = true;
  else
    return std::make_pair(PropertyUpdateStatus::done, newFlags);

  return std::make_pair(PropertyUpdateStatus::needSet, newFlags);
}

/// Dispatch a write to a property marked with the internalSetter flag.
/// Currently the only such property is JSArray's "length", which routes to
/// JSArray::setLength(); anything else is a VM invariant violation.
CallResult<bool> JSObject::internalSetter(
    Handle<JSObject> selfHandle,
    Runtime *runtime,
    SymbolID name,
    NamedPropertyDescriptor /*desc*/,
    Handle<> value,
    PropOpFlags opFlags) {
  if (vmisa<JSArray>(selfHandle.get())) {
    if (name == Predefined::getSymbolID(Predefined::length)) {
      return JSArray::setLength(
          Handle<JSArray>::vmcast(selfHandle), runtime, value, opFlags);
    }
  }

  llvm_unreachable("unhandled property in Object::internalSetter()");
}

namespace {

/// Helper function to add all the property names of an object (and its
/// prototype chain) to an array, starting at the given index. Only
/// enumerable properties are included. Names already seen on a more-derived
/// object are deduplicated, treating a numeric name and its array-index
/// string form as equal.
/// \return the index after the last property added.
CallResult<uint32_t> appendAllPropertyNames(
    Handle<JSObject> obj,
    Runtime *runtime,
    MutableHandle<BigStorage> &arr,
    uint32_t beginIndex) {
  uint32_t size = beginIndex;
  // We know that duplicate property names can only exist between objects in
  // the prototype chain. Hence there should not be duplicated properties
  // before we start to look at any prototype.
  bool needDedup = false;
  MutableHandle<> prop(runtime);
  MutableHandle<JSObject> head(runtime, obj.get());
  MutableHandle<StringPrimitive> tmpVal{runtime};
  while (head.get()) {
    GCScope gcScope(runtime);

    // enumerableProps will contain all enumerable own properties from obj.
    // Impl note: this is the only place where getOwnPropertyKeys will be
    // called without IncludeNonEnumerable on a Proxy.  Everywhere else,
    // trap ordering is specified but ES9 13.7.5.15 says "The mechanics and
    // order of enumerating the properties is not specified", which is
    // unusual.
    auto cr =
        JSObject::getOwnPropertyNames(head, runtime, true /* onlyEnumerable */);
    if (LLVM_UNLIKELY(cr == ExecutionStatus::EXCEPTION)) {
      return ExecutionStatus::EXCEPTION;
    }
    auto enumerableProps = *cr;
    auto marker = gcScope.createMarker();

    for (unsigned i = 0, e = enumerableProps->getEndIndex(); i < e; ++i) {
      // Flush temporary handles created in the previous iteration.
      gcScope.flushToMarker(marker);
      prop = enumerableProps->at(runtime, i);
      if (!needDedup) {
        // If no dedup is needed, add it directly.
        if (LLVM_UNLIKELY(
                BigStorage::push_back(arr, runtime, prop) ==
                ExecutionStatus::EXCEPTION)) {
          return ExecutionStatus::EXCEPTION;
        }
        ++size;
        continue;
      }
      // Otherwise loop through all existing properties and check if we
      // have seen it before.
      bool dupFound = false;
      if (prop->isNumber()) {
        for (uint32_t j = beginIndex; j < size && !dupFound; ++j) {
          HermesValue val = arr->at(j);
          if (val.isNumber()) {
            dupFound = val.getNumber() == prop->getNumber();
          } else {
            // val is string, prop is number.
            tmpVal = val.getString();
            auto valNum = toArrayIndex(
                StringPrimitive::createStringView(runtime, tmpVal));
            dupFound = valNum && valNum.getValue() == prop->getNumber();
          }
        }
      } else {
        for (uint32_t j = beginIndex; j < size && !dupFound; ++j) {
          HermesValue val = arr->at(j);
          if (val.isNumber()) {
            // val is number, prop is string.
            auto propNum = toArrayIndex(StringPrimitive::createStringView(
                runtime, Handle<StringPrimitive>::vmcast(prop)));
            dupFound = propNum && (propNum.getValue() == val.getNumber());
          } else {
            dupFound = val.getString()->equals(prop->getString());
          }
        }
      }
      if (LLVM_LIKELY(!dupFound)) {
        if (LLVM_UNLIKELY(
                BigStorage::push_back(arr, runtime, prop) ==
                ExecutionStatus::EXCEPTION)) {
          return ExecutionStatus::EXCEPTION;
        }
        ++size;
      }
    }
    // Continue to follow the prototype chain.
    CallResult<PseudoHandle<JSObject>> parentRes =
        JSObject::getPrototypeOf(head, runtime);
    if (LLVM_UNLIKELY(parentRes == ExecutionStatus::EXCEPTION)) {
      return ExecutionStatus::EXCEPTION;
    }
    head = parentRes->get();
    // From now on names may collide with ones already collected.
    needDedup = true;
  }
  return size;
}

/// Adds the hidden classes of the prototype chain of obj to arr,
/// starting with the prototype of obj at index 0, etc., and
/// terminates with null.
///
/// \param obj The object whose prototype chain should be output
/// \param[out] arr The array where the classes will be appended. This
///   array is cleared if any object is unsuitable for caching.
ExecutionStatus setProtoClasses(
    Runtime *runtime,
    Handle<JSObject> obj,
    MutableHandle<BigStorage> &arr) {
  // Layout of a JSArray stored in the for-in cache:
  // [class(proto(obj)), class(proto(proto(obj))), ..., null, prop0, prop1, ...]

  if (!obj->shouldCacheForIn(runtime)) {
    arr->clear(runtime);
    return ExecutionStatus::RETURNED;
  }
  MutableHandle<JSObject> head(runtime, obj->getParent(runtime));
  MutableHandle<> clazz(runtime);
  GCScopeMarkerRAII marker{runtime};
  while (head.get()) {
    if (!head->shouldCacheForIn(runtime)) {
      arr->clear(runtime);
      return ExecutionStatus::RETURNED;
    }
    if (JSObject::Helper::flags(*head).lazyObject) {
      // Ensure all properties have been initialized before caching the hidden
      // class. Not doing this will result in changes to the hidden class
      // when getOwnPropertyKeys is called later.
      JSObject::initializeLazyObject(runtime, head);
    }
    clazz = HermesValue::encodeObjectValue(head->getClass(runtime));
    if (LLVM_UNLIKELY(
            BigStorage::push_back(arr, runtime, clazz) ==
            ExecutionStatus::EXCEPTION)) {
      return ExecutionStatus::EXCEPTION;
    }
    head = head->getParent(runtime);
    marker.flush();
  }
  // Null terminator separates the class prefix from the property names.
  clazz = HermesValue::encodeNullValue();
  return BigStorage::push_back(arr, runtime, clazz);
}

/// Verifies that the classes of obj's prototype chain still match those
/// previously prefixed to arr by setProtoClasses.
///
/// \param obj The object whose prototype chain should be verified
/// \param arr Array previously populated by setProtoClasses
/// \return The index after the terminating null if everything matches,
///   otherwise 0.
uint32_t matchesProtoClasses(
    Runtime *runtime,
    Handle<JSObject> obj,
    Handle<BigStorage> arr) {
  MutableHandle<JSObject> head(runtime, obj->getParent(runtime));
  uint32_t i = 0;
  while (head.get()) {
    HermesValue protoCls = arr->at(i++);
    // Proxies never match: their keys can change without a class change.
    if (protoCls.isNull() || protoCls.getObject() != head->getClass(runtime) ||
        head->isProxyObject()) {
      return 0;
    }
    head = head->getParent(runtime);
  }
  // The chains must both end at the same point.
  if (head || !arr->at(i++).isNull()) {
    return 0;
  }
  assert(i > 0 && "success should be positive");
  return i;
}

} // namespace

/// Produce the list of enumerable property names for a for-in loop over \p
/// obj, using (and populating) the per-hidden-class for-in cache when the
/// object/prototype chain is cacheable. On return, names occupy indices
/// [beginIndex, endIndex) of the returned BigStorage; indices before
/// beginIndex hold the cached prototype hidden classes (see setProtoClasses).
CallResult<Handle<BigStorage>> getForInPropertyNames(
    Runtime *runtime,
    Handle<JSObject> obj,
    uint32_t &beginIndex,
    uint32_t &endIndex) {
  Handle<HiddenClass> clazz(runtime, obj->getClass(runtime));

  // Fast case: Check the cache.
  MutableHandle<BigStorage> arr(runtime, clazz->getForInCache(runtime));
  if (arr) {
    beginIndex = matchesProtoClasses(runtime, obj, arr);
    if (beginIndex) {
      // Cache is valid for this object, so use it.
      endIndex = arr->size();
      return arr;
    }
    // Invalid for this object. We choose to clear the cache since the
    // changes to the prototype chain probably affect other objects too.
    clazz->clearForInCache(runtime);
    // Clear arr to slightly reduce risk of OOM from allocation below.
    arr = nullptr;
  }

  // Slow case: Build the array of properties.
  auto ownPropEstimate = clazz->getNumProperties();
  auto arrRes = obj->shouldCacheForIn(runtime)
      ? BigStorage::createLongLived(runtime, ownPropEstimate)
      : BigStorage::create(runtime, ownPropEstimate);
  if (LLVM_UNLIKELY(arrRes == ExecutionStatus::EXCEPTION)) {
    return ExecutionStatus::EXCEPTION;
  }
  arr = std::move(*arrRes);
  if (setProtoClasses(runtime, obj, arr) == ExecutionStatus::EXCEPTION) {
    return ExecutionStatus::EXCEPTION;
  }
  beginIndex = arr->size();
  // If obj or any of its prototypes are unsuitable for caching, then
  // beginIndex is 0 and we return an array with only the property names.
  bool canCache = beginIndex;
  auto end = appendAllPropertyNames(obj, runtime, arr, beginIndex);
  if (end == ExecutionStatus::EXCEPTION) {
    return ExecutionStatus::EXCEPTION;
  }
  endIndex = *end;
  // Avoid degenerate memory explosion: if > 75% of the array is properties
  // or classes from prototypes, then don't cache it.
  const bool tooMuchProto = *end / 4 > ownPropEstimate;
  if (canCache && !tooMuchProto) {
    assert(beginIndex > 0 && "cached array must start with proto classes");
#ifdef HERMES_SLOW_DEBUG
    assert(beginIndex == matchesProtoClasses(runtime, obj, arr) && "matches");
#endif
    clazz->setForInCache(*arr, runtime);
  }
  return arr;
}

//===----------------------------------------------------------------------===//
// class PropertyAccessor

VTable PropertyAccessor::vt{CellKind::PropertyAccessorKind,
                            cellSize<PropertyAccessor>()};

/// Register the GC-traced fields (getter/setter) with the metadata builder.
void PropertyAccessorBuildMeta(const GCCell *cell, Metadata::Builder &mb) {
  const auto *self = static_cast<const PropertyAccessor *>(cell);
  mb.addField("getter", &self->getter);
  mb.addField("setter", &self->setter);
}

#ifdef HERMESVM_SERIALIZE
/// Deserialization constructor: reads the getter/setter GC pointers as
/// relocations to be patched by the deserializer.
PropertyAccessor::PropertyAccessor(Deserializer &d)
    : GCCell(&d.getRuntime()->getHeap(), &vt) {
  d.readRelocation(&getter, RelocationKind::GCPointer);
  d.readRelocation(&setter, RelocationKind::GCPointer);
}

void PropertyAccessorSerialize(Serializer &s, const GCCell *cell) {
  auto *self = vmcast<const PropertyAccessor>(cell);
  s.writeRelocation(self->getter.get(s.getRuntime()));
  s.writeRelocation(self->setter.get(s.getRuntime()));
  s.endObject(cell);
}

void PropertyAccessorDeserialize(Deserializer &d, CellKind kind) {
  assert(kind == CellKind::PropertyAccessorKind && "Expected PropertyAccessor");
  void *mem = d.getRuntime()->alloc(cellSize<PropertyAccessor>());
  auto *cell = new (mem) PropertyAccessor(d);
  d.endObject(cell);
}
#endif

/// Allocate a new PropertyAccessor cell holding the given getter/setter pair.
CallResult<HermesValue> PropertyAccessor::create(
    Runtime *runtime,
    Handle<Callable> getter,
    Handle<Callable> setter) {
  void *mem = runtime->alloc(cellSize<PropertyAccessor>());
  return HermesValue::encodeObjectValue(
      new (mem) PropertyAccessor(runtime, *getter, *setter));
}

} // namespace vm
} // namespace hermes
./CrossVul/dataset_final_sorted/CWE-843/cpp/bad_4255_1
crossvul-cpp_data_bad_4749_0
/*
   +----------------------------------------------------------------------+
   | HipHop for PHP                                                       |
   +----------------------------------------------------------------------+
   | Copyright (c) 2010-2016 Facebook, Inc. (http://www.facebook.com)     |
   | Copyright (c) 1997-2010 The PHP Group                                |
   +----------------------------------------------------------------------+
   | This source file is subject to version 3.01 of the PHP license,      |
   | that is bundled with this package in the file LICENSE, and is        |
   | available through the world-wide-web at the following url:           |
   | http://www.php.net/license/3_01.txt                                  |
   | If you did not receive a copy of the PHP license and are unable to   |
   | obtain it through the world-wide-web, please send a note to          |
   | license@php.net so we can mail you a copy immediately.               |
   +----------------------------------------------------------------------+
*/

#include "hphp/runtime/ext/extension.h"
#include "hphp/runtime/base/runtime-error.h"
#include "hphp/runtime/ext/std/ext_std_math.h"

#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>

#define NON_FREE
#define MCRYPT2
#include <mcrypt.h>

namespace HPHP {

///////////////////////////////////////////////////////////////////////////////

/**
 * Wraps a libmcrypt MCRYPT handle as an HHVM resource.
 * m_init tracks whether mcrypt_generic_init() has succeeded, so that
 * mcrypt_generic()/mdecrypt_generic() can refuse to run on an
 * uninitialized handle.
 */
struct MCrypt : SweepableResourceData {
  explicit MCrypt(MCRYPT td) : m_td(td), m_init(false) {}

  ~MCrypt() {
    MCrypt::close();
  }

  bool isInvalid() const override {
    return m_td == MCRYPT_FAILED;
  }

  // Deinitializes and closes the underlying module; idempotent.
  void close() {
    if (m_td != MCRYPT_FAILED) {
      mcrypt_generic_deinit(m_td);
      mcrypt_module_close(m_td);
      m_td = MCRYPT_FAILED;
    }
  }

  CLASSNAME_IS("mcrypt");
  // overriding ResourceData
  const String& o_getClassNameHook() const override { return classnameof(); }

  DECLARE_RESOURCE_ALLOCATION(MCrypt)

public:
  MCRYPT m_td;
  bool m_init;
};

IMPLEMENT_RESOURCE_ALLOCATION(MCrypt)

// IV entropy sources exposed as MCRYPT_DEV_RANDOM / MCRYPT_DEV_URANDOM /
// MCRYPT_RAND.
typedef enum {
  RANDOM = 0,
  URANDOM,
  RAND
} iv_source;

struct mcrypt_data {
  std::string algorithms_dir;
  std::string modes_dir;
};
static mcrypt_data s_globals;
#define MCG(n) (s_globals.n)

#ifndef MIN
#define MIN(a, b) ((a) < (b) ? (a) : (b))
#endif

#define MCRYPT_OPEN_MODULE_FAILED(str) \
  raise_warning("%s(): Module initialization failed", str);

/**
 * Shared worker for all one-shot encrypt/decrypt entry points
 * (mcrypt_encrypt, mcrypt_decrypt, mcrypt_cbc, ...).
 *
 * @param cipher    algorithm name, e.g. "rijndael-128"
 * @param key       raw key material (padded/truncated to a supported size)
 * @param data      plaintext or ciphertext
 * @param mode      mode name, e.g. "cbc"
 * @param iv        initialization vector (must match the block size)
 * @param dencrypt  true to decrypt, false to encrypt
 * @param name      calling function's name, used in warnings
 * @return the transformed data as a String, or false on failure
 */
static Variant php_mcrypt_do_crypt(const String& cipher, const String& key,
                                   const String& data, const String& mode,
                                   const String& iv, bool dencrypt,
                                   const char *name) {
  MCRYPT td = mcrypt_module_open((char*)cipher.data(),
                                 (char*)MCG(algorithms_dir).data(),
                                 (char*)mode.data(),
                                 (char*)MCG(modes_dir).data());
  if (td == MCRYPT_FAILED) {
    MCRYPT_OPEN_MODULE_FAILED(name);
    return false;
  }

  /* Checking for key-length */
  int max_key_length = mcrypt_enc_get_key_size(td);
  if (key.size() > max_key_length) {
    raise_warning("Size of key is too large for this algorithm");
  }
  int count;
  int *key_length_sizes = mcrypt_enc_get_supported_key_sizes(td, &count);

  int use_key_length;
  char *key_s = nullptr;
  if (count == 0 && key_length_sizes == nullptr) {
    // all lengths 1 - k_l_s = OK
    use_key_length = key.size();
    key_s = (char*)malloc(use_key_length);
    memcpy(key_s, key.data(), use_key_length);
  } else if (count == 1) {
    /* only m_k_l = OK */
    key_s = (char*)malloc(key_length_sizes[0]);
    memset(key_s, 0, key_length_sizes[0]);
    memcpy(key_s, key.data(), MIN(key.size(), key_length_sizes[0]));
    use_key_length = key_length_sizes[0];
  } else {
    /* determine smallest supported key > length of requested key */
    use_key_length = max_key_length; /* start with max key length */
    for (int i = 0; i < count; i++) {
      if (key_length_sizes[i] >= key.size() &&
          key_length_sizes[i] < use_key_length) {
        use_key_length = key_length_sizes[i];
      }
    }
    key_s = (char*)malloc(use_key_length);
    memset(key_s, 0, use_key_length);
    memcpy(key_s, key.data(), MIN(key.size(), use_key_length));
  }
  mcrypt_free(key_length_sizes);

  /* Check IV */
  char *iv_s = nullptr;
  int iv_size = mcrypt_enc_get_iv_size(td);

  /* IV is required */
  if (mcrypt_enc_mode_has_iv(td) == 1) {
    if (!iv.empty()) {
      if (iv_size != iv.size()) {
        raise_warning("%s(): The IV parameter must be as long as "
                      "the blocksize", name);
      } else {
        iv_s = (char*)malloc(iv_size + 1);
        memcpy(iv_s, iv.data(), iv_size);
      }
    } else {
      raise_warning("%s(): The IV parameter must be as long as "
                    "the blocksize", name);
      iv_s = (char*)malloc(iv_size + 1);
      memset(iv_s, 0, iv_size + 1);
    }
  }

  int block_size;
  unsigned long int data_size;
  String s;
  char *data_s;
  /* Check blocksize */
  if (mcrypt_enc_is_block_mode(td) == 1) { /* It's a block algorithm */
    block_size = mcrypt_enc_get_block_size(td);
    // Round the buffer up to a whole number of blocks; the tail is
    // zero-padded.
    data_size = (((data.size() - 1) / block_size) + 1) * block_size;
    s = String(data_size, ReserveString);
    data_s = (char*)s.mutableData();
    memset(data_s, 0, data_size);
    memcpy(data_s, data.data(), data.size());
  } else { /* It's not a block algorithm */
    data_size = data.size();
    s = String(data_size, ReserveString);
    data_s = (char*)s.mutableData();
    memcpy(data_s, data.data(), data.size());
  }

  if (mcrypt_generic_init(td, key_s, use_key_length, iv_s) < 0) {
    raise_warning("Mcrypt initialisation failed");
    // Bug fix: this early-return path previously leaked key_s, iv_s and
    // the open module handle.
    if (key_s != nullptr) {
      free(key_s);
    }
    if (iv_s != nullptr) {
      free(iv_s);
    }
    mcrypt_module_close(td);
    return false;
  }
  if (dencrypt) {
    mdecrypt_generic(td, data_s, data_size);
  } else {
    mcrypt_generic(td, data_s, data_size);
  }

  /* freeing vars */
  // mcrypt_generic_end() both deinitializes and closes the module handle.
  mcrypt_generic_end(td);
  if (key_s != nullptr) {
    free(key_s);
  }
  if (iv_s != nullptr) {
    free(iv_s);
  }
  s.setSize(data_size);
  return s;
}

// Casts a generic Resource to our MCrypt wrapper, warning on mismatch or
// an already-closed handle.
static req::ptr<MCrypt> get_valid_mcrypt_resource(const Resource& td) {
  auto pm = dyn_cast_or_null<MCrypt>(td);
  if (pm == nullptr || pm->isInvalid()) {
    raise_warning("supplied argument is not a valid MCrypt resource");
    return nullptr;
  }
  return pm;
}

/**
 * Shared worker for mcrypt_generic()/mdecrypt_generic(): runs the
 * streaming crypt operation on an initialized resource.
 * Renamed from the original overload of mcrypt_generic() to avoid
 * shadowing/confusion with the identically-named libmcrypt C function.
 */
static Variant php_mcrypt_generic(const Resource& td, const String& data,
                                  bool dencrypt) {
  auto pm = get_valid_mcrypt_resource(td);
  if (!pm) {
    return false;
  } else if (!pm->m_init) {
    raise_warning("Operation disallowed prior to mcrypt_generic_init().");
    return false;
  }

  if (data.empty()) {
    raise_warning("An empty string was passed");
    return false;
  }

  String s;
  unsigned char* data_s;
  int block_size, data_size;
  /* Check blocksize */
  if (mcrypt_enc_is_block_mode(pm->m_td) == 1) { /* It's a block algorithm */
    block_size = mcrypt_enc_get_block_size(pm->m_td);
    data_size = (((data.size() - 1) / block_size) + 1) * block_size;
    s = String(data_size, ReserveString);
    data_s = (unsigned char *)s.mutableData();
    memset(data_s, 0, data_size);
    memcpy(data_s, data.data(), data.size());
  } else { /* It's not a block algorithm */
    data_size = data.size();
    s = String(data_size, ReserveString);
    data_s = (unsigned char *)s.mutableData();
    memcpy(data_s, data.data(), data.size());
  }

  if (dencrypt) {
    mdecrypt_generic(pm->m_td, data_s, data_size);
  } else {
    mcrypt_generic(pm->m_td, data_s, data_size);
  }
  s.setSize(data_size);
  return s;
}

///////////////////////////////////////////////////////////////////////////////

Variant HHVM_FUNCTION(mcrypt_module_open, const String& algorithm,
                                          const String& algorithm_directory,
                                          const String& mode,
                                          const String& mode_directory) {
  MCRYPT td = mcrypt_module_open
    ((char*)algorithm.data(),
     (char*)(algorithm_directory.empty() ? MCG(algorithms_dir).data()
                                         : algorithm_directory.data()),
     (char*)mode.data(),
     (char*)(mode_directory.empty() ? (char*)MCG(modes_dir).data()
                                    : mode_directory.data()));

  if (td == MCRYPT_FAILED) {
    raise_warning("Could not open encryption module");
    return false;
  }

  return Variant(req::make<MCrypt>(td));
}

bool HHVM_FUNCTION(mcrypt_module_close, const Resource& td) {
  auto pm = get_valid_mcrypt_resource(td);
  if (!pm) {
    return false;
  }
  pm->close();
  return true;
}

Array HHVM_FUNCTION(mcrypt_list_algorithms,
                    const String& lib_dir /* = null_string */) {
  String dir = lib_dir.empty() ? String(MCG(algorithms_dir)) : lib_dir;

  int count = 0;
  char **modules = mcrypt_list_algorithms((char*)dir.data(), &count);
  if (count == 0) {
    raise_warning("No algorithms found in module dir");
  }
  Array ret = Array::Create();
  for (int i = 0; i < count; i++) {
    ret.append(String(modules[i], CopyString));
  }
  mcrypt_free_p(modules, count);
  return ret;
}

Array HHVM_FUNCTION(mcrypt_list_modes,
                    const String& lib_dir /* = null_string */) {
  String dir = lib_dir.empty() ? String(MCG(modes_dir)) : lib_dir;

  int count = 0;
  char **modules = mcrypt_list_modes((char*)dir.data(), &count);
  if (count == 0) {
    raise_warning("No modes found in module dir");
  }
  Array ret = Array::Create();
  for (int i = 0; i < count; i++) {
    ret.append(String(modules[i], CopyString));
  }
  mcrypt_free_p(modules, count);
  return ret;
}

int64_t HHVM_FUNCTION(mcrypt_module_get_algo_block_size,
                      const String& algorithm,
                      const String& lib_dir /* = null_string */) {
  String dir = lib_dir.empty() ? String(MCG(algorithms_dir)) : lib_dir;
  return mcrypt_module_get_algo_block_size((char*)algorithm.data(),
                                           (char*)dir.data());
}

int64_t HHVM_FUNCTION(mcrypt_module_get_algo_key_size,
                      const String& algorithm,
                      const String& lib_dir /* = null_string */) {
  String dir = lib_dir.empty() ? String(MCG(algorithms_dir)) : lib_dir;
  return mcrypt_module_get_algo_key_size((char*)algorithm.data(),
                                         (char*)dir.data());
}

Array HHVM_FUNCTION(mcrypt_module_get_supported_key_sizes,
                    const String& algorithm,
                    const String& lib_dir /* = null_string */) {
  String dir = lib_dir.empty() ? String(MCG(algorithms_dir)) : lib_dir;

  int count = 0;
  int *key_sizes = mcrypt_module_get_algo_supported_key_sizes
    ((char*)algorithm.data(), (char*)dir.data(), &count);

  Array ret = Array::Create();
  for (int i = 0; i < count; i++) {
    ret.append(key_sizes[i]);
  }
  mcrypt_free(key_sizes);
  return ret;
}

bool HHVM_FUNCTION(mcrypt_module_is_block_algorithm_mode,
                   const String& mode,
                   const String& lib_dir /* = null_string */) {
  String dir = lib_dir.empty() ? String(MCG(modes_dir)) : lib_dir;
  return mcrypt_module_is_block_algorithm_mode((char*)mode.data(),
                                               (char*)dir.data()) == 1;
}

bool HHVM_FUNCTION(mcrypt_module_is_block_algorithm,
                   const String& algorithm,
                   const String& lib_dir /* = null_string */) {
  String dir = lib_dir.empty() ? String(MCG(algorithms_dir)) : lib_dir;
  return mcrypt_module_is_block_algorithm((char*)algorithm.data(),
                                          (char*)dir.data()) == 1;
}

bool HHVM_FUNCTION(mcrypt_module_is_block_mode,
                   const String& mode,
                   const String& lib_dir /* = null_string */) {
  String dir = lib_dir.empty() ? String(MCG(modes_dir)) : lib_dir;
  return mcrypt_module_is_block_mode((char*)mode.data(),
                                     (char*)dir.data()) == 1;
}

bool HHVM_FUNCTION(mcrypt_module_self_test,
                   const String& algorithm,
                   const String& lib_dir /* = null_string */) {
  String dir = lib_dir.empty() ? String(MCG(algorithms_dir)) : lib_dir;
  // libmcrypt returns 0 on success.
  return mcrypt_module_self_test((char*)algorithm.data(),
                                 (char*)dir.data()) == 0;
}

Variant HHVM_FUNCTION(mcrypt_create_iv, int size, int source /* = 0 */) {
  if (size <= 0 || size >= INT_MAX) {
    raise_warning("Can not create an IV with a size of less than 1 or "
                  "greater than %d", INT_MAX);
    return false;
  }

  int n = 0;
  char *iv = (char*)calloc(size + 1, 1);
  if (source == RANDOM || source == URANDOM) {
    int fd = open(source == RANDOM ? "/dev/random" : "/dev/urandom",
                  O_RDONLY);
    if (fd < 0) {
      free(iv);
      raise_warning("Cannot open source device");
      return false;
    }
    int read_bytes;
    // read() may return short counts; loop until the buffer is full or
    // an error occurs (n < 0 terminates the loop).
    for (read_bytes = 0; read_bytes < size && n >= 0; read_bytes += n) {
      n = read(fd, iv + read_bytes, size - read_bytes);
    }
    n = read_bytes;
    close(fd);
    if (n < size) {
      free(iv);
      raise_warning("Could not gather sufficient random data");
      return false;
    }
  } else {
    n = size;
    while (size) {
      // Use userspace rand() function because it handles auto-seeding
      iv[--size] = (char)HHVM_FN(rand)(0, 255);
    }
  }
  return String(iv, n, AttachString);
}

Variant HHVM_FUNCTION(mcrypt_encrypt, const String& cipher, const String& key,
                                      const String& data, const String& mode,
                                      const Variant& viv /* = null_string */) {
  String iv = viv.toString();
  return php_mcrypt_do_crypt(cipher, key, data, mode, iv, false,
                             "mcrypt_encrypt");
}

Variant HHVM_FUNCTION(mcrypt_decrypt, const String& cipher, const String& key,
                                      const String& data, const String& mode,
                                      const Variant& viv /* = null_string */) {
  String iv = viv.toString();
  return php_mcrypt_do_crypt(cipher, key, data, mode, iv, true,
                             "mcrypt_decrypt");
}

Variant HHVM_FUNCTION(mcrypt_cbc, const String& cipher, const String& key,
                                  const String& data, const Variant& mode,
                                  const Variant& viv /* = null_string */) {
  raise_deprecated("Function mcrypt_cbc() is deprecated");
  String iv = viv.toString();
  return php_mcrypt_do_crypt(cipher, key, data, "cbc", iv, mode.toInt32(),
                             "mcrypt_cbc");
}

Variant HHVM_FUNCTION(mcrypt_cfb, const String& cipher, const String& key,
                                  const String& data, const Variant& mode,
                                  const Variant& viv /* = null_string */) {
  raise_deprecated("Function mcrypt_cfb() is deprecated");
  String iv = viv.toString();
  return php_mcrypt_do_crypt(cipher, key, data, "cfb", iv, mode.toInt32(),
                             "mcrypt_cfb");
}

Variant HHVM_FUNCTION(mcrypt_ecb, const String& cipher, const String& key,
                                  const String& data, const Variant& mode,
                                  const Variant& viv /* = null_string */) {
  raise_deprecated("Function mcrypt_ecb() is deprecated");
  String iv = viv.toString();
  return php_mcrypt_do_crypt(cipher, key, data, "ecb", iv, mode.toInt32(),
                             "mcrypt_ecb");
}

Variant HHVM_FUNCTION(mcrypt_ofb, const String& cipher, const String& key,
                                  const String& data, const Variant& mode,
                                  const Variant& viv /* = null_string */) {
  raise_deprecated("Function mcrypt_ofb() is deprecated");
  String iv = viv.toString();
  return php_mcrypt_do_crypt(cipher, key, data, "ofb", iv, mode.toInt32(),
                             "mcrypt_ofb");
}

Variant HHVM_FUNCTION(mcrypt_get_block_size, const String& cipher,
                      const Variant& module /* = null_string */) {
  // Security fix (type confusion): module is a Variant and may hold any
  // PHP value; calling asCStrRef() on a non-string is undefined behavior.
  // Coerce to a String instead.
  String mode = module.toString();
  MCRYPT td = mcrypt_module_open((char*)cipher.data(),
                                 (char*)MCG(algorithms_dir).data(),
                                 (char*)mode.data(),
                                 (char*)MCG(modes_dir).data());
  if (td == MCRYPT_FAILED) {
    MCRYPT_OPEN_MODULE_FAILED("mcrypt_get_block_size");
    return false;
  }

  int64_t ret = mcrypt_enc_get_block_size(td);
  mcrypt_module_close(td);
  return ret;
}

Variant HHVM_FUNCTION(mcrypt_get_cipher_name, const String& cipher) {
  // Try "ecb" first, then fall back to "stream" for stream ciphers.
  MCRYPT td = mcrypt_module_open((char*)cipher.data(),
                                 (char*)MCG(algorithms_dir).data(),
                                 (char*)"ecb",
                                 (char*)MCG(modes_dir).data());
  if (td == MCRYPT_FAILED) {
    td = mcrypt_module_open((char*)cipher.data(),
                            (char*)MCG(algorithms_dir).data(),
                            (char*)"stream",
                            (char*)MCG(modes_dir).data());
    if (td == MCRYPT_FAILED) {
      MCRYPT_OPEN_MODULE_FAILED("mcrypt_get_cipher_name");
      return false;
    }
  }

  char *cipher_name = mcrypt_enc_get_algorithms_name(td);
  mcrypt_module_close(td);
  String ret(cipher_name, CopyString);
  mcrypt_free(cipher_name);
  return ret;
}

Variant HHVM_FUNCTION(mcrypt_get_iv_size, const String& cipher,
                                          const String& mode) {
  MCRYPT td = mcrypt_module_open((char*)cipher.data(),
                                 (char*)MCG(algorithms_dir).data(),
                                 (char*)mode.data(),
                                 (char*)MCG(modes_dir).data());
  if (td == MCRYPT_FAILED) {
    MCRYPT_OPEN_MODULE_FAILED("mcrypt_get_iv_size");
    return false;
  }

  int64_t ret = mcrypt_enc_get_iv_size(td);
  mcrypt_module_close(td);
  return ret;
}

Variant HHVM_FUNCTION(mcrypt_get_key_size, const String& cipher,
                                           const String& module) {
  MCRYPT td = mcrypt_module_open((char*)cipher.data(),
                                 (char*)MCG(algorithms_dir).data(),
                                 (char*)module.data(),
                                 (char*)MCG(modes_dir).data());
  if (td == MCRYPT_FAILED) {
    MCRYPT_OPEN_MODULE_FAILED("mcrypt_get_key_size");
    return false;
  }

  int64_t ret = mcrypt_enc_get_key_size(td);
  mcrypt_module_close(td);
  return ret;
}

Variant HHVM_FUNCTION(mcrypt_enc_get_algorithms_name, const Resource& td) {
  auto pm = get_valid_mcrypt_resource(td);
  if (!pm) {
    return false;
  }
  char *name = mcrypt_enc_get_algorithms_name(pm->m_td);
  String ret(name, CopyString);
  mcrypt_free(name);
  return ret;
}

Variant HHVM_FUNCTION(mcrypt_enc_get_block_size, const Resource& td) {
  auto pm = get_valid_mcrypt_resource(td);
  if (!pm) {
    return false;
  }
  return mcrypt_enc_get_block_size(pm->m_td);
}

Variant HHVM_FUNCTION(mcrypt_enc_get_iv_size, const Resource& td) {
  auto pm = get_valid_mcrypt_resource(td);
  if (!pm) {
    return false;
  }
  return mcrypt_enc_get_iv_size(pm->m_td);
}

Variant HHVM_FUNCTION(mcrypt_enc_get_key_size, const Resource& td) {
  auto pm = get_valid_mcrypt_resource(td);
  if (!pm) {
    return false;
  }
  return mcrypt_enc_get_key_size(pm->m_td);
}

Variant HHVM_FUNCTION(mcrypt_enc_get_modes_name, const Resource& td) {
  auto pm = get_valid_mcrypt_resource(td);
  if (!pm) {
    return false;
  }
  char *name = mcrypt_enc_get_modes_name(pm->m_td);
  String ret(name, CopyString);
  mcrypt_free(name);
  return ret;
}

Variant HHVM_FUNCTION(mcrypt_enc_get_supported_key_sizes, const Resource& td) {
  auto pm = get_valid_mcrypt_resource(td);
  if (!pm) {
    return false;
  }
  int count = 0;
  int *key_sizes = mcrypt_enc_get_supported_key_sizes(pm->m_td, &count);

  Array ret = Array::Create();
  for (int i = 0; i < count; i++) {
    ret.append(key_sizes[i]);
  }
  mcrypt_free(key_sizes);
  return ret;
}

bool HHVM_FUNCTION(mcrypt_enc_is_block_algorithm_mode, const Resource& td) {
  auto pm = get_valid_mcrypt_resource(td);
  if (!pm) {
    return false;
  }
  return mcrypt_enc_is_block_algorithm_mode(pm->m_td) == 1;
}

bool HHVM_FUNCTION(mcrypt_enc_is_block_algorithm, const Resource& td) {
  auto pm = get_valid_mcrypt_resource(td);
  if (!pm) {
    return false;
  }
  return mcrypt_enc_is_block_algorithm(pm->m_td) == 1;
}

bool HHVM_FUNCTION(mcrypt_enc_is_block_mode, const Resource& td) {
  auto pm = get_valid_mcrypt_resource(td);
  if (!pm) {
    return false;
  }
  return mcrypt_enc_is_block_mode(pm->m_td) == 1;
}

Variant HHVM_FUNCTION(mcrypt_enc_self_test, const Resource& td) {
  auto pm = get_valid_mcrypt_resource(td);
  if (!pm) {
    return false;
  }
  return mcrypt_enc_self_test(pm->m_td);
}

Variant HHVM_FUNCTION(mcrypt_generic_init, const Resource& td,
                                           const String& key,
                                           const String& iv) {
  auto pm = get_valid_mcrypt_resource(td);
  if (!pm) {
    return false;
  }

  int max_key_size = mcrypt_enc_get_key_size(pm->m_td);
  int iv_size = mcrypt_enc_get_iv_size(pm->m_td);

  if (key.empty()) {
    raise_warning("Key size is 0");
  }

  unsigned char *key_s = (unsigned char *)malloc(key.size());
  memset(key_s, 0, key.size());

  unsigned char *iv_s = (unsigned char *)malloc(iv_size + 1);
  memset(iv_s, 0, iv_size + 1);

  int key_size;
  if (key.size() > max_key_size) {
    raise_warning("Key size too large; supplied length: %d, max: %d",
                    key.size(), max_key_size);
    key_size = max_key_size;
  } else {
    key_size = key.size();
  }
  memcpy(key_s, key.data(), key.size());

  if (iv.size() != iv_size) {
    raise_warning("Iv size incorrect; supplied length: %d, needed: %d",
                    iv.size(), iv_size);
  }
  // Copy at most iv_size bytes; a short IV leaves the tail zeroed.
  memcpy(iv_s, iv.data(), std::min(iv_size, iv.size()));

  mcrypt_generic_deinit(pm->m_td);
  int result = mcrypt_generic_init(pm->m_td, key_s, key_size, iv_s);

  /* If this function fails, close the mcrypt module to prevent crashes
   * when further functions want to access this resource */
  if (result < 0) {
    pm->close();
    switch (result) {
    case -3:
      raise_warning("Key length incorrect");
      break;
    case -4:
      raise_warning("Memory allocation error");
      break;
    case -1:
    default:
      raise_warning("Unknown error");
      break;
    }
  } else {
    pm->m_init = true;
  }

  free(iv_s);
  free(key_s);
  return result;
}

Variant HHVM_FUNCTION(mcrypt_generic, const Resource& td, const String& data) {
  return php_mcrypt_generic(td, data, false);
}

Variant HHVM_FUNCTION(mdecrypt_generic, const Resource& td,
                                        const String& data) {
  return php_mcrypt_generic(td, data, true);
}

bool HHVM_FUNCTION(mcrypt_generic_deinit, const Resource& td) {
  auto pm = get_valid_mcrypt_resource(td);
  if (!pm) {
    return false;
  }
  if (mcrypt_generic_deinit(pm->m_td) < 0) {
    raise_warning("Could not terminate encryption specifier");
    return false;
  }
  pm->m_init = false;
  return true;
}

bool HHVM_FUNCTION(mcrypt_generic_end, const Resource& td) {
  return HHVM_FUNCTION(mcrypt_generic_deinit, td);
}

///////////////////////////////////////////////////////////////////////////////

struct McryptExtension final : Extension {
  McryptExtension() : Extension("mcrypt") {}
  void moduleInit() override {
    HHVM_RC_STR(MCRYPT_3DES, "tripledes");
    HHVM_RC_STR(MCRYPT_ARCFOUR, "arcfour");
    HHVM_RC_STR(MCRYPT_ARCFOUR_IV, "arcfour-iv");
    HHVM_RC_STR(MCRYPT_BLOWFISH, "blowfish");
    HHVM_RC_STR(MCRYPT_BLOWFISH_COMPAT, "blowfish-compat");
    HHVM_RC_STR(MCRYPT_CAST_128, "cast-128");
    HHVM_RC_STR(MCRYPT_CAST_256, "cast-256");
    HHVM_RC_STR(MCRYPT_CRYPT, "crypt");
    HHVM_RC_INT(MCRYPT_DECRYPT, 1);
    HHVM_RC_STR(MCRYPT_DES, "des");
    HHVM_RC_INT(MCRYPT_DEV_RANDOM, RANDOM);
    HHVM_RC_INT(MCRYPT_DEV_URANDOM, URANDOM);
    HHVM_RC_INT(MCRYPT_ENCRYPT, 0);
    HHVM_RC_STR(MCRYPT_ENIGNA, "crypt");
    HHVM_RC_STR(MCRYPT_GOST, "gost");
    HHVM_RC_STR(MCRYPT_IDEA, "idea");
    HHVM_RC_STR(MCRYPT_LOKI97, "loki97");
    HHVM_RC_STR(MCRYPT_MARS, "mars");
    HHVM_RC_STR(MCRYPT_MODE_CBC, "cbc");
    HHVM_RC_STR(MCRYPT_MODE_CFB, "cfb");
    HHVM_RC_STR(MCRYPT_MODE_ECB, "ecb");
    HHVM_RC_STR(MCRYPT_MODE_NOFB, "nofb");
    HHVM_RC_STR(MCRYPT_MODE_OFB, "ofb");
    HHVM_RC_STR(MCRYPT_MODE_STREAM, "stream");
    HHVM_RC_STR(MCRYPT_PANAMA, "panama");
    HHVM_RC_INT(MCRYPT_RAND, RAND);
    HHVM_RC_STR(MCRYPT_RC2, "rc2");
    HHVM_RC_STR(MCRYPT_RC6, "rc6");
    HHVM_RC_STR(MCRYPT_RIJNDAEL_128, "rijndael-128");
    HHVM_RC_STR(MCRYPT_RIJNDAEL_192, "rijndael-192");
    HHVM_RC_STR(MCRYPT_RIJNDAEL_256, "rijndael-256");
    HHVM_RC_STR(MCRYPT_SAFER128, "safer-sk128");
    HHVM_RC_STR(MCRYPT_SAFER64, "safer-sk64");
    HHVM_RC_STR(MCRYPT_SAFERPLUS, "saferplus");
    HHVM_RC_STR(MCRYPT_SERPENT, "serpent");
    HHVM_RC_STR(MCRYPT_SKIPJACK, "skipjack");
    HHVM_RC_STR(MCRYPT_THREEWAY, "threeway");
    HHVM_RC_STR(MCRYPT_TRIPLEDES, "tripledes");
    HHVM_RC_STR(MCRYPT_TWOFISH, "twofish");
    HHVM_RC_STR(MCRYPT_WAKE, "wake");
    HHVM_RC_STR(MCRYPT_XTEA, "xtea");

    HHVM_FE(mcrypt_module_open);
    HHVM_FE(mcrypt_module_close);
    HHVM_FE(mcrypt_list_algorithms);
    HHVM_FE(mcrypt_list_modes);
    HHVM_FE(mcrypt_module_get_algo_block_size);
    HHVM_FE(mcrypt_module_get_algo_key_size);
    HHVM_FE(mcrypt_module_get_supported_key_sizes);
    HHVM_FE(mcrypt_module_is_block_algorithm_mode);
    HHVM_FE(mcrypt_module_is_block_algorithm);
    HHVM_FE(mcrypt_module_is_block_mode);
    HHVM_FE(mcrypt_module_self_test);
    HHVM_FE(mcrypt_create_iv);
    HHVM_FE(mcrypt_encrypt);
    HHVM_FE(mcrypt_decrypt);
    HHVM_FE(mcrypt_cbc);
    HHVM_FE(mcrypt_cfb);
    HHVM_FE(mcrypt_ecb);
    HHVM_FE(mcrypt_ofb);
    HHVM_FE(mcrypt_get_block_size);
    HHVM_FE(mcrypt_get_cipher_name);
    HHVM_FE(mcrypt_get_iv_size);
    HHVM_FE(mcrypt_get_key_size);
    HHVM_FE(mcrypt_enc_get_algorithms_name);
    HHVM_FE(mcrypt_enc_get_block_size);
    HHVM_FE(mcrypt_enc_get_iv_size);
    HHVM_FE(mcrypt_enc_get_key_size);
    HHVM_FE(mcrypt_enc_get_modes_name);
    HHVM_FE(mcrypt_enc_get_supported_key_sizes);
    HHVM_FE(mcrypt_enc_is_block_algorithm_mode);
    HHVM_FE(mcrypt_enc_is_block_algorithm);
    HHVM_FE(mcrypt_enc_is_block_mode);
    HHVM_FE(mcrypt_enc_self_test);
    HHVM_FE(mcrypt_generic_init);
    HHVM_FE(mcrypt_generic);
    HHVM_FE(mdecrypt_generic);
    HHVM_FE(mcrypt_generic_deinit);
    HHVM_FE(mcrypt_generic_end);

    loadSystemlib();
  }
} s_mcrypt_extension;

///////////////////////////////////////////////////////////////////////////////
}
./CrossVul/dataset_final_sorted/CWE-843/cpp/bad_4749_0
crossvul-cpp_data_good_2808_0
/*****************************************************************
|
|    AP4 - sample entries
|
|    Copyright 2002-2008 Axiomatic Systems, LLC
|
|
|    This file is part of Bento4/AP4 (MP4 Atom Processing Library).
|
|    Unless you have obtained Bento4 under a difference license,
|    this version of Bento4 is Bento4|GPL.
|    Bento4|GPL is free software; you can redistribute it and/or modify
|    it under the terms of the GNU General Public License as published by
|    the Free Software Foundation; either version 2, or (at your option)
|    any later version.
|
|    Bento4|GPL is distributed in the hope that it will be useful,
|    but WITHOUT ANY WARRANTY; without even the implied warranty of
|    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
|    GNU General Public License for more details.
|
|    You should have received a copy of the GNU General Public License
|    along with Bento4|GPL; see the file COPYING.  If not, write to the
|    Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA
|    02111-1307, USA.
|
****************************************************************/

/*----------------------------------------------------------------------
|   includes
+---------------------------------------------------------------------*/
#include "Ap4SampleEntry.h"
#include "Ap4Utils.h"
#include "Ap4AtomFactory.h"
#include "Ap4TimsAtom.h"
#include "Ap4SampleDescription.h"
#include "Ap4AvccAtom.h"

/*----------------------------------------------------------------------
|   dynamic cast support
+---------------------------------------------------------------------*/
AP4_DEFINE_DYNAMIC_CAST_ANCHOR(AP4_SampleEntry)

/*----------------------------------------------------------------------
|   AP4_SampleEntry::AP4_SampleEntry
|
|   Build a sample entry from a format code, optionally copying child
|   atoms from a details container.
+---------------------------------------------------------------------*/
AP4_SampleEntry::AP4_SampleEntry(AP4_Atom::Type format,
                                 const AP4_AtomParent* details) :
    AP4_ContainerAtom(format),
    m_DataReferenceIndex(1)
{
    // clear the six reserved bytes
    for (unsigned int i = 0; i < sizeof(m_Reserved1); i++) {
        m_Reserved1[i] = 0;
    }

    // account for the 8 bytes of sample-entry fields
    m_Size32 += 8;

    if (details) {
        details->CopyChildren(*this);
    }
}

/*----------------------------------------------------------------------
|   AP4_SampleEntry::AP4_SampleEntry
|
|   Build a sample entry with a known total size (no children yet).
+---------------------------------------------------------------------*/
AP4_SampleEntry::AP4_SampleEntry(AP4_Atom::Type format, AP4_Size size) :
    AP4_ContainerAtom(format, (AP4_UI64)size, false),
    m_DataReferenceIndex(1)
{
    // clear the six reserved bytes
    for (unsigned int i = 0; i < sizeof(m_Reserved1); i++) {
        m_Reserved1[i] = 0;
    }
}

/*----------------------------------------------------------------------
|   AP4_SampleEntry::AP4_SampleEntry
|
|   Build a sample entry by parsing fields and children from a stream.
+---------------------------------------------------------------------*/
AP4_SampleEntry::AP4_SampleEntry(AP4_Atom::Type   format,
                                 AP4_Size         size,
                                 AP4_ByteStream&  stream,
                                 AP4_AtomFactory& atom_factory) :
    AP4_ContainerAtom(format, (AP4_UI64)size, false)
{
    Read(stream, atom_factory);
}

/*----------------------------------------------------------------------
|   AP4_SampleEntry::Clone
+---------------------------------------------------------------------*/
AP4_Atom*
AP4_SampleEntry::Clone()
{
    // delegate to the generic atom cloning path
    return this->AP4_Atom::Clone();
}

/*----------------------------------------------------------------------
|   AP4_SampleEntry::Read
+---------------------------------------------------------------------*/
void
AP4_SampleEntry::Read(AP4_ByteStream& stream, AP4_AtomFactory& atom_factory)
{
    // the fixed fields come before any children atoms
    ReadFields(stream);

    // read children atoms (ex: esds and maybe others)
    // NOTE: not all sample entries have children atoms
    AP4_Size payload_size = (AP4_Size)(GetSize()-GetHeaderSize());
    AP4_Size fields_size  = GetFieldsSize();
    if (payload_size > fields_size) {
        ReadChildren(atom_factory, stream, payload_size-fields_size);
    }
}

/*----------------------------------------------------------------------
|   AP4_SampleEntry::GetFieldsSize
+---------------------------------------------------------------------*/
AP4_Size
AP4_SampleEntry::GetFieldsSize()
{
    // 6 reserved bytes + 2-byte data reference index
    return 8;
}

/*----------------------------------------------------------------------
|   AP4_SampleEntry::ReadFields
+---------------------------------------------------------------------*/
AP4_Result
AP4_SampleEntry::ReadFields(AP4_ByteStream& stream)
{
    stream.Read(m_Reserved1, sizeof(m_Reserved1));
    stream.ReadUI16(m_DataReferenceIndex);
    return AP4_SUCCESS;
}

/*----------------------------------------------------------------------
|   AP4_SampleEntry::WriteFields
+---------------------------------------------------------------------*/
AP4_Result
AP4_SampleEntry::WriteFields(AP4_ByteStream& stream)
{
    // reserved1
    AP4_Result result = stream.Write(m_Reserved1, sizeof(m_Reserved1));
    if (AP4_FAILED(result)) return result;

    // data reference index
    result = stream.WriteUI16(m_DataReferenceIndex);
    if (AP4_FAILED(result)) return result;

    return result;
}

/*----------------------------------------------------------------------
|   AP4_SampleEntry::Write
+---------------------------------------------------------------------*/
AP4_Result
AP4_SampleEntry::Write(AP4_ByteStream& stream)
{
    // header first
    AP4_Result result = WriteHeader(stream);
    if (AP4_FAILED(result)) return result;

    // then the fixed fields
    result = WriteFields(stream);
    if (AP4_FAILED(result)) return result;

    // and finally the children atoms
    return m_Children.Apply(AP4_AtomListWriter(stream));
}

/*----------------------------------------------------------------------
|   AP4_SampleEntry::InspectFields
+---------------------------------------------------------------------*/
AP4_Result
AP4_SampleEntry::InspectFields(AP4_AtomInspector& inspector)
{
    inspector.AddField("data_reference_index", m_DataReferenceIndex);
    return AP4_SUCCESS;
}

/*----------------------------------------------------------------------
|   AP4_SampleEntry::Inspect
+---------------------------------------------------------------------*/
AP4_Result
AP4_SampleEntry::Inspect(AP4_AtomInspector& inspector)
{
    // header, then fields, then children
    InspectHeader(inspector);
    InspectFields(inspector);
    m_Children.Apply(AP4_AtomListInspector(inspector));

    // finish
    inspector.EndAtom();

    return AP4_SUCCESS;
}

/*----------------------------------------------------------------------
|   AP4_SampleEntry::OnChildChanged
+---------------------------------------------------------------------*/
void
AP4_SampleEntry::OnChildChanged(AP4_Atom*)
{
    // recompute our size from header + fields + all children
    AP4_UI64 size = GetHeaderSize()+GetFieldsSize();
    m_Children.Apply(AP4_AtomSizeAdder(size));
    m_Size32 = (AP4_UI32)size;

    // propagate the change upward
    if (m_Parent) m_Parent->OnChildChanged(this);
}

/*----------------------------------------------------------------------
|   AP4_SampleEntry::ToSampleDescription
+---------------------------------------------------------------------*/
AP4_SampleDescription*
AP4_SampleEntry::ToSampleDescription()
{
    return new AP4_SampleDescription(AP4_SampleDescription::TYPE_UNKNOWN,
                                     m_Type,
                                     this);
}

/*----------------------------------------------------------------------
|   AP4_UnknownSampleEntry::AP4_UnknownSampleEntry
|
|   Parse an unrecognized sample entry, keeping its payload opaque.
+---------------------------------------------------------------------*/
AP4_UnknownSampleEntry::AP4_UnknownSampleEntry(AP4_Atom::Type  type,
                                               AP4_Size        size,
                                               AP4_ByteStream& stream) :
    AP4_SampleEntry(type, size)
{
    // only read if there is payload beyond the header+fields
    if (size > AP4_ATOM_HEADER_SIZE+AP4_SampleEntry::GetFieldsSize()) {
        m_Payload.SetDataSize(size-(AP4_ATOM_HEADER_SIZE+AP4_SampleEntry::GetFieldsSize()));
        ReadFields(stream);
    }
}

/*----------------------------------------------------------------------
|   AP4_UnknownSampleEntry::AP4_UnknownSampleEntry
+---------------------------------------------------------------------*/
AP4_UnknownSampleEntry::AP4_UnknownSampleEntry(AP4_Atom::Type  type,
                                               AP4_DataBuffer& payload) :
    AP4_SampleEntry(type),
    m_Payload(payload)
{
    m_Size32 += payload.GetDataSize();
}

/*----------------------------------------------------------------------
|   AP4_UnknownSampleEntry::Clone
+---------------------------------------------------------------------*/
AP4_Atom*
AP4_UnknownSampleEntry::Clone()
{
    return new AP4_UnknownSampleEntry(m_Type, m_Payload);
}

/*----------------------------------------------------------------------
|   AP4_UnknownSampleEntry::ToSampleDescription
+---------------------------------------------------------------------*/
AP4_SampleDescription*
AP4_UnknownSampleEntry::ToSampleDescription()
{
    return new AP4_UnknownSampleDescription(this);
}

/*----------------------------------------------------------------------
|   AP4_UnknownSampleEntry::GetFieldsSize
+---------------------------------------------------------------------*/
AP4_Size
AP4_UnknownSampleEntry::GetFieldsSize()
{
    // base fields plus the opaque payload
    return AP4_SampleEntry::GetFieldsSize()+m_Payload.GetDataSize();
}

/*----------------------------------------------------------------------
|   AP4_UnknownSampleEntry::ReadFields
+---------------------------------------------------------------------*/
AP4_Result
AP4_UnknownSampleEntry::ReadFields(AP4_ByteStream& stream)
{
    // base sample entry fields
    AP4_Result result = AP4_SampleEntry::ReadFields(stream);
    if (AP4_FAILED(result)) return result;

    // then the opaque payload
    return stream.Read(m_Payload.UseData(), m_Payload.GetDataSize());
}

/*----------------------------------------------------------------------
|   AP4_UnknownSampleEntry::WriteFields
+---------------------------------------------------------------------*/
AP4_Result
AP4_UnknownSampleEntry::WriteFields(AP4_ByteStream& stream)
{
    // base sample entry fields
    AP4_Result result = AP4_SampleEntry::WriteFields(stream);
    if (AP4_FAILED(result)) return result;

    // then the opaque payload
    return stream.Write(m_Payload.GetData(), m_Payload.GetDataSize());
}

/*----------------------------------------------------------------------
|   AP4_MpegSystemSampleEntry::AP4_MpegSystemSampleEntry
+---------------------------------------------------------------------*/
AP4_MpegSystemSampleEntry::AP4_MpegSystemSampleEntry(
    AP4_UI32          type,
    AP4_EsDescriptor* descriptor) :
    AP4_SampleEntry(type)
{
    if (descriptor) AddChild(new AP4_EsdsAtom(descriptor));
}

/*----------------------------------------------------------------------
|   AP4_MpegSystemSampleEntry::AP4_MpegSystemSampleEntry
+---------------------------------------------------------------------*/
AP4_MpegSystemSampleEntry::AP4_MpegSystemSampleEntry(
    AP4_UI32         type,
    AP4_Size         size,
    AP4_ByteStream&  stream,
    AP4_AtomFactory& atom_factory) :
    AP4_SampleEntry(type, size, stream, atom_factory)
{
}

/*----------------------------------------------------------------------
|   AP4_MpegSystemSampleEntry::ToSampleDescription
+---------------------------------------------------------------------*/
AP4_SampleDescription*
AP4_MpegSystemSampleEntry::ToSampleDescription()
{
    return new AP4_MpegSystemSampleDescription(
        AP4_DYNAMIC_CAST(AP4_EsdsAtom, GetChild(AP4_ATOM_TYPE_ESDS)));
}

/*----------------------------------------------------------------------
|   AP4_Mp4sSampleEntry::AP4_Mp4sSampleEntry
+---------------------------------------------------------------------*/
AP4_Mp4sSampleEntry::AP4_Mp4sSampleEntry(AP4_EsDescriptor* descriptor) :
    AP4_MpegSystemSampleEntry(AP4_ATOM_TYPE_MP4S, descriptor)
{
}

/*----------------------------------------------------------------------
|   AP4_Mp4sSampleEntry::AP4_Mp4sSampleEntry
+---------------------------------------------------------------------*/
AP4_Mp4sSampleEntry::AP4_Mp4sSampleEntry(AP4_Size         size,
                                         AP4_ByteStream&  stream,
                                         AP4_AtomFactory& atom_factory) :
    AP4_MpegSystemSampleEntry(AP4_ATOM_TYPE_MP4S, size, stream, atom_factory)
{
}

/*----------------------------------------------------------------------
|   AP4_Mp4sSampleEntry::ToSampleDescription
+---------------------------------------------------------------------*/
AP4_SampleDescription*
AP4_Mp4sSampleEntry::ToSampleDescription()
{
    // create a sample description
    return new AP4_MpegSystemSampleDescription(
        AP4_DYNAMIC_CAST(AP4_EsdsAtom, GetChild(AP4_ATOM_TYPE_ESDS)));
}

/*----------------------------------------------------------------------
|   AP4_AudioSampleEntry::AP4_AudioSampleEntry
|
|   Build an audio sample entry; all QuickTime extension fields start
|   at zero (plain, non-QT layout).
+---------------------------------------------------------------------*/
AP4_AudioSampleEntry::AP4_AudioSampleEntry(AP4_Atom::Type format,
                                           AP4_UI32       sample_rate,
                                           AP4_UI16       sample_size,
                                           AP4_UI16       channel_count) :
    AP4_SampleEntry(format),
    m_QtVersion(0),
    m_QtRevision(0),
    m_QtVendor(0),
    m_ChannelCount(channel_count),
    m_SampleSize(sample_size),
    m_QtCompressionId(0),
    m_QtPacketSize(0),
    m_SampleRate(sample_rate),
    m_QtV1SamplesPerPacket(0),
    m_QtV1BytesPerPacket(0),
    m_QtV1BytesPerFrame(0),
    m_QtV1BytesPerSample(0),
    m_QtV2StructSize(0),
    m_QtV2SampleRate64(0.0),
    m_QtV2ChannelCount(0),
    m_QtV2Reserved(0),
    m_QtV2BitsPerChannel(0),
    m_QtV2FormatSpecificFlags(0),
    m_QtV2BytesPerAudioPacket(0),
    m_QtV2LPCMFramesPerAudioPacket(0)
{
    // account for the 20 bytes of audio sample-entry fields
    m_Size32 += 20;
}
/*----------------------------------------------------------------------
|   AP4_AudioSampleEntry::AP4_AudioSampleEntry
+---------------------------------------------------------------------*/
// Construct by parsing the entry (and its children) from a stream.
AP4_AudioSampleEntry::AP4_AudioSampleEntry(AP4_Atom::Type   format,
                                           AP4_Size         size,
                                           AP4_ByteStream&  stream,
                                           AP4_AtomFactory& atom_factory) :
    AP4_SampleEntry(format, size)
{
    Read(stream, atom_factory);
}

/*----------------------------------------------------------------------
|   AP4_AudioSampleEntry::GetFieldsSize
+---------------------------------------------------------------------*/
// Size of the serialized fields: base + 20 fixed audio bytes, plus the
// QuickTime V1 (16 bytes) or V2 (36 bytes + extension) tail if present.
AP4_Size
AP4_AudioSampleEntry::GetFieldsSize()
{
    AP4_Size size = AP4_SampleEntry::GetFieldsSize()+20;
    if (m_QtVersion == 1) {
        size += 16;
    } else if (m_QtVersion == 2) {
        size += 36+m_QtV2Extension.GetDataSize();
    }
    return size;
}

/*----------------------------------------------------------------------
|   AP4_AudioSampleEntry::GetSampleRate
+---------------------------------------------------------------------*/
AP4_UI32
AP4_AudioSampleEntry::GetSampleRate()
{
    if (m_QtVersion == 2) {
        // V2 entries carry the rate as a 64-bit float
        return (AP4_UI32)(m_QtV2SampleRate64);
    } else {
        // m_SampleRate is 16.16 fixed-point; keep the integer part
        return m_SampleRate>>16;
    }
}

/*----------------------------------------------------------------------
|   AP4_AudioSampleEntry::GetChannelCount
+---------------------------------------------------------------------*/
AP4_UI16
AP4_AudioSampleEntry::GetChannelCount()
{
    if (m_QtVersion == 2) {
        return (AP4_UI16)m_QtV2ChannelCount;
    } else {
        return m_ChannelCount;
    }
}

/*----------------------------------------------------------------------
|   AP4_AudioSampleEntry::ReadFields
+---------------------------------------------------------------------*/
// Parse the fixed audio fields and, depending on m_QtVersion, the
// QuickTime V1 or V2 extension. Fields of the non-matching version are
// reset to zero so the object is always in a consistent state.
// NOTE(review): the individual Read* return codes below are not checked;
// a short stream leaves fields at whatever the reader produced.
AP4_Result
AP4_AudioSampleEntry::ReadFields(AP4_ByteStream& stream)
{
    // sample entry
    AP4_Result result = AP4_SampleEntry::ReadFields(stream);
    if (result < 0) return result;

    // read the fields of this class
    stream.ReadUI16(m_QtVersion);
    stream.ReadUI16(m_QtRevision);
    stream.ReadUI32(m_QtVendor);
    stream.ReadUI16(m_ChannelCount);
    stream.ReadUI16(m_SampleSize);
    stream.ReadUI16(m_QtCompressionId);
    stream.ReadUI16(m_QtPacketSize);
    stream.ReadUI32(m_SampleRate);

    // if this is a QT V1 entry, read the extension
    if (m_QtVersion == 1) {
        stream.ReadUI32(m_QtV1SamplesPerPacket);
        stream.ReadUI32(m_QtV1BytesPerPacket);
        stream.ReadUI32(m_QtV1BytesPerFrame);
        stream.ReadUI32(m_QtV1BytesPerSample);
    } else if (m_QtVersion == 2) {
        stream.ReadUI32(m_QtV2StructSize);
        stream.ReadDouble(m_QtV2SampleRate64);
        stream.ReadUI32(m_QtV2ChannelCount);
        stream.ReadUI32(m_QtV2Reserved);
        stream.ReadUI32(m_QtV2BitsPerChannel);
        stream.ReadUI32(m_QtV2FormatSpecificFlags);
        stream.ReadUI32(m_QtV2BytesPerAudioPacket);
        stream.ReadUI32(m_QtV2LPCMFramesPerAudioPacket);
        if (m_QtV2StructSize > 72) {
            // bytes beyond the fixed 72-byte V2 struct are kept verbatim
            // NOTE(review): m_QtV2StructSize comes straight from the file and
            // is not bounded against the atom size here -- a hostile value can
            // force a very large allocation. Confirm upstream size validation.
            unsigned int ext_size = m_QtV2StructSize-72;
            m_QtV2Extension.SetDataSize(ext_size);
            stream.Read(m_QtV2Extension.UseData(), ext_size);
        }
        m_QtV1SamplesPerPacket =
        m_QtV1BytesPerPacket   =
        m_QtV1BytesPerFrame    =
        m_QtV1BytesPerSample   = 0;
    } else {
        // unknown/plain version: clear all QT extension fields
        m_QtV1SamplesPerPacket         = 0;
        m_QtV1BytesPerPacket           = 0;
        m_QtV1BytesPerFrame            = 0;
        m_QtV1BytesPerSample           = 0;
        m_QtV2StructSize               = 0;
        m_QtV2SampleRate64             = 0.0;
        m_QtV2ChannelCount             = 0;
        m_QtV2Reserved                 = 0;
        m_QtV2BitsPerChannel           = 0;
        m_QtV2FormatSpecificFlags      = 0;
        m_QtV2BytesPerAudioPacket      = 0;
        m_QtV2LPCMFramesPerAudioPacket = 0;
    }

    return AP4_SUCCESS;
}

/*----------------------------------------------------------------------
|   AP4_AudioSampleEntry::WriteFields
+---------------------------------------------------------------------*/
// Serialize the fields in the exact order ReadFields consumes them.
AP4_Result
AP4_AudioSampleEntry::WriteFields(AP4_ByteStream& stream)
{
    AP4_Result result;

    // write the fields of the base class
    result = AP4_SampleEntry::WriteFields(stream);
    if (AP4_FAILED(result)) return result;

    // QT version
    result = stream.WriteUI16(m_QtVersion);
    if (AP4_FAILED(result)) return result;

    // QT revision
    result = stream.WriteUI16(m_QtRevision);
    if (AP4_FAILED(result)) return result;

    // QT vendor
    result = stream.WriteUI32(m_QtVendor);
    if (AP4_FAILED(result)) return result;

    // channel count
    result = stream.WriteUI16(m_ChannelCount);
    if (AP4_FAILED(result)) return result;

    // sample size
    result = stream.WriteUI16(m_SampleSize);
    if (AP4_FAILED(result)) return result;

    // QT compression ID
    result = stream.WriteUI16(m_QtCompressionId);
    if (AP4_FAILED(result)) return result;

    // QT packet size
    result = stream.WriteUI16(m_QtPacketSize);
    if (AP4_FAILED(result)) return result;

    // sample rate
    result = stream.WriteUI32(m_SampleRate);
    if (AP4_FAILED(result)) return result;

    if (m_QtVersion == 1) {
        result = stream.WriteUI32(m_QtV1SamplesPerPacket);
        if (AP4_FAILED(result)) return result;
        result = stream.WriteUI32(m_QtV1BytesPerPacket);
        if (AP4_FAILED(result)) return result;
        result = stream.WriteUI32(m_QtV1BytesPerFrame);
        if (AP4_FAILED(result)) return result;
        result = stream.WriteUI32(m_QtV1BytesPerSample);
        if (AP4_FAILED(result)) return result;
    } else if (m_QtVersion == 2) {
        // NOTE(review): unlike the V1 branch, these write results are not
        // checked; the final 'return result' only reflects the last call above
        stream.WriteUI32(m_QtV2StructSize);
        stream.WriteDouble(m_QtV2SampleRate64);
        stream.WriteUI32(m_QtV2ChannelCount);
        stream.WriteUI32(m_QtV2Reserved);
        stream.WriteUI32(m_QtV2BitsPerChannel);
        stream.WriteUI32(m_QtV2FormatSpecificFlags);
        stream.WriteUI32(m_QtV2BytesPerAudioPacket);
        stream.WriteUI32(m_QtV2LPCMFramesPerAudioPacket);
        if (m_QtV2Extension.GetDataSize()) {
            stream.Write(m_QtV2Extension.GetData(),
                         m_QtV2Extension.GetDataSize());
        }
    }

    return result;
}

/*----------------------------------------------------------------------
|   AP4_AudioSampleEntry::InspectFields
+---------------------------------------------------------------------*/
// Dump the (version-resolved) audio parameters to the inspector.
AP4_Result
AP4_AudioSampleEntry::InspectFields(AP4_AtomInspector& inspector)
{
    // dump the fields from the base class
    AP4_SampleEntry::InspectFields(inspector);

    // fields
    inspector.AddField("channel_count", GetChannelCount());
    inspector.AddField("sample_size", GetSampleSize());
    inspector.AddField("sample_rate", GetSampleRate());
    if (m_QtVersion) {
        inspector.AddField("qt_version", m_QtVersion);
    }

    return AP4_SUCCESS;
}
/*---------------------------------------------------------------------- | AP4_AudioSampleEntry::ToSampleDescription +---------------------------------------------------------------------*/ AP4_SampleDescription* AP4_AudioSampleEntry::ToSampleDescription() { // create a sample description return new AP4_GenericAudioSampleDescription( m_Type, GetSampleRate(), GetSampleSize(), GetChannelCount(), this); } /*---------------------------------------------------------------------- | AP4_MpegAudioSampleEntry::AP4_MpegAudioSampleEntry +---------------------------------------------------------------------*/ AP4_MpegAudioSampleEntry::AP4_MpegAudioSampleEntry( AP4_UI32 type, AP4_UI32 sample_rate, AP4_UI16 sample_size, AP4_UI16 channel_count, AP4_EsDescriptor* descriptor) : AP4_AudioSampleEntry(type, sample_rate, sample_size, channel_count) { if (descriptor) AddChild(new AP4_EsdsAtom(descriptor)); } /*---------------------------------------------------------------------- | AP4_MpegAudioSampleEntry::AP4_MpegAudioSampleEntry +---------------------------------------------------------------------*/ AP4_MpegAudioSampleEntry::AP4_MpegAudioSampleEntry( AP4_UI32 type, AP4_Size size, AP4_ByteStream& stream, AP4_AtomFactory& atom_factory) : AP4_AudioSampleEntry(type, size, stream, atom_factory) { } /*---------------------------------------------------------------------- | AP4_MpegAudioSampleEntry::ToSampleDescription +---------------------------------------------------------------------*/ AP4_SampleDescription* AP4_MpegAudioSampleEntry::ToSampleDescription() { // find the esds atom AP4_EsdsAtom* esds = AP4_DYNAMIC_CAST(AP4_EsdsAtom, GetChild(AP4_ATOM_TYPE_ESDS)); if (esds == NULL) { // check if this is a quicktime style sample description if (m_QtVersion > 0) { esds = AP4_DYNAMIC_CAST(AP4_EsdsAtom, FindChild("wave/esds")); } } // create a sample description return new AP4_MpegAudioSampleDescription(GetSampleRate(), GetSampleSize(), GetChannelCount(), esds); } 
/*----------------------------------------------------------------------
|   AP4_Mp4aSampleEntry::AP4_Mp4aSampleEntry
+---------------------------------------------------------------------*/
AP4_Mp4aSampleEntry::AP4_Mp4aSampleEntry(AP4_UI32          sample_rate,
                                         AP4_UI16          sample_size,
                                         AP4_UI16          channel_count,
                                         AP4_EsDescriptor* descriptor) :
    AP4_MpegAudioSampleEntry(AP4_ATOM_TYPE_MP4A,
                             sample_rate,
                             sample_size,
                             channel_count,
                             descriptor)
{
}

/*----------------------------------------------------------------------
|   AP4_Mp4aSampleEntry::AP4_Mp4aSampleEntry
+---------------------------------------------------------------------*/
AP4_Mp4aSampleEntry::AP4_Mp4aSampleEntry(AP4_Size         size,
                                         AP4_ByteStream&  stream,
                                         AP4_AtomFactory& atom_factory) :
    AP4_MpegAudioSampleEntry(AP4_ATOM_TYPE_MP4A, size, stream, atom_factory)
{
}

/*----------------------------------------------------------------------
|   AP4_VisualSampleEntry::AP4_VisualSampleEntry
+---------------------------------------------------------------------*/
// Construct a visual sample entry from explicit values.
// 0x00480000 is 72 dpi in 16.16 fixed-point, the conventional default.
AP4_VisualSampleEntry::AP4_VisualSampleEntry(
    AP4_Atom::Type        format,
    AP4_UI16              width,
    AP4_UI16              height,
    AP4_UI16              depth,
    const char*           compressor_name,
    const AP4_AtomParent* details) :
    AP4_SampleEntry(format, details),
    m_Predefined1(0),
    m_Reserved2(0),
    m_Width(width),
    m_Height(height),
    m_HorizResolution(0x00480000),
    m_VertResolution(0x00480000),
    m_Reserved3(0),
    m_FrameCount(1),
    m_CompressorName(compressor_name),
    m_Depth(depth),
    m_Predefined3(0xFFFF)
{
    memset(m_Predefined2, 0, sizeof(m_Predefined2));

    // account for the 70 bytes of visual-specific fields in the atom size
    m_Size32 += 70;
}

/*----------------------------------------------------------------------
|   AP4_VisualSampleEntry::AP4_VisualSampleEntry
+---------------------------------------------------------------------*/
// Construct by parsing from a stream.
AP4_VisualSampleEntry::AP4_VisualSampleEntry(AP4_Atom::Type   format,
                                             AP4_Size         size,
                                             AP4_ByteStream&  stream,
                                             AP4_AtomFactory& atom_factory) :
    AP4_SampleEntry(format, size)
{
    Read(stream, atom_factory);
}

/*----------------------------------------------------------------------
|   AP4_VisualSampleEntry::GetFieldsSize
+---------------------------------------------------------------------*/
AP4_Size
AP4_VisualSampleEntry::GetFieldsSize()
{
    return AP4_SampleEntry::GetFieldsSize()+70;
}

/*----------------------------------------------------------------------
|   AP4_VisualSampleEntry::ReadFields
+---------------------------------------------------------------------*/
// Parse the fixed visual fields. The compressor name is stored on disk
// as a 32-byte Pascal-style string (length byte + up to 31 chars).
// NOTE(review): the individual Read* return codes are not checked.
AP4_Result
AP4_VisualSampleEntry::ReadFields(AP4_ByteStream& stream)
{
    // sample entry
    AP4_Result result = AP4_SampleEntry::ReadFields(stream);
    if (result < 0) return result;

    // read fields from this class
    stream.ReadUI16(m_Predefined1);
    stream.ReadUI16(m_Reserved2);
    stream.Read(m_Predefined2, sizeof(m_Predefined2));
    stream.ReadUI16(m_Width);
    stream.ReadUI16(m_Height);
    stream.ReadUI32(m_HorizResolution);
    stream.ReadUI32(m_VertResolution);
    stream.ReadUI32(m_Reserved3);
    stream.ReadUI16(m_FrameCount);

    // 33-byte local buffer: 32 bytes from the stream + a guaranteed NUL
    AP4_UI08 compressor_name[33];
    compressor_name[32] = 0;
    stream.Read(compressor_name, 32);
    AP4_UI08 name_length = compressor_name[0];
    if (name_length < 32) {
        // the length-byte bound keeps the terminator write inside the
        // buffer; a declared length >= 32 leaves m_CompressorName unchanged
        compressor_name[name_length+1] = 0; // force null termination
        m_CompressorName = (const char*)(&compressor_name[1]);
    }

    stream.ReadUI16(m_Depth);
    stream.ReadUI16(m_Predefined3);

    return AP4_SUCCESS;
}

/*----------------------------------------------------------------------
|   AP4_VisualSampleEntry::WriteFields
+---------------------------------------------------------------------*/
// Serialize the fields in the exact order ReadFields consumes them; the
// compressor name is re-encoded as a zero-padded 32-byte Pascal string.
AP4_Result
AP4_VisualSampleEntry::WriteFields(AP4_ByteStream& stream)
{
    AP4_Result result;

    // write the fields of the base class
    result = AP4_SampleEntry::WriteFields(stream);
    if (AP4_FAILED(result)) return result;

    // predefined1
    result = stream.WriteUI16(m_Predefined1);
    if (AP4_FAILED(result)) return result;

    // reserved2
    result = stream.WriteUI16(m_Reserved2);
    if (AP4_FAILED(result)) return result;

    // predefined2
    result = stream.Write(m_Predefined2, sizeof(m_Predefined2));
    if (AP4_FAILED(result)) return result;

    // width
    result = stream.WriteUI16(m_Width);
    if (AP4_FAILED(result)) return result;

    // height
    result = stream.WriteUI16(m_Height);
    if (AP4_FAILED(result)) return result;

    // horizontal resolution
    result = stream.WriteUI32(m_HorizResolution);
    if (AP4_FAILED(result)) return result;

    // vertical resolution
    result = stream.WriteUI32(m_VertResolution);
    if (AP4_FAILED(result)) return result;

    // reserved3
    result = stream.WriteUI32(m_Reserved3);
    if (AP4_FAILED(result)) return result;

    // frame count
    result = stream.WriteUI16(m_FrameCount);
    if (AP4_FAILED(result)) return result;

    // compressor name: length byte, up to 31 chars, zero padding to 32
    unsigned char compressor_name[32];
    unsigned int name_length = m_CompressorName.GetLength();
    if (name_length > 31) name_length = 31; // truncate to what fits
    compressor_name[0] = (unsigned char)name_length;
    for (unsigned int i=0; i<name_length; i++) {
        compressor_name[i+1] = m_CompressorName[i];
    }
    for (unsigned int i=name_length+1; i<32; i++) {
        compressor_name[i] = 0;
    }
    result = stream.Write(compressor_name, 32);
    if (AP4_FAILED(result)) return result;

    // depth
    result = stream.WriteUI16(m_Depth);
    if (AP4_FAILED(result)) return result;

    // predefined3
    result = stream.WriteUI16(m_Predefined3);
    if (AP4_FAILED(result)) return result;

    return result;
}

/*----------------------------------------------------------------------
|   AP4_VisualSampleEntry::InspectFields
+---------------------------------------------------------------------*/
AP4_Result
AP4_VisualSampleEntry::InspectFields(AP4_AtomInspector& inspector)
{
    // dump the fields of the base class
    AP4_SampleEntry::InspectFields(inspector);

    // fields
    inspector.AddField("width", m_Width);
    inspector.AddField("height", m_Height);
    inspector.AddField("compressor", m_CompressorName.GetChars());

    return AP4_SUCCESS;
}

/*----------------------------------------------------------------------
|   AP4_VisualSampleEntry::ToSampleDescription
+---------------------------------------------------------------------*/
AP4_SampleDescription*
AP4_VisualSampleEntry::ToSampleDescription()
{
    // create a sample description
    return new AP4_GenericVideoSampleDescription(m_Type,
                                                 m_Width,
                                                 m_Height,
                                                 m_Depth,
                                                 m_CompressorName.GetChars(),
                                                 this);
}

/*----------------------------------------------------------------------
|   AP4_MpegVideoSampleEntry::AP4_MpegVideoSampleEntry
+---------------------------------------------------------------------*/
AP4_MpegVideoSampleEntry::AP4_MpegVideoSampleEntry(
    AP4_UI32          type,
    AP4_UI16          width,
    AP4_UI16          height,
    AP4_UI16          depth,
    const char*       compressor_name,
    AP4_EsDescriptor* descriptor) :
    AP4_VisualSampleEntry(type, width, height, depth, compressor_name)
{
    if (descriptor) AddChild(new AP4_EsdsAtom(descriptor));
}

/*----------------------------------------------------------------------
|   AP4_MpegVideoSampleEntry::AP4_MpegVideoSampleEntry
+---------------------------------------------------------------------*/
AP4_MpegVideoSampleEntry::AP4_MpegVideoSampleEntry(
    AP4_UI32         type,
    AP4_Size         size,
    AP4_ByteStream&  stream,
    AP4_AtomFactory& atom_factory) :
    AP4_VisualSampleEntry(type, size, stream, atom_factory)
{
}

/*----------------------------------------------------------------------
|   AP4_MpegVideoSampleEntry::ToSampleDescription
+---------------------------------------------------------------------*/
AP4_SampleDescription*
AP4_MpegVideoSampleEntry::ToSampleDescription()
{
    // create a sample description (the 'esds' child may be absent)
    return new AP4_MpegVideoSampleDescription(
        m_Width,
        m_Height,
        m_Depth,
        m_CompressorName.GetChars(),
        AP4_DYNAMIC_CAST(AP4_EsdsAtom, GetChild(AP4_ATOM_TYPE_ESDS)));
}

/*----------------------------------------------------------------------
|   AP4_Mp4vSampleEntry::AP4_Mp4vSampleEntry
+---------------------------------------------------------------------*/
AP4_Mp4vSampleEntry::AP4_Mp4vSampleEntry(AP4_UI16          width,
                                         AP4_UI16          height,
                                         AP4_UI16          depth,
                                         const char*       compressor_name,
                                         AP4_EsDescriptor* descriptor) :
    AP4_MpegVideoSampleEntry(AP4_ATOM_TYPE_MP4V,
                             width,
                             height,
                             depth,
                             compressor_name,
                             descriptor)
{
}

/*----------------------------------------------------------------------
|   AP4_Mp4vSampleEntry::AP4_Mp4vSampleEntry
+---------------------------------------------------------------------*/
AP4_Mp4vSampleEntry::AP4_Mp4vSampleEntry(AP4_Size         size,
                                         AP4_ByteStream&  stream,
                                         AP4_AtomFactory& atom_factory) :
    AP4_MpegVideoSampleEntry(AP4_ATOM_TYPE_MP4V, size, stream, atom_factory)
{
}

/*----------------------------------------------------------------------
|   AP4_AvcSampleEntry::AP4_AvcSampleEntry
+---------------------------------------------------------------------*/
AP4_AvcSampleEntry::AP4_AvcSampleEntry(AP4_UI32              format,
                                       AP4_UI16              width,
                                       AP4_UI16              height,
                                       AP4_UI16              depth,
                                       const char*           compressor_name,
                                       const AP4_AtomParent* details) :
    AP4_VisualSampleEntry(format,
                          width,
                          height,
                          depth,
                          compressor_name,
                          details)
{
}

/*----------------------------------------------------------------------
|   AP4_AvcSampleEntry::AP4_AvcSampleEntry
+---------------------------------------------------------------------*/
AP4_AvcSampleEntry::AP4_AvcSampleEntry(AP4_UI32         format,
                                       AP4_Size         size,
                                       AP4_ByteStream&  stream,
                                       AP4_AtomFactory& atom_factory) :
    AP4_VisualSampleEntry(format, size, stream, atom_factory)
{
}

/*----------------------------------------------------------------------
|   AP4_HevcSampleEntry::AP4_HevcSampleEntry
+---------------------------------------------------------------------*/
AP4_HevcSampleEntry::AP4_HevcSampleEntry(AP4_UI32              format,
                                         AP4_UI16              width,
                                         AP4_UI16              height,
                                         AP4_UI16              depth,
                                         const char*           compressor_name,
                                         const AP4_AtomParent* details) :
    AP4_VisualSampleEntry(format,
                          width,
                          height,
                          depth,
                          compressor_name,
                          details)
{
}

/*----------------------------------------------------------------------
|   AP4_AvcSampleEntry::ToSampleDescription
+---------------------------------------------------------------------*/
AP4_SampleDescription*
AP4_AvcSampleEntry::ToSampleDescription()
{
    return new AP4_AvcSampleDescription(m_Type,
                                        m_Width,
                                        m_Height,
                                        m_Depth,
                                        m_CompressorName.GetChars(),
                                        this);
}

/*----------------------------------------------------------------------
|   AP4_HevcSampleEntry::AP4_HevcSampleEntry
+---------------------------------------------------------------------*/
AP4_HevcSampleEntry::AP4_HevcSampleEntry(AP4_UI32         format,
                                         AP4_Size         size,
                                         AP4_ByteStream&  stream,
                                         AP4_AtomFactory& atom_factory) :
    AP4_VisualSampleEntry(format, size, stream, atom_factory)
{
}

/*----------------------------------------------------------------------
|   AP4_HevcSampleEntry::ToSampleDescription
+---------------------------------------------------------------------*/
AP4_SampleDescription*
AP4_HevcSampleEntry::ToSampleDescription()
{
    return new AP4_HevcSampleDescription(m_Type,
                                         m_Width,
                                         m_Height,
                                         m_Depth,
                                         m_CompressorName.GetChars(),
                                         this);
}

/*----------------------------------------------------------------------
|   AP4_RtpHintSampleEntry::AP4_RtpHintSampleEntry
+---------------------------------------------------------------------*/
// Construct an RTP hint entry; the timescale travels in a 'tims' child.
AP4_RtpHintSampleEntry::AP4_RtpHintSampleEntry(AP4_UI16 hint_track_version,
                                               AP4_UI16 highest_compatible_version,
                                               AP4_UI32 max_packet_size,
                                               AP4_UI32 timescale):
    AP4_SampleEntry(AP4_ATOM_TYPE_RTP_),
    m_HintTrackVersion(hint_track_version),
    m_HighestCompatibleVersion(highest_compatible_version),
    m_MaxPacketSize(max_packet_size)
{
    // build an atom for timescale
    AddChild(new AP4_TimsAtom(timescale));
}

/*----------------------------------------------------------------------
|   AP4_RtpHintSampleEntry::AP4_RtpHintSampleEntry
+---------------------------------------------------------------------*/
AP4_RtpHintSampleEntry::AP4_RtpHintSampleEntry(AP4_Size         size,
                                               AP4_ByteStream&  stream,
                                               AP4_AtomFactory& atom_factory):
    AP4_SampleEntry(AP4_ATOM_TYPE_RTP_, size)
{
    Read(stream, atom_factory);
}

/*----------------------------------------------------------------------
|   AP4_RtpHintSampleEntry::GetFieldsSize
+---------------------------------------------------------------------*/
// base fields + 8 bytes (two UI16s and one UI32)
AP4_Size
AP4_RtpHintSampleEntry::GetFieldsSize()
{
    return AP4_SampleEntry::GetFieldsSize()+8;
}
/*----------------------------------------------------------------------
|   AP4_RtpHintSampleEntry::ReadFields
+---------------------------------------------------------------------*/
// Parse the hint-track version pair and the maximum packet size.
AP4_Result
AP4_RtpHintSampleEntry::ReadFields(AP4_ByteStream& stream)
{
    // sample entry
    AP4_Result result = AP4_SampleEntry::ReadFields(stream);
    if (result < 0) return result;

    // data
    result = stream.ReadUI16(m_HintTrackVersion);
    if (AP4_FAILED(result)) return result;
    result = stream.ReadUI16(m_HighestCompatibleVersion);
    if (AP4_FAILED(result)) return result;
    result = stream.ReadUI32(m_MaxPacketSize);
    if (AP4_FAILED(result)) return result;

    return AP4_SUCCESS;
}

/*----------------------------------------------------------------------
|   AP4_RtpHintSampleEntry::WriteFields
+---------------------------------------------------------------------*/
// Serialize the fields in the exact order ReadFields consumes them.
AP4_Result
AP4_RtpHintSampleEntry::WriteFields(AP4_ByteStream& stream)
{
    // sample entry
    AP4_Result result = AP4_SampleEntry::WriteFields(stream);
    if (AP4_FAILED(result)) return result;

    // data
    result = stream.WriteUI16(m_HintTrackVersion);
    if (AP4_FAILED(result)) return result;
    result = stream.WriteUI16(m_HighestCompatibleVersion);
    if (AP4_FAILED(result)) return result;
    result = stream.WriteUI32(m_MaxPacketSize);
    if (AP4_FAILED(result)) return result;

    return result;
}

/*----------------------------------------------------------------------
|   AP4_RtpHintSampleEntry::InspectFields
+---------------------------------------------------------------------*/
AP4_Result
AP4_RtpHintSampleEntry::InspectFields(AP4_AtomInspector& inspector)
{
    // sample entry
    AP4_SampleEntry::InspectFields(inspector);

    // fields
    inspector.AddField("hint_track_version", m_HintTrackVersion);
    inspector.AddField("highest_compatible_version", m_HighestCompatibleVersion);
    inspector.AddField("max_packet_size", m_MaxPacketSize);

    return AP4_SUCCESS;
}

/*----------------------------------------------------------------------
|   AP4_SubtitleSampleEntry::AP4_SubtitleSampleEntry
+---------------------------------------------------------------------*/
// Construct from explicit values; each of the three strings is stored
// null-terminated, hence the +1 per string in the size computation.
AP4_SubtitleSampleEntry::AP4_SubtitleSampleEntry(AP4_Atom::Type format,
                                                 const char*    namespce,
                                                 const char*    schema_location,
                                                 const char*    image_mime_type) :
    AP4_SampleEntry(format),
    m_Namespace(namespce),
    m_SchemaLocation(schema_location),
    m_ImageMimeType(image_mime_type)
{
    SetSize(m_Size32+m_Namespace.GetLength()+1+m_SchemaLocation.GetLength()+1+m_ImageMimeType.GetLength()+1);
}

/*----------------------------------------------------------------------
|   AP4_SubtitleSampleEntry::AP4_SubtitleSampleEntry
+---------------------------------------------------------------------*/
// Construct by parsing from a stream.
AP4_SubtitleSampleEntry::AP4_SubtitleSampleEntry(AP4_Atom::Type   format,
                                                 AP4_Size         size,
                                                 AP4_ByteStream&  stream,
                                                 AP4_AtomFactory& atom_factory) :
    AP4_SampleEntry(format, size)
{
    Read(stream, atom_factory);
}

/*----------------------------------------------------------------------
|   AP4_SubtitleSampleEntry::GetFieldsSize
+---------------------------------------------------------------------*/
// base fields + three strings + their three NUL terminators
AP4_Size
AP4_SubtitleSampleEntry::GetFieldsSize()
{
    return AP4_SampleEntry::GetFieldsSize() +
           3 +
           m_Namespace.GetLength() +
           m_SchemaLocation.GetLength() +
           m_ImageMimeType.GetLength();
}

/*----------------------------------------------------------------------
|   AP4_SubtitleSampleEntry::ReadFields
+---------------------------------------------------------------------*/
// Parse the three null-terminated strings in declaration order.
AP4_Result
AP4_SubtitleSampleEntry::ReadFields(AP4_ByteStream& stream)
{
    // sample entry
    AP4_Result result = AP4_SampleEntry::ReadFields(stream);
    if (result < 0) return result;

    // read fields from this class
    result = stream.ReadNullTerminatedString(m_Namespace);
    if (AP4_FAILED(result)) return result;
    result = stream.ReadNullTerminatedString(m_SchemaLocation);
    if (AP4_FAILED(result)) return result;
    result = stream.ReadNullTerminatedString(m_ImageMimeType);
    if (AP4_FAILED(result)) return result;

    return AP4_SUCCESS;
}
/*---------------------------------------------------------------------- | AP4_SubtitleSampleEntry::WriteFields +---------------------------------------------------------------------*/ AP4_Result AP4_SubtitleSampleEntry::WriteFields(AP4_ByteStream& stream) { AP4_Result result; // write the fields of the base class result = AP4_SampleEntry::WriteFields(stream); if (AP4_FAILED(result)) return result; // write fields from this class result = stream.WriteString(m_Namespace.GetChars()); if (AP4_FAILED(result)) return result; result = stream.WriteUI08(0); if (AP4_FAILED(result)) return result; result = stream.WriteString(m_SchemaLocation.GetChars()); if (AP4_FAILED(result)) return result; result = stream.WriteUI08(0); if (AP4_FAILED(result)) return result; result = stream.WriteString(m_ImageMimeType.GetChars()); if (AP4_FAILED(result)) return result; result = stream.WriteUI08(0); if (AP4_FAILED(result)) return result; return AP4_SUCCESS; } /*---------------------------------------------------------------------- | AP4_SubtitleSampleEntry::InspectFields +---------------------------------------------------------------------*/ AP4_Result AP4_SubtitleSampleEntry::InspectFields(AP4_AtomInspector& inspector) { // dump the fields of the base class AP4_SampleEntry::InspectFields(inspector); // fields inspector.AddField("namespace", m_Namespace.GetChars()); inspector.AddField("schema_location", m_SchemaLocation.GetChars()); inspector.AddField("image_mime_type", m_ImageMimeType.GetChars()); return AP4_SUCCESS; } /*---------------------------------------------------------------------- | AP4_SubtitleSampleEntry::ToSampleDescription +---------------------------------------------------------------------*/ AP4_SampleDescription* AP4_SubtitleSampleEntry::ToSampleDescription() { // create a sample description return new AP4_SubtitleSampleDescription(m_Type, m_Namespace.GetChars(), m_SchemaLocation.GetChars(), m_ImageMimeType.GetChars()); }
./CrossVul/dataset_final_sorted/CWE-843/cpp/good_2808_0
crossvul-cpp_data_good_4749_0
/*
   +----------------------------------------------------------------------+
   | HipHop for PHP                                                       |
   +----------------------------------------------------------------------+
   | Copyright (c) 2010-2016 Facebook, Inc. (http://www.facebook.com)     |
   | Copyright (c) 1997-2010 The PHP Group                                |
   +----------------------------------------------------------------------+
   | This source file is subject to version 3.01 of the PHP license,      |
   | that is bundled with this package in the file LICENSE, and is        |
   | available through the world-wide-web at the following url:           |
   | http://www.php.net/license/3_01.txt                                  |
   | If you did not receive a copy of the PHP license and are unable to   |
   | obtain it through the world-wide-web, please send a note to          |
   | license@php.net so we can mail you a copy immediately.               |
   +----------------------------------------------------------------------+
*/
#include "hphp/runtime/ext/extension.h"
#include "hphp/runtime/base/runtime-error.h"
#include "hphp/runtime/ext/std/ext_std_math.h"
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#define NON_FREE
#define MCRYPT2
#include <mcrypt.h>

namespace HPHP {
///////////////////////////////////////////////////////////////////////////////

// RAII wrapper around a libmcrypt module handle (MCRYPT). The handle is
// deinitialized and closed in close()/the destructor; MCRYPT_FAILED marks
// an already-closed or invalid handle.
struct MCrypt : SweepableResourceData {
  explicit MCrypt(MCRYPT td) : m_td(td), m_init(false) {}

  ~MCrypt() {
    MCrypt::close();
  }

  bool isInvalid() const override {
    return m_td == MCRYPT_FAILED;
  }

  // Idempotent teardown: deinit + close the module, then poison the handle.
  void close() {
    if (m_td != MCRYPT_FAILED) {
      mcrypt_generic_deinit(m_td);
      mcrypt_module_close(m_td);
      m_td = MCRYPT_FAILED;
    }
  }

  CLASSNAME_IS("mcrypt");
  // overriding ResourceData
  const String& o_getClassNameHook() const override { return classnameof(); }

  DECLARE_RESOURCE_ALLOCATION(MCrypt)

public:
  MCRYPT m_td;     // libmcrypt handle, MCRYPT_FAILED when closed
  bool m_init;     // true once mcrypt_generic_init() has succeeded
};
IMPLEMENT_RESOURCE_ALLOCATION(MCrypt)

// IV source selector matching PHP's MCRYPT_DEV_RANDOM / _DEV_URANDOM / _RAND
typedef enum {
  RANDOM = 0,
  URANDOM,
  RAND
} iv_source;

// Process-wide default search paths for algorithm and mode modules.
struct mcrypt_data {
  std::string algorithms_dir;
  std::string modes_dir;
};
static mcrypt_data s_globals;
#define MCG(n) (s_globals.n)

#ifndef MIN
#define MIN(a, b) ((a) < (b) ? (a) : (b))
#endif

#define MCRYPT_OPEN_MODULE_FAILED(str) \
 raise_warning("%s(): Module initialization failed", str);

// Shared implementation for mcrypt_encrypt/mcrypt_decrypt-style calls:
// opens the module, normalizes key and IV, pads block-mode data, runs
// the cipher in place, and returns the resulting buffer.
static Variant php_mcrypt_do_crypt(const String& cipher, const String& key,
                                   const String& data, const String& mode,
                                   const String& iv, bool dencrypt,
                                   char *name) {
  MCRYPT td = mcrypt_module_open((char*)cipher.data(),
                                 (char*)MCG(algorithms_dir).data(),
                                 (char*)mode.data(),
                                 (char*)MCG(modes_dir).data());
  if (td == MCRYPT_FAILED) {
    MCRYPT_OPEN_MODULE_FAILED(name);
    return false;
  }

  /* Checking for key-length */
  int max_key_length = mcrypt_enc_get_key_size(td);
  if (key.size() > max_key_length) {
    // NOTE(review): this only warns -- the oversize key is still used below
    // in the "no supported sizes reported" branch. Confirm this matches the
    // intended PHP-compatible behavior.
    raise_warning("Size of key is too large for this algorithm");
  }
  int count;
  int *key_length_sizes = mcrypt_enc_get_supported_key_sizes(td, &count);

  int use_key_length;
  char *key_s = nullptr;
  if (count == 0 && key_length_sizes == nullptr) {
    // all lengths 1 - k_l_s = OK
    use_key_length = key.size();
    key_s = (char*)malloc(use_key_length);
    memcpy(key_s, key.data(), use_key_length);
  } else if (count == 1) {
    /* only m_k_l = OK */
    // single supported size: zero-pad (or truncate) the key to fit
    key_s = (char*)malloc(key_length_sizes[0]);
    memset(key_s, 0, key_length_sizes[0]);
    memcpy(key_s, key.data(), MIN(key.size(), key_length_sizes[0]));
    use_key_length = key_length_sizes[0];
  } else {
    /* dertermine smallest supported key > length of requested key */
    use_key_length = max_key_length; /* start with max key length */
    for (int i = 0; i < count; i++) {
      if (key_length_sizes[i] >= key.size() &&
          key_length_sizes[i] < use_key_length) {
        use_key_length = key_length_sizes[i];
      }
    }
    key_s = (char*)malloc(use_key_length);
    memset(key_s, 0, use_key_length);
    memcpy(key_s, key.data(), MIN(key.size(), use_key_length));
  }
  mcrypt_free(key_length_sizes);

  /* Check IV */
  char *iv_s = nullptr;
  int iv_size = mcrypt_enc_get_iv_size(td);

  /* IV is required */
  if (mcrypt_enc_mode_has_iv(td) == 1) {
    if (!iv.empty()) {
      if (iv_size != iv.size()) {
        // wrong-size IV: warn and proceed with a NULL IV
        raise_warning("%s(): The IV parameter must be as long as "
                      "the blocksize", name);
      } else {
        iv_s = (char*)malloc(iv_size + 1);
        memcpy(iv_s, iv.data(), iv_size);
      }
    } else {
      // missing IV: warn and substitute an all-zero IV
      raise_warning("%s(): The IV parameter must be as long as "
                    "the blocksize", name);
      iv_s = (char*)malloc(iv_size + 1);
      memset(iv_s, 0, iv_size + 1);
    }
  }

  int block_size;
  unsigned long int data_size;
  String s;
  char *data_s;
  /* Check blocksize */
  if (mcrypt_enc_is_block_mode(td) == 1) { /* It's a block algorithm */
    // round the plaintext up to a whole number of blocks, zero-padded
    block_size = mcrypt_enc_get_block_size(td);
    data_size = (((data.size() - 1) / block_size) + 1) * block_size;
    s = String(data_size, ReserveString);
    data_s = (char*)s.mutableData();
    memset(data_s, 0, data_size);
    memcpy(data_s, data.data(), data.size());
  } else { /* It's not a block algorithm */
    data_size = data.size();
    s = String(data_size, ReserveString);
    data_s = (char*)s.mutableData();
    memcpy(data_s, data.data(), data.size());
  }

  if (mcrypt_generic_init(td, key_s, use_key_length, iv_s) < 0) {
    // NOTE(review): this early return leaks key_s/iv_s and never calls
    // mcrypt_module_close(td) -- worth fixing upstream
    raise_warning("Mcrypt initialisation failed");
    return false;
  }
  // the cipher operates in place on the reserved String buffer
  if (dencrypt) {
    mdecrypt_generic(td, data_s, data_size);
  } else {
    mcrypt_generic(td, data_s, data_size);
  }

  /* freeing vars */
  mcrypt_generic_end(td);
  if (key_s != nullptr) {
    free(key_s);
  }
  if (iv_s != nullptr) {
    free(iv_s);
  }
  s.setSize(data_size);
  return s;
}

// Downcast a PHP resource to MCrypt, warning and returning null if the
// resource is missing, of the wrong type, or already closed.
static req::ptr<MCrypt> get_valid_mcrypt_resource(const Resource& td) {
  auto pm = dyn_cast_or_null<MCrypt>(td);

  if (pm == nullptr || pm->isInvalid()) {
    raise_warning("supplied argument is not a valid MCrypt resource");
    return nullptr;
  }

  return pm;
}

// Shared body of mcrypt_generic/mdecrypt_generic: requires a resource on
// which mcrypt_generic_init() has already been called; pads block-mode
// input to a whole number of blocks before ciphering in place.
static Variant mcrypt_generic(const Resource& td, const String& data,
                              bool dencrypt) {
  auto pm = get_valid_mcrypt_resource(td);
  if (!pm) {
    return false;
  } else if (!pm->m_init) {
    raise_warning("Operation disallowed prior to mcrypt_generic_init().");
    return false;
  }

  if (data.empty()) {
    raise_warning("An empty string was passed");
    return false;
  }

  String s;
  unsigned char* data_s;
  int block_size, data_size;
  /* Check blocksize */
  if (mcrypt_enc_is_block_mode(pm->m_td) == 1) { /* It's a block algorithm */
    block_size = mcrypt_enc_get_block_size(pm->m_td);
    data_size = (((data.size() - 1) / block_size) + 1) * block_size;
    s = String(data_size, ReserveString);
    data_s = (unsigned char *)s.mutableData();
    memset(data_s, 0, data_size);
    memcpy(data_s, data.data(), data.size());
  } else { /* It's not a block algorithm */
    data_size = data.size();
    s = String(data_size, ReserveString);
    data_s = (unsigned char *)s.mutableData();
    memcpy(data_s, data.data(), data.size());
  }

  if (dencrypt) {
    mdecrypt_generic(pm->m_td, data_s, data_size);
  } else {
    mcrypt_generic(pm->m_td, data_s, data_size);
  }
  s.setSize(data_size);
  return s;
}

///////////////////////////////////////////////////////////////////////////////

// Open an algorithm/mode pair, falling back to the configured default
// module directories when none are supplied. Returns false on failure.
Variant HHVM_FUNCTION(mcrypt_module_open, const String& algorithm,
                                          const String& algorithm_directory,
                                          const String& mode,
                                          const String& mode_directory) {
  MCRYPT td = mcrypt_module_open
    ((char*)algorithm.data(),
     (char*)(algorithm_directory.empty() ? MCG(algorithms_dir).data() :
             algorithm_directory.data()),
     (char*)mode.data(),
     (char*)(mode_directory.empty() ? (char*)MCG(modes_dir).data() :
             mode_directory.data()));

  if (td == MCRYPT_FAILED) {
    raise_warning("Could not open encryption module");
    return false;
  }

  return Variant(req::make<MCrypt>(td));
}

bool HHVM_FUNCTION(mcrypt_module_close, const Resource& td) {
  auto pm = get_valid_mcrypt_resource(td);
  if (!pm) {
    return false;
  }

  pm->close();
  return true;
}

Array HHVM_FUNCTION(mcrypt_list_algorithms,
                    const String& lib_dir /* = null_string */) {
  String dir = lib_dir.empty() ? String(MCG(algorithms_dir)) : lib_dir;

  int count = 0;
  char **modules = mcrypt_list_algorithms((char*)dir.data(), &count);
  if (count == 0) {
    raise_warning("No algorithms found in module dir");
  }
  Array ret = Array::Create();
  for (int i = 0; i < count; i++) {
    ret.append(String(modules[i], CopyString));
  }
  // libmcrypt allocated the list; release it with its own helper
  mcrypt_free_p(modules, count);
  return ret;
}

Array HHVM_FUNCTION(mcrypt_list_modes,
                    const String& lib_dir /* = null_string */) {
  String dir = lib_dir.empty() ? String(MCG(modes_dir)) : lib_dir;

  int count = 0;
  char **modules = mcrypt_list_modes((char*)dir.data(), &count);
  if (count == 0) {
    raise_warning("No modes found in module dir");
  }
  Array ret = Array::Create();
  for (int i = 0; i < count; i++) {
    ret.append(String(modules[i], CopyString));
  }
  mcrypt_free_p(modules, count);
  return ret;
}

int64_t HHVM_FUNCTION(mcrypt_module_get_algo_block_size,
                      const String& algorithm,
                      const String& lib_dir /* = null_string */) {
  String dir = lib_dir.empty() ? String(MCG(algorithms_dir)) : lib_dir;
  return mcrypt_module_get_algo_block_size((char*)algorithm.data(),
                                           (char*)dir.data());
}

int64_t HHVM_FUNCTION(mcrypt_module_get_algo_key_size,
                      const String& algorithm,
                      const String& lib_dir /* = null_string */) {
  String dir = lib_dir.empty() ? String(MCG(algorithms_dir)) : lib_dir;
  return mcrypt_module_get_algo_key_size((char*)algorithm.data(),
                                         (char*)dir.data());
}

Array HHVM_FUNCTION(mcrypt_module_get_supported_key_sizes,
                    const String& algorithm,
                    const String& lib_dir /* = null_string */) {
  String dir = lib_dir.empty() ? String(MCG(algorithms_dir)) : lib_dir;

  int count = 0;
  int *key_sizes = mcrypt_module_get_algo_supported_key_sizes
    ((char*)algorithm.data(), (char*)dir.data(), &count);

  Array ret = Array::Create();
  for (int i = 0; i < count; i++) {
    ret.append(key_sizes[i]);
  }
  mcrypt_free(key_sizes);
  return ret;
}

bool HHVM_FUNCTION(mcrypt_module_is_block_algorithm_mode,
                   const String& mode,
                   const String& lib_dir /* = null_string */) {
  String dir = lib_dir.empty() ? String(MCG(modes_dir)) : lib_dir;
  return mcrypt_module_is_block_algorithm_mode((char*)mode.data(),
                                               (char*)dir.data()) == 1;
}

bool HHVM_FUNCTION(mcrypt_module_is_block_algorithm,
                   const String& algorithm,
                   const String& lib_dir /* = null_string */) {
  String dir = lib_dir.empty() ? String(MCG(algorithms_dir)) : lib_dir;
  return mcrypt_module_is_block_algorithm((char*)algorithm.data(),
                                          (char*)dir.data()) == 1;
}

bool HHVM_FUNCTION(mcrypt_module_is_block_mode,
                   const String& mode,
                   const String& lib_dir /* = null_string */) {
  String dir = lib_dir.empty() ? String(MCG(modes_dir)) : lib_dir;
  return mcrypt_module_is_block_mode((char*)mode.data(),
                                     (char*)dir.data()) == 1;
}

bool HHVM_FUNCTION(mcrypt_module_self_test,
                   const String& algorithm,
                   const String& lib_dir /* = null_string */) {
  String dir = lib_dir.empty() ? String(MCG(algorithms_dir)) : lib_dir;
  return mcrypt_module_self_test((char*)algorithm.data(),
                                 (char*)dir.data()) == 0;
}

// Produce an IV of the requested size from /dev/random, /dev/urandom, or
// the libc PRNG, depending on 'source'.
Variant HHVM_FUNCTION(mcrypt_create_iv, int size, int source /* = 0 */) {
  if (size <= 0 || size >= INT_MAX) {
    raise_warning("Can not create an IV with a size of less than 1 or "
                  "greater than %d", INT_MAX);
    return false;
  }

  int n = 0;
  char *iv = (char*)calloc(size + 1, 1);
  if (source == RANDOM || source == URANDOM) {
    int fd = open(source == RANDOM ?
"/dev/random" : "/dev/urandom", O_RDONLY); if (fd < 0) { free(iv); raise_warning("Cannot open source device"); return false; } int read_bytes; for (read_bytes = 0; read_bytes < size && n >= 0; read_bytes += n) { n = read(fd, iv + read_bytes, size - read_bytes); } n = read_bytes; close(fd); if (n < size) { free(iv); raise_warning("Could not gather sufficient random data"); return false; } } else { n = size; while (size) { // Use userspace rand() function because it handles auto-seeding iv[--size] = (char)HHVM_FN(rand)(0, 255); } } return String(iv, n, AttachString); } Variant HHVM_FUNCTION(mcrypt_encrypt, const String& cipher, const String& key, const String& data, const String& mode, const Variant& viv /* = null_string */) { String iv = viv.toString(); return php_mcrypt_do_crypt(cipher, key, data, mode, iv, false, "mcrypt_encrypt"); } Variant HHVM_FUNCTION(mcrypt_decrypt, const String& cipher, const String& key, const String& data, const String& mode, const Variant& viv /* = null_string */) { String iv = viv.toString(); return php_mcrypt_do_crypt(cipher, key, data, mode, iv, true, "mcrypt_decrypt"); } Variant HHVM_FUNCTION(mcrypt_cbc, const String& cipher, const String& key, const String& data, const Variant& mode, const Variant& viv /* = null_string */) { raise_deprecated("Function mcrypt_cbc() is deprecated"); String iv = viv.toString(); return php_mcrypt_do_crypt(cipher, key, data, "cbc", iv, mode.toInt32(), "mcrypt_cbc"); } Variant HHVM_FUNCTION(mcrypt_cfb, const String& cipher, const String& key, const String& data, const Variant& mode, const Variant& viv /* = null_string */) { raise_deprecated("Function mcrypt_cfb() is deprecated"); String iv = viv.toString(); return php_mcrypt_do_crypt(cipher, key, data, "cfb", iv, mode.toInt32(), "mcrypt_cfb"); } Variant HHVM_FUNCTION(mcrypt_ecb, const String& cipher, const String& key, const String& data, const Variant& mode, const Variant& viv /* = null_string */) { raise_deprecated("Function mcrypt_ecb() is deprecated"); 
String iv = viv.toString(); return php_mcrypt_do_crypt(cipher, key, data, "ecb", iv, mode.toInt32(), "mcrypt_ecb"); } Variant HHVM_FUNCTION(mcrypt_ofb, const String& cipher, const String& key, const String& data, const Variant& mode, const Variant& viv /* = null_string */) { raise_deprecated("Function mcrypt_ofb() is deprecated"); String iv = viv.toString(); return php_mcrypt_do_crypt(cipher, key, data, "ofb", iv, mode.toInt32(), "mcrypt_ofb"); } Variant HHVM_FUNCTION(mcrypt_get_block_size, const String& cipher, const String& mode) { MCRYPT td = mcrypt_module_open((char*)cipher.data(), (char*)MCG(algorithms_dir).data(), (char*)mode.data(), (char*)MCG(modes_dir).data()); if (td == MCRYPT_FAILED) { MCRYPT_OPEN_MODULE_FAILED("mcrypt_get_block_size"); return false; } int64_t ret = mcrypt_enc_get_block_size(td); mcrypt_module_close(td); return ret; } Variant HHVM_FUNCTION(mcrypt_get_cipher_name, const String& cipher) { MCRYPT td = mcrypt_module_open((char*)cipher.data(), (char*)MCG(algorithms_dir).data(), (char*)"ecb", (char*)MCG(modes_dir).data()); if (td == MCRYPT_FAILED) { td = mcrypt_module_open((char*)cipher.data(), (char*)MCG(algorithms_dir).data(), (char*)"stream", (char*)MCG(modes_dir).data()); if (td == MCRYPT_FAILED) { MCRYPT_OPEN_MODULE_FAILED("mcrypt_get_cipher_name"); return false; } } char *cipher_name = mcrypt_enc_get_algorithms_name(td); mcrypt_module_close(td); String ret(cipher_name, CopyString); mcrypt_free(cipher_name); return ret; } Variant HHVM_FUNCTION(mcrypt_get_iv_size, const String& cipher, const String& mode) { MCRYPT td = mcrypt_module_open((char*)cipher.data(), (char*)MCG(algorithms_dir).data(), (char*)mode.data(), (char*)MCG(modes_dir).data()); if (td == MCRYPT_FAILED) { MCRYPT_OPEN_MODULE_FAILED("mcrypt_get_iv_size"); return false; } int64_t ret = mcrypt_enc_get_iv_size(td); mcrypt_module_close(td); return ret; } Variant HHVM_FUNCTION(mcrypt_get_key_size, const String& cipher, const String& module) { MCRYPT td = 
mcrypt_module_open((char*)cipher.data(), (char*)MCG(algorithms_dir).data(), (char*)module.data(), (char*)MCG(modes_dir).data()); if (td == MCRYPT_FAILED) { MCRYPT_OPEN_MODULE_FAILED("mcrypt_get_key_size"); return false; } int64_t ret = mcrypt_enc_get_key_size(td); mcrypt_module_close(td); return ret; } Variant HHVM_FUNCTION(mcrypt_enc_get_algorithms_name, const Resource& td) { auto pm = get_valid_mcrypt_resource(td); if (!pm) { return false; } char *name = mcrypt_enc_get_algorithms_name(pm->m_td); String ret(name, CopyString); mcrypt_free(name); return ret; } Variant HHVM_FUNCTION(mcrypt_enc_get_block_size, const Resource& td) { auto pm = get_valid_mcrypt_resource(td); if (!pm) { return false; } return mcrypt_enc_get_block_size(pm->m_td); } Variant HHVM_FUNCTION(mcrypt_enc_get_iv_size, const Resource& td) { auto pm = get_valid_mcrypt_resource(td); if (!pm) { return false; } return mcrypt_enc_get_iv_size(pm->m_td); } Variant HHVM_FUNCTION(mcrypt_enc_get_key_size, const Resource& td) { auto pm = get_valid_mcrypt_resource(td); if (!pm) { return false; } return mcrypt_enc_get_key_size(pm->m_td); } Variant HHVM_FUNCTION(mcrypt_enc_get_modes_name, const Resource& td) { auto pm = get_valid_mcrypt_resource(td); if (!pm) { return false; } char *name = mcrypt_enc_get_modes_name(pm->m_td); String ret(name, CopyString); mcrypt_free(name); return ret; } Variant HHVM_FUNCTION(mcrypt_enc_get_supported_key_sizes, const Resource& td) { auto pm = get_valid_mcrypt_resource(td); if (!pm) { return false; } int count = 0; int *key_sizes = mcrypt_enc_get_supported_key_sizes(pm->m_td, &count); Array ret = Array::Create(); for (int i = 0; i < count; i++) { ret.append(key_sizes[i]); } mcrypt_free(key_sizes); return ret; } bool HHVM_FUNCTION(mcrypt_enc_is_block_algorithm_mode, const Resource& td) { auto pm = get_valid_mcrypt_resource(td); if (!pm) { return false; } return mcrypt_enc_is_block_algorithm_mode(pm->m_td) == 1; } bool HHVM_FUNCTION(mcrypt_enc_is_block_algorithm, const Resource& 
td) { auto pm = get_valid_mcrypt_resource(td); if (!pm) { return false; } return mcrypt_enc_is_block_algorithm(pm->m_td) == 1; } bool HHVM_FUNCTION(mcrypt_enc_is_block_mode, const Resource& td) { auto pm = get_valid_mcrypt_resource(td); if (!pm) { return false; } return mcrypt_enc_is_block_mode(pm->m_td) == 1; } Variant HHVM_FUNCTION(mcrypt_enc_self_test, const Resource& td) { auto pm = get_valid_mcrypt_resource(td); if (!pm) { return false; } return mcrypt_enc_self_test(pm->m_td); } Variant HHVM_FUNCTION(mcrypt_generic_init, const Resource& td, const String& key, const String& iv) { auto pm = get_valid_mcrypt_resource(td); if (!pm) { return false; } int max_key_size = mcrypt_enc_get_key_size(pm->m_td); int iv_size = mcrypt_enc_get_iv_size(pm->m_td); if (key.empty()) { raise_warning("Key size is 0"); } unsigned char *key_s = (unsigned char *)malloc(key.size()); memset(key_s, 0, key.size()); unsigned char *iv_s = (unsigned char *)malloc(iv_size + 1); memset(iv_s, 0, iv_size + 1); int key_size; if (key.size() > max_key_size) { raise_warning("Key size too large; supplied length: %d, max: %d", key.size(), max_key_size); key_size = max_key_size; } else { key_size = key.size(); } memcpy(key_s, key.data(), key.size()); if (iv.size() != iv_size) { raise_warning("Iv size incorrect; supplied length: %d, needed: %d", iv.size(), iv_size); } memcpy(iv_s, iv.data(), std::min(iv_size, iv.size())); mcrypt_generic_deinit(pm->m_td); int result = mcrypt_generic_init(pm->m_td, key_s, key_size, iv_s); /* If this function fails, close the mcrypt module to prevent crashes * when further functions want to access this resource */ if (result < 0) { pm->close(); switch (result) { case -3: raise_warning("Key length incorrect"); break; case -4: raise_warning("Memory allocation error"); break; case -1: default: raise_warning("Unknown error"); break; } } else { pm->m_init = true; } free(iv_s); free(key_s); return result; } Variant HHVM_FUNCTION(mcrypt_generic, const Resource& td, const String& 
data) { return mcrypt_generic(td, data, false); } Variant HHVM_FUNCTION(mdecrypt_generic, const Resource& td, const String& data) { return mcrypt_generic(td, data, true); } bool HHVM_FUNCTION(mcrypt_generic_deinit, const Resource& td) { auto pm = get_valid_mcrypt_resource(td); if (!pm) { return false; } if (mcrypt_generic_deinit(pm->m_td) < 0) { raise_warning("Could not terminate encryption specifier"); return false; } pm->m_init = false; return true; } bool HHVM_FUNCTION(mcrypt_generic_end, const Resource& td) { return HHVM_FUNCTION(mcrypt_generic_deinit, td); } /////////////////////////////////////////////////////////////////////////////// struct McryptExtension final : Extension { McryptExtension() : Extension("mcrypt") {} void moduleInit() override { HHVM_RC_STR(MCRYPT_3DES, "tripledes"); HHVM_RC_STR(MCRYPT_ARCFOUR, "arcfour"); HHVM_RC_STR(MCRYPT_ARCFOUR_IV, "arcfour-iv"); HHVM_RC_STR(MCRYPT_BLOWFISH, "blowfish"); HHVM_RC_STR(MCRYPT_BLOWFISH_COMPAT, "blowfish-compat"); HHVM_RC_STR(MCRYPT_CAST_128, "cast-128"); HHVM_RC_STR(MCRYPT_CAST_256, "cast-256"); HHVM_RC_STR(MCRYPT_CRYPT, "crypt"); HHVM_RC_INT(MCRYPT_DECRYPT, 1); HHVM_RC_STR(MCRYPT_DES, "des"); HHVM_RC_INT(MCRYPT_DEV_RANDOM, RANDOM); HHVM_RC_INT(MCRYPT_DEV_URANDOM, URANDOM); HHVM_RC_INT(MCRYPT_ENCRYPT, 0); HHVM_RC_STR(MCRYPT_ENIGNA, "crypt"); HHVM_RC_STR(MCRYPT_GOST, "gost"); HHVM_RC_STR(MCRYPT_IDEA, "idea"); HHVM_RC_STR(MCRYPT_LOKI97, "loki97"); HHVM_RC_STR(MCRYPT_MARS, "mars"); HHVM_RC_STR(MCRYPT_MODE_CBC, "cbc"); HHVM_RC_STR(MCRYPT_MODE_CFB, "cfb"); HHVM_RC_STR(MCRYPT_MODE_ECB, "ecb"); HHVM_RC_STR(MCRYPT_MODE_NOFB, "nofb"); HHVM_RC_STR(MCRYPT_MODE_OFB, "ofb"); HHVM_RC_STR(MCRYPT_MODE_STREAM, "stream"); HHVM_RC_STR(MCRYPT_PANAMA, "panama"); HHVM_RC_INT(MCRYPT_RAND, RAND); HHVM_RC_STR(MCRYPT_RC2, "rc2"); HHVM_RC_STR(MCRYPT_RC6, "rc6"); HHVM_RC_STR(MCRYPT_RIJNDAEL_128, "rijndael-128"); HHVM_RC_STR(MCRYPT_RIJNDAEL_192, "rijndael-192"); HHVM_RC_STR(MCRYPT_RIJNDAEL_256, "rijndael-256"); 
HHVM_RC_STR(MCRYPT_SAFER128, "safer-sk128"); HHVM_RC_STR(MCRYPT_SAFER64, "safer-sk64"); HHVM_RC_STR(MCRYPT_SAFERPLUS, "saferplus"); HHVM_RC_STR(MCRYPT_SERPENT, "serpent"); HHVM_RC_STR(MCRYPT_SKIPJACK, "skipjack"); HHVM_RC_STR(MCRYPT_THREEWAY, "threeway"); HHVM_RC_STR(MCRYPT_TRIPLEDES, "tripledes"); HHVM_RC_STR(MCRYPT_TWOFISH, "twofish"); HHVM_RC_STR(MCRYPT_WAKE, "wake"); HHVM_RC_STR(MCRYPT_XTEA, "xtea"); HHVM_FE(mcrypt_module_open); HHVM_FE(mcrypt_module_close); HHVM_FE(mcrypt_list_algorithms); HHVM_FE(mcrypt_list_modes); HHVM_FE(mcrypt_module_get_algo_block_size); HHVM_FE(mcrypt_module_get_algo_key_size); HHVM_FE(mcrypt_module_get_supported_key_sizes); HHVM_FE(mcrypt_module_is_block_algorithm_mode); HHVM_FE(mcrypt_module_is_block_algorithm); HHVM_FE(mcrypt_module_is_block_mode); HHVM_FE(mcrypt_module_self_test); HHVM_FE(mcrypt_create_iv); HHVM_FE(mcrypt_encrypt); HHVM_FE(mcrypt_decrypt); HHVM_FE(mcrypt_cbc); HHVM_FE(mcrypt_cfb); HHVM_FE(mcrypt_ecb); HHVM_FE(mcrypt_ofb); HHVM_FE(mcrypt_get_block_size); HHVM_FE(mcrypt_get_cipher_name); HHVM_FE(mcrypt_get_iv_size); HHVM_FE(mcrypt_get_key_size); HHVM_FE(mcrypt_enc_get_algorithms_name); HHVM_FE(mcrypt_enc_get_block_size); HHVM_FE(mcrypt_enc_get_iv_size); HHVM_FE(mcrypt_enc_get_key_size); HHVM_FE(mcrypt_enc_get_modes_name); HHVM_FE(mcrypt_enc_get_supported_key_sizes); HHVM_FE(mcrypt_enc_is_block_algorithm_mode); HHVM_FE(mcrypt_enc_is_block_algorithm); HHVM_FE(mcrypt_enc_is_block_mode); HHVM_FE(mcrypt_enc_self_test); HHVM_FE(mcrypt_generic_init); HHVM_FE(mcrypt_generic); HHVM_FE(mdecrypt_generic); HHVM_FE(mcrypt_generic_deinit); HHVM_FE(mcrypt_generic_end); loadSystemlib(); } } s_mcrypt_extension; /////////////////////////////////////////////////////////////////////////////// }
./CrossVul/dataset_final_sorted/CWE-843/cpp/good_4749_0
crossvul-cpp_data_bad_2808_0
/***************************************************************** | | AP4 - sample entries | | Copyright 2002-2008 Axiomatic Systems, LLC | | | This file is part of Bento4/AP4 (MP4 Atom Processing Library). | | Unless you have obtained Bento4 under a difference license, | this version of Bento4 is Bento4|GPL. | Bento4|GPL is free software; you can redistribute it and/or modify | it under the terms of the GNU General Public License as published by | the Free Software Foundation; either version 2, or (at your option) | any later version. | | Bento4|GPL is distributed in the hope that it will be useful, | but WITHOUT ANY WARRANTY; without even the implied warranty of | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | GNU General Public License for more details. | | You should have received a copy of the GNU General Public License | along with Bento4|GPL; see the file COPYING. If not, write to the | Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA | 02111-1307, USA. 
|
 ****************************************************************/

/*----------------------------------------------------------------------
|   includes
+---------------------------------------------------------------------*/
#include "Ap4SampleEntry.h"
#include "Ap4Utils.h"
#include "Ap4AtomFactory.h"
#include "Ap4TimsAtom.h"
#include "Ap4SampleDescription.h"
#include "Ap4AvccAtom.h"

/*----------------------------------------------------------------------
|   dynamic cast support
+---------------------------------------------------------------------*/
AP4_DEFINE_DYNAMIC_CAST_ANCHOR(AP4_SampleEntry)

/*----------------------------------------------------------------------
|   AP4_SampleEntry::AP4_SampleEntry
|
|   Build a sample entry from scratch; the six reserved bytes are zeroed
|   and the atom size is grown by the 8 fixed SampleEntry field bytes.
|   Optional `details` children are copied into this atom.
+---------------------------------------------------------------------*/
AP4_SampleEntry::AP4_SampleEntry(AP4_Atom::Type format,
                                 const AP4_AtomParent* details) :
    AP4_ContainerAtom(format),
    m_DataReferenceIndex(1)
{
    m_Reserved1[0] = 0;
    m_Reserved1[1] = 0;
    m_Reserved1[2] = 0;
    m_Reserved1[3] = 0;
    m_Reserved1[4] = 0;
    m_Reserved1[5] = 0;
    m_Size32 += 8;

    if (details) {
        details->CopyChildren(*this);
    }
}

/*----------------------------------------------------------------------
|   AP4_SampleEntry::AP4_SampleEntry
|
|   Construct with a known serialized size but no payload yet; fields
|   are left for the caller (or a derived class) to fill in.
+---------------------------------------------------------------------*/
AP4_SampleEntry::AP4_SampleEntry(AP4_Atom::Type format, AP4_Size size) :
    AP4_ContainerAtom(format, (AP4_UI64)size, false),
    m_DataReferenceIndex(1)
{
    m_Reserved1[0] = 0;
    m_Reserved1[1] = 0;
    m_Reserved1[2] = 0;
    m_Reserved1[3] = 0;
    m_Reserved1[4] = 0;
    m_Reserved1[5] = 0;
}

/*----------------------------------------------------------------------
|   AP4_SampleEntry::AP4_SampleEntry
|
|   Deserializing constructor: reads fields and child atoms from the
|   stream via Read().
+---------------------------------------------------------------------*/
AP4_SampleEntry::AP4_SampleEntry(AP4_Atom::Type   format,
                                 AP4_Size         size,
                                 AP4_ByteStream&  stream,
                                 AP4_AtomFactory& atom_factory) :
    AP4_ContainerAtom(format, (AP4_UI64)size, false)
{
    Read(stream, atom_factory);
}
/*---------------------------------------------------------------------- | AP4_SampleEntry::Clone +---------------------------------------------------------------------*/ AP4_Atom* AP4_SampleEntry::Clone() { return this->AP4_Atom::Clone(); } /*---------------------------------------------------------------------- | AP4_SampleEntry::Read +---------------------------------------------------------------------*/ void AP4_SampleEntry::Read(AP4_ByteStream& stream, AP4_AtomFactory& atom_factory) { // read the fields before the children atoms ReadFields(stream); // read children atoms (ex: esds and maybe others) // NOTE: not all sample entries have children atoms AP4_Size payload_size = (AP4_Size)(GetSize()-GetHeaderSize()); AP4_Size fields_size = GetFieldsSize(); if (payload_size > fields_size) { ReadChildren(atom_factory, stream, payload_size-fields_size); } } /*---------------------------------------------------------------------- | AP4_SampleEntry::GetFieldsSize +---------------------------------------------------------------------*/ AP4_Size AP4_SampleEntry::GetFieldsSize() { return 8; } /*---------------------------------------------------------------------- | AP4_SampleEntry::ReadFields +---------------------------------------------------------------------*/ AP4_Result AP4_SampleEntry::ReadFields(AP4_ByteStream& stream) { stream.Read(m_Reserved1, sizeof(m_Reserved1)); stream.ReadUI16(m_DataReferenceIndex); return AP4_SUCCESS; } /*---------------------------------------------------------------------- | AP4_SampleEntry::WriteFields +---------------------------------------------------------------------*/ AP4_Result AP4_SampleEntry::WriteFields(AP4_ByteStream& stream) { AP4_Result result; // reserved1 result = stream.Write(m_Reserved1, sizeof(m_Reserved1)); if (AP4_FAILED(result)) return result; // data reference index result = stream.WriteUI16(m_DataReferenceIndex); if (AP4_FAILED(result)) return result; return result; } 
/*----------------------------------------------------------------------
|   AP4_SampleEntry::Write
|
|   Serialize the header, the fixed fields, then all child atoms.
+---------------------------------------------------------------------*/
AP4_Result
AP4_SampleEntry::Write(AP4_ByteStream& stream)
{
    AP4_Result result;

    // write the header
    result = WriteHeader(stream);
    if (AP4_FAILED(result)) return result;

    // write the fields
    result = WriteFields(stream);
    if (AP4_FAILED(result)) return result;

    // write the children atoms
    return m_Children.Apply(AP4_AtomListWriter(stream));
}

/*----------------------------------------------------------------------
|   AP4_SampleEntry::InspectFields
+---------------------------------------------------------------------*/
AP4_Result
AP4_SampleEntry::InspectFields(AP4_AtomInspector& inspector)
{
    inspector.AddField("data_reference_index", m_DataReferenceIndex);

    return AP4_SUCCESS;
}

/*----------------------------------------------------------------------
|   AP4_SampleEntry::Inspect
|
|   Header, fields, then children; EndAtom() closes the inspector scope.
+---------------------------------------------------------------------*/
AP4_Result
AP4_SampleEntry::Inspect(AP4_AtomInspector& inspector)
{
    // inspect the header
    InspectHeader(inspector);

    // inspect the fields
    InspectFields(inspector);

    // inspect children
    m_Children.Apply(AP4_AtomListInspector(inspector));

    // finish
    inspector.EndAtom();

    return AP4_SUCCESS;
}

/*----------------------------------------------------------------------
|   AP4_SampleEntry::OnChildChanged
|
|   Recompute this atom's 32-bit size from header + fields + children,
|   then propagate the change upward.
+---------------------------------------------------------------------*/
void
AP4_SampleEntry::OnChildChanged(AP4_Atom*)
{
    // recompute our size
    AP4_UI64 size = GetHeaderSize()+GetFieldsSize();
    m_Children.Apply(AP4_AtomSizeAdder(size));
    m_Size32 = (AP4_UI32)size;

    // update our parent
    if (m_Parent) m_Parent->OnChildChanged(this);
}

/*----------------------------------------------------------------------
|   AP4_SampleEntry::ToSampleDescription
|
|   Generic fallback: wrap this entry as an unknown-type description.
+---------------------------------------------------------------------*/
AP4_SampleDescription*
AP4_SampleEntry::ToSampleDescription()
{
    return new AP4_SampleDescription(AP4_SampleDescription::TYPE_UNKNOWN,
                                     m_Type,
                                     this);
}

/*----------------------------------------------------------------------
|   AP4_UnknownSampleEntry::AP4_UnknownSampleEntry
|
|   Deserializing constructor: everything past the fixed fields is kept
|   as an opaque payload blob.
+---------------------------------------------------------------------*/
AP4_UnknownSampleEntry::AP4_UnknownSampleEntry(AP4_Atom::Type type,
                                               AP4_Size       size,
                                               AP4_ByteStream& stream) :
    AP4_SampleEntry(type, size)
{
    if (size > AP4_ATOM_HEADER_SIZE+AP4_SampleEntry::GetFieldsSize()) {
        m_Payload.SetDataSize(size-(AP4_ATOM_HEADER_SIZE+AP4_SampleEntry::GetFieldsSize()));
        ReadFields(stream);
    }
}

/*----------------------------------------------------------------------
|   AP4_UnknownSampleEntry::AP4_UnknownSampleEntry
|
|   Construct from an existing payload blob (copied into the entry).
+---------------------------------------------------------------------*/
AP4_UnknownSampleEntry::AP4_UnknownSampleEntry(AP4_Atom::Type  type,
                                               AP4_DataBuffer& payload) :
    AP4_SampleEntry(type),
    m_Payload(payload)
{
    m_Size32 += payload.GetDataSize();
}

/*----------------------------------------------------------------------
|   AP4_UnknownSampleEntry::Clone
+---------------------------------------------------------------------*/
AP4_Atom*
AP4_UnknownSampleEntry::Clone()
{
    return new AP4_UnknownSampleEntry(m_Type, m_Payload);
}

/*----------------------------------------------------------------------
|   AP4_UnknownSampleEntry::ToSampleDescription
+---------------------------------------------------------------------*/
AP4_SampleDescription*
AP4_UnknownSampleEntry::ToSampleDescription()
{
    return new AP4_UnknownSampleDescription(this);
}

/*----------------------------------------------------------------------
|   AP4_UnknownSampleEntry::GetFieldsSize
|
|   Fixed fields plus the opaque payload.
+---------------------------------------------------------------------*/
AP4_Size
AP4_UnknownSampleEntry::GetFieldsSize()
{
    return AP4_SampleEntry::GetFieldsSize()+m_Payload.GetDataSize();
}

/*----------------------------------------------------------------------
|   AP4_UnknownSampleEntry::ReadFields
+---------------------------------------------------------------------*/
AP4_Result
AP4_UnknownSampleEntry::ReadFields(AP4_ByteStream& stream)
{
    // sample entry
    AP4_Result result = AP4_SampleEntry::ReadFields(stream);
    if (AP4_FAILED(result)) return result;

    // read the payload
    return stream.Read(m_Payload.UseData(), m_Payload.GetDataSize());
}

/*----------------------------------------------------------------------
|   AP4_UnknownSampleEntry::WriteFields
+---------------------------------------------------------------------*/
AP4_Result
AP4_UnknownSampleEntry::WriteFields(AP4_ByteStream& stream)
{
    AP4_Result result;

    // write the fields of the base class
    result = AP4_SampleEntry::WriteFields(stream);
    if (AP4_FAILED(result)) return result;

    // write the payload
    return stream.Write(m_Payload.GetData(), m_Payload.GetDataSize());
}

/*----------------------------------------------------------------------
|   AP4_MpegSystemSampleEntry::AP4_MpegSystemSampleEntry
|
|   Optionally wraps an elementary stream descriptor in an esds child.
+---------------------------------------------------------------------*/
AP4_MpegSystemSampleEntry::AP4_MpegSystemSampleEntry(
    AP4_UI32          type,
    AP4_EsDescriptor* descriptor) :
    AP4_SampleEntry(type)
{
    if (descriptor) AddChild(new AP4_EsdsAtom(descriptor));
}

/*----------------------------------------------------------------------
|   AP4_MpegSystemSampleEntry::AP4_MpegSystemSampleEntry
+---------------------------------------------------------------------*/
AP4_MpegSystemSampleEntry::AP4_MpegSystemSampleEntry(
    AP4_UI32         type,
    AP4_Size         size,
    AP4_ByteStream&  stream,
    AP4_AtomFactory& atom_factory) :
    AP4_SampleEntry(type, size, stream, atom_factory)
{
}

/*----------------------------------------------------------------------
|   AP4_MpegSystemSampleEntry::ToSampleDescription
|
|   The esds child may be absent; the description receives NULL then.
+---------------------------------------------------------------------*/
AP4_SampleDescription*
AP4_MpegSystemSampleEntry::ToSampleDescription()
{
    return new AP4_MpegSystemSampleDescription(
        AP4_DYNAMIC_CAST(AP4_EsdsAtom, GetChild(AP4_ATOM_TYPE_ESDS)));
}

/*----------------------------------------------------------------------
|   AP4_Mp4sSampleEntry::AP4_Mp4sSampleEntry
+---------------------------------------------------------------------*/
AP4_Mp4sSampleEntry::AP4_Mp4sSampleEntry(AP4_EsDescriptor* descriptor) :
    AP4_MpegSystemSampleEntry(AP4_ATOM_TYPE_MP4S, descriptor)
{
}

/*----------------------------------------------------------------------
|   AP4_Mp4sSampleEntry::AP4_Mp4sSampleEntry
+---------------------------------------------------------------------*/
AP4_Mp4sSampleEntry::AP4_Mp4sSampleEntry(AP4_Size         size,
                                         AP4_ByteStream&  stream,
                                         AP4_AtomFactory& atom_factory) :
    AP4_MpegSystemSampleEntry(AP4_ATOM_TYPE_MP4S, size, stream, atom_factory)
{
}

/*----------------------------------------------------------------------
|   AP4_Mp4sSampleEntry::ToSampleDescription
+---------------------------------------------------------------------*/
AP4_SampleDescription*
AP4_Mp4sSampleEntry::ToSampleDescription()
{
    // create a sample description
    return new AP4_MpegSystemSampleDescription(
        AP4_DYNAMIC_CAST(AP4_EsdsAtom, GetChild(AP4_ATOM_TYPE_ESDS)));
}

/*----------------------------------------------------------------------
|   AP4_AudioSampleEntry::AP4_AudioSampleEntry
|
|   From-scratch constructor: all QuickTime v1/v2 extension fields are
|   zeroed (version 0 layout) and the atom grows by the 20 audio field
|   bytes.  `sample_rate` is expected in 16.16 fixed point here (see
|   GetSampleRate(), which shifts right by 16) -- confirm with callers.
+---------------------------------------------------------------------*/
AP4_AudioSampleEntry::AP4_AudioSampleEntry(AP4_Atom::Type format,
                                           AP4_UI32       sample_rate,
                                           AP4_UI16       sample_size,
                                           AP4_UI16       channel_count) :
    AP4_SampleEntry(format),
    m_QtVersion(0),
    m_QtRevision(0),
    m_QtVendor(0),
    m_ChannelCount(channel_count),
    m_SampleSize(sample_size),
    m_QtCompressionId(0),
    m_QtPacketSize(0),
    m_SampleRate(sample_rate),
    m_QtV1SamplesPerPacket(0),
    m_QtV1BytesPerPacket(0),
    m_QtV1BytesPerFrame(0),
    m_QtV1BytesPerSample(0),
    m_QtV2StructSize(0),
    m_QtV2SampleRate64(0.0),
    m_QtV2ChannelCount(0),
    m_QtV2Reserved(0),
    m_QtV2BitsPerChannel(0),
    m_QtV2FormatSpecificFlags(0),
    m_QtV2BytesPerAudioPacket(0),
    m_QtV2LPCMFramesPerAudioPacket(0)
{
    m_Size32 += 20;
}
/*----------------------------------------------------------------------
|   AP4_AudioSampleEntry::AP4_AudioSampleEntry
|
|   Deserializing constructor.
+---------------------------------------------------------------------*/
AP4_AudioSampleEntry::AP4_AudioSampleEntry(AP4_Atom::Type   format,
                                           AP4_Size         size,
                                           AP4_ByteStream&  stream,
                                           AP4_AtomFactory& atom_factory) :
    AP4_SampleEntry(format, size)
{
    Read(stream, atom_factory);
}

/*----------------------------------------------------------------------
|   AP4_AudioSampleEntry::GetFieldsSize
|
|   Base fields + 20 audio bytes, plus the QT v1 (16 bytes) or
|   QT v2 (36 bytes + extension blob) trailer when present.
+---------------------------------------------------------------------*/
AP4_Size
AP4_AudioSampleEntry::GetFieldsSize()
{
    AP4_Size size = AP4_SampleEntry::GetFieldsSize()+20;
    if (m_QtVersion == 1) {
        size += 16;
    } else if (m_QtVersion == 2) {
        size += 36+m_QtV2Extension.GetDataSize();
    }

    return size;
}

/*----------------------------------------------------------------------
|   AP4_AudioSampleEntry::GetSampleRate
|
|   v2 entries carry the rate as a 64-bit float; otherwise the 16.16
|   fixed-point field is shifted down to its integer part.
+---------------------------------------------------------------------*/
AP4_UI32
AP4_AudioSampleEntry::GetSampleRate()
{
    if (m_QtVersion == 2) {
        return (AP4_UI32)(m_QtV2SampleRate64);
    } else {
        return m_SampleRate>>16;
    }
}

/*----------------------------------------------------------------------
|   AP4_AudioSampleEntry::GetChannelCount
+---------------------------------------------------------------------*/
AP4_UI16
AP4_AudioSampleEntry::GetChannelCount()
{
    if (m_QtVersion == 2) {
        return (AP4_UI16)m_QtV2ChannelCount;
    } else {
        return m_ChannelCount;
    }
}

/*----------------------------------------------------------------------
|   AP4_AudioSampleEntry::ReadFields
|
|   Reads the common 20-byte audio trailer, then the QT v1 or v2
|   extension depending on m_QtVersion; all unused extension fields
|   are zeroed so GetFieldsSize()/getters stay consistent.
+---------------------------------------------------------------------*/
AP4_Result
AP4_AudioSampleEntry::ReadFields(AP4_ByteStream& stream)
{
    // sample entry
    AP4_Result result = AP4_SampleEntry::ReadFields(stream);
    if (result < 0) return result;

    // read the fields of this class
    stream.ReadUI16(m_QtVersion);
    stream.ReadUI16(m_QtRevision);
    stream.ReadUI32(m_QtVendor);
    stream.ReadUI16(m_ChannelCount);
    stream.ReadUI16(m_SampleSize);
    stream.ReadUI16(m_QtCompressionId);
    stream.ReadUI16(m_QtPacketSize);
    stream.ReadUI32(m_SampleRate);

    // if this is a QT V1 entry, read the extension
    if (m_QtVersion == 1) {
        stream.ReadUI32(m_QtV1SamplesPerPacket);
        stream.ReadUI32(m_QtV1BytesPerPacket);
        stream.ReadUI32(m_QtV1BytesPerFrame);
        stream.ReadUI32(m_QtV1BytesPerSample);
    } else if (m_QtVersion == 2) {
        stream.ReadUI32(m_QtV2StructSize);
        stream.ReadDouble(m_QtV2SampleRate64);
        stream.ReadUI32(m_QtV2ChannelCount);
        stream.ReadUI32(m_QtV2Reserved);
        stream.ReadUI32(m_QtV2BitsPerChannel);
        stream.ReadUI32(m_QtV2FormatSpecificFlags);
        stream.ReadUI32(m_QtV2BytesPerAudioPacket);
        stream.ReadUI32(m_QtV2LPCMFramesPerAudioPacket);
        // NOTE(review): m_QtV2StructSize comes straight from the file and is
        // not bounded against the atom size -- a hostile value can request a
        // very large extension allocation here.
        if (m_QtV2StructSize > 72) {
            unsigned int ext_size = m_QtV2StructSize-72;
            m_QtV2Extension.SetDataSize(ext_size);
            stream.Read(m_QtV2Extension.UseData(), ext_size);
        }
        m_QtV1SamplesPerPacket =
        m_QtV1BytesPerPacket   =
        m_QtV1BytesPerFrame    =
        m_QtV1BytesPerSample   = 0;
    } else {
        m_QtV1SamplesPerPacket         = 0;
        m_QtV1BytesPerPacket           = 0;
        m_QtV1BytesPerFrame            = 0;
        m_QtV1BytesPerSample           = 0;
        m_QtV2StructSize               = 0;
        m_QtV2SampleRate64             = 0.0;
        m_QtV2ChannelCount             = 0;
        m_QtV2Reserved                 = 0;
        m_QtV2BitsPerChannel           = 0;
        m_QtV2FormatSpecificFlags      = 0;
        m_QtV2BytesPerAudioPacket      = 0;
        m_QtV2LPCMFramesPerAudioPacket = 0;
    }

    return AP4_SUCCESS;
}

/*----------------------------------------------------------------------
|   AP4_AudioSampleEntry::WriteFields
|
|   Mirror of ReadFields(): common trailer, then the v1/v2 extension.
|   (v2 writes are not individually error-checked, matching the
|   original code.)
+---------------------------------------------------------------------*/
AP4_Result
AP4_AudioSampleEntry::WriteFields(AP4_ByteStream& stream)
{
    AP4_Result result;

    // write the fields of the base class
    result = AP4_SampleEntry::WriteFields(stream);
    if (AP4_FAILED(result)) return result;

    // QT version
    result = stream.WriteUI16(m_QtVersion);
    if (AP4_FAILED(result)) return result;

    // QT revision
    result = stream.WriteUI16(m_QtRevision);
    if (AP4_FAILED(result)) return result;

    // QT vendor
    result = stream.WriteUI32(m_QtVendor);
    if (AP4_FAILED(result)) return result;

    // channel count
    result = stream.WriteUI16(m_ChannelCount);
    if (AP4_FAILED(result)) return result;

    // sample size
    result = stream.WriteUI16(m_SampleSize);
    if (AP4_FAILED(result)) return result;

    // QT compression ID
    result = stream.WriteUI16(m_QtCompressionId);
    if (AP4_FAILED(result)) return result;

    // QT packet size
    result = stream.WriteUI16(m_QtPacketSize);
    if (AP4_FAILED(result)) return result;

    // sample rate
    result = stream.WriteUI32(m_SampleRate);
    if (AP4_FAILED(result)) return result;

    if (m_QtVersion == 1) {
        result = stream.WriteUI32(m_QtV1SamplesPerPacket);
        if (AP4_FAILED(result)) return result;
        result = stream.WriteUI32(m_QtV1BytesPerPacket);
        if (AP4_FAILED(result)) return result;
        result = stream.WriteUI32(m_QtV1BytesPerFrame);
        if (AP4_FAILED(result)) return result;
        result = stream.WriteUI32(m_QtV1BytesPerSample);
        if (AP4_FAILED(result)) return result;
    } else if (m_QtVersion == 2) {
        stream.WriteUI32(m_QtV2StructSize);
        stream.WriteDouble(m_QtV2SampleRate64);
        stream.WriteUI32(m_QtV2ChannelCount);
        stream.WriteUI32(m_QtV2Reserved);
        stream.WriteUI32(m_QtV2BitsPerChannel);
        stream.WriteUI32(m_QtV2FormatSpecificFlags);
        stream.WriteUI32(m_QtV2BytesPerAudioPacket);
        stream.WriteUI32(m_QtV2LPCMFramesPerAudioPacket);
        if (m_QtV2Extension.GetDataSize()) {
            stream.Write(m_QtV2Extension.GetData(),
                         m_QtV2Extension.GetDataSize());
        }
    }

    return result;
}

/*----------------------------------------------------------------------
|   AP4_AudioSampleEntry::InspectFields
+---------------------------------------------------------------------*/
AP4_Result
AP4_AudioSampleEntry::InspectFields(AP4_AtomInspector& inspector)
{
    // dump the fields from the base class
    AP4_SampleEntry::InspectFields(inspector);

    // fields
    inspector.AddField("channel_count", GetChannelCount());
    inspector.AddField("sample_size", GetSampleSize());
    inspector.AddField("sample_rate", GetSampleRate());
    if (m_QtVersion) {
        inspector.AddField("qt_version", m_QtVersion);
    }

    return AP4_SUCCESS;
}
/*---------------------------------------------------------------------- | AP4_AudioSampleEntry::ToSampleDescription +---------------------------------------------------------------------*/ AP4_SampleDescription* AP4_AudioSampleEntry::ToSampleDescription() { // create a sample description return new AP4_GenericAudioSampleDescription( m_Type, GetSampleRate(), GetSampleSize(), GetChannelCount(), this); } /*---------------------------------------------------------------------- | AP4_MpegAudioSampleEntry::AP4_MpegAudioSampleEntry +---------------------------------------------------------------------*/ AP4_MpegAudioSampleEntry::AP4_MpegAudioSampleEntry( AP4_UI32 type, AP4_UI32 sample_rate, AP4_UI16 sample_size, AP4_UI16 channel_count, AP4_EsDescriptor* descriptor) : AP4_AudioSampleEntry(type, sample_rate, sample_size, channel_count) { if (descriptor) AddChild(new AP4_EsdsAtom(descriptor)); } /*---------------------------------------------------------------------- | AP4_MpegAudioSampleEntry::AP4_MpegAudioSampleEntry +---------------------------------------------------------------------*/ AP4_MpegAudioSampleEntry::AP4_MpegAudioSampleEntry( AP4_UI32 type, AP4_Size size, AP4_ByteStream& stream, AP4_AtomFactory& atom_factory) : AP4_AudioSampleEntry(type, size, stream, atom_factory) { } /*---------------------------------------------------------------------- | AP4_MpegAudioSampleEntry::ToSampleDescription +---------------------------------------------------------------------*/ AP4_SampleDescription* AP4_MpegAudioSampleEntry::ToSampleDescription() { // find the esds atom AP4_EsdsAtom* esds = AP4_DYNAMIC_CAST(AP4_EsdsAtom, GetChild(AP4_ATOM_TYPE_ESDS)); if (esds == NULL) { // check if this is a quicktime style sample description if (m_QtVersion > 0) { esds = AP4_DYNAMIC_CAST(AP4_EsdsAtom, FindChild("wave/esds")); } } // create a sample description return new AP4_MpegAudioSampleDescription(GetSampleRate(), GetSampleSize(), GetChannelCount(), esds); } 
/*---------------------------------------------------------------------- | AP4_Mp4aSampleEntry::AP4_Mp4aSampleEntry +---------------------------------------------------------------------*/ AP4_Mp4aSampleEntry::AP4_Mp4aSampleEntry(AP4_UI32 sample_rate, AP4_UI16 sample_size, AP4_UI16 channel_count, AP4_EsDescriptor* descriptor) : AP4_MpegAudioSampleEntry(AP4_ATOM_TYPE_MP4A, sample_rate, sample_size, channel_count, descriptor) { } /*---------------------------------------------------------------------- | AP4_Mp4aSampleEntry::AP4_Mp4aSampleEntry +---------------------------------------------------------------------*/ AP4_Mp4aSampleEntry::AP4_Mp4aSampleEntry(AP4_Size size, AP4_ByteStream& stream, AP4_AtomFactory& atom_factory) : AP4_MpegAudioSampleEntry(AP4_ATOM_TYPE_MP4A, size, stream, atom_factory) { } /*---------------------------------------------------------------------- | AP4_VisualSampleEntry::AP4_VisualSampleEntry +---------------------------------------------------------------------*/ AP4_VisualSampleEntry::AP4_VisualSampleEntry( AP4_Atom::Type format, AP4_UI16 width, AP4_UI16 height, AP4_UI16 depth, const char* compressor_name, const AP4_AtomParent* details) : AP4_SampleEntry(format, details), m_Predefined1(0), m_Reserved2(0), m_Width(width), m_Height(height), m_HorizResolution(0x00480000), m_VertResolution(0x00480000), m_Reserved3(0), m_FrameCount(1), m_CompressorName(compressor_name), m_Depth(depth), m_Predefined3(0xFFFF) { memset(m_Predefined2, 0, sizeof(m_Predefined2)); m_Size32 += 70; } /*---------------------------------------------------------------------- | AP4_VisualSampleEntry::AP4_VisualSampleEntry +---------------------------------------------------------------------*/ AP4_VisualSampleEntry::AP4_VisualSampleEntry(AP4_Atom::Type format, AP4_Size size, AP4_ByteStream& stream, AP4_AtomFactory& atom_factory) : AP4_SampleEntry(format, size) { Read(stream, atom_factory); } /*---------------------------------------------------------------------- | 
AP4_VisualSampleEntry::GetFieldsSize +---------------------------------------------------------------------*/ AP4_Size AP4_VisualSampleEntry::GetFieldsSize() { return AP4_SampleEntry::GetFieldsSize()+70; } /*---------------------------------------------------------------------- | AP4_VisualSampleEntry::ReadFields +---------------------------------------------------------------------*/ AP4_Result AP4_VisualSampleEntry::ReadFields(AP4_ByteStream& stream) { // sample entry AP4_Result result = AP4_SampleEntry::ReadFields(stream); if (result < 0) return result; // read fields from this class stream.ReadUI16(m_Predefined1); stream.ReadUI16(m_Reserved2); stream.Read(m_Predefined2, sizeof(m_Predefined2)); stream.ReadUI16(m_Width); stream.ReadUI16(m_Height); stream.ReadUI32(m_HorizResolution); stream.ReadUI32(m_VertResolution); stream.ReadUI32(m_Reserved3); stream.ReadUI16(m_FrameCount); char compressor_name[33]; compressor_name[32] = 0; stream.Read(compressor_name, 32); int name_length = compressor_name[0]; if (name_length < 32) { compressor_name[name_length+1] = 0; // force null termination m_CompressorName = &compressor_name[1]; } stream.ReadUI16(m_Depth); stream.ReadUI16(m_Predefined3); return AP4_SUCCESS; } /*---------------------------------------------------------------------- | AP4_VisualSampleEntry::WriteFields +---------------------------------------------------------------------*/ AP4_Result AP4_VisualSampleEntry::WriteFields(AP4_ByteStream& stream) { AP4_Result result; // write the fields of the base class result = AP4_SampleEntry::WriteFields(stream); if (AP4_FAILED(result)) return result; // predefined1 result = stream.WriteUI16(m_Predefined1); if (AP4_FAILED(result)) return result; // reserved2 result = stream.WriteUI16(m_Reserved2); if (AP4_FAILED(result)) return result; // predefined2 result = stream.Write(m_Predefined2, sizeof(m_Predefined2)); if (AP4_FAILED(result)) return result; // width result = stream.WriteUI16(m_Width); if (AP4_FAILED(result)) return 
result; // height result = stream.WriteUI16(m_Height); if (AP4_FAILED(result)) return result; // horizontal resolution result = stream.WriteUI32(m_HorizResolution); if (AP4_FAILED(result)) return result; // vertical resolution result = stream.WriteUI32(m_VertResolution); if (AP4_FAILED(result)) return result; // reserved3 result = stream.WriteUI32(m_Reserved3); if (AP4_FAILED(result)) return result; // frame count result = stream.WriteUI16(m_FrameCount); if (AP4_FAILED(result)) return result; // compressor name unsigned char compressor_name[32]; unsigned int name_length = m_CompressorName.GetLength(); if (name_length > 31) name_length = 31; compressor_name[0] = (unsigned char)name_length; for (unsigned int i=0; i<name_length; i++) { compressor_name[i+1] = m_CompressorName[i]; } for (unsigned int i=name_length+1; i<32; i++) { compressor_name[i] = 0; } result = stream.Write(compressor_name, 32); if (AP4_FAILED(result)) return result; // depth result = stream.WriteUI16(m_Depth); if (AP4_FAILED(result)) return result; // predefined3 result = stream.WriteUI16(m_Predefined3); if (AP4_FAILED(result)) return result; return result; } /*---------------------------------------------------------------------- | AP4_VisualSampleEntry::InspectFields +---------------------------------------------------------------------*/ AP4_Result AP4_VisualSampleEntry::InspectFields(AP4_AtomInspector& inspector) { // dump the fields of the base class AP4_SampleEntry::InspectFields(inspector); // fields inspector.AddField("width", m_Width); inspector.AddField("height", m_Height); inspector.AddField("compressor", m_CompressorName.GetChars()); return AP4_SUCCESS; } /*---------------------------------------------------------------------- | AP4_VisualSampleEntry::ToSampleDescription +---------------------------------------------------------------------*/ AP4_SampleDescription* AP4_VisualSampleEntry::ToSampleDescription() { // create a sample description return new AP4_GenericVideoSampleDescription( 
m_Type, m_Width, m_Height, m_Depth, m_CompressorName.GetChars(), this); } /*---------------------------------------------------------------------- | AP4_MpegVideoSampleEntry::AP4_MpegVideoSampleEntry +---------------------------------------------------------------------*/ AP4_MpegVideoSampleEntry::AP4_MpegVideoSampleEntry( AP4_UI32 type, AP4_UI16 width, AP4_UI16 height, AP4_UI16 depth, const char* compressor_name, AP4_EsDescriptor* descriptor) : AP4_VisualSampleEntry(type, width, height, depth, compressor_name) { if (descriptor) AddChild(new AP4_EsdsAtom(descriptor)); } /*---------------------------------------------------------------------- | AP4_MpegVideoSampleEntry::AP4_MpegVideoSampleEntry +---------------------------------------------------------------------*/ AP4_MpegVideoSampleEntry::AP4_MpegVideoSampleEntry( AP4_UI32 type, AP4_Size size, AP4_ByteStream& stream, AP4_AtomFactory& atom_factory) : AP4_VisualSampleEntry(type, size, stream, atom_factory) { } /*---------------------------------------------------------------------- | AP4_MpegVideoSampleEntry::ToSampleDescription +---------------------------------------------------------------------*/ AP4_SampleDescription* AP4_MpegVideoSampleEntry::ToSampleDescription() { // create a sample description return new AP4_MpegVideoSampleDescription( m_Width, m_Height, m_Depth, m_CompressorName.GetChars(), AP4_DYNAMIC_CAST(AP4_EsdsAtom, GetChild(AP4_ATOM_TYPE_ESDS))); } /*---------------------------------------------------------------------- | AP4_Mp4vSampleEntry::AP4_Mp4vSampleEntry +---------------------------------------------------------------------*/ AP4_Mp4vSampleEntry::AP4_Mp4vSampleEntry(AP4_UI16 width, AP4_UI16 height, AP4_UI16 depth, const char* compressor_name, AP4_EsDescriptor* descriptor) : AP4_MpegVideoSampleEntry(AP4_ATOM_TYPE_MP4V, width, height, depth, compressor_name, descriptor) { } /*---------------------------------------------------------------------- | AP4_Mp4vSampleEntry::AP4_Mp4aSampleEntry 
+---------------------------------------------------------------------*/ AP4_Mp4vSampleEntry::AP4_Mp4vSampleEntry(AP4_Size size, AP4_ByteStream& stream, AP4_AtomFactory& atom_factory) : AP4_MpegVideoSampleEntry(AP4_ATOM_TYPE_MP4V, size, stream, atom_factory) { } /*---------------------------------------------------------------------- | AP4_AvcSampleEntry::AP4_AvcSSampleEntry +---------------------------------------------------------------------*/ AP4_AvcSampleEntry::AP4_AvcSampleEntry(AP4_UI32 format, AP4_UI16 width, AP4_UI16 height, AP4_UI16 depth, const char* compressor_name, const AP4_AtomParent* details) : AP4_VisualSampleEntry(format, width, height, depth, compressor_name, details) { } /*---------------------------------------------------------------------- | AP4_AvcSampleEntry::AP4_AvcSampleEntry +---------------------------------------------------------------------*/ AP4_AvcSampleEntry::AP4_AvcSampleEntry(AP4_UI32 format, AP4_Size size, AP4_ByteStream& stream, AP4_AtomFactory& atom_factory) : AP4_VisualSampleEntry(format, size, stream, atom_factory) { } /*---------------------------------------------------------------------- | AP4_HevcSampleEntry::AP4_HevcSSampleEntry +---------------------------------------------------------------------*/ AP4_HevcSampleEntry::AP4_HevcSampleEntry(AP4_UI32 format, AP4_UI16 width, AP4_UI16 height, AP4_UI16 depth, const char* compressor_name, const AP4_AtomParent* details) : AP4_VisualSampleEntry(format, width, height, depth, compressor_name, details) { } /*---------------------------------------------------------------------- | AP4_AvcSampleEntry::ToSampleDescription +---------------------------------------------------------------------*/ AP4_SampleDescription* AP4_AvcSampleEntry::ToSampleDescription() { return new AP4_AvcSampleDescription( m_Type, m_Width, m_Height, m_Depth, m_CompressorName.GetChars(), this); } /*---------------------------------------------------------------------- | AP4_HevcSampleEntry::AP4_HevcSampleEntry 
+---------------------------------------------------------------------*/ AP4_HevcSampleEntry::AP4_HevcSampleEntry(AP4_UI32 format, AP4_Size size, AP4_ByteStream& stream, AP4_AtomFactory& atom_factory) : AP4_VisualSampleEntry(format, size, stream, atom_factory) { } /*---------------------------------------------------------------------- | AP4_HevcSampleEntry::ToSampleDescription +---------------------------------------------------------------------*/ AP4_SampleDescription* AP4_HevcSampleEntry::ToSampleDescription() { return new AP4_HevcSampleDescription( m_Type, m_Width, m_Height, m_Depth, m_CompressorName.GetChars(), this); } /*---------------------------------------------------------------------- | AP4_RtpHintSampleEntry::AP4_RtpHintSampleEntry +---------------------------------------------------------------------*/ AP4_RtpHintSampleEntry::AP4_RtpHintSampleEntry(AP4_UI16 hint_track_version, AP4_UI16 highest_compatible_version, AP4_UI32 max_packet_size, AP4_UI32 timescale): AP4_SampleEntry(AP4_ATOM_TYPE_RTP_), m_HintTrackVersion(hint_track_version), m_HighestCompatibleVersion(highest_compatible_version), m_MaxPacketSize(max_packet_size) { // build an atom for timescale AddChild(new AP4_TimsAtom(timescale)); } /*---------------------------------------------------------------------- | AP4_RtpHintSampleEntry::AP4_RtpHintSampleEntry +---------------------------------------------------------------------*/ AP4_RtpHintSampleEntry::AP4_RtpHintSampleEntry(AP4_Size size, AP4_ByteStream& stream, AP4_AtomFactory& atom_factory): AP4_SampleEntry(AP4_ATOM_TYPE_RTP_, size) { Read(stream, atom_factory); } /*---------------------------------------------------------------------- | AP4_RtpHintSampleEntry::GetFieldsSize +---------------------------------------------------------------------*/ AP4_Size AP4_RtpHintSampleEntry::GetFieldsSize() { return AP4_SampleEntry::GetFieldsSize()+8; } /*---------------------------------------------------------------------- | 
AP4_RtpHintSampleEntry::ReadFields +---------------------------------------------------------------------*/ AP4_Result AP4_RtpHintSampleEntry::ReadFields(AP4_ByteStream& stream) { // sample entry AP4_Result result = AP4_SampleEntry::ReadFields(stream); if (result < 0) return result; // data result = stream.ReadUI16(m_HintTrackVersion); if (AP4_FAILED(result)) return result; result = stream.ReadUI16(m_HighestCompatibleVersion); if (AP4_FAILED(result)) return result; result = stream.ReadUI32(m_MaxPacketSize); if (AP4_FAILED(result)) return result; return AP4_SUCCESS; } /*---------------------------------------------------------------------- | AP4_RtpHintSampleEntry::WriteFields +---------------------------------------------------------------------*/ AP4_Result AP4_RtpHintSampleEntry::WriteFields(AP4_ByteStream& stream) { // sample entry AP4_Result result = AP4_SampleEntry::WriteFields(stream); if (AP4_FAILED(result)) return result; // data result = stream.WriteUI16(m_HintTrackVersion); if (AP4_FAILED(result)) return result; result = stream.WriteUI16(m_HighestCompatibleVersion); if (AP4_FAILED(result)) return result; result = stream.WriteUI32(m_MaxPacketSize); if (AP4_FAILED(result)) return result; return result; } /*---------------------------------------------------------------------- | AP4_RtpHintSampleEntry::InspectFields +---------------------------------------------------------------------*/ AP4_Result AP4_RtpHintSampleEntry::InspectFields(AP4_AtomInspector& inspector) { // sample entry AP4_SampleEntry::InspectFields(inspector); // fields inspector.AddField("hint_track_version", m_HintTrackVersion); inspector.AddField("highest_compatible_version", m_HighestCompatibleVersion); inspector.AddField("max_packet_size", m_MaxPacketSize); return AP4_SUCCESS; } /*---------------------------------------------------------------------- | AP4_SubtitleSampleEntry::AP4_SubtitleSampleEntry +---------------------------------------------------------------------*/ 
AP4_SubtitleSampleEntry::AP4_SubtitleSampleEntry( AP4_Atom::Type format, const char* namespce, const char* schema_location, const char* image_mime_type) : AP4_SampleEntry(format), m_Namespace(namespce), m_SchemaLocation(schema_location), m_ImageMimeType(image_mime_type) { SetSize(m_Size32+m_Namespace.GetLength()+1+m_SchemaLocation.GetLength()+1+m_ImageMimeType.GetLength()+1); } /*---------------------------------------------------------------------- | AP4_SubtitleSampleEntry::AP4_SubtitleSampleEntry +---------------------------------------------------------------------*/ AP4_SubtitleSampleEntry::AP4_SubtitleSampleEntry(AP4_Atom::Type format, AP4_Size size, AP4_ByteStream& stream, AP4_AtomFactory& atom_factory) : AP4_SampleEntry(format, size) { Read(stream, atom_factory); } /*---------------------------------------------------------------------- | AP4_SubtitleSampleEntry::GetFieldsSize +---------------------------------------------------------------------*/ AP4_Size AP4_SubtitleSampleEntry::GetFieldsSize() { return AP4_SampleEntry::GetFieldsSize() + 3 + m_Namespace.GetLength() + m_SchemaLocation.GetLength() + m_ImageMimeType.GetLength(); } /*---------------------------------------------------------------------- | AP4_SubtitleSampleEntry::ReadFields +---------------------------------------------------------------------*/ AP4_Result AP4_SubtitleSampleEntry::ReadFields(AP4_ByteStream& stream) { // sample entry AP4_Result result = AP4_SampleEntry::ReadFields(stream); if (result < 0) return result; // read fields from this class result = stream.ReadNullTerminatedString(m_Namespace); if (AP4_FAILED(result)) return result; result = stream.ReadNullTerminatedString(m_SchemaLocation); if (AP4_FAILED(result)) return result; result = stream.ReadNullTerminatedString(m_ImageMimeType); if (AP4_FAILED(result)) return result; return AP4_SUCCESS; } /*---------------------------------------------------------------------- | AP4_SubtitleSampleEntry::WriteFields 
+---------------------------------------------------------------------*/ AP4_Result AP4_SubtitleSampleEntry::WriteFields(AP4_ByteStream& stream) { AP4_Result result; // write the fields of the base class result = AP4_SampleEntry::WriteFields(stream); if (AP4_FAILED(result)) return result; // write fields from this class result = stream.WriteString(m_Namespace.GetChars()); if (AP4_FAILED(result)) return result; result = stream.WriteUI08(0); if (AP4_FAILED(result)) return result; result = stream.WriteString(m_SchemaLocation.GetChars()); if (AP4_FAILED(result)) return result; result = stream.WriteUI08(0); if (AP4_FAILED(result)) return result; result = stream.WriteString(m_ImageMimeType.GetChars()); if (AP4_FAILED(result)) return result; result = stream.WriteUI08(0); if (AP4_FAILED(result)) return result; return AP4_SUCCESS; } /*---------------------------------------------------------------------- | AP4_SubtitleSampleEntry::InspectFields +---------------------------------------------------------------------*/ AP4_Result AP4_SubtitleSampleEntry::InspectFields(AP4_AtomInspector& inspector) { // dump the fields of the base class AP4_SampleEntry::InspectFields(inspector); // fields inspector.AddField("namespace", m_Namespace.GetChars()); inspector.AddField("schema_location", m_SchemaLocation.GetChars()); inspector.AddField("image_mime_type", m_ImageMimeType.GetChars()); return AP4_SUCCESS; } /*---------------------------------------------------------------------- | AP4_SubtitleSampleEntry::ToSampleDescription +---------------------------------------------------------------------*/ AP4_SampleDescription* AP4_SubtitleSampleEntry::ToSampleDescription() { // create a sample description return new AP4_SubtitleSampleDescription(m_Type, m_Namespace.GetChars(), m_SchemaLocation.GetChars(), m_ImageMimeType.GetChars()); }
./CrossVul/dataset_final_sorted/CWE-843/cpp/bad_2808_0
crossvul-cpp_data_good_4255_1
/* * Copyright (c) Facebook, Inc. and its affiliates. * * This source code is licensed under the MIT license found in the * LICENSE file in the root directory of this source tree. */ #include "hermes/VM/JSObject.h" #include "hermes/VM/BuildMetadata.h" #include "hermes/VM/Callable.h" #include "hermes/VM/HostModel.h" #include "hermes/VM/InternalProperty.h" #include "hermes/VM/JSArray.h" #include "hermes/VM/JSDate.h" #include "hermes/VM/JSProxy.h" #include "hermes/VM/Operations.h" #include "hermes/VM/StringView.h" #include "llvh/ADT/SmallSet.h" namespace hermes { namespace vm { ObjectVTable JSObject::vt{ VTable( CellKind::ObjectKind, cellSize<JSObject>(), nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, // externalMemorySize VTable::HeapSnapshotMetadata{HeapSnapshot::NodeType::Object, JSObject::_snapshotNameImpl, JSObject::_snapshotAddEdgesImpl, nullptr, JSObject::_snapshotAddLocationsImpl}), JSObject::_getOwnIndexedRangeImpl, JSObject::_haveOwnIndexedImpl, JSObject::_getOwnIndexedPropertyFlagsImpl, JSObject::_getOwnIndexedImpl, JSObject::_setOwnIndexedImpl, JSObject::_deleteOwnIndexedImpl, JSObject::_checkAllOwnIndexedImpl, }; void ObjectBuildMeta(const GCCell *cell, Metadata::Builder &mb) { // This call is just for debugging and consistency purposes. mb.addJSObjectOverlapSlots(JSObject::numOverlapSlots<JSObject>()); const auto *self = static_cast<const JSObject *>(cell); mb.addField("parent", &self->parent_); mb.addField("class", &self->clazz_); mb.addField("propStorage", &self->propStorage_); // Declare the direct properties. 
static const char *directPropName[JSObject::DIRECT_PROPERTY_SLOTS] = { "directProp0", "directProp1", "directProp2", "directProp3"}; for (unsigned i = mb.getJSObjectOverlapSlots(); i < JSObject::DIRECT_PROPERTY_SLOTS; ++i) { mb.addField(directPropName[i], self->directProps() + i); } } #ifdef HERMESVM_SERIALIZE void JSObject::serializeObjectImpl( Serializer &s, const GCCell *cell, unsigned overlapSlots) { auto *self = vmcast<const JSObject>(cell); s.writeData(&self->flags_, sizeof(ObjectFlags)); s.writeRelocation(self->parent_.get(s.getRuntime())); s.writeRelocation(self->clazz_.get(s.getRuntime())); // propStorage_ : GCPointer<PropStorage> is also ArrayStorage. Serialize // *propStorage_ with this JSObject. bool hasArray = (bool)self->propStorage_; s.writeInt<uint8_t>(hasArray); if (hasArray) { ArrayStorage::serializeArrayStorage( s, self->propStorage_.get(s.getRuntime())); } // Record the number of overlap slots, so that the deserialization code // doesn't need to keep track of it. s.writeInt<uint8_t>(overlapSlots); for (size_t i = overlapSlots; i < JSObject::DIRECT_PROPERTY_SLOTS; i++) { s.writeHermesValue(self->directProps()[i]); } } void ObjectSerialize(Serializer &s, const GCCell *cell) { JSObject::serializeObjectImpl(s, cell, JSObject::numOverlapSlots<JSObject>()); s.endObject(cell); } void ObjectDeserialize(Deserializer &d, CellKind kind) { assert(kind == CellKind::ObjectKind && "Expected JSObject"); void *mem = d.getRuntime()->alloc</*fixedSize*/ true>(cellSize<JSObject>()); auto *obj = new (mem) JSObject(d, &JSObject::vt.base); d.endObject(obj); } JSObject::JSObject(Deserializer &d, const VTable *vtp) : GCCell(&d.getRuntime()->getHeap(), vtp) { d.readData(&flags_, sizeof(ObjectFlags)); d.readRelocation(&parent_, RelocationKind::GCPointer); d.readRelocation(&clazz_, RelocationKind::GCPointer); if (d.readInt<uint8_t>()) { propStorage_.set( d.getRuntime(), ArrayStorage::deserializeArrayStorage(d), &d.getRuntime()->getHeap()); } auto overlapSlots = 
d.readInt<uint8_t>(); for (size_t i = overlapSlots; i < JSObject::DIRECT_PROPERTY_SLOTS; i++) { d.readHermesValue(&directProps()[i]); } } #endif PseudoHandle<JSObject> JSObject::create( Runtime *runtime, Handle<JSObject> parentHandle) { JSObjectAlloc<JSObject> mem{runtime}; return mem.initToPseudoHandle(new (mem) JSObject( runtime, &vt.base, *parentHandle, runtime->getHiddenClassForPrototypeRaw( *parentHandle, numOverlapSlots<JSObject>() + ANONYMOUS_PROPERTY_SLOTS), GCPointerBase::NoBarriers())); } PseudoHandle<JSObject> JSObject::create(Runtime *runtime) { JSObjectAlloc<JSObject> mem{runtime}; JSObject *objProto = runtime->objectPrototypeRawPtr; return mem.initToPseudoHandle(new (mem) JSObject( runtime, &vt.base, objProto, runtime->getHiddenClassForPrototypeRaw( objProto, numOverlapSlots<JSObject>() + ANONYMOUS_PROPERTY_SLOTS), GCPointerBase::NoBarriers())); } PseudoHandle<JSObject> JSObject::create( Runtime *runtime, unsigned propertyCount) { JSObjectAlloc<JSObject> mem{runtime}; JSObject *objProto = runtime->objectPrototypeRawPtr; auto self = mem.initToPseudoHandle(new (mem) JSObject( runtime, &vt.base, objProto, runtime->getHiddenClassForPrototypeRaw( objProto, numOverlapSlots<JSObject>() + ANONYMOUS_PROPERTY_SLOTS), GCPointerBase::NoBarriers())); return runtime->ignoreAllocationFailure( JSObject::allocatePropStorage(std::move(self), runtime, propertyCount)); } PseudoHandle<JSObject> JSObject::create( Runtime *runtime, Handle<HiddenClass> clazz) { auto obj = JSObject::create(runtime, clazz->getNumProperties()); obj->clazz_.set(runtime, *clazz, &runtime->getHeap()); // If the hidden class has index like property, we need to clear the fast path // flag. 
if (LLVM_UNLIKELY(obj->clazz_.get(runtime)->getHasIndexLikeProperties())) obj->flags_.fastIndexProperties = false; return obj; } void JSObject::initializeLazyObject( Runtime *runtime, Handle<JSObject> lazyObject) { assert(lazyObject->flags_.lazyObject && "object must be lazy"); // object is now assumed to be a regular object. lazyObject->flags_.lazyObject = 0; // only functions can be lazy. assert(vmisa<Callable>(lazyObject.get()) && "unexpected lazy object"); Callable::defineLazyProperties(Handle<Callable>::vmcast(lazyObject), runtime); } ObjectID JSObject::getObjectID(JSObject *self, Runtime *runtime) { if (LLVM_LIKELY(self->flags_.objectID)) return self->flags_.objectID; // Object ID does not yet exist, get next unique global ID.. self->flags_.objectID = runtime->generateNextObjectID(); // Make sure it is not zero. if (LLVM_UNLIKELY(!self->flags_.objectID)) --self->flags_.objectID; return self->flags_.objectID; } CallResult<PseudoHandle<JSObject>> JSObject::getPrototypeOf( PseudoHandle<JSObject> selfHandle, Runtime *runtime) { if (LLVM_LIKELY(!selfHandle->isProxyObject())) { return createPseudoHandle(selfHandle->getParent(runtime)); } return JSProxy::getPrototypeOf( runtime->makeHandle(std::move(selfHandle)), runtime); } namespace { CallResult<bool> proxyOpFlags( Runtime *runtime, PropOpFlags opFlags, const char *msg, CallResult<bool> res) { if (LLVM_UNLIKELY(res == ExecutionStatus::EXCEPTION)) { return ExecutionStatus::EXCEPTION; } if (!*res && opFlags.getThrowOnError()) { return runtime->raiseTypeError(msg); } return res; } } // namespace CallResult<bool> JSObject::setParent( JSObject *self, Runtime *runtime, JSObject *parent, PropOpFlags opFlags) { if (LLVM_UNLIKELY(self->isProxyObject())) { return proxyOpFlags( runtime, opFlags, "Object is not extensible.", JSProxy::setPrototypeOf( runtime->makeHandle(self), runtime, runtime->makeHandle(parent))); } // ES9 9.1.2 // 4. if (self->parent_.get(runtime) == parent) return true; // 5. 
if (!self->isExtensible()) { if (opFlags.getThrowOnError()) { return runtime->raiseTypeError("Object is not extensible."); } else { return false; } } // 6-8. Check for a prototype cycle. for (JSObject *cur = parent; cur; cur = cur->parent_.get(runtime)) { if (cur == self) { if (opFlags.getThrowOnError()) { return runtime->raiseTypeError("Prototype cycle detected"); } else { return false; } } else if (LLVM_UNLIKELY(cur->isProxyObject())) { // TODO this branch should also be used for module namespace and // immutable prototype exotic objects. break; } } // 9. self->parent_.set(runtime, parent, &runtime->getHeap()); // 10. return true; } void JSObject::allocateNewSlotStorage( Handle<JSObject> selfHandle, Runtime *runtime, SlotIndex newSlotIndex, Handle<> valueHandle) { // If it is a direct property, just store the value and we are done. if (LLVM_LIKELY(newSlotIndex < DIRECT_PROPERTY_SLOTS)) { selfHandle->directProps()[newSlotIndex].set( *valueHandle, &runtime->getHeap()); return; } // Make the slot index relative to the indirect storage. newSlotIndex -= DIRECT_PROPERTY_SLOTS; // Allocate a new property storage if not already allocated. if (LLVM_UNLIKELY(!selfHandle->propStorage_)) { // Allocate new storage. assert(newSlotIndex == 0 && "allocated slot must be at end"); auto arrRes = runtime->ignoreAllocationFailure( PropStorage::create(runtime, DEFAULT_PROPERTY_CAPACITY)); selfHandle->propStorage_.set( runtime, vmcast<PropStorage>(arrRes), &runtime->getHeap()); } else if (LLVM_UNLIKELY( newSlotIndex >= selfHandle->propStorage_.get(runtime)->capacity())) { // Reallocate the existing one. 
assert( newSlotIndex == selfHandle->propStorage_.get(runtime)->size() && "allocated slot must be at end"); auto hnd = runtime->makeMutableHandle(selfHandle->propStorage_); PropStorage::resize(hnd, runtime, newSlotIndex + 1); selfHandle->propStorage_.set(runtime, *hnd, &runtime->getHeap()); } { NoAllocScope scope{runtime}; auto *const propStorage = selfHandle->propStorage_.getNonNull(runtime); if (newSlotIndex >= propStorage->size()) { assert( newSlotIndex == propStorage->size() && "allocated slot must be at end"); PropStorage::resizeWithinCapacity(propStorage, runtime, newSlotIndex + 1); } // If we don't need to resize, just store it directly. propStorage->at(newSlotIndex).set(*valueHandle, &runtime->getHeap()); } } CallResult<PseudoHandle<>> JSObject::getNamedPropertyValue_RJS( Handle<JSObject> selfHandle, Runtime *runtime, Handle<JSObject> propObj, NamedPropertyDescriptor desc) { assert( !selfHandle->flags_.proxyObject && !propObj->flags_.proxyObject && "getNamedPropertyValue_RJS cannot be used with proxy objects"); if (LLVM_LIKELY(!desc.flags.accessor)) return createPseudoHandle(getNamedSlotValue(propObj.get(), runtime, desc)); auto *accessor = vmcast<PropertyAccessor>(getNamedSlotValue(propObj.get(), runtime, desc)); if (!accessor->getter) return createPseudoHandle(HermesValue::encodeUndefinedValue()); // Execute the accessor on this object. 
return accessor->getter.get(runtime)->executeCall0( runtime->makeHandle(accessor->getter), runtime, selfHandle); } CallResult<PseudoHandle<>> JSObject::getComputedPropertyValue_RJS( Handle<JSObject> selfHandle, Runtime *runtime, Handle<JSObject> propObj, ComputedPropertyDescriptor desc) { assert( !selfHandle->flags_.proxyObject && !propObj->flags_.proxyObject && "getComputedPropertyValue_RJS cannot be used with proxy objects"); if (LLVM_LIKELY(!desc.flags.accessor)) return createPseudoHandle( getComputedSlotValue(propObj.get(), runtime, desc)); auto *accessor = vmcast<PropertyAccessor>( getComputedSlotValue(propObj.get(), runtime, desc)); if (!accessor->getter) return createPseudoHandle(HermesValue::encodeUndefinedValue()); // Execute the accessor on this object. return accessor->getter.get(runtime)->executeCall0( runtime->makeHandle(accessor->getter), runtime, selfHandle); } CallResult<PseudoHandle<>> JSObject::getComputedPropertyValue_RJS( Handle<JSObject> selfHandle, Runtime *runtime, Handle<JSObject> propObj, ComputedPropertyDescriptor desc, Handle<> nameValHandle) { if (!propObj) { return createPseudoHandle(HermesValue::encodeEmptyValue()); } if (LLVM_LIKELY(!desc.flags.proxyObject)) { return JSObject::getComputedPropertyValue_RJS( selfHandle, runtime, propObj, desc); } CallResult<Handle<>> keyRes = toPropertyKey(runtime, nameValHandle); if (LLVM_UNLIKELY(keyRes == ExecutionStatus::EXCEPTION)) { return ExecutionStatus::EXCEPTION; } CallResult<bool> hasRes = JSProxy::hasComputed(propObj, runtime, *keyRes); if (LLVM_UNLIKELY(hasRes == ExecutionStatus::EXCEPTION)) { return ExecutionStatus::EXCEPTION; } if (!*hasRes) { return createPseudoHandle(HermesValue::encodeEmptyValue()); } return JSProxy::getComputed(propObj, runtime, *keyRes, selfHandle); } CallResult<Handle<JSArray>> JSObject::getOwnPropertyKeys( Handle<JSObject> selfHandle, Runtime *runtime, OwnKeysFlags okFlags) { assert( (okFlags.getIncludeNonSymbols() || okFlags.getIncludeSymbols()) && "Can't exclude 
symbols and strings"); if (LLVM_UNLIKELY( selfHandle->flags_.lazyObject || selfHandle->flags_.proxyObject)) { if (selfHandle->flags_.proxyObject) { CallResult<PseudoHandle<JSArray>> proxyRes = JSProxy::ownPropertyKeys(selfHandle, runtime, okFlags); if (LLVM_UNLIKELY(proxyRes == ExecutionStatus::EXCEPTION)) { return ExecutionStatus::EXCEPTION; } return runtime->makeHandle(std::move(*proxyRes)); } assert(selfHandle->flags_.lazyObject && "descriptor flags are impossible"); initializeLazyObject(runtime, selfHandle); } auto range = getOwnIndexedRange(selfHandle.get(), runtime); // Estimate the capacity of the output array. This estimate is only // reasonable for the non-symbol case. uint32_t capacity = okFlags.getIncludeNonSymbols() ? (selfHandle->clazz_.get(runtime)->getNumProperties() + range.second - range.first) : 0; auto arrayRes = JSArray::create(runtime, capacity, 0); if (LLVM_UNLIKELY(arrayRes == ExecutionStatus::EXCEPTION)) { return ExecutionStatus::EXCEPTION; } auto array = runtime->makeHandle(std::move(*arrayRes)); // Optional array of SymbolIDs reported via host object API llvh::Optional<Handle<JSArray>> hostObjectSymbols; size_t hostObjectSymbolCount = 0; // If current object is a host object we need to deduplicate its properties llvh::SmallSet<SymbolID::RawType, 16> dedupSet; // Output index. uint32_t index = 0; // Avoid allocating a new handle per element. MutableHandle<> tmpHandle{runtime}; // Number of indexed properties. uint32_t numIndexed = 0; // Regular properties with names that are array indexes are stashed here, if // encountered. llvh::SmallVector<uint32_t, 8> indexNames{}; // Iterate the named properties excluding those which use Symbols. 
if (okFlags.getIncludeNonSymbols()) { // Get host object property names if (LLVM_UNLIKELY(selfHandle->flags_.hostObject)) { assert( range.first == range.second && "Host objects cannot own indexed range"); auto hostSymbolsRes = vmcast<HostObject>(selfHandle.get())->getHostPropertyNames(); if (hostSymbolsRes == ExecutionStatus::EXCEPTION) { return ExecutionStatus::EXCEPTION; } if ((hostObjectSymbolCount = (**hostSymbolsRes)->getEndIndex()) != 0) { Handle<JSArray> hostSymbols = *hostSymbolsRes; hostObjectSymbols = std::move(hostSymbols); capacity += hostObjectSymbolCount; } } // Iterate the indexed properties. GCScopeMarkerRAII marker{runtime}; for (auto i = range.first; i != range.second; ++i) { auto res = getOwnIndexedPropertyFlags(selfHandle.get(), runtime, i); if (!res) continue; // If specified, check whether it is enumerable. if (!okFlags.getIncludeNonEnumerable() && !res->enumerable) continue; tmpHandle = HermesValue::encodeDoubleValue(i); JSArray::setElementAt(array, runtime, index++, tmpHandle); marker.flush(); } numIndexed = index; HiddenClass::forEachProperty( runtime->makeHandle(selfHandle->clazz_), runtime, [runtime, okFlags, array, hostObjectSymbolCount, &index, &indexNames, &tmpHandle, &dedupSet](SymbolID id, NamedPropertyDescriptor desc) { if (!isPropertyNamePrimitive(id)) { return; } // If specified, check whether it is enumerable. if (!okFlags.getIncludeNonEnumerable()) { if (!desc.flags.enumerable) return; } // Host properties might overlap with the ones recognized by the // hidden class. If we're dealing with a host object then keep track // of hidden class properties for the deduplication purposes. if (LLVM_UNLIKELY(hostObjectSymbolCount > 0)) { dedupSet.insert(id.unsafeGetRaw()); } // Check if this property is an integer index. If it is, we stash it // away to deal with it later. This check should be fast since most // property names don't start with a digit. 
auto propNameAsIndex = toArrayIndex( runtime->getIdentifierTable().getStringView(runtime, id)); if (LLVM_UNLIKELY(propNameAsIndex)) { indexNames.push_back(*propNameAsIndex); return; } tmpHandle = HermesValue::encodeStringValue( runtime->getStringPrimFromSymbolID(id)); JSArray::setElementAt(array, runtime, index++, tmpHandle); }); // Iterate over HostObject properties and append them to the array. Do not // append duplicates. if (LLVM_UNLIKELY(hostObjectSymbols)) { for (size_t i = 0; i < hostObjectSymbolCount; ++i) { assert( (*hostObjectSymbols)->at(runtime, i).isSymbol() && "Host object needs to return array of SymbolIDs"); marker.flush(); SymbolID id = (*hostObjectSymbols)->at(runtime, i).getSymbol(); if (dedupSet.count(id.unsafeGetRaw()) == 0) { dedupSet.insert(id.unsafeGetRaw()); assert( !InternalProperty::isInternal(id) && "host object returned reserved symbol"); auto propNameAsIndex = toArrayIndex( runtime->getIdentifierTable().getStringView(runtime, id)); if (LLVM_UNLIKELY(propNameAsIndex)) { indexNames.push_back(*propNameAsIndex); continue; } tmpHandle = HermesValue::encodeStringValue( runtime->getStringPrimFromSymbolID(id)); JSArray::setElementAt(array, runtime, index++, tmpHandle); } } } } // Now iterate the named properties again, including only Symbols. // We could iterate only once, if we chose to ignore (and disallow) // own properties on HostObjects, as we do with Proxies. if (okFlags.getIncludeSymbols()) { MutableHandle<SymbolID> idHandle{runtime}; HiddenClass::forEachProperty( runtime->makeHandle(selfHandle->clazz_), runtime, [runtime, okFlags, array, &index, &idHandle]( SymbolID id, NamedPropertyDescriptor desc) { if (!isSymbolPrimitive(id)) { return; } // If specified, check whether it is enumerable. if (!okFlags.getIncludeNonEnumerable()) { if (!desc.flags.enumerable) return; } idHandle = id; JSArray::setElementAt(array, runtime, index++, idHandle); }); } // The end (exclusive) of the named properties. 
uint32_t endNamed = index; // Properly set the length of the array. auto cr = JSArray::setLength( array, runtime, endNamed + indexNames.size(), PropOpFlags{}); (void)cr; assert( cr != ExecutionStatus::EXCEPTION && *cr && "JSArray::setLength() failed"); // If we have no index-like names, we are done. if (LLVM_LIKELY(indexNames.empty())) return array; // In the unlikely event that we encountered index-like names, we need to sort // them and merge them with the real indexed properties. Note that it is // guaranteed that there are no clashes. std::sort(indexNames.begin(), indexNames.end()); // Also make space for the new elements by shifting all the named properties // to the right. First, resize the array. JSArray::setStorageEndIndex(array, runtime, endNamed + indexNames.size()); // Shift the non-index property names. The region [numIndexed..endNamed) is // moved to [numIndexed+indexNames.size()..array->size()). // TODO: optimize this by implementing memcpy-like functionality in ArrayImpl. for (uint32_t last = endNamed, toLast = array->getEndIndex(); last != numIndexed;) { --last; --toLast; tmpHandle = array->at(runtime, last); JSArray::setElementAt(array, runtime, toLast, tmpHandle); } // Now we need to merge the indexes in indexNames and the array // [0..numIndexed). We start from the end and copy the larger element from // either array. // 1+ the destination position to copy into. 
for (uint32_t toLast = numIndexed + indexNames.size(), indexNamesLast = indexNames.size(); toLast != 0;) { if (numIndexed) { uint32_t a = (uint32_t)array->at(runtime, numIndexed - 1).getNumber(); uint32_t b; if (indexNamesLast && (b = indexNames[indexNamesLast - 1]) > a) { tmpHandle = HermesValue::encodeDoubleValue(b); --indexNamesLast; } else { tmpHandle = HermesValue::encodeDoubleValue(a); --numIndexed; } } else { assert(indexNamesLast && "prematurely ran out of source values"); tmpHandle = HermesValue::encodeDoubleValue(indexNames[indexNamesLast - 1]); --indexNamesLast; } --toLast; JSArray::setElementAt(array, runtime, toLast, tmpHandle); } return array; } /// Convert a value to string unless already converted /// \param nameValHandle [Handle<>] the value to convert /// \param str [MutableHandle<StringPrimitive>] the string is stored /// there. Must be initialized to null initially. #define LAZY_TO_STRING(runtime, nameValHandle, str) \ do { \ if (!str) { \ auto status = toString_RJS(runtime, nameValHandle); \ assert( \ status != ExecutionStatus::EXCEPTION && \ "toString() of primitive cannot fail"); \ str = status->get(); \ } \ } while (0) /// Convert a value to an identifier unless already converted /// \param nameValHandle [Handle<>] the value to convert /// \param id [SymbolID] the identifier is stored there. Must be initialized /// to INVALID_IDENTIFIER_ID initially. #define LAZY_TO_IDENTIFIER(runtime, nameValHandle, id) \ do { \ if (id.isInvalid()) { \ CallResult<Handle<SymbolID>> idRes = \ valueToSymbolID(runtime, nameValHandle); \ if (LLVM_UNLIKELY(idRes == ExecutionStatus::EXCEPTION)) { \ return ExecutionStatus::EXCEPTION; \ } \ id = **idRes; \ } \ } while (0) /// Convert a value to array index, if possible. /// \param nameValHandle [Handle<>] the value to convert /// \param str [MutableHandle<StringPrimitive>] the string is stored /// there. Must be initialized to null initially. 
/// \param arrayIndex [OptValue<uint32_t>] the array index is stored /// there. #define TO_ARRAY_INDEX(runtime, nameValHandle, str, arrayIndex) \ do { \ arrayIndex = toArrayIndexFastPath(*nameValHandle); \ if (!arrayIndex && !nameValHandle->isSymbol()) { \ LAZY_TO_STRING(runtime, nameValHandle, str); \ arrayIndex = toArrayIndex(runtime, str); \ } \ } while (0) /// \return true if the flags of a new property make it suitable for indexed /// storage. All new indexed properties are enumerable, writable and /// configurable and have no accessors. static bool canNewPropertyBeIndexed(DefinePropertyFlags dpf) { return dpf.setEnumerable && dpf.enumerable && dpf.setWritable && dpf.writable && dpf.setConfigurable && dpf.configurable && !dpf.setSetter && !dpf.setGetter; } struct JSObject::Helper { public: LLVM_ATTRIBUTE_ALWAYS_INLINE static ObjectFlags &flags(JSObject *self) { return self->flags_; } LLVM_ATTRIBUTE_ALWAYS_INLINE static OptValue<PropertyFlags> getOwnIndexedPropertyFlags(JSObject *self, Runtime *runtime, uint32_t index) { return JSObject::getOwnIndexedPropertyFlags(self, runtime, index); } LLVM_ATTRIBUTE_ALWAYS_INLINE static NamedPropertyDescriptor &castToNamedPropertyDescriptorRef( ComputedPropertyDescriptor &desc) { return desc.castToNamedPropertyDescriptorRef(); } }; namespace { /// ES5.1 8.12.1. /// A helper which takes a SymbolID which caches the conversion of /// nameValHandle if it's needed. It should be default constructed, /// and may or may not be set. This has been measured to be a useful /// perf win. Note that always_inline seems to be ignored on static /// methods, so this function has to be local to the cpp file in order /// to be inlined for the perf win. 
/// \param id in/out cache of the SymbolID conversion of \p nameValHandle;
///   populated lazily (via LAZY_TO_IDENTIFIER) only when the fast index
///   paths don't apply, and reused by callers that loop.
/// \param desc out parameter: the found descriptor (indexed or named).
/// \return true if an own property was found; false otherwise (including
///   when a proxy is skipped because \p ignoreProxy is Yes).
LLVM_ATTRIBUTE_ALWAYS_INLINE
CallResult<bool> getOwnComputedPrimitiveDescriptorImpl(
    Handle<JSObject> selfHandle,
    Runtime *runtime,
    Handle<> nameValHandle,
    JSObject::IgnoreProxy ignoreProxy,
    SymbolID &id,
    ComputedPropertyDescriptor &desc) {
  assert(
      !nameValHandle->isObject() &&
      "nameValHandle passed to "
      "getOwnComputedPrimitiveDescriptor "
      "cannot be an object");

  // Try the fast paths first if we have "fast" index properties and the
  // property name is an obvious index.
  if (auto arrayIndex = toArrayIndexFastPath(*nameValHandle)) {
    if (JSObject::Helper::flags(*selfHandle).fastIndexProperties) {
      auto res = JSObject::Helper::getOwnIndexedPropertyFlags(
          selfHandle.get(), runtime, *arrayIndex);
      if (res) {
        // This a valid array index, residing in our indexed storage.
        desc.flags = *res;
        desc.flags.indexed = 1;
        desc.slot = *arrayIndex;
        return true;
      }

      // This a valid array index, but we don't have it in our indexed
      // storage, and we don't have index-like named properties.
      return false;
    }
    if (!selfHandle->getClass(runtime)->getHasIndexLikeProperties() &&
        !selfHandle->isHostObject() && !selfHandle->isLazy() &&
        !selfHandle->isProxyObject()) {
      // Early return to handle the case where an object definitely has no
      // index-like properties. This avoids allocating a new StringPrimitive
      // and uniquing it below.
      return false;
    }
  }

  // Convert the string to a SymbolID
  LAZY_TO_IDENTIFIER(runtime, nameValHandle, id);

  // Look for a named property with this name.
  if (JSObject::getOwnNamedDescriptor(
          selfHandle,
          runtime,
          id,
          JSObject::Helper::castToNamedPropertyDescriptorRef(desc))) {
    return true;
  }

  // Common case: no indexed storage, not lazy, not a proxy -- nothing more
  // to check.
  if (LLVM_LIKELY(
          !JSObject::Helper::flags(*selfHandle).indexedStorage &&
          !selfHandle->isLazy() && !selfHandle->isProxyObject())) {
    return false;
  }

  MutableHandle<StringPrimitive> strPrim{runtime};

  // If we have indexed storage, perform potentially expensive conversions
  // to array index and check it.
  if (JSObject::Helper::flags(*selfHandle).indexedStorage) {
    // If the name is a valid integer array index, store it here.
    OptValue<uint32_t> arrayIndex;

    // Try to convert the property name to an array index.
    TO_ARRAY_INDEX(runtime, nameValHandle, strPrim, arrayIndex);

    if (arrayIndex) {
      auto res = JSObject::Helper::getOwnIndexedPropertyFlags(
          selfHandle.get(), runtime, *arrayIndex);
      if (res) {
        desc.flags = *res;
        desc.flags.indexed = 1;
        desc.slot = *arrayIndex;
        return true;
      }
    }
    return false;
  }

  if (selfHandle->isLazy()) {
    // Materialize the lazy object's properties, then redo the whole lookup.
    JSObject::initializeLazyObject(runtime, selfHandle);
    return JSObject::getOwnComputedPrimitiveDescriptor(
        selfHandle, runtime, nameValHandle, ignoreProxy, desc);
  }

  assert(selfHandle->isProxyObject() && "descriptor flags are impossible");
  if (ignoreProxy == JSObject::IgnoreProxy::Yes) {
    return false;
  }
  return JSProxy::getOwnProperty(
      selfHandle, runtime, nameValHandle, desc, nullptr);
}

} // namespace

CallResult<bool> JSObject::getOwnComputedPrimitiveDescriptor(
    Handle<JSObject> selfHandle,
    Runtime *runtime,
    Handle<> nameValHandle,
    JSObject::IgnoreProxy ignoreProxy,
    ComputedPropertyDescriptor &desc) {
  // Thin public wrapper: supply a fresh (invalid) SymbolID cache slot.
  SymbolID id{};
  return getOwnComputedPrimitiveDescriptorImpl(
      selfHandle, runtime, nameValHandle, ignoreProxy, id, desc);
}

CallResult<bool> JSObject::getOwnComputedDescriptor(
    Handle<JSObject> selfHandle,
    Runtime *runtime,
    Handle<> nameValHandle,
    ComputedPropertyDescriptor &desc) {
  // Coerce an object key to a primitive property key first (may run JS),
  // then delegate, letting proxies be handled inside (IgnoreProxy::No).
  auto converted = toPropertyKeyIfObject(runtime, nameValHandle);
  if (LLVM_UNLIKELY(converted == ExecutionStatus::EXCEPTION)) {
    return ExecutionStatus::EXCEPTION;
  }
  return JSObject::getOwnComputedPrimitiveDescriptor(
      selfHandle, runtime, *converted, IgnoreProxy::No, desc);
}

CallResult<bool> JSObject::getOwnComputedDescriptor(
    Handle<JSObject> selfHandle,
    Runtime *runtime,
    Handle<> nameValHandle,
    ComputedPropertyDescriptor &desc,
    MutableHandle<> &valueOrAccessor) {
  auto converted = toPropertyKeyIfObject(runtime, nameValHandle);
  if (LLVM_UNLIKELY(converted ==
                    ExecutionStatus::EXCEPTION)) {
    return ExecutionStatus::EXCEPTION;
  }
  // The proxy is ignored here so we can avoid calling
  // JSProxy::getOwnProperty twice on proxies, since
  // getOwnComputedPrimitiveDescriptor doesn't pass back the
  // valueOrAccessor.
  CallResult<bool> res = JSObject::getOwnComputedPrimitiveDescriptor(
      selfHandle, runtime, *converted, IgnoreProxy::Yes, desc);
  if (LLVM_UNLIKELY(res == ExecutionStatus::EXCEPTION)) {
    return ExecutionStatus::EXCEPTION;
  }
  if (*res) {
    // Found a regular own property: also fetch its value (or accessor pair).
    valueOrAccessor = getComputedSlotValue(selfHandle.get(), runtime, desc);
    return true;
  }
  if (LLVM_UNLIKELY(selfHandle->isProxyObject())) {
    // Proxy path: one call retrieves both descriptor and value/accessor.
    return JSProxy::getOwnProperty(
        selfHandle, runtime, nameValHandle, desc, &valueOrAccessor);
  }
  return false;
}

/// Walk \p selfHandle and its prototype chain looking for a property named
/// \p name. On success \p desc is populated and the owning object returned;
/// returns nullptr when no object in the chain claims the property. Host
/// objects and proxies terminate the walk with the corresponding flag set in
/// \p desc so the caller can dispatch to their special protocols.
JSObject *JSObject::getNamedDescriptor(
    Handle<JSObject> selfHandle,
    Runtime *runtime,
    SymbolID name,
    PropertyFlags expectedFlags,
    NamedPropertyDescriptor &desc) {
  if (findProperty(selfHandle, runtime, name, expectedFlags, desc))
    return *selfHandle;

  // Check here for host object flag.  This means that "normal" own
  // properties above win over host-defined properties, but there's no
  // cost imposed on own property lookups.  This should do what we
  // need in practice, and we can define host vs js property
  // disambiguation however we want.  This is here in order to avoid
  // impacting perf for the common case where an own property exists
  // in normal storage.
  if (LLVM_UNLIKELY(selfHandle->flags_.hostObject)) {
    desc.flags.hostObject = true;
    desc.flags.writable = true;
    return *selfHandle;
  }
  if (LLVM_UNLIKELY(selfHandle->flags_.lazyObject)) {
    assert(
        !selfHandle->flags_.proxyObject &&
        "Proxy objects should never be lazy");
    // Initialize the object and perform the lookup again.
    JSObject::initializeLazyObject(runtime, selfHandle);
    if (findProperty(selfHandle, runtime, name, expectedFlags, desc))
      return *selfHandle;
  }
  if (LLVM_UNLIKELY(selfHandle->flags_.proxyObject)) {
    desc.flags.proxyObject = true;
    return *selfHandle;
  }

  if (selfHandle->parent_) {
    MutableHandle<JSObject> mutableSelfHandle{
        runtime, selfHandle->parent_.getNonNull(runtime)};

    do {
      // Check the most common case first, at the cost of some code
      // duplication.
      if (LLVM_LIKELY(
              !mutableSelfHandle->flags_.lazyObject &&
              !mutableSelfHandle->flags_.hostObject &&
              !mutableSelfHandle->flags_.proxyObject)) {
      findProp:
        if (findProperty(
                mutableSelfHandle,
                runtime,
                name,
                PropertyFlags::invalid(),
                desc)) {
          assert(
              !selfHandle->flags_.proxyObject &&
              "Proxy object parents should never have own properties");
          return *mutableSelfHandle;
        }
      } else if (LLVM_UNLIKELY(mutableSelfHandle->flags_.lazyObject)) {
        // Materialize the lazy parent, then retry the lookup on it via the
        // label above (avoids duplicating the findProperty call).
        JSObject::initializeLazyObject(runtime, mutableSelfHandle);
        goto findProp;
      } else if (LLVM_UNLIKELY(mutableSelfHandle->flags_.hostObject)) {
        desc.flags.hostObject = true;
        desc.flags.writable = true;
        return *mutableSelfHandle;
      } else {
        assert(
            mutableSelfHandle->flags_.proxyObject &&
            "descriptor flags are impossible");
        desc.flags.proxyObject = true;
        return *mutableSelfHandle;
      }
    } while ((mutableSelfHandle = mutableSelfHandle->parent_.get(runtime)));
  }

  return nullptr;
}

ExecutionStatus JSObject::getComputedPrimitiveDescriptor(
    Handle<JSObject> selfHandle,
    Runtime *runtime,
    Handle<> nameValHandle,
    MutableHandle<JSObject> &propObj,
    ComputedPropertyDescriptor &desc) {
  assert(
      !nameValHandle->isObject() &&
      "nameValHandle passed to "
      "getComputedPrimitiveDescriptor cannot "
      "be an object");

  propObj = selfHandle.get();

  // `id` caches the one-time SymbolID conversion across loop iterations.
  SymbolID id{};
  GCScopeMarkerRAII marker{runtime};
  do {
    // A proxy is ignored here so we can check the bit later and
    // return it back to the caller for additional processing.
    Handle<JSObject> loopHandle = propObj;
    CallResult<bool> res = getOwnComputedPrimitiveDescriptorImpl(
        loopHandle, runtime, nameValHandle, IgnoreProxy::Yes, id, desc);
    if (LLVM_UNLIKELY(res == ExecutionStatus::EXCEPTION)) {
      return ExecutionStatus::EXCEPTION;
    }
    if (*res) {
      // Found it somewhere along the chain; propObj identifies the owner.
      return ExecutionStatus::RETURNED;
    }
    if (LLVM_UNLIKELY(propObj->flags_.hostObject)) {
      desc.flags.hostObject = true;
      desc.flags.writable = true;
      return ExecutionStatus::RETURNED;
    }
    if (LLVM_UNLIKELY(propObj->flags_.proxyObject)) {
      desc.flags.proxyObject = true;
      return ExecutionStatus::RETURNED;
    }
    // This isn't a proxy, so use the faster getParent() instead of
    // getPrototypeOf.
    propObj = propObj->getParent(runtime);
    // Flush at the end of the loop to allow first iteration to be as fast as
    // possible.
    marker.flush();
  } while (propObj);
  // Chain exhausted without a match; propObj is now null.
  return ExecutionStatus::RETURNED;
}

ExecutionStatus JSObject::getComputedDescriptor(
    Handle<JSObject> selfHandle,
    Runtime *runtime,
    Handle<> nameValHandle,
    MutableHandle<JSObject> &propObj,
    ComputedPropertyDescriptor &desc) {
  // Coerce object keys to primitives once (may run JS), then do the
  // prototype-chain descriptor lookup.
  auto converted = toPropertyKeyIfObject(runtime, nameValHandle);
  if (LLVM_UNLIKELY(converted == ExecutionStatus::EXCEPTION)) {
    return ExecutionStatus::EXCEPTION;
  }
  return getComputedPrimitiveDescriptor(
      selfHandle, runtime, *converted, propObj, desc);
}

/// Get the named property \p name starting at \p selfHandle, invoking a
/// getter / host object / proxy trap with \p receiver as `this` when needed.
/// Optionally fills \p cacheEntry (class + slot) for plain data properties
/// so the inline cache can skip the lookup next time.
CallResult<PseudoHandle<>> JSObject::getNamedWithReceiver_RJS(
    Handle<JSObject> selfHandle,
    Runtime *runtime,
    SymbolID name,
    Handle<> receiver,
    PropOpFlags opFlags,
    PropertyCacheEntry *cacheEntry) {
  NamedPropertyDescriptor desc;
  // Locate the descriptor. propObj contains the object which may be anywhere
  // along the prototype chain.
  JSObject *propObj = getNamedDescriptor(selfHandle, runtime, name, desc);
  if (!propObj) {
    if (LLVM_UNLIKELY(opFlags.getMustExist())) {
      return runtime->raiseReferenceError(
          TwineChar16("Property '") +
          runtime->getIdentifierTable().getStringViewForDev(runtime, name) +
          "' doesn't exist");
    }
    return createPseudoHandle(HermesValue::encodeUndefinedValue());
  }

  if (LLVM_LIKELY(
          !desc.flags.accessor && !desc.flags.hostObject &&
          !desc.flags.proxyObject)) {
    // Populate the cache if requested.
    if (cacheEntry && !propObj->getClass(runtime)->isDictionaryNoCache()) {
      cacheEntry->clazz = propObj->getClassGCPtr().getStorageType();
      cacheEntry->slot = desc.slot;
    }
    return createPseudoHandle(getNamedSlotValue(propObj, runtime, desc));
  }

  if (desc.flags.accessor) {
    auto *accessor =
        vmcast<PropertyAccessor>(getNamedSlotValue(propObj, runtime, desc));

    // Getter-less accessor: reads yield undefined.
    if (!accessor->getter)
      return createPseudoHandle(HermesValue::encodeUndefinedValue());

    // Execute the accessor on this object.
    return Callable::executeCall0(
        runtime->makeHandle(accessor->getter), runtime, receiver);
  } else if (desc.flags.hostObject) {
    auto res = vmcast<HostObject>(propObj)->get(name);
    if (LLVM_UNLIKELY(res == ExecutionStatus::EXCEPTION)) {
      return ExecutionStatus::EXCEPTION;
    }
    return createPseudoHandle(*res);
  } else {
    assert(desc.flags.proxyObject && "descriptor flags are impossible");
    return JSProxy::getNamed(
        runtime->makeHandle(propObj), runtime, name, receiver);
  }
}

CallResult<PseudoHandle<>> JSObject::getNamedOrIndexed(
    Handle<JSObject> selfHandle,
    Runtime *runtime,
    SymbolID name,
    PropOpFlags opFlags) {
  if (LLVM_UNLIKELY(selfHandle->flags_.indexedStorage)) {
    // Note that getStringView can be satisfied without materializing the
    // Identifier.
    const auto strView =
        runtime->getIdentifierTable().getStringView(runtime, name);
    if (auto nameAsIndex = toArrayIndex(strView)) {
      // Index-like name on an object with indexed storage: route through the
      // computed-property path using the numeric form of the name.
      return getComputed_RJS(
          selfHandle,
          runtime,
          runtime->makeHandle(HermesValue::encodeNumberValue(*nameAsIndex)));
    }
    // Here we have indexed properties but the symbol was not index-like.
    // Fall through to getNamed().
  }
  return getNamed_RJS(selfHandle, runtime, name, opFlags);
}

/// Get a computed (possibly index-valued) property starting at
/// \p selfHandle, invoking getters / host objects / proxy traps with
/// \p receiver as `this` where applicable.
CallResult<PseudoHandle<>> JSObject::getComputedWithReceiver_RJS(
    Handle<JSObject> selfHandle,
    Runtime *runtime,
    Handle<> nameValHandle,
    Handle<> receiver) {
  // Try the fast-path first: no "index-like" properties and the "name"
  // already is a valid integer index.
  if (selfHandle->flags_.fastIndexProperties) {
    if (auto arrayIndex = toArrayIndexFastPath(*nameValHandle)) {
      // Do we have this value present in our array storage? If so, return
      // it.
      PseudoHandle<> ourValue = createPseudoHandle(
          getOwnIndexed(selfHandle.get(), runtime, *arrayIndex));
      if (LLVM_LIKELY(!ourValue->isEmpty()))
        return ourValue;
    }
  }

  // If nameValHandle is an object, we should convert it to string now,
  // because toString may have side-effect, and we want to do this only
  // once.
  auto converted = toPropertyKeyIfObject(runtime, nameValHandle);
  if (LLVM_UNLIKELY(converted == ExecutionStatus::EXCEPTION)) {
    return ExecutionStatus::EXCEPTION;
  }
  auto nameValPrimitiveHandle = *converted;

  ComputedPropertyDescriptor desc;

  // Locate the descriptor. propObj contains the object which may be anywhere
  // along the prototype chain.
  MutableHandle<JSObject> propObj{runtime};
  if (LLVM_UNLIKELY(
          getComputedPrimitiveDescriptor(
              selfHandle, runtime, nameValPrimitiveHandle, propObj, desc) ==
          ExecutionStatus::EXCEPTION)) {
    return ExecutionStatus::EXCEPTION;
  }

  if (!propObj)
    return createPseudoHandle(HermesValue::encodeUndefinedValue());

  if (LLVM_LIKELY(
          !desc.flags.accessor && !desc.flags.hostObject &&
          !desc.flags.proxyObject))
    return createPseudoHandle(
        getComputedSlotValue(propObj.get(), runtime, desc));

  if (desc.flags.accessor) {
    auto *accessor = vmcast<PropertyAccessor>(
        getComputedSlotValue(propObj.get(), runtime, desc));
    // Getter-less accessor: reads yield undefined.
    if (!accessor->getter)
      return createPseudoHandle(HermesValue::encodeUndefinedValue());

    // Execute the accessor on this object.
    return accessor->getter.get(runtime)->executeCall0(
        runtime->makeHandle(accessor->getter), runtime, receiver);
  } else if (desc.flags.hostObject) {
    // Host objects are keyed by SymbolID; convert the primitive name once.
    SymbolID id{};
    LAZY_TO_IDENTIFIER(runtime, nameValPrimitiveHandle, id);
    auto propRes = vmcast<HostObject>(propObj.get())->get(id);
    if (propRes == ExecutionStatus::EXCEPTION)
      return ExecutionStatus::EXCEPTION;
    return createPseudoHandle(*propRes);
  } else {
    assert(desc.flags.proxyObject && "descriptor flags are impossible");
    CallResult<Handle<>> key = toPropertyKey(runtime, nameValPrimitiveHandle);
    if (key == ExecutionStatus::EXCEPTION)
      return ExecutionStatus::EXCEPTION;
    return JSProxy::getComputed(propObj, runtime, *key, receiver);
  }
}

/// \return true if \p name exists on \p selfHandle or its prototype chain
/// (delegating to the proxy 'has' trap if a proxy is encountered).
CallResult<bool> JSObject::hasNamed(
    Handle<JSObject> selfHandle,
    Runtime *runtime,
    SymbolID name) {
  NamedPropertyDescriptor desc;
  JSObject *propObj = getNamedDescriptor(selfHandle, runtime, name, desc);
  if (propObj == nullptr) {
    return false;
  }
  if (LLVM_UNLIKELY(desc.flags.proxyObject)) {
    return JSProxy::hasNamed(runtime->makeHandle(propObj), runtime, name);
  }
  return true;
}

CallResult<bool> JSObject::hasNamedOrIndexed(
    Handle<JSObject> selfHandle,
    Runtime *runtime,
    SymbolID name) {
  if (LLVM_UNLIKELY(selfHandle->flags_.indexedStorage)) {
    const auto strView =
        runtime->getIdentifierTable().getStringView(runtime, name);
    if (auto nameAsIndex = toArrayIndex(strView)) {
      if (haveOwnIndexed(selfHandle.get(), runtime, *nameAsIndex)) {
        return true;
      }
      if (selfHandle->flags_.fastIndexProperties) {
        // With fast index properties there can be no index-like named
        // properties, so a miss in indexed storage is definitive.
        return false;
      }
    }
    // Here we have indexed properties but the symbol was not stored in the
    // indexedStorage.
    // Fall through to getNamed().
  }
  return hasNamed(selfHandle, runtime, name);
}

CallResult<bool> JSObject::hasComputed(
    Handle<JSObject> selfHandle,
    Runtime *runtime,
    Handle<> nameValHandle) {
  // Try the fast-path first: no "index-like" properties and the "name"
  // already is a valid integer index.
  if (selfHandle->flags_.fastIndexProperties) {
    if (auto arrayIndex = toArrayIndexFastPath(*nameValHandle)) {
      // Do we have this value present in our array storage? If so, return
      // true.
      if (haveOwnIndexed(selfHandle.get(), runtime, *arrayIndex)) {
        return true;
      }
    }
  }
  // If nameValHandle is an object, we should convert it to string now,
  // because toString may have side-effect, and we want to do this only
  // once.
  auto converted = toPropertyKeyIfObject(runtime, nameValHandle);
  if (LLVM_UNLIKELY(converted == ExecutionStatus::EXCEPTION)) {
    return ExecutionStatus::EXCEPTION;
  }
  auto nameValPrimitiveHandle = *converted;

  ComputedPropertyDescriptor desc;
  MutableHandle<JSObject> propObj{runtime};
  if (getComputedPrimitiveDescriptor(
          selfHandle, runtime, nameValPrimitiveHandle, propObj, desc) ==
      ExecutionStatus::EXCEPTION) {
    return ExecutionStatus::EXCEPTION;
  }
  if (!propObj) {
    return false;
  }
  if (LLVM_UNLIKELY(desc.flags.proxyObject)) {
    CallResult<Handle<>> key = toPropertyKey(runtime, nameValPrimitiveHandle);
    if (key == ExecutionStatus::EXCEPTION)
      return ExecutionStatus::EXCEPTION;
    return JSProxy::hasComputed(propObj, runtime, *key);
  }
  // For compatibility with polyfills we want to pretend that all HostObject
  // properties are "own" properties in 'in'.  Since there is no way to check
  // for a HostObject property, we must always assume success.  In practice
  // the property name would have been obtained from enumerating the
  // properties in JS code that looks something like this:
  //    for(key in hostObj) {
  //      if (key in hostObj)
  //        ...
  //    }
  return true;
}

/// Raise a TypeError describing an attempt to overwrite a read-only static
/// builtin method. When the builtin object has a plain string 'name'
/// property it is included in the message ("Obj.method"); otherwise only
/// the method name is shown.
static ExecutionStatus raiseErrorForOverridingStaticBuiltin(
    Handle<JSObject> selfHandle,
    Runtime *runtime,
    Handle<SymbolID> name) {
  Handle<StringPrimitive> methodNameHnd =
      runtime->makeHandle(runtime->getStringPrimFromSymbolID(name.get()));
  // If the 'name' property does not exist or is an accessor, we don't
  // display the name.
  NamedPropertyDescriptor desc;
  auto *obj = JSObject::getNamedDescriptor(
      selfHandle, runtime, Predefined::getSymbolID(Predefined::name), desc);
  assert(
      !selfHandle->isProxyObject() &&
      "raiseErrorForOverridingStaticBuiltin cannot be used with proxy objects");
  if (!obj || desc.flags.accessor) {
    return runtime->raiseTypeError(
        TwineChar16("Attempting to override read-only builtin method '") +
        TwineChar16(methodNameHnd.get()) + "'");
  }

  // Display the name property of the builtin object if it is a string.
  StringPrimitive *objName = dyn_vmcast<StringPrimitive>(
      JSObject::getNamedSlotValue(selfHandle.get(), runtime, desc));
  if (!objName) {
    return runtime->raiseTypeError(
        TwineChar16("Attempting to override read-only builtin method '") +
        TwineChar16(methodNameHnd.get()) + "'");
  }

  return runtime->raiseTypeError(
      TwineChar16("Attempting to override read-only builtin method '") +
      TwineChar16(objName) + "." + TwineChar16(methodNameHnd.get()) + "'");
}

/// Set named property \p name to \p valueHandle, starting the lookup at
/// \p selfHandle but writing relative to \p receiver (the `this` for
/// setters; may differ from selfHandle, e.g. via Reflect.set).
CallResult<bool> JSObject::putNamedWithReceiver_RJS(
    Handle<JSObject> selfHandle,
    Runtime *runtime,
    SymbolID name,
    Handle<> valueHandle,
    Handle<> receiver,
    PropOpFlags opFlags) {
  NamedPropertyDescriptor desc;
  // Look for the property in this object or along the prototype chain.
  JSObject *propObj = getNamedDescriptor(
      selfHandle,
      runtime,
      name,
      PropertyFlags::defaultNewNamedPropertyFlags(),
      desc);

  // If the property exists (or, we hit a proxy/hostobject on the way
  // up the chain)
  if (propObj) {
    // Get the simple case out of the way: If the property already
    // exists on selfHandle, is not an accessor, selfHandle and
    // receiver are the same, selfHandle is not a host
    // object/proxy/internal setter, and the property is writable,
    // just write into the same slot.
    if (LLVM_LIKELY(
            *selfHandle == propObj &&
            selfHandle.getHermesValue().getRaw() == receiver->getRaw() &&
            !desc.flags.accessor && !desc.flags.internalSetter &&
            !desc.flags.hostObject && !desc.flags.proxyObject &&
            desc.flags.writable)) {
      setNamedSlotValue(
          *selfHandle, runtime, desc, valueHandle.getHermesValue());
      return true;
    }

    if (LLVM_UNLIKELY(desc.flags.accessor)) {
      auto *accessor =
          vmcast<PropertyAccessor>(getNamedSlotValue(propObj, runtime, desc));

      // If it is a read-only accessor, fail.
      if (!accessor->setter) {
        if (opFlags.getThrowOnError()) {
          return runtime->raiseTypeError(
              TwineChar16("Cannot assign to property '") +
              runtime->getIdentifierTable().getStringViewForDev(
                  runtime, name) +
              "' which has only a getter");
        }
        return false;
      }

      // Execute the accessor on this object.
      if (accessor->setter.get(runtime)->executeCall1(
              runtime->makeHandle(accessor->setter),
              runtime,
              receiver,
              *valueHandle) == ExecutionStatus::EXCEPTION) {
        return ExecutionStatus::EXCEPTION;
      }
      return true;
    }

    if (LLVM_UNLIKELY(desc.flags.proxyObject)) {
      assert(
          !opFlags.getMustExist() &&
          "MustExist cannot be used with Proxy objects");
      CallResult<bool> setRes = JSProxy::setNamed(
          runtime->makeHandle(propObj), runtime, name, valueHandle, receiver);
      if (LLVM_UNLIKELY(setRes == ExecutionStatus::EXCEPTION)) {
        return ExecutionStatus::EXCEPTION;
      }
      // A false trap result is an error only when ThrowOnError is requested
      // (strict mode).
      if (!*setRes && opFlags.getThrowOnError()) {
        return runtime->raiseTypeError(
            TwineChar16("Proxy set returned false for property '") +
            runtime->getIdentifierTable().getStringView(runtime, name) + "'");
      }
      return setRes;
    }

    if (LLVM_UNLIKELY(!desc.flags.writable)) {
      if (desc.flags.staticBuiltin) {
        return raiseErrorForOverridingStaticBuiltin(
            selfHandle, runtime, runtime->makeHandle(name));
      }
      if (opFlags.getThrowOnError()) {
        return runtime->raiseTypeError(
            TwineChar16("Cannot assign to read-only property '") +
            runtime->getIdentifierTable().getStringViewForDev(runtime, name) +
            "'");
      }
      return false;
    }

    if (*selfHandle == propObj && desc.flags.internalSetter) {
      return internalSetter(
          selfHandle, runtime, name, desc, valueHandle, opFlags);
    }
  }

  // The property does not exist as an conventional own property on
  // this object.
  MutableHandle<JSObject> receiverHandle{runtime, *selfHandle};
  if (selfHandle.getHermesValue().getRaw() != receiver->getRaw() ||
      receiverHandle->isHostObject() || receiverHandle->isProxyObject()) {
    if (selfHandle.getHermesValue().getRaw() != receiver->getRaw()) {
      receiverHandle = dyn_vmcast<JSObject>(*receiver);
    }
    // A primitive receiver cannot take new properties.
    if (!receiverHandle) {
      return false;
    }
    if (getOwnNamedDescriptor(receiverHandle, runtime, name, desc)) {
      if (LLVM_UNLIKELY(desc.flags.accessor || !desc.flags.writable)) {
        return false;
      }
      assert(
          !receiverHandle->isHostObject() && !receiverHandle->isProxyObject() &&
          "getOwnNamedDescriptor never sets hostObject or proxyObject flags");
      setNamedSlotValue(
          *receiverHandle, runtime, desc, valueHandle.getHermesValue());
      return true;
    }

    // Now deal with host and proxy object cases.  We need to call
    // getOwnComputedPrimitiveDescriptor because it knows how to call
    // the [[getOwnProperty]] Proxy impl if needed.
    if (LLVM_UNLIKELY(
            receiverHandle->isHostObject() ||
            receiverHandle->isProxyObject())) {
      if (receiverHandle->isHostObject()) {
        return vmcast<HostObject>(receiverHandle.get())
            ->set(name, *valueHandle);
      }
      ComputedPropertyDescriptor desc;
      CallResult<bool> descDefinedRes = getOwnComputedPrimitiveDescriptor(
          receiverHandle,
          runtime,
          name.isUniqued()
              ? runtime->makeHandle(HermesValue::encodeStringValue(
                    runtime->getStringPrimFromSymbolID(name)))
              : runtime->makeHandle(name),
          IgnoreProxy::No,
          desc);
      if (LLVM_UNLIKELY(descDefinedRes == ExecutionStatus::EXCEPTION)) {
        return ExecutionStatus::EXCEPTION;
      }
      DefinePropertyFlags dpf;
      if (*descDefinedRes) {
        // Property already exists on the proxy: only update the value.
        dpf.setValue = 1;
      } else {
        dpf = DefinePropertyFlags::getDefaultNewPropertyFlags();
      }
      return JSProxy::defineOwnProperty(
          receiverHandle, runtime, name, dpf, valueHandle, opFlags);
    }
  }

  // Does the caller require it to exist?
  if (LLVM_UNLIKELY(opFlags.getMustExist())) {
    return runtime->raiseReferenceError(
        TwineChar16("Property '") +
        runtime->getIdentifierTable().getStringViewForDev(runtime, name) +
        "' doesn't exist");
  }

  // Add a new property.
  return addOwnProperty(
      receiverHandle,
      runtime,
      name,
      DefinePropertyFlags::getDefaultNewPropertyFlags(),
      valueHandle,
      opFlags);
}

/// Store \p valueHandle under \p name; if the object has indexed storage and
/// the name is index-like, reroute to the computed (numeric) put path,
/// otherwise fall back to the plain named put.
CallResult<bool> JSObject::putNamedOrIndexed(
    Handle<JSObject> selfHandle,
    Runtime *runtime,
    SymbolID name,
    Handle<> valueHandle,
    PropOpFlags opFlags) {
  if (LLVM_UNLIKELY(selfHandle->flags_.indexedStorage)) {
    // Note that getStringView can be satisfied without materializing the
    // Identifier.
    const auto strView =
        runtime->getIdentifierTable().getStringView(runtime, name);
    if (auto nameAsIndex = toArrayIndex(strView)) {
      return putComputed_RJS(
          selfHandle,
          runtime,
          runtime->makeHandle(HermesValue::encodeNumberValue(*nameAsIndex)),
          valueHandle,
          opFlags);
    }
    // Here we have indexed properties but the symbol was not index-like.
    // Fall through to putNamed().
  }
  return putNamed_RJS(selfHandle, runtime, name, valueHandle, opFlags);
}

CallResult<bool> JSObject::putComputedWithReceiver_RJS(
    Handle<JSObject> selfHandle,
    Runtime *runtime,
    Handle<> nameValHandle,
    Handle<> valueHandle,
    Handle<> receiver,
    PropOpFlags opFlags) {
  assert(
      !opFlags.getMustExist() &&
      "mustExist flag cannot be used with computed properties");

  // Try the fast-path first: has "index-like" properties, the "name"
  // already is a valid integer index, selfHandle and receiver are the
  // same, and it is present in storage.
  if (selfHandle->flags_.fastIndexProperties) {
    if (auto arrayIndex = toArrayIndexFastPath(*nameValHandle)) {
      if (selfHandle.getHermesValue().getRaw() == receiver->getRaw()) {
        if (haveOwnIndexed(selfHandle.get(), runtime, *arrayIndex)) {
          // Existing indexed element on self: write directly into indexed
          // storage without any descriptor lookup.
          auto result =
              setOwnIndexed(selfHandle, runtime, *arrayIndex, valueHandle);
          if (LLVM_UNLIKELY(result == ExecutionStatus::EXCEPTION))
            return ExecutionStatus::EXCEPTION;
          if (LLVM_LIKELY(*result))
            return true;

          if (opFlags.getThrowOnError()) {
            // TODO: better message.
            return runtime->raiseTypeError(
                "Cannot assign to read-only property");
          }
          return false;
        }
      }
    }
  }

  // If nameValHandle is an object, we should convert it to string now,
  // because toString may have side-effect, and we want to do this only
  // once.
  auto converted = toPropertyKeyIfObject(runtime, nameValHandle);
  if (LLVM_UNLIKELY(converted == ExecutionStatus::EXCEPTION)) {
    return ExecutionStatus::EXCEPTION;
  }
  auto nameValPrimitiveHandle = *converted;

  ComputedPropertyDescriptor desc;

  // Look for the property in this object or along the prototype chain.
  MutableHandle<JSObject> propObj{runtime};
  if (LLVM_UNLIKELY(
          getComputedPrimitiveDescriptor(
              selfHandle, runtime, nameValPrimitiveHandle, propObj, desc) ==
          ExecutionStatus::EXCEPTION)) {
    return ExecutionStatus::EXCEPTION;
  }

  // If the property exists (or, we hit a proxy/hostobject on the way
  // up the chain)
  if (propObj) {
    // Get the simple case out of the way: If the property already
    // exists on selfHandle, is not an accessor, selfHandle and
    // receiver are the same, selfHandle is not a host
    // object/proxy/internal setter, and the property is writable,
    // just write into the same slot.
    if (LLVM_LIKELY(
            selfHandle == propObj &&
            selfHandle.getHermesValue().getRaw() == receiver->getRaw() &&
            !desc.flags.accessor && !desc.flags.internalSetter &&
            !desc.flags.hostObject && !desc.flags.proxyObject &&
            desc.flags.writable)) {
      if (LLVM_UNLIKELY(
              setComputedSlotValue(selfHandle, runtime, desc, valueHandle) ==
              ExecutionStatus::EXCEPTION)) {
        return ExecutionStatus::EXCEPTION;
      }
      return true;
    }

    // Is it an accessor?
    if (LLVM_UNLIKELY(desc.flags.accessor)) {
      auto *accessor = vmcast<PropertyAccessor>(
          getComputedSlotValue(propObj.get(), runtime, desc));

      // If it is a read-only accessor, fail.
      if (!accessor->setter) {
        if (opFlags.getThrowOnError()) {
          return runtime->raiseTypeErrorForValue(
              "Cannot assign to property ",
              nameValPrimitiveHandle,
              " which has only a getter");
        }
        return false;
      }

      // Execute the accessor on this object.
      // Note: the setter is invoked with the *receiver* as `this`, even
      // though the accessor itself may live further up the prototype chain.
      if (accessor->setter.get(runtime)->executeCall1(
              runtime->makeHandle(accessor->setter),
              runtime,
              receiver,
              valueHandle.get()) == ExecutionStatus::EXCEPTION) {
        return ExecutionStatus::EXCEPTION;
      }
      return true;
    }

    if (LLVM_UNLIKELY(desc.flags.proxyObject)) {
      assert(
          !opFlags.getMustExist() &&
          "MustExist cannot be used with Proxy objects");
      CallResult<Handle<>> key = toPropertyKey(runtime, nameValPrimitiveHandle);
      if (key == ExecutionStatus::EXCEPTION)
        return ExecutionStatus::EXCEPTION;
      // Delegate the whole set operation to the proxy's [[Set]] trap.
      CallResult<bool> setRes =
          JSProxy::setComputed(propObj, runtime, *key, valueHandle, receiver);
      if (LLVM_UNLIKELY(setRes == ExecutionStatus::EXCEPTION)) {
        return ExecutionStatus::EXCEPTION;
      }
      if (!*setRes && opFlags.getThrowOnError()) {
        // TODO: better message.
        return runtime->raiseTypeError(
            TwineChar16("Proxy trap returned false for property"));
      }
      return setRes;
    }

    if (LLVM_UNLIKELY(!desc.flags.writable)) {
      if (desc.flags.staticBuiltin) {
        SymbolID id{};
        LAZY_TO_IDENTIFIER(runtime, nameValPrimitiveHandle, id);
        return raiseErrorForOverridingStaticBuiltin(
            selfHandle, runtime, runtime->makeHandle(id));
      }
      if (opFlags.getThrowOnError()) {
        return runtime->raiseTypeErrorForValue(
            "Cannot assign to read-only property ",
            nameValPrimitiveHandle,
            "");
      }
      return false;
    }

    if (selfHandle == propObj && desc.flags.internalSetter) {
      SymbolID id{};
      LAZY_TO_IDENTIFIER(runtime, nameValPrimitiveHandle, id);
      return internalSetter(
          selfHandle,
          runtime,
          id,
          desc.castToNamedPropertyDescriptorRef(),
          valueHandle,
          opFlags);
    }
  }

  // The property does not exist as an conventional own property on
  // this object.
  MutableHandle<JSObject> receiverHandle{runtime, *selfHandle};
  // Slow receiver path: the receiver differs from self, or the receiver
  // needs special handling (host object / proxy).
  if (selfHandle.getHermesValue().getRaw() != receiver->getRaw() ||
      receiverHandle->isHostObject() || receiverHandle->isProxyObject()) {
    if (selfHandle.getHermesValue().getRaw() != receiver->getRaw()) {
      receiverHandle = dyn_vmcast<JSObject>(*receiver);
    }
    // A primitive receiver cannot accept the property; fail silently.
    if (!receiverHandle) {
      return false;
    }
    CallResult<bool> descDefinedRes = getOwnComputedPrimitiveDescriptor(
        receiverHandle, runtime, nameValPrimitiveHandle, IgnoreProxy::No, desc);
    if (LLVM_UNLIKELY(descDefinedRes == ExecutionStatus::EXCEPTION)) {
      return ExecutionStatus::EXCEPTION;
    }
    DefinePropertyFlags dpf;
    if (*descDefinedRes) {
      // The receiver already owns the property: reject accessors and
      // read-only data properties, otherwise write in place.
      if (LLVM_UNLIKELY(desc.flags.accessor || !desc.flags.writable)) {
        return false;
      }

      if (LLVM_LIKELY(
              !desc.flags.internalSetter && !receiverHandle->isHostObject() &&
              !receiverHandle->isProxyObject())) {
        if (LLVM_UNLIKELY(
                setComputedSlotValue(
                    receiverHandle, runtime, desc, valueHandle) ==
                ExecutionStatus::EXCEPTION)) {
          return ExecutionStatus::EXCEPTION;
        }
        return true;
      }
    }
    if (LLVM_UNLIKELY(
            desc.flags.internalSetter || receiverHandle->isHostObject() ||
            receiverHandle->isProxyObject())) {
      SymbolID id{};
      LAZY_TO_IDENTIFIER(runtime, nameValPrimitiveHandle, id);
      if (desc.flags.internalSetter) {
        return internalSetter(
            receiverHandle,
            runtime,
            id,
            desc.castToNamedPropertyDescriptorRef(),
            valueHandle,
            opFlags);
      } else if (receiverHandle->isHostObject()) {
        return vmcast<HostObject>(receiverHandle.get())->set(id, *valueHandle);
      }
      assert(
          receiverHandle->isProxyObject() && "descriptor flags are impossible");
      // For a proxy receiver, redefine (or define) the property via the
      // defineOwnProperty trap per the OrdinarySet semantics.
      if (*descDefinedRes) {
        dpf.setValue = 1;
      } else {
        dpf = DefinePropertyFlags::getDefaultNewPropertyFlags();
      }
      return JSProxy::defineOwnProperty(
          receiverHandle, runtime, id, dpf, valueHandle, opFlags);
    }
  }

  /// Can we add more properties?
  if (LLVM_UNLIKELY(!receiverHandle->isExtensible())) {
    if (opFlags.getThrowOnError()) {
      return runtime->raiseTypeError(
          "cannot add a new property"); // TODO: better message.
    }
    return false;
  }

  // If we have indexed storage we must check whether the property is an index,
  // and if it is, store it in indexed storage.
  if (receiverHandle->flags_.indexedStorage) {
    OptValue<uint32_t> arrayIndex;
    MutableHandle<StringPrimitive> strPrim{runtime};
    TO_ARRAY_INDEX(runtime, nameValPrimitiveHandle, strPrim, arrayIndex);
    if (arrayIndex) {
      // Check whether we need to update array's ".length" property.
      if (auto *array = dyn_vmcast<JSArray>(receiverHandle.get())) {
        if (LLVM_UNLIKELY(*arrayIndex >= JSArray::getLength(array))) {
          // Growing past the current length: update .length first so a
          // non-writable length fails the whole store.
          auto cr = putNamed_RJS(
              receiverHandle,
              runtime,
              Predefined::getSymbolID(Predefined::length),
              runtime->makeHandle(
                  HermesValue::encodeNumberValue(*arrayIndex + 1)),
              opFlags);
          if (LLVM_UNLIKELY(cr == ExecutionStatus::EXCEPTION))
            return ExecutionStatus::EXCEPTION;
          if (LLVM_UNLIKELY(!*cr))
            return false;
        }
      }

      auto result =
          setOwnIndexed(receiverHandle, runtime, *arrayIndex, valueHandle);
      if (LLVM_UNLIKELY(result == ExecutionStatus::EXCEPTION))
        return ExecutionStatus::EXCEPTION;
      if (LLVM_LIKELY(*result))
        return true;

      if (opFlags.getThrowOnError()) {
        // TODO: better message.
        return runtime->raiseTypeError("Cannot assign to read-only property");
      }
      return false;
    }
  }

  SymbolID id{};
  LAZY_TO_IDENTIFIER(runtime, nameValPrimitiveHandle, id);

  // Add a new named property.
  return addOwnProperty(
      receiverHandle,
      runtime,
      id,
      DefinePropertyFlags::getDefaultNewPropertyFlags(),
      valueHandle,
      opFlags);
}

/// Delete the named property \p name from \p selfHandle.
/// Returns true on success (including when the property did not exist),
/// false if the property is non-configurable and ThrowOnError is not set.
CallResult<bool> JSObject::deleteNamed(
    Handle<JSObject> selfHandle,
    Runtime *runtime,
    SymbolID name,
    PropOpFlags opFlags) {
  assert(
      !opFlags.getMustExist() && "mustExist cannot be specified when deleting");

  // Find the property by name.
  NamedPropertyDescriptor desc;
  auto pos = findProperty(selfHandle, runtime, name, desc);

  // If the property doesn't exist in this object, return success.
  if (!pos) {
    if (LLVM_LIKELY(
            !selfHandle->flags_.lazyObject &&
            !selfHandle->flags_.proxyObject)) {
      return true;
    } else if (selfHandle->flags_.lazyObject) {
      // object is lazy, initialize and read again.
      initializeLazyObject(runtime, selfHandle);
      pos = findProperty(selfHandle, runtime, name, desc);
      if (!pos) // still not there, return true.
        return true;
    } else {
      assert(selfHandle->flags_.proxyObject && "object flags are impossible");
      // Delegate to the proxy's deleteProperty trap.
      return proxyOpFlags(
          runtime,
          opFlags,
          "Proxy delete returned false",
          JSProxy::deleteNamed(selfHandle, runtime, name));
    }
  }
  // If the property isn't configurable, fail.
  if (LLVM_UNLIKELY(!desc.flags.configurable)) {
    if (opFlags.getThrowOnError()) {
      return runtime->raiseTypeError(
          TwineChar16("Property '") +
          runtime->getIdentifierTable().getStringViewForDev(runtime, name) +
          "' is not configurable");
    }
    return false;
  }

  // Clear the deleted property value to prevent memory leaks.
  setNamedSlotValue(
      *selfHandle, runtime, desc, HermesValue::encodeEmptyValue());

  // Perform the actual deletion: transition to a hidden class without the
  // property.
  auto newClazz = HiddenClass::deleteProperty(
      runtime->makeHandle(selfHandle->clazz_), runtime, *pos);
  selfHandle->clazz_.set(runtime, *newClazz, &runtime->getHeap());

  return true;
}

/// Delete the property named by the computed key \p nameValHandle.
CallResult<bool> JSObject::deleteComputed(
    Handle<JSObject> selfHandle,
    Runtime *runtime,
    Handle<> nameValHandle,
    PropOpFlags opFlags) {
  assert(
      !opFlags.getMustExist() && "mustExist cannot be specified when deleting");

  // If nameValHandle is an object, we should convert it to string now,
  // because toString may have side-effect, and we want to do this only
  // once.
  auto converted = toPropertyKeyIfObject(runtime, nameValHandle);
  if (LLVM_UNLIKELY(converted == ExecutionStatus::EXCEPTION)) {
    return ExecutionStatus::EXCEPTION;
  }
  auto nameValPrimitiveHandle = *converted;

  // If the name is a valid integer array index, store it here.
  OptValue<uint32_t> arrayIndex;

  // If we have indexed storage, we must attempt to convert the name to array
  // index, even if the conversion is expensive.
  if (selfHandle->flags_.indexedStorage) {
    MutableHandle<StringPrimitive> strPrim{runtime};
    TO_ARRAY_INDEX(runtime, nameValPrimitiveHandle, strPrim, arrayIndex);
  }

  // Try the fast-path first: the "name" is a valid array index and we don't
  // have "index-like" named properties.
  if (arrayIndex && selfHandle->flags_.fastIndexProperties) {
    // Delete the indexed property.
    if (deleteOwnIndexed(selfHandle, runtime, *arrayIndex))
      return true;

    // Cannot delete property (for example this may be a typed array).
    if (opFlags.getThrowOnError()) {
      // TODO: better error message.
      return runtime->raiseTypeError("Cannot delete property");
    }
    return false;
  }

  // slow path, check if object is lazy before continuing.
  if (LLVM_UNLIKELY(selfHandle->flags_.lazyObject)) {
    // initialize and try again.
    initializeLazyObject(runtime, selfHandle);
    // Retry the whole operation now that the object is materialized.
    return deleteComputed(selfHandle, runtime, nameValHandle, opFlags);
  }

  // Convert the key to a SymbolID.
  SymbolID id;
  LAZY_TO_IDENTIFIER(runtime, nameValPrimitiveHandle, id);

  // Find the property by name.
  NamedPropertyDescriptor desc;
  auto pos = findProperty(selfHandle, runtime, id, desc);

  // If the property exists, make sure it is configurable.
  if (pos) {
    // If the property isn't configurable, fail.
    if (LLVM_UNLIKELY(!desc.flags.configurable)) {
      if (opFlags.getThrowOnError()) {
        // TODO: a better message.
        return runtime->raiseTypeError("Property is not configurable");
      }
      return false;
    }
  }

  // At this point we know that the named property either doesn't exist, or
  // is configurable and so can be deleted, or the object is a Proxy.

  // If it is an "index-like" property, we must also delete the "shadow" indexed
  // property in order to keep Array.length correct.
  if (arrayIndex) {
    if (!deleteOwnIndexed(selfHandle, runtime, *arrayIndex)) {
      // Cannot delete property (for example this may be a typed array).
      if (opFlags.getThrowOnError()) {
        // TODO: better error message.
        return runtime->raiseTypeError("Cannot delete property");
      }
      return false;
    }
  }

  if (pos) {
    // delete the named property (if it exists).
    // Clear the deleted property value to prevent memory leaks.
    setNamedSlotValue(
        *selfHandle, runtime, desc, HermesValue::encodeEmptyValue());

    // Remove the property descriptor via a hidden-class transition.
    auto newClazz = HiddenClass::deleteProperty(
        runtime->makeHandle(selfHandle->clazz_), runtime, *pos);
    selfHandle->clazz_.set(runtime, *newClazz, &runtime->getHeap());
  } else if (LLVM_UNLIKELY(selfHandle->flags_.proxyObject)) {
    CallResult<Handle<>> key = toPropertyKey(runtime, nameValPrimitiveHandle);
    if (key == ExecutionStatus::EXCEPTION)
      return ExecutionStatus::EXCEPTION;
    return proxyOpFlags(
        runtime,
        opFlags,
        "Proxy delete returned false",
        JSProxy::deleteComputed(selfHandle, runtime, *key));
  }

  return true;
}

/// Define or update the own property \p name with descriptor flags
/// \p dpFlags and value/accessor \p valueOrAccessor
/// (ECMA-262 OrdinaryDefineOwnProperty for named keys).
CallResult<bool> JSObject::defineOwnProperty(
    Handle<JSObject> selfHandle,
    Runtime *runtime,
    SymbolID name,
    DefinePropertyFlags dpFlags,
    Handle<> valueOrAccessor,
    PropOpFlags opFlags) {
  assert(
      !opFlags.getMustExist() && "cannot use mustExist with defineOwnProperty");
  assert(
      !(dpFlags.setValue && dpFlags.isAccessor()) &&
      "Cannot set both value and accessor");
  assert(
      (dpFlags.setValue || dpFlags.isAccessor() ||
       valueOrAccessor.get().isUndefined()) &&
      "value must be undefined when all of setValue/setSetter/setGetter are "
      "false");
#ifndef NDEBUG
  if (dpFlags.isAccessor()) {
    assert(valueOrAccessor.get().isPointer() && "accessor must be non-empty");
    assert(
        !dpFlags.setWritable && !dpFlags.writable &&
        "writable must not be set with accessors");
  }
#endif

  // Is it an existing property.
  NamedPropertyDescriptor desc;
  auto pos = findProperty(selfHandle, runtime, name, desc);
  if (pos) {
    return updateOwnProperty(
        selfHandle,
        runtime,
        name,
        *pos,
        desc,
        dpFlags,
        valueOrAccessor,
        opFlags);
  }

  if (LLVM_UNLIKELY(
          selfHandle->flags_.lazyObject || selfHandle->flags_.proxyObject)) {
    if (selfHandle->flags_.proxyObject) {
      return JSProxy::defineOwnProperty(
          selfHandle, runtime, name, dpFlags, valueOrAccessor, opFlags);
    }
    assert(selfHandle->flags_.lazyObject && "descriptor flags are impossible");
    // if the property was not found and the object is lazy we need to
    // initialize it and try again.
    JSObject::initializeLazyObject(runtime, selfHandle);
    // Retry now that the lazy object has real properties.
    return defineOwnProperty(
        selfHandle, runtime, name, dpFlags, valueOrAccessor, opFlags);
  }

  return addOwnProperty(
      selfHandle, runtime, name, dpFlags, valueOrAccessor, opFlags);
}

/// Fast-path define for a property that is known NOT to exist yet
/// (asserted in debug builds). Skips the existing-property lookup.
ExecutionStatus JSObject::defineNewOwnProperty(
    Handle<JSObject> selfHandle,
    Runtime *runtime,
    SymbolID name,
    PropertyFlags propertyFlags,
    Handle<> valueOrAccessor) {
  assert(
      !selfHandle->flags_.proxyObject &&
      "definedNewOwnProperty cannot be used with proxy objects");
  assert(
      !(propertyFlags.accessor && !valueOrAccessor.get().isPointer()) &&
      "accessor must be non-empty");
  assert(
      !(propertyFlags.accessor && propertyFlags.writable) &&
      "writable must not be set with accessors");
  assert(
      !HiddenClass::debugIsPropertyDefined(
          selfHandle->clazz_.get(runtime), runtime, name) &&
      "new property is already defined");

  return addOwnPropertyImpl(
      selfHandle, runtime, name, propertyFlags, valueOrAccessor);
}

/// defineOwnProperty for a computed key that has already been converted to
/// a primitive. Handles the interaction between named properties and
/// indexed storage for index-like keys.
CallResult<bool> JSObject::defineOwnComputedPrimitive(
    Handle<JSObject> selfHandle,
    Runtime *runtime,
    Handle<> nameValHandle,
    DefinePropertyFlags dpFlags,
    Handle<> valueOrAccessor,
    PropOpFlags opFlags) {
  assert(
      !nameValHandle->isObject() &&
      "nameValHandle passed to "
      "defineOwnComputedPrimitive() cannot be "
      "an object");
  assert(
      !opFlags.getMustExist() && "cannot use mustExist with defineOwnProperty");
  assert(
      !(dpFlags.setValue && dpFlags.isAccessor()) &&
      "Cannot set both value and accessor");
  assert(
      (dpFlags.setValue || dpFlags.isAccessor() ||
       valueOrAccessor.get().isUndefined()) &&
      "value must be undefined when all of setValue/setSetter/setGetter are "
      "false");
  assert(
      !dpFlags.enableInternalSetter &&
      "Cannot set internalSetter on a computed property");
#ifndef NDEBUG
  if (dpFlags.isAccessor()) {
    assert(valueOrAccessor.get().isPointer() && "accessor must be non-empty");
    assert(
        !dpFlags.setWritable && !dpFlags.writable &&
        "writable must not be set with accessors");
  }
#endif

  // If the name is a valid integer array index, store it here.
  OptValue<uint32_t> arrayIndex;

  // If we have indexed storage, we must attempt to convert the name to array
  // index, even if the conversion is expensive.
  if (selfHandle->flags_.indexedStorage) {
    MutableHandle<StringPrimitive> strPrim{runtime};
    TO_ARRAY_INDEX(runtime, nameValHandle, strPrim, arrayIndex);
  }

  SymbolID id{};

  // If not storing a property with an array index name, or if we don't have
  // indexed storage, just pass to the named routine.
  if (!arrayIndex) {
    LAZY_TO_IDENTIFIER(runtime, nameValHandle, id);
    return defineOwnProperty(
        selfHandle, runtime, id, dpFlags, valueOrAccessor, opFlags);
  }

  // At this point we know that we have indexed storage and that the property
  // has an index-like name.

  // First check if a named property with the same name exists.
  if (selfHandle->clazz_.get(runtime)->getHasIndexLikeProperties()) {
    LAZY_TO_IDENTIFIER(runtime, nameValHandle, id);
    NamedPropertyDescriptor desc;
    auto pos = findProperty(selfHandle, runtime, id, desc);
    // If we found a named property, update it.
    if (pos) {
      return updateOwnProperty(
          selfHandle,
          runtime,
          id,
          *pos,
          desc,
          dpFlags,
          valueOrAccessor,
          opFlags);
    }
  }

  // Does an indexed property with that index exist?
  auto indexedPropPresent =
      getOwnIndexedPropertyFlags(selfHandle.get(), runtime, *arrayIndex);
  if (indexedPropPresent) {
    // The current value of the property.
    HermesValue curValueOrAccessor =
        getOwnIndexed(selfHandle.get(), runtime, *arrayIndex);

    auto updateStatus = checkPropertyUpdate(
        runtime,
        *indexedPropPresent,
        dpFlags,
        curValueOrAccessor,
        valueOrAccessor,
        opFlags);
    if (updateStatus == ExecutionStatus::EXCEPTION)
      return ExecutionStatus::EXCEPTION;
    if (updateStatus->first == PropertyUpdateStatus::failed)
      return false;

    // The property update is valid, but can the property remain an "indexed"
    // property, or do we need to convert it to a named property?
    // If the property flags didn't change, the property remains indexed.
    if (updateStatus->second == *indexedPropPresent) {
      // If the value doesn't change, we are done.
      if (updateStatus->first == PropertyUpdateStatus::done)
        return true;

      // If we successfully updated the value, we are done.
      auto result =
          setOwnIndexed(selfHandle, runtime, *arrayIndex, valueOrAccessor);
      if (LLVM_UNLIKELY(result == ExecutionStatus::EXCEPTION))
        return ExecutionStatus::EXCEPTION;
      if (*result)
        return true;

      if (opFlags.getThrowOnError()) {
        // TODO: better error message.
        return runtime->raiseTypeError(
            "cannot change read-only property value");
      }
      return false;
    }

    // OK, we need to convert an indexed property to a named one.

    // Check whether to use the supplied value, or to reuse the old one, as we
    // are simply reconfiguring it.
    MutableHandle<> value{runtime};
    if (dpFlags.setValue || dpFlags.isAccessor()) {
      value = valueOrAccessor.get();
    } else {
      value = curValueOrAccessor;
    }

    // Update dpFlags to match the existing property flags.
    dpFlags.setEnumerable = 1;
    dpFlags.setWritable = 1;
    dpFlags.setConfigurable = 1;
    dpFlags.enumerable = updateStatus->second.enumerable;
    dpFlags.writable = updateStatus->second.writable;
    dpFlags.configurable = updateStatus->second.configurable;

    // Delete the existing indexed property.
    if (!deleteOwnIndexed(selfHandle, runtime, *arrayIndex)) {
      if (opFlags.getThrowOnError()) {
        // TODO: better error message.
        return runtime->raiseTypeError("Cannot define property");
      }
      return false;
    }

    // Add the new named property.
    LAZY_TO_IDENTIFIER(runtime, nameValHandle, id);
    return addOwnProperty(selfHandle, runtime, id, dpFlags, value, opFlags);
  }

  /// Can we add new properties?
  if (!selfHandle->isExtensible()) {
    if (opFlags.getThrowOnError()) {
      return runtime->raiseTypeError(
          "cannot add a new property"); // TODO: better message.
    }
    return false;
  }

  // This is a new property with an index-like name.
  // Check whether we need to update array's ".length" property.
  bool updateLength = false;
  if (auto arrayHandle = Handle<JSArray>::dyn_vmcast(selfHandle)) {
    if (LLVM_UNLIKELY(*arrayIndex >= JSArray::getLength(*arrayHandle))) {
      NamedPropertyDescriptor lengthDesc;
      bool lengthPresent = getOwnNamedDescriptor(
          arrayHandle,
          runtime,
          Predefined::getSymbolID(Predefined::length),
          lengthDesc);
      (void)lengthPresent;
      assert(lengthPresent && ".length must be present in JSArray");

      if (!lengthDesc.flags.writable) {
        if (opFlags.getThrowOnError()) {
          return runtime->raiseTypeError(
              "Cannot assign to read-only 'length' property of array");
        }
        return false;
      }

      updateLength = true;
    }
  }

  bool newIsIndexed = canNewPropertyBeIndexed(dpFlags);
  if (newIsIndexed) {
    auto result = setOwnIndexed(
        selfHandle,
        runtime,
        *arrayIndex,
        dpFlags.setValue ? valueOrAccessor : Runtime::getUndefinedValue());
    if (LLVM_UNLIKELY(result == ExecutionStatus::EXCEPTION))
      return ExecutionStatus::EXCEPTION;
    if (!*result) {
      if (opFlags.getThrowOnError()) {
        // TODO: better error message.
        return runtime->raiseTypeError("Cannot define property");
      }
      return false;
    }
  }

  // If this is an array and we need to update ".length", do so.
  if (updateLength) {
    // This should always succeed since we are simply enlarging the length.
    auto res = JSArray::setLength(
        Handle<JSArray>::vmcast(selfHandle), runtime, *arrayIndex + 1, opFlags);
    (void)res;
    assert(
        res != ExecutionStatus::EXCEPTION && *res &&
        "JSArray::setLength() failed unexpectedly");
  }

  if (newIsIndexed)
    return true;

  // We are adding a new property with an index-like name.
  LAZY_TO_IDENTIFIER(runtime, nameValHandle, id);
  return addOwnProperty(
      selfHandle, runtime, id, dpFlags, valueOrAccessor, opFlags);
}

/// Public computed defineOwnProperty: converts an object key to a primitive
/// (which may run user code via toString/valueOf) exactly once, then
/// delegates to defineOwnComputedPrimitive().
CallResult<bool> JSObject::defineOwnComputed(
    Handle<JSObject> selfHandle,
    Runtime *runtime,
    Handle<> nameValHandle,
    DefinePropertyFlags dpFlags,
    Handle<> valueOrAccessor,
    PropOpFlags opFlags) {
  auto converted = toPropertyKeyIfObject(runtime, nameValHandle);
  if (LLVM_UNLIKELY(converted == ExecutionStatus::EXCEPTION))
    return ExecutionStatus::EXCEPTION;
  return defineOwnComputedPrimitive(
      selfHandle, runtime, *converted, dpFlags, valueOrAccessor, opFlags);
}

/// Build a human-readable type name for heap snapshots: prefers the
/// constructor's name, then falls back to the cell's default name, possibly
/// decorated with "(Dictionary)" or a short list of property names.
/// Allocation-free by design (runs during heap snapshotting).
std::string JSObject::getHeuristicTypeName(GC *gc) {
  PointerBase *const base = gc->getPointerBase();
  if (auto constructorVal = tryGetNamedNoAlloc(
          this, base, Predefined::getSymbolID(Predefined::constructor))) {
    if (auto *constructor = dyn_vmcast<JSObject>(*constructorVal)) {
      auto name = constructor->getNameIfExists(base);
      // If the constructor's name doesn't exist, or it is just the object
      // constructor, attempt to find a different name.
      if (!name.empty() && name != "Object")
        return name;
    }
  }
  std::string name = getVT()->base.snapshotMetaData.defaultNameForNode(this);
  // A constructor's name was not found, check if the object is in dictionary
  // mode.
  if (getClass(base)->isDictionary()) {
    return name + "(Dictionary)";
  }
  // If it's not an Object, the CellKind is most likely good enough on its own
  if (getKind() != CellKind::ObjectKind) {
    return name;
  }

  // If the object isn't a dictionary, and it has only a few property names,
  // make the name based on those property names.
  std::vector<std::string> propertyNames;
  HiddenClass::forEachPropertyNoAlloc(
      getClass(base),
      base,
      [gc, &propertyNames](SymbolID id, NamedPropertyDescriptor) {
        if (InternalProperty::isInternal(id)) {
          // Internal properties aren't user-visible, skip them.
          return;
        }
        propertyNames.emplace_back(gc->convertSymbolToUTF8(id));
      });
  // NOTE: One option is to sort the property names before truncation, to
  // reduce the number of groups; however, by not sorting them it makes it
  // easier to spot sets of objects with the same properties but in different
  // orders, and thus find HiddenClass optimizations to make.

  // For objects with a lot of properties but aren't in dictionary mode yet,
  // keep the number displayed small.
  constexpr int kMaxPropertiesForTypeName = 5;
  bool truncated = false;
  if (propertyNames.size() > kMaxPropertiesForTypeName) {
    propertyNames.erase(
        propertyNames.begin() + kMaxPropertiesForTypeName,
        propertyNames.end());
    truncated = true;
  }
  // The final name should look like Object(a, b, c).
  if (propertyNames.empty()) {
    // Don't add parentheses for objects with no properties.
    return name;
  }
  name += "(";
  bool first = true;
  for (const auto &prop : propertyNames) {
    if (!first) {
      name += ", ";
    }
    first = false;
    name += prop;
  }
  if (truncated) {
    // No need to check for comma edge case because this only happens for
    // greater than one property.
    static_assert(
        kMaxPropertiesForTypeName >= 1,
        "Property truncation should not happen for 0 properties");
    name += ", ...";
  }
  name += ")";
  return name;
}

/// Return the object's "displayName" or "name" property as a UTF-8 string,
/// or "" if neither is a string. Allocation-free (snapshot-safe).
std::string JSObject::getNameIfExists(PointerBase *base) {
  // Try "displayName" first, if it is defined.
  if (auto nameVal = tryGetNamedNoAlloc(
          this, base, Predefined::getSymbolID(Predefined::displayName))) {
    if (auto *name = dyn_vmcast<StringPrimitive>(*nameVal)) {
      return converter(name);
    }
  }
  // Next, use "name" if it is defined.
  if (auto nameVal = tryGetNamedNoAlloc(
          this, base, Predefined::getSymbolID(Predefined::name))) {
    if (auto *name = dyn_vmcast<StringPrimitive>(*nameVal)) {
      return converter(name);
    }
  }
  // There is no other way to access the "name" property on an object.
  return "";
}

/// Heap-snapshot node name for a JSObject cell.
std::string JSObject::_snapshotNameImpl(GCCell *cell, GC *gc) {
  auto *const self = vmcast<JSObject>(cell);
  return self->getHeuristicTypeName(gc);
}

/// Emit heap-snapshot edges for the prototype link and each user-visible
/// named property of this object.
void JSObject::_snapshotAddEdgesImpl(GCCell *cell, GC *gc, HeapSnapshot &snap) {
  auto *const self = vmcast<JSObject>(cell);

  // Add the prototype as a property edge, so it's easy for JS developers to
  // walk the prototype chain on their own.
  if (self->parent_) {
    snap.addNamedEdge(
        HeapSnapshot::EdgeType::Property,
        // __proto__ chosen for similarity to V8.
        "__proto__",
        gc->getObjectID(self->parent_));
  }

  HiddenClass::forEachPropertyNoAlloc(
      self->clazz_.get(gc->getPointerBase()),
      gc->getPointerBase(),
      [self, gc, &snap](SymbolID id, NamedPropertyDescriptor desc) {
        if (InternalProperty::isInternal(id)) {
          // Internal properties aren't user-visible, skip them.
          return;
        }
        // Else, it's a user-visible property.
        GCHermesValue &prop =
            namedSlotRef(self, gc->getPointerBase(), desc.slot);
        const llvh::Optional<HeapSnapshot::NodeID> idForProp =
            gc->getSnapshotID(prop);
        if (!idForProp) {
          return;
        }
        std::string propName = gc->convertSymbolToUTF8(id);
        // If the property name is a valid array index, display it as an
        // "element" instead of a "property". This will put square brackets
        // around the number and sort it numerically rather than
        // alphabetically.
        if (auto index = ::hermes::toArrayIndex(propName)) {
          snap.addIndexedEdge(
              HeapSnapshot::EdgeType::Element,
              index.getValue(),
              idForProp.getValue());
        } else {
          snap.addNamedEdge(
              HeapSnapshot::EdgeType::Property,
              propName,
              idForProp.getValue());
        }
      });
}

/// Attach source-location info to the snapshot node when the object's
/// constructor is a user-defined JS function.
void JSObject::_snapshotAddLocationsImpl(
    GCCell *cell,
    GC *gc,
    HeapSnapshot &snap) {
  auto *const self = vmcast<JSObject>(cell);
  PointerBase *const base = gc->getPointerBase();
  // Add the location of the constructor function for this object, if that
  // constructor is a user-defined JS function.
  if (auto constructorVal = tryGetNamedNoAlloc(
          self, base, Predefined::getSymbolID(Predefined::constructor))) {
    if (constructorVal->isObject()) {
      if (auto *constructor = dyn_vmcast<JSFunction>(*constructorVal)) {
        constructor->addLocationToSnapshot(snap, gc->getObjectID(self));
      }
    }
  }
}

// Default implementations of the indexed-storage hooks for plain JSObjects,
// which have no indexed storage: empty range, no elements, all operations
// are no-ops that report "not present" / "cannot set".

std::pair<uint32_t, uint32_t> JSObject::_getOwnIndexedRangeImpl(
    JSObject *self,
    Runtime *runtime) {
  return {0, 0};
}

bool JSObject::_haveOwnIndexedImpl(JSObject *self, Runtime *, uint32_t) {
  return false;
}

OptValue<PropertyFlags> JSObject::_getOwnIndexedPropertyFlagsImpl(
    JSObject *self,
    Runtime *runtime,
    uint32_t) {
  return llvh::None;
}

HermesValue JSObject::_getOwnIndexedImpl(JSObject *, Runtime *, uint32_t) {
  return HermesValue::encodeEmptyValue();
}

CallResult<bool>
JSObject::_setOwnIndexedImpl(Handle<JSObject>, Runtime *, uint32_t, Handle<>) {
  return false;
}

bool JSObject::_deleteOwnIndexedImpl(Handle<JSObject>, Runtime *, uint32_t) {
  return false;
}

bool JSObject::_checkAllOwnIndexedImpl(
    JSObject * /*self*/,
    Runtime * /*runtime*/,
    ObjectVTable::CheckAllOwnIndexedMode /*mode*/) {
  return true;
}

/// Non-proxy [[PreventExtensions]]: simply set the noExtend flag.
void JSObject::preventExtensions(JSObject *self) {
  assert(
      !self->flags_.proxyObject &&
      "[[Extensible]] slot cannot be set directly on Proxy objects");
  self->flags_.noExtend = true;
}

/// [[PreventExtensions]] entry point that dispatches to the proxy trap when
/// needed; always returns true for ordinary objects.
CallResult<bool> JSObject::preventExtensions(
    Handle<JSObject> selfHandle,
    Runtime *runtime,
    PropOpFlags opFlags) {
  if (LLVM_UNLIKELY(selfHandle->isProxyObject())) {
    return JSProxy::preventExtensions(selfHandle, runtime, opFlags);
  }
  JSObject::preventExtensions(*selfHandle);
  return true;
}

/// Object.seal(): prevent extensions, then make every own property
/// non-configurable via a hidden-class transition.
ExecutionStatus JSObject::seal(Handle<JSObject> selfHandle, Runtime *runtime) {
  CallResult<bool> statusRes = JSObject::preventExtensions(
      selfHandle, runtime, PropOpFlags().plusThrowOnError());
  if (LLVM_UNLIKELY(statusRes == ExecutionStatus::EXCEPTION)) {
    return ExecutionStatus::EXCEPTION;
  }
  assert(
      *statusRes && "seal preventExtensions with ThrowOnError returned false");

  // Already sealed?
  if (selfHandle->flags_.sealed)
    return ExecutionStatus::RETURNED;

  auto newClazz = HiddenClass::makeAllNonConfigurable(
      runtime->makeHandle(selfHandle->clazz_), runtime);
  selfHandle->clazz_.set(runtime, *newClazz, &runtime->getHeap());

  selfHandle->flags_.sealed = true;

  return ExecutionStatus::RETURNED;
}

/// Object.freeze(): prevent extensions, then make every own property
/// read-only and non-configurable. A frozen object is also sealed.
ExecutionStatus JSObject::freeze(
    Handle<JSObject> selfHandle,
    Runtime *runtime) {
  CallResult<bool> statusRes = JSObject::preventExtensions(
      selfHandle, runtime, PropOpFlags().plusThrowOnError());
  if (LLVM_UNLIKELY(statusRes == ExecutionStatus::EXCEPTION)) {
    return ExecutionStatus::EXCEPTION;
  }
  assert(
      *statusRes &&
      "freeze preventExtensions with ThrowOnError returned false");

  // Already frozen?
  if (selfHandle->flags_.frozen)
    return ExecutionStatus::RETURNED;

  auto newClazz = HiddenClass::makeAllReadOnly(
      runtime->makeHandle(selfHandle->clazz_), runtime);
  selfHandle->clazz_.set(runtime, *newClazz, &runtime->getHeap());

  selfHandle->flags_.frozen = true;
  selfHandle->flags_.sealed = true;

  return ExecutionStatus::RETURNED;
}

/// Bulk-update property flags on (optionally a subset of) the object's
/// properties without creating hidden-class transition edges.
void JSObject::updatePropertyFlagsWithoutTransitions(
    Handle<JSObject> selfHandle,
    Runtime *runtime,
    PropertyFlags flagsToClear,
    PropertyFlags flagsToSet,
    OptValue<llvh::ArrayRef<SymbolID>> props) {
  auto newClazz = HiddenClass::updatePropertyFlagsWithoutTransitions(
      runtime->makeHandle(selfHandle->clazz_),
      runtime,
      flagsToClear,
      flagsToSet,
      props);
  selfHandle->clazz_.set(runtime, *newClazz, &runtime->getHeap());
}

/// [[IsExtensible]], dispatching to the proxy trap when needed.
CallResult<bool> JSObject::isExtensible(
    PseudoHandle<JSObject> self,
    Runtime *runtime) {
  if (LLVM_UNLIKELY(self->isProxyObject())) {
    return JSProxy::isExtensible(runtime->makeHandle(std::move(self)), runtime);
  }
  return self->isExtensible();
}

/// Object.isSealed(): true iff non-extensible and every own (named and
/// indexed) property is non-configurable. Caches a positive result in the
/// sealed flag.
bool JSObject::isSealed(PseudoHandle<JSObject> self, Runtime *runtime) {
  if (self->flags_.sealed)
    return true;
  if (!self->flags_.noExtend)
    return false;

  auto selfHandle = runtime->makeHandle(std::move(self));

  if (!HiddenClass::areAllNonConfigurable(
          runtime->makeHandle(selfHandle->clazz_), runtime)) {
    return false;
  }

  if (!checkAllOwnIndexed(
          *selfHandle,
          runtime,
          ObjectVTable::CheckAllOwnIndexedMode::NonConfigurable)) {
    return false;
  }

  // Now that we know we are sealed, set the flag.
  selfHandle->flags_.sealed = true;
  return true;
}

/// Object.isFrozen(): true iff non-extensible and every own property is
/// read-only and non-configurable. Caches a positive result in the flags.
bool JSObject::isFrozen(PseudoHandle<JSObject> self, Runtime *runtime) {
  if (self->flags_.frozen)
    return true;
  if (!self->flags_.noExtend)
    return false;

  auto selfHandle = runtime->makeHandle(std::move(self));

  if (!HiddenClass::areAllReadOnly(
          runtime->makeHandle(selfHandle->clazz_), runtime)) {
    return false;
  }

  if (!checkAllOwnIndexed(
          *selfHandle, runtime, ObjectVTable::CheckAllOwnIndexedMode::ReadOnly)) {
    return false;
  }

  // Now that we know we are frozen, cache that in the flags.
  selfHandle->flags_.frozen = true;
  selfHandle->flags_.sealed = true;
  return true;
}

/// Add a brand-new own property (the property must not exist yet), applying
/// the extensibility check and translating DefinePropertyFlags into the
/// stored PropertyFlags.
CallResult<bool> JSObject::addOwnProperty(
    Handle<JSObject> selfHandle,
    Runtime *runtime,
    SymbolID name,
    DefinePropertyFlags dpFlags,
    Handle<> valueOrAccessor,
    PropOpFlags opFlags) {
  /// Can we add more properties?
  if (!selfHandle->isExtensible() && !opFlags.getInternalForce()) {
    if (opFlags.getThrowOnError()) {
      return runtime->raiseTypeError(
          TwineChar16("Cannot add new property '") +
          runtime->getIdentifierTable().getStringViewForDev(runtime, name) +
          "'");
    }
    return false;
  }

  PropertyFlags flags{};

  // Accessors don't set writeable.
  if (dpFlags.isAccessor()) {
    dpFlags.setWritable = 0;
    flags.accessor = 1;
  }

  // Override the default flags if specified.
  if (dpFlags.setEnumerable)
    flags.enumerable = dpFlags.enumerable;
  if (dpFlags.setWritable)
    flags.writable = dpFlags.writable;
  if (dpFlags.setConfigurable)
    flags.configurable = dpFlags.configurable;
  flags.internalSetter = dpFlags.enableInternalSetter;

  if (LLVM_UNLIKELY(
          addOwnPropertyImpl(
              selfHandle, runtime, name, flags, valueOrAccessor) ==
          ExecutionStatus::EXCEPTION)) {
    return ExecutionStatus::EXCEPTION;
  }

  return true;
}

/// Low-level addition of an own property: performs the hidden-class
/// transition, allocates slot storage, and stores the value/accessor.
/// Performs no extensibility or existence checks — callers must have done
/// them already.
ExecutionStatus JSObject::addOwnPropertyImpl(
    Handle<JSObject> selfHandle,
    Runtime *runtime,
    SymbolID name,
    PropertyFlags propertyFlags,
    Handle<> valueOrAccessor) {
  assert(
      !selfHandle->flags_.proxyObject &&
      "Internal properties cannot be added to Proxy objects");
  // Add a new property to the class.
  // TODO: if we check for OOM here in the future, we must undo the slot
  // allocation.
  auto addResult = HiddenClass::addProperty(
      runtime->makeHandle(selfHandle->clazz_), runtime, name, propertyFlags);
  if (LLVM_UNLIKELY(addResult == ExecutionStatus::EXCEPTION)) {
    return ExecutionStatus::EXCEPTION;
  }
  selfHandle->clazz_.set(runtime, *addResult->first, &runtime->getHeap());

  allocateNewSlotStorage(
      selfHandle, runtime, addResult->second, valueOrAccessor);

  // If this is an index-like property, we need to clear the fast path flags.
  if (LLVM_UNLIKELY(
          selfHandle->clazz_.getNonNull(runtime)->getHasIndexLikeProperties()))
    selfHandle->flags_.fastIndexProperties = false;

  return ExecutionStatus::RETURNED;
}

/// Update an existing own property according to \p dpFlags
/// (ECMA-262 ValidateAndApplyPropertyDescriptor): validate the requested
/// change, transition the hidden class if flags changed, then store the new
/// value/accessor if required.
CallResult<bool> JSObject::updateOwnProperty(
    Handle<JSObject> selfHandle,
    Runtime *runtime,
    SymbolID name,
    HiddenClass::PropertyPos propertyPos,
    NamedPropertyDescriptor desc,
    const DefinePropertyFlags dpFlags,
    Handle<> valueOrAccessor,
    PropOpFlags opFlags) {
  auto updateStatus = checkPropertyUpdate(
      runtime,
      desc.flags,
      dpFlags,
      getNamedSlotValue(selfHandle.get(), runtime, desc),
      valueOrAccessor,
      opFlags);
  if (updateStatus == ExecutionStatus::EXCEPTION)
    return ExecutionStatus::EXCEPTION;
  if (updateStatus->first == PropertyUpdateStatus::failed)
    return false;

  // If the property flags changed, update them.
  if (updateStatus->second != desc.flags) {
    desc.flags = updateStatus->second;
    auto newClazz = HiddenClass::updateProperty(
        runtime->makeHandle(selfHandle->clazz_),
        runtime,
        propertyPos,
        desc.flags);
    selfHandle->clazz_.set(runtime, *newClazz, &runtime->getHeap());
  }

  if (updateStatus->first == PropertyUpdateStatus::done)
    return true;
  assert(
      updateStatus->first == PropertyUpdateStatus::needSet &&
      "unexpected PropertyUpdateStatus");

  if (dpFlags.setValue) {
    if (LLVM_LIKELY(!desc.flags.internalSetter))
      setNamedSlotValue(selfHandle.get(), runtime, desc, valueOrAccessor.get());
    else
      return internalSetter(
          selfHandle, runtime, name, desc, valueOrAccessor, opFlags);
  } else if (dpFlags.isAccessor()) {
    setNamedSlotValue(selfHandle.get(), runtime, desc, valueOrAccessor.get());
  } else {
    // If checkPropertyUpdate() returned needSet, but there is no value or
    // accessor, clear the value.
setNamedSlotValue( selfHandle.get(), runtime, desc, HermesValue::encodeUndefinedValue()); } return true; } CallResult<std::pair<JSObject::PropertyUpdateStatus, PropertyFlags>> JSObject::checkPropertyUpdate( Runtime *runtime, const PropertyFlags currentFlags, DefinePropertyFlags dpFlags, const HermesValue curValueOrAccessor, Handle<> valueOrAccessor, PropOpFlags opFlags) { // 8.12.9 [5] Return true, if every field in Desc is absent. if (dpFlags.isEmpty()) return std::make_pair(PropertyUpdateStatus::done, currentFlags); assert( (!dpFlags.isAccessor() || (!dpFlags.setWritable && !dpFlags.writable)) && "can't set both accessor and writable"); assert( !dpFlags.enableInternalSetter && "cannot change the value of internalSetter"); // 8.12.9 [6] Return true, if every field in Desc also occurs in current and // the value of every field in Desc is the same value as the corresponding // field in current when compared using the SameValue algorithm (9.12). // TODO: this would probably be much more efficient with bitmasks. if ((!dpFlags.setEnumerable || dpFlags.enumerable == currentFlags.enumerable) && (!dpFlags.setConfigurable || dpFlags.configurable == currentFlags.configurable)) { if (dpFlags.isAccessor()) { if (currentFlags.accessor) { auto *curAccessor = vmcast<PropertyAccessor>(curValueOrAccessor); auto *newAccessor = vmcast<PropertyAccessor>(valueOrAccessor.get()); if ((!dpFlags.setGetter || curAccessor->getter == newAccessor->getter) && (!dpFlags.setSetter || curAccessor->setter == newAccessor->setter)) { return std::make_pair(PropertyUpdateStatus::done, currentFlags); } } } else { if (!currentFlags.accessor && (!dpFlags.setValue || isSameValue(curValueOrAccessor, valueOrAccessor.get())) && (!dpFlags.setWritable || dpFlags.writable == currentFlags.writable)) { return std::make_pair(PropertyUpdateStatus::done, currentFlags); } } } // 8.12.9 [7] // If the property is not configurable, some aspects are not changeable. 
if (!currentFlags.configurable) { // Trying to change non-configurable to configurable? if (dpFlags.configurable) { if (opFlags.getThrowOnError()) { return runtime->raiseTypeError( "property is not configurable"); // TODO: better message. } return std::make_pair(PropertyUpdateStatus::failed, PropertyFlags{}); } // Trying to change the enumerability of non-configurable property? if (dpFlags.setEnumerable && dpFlags.enumerable != currentFlags.enumerable) { if (opFlags.getThrowOnError()) { return runtime->raiseTypeError( "property is not configurable"); // TODO: better message. } return std::make_pair(PropertyUpdateStatus::failed, PropertyFlags{}); } } PropertyFlags newFlags = currentFlags; // 8.12.9 [8] If IsGenericDescriptor(Desc) is true, then no further validation // is required. if (!(dpFlags.setValue || dpFlags.setWritable || dpFlags.setGetter || dpFlags.setSetter)) { // Do nothing } // 8.12.9 [9] // Changing between accessor and data descriptor? else if (currentFlags.accessor != dpFlags.isAccessor()) { if (!currentFlags.configurable) { if (opFlags.getThrowOnError()) { return runtime->raiseTypeError( "property is not configurable"); // TODO: better message. } return std::make_pair(PropertyUpdateStatus::failed, PropertyFlags{}); } // If we change from accessor to data descriptor, Preserve the existing // values of the converted property’s [[Configurable]] and [[Enumerable]] // attributes and set the rest of the property’s attributes to their default // values. // If it's the other way around, since the accessor doesn't have the // [[Writable]] attribute, do nothing. newFlags.writable = 0; // If we are changing from accessor to non-accessor, we must set a new // value. if (!dpFlags.isAccessor()) dpFlags.setValue = 1; } // 8.12.9 [10] if both are data descriptors. else if (!currentFlags.accessor) { if (!currentFlags.configurable) { if (!currentFlags.writable) { // If the current property is not writable, but the new one is. 
if (dpFlags.writable) { if (opFlags.getThrowOnError()) { return runtime->raiseTypeError( "property is not configurable"); // TODO: better message. } return std::make_pair(PropertyUpdateStatus::failed, PropertyFlags{}); } // If we are setting a different value. if (dpFlags.setValue && !isSameValue(curValueOrAccessor, valueOrAccessor.get())) { if (opFlags.getThrowOnError()) { return runtime->raiseTypeError( "property is not writable"); // TODO: better message. } return std::make_pair(PropertyUpdateStatus::failed, PropertyFlags{}); } } } } // 8.12.9 [11] Both are accessors. else { auto *curAccessor = vmcast<PropertyAccessor>(curValueOrAccessor); auto *newAccessor = vmcast<PropertyAccessor>(valueOrAccessor.get()); // If not configurable, make sure that nothing is changing. if (!currentFlags.configurable) { if ((dpFlags.setGetter && newAccessor->getter != curAccessor->getter) || (dpFlags.setSetter && newAccessor->setter != curAccessor->setter)) { if (opFlags.getThrowOnError()) { return runtime->raiseTypeError( "property is not configurable"); // TODO: better message. } return std::make_pair(PropertyUpdateStatus::failed, PropertyFlags{}); } } // If not setting the getter or the setter, re-use the current one. if (!dpFlags.setGetter) newAccessor->getter.set( runtime, curAccessor->getter, &runtime->getHeap()); if (!dpFlags.setSetter) newAccessor->setter.set( runtime, curAccessor->setter, &runtime->getHeap()); } // 8.12.9 [12] For each attribute field of Desc that is present, set the // correspondingly named attribute of the property named P of object O to the // value of the field. 
if (dpFlags.setEnumerable) newFlags.enumerable = dpFlags.enumerable; if (dpFlags.setWritable) newFlags.writable = dpFlags.writable; if (dpFlags.setConfigurable) newFlags.configurable = dpFlags.configurable; if (dpFlags.setValue) newFlags.accessor = false; else if (dpFlags.isAccessor()) newFlags.accessor = true; else return std::make_pair(PropertyUpdateStatus::done, newFlags); return std::make_pair(PropertyUpdateStatus::needSet, newFlags); } CallResult<bool> JSObject::internalSetter( Handle<JSObject> selfHandle, Runtime *runtime, SymbolID name, NamedPropertyDescriptor /*desc*/, Handle<> value, PropOpFlags opFlags) { if (vmisa<JSArray>(selfHandle.get())) { if (name == Predefined::getSymbolID(Predefined::length)) { return JSArray::setLength( Handle<JSArray>::vmcast(selfHandle), runtime, value, opFlags); } } llvm_unreachable("unhandled property in Object::internalSetter()"); } namespace { /// Helper function to add all the property names of an object to an /// array, starting at the given index. Only enumerable properties are /// incluced. Returns the index after the last property added, but... CallResult<uint32_t> appendAllPropertyNames( Handle<JSObject> obj, Runtime *runtime, MutableHandle<BigStorage> &arr, uint32_t beginIndex) { uint32_t size = beginIndex; // We know that duplicate property names can only exist between objects in // the prototype chain. Hence there should not be duplicated properties // before we start to look at any prototype. bool needDedup = false; MutableHandle<> prop(runtime); MutableHandle<JSObject> head(runtime, obj.get()); MutableHandle<StringPrimitive> tmpVal{runtime}; while (head.get()) { GCScope gcScope(runtime); // enumerableProps will contain all enumerable own properties from obj. // Impl note: this is the only place where getOwnPropertyKeys will be // called without IncludeNonEnumerable on a Proxy. 
Everywhere else, // trap ordering is specified but ES9 13.7.5.15 says "The mechanics and // order of enumerating the properties is not specified", which is // unusual. auto cr = JSObject::getOwnPropertyNames(head, runtime, true /* onlyEnumerable */); if (LLVM_UNLIKELY(cr == ExecutionStatus::EXCEPTION)) { return ExecutionStatus::EXCEPTION; } auto enumerableProps = *cr; auto marker = gcScope.createMarker(); for (unsigned i = 0, e = enumerableProps->getEndIndex(); i < e; ++i) { gcScope.flushToMarker(marker); prop = enumerableProps->at(runtime, i); if (!needDedup) { // If no dedup is needed, add it directly. if (LLVM_UNLIKELY( BigStorage::push_back(arr, runtime, prop) == ExecutionStatus::EXCEPTION)) { return ExecutionStatus::EXCEPTION; } ++size; continue; } // Otherwise loop through all existing properties and check if we // have seen it before. bool dupFound = false; if (prop->isNumber()) { for (uint32_t j = beginIndex; j < size && !dupFound; ++j) { HermesValue val = arr->at(j); if (val.isNumber()) { dupFound = val.getNumber() == prop->getNumber(); } else { // val is string, prop is number. tmpVal = val.getString(); auto valNum = toArrayIndex( StringPrimitive::createStringView(runtime, tmpVal)); dupFound = valNum && valNum.getValue() == prop->getNumber(); } } } else { for (uint32_t j = beginIndex; j < size && !dupFound; ++j) { HermesValue val = arr->at(j); if (val.isNumber()) { // val is number, prop is string. auto propNum = toArrayIndex(StringPrimitive::createStringView( runtime, Handle<StringPrimitive>::vmcast(prop))); dupFound = propNum && (propNum.getValue() == val.getNumber()); } else { dupFound = val.getString()->equals(prop->getString()); } } } if (LLVM_LIKELY(!dupFound)) { if (LLVM_UNLIKELY( BigStorage::push_back(arr, runtime, prop) == ExecutionStatus::EXCEPTION)) { return ExecutionStatus::EXCEPTION; } ++size; } } // Continue to follow the prototype chain. 
CallResult<PseudoHandle<JSObject>> parentRes = JSObject::getPrototypeOf(head, runtime); if (LLVM_UNLIKELY(parentRes == ExecutionStatus::EXCEPTION)) { return ExecutionStatus::EXCEPTION; } head = parentRes->get(); needDedup = true; } return size; } /// Adds the hidden classes of the prototype chain of obj to arr, /// starting with the prototype of obj at index 0, etc., and /// terminates with null. /// /// \param obj The object whose prototype chain should be output /// \param[out] arr The array where the classes will be appended. This /// array is cleared if any object is unsuitable for caching. ExecutionStatus setProtoClasses( Runtime *runtime, Handle<JSObject> obj, MutableHandle<BigStorage> &arr) { // Layout of a JSArray stored in the for-in cache: // [class(proto(obj)), class(proto(proto(obj))), ..., null, prop0, prop1, ...] if (!obj->shouldCacheForIn(runtime)) { arr->clear(runtime); return ExecutionStatus::RETURNED; } MutableHandle<JSObject> head(runtime, obj->getParent(runtime)); MutableHandle<> clazz(runtime); GCScopeMarkerRAII marker{runtime}; while (head.get()) { if (!head->shouldCacheForIn(runtime)) { arr->clear(runtime); return ExecutionStatus::RETURNED; } if (JSObject::Helper::flags(*head).lazyObject) { // Ensure all properties have been initialized before caching the hidden // class. Not doing this will result in changes to the hidden class // when getOwnPropertyKeys is called later. JSObject::initializeLazyObject(runtime, head); } clazz = HermesValue::encodeObjectValue(head->getClass(runtime)); if (LLVM_UNLIKELY( BigStorage::push_back(arr, runtime, clazz) == ExecutionStatus::EXCEPTION)) { return ExecutionStatus::EXCEPTION; } head = head->getParent(runtime); marker.flush(); } clazz = HermesValue::encodeNullValue(); return BigStorage::push_back(arr, runtime, clazz); } /// Verifies that the classes of obj's prototype chain still matches those /// previously prefixed to arr by setProtoClasses. 
/// /// \param obj The object whose prototype chain should be verified /// \param arr Array previously populated by setProtoClasses /// \return The index after the terminating null if everything matches, /// otherwise 0. uint32_t matchesProtoClasses( Runtime *runtime, Handle<JSObject> obj, Handle<BigStorage> arr) { MutableHandle<JSObject> head(runtime, obj->getParent(runtime)); uint32_t i = 0; while (head.get()) { HermesValue protoCls = arr->at(i++); if (protoCls.isNull() || protoCls.getObject() != head->getClass(runtime) || head->isProxyObject()) { return 0; } head = head->getParent(runtime); } // The chains must both end at the same point. if (head || !arr->at(i++).isNull()) { return 0; } assert(i > 0 && "success should be positive"); return i; } } // namespace CallResult<Handle<BigStorage>> getForInPropertyNames( Runtime *runtime, Handle<JSObject> obj, uint32_t &beginIndex, uint32_t &endIndex) { Handle<HiddenClass> clazz(runtime, obj->getClass(runtime)); // Fast case: Check the cache. MutableHandle<BigStorage> arr(runtime, clazz->getForInCache(runtime)); if (arr) { beginIndex = matchesProtoClasses(runtime, obj, arr); if (beginIndex) { // Cache is valid for this object, so use it. endIndex = arr->size(); return arr; } // Invalid for this object. We choose to clear the cache since the // changes to the prototype chain probably affect other objects too. clazz->clearForInCache(runtime); // Clear arr to slightly reduce risk of OOM from allocation below. arr = nullptr; } // Slow case: Build the array of properties. auto ownPropEstimate = clazz->getNumProperties(); auto arrRes = obj->shouldCacheForIn(runtime) ? 
BigStorage::createLongLived(runtime, ownPropEstimate) : BigStorage::create(runtime, ownPropEstimate); if (LLVM_UNLIKELY(arrRes == ExecutionStatus::EXCEPTION)) { return ExecutionStatus::EXCEPTION; } arr = std::move(*arrRes); if (setProtoClasses(runtime, obj, arr) == ExecutionStatus::EXCEPTION) { return ExecutionStatus::EXCEPTION; } beginIndex = arr->size(); // If obj or any of its prototypes are unsuitable for caching, then // beginIndex is 0 and we return an array with only the property names. bool canCache = beginIndex; auto end = appendAllPropertyNames(obj, runtime, arr, beginIndex); if (end == ExecutionStatus::EXCEPTION) { return ExecutionStatus::EXCEPTION; } endIndex = *end; // Avoid degenerate memory explosion: if > 75% of the array is properties // or classes from prototypes, then don't cache it. const bool tooMuchProto = *end / 4 > ownPropEstimate; if (canCache && !tooMuchProto) { assert(beginIndex > 0 && "cached array must start with proto classes"); #ifdef HERMES_SLOW_DEBUG assert(beginIndex == matchesProtoClasses(runtime, obj, arr) && "matches"); #endif clazz->setForInCache(*arr, runtime); } return arr; } //===----------------------------------------------------------------------===// // class PropertyAccessor VTable PropertyAccessor::vt{CellKind::PropertyAccessorKind, cellSize<PropertyAccessor>()}; void PropertyAccessorBuildMeta(const GCCell *cell, Metadata::Builder &mb) { const auto *self = static_cast<const PropertyAccessor *>(cell); mb.addField("getter", &self->getter); mb.addField("setter", &self->setter); } #ifdef HERMESVM_SERIALIZE PropertyAccessor::PropertyAccessor(Deserializer &d) : GCCell(&d.getRuntime()->getHeap(), &vt) { d.readRelocation(&getter, RelocationKind::GCPointer); d.readRelocation(&setter, RelocationKind::GCPointer); } void PropertyAccessorSerialize(Serializer &s, const GCCell *cell) { auto *self = vmcast<const PropertyAccessor>(cell); s.writeRelocation(self->getter.get(s.getRuntime())); 
s.writeRelocation(self->setter.get(s.getRuntime())); s.endObject(cell); } void PropertyAccessorDeserialize(Deserializer &d, CellKind kind) { assert(kind == CellKind::PropertyAccessorKind && "Expected PropertyAccessor"); void *mem = d.getRuntime()->alloc(cellSize<PropertyAccessor>()); auto *cell = new (mem) PropertyAccessor(d); d.endObject(cell); } #endif CallResult<HermesValue> PropertyAccessor::create( Runtime *runtime, Handle<Callable> getter, Handle<Callable> setter) { void *mem = runtime->alloc(cellSize<PropertyAccessor>()); return HermesValue::encodeObjectValue( new (mem) PropertyAccessor(runtime, *getter, *setter)); } } // namespace vm } // namespace hermes
./CrossVul/dataset_final_sorted/CWE-843/cpp/good_4255_1
crossvul-cpp_data_good_842_0
/*
 *  Copyright (c) 2018-present, Facebook, Inc.
 *  All rights reserved.
 *
 *  This source code is licensed under the BSD-style license found in the
 *  LICENSE file in the root directory of this source tree.
 */

// NOTE(review): this chunk was whitespace-mangled; the token stream below is
// unchanged, only formatting and comments were added.

#include <fizz/record/EncryptedRecordLayer.h>

#include <fizz/crypto/aead/IOBufUtil.h>

namespace fizz {

using ContentTypeType = typename std::underlying_type<ContentType>::type;
using ProtocolVersionType =
    typename std::underlying_type<ProtocolVersion>::type;

// TLS 1.3 caps ciphertext at 2^14 plaintext + 256 bytes of expansion.
static constexpr uint16_t kMaxEncryptedRecordSize = 0x4000 + 256; // 16k + 256
// TLSCiphertext header: opaque type (1) + legacy version (2) + length (2).
static constexpr size_t kEncryptedHeaderSize =
    sizeof(ContentType) + sizeof(ProtocolVersion) + sizeof(uint16_t);

EncryptedReadRecordLayer::EncryptedReadRecordLayer(
    EncryptionLevel encryptionLevel)
    : encryptionLevel_(encryptionLevel) {}

/// Consume one complete TLSCiphertext record from \p buf and return its
/// decrypted contents. Returns folly::none if a full record has not yet
/// arrived. Plaintext change_cipher_spec records are silently skipped;
/// plaintext alerts and malformed lengths throw.
folly::Optional<Buf> EncryptedReadRecordLayer::getDecryptedBuf(
    folly::IOBufQueue& buf) {
  while (true) {
    // Cache the front buffer, calling front may invoke and update
    // of the tail cache.
    auto frontBuf = buf.front();
    folly::io::Cursor cursor(frontBuf);

    if (buf.empty() || !cursor.canAdvance(kEncryptedHeaderSize)) {
      return folly::none;
    }
    // Snapshot the 5-byte record header: it doubles as AEAD additional data.
    std::array<uint8_t, kEncryptedHeaderSize> ad;
    folly::io::Cursor adCursor(cursor);
    adCursor.pull(ad.data(), ad.size());
    folly::IOBuf adBuf{folly::IOBuf::wrapBufferAsValue(folly::range(ad))};

    auto contentType =
        static_cast<ContentType>(cursor.readBE<ContentTypeType>());
    cursor.skip(sizeof(ProtocolVersion));

    auto length = cursor.readBE<uint16_t>();
    if (length == 0) {
      throw std::runtime_error("received 0 length encrypted record");
    }
    if (length > kMaxEncryptedRecordSize) {
      throw std::runtime_error("received too long encrypted record");
    }
    auto consumedBytes = cursor - frontBuf;
    if (buf.chainLength() < consumedBytes + length) {
      // Record not fully buffered yet.
      return folly::none;
    }

    if (contentType == ContentType::alert && length == 2) {
      auto alert = decode<Alert>(cursor);
      throw std::runtime_error(folly::to<std::string>(
          "received plaintext alert in encrypted record: ",
          toString(alert.description)));
    }

    // If we already know that the length of the buffer is the
    // same as the number of bytes we need, move the entire buffer.
    std::unique_ptr<folly::IOBuf> encrypted;
    if (buf.chainLength() == consumedBytes + length) {
      encrypted = buf.move();
    } else {
      encrypted = buf.split(consumedBytes + length);
    }
    trimStart(*encrypted, consumedBytes);

    if (contentType == ContentType::change_cipher_spec) {
      encrypted->coalesce();
      // Middlebox-compatibility CCS must be exactly one 0x01 byte.
      if (encrypted->length() == 1 && *encrypted->data() == 0x01) {
        continue;
      } else {
        throw FizzException(
            "received ccs", AlertDescription::illegal_parameter);
      }
    }

    TLSMessage msg;
    if (seqNum_ == std::numeric_limits<uint64_t>::max()) {
      throw std::runtime_error("max read seq num");
    }
    if (skipFailedDecryption_) {
      // Early-data mode: records that fail to decrypt are discarded rather
      // than fatal; the sequence number only advances on success.
      auto decryptAttempt = aead_->tryDecrypt(
          std::move(encrypted),
          useAdditionalData_ ? &adBuf : nullptr,
          seqNum_);
      if (decryptAttempt) {
        seqNum_++;
        skipFailedDecryption_ = false;
        return decryptAttempt;
      } else {
        continue;
      }
    } else {
      return aead_->decrypt(
          std::move(encrypted),
          useAdditionalData_ ? &adBuf : nullptr,
          seqNum_++);
    }
  }
}

/// Decrypt the next record and strip TLS 1.3 inner-plaintext padding: scan
/// backwards for the last non-zero octet, which is the real content type;
/// everything after it is zero padding.
folly::Optional<TLSMessage> EncryptedReadRecordLayer::read(
    folly::IOBufQueue& buf) {
  auto decryptedBuf = getDecryptedBuf(buf);
  if (!decryptedBuf) {
    return folly::none;
  }
  TLSMessage msg{};
  // Iterate over the buffers while trying to find
  // the first non-zero octet. This is much faster than
  // first iterating and then trimming.
  auto currentBuf = decryptedBuf->get();
  bool nonZeroFound = false;
  do {
    // Walk the IOBuf chain from the tail backwards (prev of head is tail).
    currentBuf = currentBuf->prev();
    size_t i = currentBuf->length();
    while (i > 0 && !nonZeroFound) {
      nonZeroFound = (currentBuf->data()[i - 1] != 0);
      i--;
    }
    if (nonZeroFound) {
      msg.type = static_cast<ContentType>(currentBuf->data()[i]);
    }
    currentBuf->trimEnd(currentBuf->length() - i);
  } while (!nonZeroFound && currentBuf != decryptedBuf->get());
  if (!nonZeroFound) {
    throw std::runtime_error("No content type found");
  }
  msg.fragment = std::move(*decryptedBuf);

  switch (msg.type) {
    case ContentType::handshake:
    case ContentType::alert:
    case ContentType::application_data:
      break;
    default:
      throw std::runtime_error(folly::to<std::string>(
          "received encrypted content type ",
          static_cast<ContentTypeType>(msg.type)));
  }

  if (!msg.fragment || msg.fragment->empty()) {
    // Only application_data may legitimately be empty (keep-alive padding).
    if (msg.type == ContentType::application_data) {
      msg.fragment = folly::IOBuf::create(0);
    } else {
      throw std::runtime_error("received empty fragment");
    }
  }
  return msg;
}

EncryptionLevel EncryptedReadRecordLayer::getEncryptionLevel() const {
  return encryptionLevel_;
}

EncryptedWriteRecordLayer::EncryptedWriteRecordLayer(
    EncryptionLevel encryptionLevel)
    : encryptionLevel_(encryptionLevel) {}

/// Fragment \p msg into one or more TLSCiphertext records: append the inner
/// content-type octet, encrypt with the record header as additional data,
/// and prepend the plaintext header, reusing buffer head/tail room where
/// possible to avoid copies.
TLSContent EncryptedWriteRecordLayer::write(TLSMessage&& msg) const {
  folly::IOBufQueue queue;
  queue.append(std::move(msg.fragment));
  std::unique_ptr<folly::IOBuf> outBuf;
  std::array<uint8_t, kEncryptedHeaderSize> headerBuf;
  auto header = folly::IOBuf::wrapBufferAsValue(folly::range(headerBuf));
  aead_->setEncryptedBufferHeadroom(kEncryptedHeaderSize);
  while (!queue.empty()) {
    auto dataBuf = getBufToEncrypt(queue);
    // Currently we never send padding.

    // check if we have enough room to add the encrypted footer.
    if (!dataBuf->isShared() &&
        dataBuf->prev()->tailroom() >= sizeof(ContentType)) {
      // extend it and add it
      folly::io::Appender appender(dataBuf.get(), 0);
      appender.writeBE(static_cast<ContentTypeType>(msg.type));
    } else {
      // not enough or shared - let's add enough for the tag as well
      auto encryptedFooter = folly::IOBuf::create(
          sizeof(ContentType) + aead_->getCipherOverhead());
      folly::io::Appender appender(encryptedFooter.get(), 0);
      appender.writeBE(static_cast<ContentTypeType>(msg.type));
      dataBuf->prependChain(std::move(encryptedFooter));
    }

    if (seqNum_ == std::numeric_limits<uint64_t>::max()) {
      throw std::runtime_error("max write seq num");
    }

    // we will either be able to memcpy directly into the ciphertext or
    // need to create a new buf to insert before the ciphertext but we need
    // it for additional data
    header.clear();
    folly::io::Appender appender(&header, 0);
    appender.writeBE(
        static_cast<ContentTypeType>(ContentType::application_data));
    appender.writeBE(
        static_cast<ProtocolVersionType>(ProtocolVersion::tls_1_2));
    auto ciphertextLength =
        dataBuf->computeChainDataLength() + aead_->getCipherOverhead();
    appender.writeBE<uint16_t>(ciphertextLength);

    auto cipherText = aead_->encrypt(
        std::move(dataBuf),
        useAdditionalData_ ? &header : nullptr,
        seqNum_++);

    std::unique_ptr<folly::IOBuf> record;
    if (!cipherText->isShared() &&
        cipherText->headroom() >= kEncryptedHeaderSize) {
      // prepend and then write it in
      cipherText->prepend(kEncryptedHeaderSize);
      memcpy(cipherText->writableData(), header.data(), header.length());
      record = std::move(cipherText);
    } else {
      record = folly::IOBuf::copyBuffer(header.data(), header.length());
      record->prependChain(std::move(cipherText));
    }

    if (!outBuf) {
      outBuf = std::move(record);
    } else {
      outBuf->prependChain(std::move(record));
    }
  }

  if (!outBuf) {
    outBuf = folly::IOBuf::create(0);
  }

  TLSContent content;
  content.data = std::move(outBuf);
  content.contentType = msg.type;
  content.encryptionLevel = encryptionLevel_;
  return content;
}

/// Pick the next plaintext chunk: split oversized buffers at maxRecord_,
/// otherwise try to coalesce up to desiredMinRecord_ bytes per record.
Buf EncryptedWriteRecordLayer::getBufToEncrypt(folly::IOBufQueue& queue) const {
  if (queue.front()->length() > maxRecord_) {
    return queue.splitAtMost(maxRecord_);
  } else if (queue.front()->length() >= desiredMinRecord_) {
    return queue.pop_front();
  } else {
    return queue.splitAtMost(desiredMinRecord_);
  }
}

EncryptionLevel EncryptedWriteRecordLayer::getEncryptionLevel() const {
  return encryptionLevel_;
}

} // namespace fizz
./CrossVul/dataset_final_sorted/CWE-400/cpp/good_842_0
crossvul-cpp_data_good_4323_0
/**
 * SPDX-FileCopyrightText: 2013 Albert Vaca <albertvaka@gmail.com>
 * SPDX-FileCopyrightText: 2014 Alejandro Fiestas Olivares <afiestas@kde.org>
 *
 * SPDX-License-Identifier: GPL-2.0-only OR GPL-3.0-only OR LicenseRef-KDE-Accepted-GPL
 */

#include "socketlinereader.h"

// Wire up the reader so every readyRead burst on the socket is drained
// line-by-line into the packet queue.
SocketLineReader::SocketLineReader(QSslSocket* socket, QObject* parent)
    : QObject(parent)
    , m_socket(socket)
{
    connect(m_socket, &QIODevice::readyRead, this, &SocketLineReader::dataReceived);
}

// Drain every complete line currently buffered on the socket, queueing the
// non-trivial ones, then emit readyRead once if anything is pending.
void SocketLineReader::dataReceived()
{
    for (; m_socket->canReadLine();) {
        const QByteArray lineData = m_socket->readLine();
        // A bare "\n" carries no payload; only queue lines with content.
        if (lineData.length() > 1) {
            m_packets.enqueue(lineData);
        }
    }

    // Announce queued packets to listeners in a single signal per burst.
    if (!m_packets.isEmpty()) {
        Q_EMIT readyRead();
    }
}
./CrossVul/dataset_final_sorted/CWE-400/cpp/good_4323_0
crossvul-cpp_data_bad_4321_0
/** * SPDX-FileCopyrightText: 2013 Albert Vaca <albertvaka@gmail.com> * * SPDX-License-Identifier: GPL-2.0-only OR GPL-3.0-only OR LicenseRef-KDE-Accepted-GPL */ #include "lanlinkprovider.h" #include "core_debug.h" #ifndef Q_OS_WIN #include <sys/socket.h> #include <netinet/in.h> #include <netinet/tcp.h> #include <netdb.h> #endif #include <QHostInfo> #include <QTcpServer> #include <QMetaEnum> #include <QNetworkProxy> #include <QUdpSocket> #include <QNetworkSession> #include <QNetworkConfigurationManager> #include <QSslCipher> #include <QSslConfiguration> #include <QSslKey> #include "daemon.h" #include "landevicelink.h" #include "lanpairinghandler.h" #include "kdeconnectconfig.h" #include "qtcompat_p.h" #define MIN_VERSION_WITH_SSL_SUPPORT 6 LanLinkProvider::LanLinkProvider( bool testMode, quint16 udpBroadcastPort, quint16 udpListenPort ) : m_server(new Server(this)) , m_udpSocket(this) , m_tcpPort(0) , m_udpBroadcastPort(udpBroadcastPort) , m_udpListenPort(udpListenPort) , m_testMode(testMode) , m_combineBroadcastsTimer(this) { m_combineBroadcastsTimer.setInterval(0); // increase this if waiting a single event-loop iteration is not enough m_combineBroadcastsTimer.setSingleShot(true); connect(&m_combineBroadcastsTimer, &QTimer::timeout, this, &LanLinkProvider::broadcastToNetwork); connect(&m_udpSocket, &QIODevice::readyRead, this, &LanLinkProvider::udpBroadcastReceived); m_server->setProxy(QNetworkProxy::NoProxy); connect(m_server, &QTcpServer::newConnection, this, &LanLinkProvider::newConnection); m_udpSocket.setProxy(QNetworkProxy::NoProxy); //Detect when a network interface changes status, so we announce ourselves in the new network QNetworkConfigurationManager* networkManager = new QNetworkConfigurationManager(this); connect(networkManager, &QNetworkConfigurationManager::configurationChanged, this, &LanLinkProvider::onNetworkConfigurationChanged); } void LanLinkProvider::onNetworkConfigurationChanged(const QNetworkConfiguration& config) { if (m_lastConfig != 
config && config.state() == QNetworkConfiguration::Active) { m_lastConfig = config; onNetworkChange(); } } LanLinkProvider::~LanLinkProvider() { } void LanLinkProvider::onStart() { const QHostAddress bindAddress = m_testMode? QHostAddress::LocalHost : QHostAddress::Any; bool success = m_udpSocket.bind(bindAddress, m_udpListenPort, QUdpSocket::ShareAddress); if (!success) { QAbstractSocket::SocketError sockErr = m_udpSocket.error(); // Refer to https://doc.qt.io/qt-5/qabstractsocket.html#SocketError-enum to decode socket error number QString errorMessage = QString::fromLatin1(QMetaEnum::fromType<QAbstractSocket::SocketError>().valueToKey(sockErr)); qCritical(KDECONNECT_CORE) << QLatin1String("Failed to bind UDP socket on port") << m_udpListenPort << QLatin1String("with error") << errorMessage; } Q_ASSERT(success); m_tcpPort = MIN_TCP_PORT; while (!m_server->listen(bindAddress, m_tcpPort)) { m_tcpPort++; if (m_tcpPort > MAX_TCP_PORT) { //No ports available? qCritical(KDECONNECT_CORE) << "Error opening a port in range" << MIN_TCP_PORT << "-" << MAX_TCP_PORT; m_tcpPort = 0; return; } } onNetworkChange(); qCDebug(KDECONNECT_CORE) << "LanLinkProvider started"; } void LanLinkProvider::onStop() { m_udpSocket.close(); m_server->close(); qCDebug(KDECONNECT_CORE) << "LanLinkProvider stopped"; } void LanLinkProvider::onNetworkChange() { if (m_combineBroadcastsTimer.isActive()) { qCDebug(KDECONNECT_CORE) << "Preventing duplicate broadcasts"; return; } m_combineBroadcastsTimer.start(); } //I'm in a new network, let's be polite and introduce myself void LanLinkProvider::broadcastToNetwork() { if (!m_server->isListening()) { //Not started return; } Q_ASSERT(m_tcpPort != 0); qCDebug(KDECONNECT_CORE()) << "Broadcasting identity packet"; QList<QHostAddress> destinations = getBroadcastAddresses(); NetworkPacket np; NetworkPacket::createIdentityPacket(&np); np.set(QStringLiteral("tcpPort"), m_tcpPort); #ifdef Q_OS_WIN //On Windows we need to broadcast from every local IP address to 
reach all networks QUdpSocket sendSocket; sendSocket.setProxy(QNetworkProxy::NoProxy); for (const QNetworkInterface& iface : QNetworkInterface::allInterfaces()) { if ( (iface.flags() & QNetworkInterface::IsUp) && (iface.flags() & QNetworkInterface::IsRunning) && (iface.flags() & QNetworkInterface::CanBroadcast)) { for (const QNetworkAddressEntry& ifaceAddress : iface.addressEntries()) { QHostAddress sourceAddress = ifaceAddress.ip(); if (sourceAddress.protocol() == QAbstractSocket::IPv4Protocol && sourceAddress != QHostAddress::LocalHost) { qCDebug(KDECONNECT_CORE()) << "Broadcasting as" << sourceAddress; sendBroadcasts(sendSocket, np, destinations); sendSocket.close(); } } } } #else sendBroadcasts(m_udpSocket, np, destinations); #endif } QList<QHostAddress> LanLinkProvider::getBroadcastAddresses() { const QStringList customDevices = KdeConnectConfig::instance().customDevices(); QList<QHostAddress> destinations; destinations.reserve(customDevices.length() + 1); // Default broadcast address destinations.append(m_testMode ? QHostAddress::LocalHost : QHostAddress::Broadcast); // Custom device addresses for (auto& customDevice : customDevices) { QHostAddress address(customDevice); if (address.isNull()) { qCWarning(KDECONNECT_CORE) << "Invalid custom device address" << customDevice; } else { destinations.append(address); } } return destinations; } void LanLinkProvider::sendBroadcasts( QUdpSocket& socket, const NetworkPacket& np, const QList<QHostAddress>& addresses) { const QByteArray payload = np.serialize(); for (auto& address : addresses) { socket.writeDatagram(payload, address, m_udpBroadcastPort); } } //I'm the existing device, a new device is kindly introducing itself. //I will create a TcpSocket and try to connect. This can result in either tcpSocketConnected() or connectError(). 
/// Handle incoming UDP identity broadcasts: for each valid identity packet from
/// a foreign device, open an outgoing TCP connection back to the announced port.
/// Ownership note: the heap-allocated NetworkPacket is stored in
/// m_receivedIdentityPackets keyed by the socket, and is deleted by whichever
/// slot finishes with it (connectError / tcpSocketConnected / encrypted).
/// NOTE(review): every datagram allocates a NetworkPacket and a QSslSocket and
/// starts a connection attempt, with no rate limiting or per-sender dedup — a
/// spoofed broadcast flood can exhaust sockets/memory (CWE-400); confirm
/// against upstream hardening.
void LanLinkProvider::udpBroadcastReceived()
{
    while (m_udpSocket.hasPendingDatagrams()) {
        QByteArray datagram;
        datagram.resize(m_udpSocket.pendingDatagramSize());
        QHostAddress sender;

        m_udpSocket.readDatagram(datagram.data(), datagram.size(), &sender);
        // Ignore our own loopback traffic outside of test mode.
        if (sender.isLoopback() && !m_testMode)
            continue;

        NetworkPacket* receivedPacket = new NetworkPacket(QLatin1String(""));
        bool success = NetworkPacket::unserialize(datagram, receivedPacket);

        //qCDebug(KDECONNECT_CORE) << "udp connection from " << receivedPacket->;

        //qCDebug(KDECONNECT_CORE) << "Datagram " << datagram.data() ;

        if (!success) {
            qCDebug(KDECONNECT_CORE) << "Could not unserialize UDP packet";
            delete receivedPacket;
            continue;
        }

        if (receivedPacket->type() != PACKET_TYPE_IDENTITY) {
            qCDebug(KDECONNECT_CORE) << "Received a UDP packet of wrong type" << receivedPacket->type();
            delete receivedPacket;
            continue;
        }

        if (receivedPacket->get<QString>(QStringLiteral("deviceId")) == KdeConnectConfig::instance().deviceId()) {
            //qCDebug(KDECONNECT_CORE) << "Ignoring my own broadcast";
            delete receivedPacket;
            continue;
        }

        int tcpPort = receivedPacket->get<int>(QStringLiteral("tcpPort"));

        //qCDebug(KDECONNECT_CORE) << "Received Udp identity packet from" << sender << " asking for a tcp connection on port " << tcpPort;

        QSslSocket* socket = new QSslSocket(this);
        socket->setProxy(QNetworkProxy::NoProxy);
        // Park the identity packet until the connect attempt resolves.
        m_receivedIdentityPackets[socket].np = receivedPacket;
        m_receivedIdentityPackets[socket].sender = sender;
        connect(socket, &QAbstractSocket::connected, this, &LanLinkProvider::tcpSocketConnected);
#if QT_VERSION < QT_VERSION_CHECK(5,15,0)
        connect(socket, QOverload<QAbstractSocket::SocketError>::of(&QAbstractSocket::error), this, &LanLinkProvider::connectError);
#else
        connect(socket, &QAbstractSocket::errorOccurred, this, &LanLinkProvider::connectError);
#endif
        socket->connectToHost(sender, tcpPort);
    }
}

/// Our outgoing TCP connect failed. Fall back to a "reverse connection":
/// send our identity over UDP so the remote device connects to us instead.
void LanLinkProvider::connectError(QAbstractSocket::SocketError socketError)
{
    QSslSocket* socket = qobject_cast<QSslSocket*>(sender());
    if (!socket) return;

    qCDebug(KDECONNECT_CORE) << "Socket error" << socketError;
    qCDebug(KDECONNECT_CORE) << "Fallback (1), try reverse connection (send udp packet)" << socket->errorString();
    NetworkPacket np(QLatin1String(""));
    NetworkPacket::createIdentityPacket(&np);
    np.set(QStringLiteral("tcpPort"), m_tcpPort);
    m_udpSocket.writeDatagram(np.serialize(), m_receivedIdentityPackets[socket].sender, m_udpBroadcastPort);

    //The socket we created didn't work, and we didn't manage
    //to create a LanDeviceLink from it, deleting everything.
    delete m_receivedIdentityPackets.take(socket).np;
    socket->deleteLater();
}

//We received a UDP packet and answered by connecting to them by TCP. This gets called on a successful connection.
/// Send our own identity over the fresh TCP socket, then (for SSL-capable
/// peers) start the TLS handshake in server role; the link itself is created
/// later in encrypted().
void LanLinkProvider::tcpSocketConnected()
{
    QSslSocket* socket = qobject_cast<QSslSocket*>(sender());

    if (!socket) return;
    // TODO Delete me?
#if QT_VERSION < QT_VERSION_CHECK(5,15,0)
    disconnect(socket, QOverload<QAbstractSocket::SocketError>::of(&QAbstractSocket::error), this, &LanLinkProvider::connectError);
#else
    disconnect(socket, &QAbstractSocket::errorOccurred, this, &LanLinkProvider::connectError);
#endif

    configureSocket(socket);

    // If socket disconnects due to any reason after connection, link on ssl failure
    connect(socket, &QAbstractSocket::disconnected, socket, &QObject::deleteLater);

    NetworkPacket* receivedPacket = m_receivedIdentityPackets[socket].np;
    const QString& deviceId = receivedPacket->get<QString>(QStringLiteral("deviceId"));
    //qCDebug(KDECONNECT_CORE) << "tcpSocketConnected" << socket->isWritable();

    // If network is on ssl, do not believe when they are connected, believe when handshake is completed
    NetworkPacket np2(QLatin1String(""));
    NetworkPacket::createIdentityPacket(&np2);
    socket->write(np2.serialize());
    bool success = socket->waitForBytesWritten();

    if (success) {

        qCDebug(KDECONNECT_CORE) << "TCP connection done (i'm the existing device)";

        // if ssl supported
        if (receivedPacket->get<int>(QStringLiteral("protocolVersion")) >= MIN_VERSION_WITH_SSL_SUPPORT) {

            bool isDeviceTrusted = KdeConnectConfig::instance().trustedDevices().contains(deviceId);
            configureSslSocket(socket, deviceId, isDeviceTrusted);

            qCDebug(KDECONNECT_CORE) << "Starting server ssl (I'm the client TCP socket)";

            connect(socket, &QSslSocket::encrypted, this, &LanLinkProvider::encrypted);

            connect(socket, QOverload<const QList<QSslError> &>::of(&QSslSocket::sslErrors), this, &LanLinkProvider::sslErrors);

            socket->startServerEncryption();

            return; // Return statement prevents from deleting received packet, needed in slot "encrypted"
        } else {
            qWarning() << receivedPacket->get<QString>(QStringLiteral("deviceName")) << "uses an old protocol version, this won't work";
            //addLink(deviceId, socket, receivedPacket, LanDeviceLink::Remotely);
        }

    } else {
        //I think this will never happen, but if it happens the deviceLink
        //(or the socket that is now inside it) might not be valid. Delete them.
        qCDebug(KDECONNECT_CORE) << "Fallback (2), try reverse connection (send udp packet)";
        m_udpSocket.writeDatagram(np2.serialize(), m_receivedIdentityPackets[socket].sender, m_udpBroadcastPort);
    }

    delete m_receivedIdentityPackets.take(socket).np;
    //We don't delete the socket because now it's owned by the LanDeviceLink
}

/// TLS handshake finished: create (or reuse) the LanDeviceLink for this peer.
/// Connection origin is derived from the socket's TLS role.
void LanLinkProvider::encrypted()
{
    qCDebug(KDECONNECT_CORE) << "Socket successfully established an SSL connection";

    QSslSocket* socket = qobject_cast<QSslSocket*>(sender());
    if (!socket) return;
    Q_ASSERT(socket->mode() != QSslSocket::UnencryptedMode);
    LanDeviceLink::ConnectionStarted connectionOrigin = (socket->mode() == QSslSocket::SslClientMode)? LanDeviceLink::Locally : LanDeviceLink::Remotely;

    NetworkPacket* receivedPacket = m_receivedIdentityPackets[socket].np;
    const QString& deviceId = receivedPacket->get<QString>(QStringLiteral("deviceId"));

    addLink(deviceId, socket, receivedPacket, connectionOrigin);

    // Copied from tcpSocketConnected slot, now delete received packet
    delete m_receivedIdentityPackets.take(socket).np;
}

/// TLS errors during handshake: self-signed certs are expected (peers use
/// self-signed certs) and ignored; anything else aborts the connection and
/// drops the pending identity packet.
/// NOTE(review): the socket itself is not deleted here — presumably cleaned up
/// via the earlier disconnected->deleteLater connection; confirm.
void LanLinkProvider::sslErrors(const QList<QSslError>& errors)
{
    QSslSocket* socket = qobject_cast<QSslSocket*>(sender());
    if (!socket) return;

    bool fatal = false;
    for (const QSslError& error : errors) {
        if (error.error() != QSslError::SelfSignedCertificate) {
            qCCritical(KDECONNECT_CORE) << "Disconnecting due to fatal SSL Error: " << error;
            fatal = true;
        } else {
            qCDebug(KDECONNECT_CORE) << "Ignoring self-signed cert error";
        }
    }

    if (fatal) {
        socket->disconnectFromHost();
        delete m_receivedIdentityPackets.take(socket).np;
    }
}

//I'm the new device and this is the answer to my UDP identity packet (no data received yet). They are connecting to us through TCP, and they should send an identity.
/// Accept pending TCP connections; each peer gets 1 second (single-shot timer,
/// cancelled once encrypted) to send its identity before being disconnected.
void LanLinkProvider::newConnection()
{
    qCDebug(KDECONNECT_CORE) << "LanLinkProvider newConnection";

    while (m_server->hasPendingConnections()) {
        QSslSocket* socket = m_server->nextPendingConnection();
        configureSocket(socket);
        //This socket is still managed by us (and child of the QTcpServer), if
        //it disconnects before we manage to pass it to a LanDeviceLink, it's
        //our responsibility to delete it. We do so with this connection.
        connect(socket, &QAbstractSocket::disconnected, socket, &QObject::deleteLater);
        connect(socket, &QIODevice::readyRead, this, &LanLinkProvider::dataReceived);

        QTimer* timer = new QTimer(socket);
        timer->setSingleShot(true);
        timer->setInterval(1000);
        connect(socket, &QSslSocket::encrypted, timer, &QObject::deleteLater);
        connect(timer, &QTimer::timeout, socket, [socket] {
            qCWarning(KDECONNECT_CORE) << "LanLinkProvider/newConnection: Host timed out without sending any identity." << socket->peerAddress();
            socket->disconnectFromHost();
        });
        timer->start();
    }
}

//I'm the new device and this is the answer to my UDP identity packet (data received)
/// Read one line, expect an identity packet, then start the TLS handshake in
/// client role. The identity is parked in m_receivedIdentityPackets for the
/// encrypted() slot. Oversized payloads (> 8192 bytes) are rejected outright.
void LanLinkProvider::dataReceived()
{
    QSslSocket* socket = qobject_cast<QSslSocket*>(sender());
    //the size here is arbitrary and is now at 8192 bytes. It needs to be considerably long as it includes the capabilities but there needs to be a limit
    //Tested between my systems and I get around 2000 per identity package.
    if (socket->bytesAvailable() > 8192) {
        qCWarning(KDECONNECT_CORE) << "LanLinkProvider/newConnection: Suspiciously long identity package received. Closing connection." << socket->peerAddress() << socket->bytesAvailable();
        socket->disconnectFromHost();
        return;
    }

#if QT_VERSION < QT_VERSION_CHECK(5,7,0)
    if (!socket->canReadLine())
        return;
#else
    // Transaction lets us roll back if only a partial line has arrived.
    socket->startTransaction();
#endif

    const QByteArray data = socket->readLine();

    qCDebug(KDECONNECT_CORE) << "LanLinkProvider received reply:" << data;

    NetworkPacket* np = new NetworkPacket(QLatin1String(""));
    bool success = NetworkPacket::unserialize(data, np);

#if QT_VERSION < QT_VERSION_CHECK(5,7,0)
    if (!success) {
        delete np;
        return;
    }
#else
    if (!success) {
        delete np;
        socket->rollbackTransaction();
        return;
    }
    socket->commitTransaction();
#endif

    if (np->type() != PACKET_TYPE_IDENTITY) {
        qCWarning(KDECONNECT_CORE) << "LanLinkProvider/newConnection: Expected identity, received " << np->type();
        delete np;
        return;
    }

    // Needed in "encrypted" if ssl is used, similar to "tcpSocketConnected"
    m_receivedIdentityPackets[socket].np = np;

    const QString& deviceId = np->get<QString>(QStringLiteral("deviceId"));
    //qCDebug(KDECONNECT_CORE) << "Handshaking done (i'm the new device)";

    //This socket will now be owned by the LanDeviceLink or we don't want more data to be received, forget about it
    disconnect(socket, &QIODevice::readyRead, this, &LanLinkProvider::dataReceived);

    if (np->get<int>(QStringLiteral("protocolVersion")) >= MIN_VERSION_WITH_SSL_SUPPORT) {

        bool isDeviceTrusted = KdeConnectConfig::instance().trustedDevices().contains(deviceId);
        configureSslSocket(socket, deviceId, isDeviceTrusted);

        qCDebug(KDECONNECT_CORE) << "Starting client ssl (but I'm the server TCP socket)";

        connect(socket, &QSslSocket::encrypted, this, &LanLinkProvider::encrypted);

        // Only trusted devices get strict verification; see configureSslSocket.
        if (isDeviceTrusted) {
            connect(socket, QOverload<const QList<QSslError> &>::of(&QSslSocket::sslErrors), this, &LanLinkProvider::sslErrors);
        }

        socket->startClientEncryption();

    } else {
        qWarning() << np->get<QString>(QStringLiteral("deviceName")) << "uses an old protocol version, this won't work";
        //addLink(deviceId, socket, np, LanDeviceLink::Locally);
        delete m_receivedIdentityPackets.take(socket).np;
    }
}

/// A LanDeviceLink died: drop it from m_links and retire its pairing handler.
void LanLinkProvider::deviceLinkDestroyed(QObject* destroyedDeviceLink)
{
    const QString id = destroyedDeviceLink->property("deviceId").toString();
    //qCDebug(KDECONNECT_CORE) << "deviceLinkDestroyed" << id;
    QMap< QString, LanDeviceLink* >::iterator linkIterator = m_links.find(id);
    Q_ASSERT(linkIterator != m_links.end());
    if (linkIterator != m_links.end()) {
        Q_ASSERT(linkIterator.value() == destroyedDeviceLink);
        m_links.erase(linkIterator);
        auto pairingHandler = m_pairingHandlers.take(id);
        if (pairingHandler) {
            pairingHandler->deleteLater();
        }
    }
}

/// Install our certificate/private key and cipher list on the socket.
/// Trusted devices: pin the stored peer certificate and VerifyPeer;
/// untrusted devices: QueryPeer (handshake proceeds, cert checked at pairing).
void LanLinkProvider::configureSslSocket(QSslSocket* socket, const QString& deviceId, bool isDeviceTrusted)
{
    // Setting supported ciphers manually, to match those on Android (FIXME: Test if this can be left unconfigured and still works for Android 4)
    QList<QSslCipher> socketCiphers;
    socketCiphers.append(QSslCipher(QStringLiteral("ECDHE-ECDSA-AES256-GCM-SHA384")));
    socketCiphers.append(QSslCipher(QStringLiteral("ECDHE-ECDSA-AES128-GCM-SHA256")));
    socketCiphers.append(QSslCipher(QStringLiteral("ECDHE-RSA-AES128-SHA")));

    // Configure for ssl
    QSslConfiguration sslConfig;
    sslConfig.setCiphers(socketCiphers);
    sslConfig.setLocalCertificate(KdeConnectConfig::instance().certificate());

    // Load our private key from disk; on open failure the key stays empty
    // and the handshake will fail later rather than here.
    QFile privateKeyFile(KdeConnectConfig::instance().privateKeyPath());
    QSslKey privateKey;
    if (privateKeyFile.open(QIODevice::ReadOnly)) {
        privateKey = QSslKey(privateKeyFile.readAll(), QSsl::Rsa);
    }
    privateKeyFile.close();
    sslConfig.setPrivateKey(privateKey);

    if (isDeviceTrusted) {
        QString certString = KdeConnectConfig::instance().getDeviceProperty(deviceId, QStringLiteral("certificate"), QString());
        sslConfig.setCaCertificates({QSslCertificate(certString.toLatin1())});
        sslConfig.setPeerVerifyMode(QSslSocket::VerifyPeer);
    } else {
        sslConfig.setPeerVerifyMode(QSslSocket::QueryPeer);
    }
    socket->setSslConfiguration(sslConfig);
    socket->setPeerVerifyName(deviceId);

    //Usually SSL errors are only bad for trusted devices. Uncomment this section to log errors in any case, for debugging.
    //QObject::connect(socket, static_cast<void (QSslSocket::*)(const QList<QSslError>&)>(&QSslSocket::sslErrors), [](const QList<QSslError>& errors)
    //{
    //    Q_FOREACH (const QSslError& error, errors) {
    //        qCDebug(KDECONNECT_CORE) << "SSL Error:" << error.errorString();
    //    }
    //});
}

/// Disable proxying and enable aggressive TCP keepalive so dead peers are
/// noticed quickly (options are platform-dependent, hence the #ifdefs).
void LanLinkProvider::configureSocket(QSslSocket* socket) {

    socket->setProxy(QNetworkProxy::NoProxy);

    socket->setSocketOption(QAbstractSocket::KeepAliveOption, QVariant(1));

#ifdef TCP_KEEPIDLE
    // time to start sending keepalive packets (seconds)
    int maxIdle = 10;
    setsockopt(socket->socketDescriptor(), IPPROTO_TCP, TCP_KEEPIDLE, &maxIdle, sizeof(maxIdle));
#endif

#ifdef TCP_KEEPINTVL
    // interval between keepalive packets after the initial period (seconds)
    int interval = 5;
    setsockopt(socket->socketDescriptor(), IPPROTO_TCP, TCP_KEEPINTVL, &interval, sizeof(interval));
#endif

#ifdef TCP_KEEPCNT
    // number of missed keepalive packets before disconnecting
    int count = 3;
    setsockopt(socket->socketDescriptor(), IPPROTO_TCP, TCP_KEEPCNT, &count, sizeof(count));
#endif

}

/// Create a new LanDeviceLink for deviceId, or hand the fresh socket to an
/// existing link. Emits onConnectionReceived with the peer's identity packet.
void LanLinkProvider::addLink(const QString& deviceId, QSslSocket* socket, NetworkPacket* receivedPacket, LanDeviceLink::ConnectionStarted connectionOrigin)
{
    // Socket disconnection will now be handled by LanDeviceLink
    disconnect(socket, &QAbstractSocket::disconnected, socket, &QObject::deleteLater);

    LanDeviceLink* deviceLink;
    //Do we have a link for this device already?
    QMap< QString, LanDeviceLink* >::iterator linkIterator = m_links.find(deviceId);
    if (linkIterator != m_links.end()) {
        //qCDebug(KDECONNECT_CORE) << "Reusing link to" << deviceId;
        deviceLink = linkIterator.value();
        deviceLink->reset(socket, connectionOrigin);
    } else {
        deviceLink = new LanDeviceLink(deviceId, this, socket, connectionOrigin);
        connect(deviceLink, &QObject::destroyed, this, &LanLinkProvider::deviceLinkDestroyed);
        m_links[deviceId] = deviceLink;
        if (m_pairingHandlers.contains(deviceId)) {
            //We shouldn't have a pairinghandler if we didn't have a link.
            //Crash if debug, recover if release (by setting the new devicelink to the old pairinghandler)
            // NOTE(review): this Q_ASSERT is inside the contains() branch and therefore
            // trivially true — presumably a negated condition was intended; confirm.
            Q_ASSERT(m_pairingHandlers.contains(deviceId));
            m_pairingHandlers[deviceId]->setDeviceLink(deviceLink);
        }
    }

    Q_EMIT onConnectionReceived(*receivedPacket, deviceLink);
}

/// Lazily create (and cache) the pairing handler for a link's device.
LanPairingHandler* LanLinkProvider::createPairingHandler(DeviceLink* link)
{
    LanPairingHandler* ph = m_pairingHandlers.value(link->deviceId());
    if (!ph) {
        ph = new LanPairingHandler(link);
        qCDebug(KDECONNECT_CORE) << "creating pairing handler for" << link->deviceId();
        connect (ph, &LanPairingHandler::pairingError, link, &DeviceLink::pairingError);
        m_pairingHandlers[link->deviceId()] = ph;
    }
    return ph;
}

/// UI entry point: start pairing with the given device.
void LanLinkProvider::userRequestsPair(const QString& deviceId)
{
    LanPairingHandler* ph = createPairingHandler(m_links.value(deviceId));
    ph->requestPairing();
}

/// UI entry point: unpair the given device.
void LanLinkProvider::userRequestsUnpair(const QString& deviceId)
{
    LanPairingHandler* ph = createPairingHandler(m_links.value(deviceId));
    ph->unpair();
}

/// Route an incoming pairing packet to the device's pairing handler.
void LanLinkProvider::incomingPairPacket(DeviceLink* deviceLink, const NetworkPacket& np)
{
    LanPairingHandler* ph = createPairingHandler(deviceLink);
    ph->packetReceived(np);
}
./CrossVul/dataset_final_sorted/CWE-400/cpp/bad_4321_0
crossvul-cpp_data_bad_455_0
#include "sass.hpp"
#include "parser.hpp"
#include "file.hpp"
#include "inspect.hpp"
#include "constants.hpp"
#include "util.hpp"
#include "prelexer.hpp"
#include "color_maps.hpp"
#include "sass/functions.h"
#include "error_handling.hpp"

// Notes about delayed: some ast nodes can have delayed evaluation so
// they can preserve their original semantics if needed. This is most
// prominently exhibited by the division operation, since it is not
// only a valid operation, but also a valid css statement (i.e. for
// fonts, as in `16px/24px`). When parsing lists and expression we
// unwrap single items from lists and other operations. A nested list
// must not be delayed, only the items of the first level sometimes
// are delayed (as with argument lists). To achieve this we need to
// pass status to the list parser, so this can be set correctly.
// Another case with delayed values are colors. In compressed mode
// only processed values get compressed (other are left as written).

#include <cstdlib>
#include <iostream>
#include <vector>
#include <typeinfo>

namespace Sass {
  using namespace Constants;
  using namespace Prelexer;

  // Build a Parser over a NUL-terminated buffer starting at `beg`; `source`
  // (if given) names the logical start of the buffer for source mapping.
  // A fresh root Block is pushed onto the parser's block stack.
  Parser Parser::from_c_str(const char* beg, Context& ctx, Backtraces traces, ParserState pstate, const char* source)
  {
    pstate.offset.column = 0;
    pstate.offset.line = 0;
    Parser p(ctx, pstate, traces);
    p.source = source ? source : beg;
    p.position = beg ? beg : p.source;
    p.end = p.position + strlen(p.position);
    Block_Obj root = SASS_MEMORY_NEW(Block, pstate);
    p.block_stack.push_back(root);
    root->is_root(true);
    return p;
  }

  // Same as above but with an explicit end pointer (buffer need not be
  // NUL-terminated when `end` is non-null).
  Parser Parser::from_c_str(const char* beg, const char* end, Context& ctx, Backtraces traces, ParserState pstate, const char* source)
  {
    pstate.offset.column = 0;
    pstate.offset.line = 0;
    Parser p(ctx, pstate, traces);
    p.source = source ? source : beg;
    p.position = beg ? beg : p.source;
    p.end = end ? end : p.position + strlen(p.position);
    Block_Obj root = SASS_MEMORY_NEW(Block, pstate);
    p.block_stack.push_back(root);
    root->is_root(true);
    return p;
  }

  // Skip comments and fold the pending offset into pstate so the next token
  // starts from a clean position.
  void Parser::advanceToNextToken() {
    lex < css_comments >(false);
    // advance to position
    pstate += pstate.offset;
    pstate.offset.column = 0;
    pstate.offset.line = 0;
  }

  // Convenience wrapper: parse `beg` as a standalone selector list.
  Selector_List_Obj Parser::parse_selector(const char* beg, Context& ctx, Backtraces traces, ParserState pstate, const char* source)
  {
    Parser p = Parser::from_c_str(beg, ctx, traces, pstate, source);
    // ToDo: ruby sass errors on parent references
    // ToDo: remap the source-map entries somehow
    return p.parse_selector_list(false);
  }

  // True if a newline follows `start` (or the current position) and it is not
  // immediately followed by an opening brace.
  bool Parser::peek_newline(const char* start)
  {
    return peek_linefeed(start ? start : position)
           && ! peek_css<exactly<'{'>>(start);
  }

  // Build a Parser over the span of an already-lexed token.
  Parser Parser::from_token(Token t, Context& ctx, Backtraces traces, ParserState pstate, const char* source)
  {
    Parser p(ctx, pstate, traces);
    p.source = source ? source : t.begin;
    p.position = t.begin ? t.begin : p.source;
    p.end = t.end ? t.end : p.position + strlen(p.position);
    Block_Obj root = SASS_MEMORY_NEW(Block, pstate);
    p.block_stack.push_back(root);
    root->is_root(true);
    return p;
  }

  /* main entry point to parse root block */
  Block_Obj Parser::parse()
  {

    // consume unicode BOM
    read_bom();

    // scan the input to find invalid utf8 sequences
    const char* it = utf8::find_invalid(position, end);

    // report invalid utf8
    if (it != end) {
      pstate += Offset::init(position, it);
      traces.push_back(Backtrace(pstate));
      throw Exception::InvalidSass(pstate, traces, "Invalid UTF-8 sequence");
    }

    // create a block AST node to hold children
    Block_Obj root = SASS_MEMORY_NEW(Block, pstate, 0, true);

    // check seems a bit esoteric but works
    if (ctx.resources.size() == 1) {
      // apply headers only on very first include
      ctx.apply_custom_headers(root, path, pstate);
    }

    // parse children nodes
    block_stack.push_back(root);
    parse_block_nodes(true);
    block_stack.pop_back();

    // update final position
    root->update_pstate(pstate);

    if (position != end) {
      css_error("Invalid CSS", " after ", ": expected selector or at-rule, was ");
    }

    return root;
  }

  // convenience function for block parsing
  // will create a new block ad-hoc for you
  // this is the base block parsing function
  Block_Obj Parser::parse_css_block(bool is_root)
  {

    // parse comments before block
    // lex < optional_css_comments >();

    // lex mandatory opener or error out
    if (!lex_css < exactly<'{'> >()) {
      css_error("Invalid CSS", " after ", ": expected \"{\", was ");
    }
    // create new block and push to the selector stack
    Block_Obj block = SASS_MEMORY_NEW(Block, pstate, 0, is_root);
    block_stack.push_back(block);

    if (!parse_block_nodes(is_root)) css_error("Invalid CSS", " after ", ": expected \"}\", was ");

    if (!lex_css < exactly<'}'> >()) {
      css_error("Invalid CSS", " after ", ": expected \"}\", was ");
    }

    // update for end position
    // this seems to be done somewhere else
    // but that fixed selector schema issue
    // block->update_pstate(pstate);

    // parse comments after block
    // lex < optional_css_comments >();

    block_stack.pop_back();

    return block;
  }

  // convenience function for block parsing
  // will create a new block ad-hoc for you
  // also updates the `in_at_root` flag
  Block_Obj Parser::parse_block(bool is_root)
  {
    return parse_css_block(is_root);
  }

  // the main block parsing function
  // parses stuff between `{` and `}`
  bool Parser::parse_block_nodes(bool is_root)
  {

    // loop until end of string
    while (position < end) {

      // we should be able to refactor this
      parse_block_comments();
      lex < css_whitespace >();

      if (lex < exactly<';'> >()) continue;
      if (peek < end_of_file >()) return true;
      if (peek < exactly<'}'> >()) return true;

      if (parse_block_node(is_root)) continue;

      parse_block_comments();

      if (lex_css < exactly<';'> >()) continue;
      if (peek_css < end_of_file >()) return true;
      if (peek_css < exactly<'}'> >()) return true;

      // illegal sass
      return false;
    }
    // return success
    return true;
  }

  // parser for a single node in a block
  // semicolons must be lexed beforehand
  bool Parser::parse_block_node(bool is_root) {

    Block_Obj block = block_stack.back();

    parse_block_comments();

    // throw away white-space
    // includes line comments
    lex < css_whitespace >();

    Lookahead lookahead_result;

    // also parse block comments

    // first parse everything that is allowed in functions
    if (lex < variable >(true)) { block->append(parse_assignment()); }
    else if (lex < kwd_err >(true)) { block->append(parse_error()); }
    else if (lex < kwd_dbg >(true)) { block->append(parse_debug()); }
    else if (lex < kwd_warn >(true)) { block->append(parse_warning()); }
    else if (lex < kwd_if_directive >(true)) { block->append(parse_if_directive()); }
    else if (lex < kwd_for_directive >(true)) { block->append(parse_for_directive()); }
    else if (lex < kwd_each_directive >(true)) { block->append(parse_each_directive()); }
    else if (lex < kwd_while_directive >(true)) { block->append(parse_while_directive()); }
    else if (lex < kwd_return_directive >(true)) { block->append(parse_return_directive()); }

    // parse imports to process later
    else if (lex < kwd_import >(true)) {
      Scope parent = stack.empty() ? Scope::Rules : stack.back();
      if (parent != Scope::Function && parent != Scope::Root && parent != Scope::Rules && parent != Scope::Media) {
        if (! peek_css< uri_prefix >(position)) { // this seems to go in ruby sass 3.4.20
          error("Import directives may not be used within control directives or mixins.");
        }
      }
      // this puts the parsed doc into sheets
      // import stub will fetch this in expand
      Import_Obj imp = parse_import();
      // if it is a url, we only add the statement
      if (!imp->urls().empty()) block->append(imp);
      // process all resources now (add Import_Stub nodes)
      for (size_t i = 0, S = imp->incs().size(); i < S; ++i) {
        block->append(SASS_MEMORY_NEW(Import_Stub, pstate, imp->incs()[i]));
      }
    }

    else if (lex < kwd_extend >(true)) {
      Lookahead lookahead = lookahead_for_include(position);
      if (!lookahead.found) css_error("Invalid CSS", " after ", ": expected selector, was ");
      Selector_List_Obj target;
      if (!lookahead.has_interpolants) {
        target = parse_selector_list(true);
      }
      else {
        target = SASS_MEMORY_NEW(Selector_List, pstate);
        target->schema(parse_selector_schema(lookahead.found, true));
      }

      block->append(SASS_MEMORY_NEW(Extension, pstate, target));
    }

    // selector may contain interpolations which need delayed evaluation
    else if (
      !(lookahead_result = lookahead_for_selector(position)).error &&
      !lookahead_result.is_custom_property
    )
    {
      block->append(parse_ruleset(lookahead_result));
    }

    // parse multiple specific keyword directives
    else if (lex < kwd_media >(true)) { block->append(parse_media_block()); }
    else if (lex < kwd_at_root >(true)) { block->append(parse_at_root_block()); }
    else if (lex < kwd_include_directive >(true)) { block->append(parse_include_directive()); }
    else if (lex < kwd_content_directive >(true)) { block->append(parse_content_directive()); }
    else if (lex < kwd_supports_directive >(true)) { block->append(parse_supports_directive()); }
    else if (lex < kwd_mixin >(true)) { block->append(parse_definition(Definition::MIXIN)); }
    else if (lex < kwd_function >(true)) { block->append(parse_definition(Definition::FUNCTION)); }

    // ignore the @charset directive for now
    else if (lex< kwd_charset_directive >(true)) { parse_charset_directive(); }

    // generic at keyword (keep last)
    else if (lex< re_special_directive >(true)) { block->append(parse_special_directive()); }
    else if (lex< re_prefixed_directive >(true)) { block->append(parse_prefixed_directive()); }
    else if (lex< at_keyword >(true)) { block->append(parse_directive()); }

    else if (is_root && stack.back() != Scope::AtRoot /* && block->is_root() */) {
      lex< css_whitespace >();
      if (position >= end) return true;
      css_error("Invalid CSS", " after ", ": expected 1 selector or at-rule, was ");
    }
    // parse a declaration
    else
    {
      // ToDo: how does it handle parse errors?
      // maybe we are expected to parse something?
      Declaration_Obj decl = parse_declaration();
      decl->tabs(indentation);
      block->append(decl);
      // maybe we have a "sub-block"
      if (peek< exactly<'{'> >()) {
        if (decl->is_indented()) ++ indentation;
        // parse a propset that rides on the declaration's property
        stack.push_back(Scope::Properties);
        decl->block(parse_block());
        stack.pop_back();
        if (decl->is_indented()) -- indentation;
      }
    }
    // something matched
    return true;
  }
  // EO parse_block_node

  // parse imports inside the
  Import_Obj Parser::parse_import()
  {
    Import_Obj imp = SASS_MEMORY_NEW(Import, pstate);
    std::vector<std::pair<std::string,Function_Call_Obj>> to_import;
    bool first = true;
    do {
      while (lex< block_comment >());
      if (lex< quoted_string >()) {
        to_import.push_back(std::pair<std::string,Function_Call_Obj>(std::string(lexed), 0));
      }
      else if (lex< uri_prefix >()) {
        Arguments_Obj args = SASS_MEMORY_NEW(Arguments, pstate);
        Function_Call_Obj result = SASS_MEMORY_NEW(Function_Call, pstate, "url", args);

        if (lex< quoted_string >()) {
          Expression_Obj quoted_url = parse_string();
          args->append(SASS_MEMORY_NEW(Argument, quoted_url->pstate(), quoted_url));
        }
        else if (String_Obj string_url = parse_url_function_argument()) {
          args->append(SASS_MEMORY_NEW(Argument, string_url->pstate(), string_url));
        }
        else if (peek < skip_over_scopes < exactly < '(' >, exactly < ')' > > >(position)) {
          Expression_Obj braced_url = parse_list(); // parse_interpolated_chunk(lexed);
          args->append(SASS_MEMORY_NEW(Argument, braced_url->pstate(), braced_url));
        }
        else {
          error("malformed URL");
        }
        if (!lex< exactly<')'> >()) error("URI is missing ')'");
        to_import.push_back(std::pair<std::string, Function_Call_Obj>("", result));
      }
      else {
        if (first) error("@import directive requires a url or quoted path");
        else error("expecting another url or quoted path in @import list");
      }
      first = false;
    } while (lex_css< exactly<','> >());

    // An optional trailing media-query list applies to the whole import.
    if (!peek_css< alternatives< exactly<';'>, exactly<'}'>, end_of_file > >()) {
      List_Obj import_queries = parse_media_queries();
      imp->import_queries(import_queries);
    }

    for(auto location : to_import) {
      if (location.second) {
        imp->urls().push_back(location.second);
      }
      // check if custom importers want to take over the handling
      else if (!ctx.call_importers(unquote(location.first), path, pstate, imp)) {
        // nobody wants it, so we do our import
        ctx.import_url(imp, location.first, path);
      }
    }

    return imp;
  }

  // Parse a @mixin or @function definition: name, parameter list, body.
  Definition_Obj Parser::parse_definition(Definition::Type which_type)
  {
    std::string which_str(lexed);
    if (!lex< identifier >()) error("invalid name in " + which_str + " definition");
    std::string name(Util::normalize_underscores(lexed));
    if (which_type == Definition::FUNCTION && (name == "and" || name == "or" || name == "not"))
    { error("Invalid function name \"" + name + "\"."); }
    ParserState source_position_of_def = pstate;
    Parameters_Obj params = parse_parameters();
    if (which_type == Definition::MIXIN) stack.push_back(Scope::Mixin);
    else stack.push_back(Scope::Function);
    Block_Obj body = parse_block();
    stack.pop_back();
    return SASS_MEMORY_NEW(Definition, source_position_of_def, name, params, body, which_type);
  }
  // Parse an optional parenthesized, comma-separated parameter list
  // (for @mixin / @function definitions).
  Parameters_Obj Parser::parse_parameters()
  {
    Parameters_Obj params = SASS_MEMORY_NEW(Parameters, pstate);
    if (lex_css< exactly<'('> >()) {
      // if there's anything there at all
      if (!peek_css< exactly<')'> >()) {
        do {
          if (peek< exactly<')'> >()) break;
          params->append(parse_parameter());
        } while (lex_css< exactly<','> >());
      }
      if (!lex_css< exactly<')'> >()) {
        css_error("Invalid CSS", " after ", ": expected \")\", was ");
      }
    }
    return params;
  }

  // Parse one parameter: $name, optional ": default" value, or trailing "..."
  // (rest parameter).
  Parameter_Obj Parser::parse_parameter()
  {
    if (peek< alternatives< exactly<','>, exactly< '{' >, exactly<';'> > >()) {
      css_error("Invalid CSS", " after ", ": expected variable (e.g. $foo), was ");
    }
    while (lex< alternatives < spaces, block_comment > >());
    lex < variable >();
    std::string name(Util::normalize_underscores(lexed));
    ParserState pos = pstate;
    Expression_Obj val;
    bool is_rest = false;
    while (lex< alternatives < spaces, block_comment > >());
    if (lex< exactly<':'> >()) { // there's a default value
      while (lex< block_comment >());
      val = parse_space_list();
    }
    else if (lex< exactly< ellipsis > >()) {
      is_rest = true;
    }
    return SASS_MEMORY_NEW(Parameter, pos, name, val, is_rest);
  }

  // Parse an optional parenthesized, comma-separated argument list
  // (for function / mixin calls).
  Arguments_Obj Parser::parse_arguments()
  {
    Arguments_Obj args = SASS_MEMORY_NEW(Arguments, pstate);
    if (lex_css< exactly<'('> >()) {
      // if there's anything there at all
      if (!peek_css< exactly<')'> >()) {
        do {
          if (peek< exactly<')'> >()) break;
          args->append(parse_argument());
        } while (lex_css< exactly<','> >());
      }
      if (!lex_css< exactly<')'> >()) {
        css_error("Invalid CSS", " after ", ": expected expression (e.g. 1px, bold), was ");
      }
    }
    return args;
  }

  // Parse one argument: either a keyword argument ($name: value) or a
  // positional value, optionally spread with "..." (arglist/keyword splat).
  Argument_Obj Parser::parse_argument()
  {
    if (peek< alternatives< exactly<','>, exactly< '{' >, exactly<';'> > >()) {
      css_error("Invalid CSS", " after ", ": expected \")\", was ");
    }
    if (peek_css< sequence < exactly< hash_lbrace >, exactly< rbrace > > >()) {
      // advance past the empty interpolation so the error points at it
      position += 2;
      css_error("Invalid CSS", " after ", ": expected expression (e.g. 1px, bold), was ");
    }

    Argument_Obj arg;
    if (peek_css< sequence < variable, optional_css_comments, exactly<':'> > >()) {
      lex_css< variable >();
      std::string name(Util::normalize_underscores(lexed));
      ParserState p = pstate;
      lex_css< exactly<':'> >();
      Expression_Obj val = parse_space_list();
      arg = SASS_MEMORY_NEW(Argument, p, val, name);
    }
    else {
      bool is_arglist = false;
      bool is_keyword = false;
      Expression_Obj val = parse_space_list();
      List_Ptr l = Cast<List>(val);
      if (lex_css< exactly< ellipsis > >()) {
        // A spread of a map (or hash-separated list) is a keyword splat,
        // everything else becomes an arglist splat.
        if (val->concrete_type() == Expression::MAP || (
           (l != NULL && l->separator() == SASS_HASH)
        )) is_keyword = true;
        else is_arglist = true;
      }
      arg = SASS_MEMORY_NEW(Argument, pstate, val, "", is_arglist, is_keyword);
    }
    return arg;
  }

  // Parse "$name: value" with optional !default / !global flags.
  Assignment_Obj Parser::parse_assignment()
  {
    std::string name(Util::normalize_underscores(lexed));
    ParserState var_source_position = pstate;
    if (!lex< exactly<':'> >()) error("expected ':' after " + name + " in assignment statement");
    if (peek_css< alternatives < exactly<';'>, end_of_file > >()) {
      css_error("Invalid CSS", " after ", ": expected expression (e.g. 1px, bold), was ");
    }
    Expression_Obj val;
    Lookahead lookahead = lookahead_for_value(position);
    if (lookahead.has_interpolants && lookahead.found) {
      val = parse_value_schema(lookahead.found);
    } else {
      val = parse_list();
    }
    bool is_default = false;
    bool is_global = false;
    while (peek< alternatives < default_flag, global_flag > >()) {
      if (lex< default_flag >()) is_default = true;
      else if (lex< global_flag >()) is_global = true;
    }
    return SASS_MEMORY_NEW(Assignment, var_source_position, name, val, is_default, is_global);
  }

  // a ruleset connects a selector and a block
  // NESTING_GUARD bounds parser recursion depth here.
  Ruleset_Obj Parser::parse_ruleset(Lookahead lookahead)
  {
    NESTING_GUARD(nestings);
    // inherit is_root from parent block
    Block_Obj parent = block_stack.back();
    bool is_root = parent && parent->is_root();
    // make sure to move up the the last position
    lex < optional_css_whitespace >(false, true);
    // create the connector object (add parts later)
    Ruleset_Obj ruleset = SASS_MEMORY_NEW(Ruleset, pstate);
    // parse selector static or as schema to be evaluated later
    if (lookahead.parsable) ruleset->selector(parse_selector_list(false));
    else {
      Selector_List_Obj list = SASS_MEMORY_NEW(Selector_List, pstate);
      list->schema(parse_selector_schema(lookahead.position, false));
      ruleset->selector(list);
    }
    // then parse the inner block
    stack.push_back(Scope::Rules);
    ruleset->block(parse_block());
    stack.pop_back();
    // update for end position
    ruleset->update_pstate(pstate);
    ruleset->block()->update_pstate(pstate);
    // need this info for sanity checks
    ruleset->is_root(is_root);
    // return AST Node
    return ruleset;
  }

  // parse a selector schema that will be evaluated in the eval stage
  // uses a string schema internally to do the actual schema handling
  // in the eval stage we will be re-parse it into an actual selector
  Selector_Schema_Obj Parser::parse_selector_schema(const char* end_of_selector, bool chroot)
  {
    NESTING_GUARD(nestings);
    // move up to the start
    lex< optional_spaces >();
    const char* i = position;
    // selector schema re-uses string schema implementation
    String_Schema_Ptr schema = SASS_MEMORY_NEW(String_Schema, pstate);
    // the selector schema is pretty much just a wrapper for the string schema
    Selector_Schema_Obj selector_schema = SASS_MEMORY_NEW(Selector_Schema, pstate, schema);
    selector_schema->connect_parent(chroot == false);
    selector_schema->media_block(last_media_block);

    // process until end
    while (i < end_of_selector) {
      // try to parse mutliple interpolants
      if (const char* p = find_first_in_interval< exactly<hash_lbrace>, block_comment >(i, end_of_selector)) {
        // accumulate the preceding segment if the position has advanced
        if (i < p) {
          std::string parsed(i, p);
          String_Constant_Obj str = SASS_MEMORY_NEW(String_Constant, pstate, parsed);
          pstate += Offset(parsed);
          str->update_pstate(pstate);
          schema->append(str);
        }
        // skip over all nested inner interpolations up to our own delimiter
        const char* j = skip_over_scopes< exactly<hash_lbrace>, exactly<rbrace> >(p + 2, end_of_selector);
        // check if the interpolation never ends of only contains white-space (error out)
        if (!j || peek < sequence < optional_spaces, exactly<rbrace> > >(p+2)) {
          position = p+2;
          css_error("Invalid CSS", " after ", ": expected expression (e.g. 1px, bold), was ");
        }
        // pass inner expression to the parser to resolve nested interpolations
        pstate.add(p, p+2);
        Expression_Obj interpolant = Parser::from_c_str(p+2, j, ctx, traces, pstate).parse_list();
        // set status on the list expression
        interpolant->is_interpolant(true);
        // schema->has_interpolants(true);
        // add to the string schema
        schema->append(interpolant);
        // advance parser state
        pstate.add(p+2, j);
        // advance position
        i = j;
      }
      // no more interpolants have been found
      // add the last segment if there is one
      else {
        // make sure to add the last bits of the string up to the end (if any)
        if (i < end_of_selector) {
          std::string parsed(i, end_of_selector);
          String_Constant_Obj str = SASS_MEMORY_NEW(String_Constant, pstate, parsed);
          pstate += Offset(parsed);
          str->update_pstate(pstate);
          i = end_of_selector;
          schema->append(str);
        }
        // exit loop
      }
    }
    // EO until eos

    // update position
    position = i;

    // update for end position
    selector_schema->update_pstate(pstate);
    schema->update_pstate(pstate);

    after_token = before_token = pstate;

    // return parsed result
    return selector_schema.detach();
  }
  // EO parse_selector_schema

  // Consume (and discard) a @charset directive's string and semicolon.
  void Parser::parse_charset_directive()
  {
    lex <
      sequence <
        quoted_string,
        optional_spaces,
        exactly <';'>
      >
    >();
  }

  // called after parsing `kwd_include_directive`
  Mixin_Call_Obj Parser::parse_include_directive()
  {
    // lex identifier into `lexed` var
    lex_identifier(); // may error out
    // normalize underscores to hyphens
    std::string name(Util::normalize_underscores(lexed));
    // create the initial mixin call object
    Mixin_Call_Obj call = SASS_MEMORY_NEW(Mixin_Call, pstate, name, 0, 0);
    // parse mandatory arguments
    call->arguments(parse_arguments());
    // parse optional block
    if (peek < exactly <'{'> >()) {
      call->block(parse_block());
    }
    // return ast node
    return call.detach();
  }
  // EO parse_include_directive

  // parse a list of complex selectors
  // this is the main entry point for most
  // (definition continues past the end of this chunk)
  Selector_List_Obj Parser::parse_selector_list(bool chroot)
  {
    bool reloop;
    bool
had_linefeed = false; NESTING_GUARD(nestings); Complex_Selector_Obj sel; Selector_List_Obj group = SASS_MEMORY_NEW(Selector_List, pstate); group->media_block(last_media_block); if (peek_css< alternatives < end_of_file, exactly <'{'>, exactly <','> > >()) { css_error("Invalid CSS", " after ", ": expected selector, was "); } do { reloop = false; had_linefeed = had_linefeed || peek_newline(); if (peek_css< alternatives < class_char < selector_list_delims > > >()) break; // in case there are superfluous commas at the end // now parse the complex selector sel = parse_complex_selector(chroot); if (!sel) return group.detach(); sel->has_line_feed(had_linefeed); had_linefeed = false; while (peek_css< exactly<','> >()) { lex< css_comments >(false); // consume everything up and including the comma separator reloop = lex< exactly<','> >() != 0; // remember line break (also between some commas) had_linefeed = had_linefeed || peek_newline(); // remember line break (also between some commas) } group->append(sel); } while (reloop); while (lex_css< kwd_optional >()) { group->is_optional(true); } // update for end position group->update_pstate(pstate); if (sel) sel->last()->has_line_break(false); return group.detach(); } // EO parse_selector_list // a complex selector combines a compound selector with another // complex selector, with one of four combinator operations. // the compound selector (head) is optional, since the combinator // can come first in the whole selector sequence (like `> DIV'). 
Complex_Selector_Obj Parser::parse_complex_selector(bool chroot)
  {
    NESTING_GUARD(nestings);
    String_Obj reference = 0;
    lex < block_comment >();
    advanceToNextToken();
    Complex_Selector_Obj sel = SASS_MEMORY_NEW(Complex_Selector, pstate);

    if (peek < end_of_file >()) return 0;

    // parse the left hand side
    Compound_Selector_Obj lhs;
    // special case if it starts with combinator ([+~>])
    if (!peek_css< class_char < selector_combinator_ops > >()) {
      // parse the left hand side
      lhs = parse_compound_selector();
    }

    // parse combinator between lhs and rhs
    Complex_Selector::Combinator combinator = Complex_Selector::ANCESTOR_OF;
    if (lex< exactly<'+'> >()) combinator = Complex_Selector::ADJACENT_TO;
    else if (lex< exactly<'~'> >()) combinator = Complex_Selector::PRECEDES;
    else if (lex< exactly<'>'> >()) combinator = Complex_Selector::PARENT_OF;
    else if (lex< sequence < exactly<'/'>, negate < exactly < '*' > > > >()) {
      // comments are allowed, but not spaces?
      combinator = Complex_Selector::REFERENCE;
      if (!lex < re_reference_combinator >()) return 0;
      reference = SASS_MEMORY_NEW(String_Constant, pstate, lexed);
      if (!lex < exactly < '/' > >()) return 0; // ToDo: error msg?
    }

    if (!lhs && combinator == Complex_Selector::ANCESTOR_OF) return 0;

    // lex < block_comment >();
    sel->head(lhs);
    sel->combinator(combinator);
    sel->media_block(last_media_block);
    if (combinator == Complex_Selector::REFERENCE) sel->reference(reference);
    // has linfeed after combinator?
    sel->has_line_break(peek_newline());
    // sel->has_line_feed(has_line_feed);

    // check if we got the abort condition (ToDo: optimize)
    if (!peek_css< class_char < complex_selector_delims > >()) {
      // parse next selector in sequence
      sel->tail(parse_complex_selector(true));
    }

    // add a parent selector if we are not in a root
    // also skip adding parent ref if we only have refs
    if (!sel->has_parent_ref() && !chroot) {
      // create the objects to wrap parent selector reference
      Compound_Selector_Obj head = SASS_MEMORY_NEW(Compound_Selector, pstate);
      Parent_Selector_Ptr parent = SASS_MEMORY_NEW(Parent_Selector, pstate, false);
      parent->media_block(last_media_block);
      head->media_block(last_media_block);
      // add simple selector
      head->append(parent);
      // selector may not have any head yet
      if (!sel->head()) { sel->head(head); }
      // otherwise we need to create a new complex selector and set the old one as its tail
      else {
        sel = SASS_MEMORY_NEW(Complex_Selector, pstate, Complex_Selector::ANCESTOR_OF, head, sel);
        sel->media_block(last_media_block);
      }
      // peek for linefeed and remember result on head
      // if (peek_newline()) head->has_line_break(true);
    }

    sel->update_pstate(pstate);
    // complex selector
    return sel;
  }
  // EO parse_complex_selector

  // parse one compound selector, which is basically
  // a list of simple selectors (directly adjacent)
  // lex them exactly (without skipping white-space)
  Compound_Selector_Obj Parser::parse_compound_selector()
  {
    // init an empty compound selector wrapper
    Compound_Selector_Obj seq = SASS_MEMORY_NEW(Compound_Selector, pstate);
    seq->media_block(last_media_block);

    // skip initial white-space
    lex< css_whitespace >();

    // parse list
    while (true)
    {
      // remove all block comments (don't skip white-space)
      lex< delimited_by< slash_star, star_slash, false > >(false);
      // parse functional
      if (match < re_pseudo_selector >())
      {
        seq->append(parse_simple_selector());
      }
      // parse parent selector
      else if (lex< exactly<'&'> >(false))
      {
        // this produces a linefeed!?
        seq->has_parent_reference(true);
        seq->append(SASS_MEMORY_NEW(Parent_Selector, pstate));
        // parent selector only allowed at start
        // upcoming Sass may allow also trailing
        if (seq->length() > 1) {
          ParserState state(pstate);
          Simple_Selector_Obj cur = (*seq)[seq->length()-1];
          Simple_Selector_Obj prev = (*seq)[seq->length()-2];
          std::string sel(prev->to_string({ NESTED, 5 }));
          std::string found(cur->to_string({ NESTED, 5 }));
          if (lex < identifier >()) { found += std::string(lexed); }
          error("Invalid CSS after \"" + sel + "\": expected \"{\", was \"" + found + "\"\n\n"
            "\"" + found + "\" may only be used at the beginning of a compound selector.", state);
        }
      }
      // parse type selector
      else if (lex< re_type_selector >(false))
      {
        seq->append(SASS_MEMORY_NEW(Element_Selector, pstate, lexed));
      }
      // peek for abort conditions
      else if (peek< spaces >()) break;
      else if (peek< end_of_file >()) { break; }
      else if (peek_css < class_char < selector_combinator_ops > >()) break;
      else if (peek_css < class_char < complex_selector_delims > >()) break;
      // otherwise parse another simple selector
      else {
        Simple_Selector_Obj sel = parse_simple_selector();
        if (!sel) return 0;
        seq->append(sel);
      }
    }

    if (seq && !peek_css<alternatives<end_of_file,exactly<'{'>>>()) {
      seq->has_line_break(peek_newline());
    }

    // EO while true
    return seq;
  }
  // EO parse_compound_selector

  // Parse exactly one simple selector (class, id, element, pseudo,
  // attribute or placeholder); dispatches to the specialized parsers.
  Simple_Selector_Obj Parser::parse_simple_selector()
  {
    lex < css_comments >(false);
    if (lex< class_name >()) {
      return SASS_MEMORY_NEW(Class_Selector, pstate, lexed);
    }
    else if (lex< id_name >()) {
      return SASS_MEMORY_NEW(Id_Selector, pstate, lexed);
    }
    else if (lex< alternatives < variable, number, static_reference_combinator > >()) {
      return SASS_MEMORY_NEW(Element_Selector, pstate, lexed);
    }
    else if (peek< pseudo_not >()) {
      return parse_negated_selector();
    }
    else if (peek< re_pseudo_selector >()) {
      return parse_pseudo_selector();
    }
    else if (peek< exactly<':'> >()) {
      return parse_pseudo_selector();
    }
    else if (lex < exactly<'['> >()) {
      return parse_attribute_selector();
    }
    else if (lex< placeholder >()) {
      Placeholder_Selector_Ptr sel = SASS_MEMORY_NEW(Placeholder_Selector, pstate, lexed);
      sel->media_block(last_media_block);
      return sel;
    }
    else {
      css_error("Invalid CSS", " after ", ": expected selector, was ");
    }
    // failed
    return 0;
  }

  // Parse a `:not(...)` selector; the wrapped content is a full selector list.
  Wrapped_Selector_Obj Parser::parse_negated_selector()
  {
    lex< pseudo_not >();
    std::string name(lexed);
    ParserState nsource_position = pstate;
    Selector_List_Obj negated = parse_selector_list(true);
    if (!lex< exactly<')'> >()) {
      error("negated selector is missing ')'");
    }
    // drop the trailing '(' that pseudo_not lexed into the name
    name.erase(name.size() - 1);
    return SASS_MEMORY_NEW(Wrapped_Selector, nsource_position, name, negated);
  }

  // a pseudo selector often starts with one or two colons
  // it can contain more selectors inside parentheses
  Simple_Selector_Obj Parser::parse_pseudo_selector() {
    if (lex< sequence<
          optional < pseudo_prefix >,
          // we keep the space within the name, strange enough
          // ToDo: refactor output to schedule the space for it
          // or do we really want to keep the real white-space?
          sequence< identifier, optional < block_comment >, exactly<'('> >
        > >())
    {
      std::string name(lexed);
      // drop the trailing '(' lexed into the name
      name.erase(name.size() - 1);
      ParserState p = pstate;

      // specially parse static stuff
      // ToDo: really everything static?
      if (peek_css <
            sequence <
              alternatives < static_value, binomial >,
              optional_css_whitespace,
              exactly<')'>
            >
          >()
      ) {
        lex_css< alternatives < static_value, binomial > >();
        String_Constant_Obj expr = SASS_MEMORY_NEW(String_Constant, pstate, lexed);
        if (lex_css< exactly<')'> >()) {
          expr->can_compress_whitespace(true);
          return SASS_MEMORY_NEW(Pseudo_Selector, p, name, expr);
        }
      }
      else if (Selector_List_Obj wrapped = parse_selector_list(true)) {
        if (wrapped && lex_css< exactly<')'> >()) {
          return SASS_MEMORY_NEW(Wrapped_Selector, p, name, wrapped);
        }
      }
    }
    // EO if pseudo selector
    else if (lex < sequence< optional < pseudo_prefix >, identifier > >()) {
      return SASS_MEMORY_NEW(Pseudo_Selector, pstate, lexed);
    }
    else if(lex < pseudo_prefix >()) {
      css_error("Invalid CSS", " after ", ": expected pseudoclass or pseudoelement, was ");
    }

    css_error("Invalid CSS", " after ", ": expected \")\", was ");

    // unreachable statement
    return 0;
  }

  // Matcher for the end of a case-sensitive attribute selector (']' or '/').
  const char* Parser::re_attr_sensitive_close(const char* src)
  {
    return alternatives < exactly<']'>, exactly<'/'> >(src);
  }

  // Matcher for the end of a case-insensitive attribute selector (`i]` etc.).
  const char* Parser::re_attr_insensitive_close(const char* src)
  {
    return sequence < insensitive<'i'>, re_attr_sensitive_close >(src);
  }

  // Parse an attribute selector body (after the opening '[' was lexed):
  // `[name]`, `[name op value]`, with optional `i` insensitivity modifier.
  Attribute_Selector_Obj Parser::parse_attribute_selector()
  {
    ParserState p = pstate;
    if (!lex_css< attribute_name >()) error("invalid attribute name in attribute selector");
    std::string name(lexed);
    if (lex_css< re_attr_sensitive_close >()) {
      // bare `[name]` form without matcher/value
      return SASS_MEMORY_NEW(Attribute_Selector, p, name, "", 0, 0);
    }
    else if (lex_css< re_attr_insensitive_close >()) {
      char modifier = lexed.begin[0];
      return SASS_MEMORY_NEW(Attribute_Selector, p, name, "", 0, modifier);
    }
    if (!lex_css< alternatives< exact_match, class_match, dash_match,
                                prefix_match, suffix_match, substring_match > >()) {
      error("invalid operator in attribute selector for " + name);
    }
    std::string matcher(lexed);

    String_Obj value = 0;
    if (lex_css< identifier >()) {
      value = SASS_MEMORY_NEW(String_Constant, p, lexed);
    }
    else if (lex_css< quoted_string >()) {
      value = parse_interpolated_chunk(lexed, true); // needed!
    }
    else {
      error("expected a string constant or identifier in attribute selector for " + name);
    }

    if (lex_css< re_attr_sensitive_close >()) {
      return SASS_MEMORY_NEW(Attribute_Selector, p, name, matcher, value, 0);
    }
    else if (lex_css< re_attr_insensitive_close >()) {
      char modifier = lexed.begin[0];
      return SASS_MEMORY_NEW(Attribute_Selector, p, name, matcher, value, modifier);
    }
    error("unterminated attribute selector for " + name);
    return NULL; // to satisfy compilers (error must not return)
  }

  /* parse block comment and add to block */
  void Parser::parse_block_comments()
  {
    Block_Obj block = block_stack.back();

    while (lex< block_comment >()) {
      // `/*!` marks an important (loud) comment that survives compression
      bool is_important = lexed.begin[2] == '!';
      // flag on second param is to skip loosely over comments
      String_Obj contents = parse_interpolated_chunk(lexed, true, false);
      block->append(SASS_MEMORY_NEW(Comment, pstate, contents, is_important));
    }
  }

  // Parse one CSS declaration (`prop: value`), including custom
  // properties (`--foo: ...`), static values and value schemas.
  Declaration_Obj Parser::parse_declaration() {
    String_Obj prop;
    bool is_custom_property = false;
    if (lex< sequence< optional< exactly<'*'> >, identifier_schema > >()) {
      const std::string property(lexed);
      is_custom_property = property.compare(0, 2, "--") == 0;
      prop = parse_identifier_schema();
    }
    else if (lex< sequence< optional< exactly<'*'> >, identifier, zero_plus< block_comment > > >()) {
      const std::string property(lexed);
      is_custom_property = property.compare(0, 2, "--") == 0;
      prop = SASS_MEMORY_NEW(String_Constant, pstate, lexed);
    }
    else {
      css_error("Invalid CSS", " after ", ": expected \"}\", was ");
    }
    bool is_indented = true;
    const std::string property(lexed);
    if (!lex_css< one_plus< exactly<':'> > >()) error("property \"" + escape_string(property) + "\" must be followed by a ':'");
    if (!is_custom_property && match< sequence< optional_css_comments, exactly<';'> > >()) error("style declaration must contain a value");
    if (match< sequence< optional_css_comments, exactly<'{'> > >()) is_indented = false; // don't indent if value is empty
    if (is_custom_property) {
      return SASS_MEMORY_NEW(Declaration, prop->pstate(), prop, parse_css_variable_value(), false, true);
    }
    lex < css_comments >(false);
    if (peek_css< static_value >()) {
      return SASS_MEMORY_NEW(Declaration, prop->pstate(), prop, parse_static_value()/*, lex<kwd_important>()*/);
    }
    else {
      Expression_Obj value;
      Lookahead lookahead = lookahead_for_value(position);
      if (lookahead.found) {
        if (lookahead.has_interpolants) {
          value = parse_value_schema(lookahead.found);
        } else {
          value = parse_list(DELAYED);
        }
      }
      else {
        value = parse_list(DELAYED);
        if (List_Ptr list = Cast<List>(value)) {
          // an empty, unbracketed value without a following block is an error
          if (!list->is_bracketed() && list->length() == 0 && !peek< exactly <'{'> >()) {
            css_error("Invalid CSS", " after ", ": expected expression (e.g. 1px, bold), was ");
          }
        }
      }
      lex < css_comments >(false);
      Declaration_Obj decl = SASS_MEMORY_NEW(Declaration, prop->pstate(), prop, value/*, lex<kwd_important>()*/);
      decl->is_indented(is_indented);
      decl->update_pstate(pstate);
      return decl;
    }
  }

  // parse +/- and return false if negative
  // this is never hit via spec tests
  bool Parser::parse_number_prefix()
  {
    bool positive = true;
    while(true) {
      if (lex < block_comment >()) continue;
      if (lex < number_prefix >()) continue;
      if (lex < exactly < '-' > >()) {
        positive = !positive;
        continue;
      }
      break;
    }
    return positive;
  }

  // Parse a parenthesized map (`(key: value, ...)`); falls back to
  // returning the first parsed expression if no ':' follows it.
  Expression_Obj Parser::parse_map()
  {
    NESTING_GUARD(nestings);
    Expression_Obj key = parse_list();
    List_Obj map = SASS_MEMORY_NEW(List, pstate, 0, SASS_HASH);

    // it's not a map so return the lexed value as a list value
    if (!lex_css< exactly<':'> >())
    { return key; }

    List_Obj l = Cast<List>(key);
    if (l && l->separator() == SASS_COMMA) {
      css_error("Invalid CSS", " after ", ": expected \")\", was ");
    }

    Expression_Obj value = parse_space_list();

    map->append(key);
    map->append(value);

    while (lex_css< exactly<','> >())
    {
      // allow trailing commas - #495
      if (peek_css< exactly<')'> >(position))
      { break; }

      key = parse_space_list();

      if (!(lex< exactly<':'> >()))
      { css_error("Invalid CSS", " after ", ": expected \":\", was "); }

      value = parse_space_list();

      map->append(key);
      map->append(value);
    }

    ParserState ps = map->pstate();
    ps.offset = pstate - ps + pstate.offset;
    map->pstate(ps);

    return map;
  }

  // Parse a bracketed list (`[...]`, after the '[' was lexed);
  // always returns a List marked as bracketed.
  Expression_Obj Parser::parse_bracket_list()
  {
    NESTING_GUARD(nestings);
    // check if we have an empty list
    // return the empty list as such
    if (peek_css< list_terminator >(position))
    {
      // return an empty list (nothing to delay)
      return SASS_MEMORY_NEW(List, pstate, 0, SASS_SPACE, false, true);
    }

    bool has_paren = peek_css< exactly<'('> >() != NULL;

    // now try to parse a space list
    Expression_Obj list = parse_space_list();
    // if it's a singleton, return it (don't wrap it)
    if (!peek_css< exactly<','> >(position)) {
      List_Obj l = Cast<List>(list);
      if (!l || l->is_bracketed() || has_paren) {
        List_Obj bracketed_list = SASS_MEMORY_NEW(List, pstate, 1, SASS_SPACE, false, true);
        bracketed_list->append(list);
        return bracketed_list;
      }
      l->is_bracketed(true);
      return l;
    }

    // if we got so far, we actually do have a comma list
    List_Obj bracketed_list = SASS_MEMORY_NEW(List, pstate, 2, SASS_COMMA, false, true);
    // wrap the first expression
    bracketed_list->append(list);

    while (lex_css< exactly<','> >())
    {
      // check for abort condition
      if (peek_css< list_terminator >(position)
      ) { break; }
      // otherwise add another expression
      bracketed_list->append(parse_space_list());
    }
    // return the list
    return bracketed_list;
  }

  // parse list returns either a space separated list,
  // a comma separated list or any bare expression found.
  // so to speak: we unwrap items from lists if possible here!
Expression_Obj Parser::parse_list(bool delayed) { NESTING_GUARD(nestings); return parse_comma_list(delayed); } // will return singletons unwrapped Expression_Obj Parser::parse_comma_list(bool delayed) { NESTING_GUARD(nestings); // check if we have an empty list // return the empty list as such if (peek_css< list_terminator >(position)) { // return an empty list (nothing to delay) return SASS_MEMORY_NEW(List, pstate, 0); } // now try to parse a space list Expression_Obj list = parse_space_list(); // if it's a singleton, return it (don't wrap it) if (!peek_css< exactly<','> >(position)) { // set_delay doesn't apply to list children // so this will only undelay single values if (!delayed) list->set_delayed(false); return list; } // if we got so far, we actually do have a comma list List_Obj comma_list = SASS_MEMORY_NEW(List, pstate, 2, SASS_COMMA); // wrap the first expression comma_list->append(list); while (lex_css< exactly<','> >()) { // check for abort condition if (peek_css< list_terminator >(position) ) { break; } // otherwise add another expression comma_list->append(parse_space_list()); } // return the list return comma_list; } // EO parse_comma_list // will return singletons unwrapped Expression_Obj Parser::parse_space_list() { NESTING_GUARD(nestings); Expression_Obj disj1 = parse_disjunction(); // if it's a singleton, return it (don't wrap it) if (peek_css< space_list_terminator >(position) ) { return disj1; } List_Obj space_list = SASS_MEMORY_NEW(List, pstate, 2, SASS_SPACE); space_list->append(disj1); while ( !(peek_css< space_list_terminator >(position)) && peek_css< optional_css_whitespace >() != end ) { // the space is parsed implicitly? 
space_list->append(parse_disjunction()); } // return the list return space_list; } // EO parse_space_list // parse logical OR operation Expression_Obj Parser::parse_disjunction() { NESTING_GUARD(nestings); advanceToNextToken(); ParserState state(pstate); // parse the left hand side conjunction Expression_Obj conj = parse_conjunction(); // parse multiple right hand sides std::vector<Expression_Obj> operands; while (lex_css< kwd_or >()) operands.push_back(parse_conjunction()); // if it's a singleton, return it directly if (operands.size() == 0) return conj; // fold all operands into one binary expression Expression_Obj ex = fold_operands(conj, operands, { Sass_OP::OR }); state.offset = pstate - state + pstate.offset; ex->pstate(state); return ex; } // EO parse_disjunction // parse logical AND operation Expression_Obj Parser::parse_conjunction() { NESTING_GUARD(nestings); advanceToNextToken(); ParserState state(pstate); // parse the left hand side relation Expression_Obj rel = parse_relation(); // parse multiple right hand sides std::vector<Expression_Obj> operands; while (lex_css< kwd_and >()) { operands.push_back(parse_relation()); } // if it's a singleton, return it directly if (operands.size() == 0) return rel; // fold all operands into one binary expression Expression_Obj ex = fold_operands(rel, operands, { Sass_OP::AND }); state.offset = pstate - state + pstate.offset; ex->pstate(state); return ex; } // EO parse_conjunction // parse comparison operations Expression_Obj Parser::parse_relation() { NESTING_GUARD(nestings); advanceToNextToken(); ParserState state(pstate); // parse the left hand side expression Expression_Obj lhs = parse_expression(); std::vector<Expression_Obj> operands; std::vector<Operand> operators; // if it's a singleton, return it (don't wrap it) while (peek< alternatives < kwd_eq, kwd_neq, kwd_gte, kwd_gt, kwd_lte, kwd_lt > >(position)) { // is directly adjancent to expression? 
bool left_ws = peek < css_comments >() != NULL; // parse the operator enum Sass_OP op = lex<kwd_eq>() ? Sass_OP::EQ : lex<kwd_neq>() ? Sass_OP::NEQ : lex<kwd_gte>() ? Sass_OP::GTE : lex<kwd_lte>() ? Sass_OP::LTE : lex<kwd_gt>() ? Sass_OP::GT : lex<kwd_lt>() ? Sass_OP::LT // we checked the possibilities on top of fn : Sass_OP::EQ; // is directly adjacent to expression? bool right_ws = peek < css_comments >() != NULL; operators.push_back({ op, left_ws, right_ws }); operands.push_back(parse_expression()); } // we are called recursively for list, so we first // fold inner binary expression which has delayed // correctly set to zero. After folding we also unwrap // single nested items. So we cannot set delay on the // returned result here, as we have lost nestings ... Expression_Obj ex = fold_operands(lhs, operands, operators); state.offset = pstate - state + pstate.offset; ex->pstate(state); return ex; } // parse_relation // parse expression valid for operations // called from parse_relation // called from parse_for_directive // called from parse_media_expression // parse addition and subtraction operations Expression_Obj Parser::parse_expression() { NESTING_GUARD(nestings); advanceToNextToken(); ParserState state(pstate); // parses multiple add and subtract operations // NOTE: make sure that identifiers starting with // NOTE: dashes do NOT count as subtract operation Expression_Obj lhs = parse_operators(); // if it's a singleton, return it (don't wrap it) if (!(peek_css< exactly<'+'> >(position) || // condition is a bit misterious, but some combinations should not be counted as operations (peek< no_spaces >(position) && peek< sequence< negate< unsigned_number >, exactly<'-'>, negate< space > > >(position)) || (peek< sequence< negate< unsigned_number >, exactly<'-'>, negate< unsigned_number > > >(position))) || peek< sequence < zero_plus < exactly <'-' > >, identifier > >(position)) { return lhs; } std::vector<Expression_Obj> operands; std::vector<Operand> operators; 
bool left_ws = peek < css_comments >() != NULL; while ( lex_css< exactly<'+'> >() || ( ! peek_css< sequence < zero_plus < exactly <'-' > >, identifier > >(position) && lex_css< sequence< negate< digit >, exactly<'-'> > >() ) ) { bool right_ws = peek < css_comments >() != NULL; operators.push_back({ lexed.to_string() == "+" ? Sass_OP::ADD : Sass_OP::SUB, left_ws, right_ws }); operands.push_back(parse_operators()); left_ws = peek < css_comments >() != NULL; } if (operands.size() == 0) return lhs; Expression_Obj ex = fold_operands(lhs, operands, operators); state.offset = pstate - state + pstate.offset; ex->pstate(state); return ex; } // parse addition and subtraction operations Expression_Obj Parser::parse_operators() { NESTING_GUARD(nestings); advanceToNextToken(); ParserState state(pstate); Expression_Obj factor = parse_factor(); // if it's a singleton, return it (don't wrap it) std::vector<Expression_Obj> operands; // factors std::vector<Operand> operators; // ops // lex operations to apply to lhs const char* left_ws = peek < css_comments >(); while (lex_css< class_char< static_ops > >()) { const char* right_ws = peek < css_comments >(); switch(*lexed.begin) { case '*': operators.push_back({ Sass_OP::MUL, left_ws != 0, right_ws != 0 }); break; case '/': operators.push_back({ Sass_OP::DIV, left_ws != 0, right_ws != 0 }); break; case '%': operators.push_back({ Sass_OP::MOD, left_ws != 0, right_ws != 0 }); break; default: throw std::runtime_error("unknown static op parsed"); } operands.push_back(parse_factor()); left_ws = peek < css_comments >(); } // operands and operators to binary expression Expression_Obj ex = fold_operands(factor, operands, operators); state.offset = pstate - state + pstate.offset; ex->pstate(state); return ex; } // EO parse_operators // called from parse_operators // called from parse_value_schema Expression_Obj Parser::parse_factor() { NESTING_GUARD(nestings); lex < css_comments >(false); if (lex_css< exactly<'('> >()) { // parse_map may 
return a list Expression_Obj value = parse_map(); // lex the expected closing parenthesis if (!lex_css< exactly<')'> >()) error("unclosed parenthesis"); // expression can be evaluated return value; } else if (lex_css< exactly<'['> >()) { // explicit bracketed Expression_Obj value = parse_bracket_list(); // lex the expected closing square bracket if (!lex_css< exactly<']'> >()) error("unclosed squared bracket"); return value; } // string may be interpolated // if (lex< quoted_string >()) { // return &parse_string(); // } else if (peek< ie_property >()) { return parse_ie_property(); } else if (peek< ie_keyword_arg >()) { return parse_ie_keyword_arg(); } else if (peek< sequence < calc_fn_call, exactly <'('> > >()) { return parse_calc_function(); } else if (lex < functional_schema >()) { return parse_function_call_schema(); } else if (lex< identifier_schema >()) { String_Obj string = parse_identifier_schema(); if (String_Schema_Ptr schema = Cast<String_Schema>(string)) { if (lex < exactly < '(' > >()) { schema->append(parse_list()); lex < exactly < ')' > >(); } } return string; } else if (peek< sequence< uri_prefix, W, real_uri_value > >()) { return parse_url_function_string(); } else if (peek< re_functional >()) { return parse_function_call(); } else if (lex< exactly<'+'> >()) { Unary_Expression_Ptr ex = SASS_MEMORY_NEW(Unary_Expression, pstate, Unary_Expression::PLUS, parse_factor()); if (ex && ex->operand()) ex->is_delayed(ex->operand()->is_delayed()); return ex; } else if (lex< exactly<'-'> >()) { Unary_Expression_Ptr ex = SASS_MEMORY_NEW(Unary_Expression, pstate, Unary_Expression::MINUS, parse_factor()); if (ex && ex->operand()) ex->is_delayed(ex->operand()->is_delayed()); return ex; } else if (lex< exactly<'/'> >()) { Unary_Expression_Ptr ex = SASS_MEMORY_NEW(Unary_Expression, pstate, Unary_Expression::SLASH, parse_factor()); if (ex && ex->operand()) ex->is_delayed(ex->operand()->is_delayed()); return ex; } else if (lex< sequence< kwd_not > >()) { 
Unary_Expression_Ptr ex = SASS_MEMORY_NEW(Unary_Expression, pstate, Unary_Expression::NOT, parse_factor()); if (ex && ex->operand()) ex->is_delayed(ex->operand()->is_delayed()); return ex; } // this whole branch is never hit via spec tests else if (peek < sequence < one_plus < alternatives < css_whitespace, exactly<'-'>, exactly<'+'> > >, number > >()) { if (parse_number_prefix()) return parse_value(); // prefix is positive Unary_Expression_Ptr ex = SASS_MEMORY_NEW(Unary_Expression, pstate, Unary_Expression::MINUS, parse_value()); if (ex->operand()) ex->is_delayed(ex->operand()->is_delayed()); return ex; } else { return parse_value(); } } bool number_has_zero(const std::string& parsed) { size_t L = parsed.length(); return !( (L > 0 && parsed.substr(0, 1) == ".") || (L > 1 && parsed.substr(0, 2) == "0.") || (L > 1 && parsed.substr(0, 2) == "-.") || (L > 2 && parsed.substr(0, 3) == "-0.") ); } Number_Ptr Parser::lexed_number(const ParserState& pstate, const std::string& parsed) { Number_Ptr nr = SASS_MEMORY_NEW(Number, pstate, sass_strtod(parsed.c_str()), "", number_has_zero(parsed)); nr->is_interpolant(false); nr->is_delayed(true); return nr; } Number_Ptr Parser::lexed_percentage(const ParserState& pstate, const std::string& parsed) { Number_Ptr nr = SASS_MEMORY_NEW(Number, pstate, sass_strtod(parsed.c_str()), "%", true); nr->is_interpolant(false); nr->is_delayed(true); return nr; } Number_Ptr Parser::lexed_dimension(const ParserState& pstate, const std::string& parsed) { size_t L = parsed.length(); size_t num_pos = parsed.find_first_not_of(" \n\r\t"); if (num_pos == std::string::npos) num_pos = L; size_t unit_pos = parsed.find_first_not_of("-+0123456789.", num_pos); if (parsed[unit_pos] == 'e' && is_number(parsed[unit_pos+1]) ) { unit_pos = parsed.find_first_not_of("-+0123456789.", ++ unit_pos); } if (unit_pos == std::string::npos) unit_pos = L; const std::string& num = parsed.substr(num_pos, unit_pos - num_pos); Number_Ptr nr = SASS_MEMORY_NEW(Number, pstate, 
sass_strtod(num.c_str()), Token(number(parsed.c_str())), number_has_zero(parsed)); nr->is_interpolant(false); nr->is_delayed(true); return nr; } Value_Ptr Parser::lexed_hex_color(const ParserState& pstate, const std::string& parsed) { Color_Ptr color = NULL; if (parsed[0] != '#') { return SASS_MEMORY_NEW(String_Quoted, pstate, parsed); } // chop off the '#' std::string hext(parsed.substr(1)); if (parsed.length() == 4) { std::string r(2, parsed[1]); std::string g(2, parsed[2]); std::string b(2, parsed[3]); color = SASS_MEMORY_NEW(Color, pstate, static_cast<double>(strtol(r.c_str(), NULL, 16)), static_cast<double>(strtol(g.c_str(), NULL, 16)), static_cast<double>(strtol(b.c_str(), NULL, 16)), 1, // alpha channel parsed); } else if (parsed.length() == 7) { std::string r(parsed.substr(1,2)); std::string g(parsed.substr(3,2)); std::string b(parsed.substr(5,2)); color = SASS_MEMORY_NEW(Color, pstate, static_cast<double>(strtol(r.c_str(), NULL, 16)), static_cast<double>(strtol(g.c_str(), NULL, 16)), static_cast<double>(strtol(b.c_str(), NULL, 16)), 1, // alpha channel parsed); } else if (parsed.length() == 9) { std::string r(parsed.substr(1,2)); std::string g(parsed.substr(3,2)); std::string b(parsed.substr(5,2)); std::string a(parsed.substr(7,2)); color = SASS_MEMORY_NEW(Color, pstate, static_cast<double>(strtol(r.c_str(), NULL, 16)), static_cast<double>(strtol(g.c_str(), NULL, 16)), static_cast<double>(strtol(b.c_str(), NULL, 16)), static_cast<double>(strtol(a.c_str(), NULL, 16)) / 255, parsed); } color->is_interpolant(false); color->is_delayed(false); return color; } Value_Ptr Parser::color_or_string(const std::string& lexed) const { if (auto color = name_to_color(lexed)) { auto c = SASS_MEMORY_NEW(Color, color); c->is_delayed(true); c->pstate(pstate); c->disp(lexed); return c; } else { return SASS_MEMORY_NEW(String_Constant, pstate, lexed); } } // parse one value for a list Expression_Obj Parser::parse_value() { lex< css_comments >(false); if (lex< ampersand >()) { if 
(match< ampersand >()) { warning("In Sass, \"&&\" means two copies of the parent selector. You probably want to use \"and\" instead.", pstate); } return SASS_MEMORY_NEW(Parent_Selector, pstate); } if (lex< kwd_important >()) { return SASS_MEMORY_NEW(String_Constant, pstate, "!important"); } // parse `10%4px` into separated items and not a schema if (lex< sequence < percentage, lookahead < number > > >()) { return lexed_percentage(lexed); } if (lex< sequence < number, lookahead< sequence < op, number > > > >()) { return lexed_number(lexed); } // string may be interpolated if (lex< sequence < quoted_string, lookahead < exactly <'-'> > > >()) { return parse_string(); } if (const char* stop = peek< value_schema >()) { return parse_value_schema(stop); } // string may be interpolated if (lex< quoted_string >()) { return parse_string(); } if (lex< kwd_true >()) { return SASS_MEMORY_NEW(Boolean, pstate, true); } if (lex< kwd_false >()) { return SASS_MEMORY_NEW(Boolean, pstate, false); } if (lex< kwd_null >()) { return SASS_MEMORY_NEW(Null, pstate); } if (lex< identifier >()) { return color_or_string(lexed); } if (lex< percentage >()) { return lexed_percentage(lexed); } // match hex number first because 0x000 looks like a number followed by an identifier if (lex< sequence < alternatives< hex, hex0 >, negate < exactly<'-'> > > >()) { return lexed_hex_color(lexed); } if (lex< hexa >()) { std::string s = lexed.to_string(); deprecated( "The value \""+s+"\" is currently parsed as a string, but it will be parsed as a color in", "future versions of Sass. Use \"unquote('"+s+"')\" to continue parsing it as a string.", true, pstate ); return SASS_MEMORY_NEW(String_Quoted, pstate, lexed); } if (lex< sequence < exactly <'#'>, identifier > >()) { return SASS_MEMORY_NEW(String_Quoted, pstate, lexed); } // also handle the 10em- foo special case // alternatives < exactly < '.' >, .. 
> -- `1.5em-.75em` is split into a list, not a binary expression if (lex< sequence< dimension, optional< sequence< exactly<'-'>, lookahead< alternatives < space > > > > > >()) { return lexed_dimension(lexed); } if (lex< sequence< static_component, one_plus< strict_identifier > > >()) { return SASS_MEMORY_NEW(String_Constant, pstate, lexed); } if (lex< number >()) { return lexed_number(lexed); } if (lex< variable >()) { return SASS_MEMORY_NEW(Variable, pstate, Util::normalize_underscores(lexed)); } // Special case handling for `%` proceeding an interpolant. if (lex< sequence< exactly<'%'>, optional< percentage > > >()) { return SASS_MEMORY_NEW(String_Constant, pstate, lexed); } css_error("Invalid CSS", " after ", ": expected expression (e.g. 1px, bold), was "); // unreachable statement return 0; } // this parses interpolation inside other strings // means the result should later be quoted again String_Obj Parser::parse_interpolated_chunk(Token chunk, bool constant, bool css) { const char* i = chunk.begin; // see if there any interpolants const char* p = constant ? find_first_in_interval< exactly<hash_lbrace> >(i, chunk.end) : find_first_in_interval< exactly<hash_lbrace>, block_comment >(i, chunk.end); if (!p) { String_Quoted_Ptr str_quoted = SASS_MEMORY_NEW(String_Quoted, pstate, std::string(i, chunk.end), 0, false, false, true, css); if (!constant && str_quoted->quote_mark()) str_quoted->quote_mark('*'); return str_quoted; } String_Schema_Obj schema = SASS_MEMORY_NEW(String_Schema, pstate, 0, css); schema->is_interpolant(true); while (i < chunk.end) { p = constant ? 
find_first_in_interval< exactly<hash_lbrace> >(i, chunk.end) : find_first_in_interval< exactly<hash_lbrace>, block_comment >(i, chunk.end); if (p) { if (i < p) { // accumulate the preceding segment if it's nonempty schema->append(SASS_MEMORY_NEW(String_Constant, pstate, std::string(i, p), css)); } // we need to skip anything inside strings // create a new target in parser/prelexer if (peek < sequence < optional_spaces, exactly<rbrace> > >(p+2)) { position = p+2; css_error("Invalid CSS", " after ", ": expected expression (e.g. 1px, bold), was "); } const char* j = skip_over_scopes< exactly<hash_lbrace>, exactly<rbrace> >(p + 2, chunk.end); // find the closing brace if (j) { --j; // parse the interpolant and accumulate it Expression_Obj interp_node = Parser::from_token(Token(p+2, j), ctx, traces, pstate, source).parse_list(); interp_node->is_interpolant(true); schema->append(interp_node); i = j; } else { // throw an error if the interpolant is unterminated error("unterminated interpolant inside string constant " + chunk.to_string()); } } else { // no interpolants left; add the last segment if nonempty // check if we need quotes here (was not sure after merge) if (i < chunk.end) schema->append(SASS_MEMORY_NEW(String_Constant, pstate, std::string(i, chunk.end), css)); break; } ++ i; } return schema.detach(); } String_Schema_Obj Parser::parse_css_variable_value(bool top_level) { String_Schema_Obj schema = SASS_MEMORY_NEW(String_Schema, pstate); String_Schema_Obj tok; if (!(tok = parse_css_variable_value_token(top_level))) { return NULL; } schema->concat(tok); while ((tok = parse_css_variable_value_token(top_level))) { schema->concat(tok); } return schema.detach(); } String_Schema_Obj Parser::parse_css_variable_value_token(bool top_level) { String_Schema_Obj schema = SASS_MEMORY_NEW(String_Schema, pstate); if ( (top_level && lex< css_variable_top_level_value >(false)) || (!top_level && lex< css_variable_value >(false)) ) { Token str(lexed); 
schema->append(SASS_MEMORY_NEW(String_Constant, pstate, str)); } else if (Expression_Obj tok = lex_interpolation()) { if (String_Schema_Ptr s = Cast<String_Schema>(tok)) { schema->concat(s); } else { schema->append(tok); } } else if (lex< quoted_string >()) { Expression_Obj tok = parse_string(); if (String_Schema_Ptr s = Cast<String_Schema>(tok)) { schema->concat(s); } else { schema->append(tok); } } else { if (peek< alternatives< exactly<'('>, exactly<'['>, exactly<'{'> > >()) { if (lex< exactly<'('> >()) { schema->append(SASS_MEMORY_NEW(String_Constant, pstate, std::string("("))); if (String_Schema_Obj tok = parse_css_variable_value(false)) schema->concat(tok); if (!lex< exactly<')'> >()) css_error("Invalid CSS", " after ", ": expected \")\", was "); schema->append(SASS_MEMORY_NEW(String_Constant, pstate, std::string(")"))); } else if (lex< exactly<'['> >()) { schema->append(SASS_MEMORY_NEW(String_Constant, pstate, std::string("["))); if (String_Schema_Obj tok = parse_css_variable_value(false)) schema->concat(tok); if (!lex< exactly<']'> >()) css_error("Invalid CSS", " after ", ": expected \"]\", was "); schema->append(SASS_MEMORY_NEW(String_Constant, pstate, std::string("]"))); } else if (lex< exactly<'{'> >()) { schema->append(SASS_MEMORY_NEW(String_Constant, pstate, std::string("{"))); if (String_Schema_Obj tok = parse_css_variable_value(false)) schema->concat(tok); if (!lex< exactly<'}'> >()) css_error("Invalid CSS", " after ", ": expected \"}\", was "); schema->append(SASS_MEMORY_NEW(String_Constant, pstate, std::string("}"))); } } } return schema->length() > 0 ? 
schema.detach() : NULL; } Value_Obj Parser::parse_static_value() { lex< static_value >(); Token str(lexed); // static values always have trailing white- // space and end delimiter (\s*[;]$) included --pstate.offset.column; --after_token.column; --str.end; --position; return color_or_string(str.time_wspace());; } String_Obj Parser::parse_string() { return parse_interpolated_chunk(Token(lexed)); } String_Obj Parser::parse_ie_property() { lex< ie_property >(); Token str(lexed); const char* i = str.begin; // see if there any interpolants const char* p = find_first_in_interval< exactly<hash_lbrace>, block_comment >(str.begin, str.end); if (!p) { return SASS_MEMORY_NEW(String_Quoted, pstate, std::string(str.begin, str.end)); } String_Schema_Ptr schema = SASS_MEMORY_NEW(String_Schema, pstate); while (i < str.end) { p = find_first_in_interval< exactly<hash_lbrace>, block_comment >(i, str.end); if (p) { if (i < p) { schema->append(SASS_MEMORY_NEW(String_Constant, pstate, std::string(i, p))); // accumulate the preceding segment if it's nonempty } if (peek < sequence < optional_spaces, exactly<rbrace> > >(p+2)) { position = p+2; css_error("Invalid CSS", " after ", ": expected expression (e.g. 
1px, bold), was "); } const char* j = skip_over_scopes< exactly<hash_lbrace>, exactly<rbrace> >(p+2, str.end); // find the closing brace if (j) { // parse the interpolant and accumulate it Expression_Obj interp_node = Parser::from_token(Token(p+2, j), ctx, traces, pstate, source).parse_list(); interp_node->is_interpolant(true); schema->append(interp_node); i = j; } else { // throw an error if the interpolant is unterminated error("unterminated interpolant inside IE function " + str.to_string()); } } else { // no interpolants left; add the last segment if nonempty if (i < str.end) { schema->append(SASS_MEMORY_NEW(String_Constant, pstate, std::string(i, str.end))); } break; } } return schema; } String_Obj Parser::parse_ie_keyword_arg() { String_Schema_Ptr kwd_arg = SASS_MEMORY_NEW(String_Schema, pstate, 3); if (lex< variable >()) { kwd_arg->append(SASS_MEMORY_NEW(Variable, pstate, Util::normalize_underscores(lexed))); } else { lex< alternatives< identifier_schema, identifier > >(); kwd_arg->append(SASS_MEMORY_NEW(String_Constant, pstate, lexed)); } lex< exactly<'='> >(); kwd_arg->append(SASS_MEMORY_NEW(String_Constant, pstate, lexed)); if (peek< variable >()) kwd_arg->append(parse_list()); else if (lex< number >()) { std::string parsed(lexed); Util::normalize_decimals(parsed); kwd_arg->append(lexed_number(parsed)); } else if (peek < ie_keyword_arg_value >()) { kwd_arg->append(parse_list()); } return kwd_arg; } String_Schema_Obj Parser::parse_value_schema(const char* stop) { // initialize the string schema object to add tokens String_Schema_Obj schema = SASS_MEMORY_NEW(String_Schema, pstate); if (peek<exactly<'}'>>()) { css_error("Invalid CSS", " after ", ": expected expression (e.g. 
1px, bold), was "); } const char* e; const char* ee = end; end = stop; size_t num_items = 0; bool need_space = false; while (position < stop) { // parse space between tokens if (lex< spaces >() && num_items) { need_space = true; } if (need_space) { need_space = false; // schema->append(SASS_MEMORY_NEW(String_Constant, pstate, " ")); } if ((e = peek< re_functional >()) && e < stop) { schema->append(parse_function_call()); } // lex an interpolant /#{...}/ else if (lex< exactly < hash_lbrace > >()) { // Try to lex static expression first if (peek< exactly< rbrace > >()) { css_error("Invalid CSS", " after ", ": expected expression (e.g. 1px, bold), was "); } Expression_Obj ex; if (lex< re_static_expression >()) { ex = SASS_MEMORY_NEW(String_Constant, pstate, lexed); } else { ex = parse_list(true); } ex->is_interpolant(true); schema->append(ex); if (!lex < exactly < rbrace > >()) { css_error("Invalid CSS", " after ", ": expected \"}\", was "); } } // lex some string constants or other valid token // Note: [-+] chars are left over from i.e. 
`#{3}+3` else if (lex< alternatives < exactly<'%'>, exactly < '-' >, exactly < '+' > > >()) { schema->append(SASS_MEMORY_NEW(String_Constant, pstate, lexed)); } // lex a quoted string else if (lex< quoted_string >()) { // need_space = true; // if (schema->length()) schema->append(SASS_MEMORY_NEW(String_Constant, pstate, " ")); // else need_space = true; schema->append(parse_string()); if ((*position == '"' || *position == '\'') || peek < alternatives < alpha > >()) { // need_space = true; } if (peek < exactly < '-' > >()) break; } else if (lex< identifier >()) { schema->append(SASS_MEMORY_NEW(String_Constant, pstate, lexed)); if ((*position == '"' || *position == '\'') || peek < alternatives < alpha > >()) { // need_space = true; } } // lex (normalized) variable else if (lex< variable >()) { std::string name(Util::normalize_underscores(lexed)); schema->append(SASS_MEMORY_NEW(Variable, pstate, name)); } // lex percentage value else if (lex< percentage >()) { schema->append(lexed_percentage(lexed)); } // lex dimension value else if (lex< dimension >()) { schema->append(lexed_dimension(lexed)); } // lex number value else if (lex< number >()) { schema->append(lexed_number(lexed)); } // lex hex color value else if (lex< sequence < hex, negate < exactly < '-' > > > >()) { schema->append(lexed_hex_color(lexed)); } else if (lex< sequence < exactly <'#'>, identifier > >()) { schema->append(SASS_MEMORY_NEW(String_Quoted, pstate, lexed)); } // lex a value in parentheses else if (peek< parenthese_scope >()) { schema->append(parse_factor()); } else { break; } ++num_items; } if (position != stop) { schema->append(SASS_MEMORY_NEW(String_Constant, pstate, std::string(position, stop))); position = stop; } end = ee; return schema; } // this parses interpolation outside other strings // means the result must not be quoted again later String_Obj Parser::parse_identifier_schema() { Token id(lexed); const char* i = id.begin; // see if there any interpolants const char* p = 
find_first_in_interval< exactly<hash_lbrace>, block_comment >(id.begin, id.end); if (!p) { return SASS_MEMORY_NEW(String_Constant, pstate, std::string(id.begin, id.end)); } String_Schema_Obj schema = SASS_MEMORY_NEW(String_Schema, pstate); while (i < id.end) { p = find_first_in_interval< exactly<hash_lbrace>, block_comment >(i, id.end); if (p) { if (i < p) { // accumulate the preceding segment if it's nonempty const char* o = position; position = i; schema->append(parse_value_schema(p)); position = o; } // we need to skip anything inside strings // create a new target in parser/prelexer if (peek < sequence < optional_spaces, exactly<rbrace> > >(p+2)) { position = p; css_error("Invalid CSS", " after ", ": expected expression (e.g. 1px, bold), was "); } const char* j = skip_over_scopes< exactly<hash_lbrace>, exactly<rbrace> >(p+2, id.end); // find the closing brace if (j) { // parse the interpolant and accumulate it Expression_Obj interp_node = Parser::from_token(Token(p+2, j), ctx, traces, pstate, source).parse_list(DELAYED); interp_node->is_interpolant(true); schema->append(interp_node); // schema->has_interpolants(true); i = j; } else { // throw an error if the interpolant is unterminated error("unterminated interpolant inside interpolated identifier " + id.to_string()); } } else { // no interpolants left; add the last segment if nonempty if (i < end) { const char* o = position; position = i; schema->append(parse_value_schema(id.end)); position = o; } break; } } return schema ? 
schema.detach() : 0;
  }

  // calc functions should preserve arguments
  // The argument text is first consumed via parse_list() purely to find its
  // extent, then re-parsed as an interpolated chunk so the raw spelling of
  // the calc() expression survives into the output.
  Function_Call_Obj Parser::parse_calc_function()
  {
    lex< identifier >();
    std::string name(lexed);
    ParserState call_pos = pstate;
    lex< exactly<'('> >();
    ParserState arg_pos = pstate;
    const char* arg_beg = position;
    // walk over the argument to find where it ends (result discarded)
    parse_list();
    const char* arg_end = position;
    // consume up to and including the balanced closing paren
    lex< skip_over_scopes < exactly < '(' >, exactly < ')' > > >();
    Argument_Obj arg = SASS_MEMORY_NEW(Argument, arg_pos, parse_interpolated_chunk(Token(arg_beg, arg_end)));
    Arguments_Obj args = SASS_MEMORY_NEW(Arguments, arg_pos);
    args->append(arg);
    return SASS_MEMORY_NEW(Function_Call, call_pos, name, args);
  }

  // Parse the full textual content of a url(...) function. The optional
  // `url(`-style prefix and `)` suffix are lexed separately and re-attached
  // around the argument, which may itself be an interpolation schema.
  String_Obj Parser::parse_url_function_string()
  {
    std::string prefix("");
    if (lex< uri_prefix >()) {
      prefix = std::string(lexed);
    }
    lex < optional_spaces >();
    String_Obj url_string = parse_url_function_argument();
    std::string suffix("");
    if (lex< real_uri_suffix >()) {
      suffix = std::string(lexed);
    }
    std::string uri("");
    if (url_string) {
      uri = url_string->to_string({ NESTED, 5 });
    }
    if (String_Schema_Ptr schema = Cast<String_Schema>(url_string)) {
      // interpolated url: keep it as a schema with prefix/suffix constants
      String_Schema_Obj res = SASS_MEMORY_NEW(String_Schema, pstate);
      res->append(SASS_MEMORY_NEW(String_Constant, pstate, prefix));
      res->append(schema);
      res->append(SASS_MEMORY_NEW(String_Constant, pstate, suffix));
      return res;
    } else {
      // plain url: collapse everything into one string constant
      std::string res = prefix + uri + suffix;
      return SASS_MEMORY_NEW(String_Constant, pstate, res);
    }
  }

  // Parse the argument of a url(...) call. Returns an interpolated chunk
  // when `#{...}` occurs, a trimmed string constant for a plain uri value,
  // or 0 (null) when nothing matched at the current position.
  String_Obj Parser::parse_url_function_argument()
  {
    const char* p = position;
    std::string uri("");
    if (lex< real_uri_value >(false)) {
      uri = lexed.to_string();
    }
    if (peek< exactly< hash_lbrace > >()) {
      const char* pp = position;
      // TODO: error checking for unclosed interpolants
      while (pp && peek< exactly< hash_lbrace > >(pp)) {
        pp = sequence< interpolant, real_uri_value >(pp);
      }
      position = pp;
      return parse_interpolated_chunk(Token(p, position));
    }
    else if (uri != "") {
      std::string res = Util::rtrim(uri);
      return SASS_MEMORY_NEW(String_Constant, pstate, res);
    }
    return 0;
  }
// Parse a plain (non-interpolated) function call: identifier followed by
// an argument list. `content-exists()` is only legal inside a mixin.
Function_Call_Obj Parser::parse_function_call()
{
  lex< identifier >();
  std::string name(lexed);

  if (Util::normalize_underscores(name) == "content-exists" && stack.back() != Scope::Mixin)
  {
    error("Cannot call content-exists() except within a mixin.");
  }

  ParserState call_pos = pstate;
  Arguments_Obj args = parse_arguments();
  return SASS_MEMORY_NEW(Function_Call, call_pos, name, args);
}

// Parse a function call whose name contains interpolation
// (the name becomes a string schema instead of a plain identifier).
Function_Call_Schema_Obj Parser::parse_function_call_schema()
{
  String_Obj name = parse_identifier_schema();
  ParserState source_position_of_call = pstate;
  Arguments_Obj args = parse_arguments();
  return SASS_MEMORY_NEW(Function_Call_Schema, source_position_of_call, name, args);
}

// @content has no payload; just emit the AST marker node.
Content_Obj Parser::parse_content_directive()
{
  return SASS_MEMORY_NEW(Content, pstate);
}

// Parse an @if directive (and, via recursion, any chained `@else if`).
// NOTE(review): the `else_if` flag is not referenced in this body; it only
// distinguishes the recursive `@else if` call sites -- confirm before use.
If_Obj Parser::parse_if_directive(bool else_if)
{
  stack.push_back(Scope::Control);
  ParserState if_source_position = pstate;
  bool root = block_stack.back()->is_root();
  Expression_Obj predicate = parse_list();
  Block_Obj block = parse_block(root);
  Block_Obj alternative = NULL;
  // only throw away comment if we parse a case
  // we want all other comments to be parsed
  if (lex_css< elseif_directive >()) {
    // chained `@else if`: recurse and wrap the result in a block
    alternative = SASS_MEMORY_NEW(Block, pstate);
    alternative->append(parse_if_directive(true));
  }
  else if (lex_css< kwd_else_directive >()) {
    alternative = parse_block(root);
  }
  stack.pop_back();
  return SASS_MEMORY_NEW(If, if_source_position, predicate, block, alternative);
}

// Parse an @for directive: `@for $var from <lo> (through|to) <hi> { ... }`.
// `through` makes the upper bound inclusive, `to` exclusive.
For_Obj Parser::parse_for_directive()
{
  stack.push_back(Scope::Control);
  ParserState for_source_position = pstate;
  bool root = block_stack.back()->is_root();
  lex_variable();
  std::string var(Util::normalize_underscores(lexed));
  if (!lex< kwd_from >()) error("expected 'from' keyword in @for directive");
  Expression_Obj lower_bound = parse_expression();
  bool inclusive = false;
  if (lex< kwd_through >()) inclusive = true;
  else if (lex< kwd_to >()) inclusive = false;
  else                      error("expected 'through' or 'to' keyword in @for directive");
  Expression_Obj upper_bound =
parse_expression(); Block_Obj body = parse_block(root); stack.pop_back(); return SASS_MEMORY_NEW(For, for_source_position, var, lower_bound, upper_bound, body, inclusive); } // helper to parse a var token Token Parser::lex_variable() { // peek for dollar sign first if (!peek< exactly <'$'> >()) { css_error("Invalid CSS", " after ", ": expected \"$\", was "); } // we expect a simple identifier as the call name if (!lex< sequence < exactly <'$'>, identifier > >()) { lex< exactly <'$'> >(); // move pstate and position up css_error("Invalid CSS", " after ", ": expected identifier, was "); } // return object return token; } // helper to parse identifier Token Parser::lex_identifier() { // we expect a simple identifier as the call name if (!lex< identifier >()) { // ToDo: pstate wrong? css_error("Invalid CSS", " after ", ": expected identifier, was "); } // return object return token; } Each_Obj Parser::parse_each_directive() { stack.push_back(Scope::Control); ParserState each_source_position = pstate; bool root = block_stack.back()->is_root(); std::vector<std::string> vars; lex_variable(); vars.push_back(Util::normalize_underscores(lexed)); while (lex< exactly<','> >()) { if (!lex< variable >()) error("@each directive requires an iteration variable"); vars.push_back(Util::normalize_underscores(lexed)); } if (!lex< kwd_in >()) error("expected 'in' keyword in @each directive"); Expression_Obj list = parse_list(); Block_Obj body = parse_block(root); stack.pop_back(); return SASS_MEMORY_NEW(Each, each_source_position, vars, list, body); } // called after parsing `kwd_while_directive` While_Obj Parser::parse_while_directive() { stack.push_back(Scope::Control); bool root = block_stack.back()->is_root(); // create the initial while call object While_Obj call = SASS_MEMORY_NEW(While, pstate, 0, 0); // parse mandatory predicate Expression_Obj predicate = parse_list(); List_Obj l = Cast<List>(predicate); if (!predicate || (l && !l->length())) { css_error("Invalid CSS", " after ", 
": expected expression (e.g. 1px, bold), was ", false); } call->predicate(predicate); // parse mandatory block call->block(parse_block(root)); // return ast node stack.pop_back(); // return ast node return call.detach(); } // EO parse_while_directive Media_Block_Obj Parser::parse_media_block() { stack.push_back(Scope::Media); Media_Block_Obj media_block = SASS_MEMORY_NEW(Media_Block, pstate, 0, 0); media_block->media_queries(parse_media_queries()); Media_Block_Obj prev_media_block = last_media_block; last_media_block = media_block; media_block->block(parse_css_block()); last_media_block = prev_media_block; stack.pop_back(); return media_block.detach(); } List_Obj Parser::parse_media_queries() { advanceToNextToken(); List_Obj queries = SASS_MEMORY_NEW(List, pstate, 0, SASS_COMMA); if (!peek_css < exactly <'{'> >()) queries->append(parse_media_query()); while (lex_css < exactly <','> >()) queries->append(parse_media_query()); queries->update_pstate(pstate); return queries.detach(); } // Expression_Ptr Parser::parse_media_query() Media_Query_Obj Parser::parse_media_query() { advanceToNextToken(); Media_Query_Obj media_query = SASS_MEMORY_NEW(Media_Query, pstate); if (lex < kwd_not >()) { media_query->is_negated(true); lex < css_comments >(false); } else if (lex < kwd_only >()) { media_query->is_restricted(true); lex < css_comments >(false); } if (lex < identifier_schema >()) media_query->media_type(parse_identifier_schema()); else if (lex < identifier >()) media_query->media_type(parse_interpolated_chunk(lexed)); else media_query->append(parse_media_expression()); while (lex_css < kwd_and >()) media_query->append(parse_media_expression()); if (lex < identifier_schema >()) { String_Schema_Ptr schema = SASS_MEMORY_NEW(String_Schema, pstate); schema->append(media_query->media_type()); schema->append(SASS_MEMORY_NEW(String_Constant, pstate, " ")); schema->append(parse_identifier_schema()); media_query->media_type(schema); } while (lex_css < kwd_and >()) 
media_query->append(parse_media_expression()); media_query->update_pstate(pstate); return media_query; } Media_Query_Expression_Obj Parser::parse_media_expression() { if (lex < identifier_schema >()) { String_Obj ss = parse_identifier_schema(); return SASS_MEMORY_NEW(Media_Query_Expression, pstate, ss, 0, true); } if (!lex_css< exactly<'('> >()) { error("media query expression must begin with '('"); } Expression_Obj feature; if (peek_css< exactly<')'> >()) { error("media feature required in media query expression"); } feature = parse_expression(); Expression_Obj expression = 0; if (lex_css< exactly<':'> >()) { expression = parse_list(DELAYED); } if (!lex_css< exactly<')'> >()) { error("unclosed parenthesis in media query expression"); } return SASS_MEMORY_NEW(Media_Query_Expression, feature->pstate(), feature, expression); } // lexed after `kwd_supports_directive` // these are very similar to media blocks Supports_Block_Obj Parser::parse_supports_directive() { Supports_Condition_Obj cond = parse_supports_condition(); if (!cond) { css_error("Invalid CSS", " after ", ": expected @supports condition (e.g. 
(display: flexbox)), was ", false); } // create the ast node object for the support queries Supports_Block_Obj query = SASS_MEMORY_NEW(Supports_Block, pstate, cond); // additional block is mandatory // parse inner block query->block(parse_block()); // return ast node return query; } // parse one query operation // may encounter nested queries Supports_Condition_Obj Parser::parse_supports_condition() { lex < css_whitespace >(); Supports_Condition_Obj cond; if ((cond = parse_supports_negation())) return cond; if ((cond = parse_supports_operator())) return cond; if ((cond = parse_supports_interpolation())) return cond; return cond; } Supports_Condition_Obj Parser::parse_supports_negation() { if (!lex < kwd_not >()) return 0; Supports_Condition_Obj cond = parse_supports_condition_in_parens(); return SASS_MEMORY_NEW(Supports_Negation, pstate, cond); } Supports_Condition_Obj Parser::parse_supports_operator() { Supports_Condition_Obj cond = parse_supports_condition_in_parens(); if (cond.isNull()) return 0; while (true) { Supports_Operator::Operand op = Supports_Operator::OR; if (lex < kwd_and >()) { op = Supports_Operator::AND; } else if(!lex < kwd_or >()) { break; } lex < css_whitespace >(); Supports_Condition_Obj right = parse_supports_condition_in_parens(); // Supports_Condition_Ptr cc = SASS_MEMORY_NEW(Supports_Condition, *static_cast<Supports_Condition_Ptr>(cond)); cond = SASS_MEMORY_NEW(Supports_Operator, pstate, cond, right, op); } return cond; } Supports_Condition_Obj Parser::parse_supports_interpolation() { if (!lex < interpolant >()) return 0; String_Obj interp = parse_interpolated_chunk(lexed); if (!interp) return 0; return SASS_MEMORY_NEW(Supports_Interpolation, pstate, interp); } // TODO: This needs some major work. 
Although feature conditions // look like declarations their semantics differ significantly Supports_Condition_Obj Parser::parse_supports_declaration() { Supports_Condition_Ptr cond; // parse something declaration like Expression_Obj feature = parse_expression(); Expression_Obj expression = 0; if (lex_css< exactly<':'> >()) { expression = parse_list(DELAYED); } if (!feature || !expression) error("@supports condition expected declaration"); cond = SASS_MEMORY_NEW(Supports_Declaration, feature->pstate(), feature, expression); // ToDo: maybe we need an additional error condition? return cond; } Supports_Condition_Obj Parser::parse_supports_condition_in_parens() { Supports_Condition_Obj interp = parse_supports_interpolation(); if (interp != 0) return interp; if (!lex < exactly <'('> >()) return 0; lex < css_whitespace >(); Supports_Condition_Obj cond = parse_supports_condition(); if (cond != 0) { if (!lex < exactly <')'> >()) error("unclosed parenthesis in @supports declaration"); } else { cond = parse_supports_declaration(); if (!lex < exactly <')'> >()) error("unclosed parenthesis in @supports declaration"); } lex < css_whitespace >(); return cond; } At_Root_Block_Obj Parser::parse_at_root_block() { stack.push_back(Scope::AtRoot); ParserState at_source_position = pstate; Block_Obj body = 0; At_Root_Query_Obj expr; Lookahead lookahead_result; if (lex_css< exactly<'('> >()) { expr = parse_at_root_query(); } if (peek_css < exactly<'{'> >()) { lex <optional_spaces>(); body = parse_block(true); } else if ((lookahead_result = lookahead_for_selector(position)).found) { Ruleset_Obj r = parse_ruleset(lookahead_result); body = SASS_MEMORY_NEW(Block, r->pstate(), 1, true); body->append(r); } At_Root_Block_Obj at_root = SASS_MEMORY_NEW(At_Root_Block, at_source_position, body); if (!expr.isNull()) at_root->expression(expr); stack.pop_back(); return at_root; } At_Root_Query_Obj Parser::parse_at_root_query() { if (peek< exactly<')'> >()) error("at-root feature required in at-root 
expression"); if (!peek< alternatives< kwd_with_directive, kwd_without_directive > >()) { css_error("Invalid CSS", " after ", ": expected \"with\" or \"without\", was "); } Expression_Obj feature = parse_list(); if (!lex_css< exactly<':'> >()) error("style declaration must contain a value"); Expression_Obj expression = parse_list(); List_Obj value = SASS_MEMORY_NEW(List, feature->pstate(), 1); if (expression->concrete_type() == Expression::LIST) { value = Cast<List>(expression); } else value->append(expression); At_Root_Query_Obj cond = SASS_MEMORY_NEW(At_Root_Query, value->pstate(), feature, value); if (!lex_css< exactly<')'> >()) error("unclosed parenthesis in @at-root expression"); return cond; } Directive_Obj Parser::parse_special_directive() { std::string kwd(lexed); if (lexed == "@else") error("Invalid CSS: @else must come after @if"); // this whole branch is never hit via spec tests Directive_Ptr at_rule = SASS_MEMORY_NEW(Directive, pstate, kwd); Lookahead lookahead = lookahead_for_include(position); if (lookahead.found && !lookahead.has_interpolants) { at_rule->selector(parse_selector_list(false)); } lex < css_comments >(false); if (lex < static_property >()) { at_rule->value(parse_interpolated_chunk(Token(lexed))); } else if (!(peek < alternatives < exactly<'{'>, exactly<'}'>, exactly<';'> > >())) { at_rule->value(parse_list()); } lex < css_comments >(false); if (peek< exactly<'{'> >()) { at_rule->block(parse_block()); } return at_rule; } // this whole branch is never hit via spec tests Directive_Obj Parser::parse_prefixed_directive() { std::string kwd(lexed); if (lexed == "@else") error("Invalid CSS: @else must come after @if"); Directive_Obj at_rule = SASS_MEMORY_NEW(Directive, pstate, kwd); Lookahead lookahead = lookahead_for_include(position); if (lookahead.found && !lookahead.has_interpolants) { at_rule->selector(parse_selector_list(false)); } lex < css_comments >(false); if (lex < static_property >()) { 
at_rule->value(parse_interpolated_chunk(Token(lexed))); } else if (!(peek < alternatives < exactly<'{'>, exactly<'}'>, exactly<';'> > >())) { at_rule->value(parse_list()); } lex < css_comments >(false); if (peek< exactly<'{'> >()) { at_rule->block(parse_block()); } return at_rule; } Directive_Obj Parser::parse_directive() { Directive_Obj directive = SASS_MEMORY_NEW(Directive, pstate, lexed); String_Schema_Obj val = parse_almost_any_value(); // strip left and right if they are of type string directive->value(val); if (peek< exactly<'{'> >()) { directive->block(parse_block()); } return directive; } Expression_Obj Parser::lex_interpolation() { if (lex < interpolant >(true) != NULL) { return parse_interpolated_chunk(lexed, true); } return 0; } Expression_Obj Parser::lex_interp_uri() { // create a string schema by lexing optional interpolations return lex_interp< re_string_uri_open, re_string_uri_close >(); } Expression_Obj Parser::lex_interp_string() { Expression_Obj rv; if ((rv = lex_interp< re_string_double_open, re_string_double_close >())) return rv; if ((rv = lex_interp< re_string_single_open, re_string_single_close >())) return rv; return rv; } Expression_Obj Parser::lex_almost_any_value_chars() { const char* match = lex < one_plus < alternatives < sequence < exactly <'\\'>, any_char >, sequence < negate < sequence < exactly < url_kwd >, exactly <'('> > >, neg_class_char < almost_any_value_class > >, sequence < exactly <'/'>, negate < alternatives < exactly <'/'>, exactly <'*'> > > >, sequence < exactly <'\\'>, exactly <'#'>, negate < exactly <'{'> > >, sequence < exactly <'!'>, negate < alpha > > > > >(false); if (match) { return SASS_MEMORY_NEW(String_Constant, pstate, lexed); } return NULL; } Expression_Obj Parser::lex_almost_any_value_token() { Expression_Obj rv; if (*position == 0) return 0; if ((rv = lex_almost_any_value_chars())) return rv; // if ((rv = lex_block_comment())) return rv; // if ((rv = lex_single_line_comment())) return rv; if ((rv = 
lex_interp_string())) return rv; if ((rv = lex_interp_uri())) return rv; if ((rv = lex_interpolation())) return rv; if (lex< alternatives< hex, hex0 > >()) { return lexed_hex_color(lexed); } return rv; } String_Schema_Obj Parser::parse_almost_any_value() { String_Schema_Obj schema = SASS_MEMORY_NEW(String_Schema, pstate); if (*position == 0) return 0; lex < spaces >(false); Expression_Obj token = lex_almost_any_value_token(); if (!token) return 0; schema->append(token); if (*position == 0) { schema->rtrim(); return schema.detach(); } while ((token = lex_almost_any_value_token())) { schema->append(token); } lex < css_whitespace >(); schema->rtrim(); return schema.detach(); } Warning_Obj Parser::parse_warning() { if (stack.back() != Scope::Root && stack.back() != Scope::Function && stack.back() != Scope::Mixin && stack.back() != Scope::Control && stack.back() != Scope::Rules) { error("Illegal nesting: Only properties may be nested beneath properties."); } return SASS_MEMORY_NEW(Warning, pstate, parse_list(DELAYED)); } Error_Obj Parser::parse_error() { if (stack.back() != Scope::Root && stack.back() != Scope::Function && stack.back() != Scope::Mixin && stack.back() != Scope::Control && stack.back() != Scope::Rules) { error("Illegal nesting: Only properties may be nested beneath properties."); } return SASS_MEMORY_NEW(Error, pstate, parse_list(DELAYED)); } Debug_Obj Parser::parse_debug() { if (stack.back() != Scope::Root && stack.back() != Scope::Function && stack.back() != Scope::Mixin && stack.back() != Scope::Control && stack.back() != Scope::Rules) { error("Illegal nesting: Only properties may be nested beneath properties."); } return SASS_MEMORY_NEW(Debug, pstate, parse_list(DELAYED)); } Return_Obj Parser::parse_return_directive() { // check that we do not have an empty list (ToDo: check if we got all cases) if (peek_css < alternatives < exactly < ';' >, exactly < '}' >, end_of_file > >()) { css_error("Invalid CSS", " after ", ": expected expression (e.g. 
1px, bold), was "); } return SASS_MEMORY_NEW(Return, pstate, parse_list()); } Lookahead Parser::lookahead_for_selector(const char* start) { // init result struct Lookahead rv = Lookahead(); // get start position const char* p = start ? start : position; // match in one big "regex" rv.error = p; if (const char* q = peek < re_selector_list >(p) ) { bool could_be_property = peek< sequence< exactly<'-'>, exactly<'-'> > >(p) != 0; bool could_be_escaped = false; while (p < q) { // did we have interpolations? if (*p == '#' && *(p+1) == '{') { rv.has_interpolants = true; p = q; break; } // A property that's ambiguous with a nested selector is interpreted as a // custom property. if (*p == ':' && !could_be_escaped) { rv.is_custom_property = could_be_property || p+1 == q || peek< space >(p+1); } could_be_escaped = *p == '\\'; ++ p; } // store anyway } // ToDo: remove rv.error = q; rv.position = q; // check expected opening bracket // only after successfull matching if (peek < exactly<'{'> >(q)) rv.found = q; // else if (peek < end_of_file >(q)) rv.found = q; else if (peek < exactly<'('> >(q)) rv.found = q; // else if (peek < exactly<';'> >(q)) rv.found = q; // else if (peek < exactly<'}'> >(q)) rv.found = q; if (rv.found || *p == 0) rv.error = 0; } rv.parsable = ! rv.has_interpolants; // return result return rv; } // EO lookahead_for_selector // used in parse_block_nodes and parse_special_directive // ToDo: actual usage is still not really clear to me? 
// Lookahead for an @include-style construct: reuse the selector lookahead,
// then additionally accept ';' or '}' as terminators marking a valid match.
Lookahead Parser::lookahead_for_include(const char* start)
{
  // we actually just lookahead for a selector
  Lookahead rv = lookahead_for_selector(start);
  // but the "found" rules are different
  if (const char* p = rv.position) {
    // check for additional abort condition
    if (peek < exactly<';'> >(p)) rv.found = p;
    else if (peek < exactly<'}'> >(p)) rv.found = p;
  }
  // return result
  return rv;
}
// EO lookahead_for_include

// look ahead for a token with interpolation in it
// we mostly use the result if there is an interpolation
// everything that passes here gets parsed as one schema
// meaning it will not be parsed as a space separated list
Lookahead Parser::lookahead_for_value(const char* start)
{
  // init result struct
  Lookahead rv = Lookahead();
  // get start position
  const char* p = start ? start : position;
  // match in one big "regex"
  // (non-greedy: stop at the first '{', '}' or ';' terminator)
  if (const char* q =
    peek <
      non_greedy <
        alternatives <
          // consume whitespace
          block_comment,
          // spaces,
          // main tokens
          sequence <
            interpolant,
            optional <
              quoted_string
            >
          >,
          identifier,
          variable,
          // issue #442
          sequence <
            parenthese_scope,
            interpolant,
            optional <
              quoted_string
            >
          >
        >,
        sequence <
          // optional_spaces,
          alternatives <
            // end_of_file,
            exactly<'{'>,
            exactly<'}'>,
            exactly<';'>
          >
        >
      >
    >(p)
  ) {
    // empty match means nothing of value was consumed
    if (p == q) return rv;
    while (p < q) {
      // did we have interpolations?
      if (*p == '#' && *(p+1) == '{') {
        rv.has_interpolants = true;
        p = q; break;
      }
      ++ p;
    }
    // store anyway
    // ToDo: remove
    rv.position = q;
    // check expected opening bracket
    // only after successful matching
    if (peek < exactly<'{'> >(q)) rv.found = q;
    else if (peek < exactly<';'> >(q)) rv.found = q;
    else if (peek < exactly<'}'> >(q)) rv.found = q;
  }
  // return result
  return rv;
}
// EO lookahead_for_value

// Detect and skip a leading byte-order mark. Only UTF-8 sources are
// accepted; any other recognized BOM raises a parse error naming the
// detected encoding. Advances `position` past the consumed BOM bytes.
void Parser::read_bom()
{
  size_t skip = 0;
  std::string encoding;
  bool utf_8 = false;
  // dispatch on the first byte; check_bom_chars verifies the full signature
  switch ((unsigned char) source[0]) {
  case 0xEF:
    skip = check_bom_chars(source, end, utf_8_bom, 3);
    encoding = "UTF-8";
    utf_8 = true;
    break;
  case 0xFE:
    skip = check_bom_chars(source, end, utf_16_bom_be, 2);
    encoding = "UTF-16 (big endian)";
    break;
  case 0xFF:
    // UTF-16 LE and UTF-32 LE share the first two bytes; try the longer one too
    skip = check_bom_chars(source, end, utf_16_bom_le, 2);
    skip += (skip ? check_bom_chars(source, end, utf_32_bom_le, 4) : 0);
    encoding = (skip == 2 ? "UTF-16 (little endian)" : "UTF-32 (little endian)");
    break;
  case 0x00:
    skip = check_bom_chars(source, end, utf_32_bom_be, 4);
    encoding = "UTF-32 (big endian)";
    break;
  case 0x2B:
    // UTF-7 has five possible BOM variants
    skip = check_bom_chars(source, end, utf_7_bom_1, 4)
         | check_bom_chars(source, end, utf_7_bom_2, 4)
         | check_bom_chars(source, end, utf_7_bom_3, 4)
         | check_bom_chars(source, end, utf_7_bom_4, 4)
         | check_bom_chars(source, end, utf_7_bom_5, 5);
    encoding = "UTF-7";
    break;
  case 0xF7:
    skip = check_bom_chars(source, end, utf_1_bom, 3);
    encoding = "UTF-1";
    break;
  case 0xDD:
    skip = check_bom_chars(source, end, utf_ebcdic_bom, 4);
    encoding = "UTF-EBCDIC";
    break;
  case 0x0E:
    skip = check_bom_chars(source, end, scsu_bom, 3);
    encoding = "SCSU";
    break;
  case 0xFB:
    skip = check_bom_chars(source, end, bocu_1_bom, 3);
    encoding = "BOCU-1";
    break;
  case 0x84:
    skip = check_bom_chars(source, end, gb_18030_bom, 4);
    encoding = "GB-18030";
    break;
  default: break;
  }
  if (skip > 0 && !utf_8) error("only UTF-8 documents are currently supported; your document appears to be " + encoding);
  position += skip;
}

// Compare `len` bytes at `src` against the BOM signature `bom`.
// Returns `len` on a full match, 0 on mismatch or if fewer than
// `len` bytes remain before `end` (bounds check prevents overread).
size_t check_bom_chars(const char* src, const char *end, const unsigned char* bom, size_t len)
{
  size_t skip = 0;
  if (src + len > end) return 0;
  for (size_t i = 0; i < len; ++i, ++skip) {
    if ((unsigned char) src[i] != bom[i]) return 0;
  }
  return skip;
}

// Left-fold all operands onto `base` with a single operator,
// producing a left-leaning Binary_Expression chain.
Expression_Obj Parser::fold_operands(Expression_Obj base, std::vector<Expression_Obj>& operands, Operand op)
{
  for (size_t i = 0, S = operands.size(); i < S; ++i) {
    base = SASS_MEMORY_NEW(Binary_Expression, base->pstate(), op, base, operands[i]);
  }
  return base;
}

// Fold operands[i..] onto `base` using the per-position operators `ops`.
// String schemas with interpolations get special right-associative
// handling so interpolated pieces keep their surrounding expression.
Expression_Obj Parser::fold_operands(Expression_Obj base, std::vector<Expression_Obj>& operands, std::vector<Operand>& ops, size_t i)
{
  if (String_Schema_Ptr schema = Cast<String_Schema>(base)) {
    // return schema;
    if (schema->has_interpolants()) {
      // for these operators, fold the tail first and attach the schema
      // as the left-hand side of one right-leaning binary expression
      if (i + 1 < operands.size() && (
        (ops[0].operand == Sass_OP::EQ)
        || (ops[0].operand == Sass_OP::ADD)
        || (ops[0].operand == Sass_OP::DIV)
        || (ops[0].operand == Sass_OP::MUL)
        || (ops[0].operand == Sass_OP::NEQ)
        || (ops[0].operand == Sass_OP::LT)
        || (ops[0].operand == Sass_OP::GT)
        || (ops[0].operand == Sass_OP::LTE)
        || (ops[0].operand == Sass_OP::GTE)
      )) {
        Expression_Obj rhs = fold_operands(operands[i], operands, ops, i + 1);
        rhs = SASS_MEMORY_NEW(Binary_Expression, base->pstate(), ops[0], schema, rhs);
        return rhs;
      }
      // return schema;
    }
  }

  for (size_t S = operands.size(); i < S; ++i) {
    if (String_Schema_Ptr schema = Cast<String_Schema>(operands[i])) {
      if (schema->has_interpolants()) {
        if (i + 1 < S) {
          // this whole branch is never hit via spec tests
          Expression_Obj rhs = fold_operands(operands[i+1], operands, ops, i + 2);
          rhs = SASS_MEMORY_NEW(Binary_Expression, base->pstate(), ops[i], schema, rhs);
          base = SASS_MEMORY_NEW(Binary_Expression, base->pstate(), ops[i], base, rhs);
          return base;
        }
        base = SASS_MEMORY_NEW(Binary_Expression, base->pstate(), ops[i], base, operands[i]);
        return base;
      } else {
        base = SASS_MEMORY_NEW(Binary_Expression, base->pstate(), ops[i], base, operands[i]);
      }
    } else {
      base = SASS_MEMORY_NEW(Binary_Expression, base->pstate(), ops[i], base, operands[i]);
    }
    // division of two delayed operands stays delayed (may be a plain
    // CSS slash rather than an arithmetic division)
    Binary_Expression_Ptr b = Cast<Binary_Expression>(base.ptr());
    if (b && ops[i].operand == Sass_OP::DIV && b->left()->is_delayed() && b->right()->is_delayed()) {
      base->is_delayed(true);
    }
  }
  // nested binary expression are never to be delayed
  if (Binary_Expression_Ptr b = Cast<Binary_Expression>(base)) {
    if (Cast<Binary_Expression>(b->left())) base->set_delayed(false);
    if (Cast<Binary_Expression>(b->right())) base->set_delayed(false);
  }
  return base;
}

// Raise an InvalidSass exception at `pos` (falls back to the position
// before the current token when `pos` has no line information).
void Parser::error(std::string msg, Position pos)
{
  Position p(pos.line ? pos : before_token);
  ParserState pstate(path, source, p, Offset(0, 0));
  traces.push_back(Backtrace(pstate));
  throw Exception::InvalidSass(pstate, traces, msg);
}

// Convenience overload: error at the parser's current state.
void Parser::error(std::string msg)
{
  error(msg, pstate);
}

// print a css parsing error with actual context information from parsed source
// Builds "msg prefix "…left context…" middle "…right context…"" by walking
// the UTF-8 source around the current position, clipping each side to the
// current line and to roughly max_len characters, then delegates to error().
void Parser::css_error(const std::string& msg, const std::string& prefix, const std::string& middle, const bool trim)
{
  int max_len = 18;
  const char* end = this->end;
  while (*end != 0) ++ end;
  const char* pos = peek < optional_spaces >();
  if (!pos) pos = position;

  const char* last_pos(pos);
  if (last_pos > source) {
    utf8::prior(last_pos, source);
  }
  // backup position to last significant char
  while (trim && last_pos > source && last_pos < end) {
    if (!Prelexer::is_space(*last_pos)) break;
    utf8::prior(last_pos, source);
  }

  bool ellipsis_left = false;
  const char* pos_left(last_pos);
  const char* end_left(last_pos);

  if (*pos_left) utf8::next(pos_left, end);
  if (*end_left) utf8::next(end_left, end);
  // walk left, codepoint by codepoint, until max_len or a line break
  while (pos_left > source) {
    if (utf8::distance(pos_left, end_left) >= max_len) {
      utf8::prior(pos_left, source);
      ellipsis_left = *(pos_left) != '\n' && *(pos_left) != '\r';
      utf8::next(pos_left, end);
      break;
    }
    const char* prev = pos_left;
    utf8::prior(prev, source);
    if (*prev == '\r') break;
    if (*prev == '\n') break;
    pos_left = prev;
  }
  if (pos_left < source) {
    pos_left = source;
  }

  bool ellipsis_right = false;
  const char* end_right(pos);
  const char* pos_right(pos);
  // walk right until max_len or a line break
  while (end_right < end) {
    if (utf8::distance(pos_right, end_right) > max_len) {
      // NOTE(review): this assigns ellipsis_left, and ellipsis_right is
      // never set anywhere — looks like a copy/paste typo, so the
      // right-hand ellipsis below can never be emitted. Confirm upstream.
      ellipsis_left = *(pos_right) != '\n' && *(pos_right) != '\r';
      break;
    }
    if (*end_right == '\r') break;
    if (*end_right == '\n') break;
    utf8::next(end_right, end);
  }
  // if (*end_right == 0) end_right ++;

  std::string left(pos_left, end_left);
  std::string right(pos_right, end_right);
  // clip each side to 15 chars and prepend/append an ellipsis marker
  size_t left_subpos = left.size() > 15 ? left.size() - 15 : 0;
  size_t right_subpos = right.size() > 15 ? right.size() - 15 : 0;
  if (left_subpos && ellipsis_left) left = ellipsis + left.substr(left_subpos);
  if (right_subpos && ellipsis_right) right = right.substr(right_subpos) + ellipsis;
  // Hotfix when source is null, probably due to interpolation parsing!?
  if (source == NULL || *source == 0) source = pstate.src;
  // now pass new message to the more generic error function
  error(msg + prefix + quote(left) + middle + quote(right));
}

// closing brace of enclosing scope opened before this chunk
}
./CrossVul/dataset_final_sorted/CWE-400/cpp/bad_455_0
crossvul-cpp_data_good_4322_0
/**
 * SPDX-FileCopyrightText: 2013 Albert Vaca <albertvaka@gmail.com>
 *
 * SPDX-License-Identifier: GPL-2.0-only OR GPL-3.0-only OR LicenseRef-KDE-Accepted-GPL
 */

#include "lanlinkprovider.h"
#include "core_debug.h"

#ifndef Q_OS_WIN
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <netdb.h>
#endif

#include <QHostInfo>
#include <QTcpServer>
#include <QMetaEnum>
#include <QNetworkProxy>
#include <QUdpSocket>
#include <QNetworkSession>
#include <QNetworkConfigurationManager>
#include <QSslCipher>
#include <QSslConfiguration>
#include <QSslKey>

#include "daemon.h"
#include "landevicelink.h"
#include "lanpairinghandler.h"
#include "kdeconnectconfig.h"
#include "qtcompat_p.h"

// Peers older than this protocol version do not support TLS and are rejected.
#define MIN_VERSION_WITH_SSL_SUPPORT 6

// Caps to keep unauthenticated peers from growing our bookkeeping maps
// without bound (m_links / m_receivedIdentityPackets).
static const int MAX_UNPAIRED_CONNECTIONS = 42;
static const int MAX_REMEMBERED_IDENTITY_PACKETS = 42;

// Sets up the TCP server, the UDP discovery socket, the broadcast
// coalescing timer, and re-announcement on network configuration changes.
LanLinkProvider::LanLinkProvider(
        bool testMode,
        quint16 udpBroadcastPort,
        quint16 udpListenPort
        )
    : m_server(new Server(this))
    , m_udpSocket(this)
    , m_tcpPort(0)
    , m_udpBroadcastPort(udpBroadcastPort)
    , m_udpListenPort(udpListenPort)
    , m_testMode(testMode)
    , m_combineBroadcastsTimer(this)
{
    m_combineBroadcastsTimer.setInterval(0); // increase this if waiting a single event-loop iteration is not enough
    m_combineBroadcastsTimer.setSingleShot(true);
    connect(&m_combineBroadcastsTimer, &QTimer::timeout, this, &LanLinkProvider::broadcastToNetwork);

    connect(&m_udpSocket, &QIODevice::readyRead, this, &LanLinkProvider::udpBroadcastReceived);

    m_server->setProxy(QNetworkProxy::NoProxy);
    connect(m_server, &QTcpServer::newConnection, this, &LanLinkProvider::newConnection);

    m_udpSocket.setProxy(QNetworkProxy::NoProxy);

    //Detect when a network interface changes status, so we announce ourselves in the new network
    QNetworkConfigurationManager* networkManager = new QNetworkConfigurationManager(this);
    connect(networkManager, &QNetworkConfigurationManager::configurationChanged, this, &LanLinkProvider::onNetworkConfigurationChanged);
}

// Re-announce only when a *different* configuration becomes active,
// to avoid rebroadcasting on duplicate change notifications.
void LanLinkProvider::onNetworkConfigurationChanged(const QNetworkConfiguration& config)
{
    if (m_lastConfig != config && config.state() == QNetworkConfiguration::Active) {
        m_lastConfig = config;
        onNetworkChange();
    }
}

LanLinkProvider::~LanLinkProvider()
{
}

// Bind the UDP discovery socket and find a free TCP port in
// [MIN_TCP_PORT, MAX_TCP_PORT] for incoming links, then announce.
void LanLinkProvider::onStart()
{
    const QHostAddress bindAddress = m_testMode? QHostAddress::LocalHost : QHostAddress::Any;

    bool success = m_udpSocket.bind(bindAddress, m_udpListenPort, QUdpSocket::ShareAddress);
    if (!success) {
        QAbstractSocket::SocketError sockErr = m_udpSocket.error();
        // Refer to https://doc.qt.io/qt-5/qabstractsocket.html#SocketError-enum to decode socket error number
        QString errorMessage = QString::fromLatin1(QMetaEnum::fromType<QAbstractSocket::SocketError>().valueToKey(sockErr));
        qCritical(KDECONNECT_CORE)
                << QLatin1String("Failed to bind UDP socket on port")
                << m_udpListenPort
                << QLatin1String("with error")
                << errorMessage;
    }
    Q_ASSERT(success);

    m_tcpPort = MIN_TCP_PORT;
    while (!m_server->listen(bindAddress, m_tcpPort)) {
        m_tcpPort++;
        if (m_tcpPort > MAX_TCP_PORT) { //No ports available?
            qCritical(KDECONNECT_CORE) << "Error opening a port in range" << MIN_TCP_PORT << "-" << MAX_TCP_PORT;
            m_tcpPort = 0;
            return;
        }
    }

    onNetworkChange();
    qCDebug(KDECONNECT_CORE) << "LanLinkProvider started";
}

void LanLinkProvider::onStop()
{
    m_udpSocket.close();
    m_server->close();
    qCDebug(KDECONNECT_CORE) << "LanLinkProvider stopped";
}

// Coalesce bursts of network-change notifications into a single
// broadcast via the single-shot zero-interval timer.
void LanLinkProvider::onNetworkChange()
{
    if (m_combineBroadcastsTimer.isActive()) {
        qCDebug(KDECONNECT_CORE) << "Preventing duplicate broadcasts";
        return;
    }
    m_combineBroadcastsTimer.start();
}

//I'm in a new network, let's be polite and introduce myself
void LanLinkProvider::broadcastToNetwork()
{
    if (!m_server->isListening()) {
        //Not started
        return;
    }

    Q_ASSERT(m_tcpPort != 0);

    qCDebug(KDECONNECT_CORE()) << "Broadcasting identity packet";

    QList<QHostAddress> destinations = getBroadcastAddresses();

    NetworkPacket np;
    NetworkPacket::createIdentityPacket(&np);
    np.set(QStringLiteral("tcpPort"), m_tcpPort);

#ifdef Q_OS_WIN
    //On Windows we need to broadcast from every local IP address to reach all networks
    QUdpSocket sendSocket;
    sendSocket.setProxy(QNetworkProxy::NoProxy);
    for (const QNetworkInterface& iface : QNetworkInterface::allInterfaces()) {
        if ( (iface.flags() & QNetworkInterface::IsUp)
             && (iface.flags() & QNetworkInterface::IsRunning)
             && (iface.flags() & QNetworkInterface::CanBroadcast)) {
            for (const QNetworkAddressEntry& ifaceAddress : iface.addressEntries()) {
                QHostAddress sourceAddress = ifaceAddress.ip();
                if (sourceAddress.protocol() == QAbstractSocket::IPv4Protocol && sourceAddress != QHostAddress::LocalHost) {
                    // NOTE(review): sendSocket is never bound to sourceAddress
                    // before sending, so the "broadcast from every local IP"
                    // intent may not actually be achieved — confirm.
                    qCDebug(KDECONNECT_CORE()) << "Broadcasting as" << sourceAddress;
                    sendBroadcasts(sendSocket, np, destinations);
                    sendSocket.close();
                }
            }
        }
    }
#else
    sendBroadcasts(m_udpSocket, np, destinations);
#endif
}

// Returns the default broadcast address plus any user-configured
// custom device addresses (invalid entries are logged and skipped).
QList<QHostAddress> LanLinkProvider::getBroadcastAddresses()
{
    const QStringList customDevices = KdeConnectConfig::instance().customDevices();

    QList<QHostAddress> destinations;
    destinations.reserve(customDevices.length() + 1);

    // Default broadcast address
    destinations.append(m_testMode ? QHostAddress::LocalHost : QHostAddress::Broadcast);

    // Custom device addresses
    for (auto& customDevice : customDevices) {
        QHostAddress address(customDevice);
        if (address.isNull()) {
            qCWarning(KDECONNECT_CORE) << "Invalid custom device address" << customDevice;
        } else {
            destinations.append(address);
        }
    }

    return destinations;
}

// Serialize the packet once and send it to every destination on the
// configured UDP broadcast port.
void LanLinkProvider::sendBroadcasts(
    QUdpSocket& socket, const NetworkPacket& np, const QList<QHostAddress>& addresses)
{
    const QByteArray payload = np.serialize();

    for (auto& address : addresses) {
        socket.writeDatagram(payload, address, m_udpBroadcastPort);
    }
}

//I'm the existing device, a new device is kindly introducing itself.
//I will create a TcpSocket and try to connect. This can result in either tcpSocketConnected() or connectError().
void LanLinkProvider::udpBroadcastReceived()
{
    while (m_udpSocket.hasPendingDatagrams()) {
        QByteArray datagram;
        datagram.resize(m_udpSocket.pendingDatagramSize());
        QHostAddress sender;

        m_udpSocket.readDatagram(datagram.data(), datagram.size(), &sender);

        // ignore our own broadcasts looping back (unless in test mode)
        if (sender.isLoopback() && !m_testMode)
            continue;

        NetworkPacket* receivedPacket = new NetworkPacket(QLatin1String(""));
        bool success = NetworkPacket::unserialize(datagram, receivedPacket);

        //qCDebug(KDECONNECT_CORE) << "udp connection from " << receivedPacket->;

        //qCDebug(KDECONNECT_CORE) << "Datagram " << datagram.data() ;

        if (!success) {
            qCDebug(KDECONNECT_CORE) << "Could not unserialize UDP packet";
            delete receivedPacket;
            continue;
        }

        if (receivedPacket->type() != PACKET_TYPE_IDENTITY) {
            qCDebug(KDECONNECT_CORE) << "Received a UDP packet of wrong type" << receivedPacket->type();
            delete receivedPacket;
            continue;
        }

        if (receivedPacket->get<QString>(QStringLiteral("deviceId")) == KdeConnectConfig::instance().deviceId()) {
            //qCDebug(KDECONNECT_CORE) << "Ignoring my own broadcast";
            delete receivedPacket;
            continue;
        }

        int tcpPort = receivedPacket->get<int>(QStringLiteral("tcpPort"));

        //qCDebug(KDECONNECT_CORE) << "Received Udp identity packet from" << sender << " asking for a tcp connection on port " << tcpPort;

        // DoS guard: do not remember identity packets without bound
        if (m_receivedIdentityPackets.size() > MAX_REMEMBERED_IDENTITY_PACKETS) {
            qCWarning(KDECONNECT_CORE) << "Too many remembered identities, ignoring" << receivedPacket->get<QString>(QStringLiteral("deviceId")) << "received via UDP";
            delete receivedPacket;
            continue;
        }

        QSslSocket* socket = new QSslSocket(this);
        socket->setProxy(QNetworkProxy::NoProxy);
        // packet ownership moves to the map; freed in connectError /
        // tcpSocketConnected / encrypted / sslErrors
        m_receivedIdentityPackets[socket].np = receivedPacket;
        m_receivedIdentityPackets[socket].sender = sender;
        connect(socket, &QAbstractSocket::connected, this, &LanLinkProvider::tcpSocketConnected);
#if QT_VERSION < QT_VERSION_CHECK(5,15,0)
        connect(socket, QOverload<QAbstractSocket::SocketError>::of(&QAbstractSocket::error), this, &LanLinkProvider::connectError);
#else
        connect(socket, &QAbstractSocket::errorOccurred, this, &LanLinkProvider::connectError);
#endif
        socket->connectToHost(sender, tcpPort);
    }
}

// Our outgoing TCP connect failed: answer the peer's UDP announcement with
// our own identity packet so they can connect to us instead (reverse path).
void LanLinkProvider::connectError(QAbstractSocket::SocketError socketError)
{
    QSslSocket* socket = qobject_cast<QSslSocket*>(sender());
    if (!socket) return;

    qCDebug(KDECONNECT_CORE) << "Socket error" << socketError;
    qCDebug(KDECONNECT_CORE) << "Fallback (1), try reverse connection (send udp packet)" << socket->errorString();
    NetworkPacket np(QLatin1String(""));
    NetworkPacket::createIdentityPacket(&np);
    np.set(QStringLiteral("tcpPort"), m_tcpPort);
    m_udpSocket.writeDatagram(np.serialize(), m_receivedIdentityPackets[socket].sender, m_udpBroadcastPort);

    //The socket we created didn't work, and we didn't manage
    //to create a LanDeviceLink from it, deleting everything.
    delete m_receivedIdentityPackets.take(socket).np;
    socket->deleteLater();
}

//We received a UDP packet and answered by connecting to them by TCP. This gets called on a successful connection.
void LanLinkProvider::tcpSocketConnected()
{
    QSslSocket* socket = qobject_cast<QSslSocket*>(sender());

    if (!socket) return;
    // TODO Delete me?
#if QT_VERSION < QT_VERSION_CHECK(5,15,0)
    disconnect(socket, QOverload<QAbstractSocket::SocketError>::of(&QAbstractSocket::error),
               this, &LanLinkProvider::connectError);
#else
    disconnect(socket, &QAbstractSocket::errorOccurred,
               this, &LanLinkProvider::connectError);
#endif

    configureSocket(socket);

    // If socket disconnects due to any reason after connection, link on ssl failure
    connect(socket, &QAbstractSocket::disconnected, socket, &QObject::deleteLater);

    NetworkPacket* receivedPacket = m_receivedIdentityPackets[socket].np;
    const QString& deviceId = receivedPacket->get<QString>(QStringLiteral("deviceId"));
    //qCDebug(KDECONNECT_CORE) << "tcpSocketConnected" << socket->isWritable();

    // If network is on ssl, do not believe when they are connected, believe when handshake is completed
    NetworkPacket np2(QLatin1String(""));
    NetworkPacket::createIdentityPacket(&np2);
    socket->write(np2.serialize());
    bool success = socket->waitForBytesWritten();

    if (success) {

        qCDebug(KDECONNECT_CORE) << "TCP connection done (i'm the existing device)";

        // if ssl supported
        if (receivedPacket->get<int>(QStringLiteral("protocolVersion")) >= MIN_VERSION_WITH_SSL_SUPPORT) {

            bool isDeviceTrusted = KdeConnectConfig::instance().trustedDevices().contains(deviceId);
            configureSslSocket(socket, deviceId, isDeviceTrusted);

            qCDebug(KDECONNECT_CORE) << "Starting server ssl (I'm the client TCP socket)";

            connect(socket, &QSslSocket::encrypted, this, &LanLinkProvider::encrypted);

            connect(socket, QOverload<const QList<QSslError> &>::of(&QSslSocket::sslErrors), this, &LanLinkProvider::sslErrors);

            socket->startServerEncryption();

            return; // Return statement prevents from deleting received packet, needed in slot "encrypted"
        } else {
            qWarning() << receivedPacket->get<QString>(QStringLiteral("deviceName")) << "uses an old protocol version, this won't work";
            //addLink(deviceId, socket, receivedPacket, LanDeviceLink::Remotely);
        }

    } else {
        //I think this will never happen, but if it happens the deviceLink
        //(or the socket that is now inside it) might not be valid. Delete them.
        qCDebug(KDECONNECT_CORE) << "Fallback (2), try reverse connection (send udp packet)";
        m_udpSocket.writeDatagram(np2.serialize(), m_receivedIdentityPackets[socket].sender, m_udpBroadcastPort);
    }

    delete m_receivedIdentityPackets.take(socket).np;
    //We don't delete the socket because now it's owned by the LanDeviceLink
}

// SSL handshake finished on either side: create/reuse the device link.
void LanLinkProvider::encrypted()
{
    qCDebug(KDECONNECT_CORE) << "Socket successfully established an SSL connection";

    QSslSocket* socket = qobject_cast<QSslSocket*>(sender());
    if (!socket) return;

    Q_ASSERT(socket->mode() != QSslSocket::UnencryptedMode);
    // SslClientMode means we were the TCP server that initiated TLS as client
    LanDeviceLink::ConnectionStarted connectionOrigin = (socket->mode() == QSslSocket::SslClientMode)? LanDeviceLink::Locally : LanDeviceLink::Remotely;

    NetworkPacket* receivedPacket = m_receivedIdentityPackets[socket].np;
    const QString& deviceId = receivedPacket->get<QString>(QStringLiteral("deviceId"));

    addLink(deviceId, socket, receivedPacket, connectionOrigin);

    // Copied from tcpSocketConnected slot, now delete received packet
    delete m_receivedIdentityPackets.take(socket).np;
}

// Self-signed certificates are expected (peers present their own cert);
// any other SSL error is fatal and tears the connection down.
void LanLinkProvider::sslErrors(const QList<QSslError>& errors)
{
    QSslSocket* socket = qobject_cast<QSslSocket*>(sender());
    if (!socket) return;

    bool fatal = false;
    for (const QSslError& error : errors) {
        if (error.error() != QSslError::SelfSignedCertificate) {
            qCCritical(KDECONNECT_CORE) << "Disconnecting due to fatal SSL Error: " << error;
            fatal = true;
        } else {
            qCDebug(KDECONNECT_CORE) << "Ignoring self-signed cert error";
        }
    }

    if (fatal) {
        socket->disconnectFromHost();
        delete m_receivedIdentityPackets.take(socket).np;
    }
}

//I'm the new device and this is the answer to my UDP identity packet (no data received yet). They are connecting to us through TCP, and they should send an identity.
void LanLinkProvider::newConnection()
{
    qCDebug(KDECONNECT_CORE) << "LanLinkProvider newConnection";

    while (m_server->hasPendingConnections()) {
        QSslSocket* socket = m_server->nextPendingConnection();
        configureSocket(socket);
        //This socket is still managed by us (and child of the QTcpServer), if
        //it disconnects before we manage to pass it to a LanDeviceLink, it's
        //our responsibility to delete it. We do so with this connection.
        connect(socket, &QAbstractSocket::disconnected,
                socket, &QObject::deleteLater);

        connect(socket, &QIODevice::readyRead,
                this, &LanLinkProvider::dataReceived);

        // Drop peers that connect but never send an identity packet
        // (anti-resource-exhaustion: one-second deadline per connection).
        QTimer* timer = new QTimer(socket);
        timer->setSingleShot(true);
        timer->setInterval(1000);
        connect(socket, &QSslSocket::encrypted,
                timer, &QObject::deleteLater);
        connect(timer, &QTimer::timeout, socket, [socket] {
            qCWarning(KDECONNECT_CORE) << "LanLinkProvider/newConnection: Host timed out without sending any identity." << socket->peerAddress();
            socket->disconnectFromHost();
        });
        timer->start();
    }
}

//I'm the new device and this is the answer to my UDP identity packet (data received)
void LanLinkProvider::dataReceived()
{
    QSslSocket* socket = qobject_cast<QSslSocket*>(sender());
    //the size here is arbitrary and is now at 8192 bytes. It needs to be considerably long as it includes the capabilities but there needs to be a limit
    //Tested between my systems and I get around 2000 per identity package.
    if (socket->bytesAvailable() > 8192) {
        qCWarning(KDECONNECT_CORE) << "LanLinkProvider/newConnection: Suspiciously long identity package received. Closing connection." << socket->peerAddress() << socket->bytesAvailable();
        socket->disconnectFromHost();
        return;
    }

#if QT_VERSION < QT_VERSION_CHECK(5,7,0)
    if (!socket->canReadLine())
        return;
#else
    // transaction lets us roll back if only a partial line has arrived
    socket->startTransaction();
#endif

    const QByteArray data = socket->readLine();

    qCDebug(KDECONNECT_CORE) << "LanLinkProvider received reply:" << data;

    NetworkPacket* np = new NetworkPacket(QLatin1String(""));
    bool success = NetworkPacket::unserialize(data, np);

#if QT_VERSION < QT_VERSION_CHECK(5,7,0)
    if (!success) {
        delete np;
        return;
    }
#else
    if (!success) {
        delete np;
        socket->rollbackTransaction();
        return;
    }
    socket->commitTransaction();
#endif

    if (np->type() != PACKET_TYPE_IDENTITY) {
        qCWarning(KDECONNECT_CORE) << "LanLinkProvider/newConnection: Expected identity, received " << np->type();
        delete np;
        return;
    }

    // DoS guard: bounded identity-packet bookkeeping
    if (m_receivedIdentityPackets.size() > MAX_REMEMBERED_IDENTITY_PACKETS) {
        qCWarning(KDECONNECT_CORE) << "Too many remembered identities, ignoring" << np->get<QString>(QStringLiteral("deviceId")) << "received via TCP";
        delete np;
        return;
    }

    // Needed in "encrypted" if ssl is used, similar to "tcpSocketConnected"
    m_receivedIdentityPackets[socket].np = np;

    const QString& deviceId = np->get<QString>(QStringLiteral("deviceId"));
    //qCDebug(KDECONNECT_CORE) << "Handshaking done (i'm the new device)";

    //This socket will now be owned by the LanDeviceLink or we don't want more data to be received, forget about it
    disconnect(socket, &QIODevice::readyRead, this, &LanLinkProvider::dataReceived);

    if (np->get<int>(QStringLiteral("protocolVersion")) >= MIN_VERSION_WITH_SSL_SUPPORT) {

        bool isDeviceTrusted = KdeConnectConfig::instance().trustedDevices().contains(deviceId);
        configureSslSocket(socket, deviceId, isDeviceTrusted);

        qCDebug(KDECONNECT_CORE) << "Starting client ssl (but I'm the server TCP socket)";

        connect(socket, &QSslSocket::encrypted, this, &LanLinkProvider::encrypted);

        if (isDeviceTrusted) {
            connect(socket, QOverload<const QList<QSslError> &>::of(&QSslSocket::sslErrors), this, &LanLinkProvider::sslErrors);
        }

        socket->startClientEncryption();

    } else {
        qWarning() << np->get<QString>(QStringLiteral("deviceName")) << "uses an old protocol version, this won't work";
        //addLink(deviceId, socket, np, LanDeviceLink::Locally);
        delete m_receivedIdentityPackets.take(socket).np;
    }
}

// Drop our bookkeeping (link map entry and pairing handler) when a
// LanDeviceLink is destroyed.
void LanLinkProvider::deviceLinkDestroyed(QObject* destroyedDeviceLink)
{
    const QString id = destroyedDeviceLink->property("deviceId").toString();
    //qCDebug(KDECONNECT_CORE) << "deviceLinkDestroyed" << id;
    QMap< QString, LanDeviceLink* >::iterator linkIterator = m_links.find(id);
    Q_ASSERT(linkIterator != m_links.end());
    if (linkIterator != m_links.end()) {
        Q_ASSERT(linkIterator.value() == destroyedDeviceLink);
        m_links.erase(linkIterator);
        auto pairingHandler = m_pairingHandlers.take(id);
        if (pairingHandler) {
            pairingHandler->deleteLater();
        }
    }
}

// Apply our TLS parameters: cipher list matching the Android client, our
// certificate + RSA key, and peer verification against the stored device
// certificate when the device is already trusted (QueryPeer otherwise).
void LanLinkProvider::configureSslSocket(QSslSocket* socket, const QString& deviceId, bool isDeviceTrusted)
{
    // Setting supported ciphers manually, to match those on Android (FIXME: Test if this can be left unconfigured and still works for Android 4)
    QList<QSslCipher> socketCiphers;
    socketCiphers.append(QSslCipher(QStringLiteral("ECDHE-ECDSA-AES256-GCM-SHA384")));
    socketCiphers.append(QSslCipher(QStringLiteral("ECDHE-ECDSA-AES128-GCM-SHA256")));
    socketCiphers.append(QSslCipher(QStringLiteral("ECDHE-RSA-AES128-SHA")));

    // Configure for ssl
    QSslConfiguration sslConfig;
    sslConfig.setCiphers(socketCiphers);
    sslConfig.setLocalCertificate(KdeConnectConfig::instance().certificate());

    QFile privateKeyFile(KdeConnectConfig::instance().privateKeyPath());
    QSslKey privateKey;
    if (privateKeyFile.open(QIODevice::ReadOnly)) {
        privateKey = QSslKey(privateKeyFile.readAll(), QSsl::Rsa);
    }
    privateKeyFile.close();
    sslConfig.setPrivateKey(privateKey);

    if (isDeviceTrusted) {
        QString certString = KdeConnectConfig::instance().getDeviceProperty(deviceId, QStringLiteral("certificate"), QString());
        sslConfig.setCaCertificates({QSslCertificate(certString.toLatin1())});
        sslConfig.setPeerVerifyMode(QSslSocket::VerifyPeer);
    } else {
        sslConfig.setPeerVerifyMode(QSslSocket::QueryPeer);
    }
    socket->setSslConfiguration(sslConfig);
    socket->setPeerVerifyName(deviceId);

    //Usually SSL errors are only bad for trusted devices. Uncomment this section to log errors in any case, for debugging.
    //QObject::connect(socket, static_cast<void (QSslSocket::*)(const QList<QSslError>&)>(&QSslSocket::sslErrors), [](const QList<QSslError>& errors)
    //{
    //    Q_FOREACH (const QSslError& error, errors) {
    //        qCDebug(KDECONNECT_CORE) << "SSL Error:" << error.errorString();
    //    }
    //});
}

// Enable TCP keepalive on the socket so dead peers are detected;
// the TCP_KEEP* options are applied only where the platform defines them.
void LanLinkProvider::configureSocket(QSslSocket* socket) {

    socket->setProxy(QNetworkProxy::NoProxy);

    socket->setSocketOption(QAbstractSocket::KeepAliveOption, QVariant(1));

#ifdef TCP_KEEPIDLE
    // time to start sending keepalive packets (seconds)
    int maxIdle = 10;
    setsockopt(socket->socketDescriptor(), IPPROTO_TCP, TCP_KEEPIDLE, &maxIdle, sizeof(maxIdle));
#endif

#ifdef TCP_KEEPINTVL
    // interval between keepalive packets after the initial period (seconds)
    int interval = 5;
    setsockopt(socket->socketDescriptor(), IPPROTO_TCP, TCP_KEEPINTVL, &interval, sizeof(interval));
#endif

#ifdef TCP_KEEPCNT
    // number of missed keepalive packets before disconnecting
    int count = 3;
    setsockopt(socket->socketDescriptor(), IPPROTO_TCP, TCP_KEEPCNT, &count, sizeof(count));
#endif

}

// Create a LanDeviceLink for deviceId (or reset an existing one with the
// new socket) and notify listeners. Unpaired devices are capped at
// MAX_UNPAIRED_CONNECTIONS to bound memory use.
void LanLinkProvider::addLink(const QString& deviceId, QSslSocket* socket, NetworkPacket* receivedPacket, LanDeviceLink::ConnectionStarted connectionOrigin)
{
    // Socket disconnection will now be handled by LanDeviceLink
    disconnect(socket, &QAbstractSocket::disconnected, socket, &QObject::deleteLater);

    LanDeviceLink* deviceLink;
    //Do we have a link for this device already?
    QMap< QString, LanDeviceLink* >::iterator linkIterator = m_links.find(deviceId);
    if (linkIterator != m_links.end()) {
        //qCDebug(KDECONNECT_CORE) << "Reusing link to" << deviceId;
        deviceLink = linkIterator.value();
        deviceLink->reset(socket, connectionOrigin);
    } else {
        deviceLink = new LanDeviceLink(deviceId, this, socket, connectionOrigin);
        // Socket disconnection will now be handled by LanDeviceLink
        disconnect(socket, &QAbstractSocket::disconnected, socket, &QObject::deleteLater);
        bool isDeviceTrusted = KdeConnectConfig::instance().trustedDevices().contains(deviceId);
        if (!isDeviceTrusted && m_links.size() > MAX_UNPAIRED_CONNECTIONS) {
            // NOTE(review): deviceLink was allocated above and is not deleted
            // on this early-return path — looks like a leak; confirm whether
            // LanDeviceLink parents itself to something that reclaims it.
            qCWarning(KDECONNECT_CORE) << "Too many unpaired devices to remember them all. Ignoring " << deviceId;
            socket->disconnectFromHost();
            socket->deleteLater();
            return;
        }
        connect(deviceLink, &QObject::destroyed, this, &LanLinkProvider::deviceLinkDestroyed);
        m_links[deviceId] = deviceLink;
        if (m_pairingHandlers.contains(deviceId)) {
            //We shouldn't have a pairinghandler if we didn't have a link.
            //Crash if debug, recover if release (by setting the new devicelink to the old pairinghandler)
            Q_ASSERT(m_pairingHandlers.contains(deviceId));
            m_pairingHandlers[deviceId]->setDeviceLink(deviceLink);
        }
    }
    Q_EMIT onConnectionReceived(*receivedPacket, deviceLink);
}

// Lazily create (and cache) the pairing handler for a link's device.
LanPairingHandler* LanLinkProvider::createPairingHandler(DeviceLink* link)
{
    LanPairingHandler* ph = m_pairingHandlers.value(link->deviceId());
    if (!ph) {
        ph = new LanPairingHandler(link);
        qCDebug(KDECONNECT_CORE) << "creating pairing handler for" << link->deviceId();
        connect (ph, &LanPairingHandler::pairingError, link, &DeviceLink::pairingError);
        m_pairingHandlers[link->deviceId()] = ph;
    }
    return ph;
}

void LanLinkProvider::userRequestsPair(const QString& deviceId)
{
    LanPairingHandler* ph = createPairingHandler(m_links.value(deviceId));
    ph->requestPairing();
}

void LanLinkProvider::userRequestsUnpair(const QString& deviceId)
{
    LanPairingHandler* ph = createPairingHandler(m_links.value(deviceId));
    ph->unpair();
}

void LanLinkProvider::incomingPairPacket(DeviceLink* deviceLink, const NetworkPacket& np)
{
    LanPairingHandler* ph = createPairingHandler(deviceLink);
    ph->packetReceived(np);
}
./CrossVul/dataset_final_sorted/CWE-400/cpp/good_4322_0
crossvul-cpp_data_bad_4324_0
/**
 * SPDX-FileCopyrightText: 2013 Albert Vaca <albertvaka@gmail.com>
 *
 * SPDX-License-Identifier: GPL-2.0-only OR GPL-3.0-only OR LicenseRef-KDE-Accepted-GPL
 */

#include "lanlinkprovider.h"
#include "core_debug.h"

#ifndef Q_OS_WIN
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <netdb.h>
#endif

#include <QHostInfo>
#include <QTcpServer>
#include <QMetaEnum>
#include <QNetworkProxy>
#include <QUdpSocket>
#include <QNetworkSession>
#include <QNetworkConfigurationManager>
#include <QSslCipher>
#include <QSslConfiguration>
#include <QSslKey>

#include "daemon.h"
#include "landevicelink.h"
#include "lanpairinghandler.h"
#include "kdeconnectconfig.h"
#include "qtcompat_p.h"

#define MIN_VERSION_WITH_SSL_SUPPORT 6

// Upper bounds limiting how much per-peer state untrusted hosts on the LAN
// can make us retain (links to unpaired devices / cached identity packets).
static const int MAX_UNPAIRED_CONNECTIONS = 42;
static const int MAX_REMEMBERED_IDENTITY_PACKETS = 42;

// Sets up the UDP discovery socket, the TCP server and a zero-interval
// single-shot timer used to coalesce several broadcast requests that arrive
// in the same event-loop iteration into one announcement.
LanLinkProvider::LanLinkProvider(
        bool testMode,
        quint16 udpBroadcastPort,
        quint16 udpListenPort)
    : m_server(new Server(this))
    , m_udpSocket(this)
    , m_tcpPort(0)
    , m_udpBroadcastPort(udpBroadcastPort)
    , m_udpListenPort(udpListenPort)
    , m_testMode(testMode)
    , m_combineBroadcastsTimer(this)
{
    m_combineBroadcastsTimer.setInterval(0); // increase this if waiting a single event-loop iteration is not enough
    m_combineBroadcastsTimer.setSingleShot(true);
    connect(&m_combineBroadcastsTimer, &QTimer::timeout, this, &LanLinkProvider::broadcastToNetwork);

    connect(&m_udpSocket, &QIODevice::readyRead, this, &LanLinkProvider::udpBroadcastReceived);

    m_server->setProxy(QNetworkProxy::NoProxy);
    connect(m_server, &QTcpServer::newConnection, this, &LanLinkProvider::newConnection);

    m_udpSocket.setProxy(QNetworkProxy::NoProxy);

    //Detect when a network interface changes status, so we announce ourselves in the new network
    QNetworkConfigurationManager* networkManager = new QNetworkConfigurationManager(this);
    connect(networkManager, &QNetworkConfigurationManager::configurationChanged, this, &LanLinkProvider::onNetworkConfigurationChanged);
}

// Re-announces ourselves when a network configuration becomes active
// (deduplicated against the last configuration we reacted to).
void LanLinkProvider::onNetworkConfigurationChanged(const QNetworkConfiguration& config)
{
    if (m_lastConfig != config && config.state() == QNetworkConfiguration::Active) {
        m_lastConfig = config;
        onNetworkChange();
    }
}

LanLinkProvider::~LanLinkProvider()
{
}

// Binds the UDP discovery socket and finds a free TCP port in
// [MIN_TCP_PORT, MAX_TCP_PORT] for incoming connections, then announces.
void LanLinkProvider::onStart()
{
    const QHostAddress bindAddress = m_testMode? QHostAddress::LocalHost : QHostAddress::Any;

    bool success = m_udpSocket.bind(bindAddress, m_udpListenPort, QUdpSocket::ShareAddress);
    if (!success) {
        QAbstractSocket::SocketError sockErr = m_udpSocket.error();
        // Refer to https://doc.qt.io/qt-5/qabstractsocket.html#SocketError-enum to decode socket error number
        QString errorMessage = QString::fromLatin1(QMetaEnum::fromType<QAbstractSocket::SocketError>().valueToKey(sockErr));
        qCritical(KDECONNECT_CORE)
            << QLatin1String("Failed to bind UDP socket on port")
            << m_udpListenPort
            << QLatin1String("with error")
            << errorMessage;
    }
    Q_ASSERT(success);

    m_tcpPort = MIN_TCP_PORT;
    while (!m_server->listen(bindAddress, m_tcpPort)) {
        m_tcpPort++;
        if (m_tcpPort > MAX_TCP_PORT) { //No ports available?
            qCritical(KDECONNECT_CORE) << "Error opening a port in range" << MIN_TCP_PORT << "-" << MAX_TCP_PORT;
            m_tcpPort = 0;
            return;
        }
    }

    onNetworkChange();
    qCDebug(KDECONNECT_CORE) << "LanLinkProvider started";
}

// Closes the discovery socket and the TCP server.
void LanLinkProvider::onStop()
{
    m_udpSocket.close();
    m_server->close();
    qCDebug(KDECONNECT_CORE) << "LanLinkProvider stopped";
}

// Schedules a broadcast; multiple triggers within one event-loop iteration
// are collapsed into a single broadcastToNetwork() via the zero-ms timer.
void LanLinkProvider::onNetworkChange()
{
    if (m_combineBroadcastsTimer.isActive()) {
        qCDebug(KDECONNECT_CORE) << "Preventing duplicate broadcasts";
        return;
    }
    m_combineBroadcastsTimer.start();
}

//I'm in a new network, let's be polite and introduce myself
void LanLinkProvider::broadcastToNetwork()
{
    if (!m_server->isListening()) {
        //Not started
        return;
    }

    Q_ASSERT(m_tcpPort != 0);

    qCDebug(KDECONNECT_CORE()) << "Broadcasting identity packet";

    QList<QHostAddress> destinations = getBroadcastAddresses();

    NetworkPacket np;
    NetworkPacket::createIdentityPacket(&np);
    np.set(QStringLiteral("tcpPort"), m_tcpPort);

#ifdef Q_OS_WIN
    //On Windows we need to broadcast from every local IP address to reach all networks
    QUdpSocket sendSocket;
    sendSocket.setProxy(QNetworkProxy::NoProxy);
    for (const QNetworkInterface& iface : QNetworkInterface::allInterfaces()) {
        if ((iface.flags() & QNetworkInterface::IsUp)
                && (iface.flags() & QNetworkInterface::IsRunning)
                && (iface.flags() & QNetworkInterface::CanBroadcast)) {
            for (const QNetworkAddressEntry& ifaceAddress : iface.addressEntries()) {
                QHostAddress sourceAddress = ifaceAddress.ip();
                if (sourceAddress.protocol() == QAbstractSocket::IPv4Protocol && sourceAddress != QHostAddress::LocalHost) {
                    qCDebug(KDECONNECT_CORE()) << "Broadcasting as" << sourceAddress;
                    sendBroadcasts(sendSocket, np, destinations);
                    sendSocket.close();
                }
            }
        }
    }
#else
    sendBroadcasts(m_udpSocket, np, destinations);
#endif
}

// Returns the broadcast address (or localhost in test mode) plus any valid
// user-configured custom device addresses.
QList<QHostAddress> LanLinkProvider::getBroadcastAddresses()
{
    const QStringList customDevices = KdeConnectConfig::instance().customDevices();

    QList<QHostAddress> destinations;
    destinations.reserve(customDevices.length() + 1);

    // Default broadcast address
    destinations.append(m_testMode ? QHostAddress::LocalHost : QHostAddress::Broadcast);

    // Custom device addresses
    for (auto& customDevice : customDevices) {
        QHostAddress address(customDevice);
        if (address.isNull()) {
            qCWarning(KDECONNECT_CORE) << "Invalid custom device address" << customDevice;
        } else {
            destinations.append(address);
        }
    }

    return destinations;
}

// Serializes the identity packet once and sends it to every destination.
void LanLinkProvider::sendBroadcasts(QUdpSocket& socket, const NetworkPacket& np, const QList<QHostAddress>& addresses)
{
    const QByteArray payload = np.serialize();

    for (auto& address : addresses) {
        socket.writeDatagram(payload, address, m_udpBroadcastPort);
    }
}

//I'm the existing device, a new device is kindly introducing itself.
//I will create a TcpSocket and try to connect. This can result in either tcpSocketConnected() or connectError().
void LanLinkProvider::udpBroadcastReceived()
{
    while (m_udpSocket.hasPendingDatagrams()) {
        QByteArray datagram;
        datagram.resize(m_udpSocket.pendingDatagramSize());
        QHostAddress sender;

        m_udpSocket.readDatagram(datagram.data(), datagram.size(), &sender);

        if (sender.isLoopback() && !m_testMode)
            continue;

        // Heap-allocated: ownership moves into m_receivedIdentityPackets
        // below; every early-continue path must delete it first.
        NetworkPacket* receivedPacket = new NetworkPacket(QLatin1String(""));
        bool success = NetworkPacket::unserialize(datagram, receivedPacket);

        //qCDebug(KDECONNECT_CORE) << "udp connection from " << receivedPacket->;
        //qCDebug(KDECONNECT_CORE) << "Datagram " << datagram.data() ;

        if (!success) {
            qCDebug(KDECONNECT_CORE) << "Could not unserialize UDP packet";
            delete receivedPacket;
            continue;
        }

        if (receivedPacket->type() != PACKET_TYPE_IDENTITY) {
            qCDebug(KDECONNECT_CORE) << "Received a UDP packet of wrong type" << receivedPacket->type();
            delete receivedPacket;
            continue;
        }

        if (receivedPacket->get<QString>(QStringLiteral("deviceId")) == KdeConnectConfig::instance().deviceId()) {
            //qCDebug(KDECONNECT_CORE) << "Ignoring my own broadcast";
            delete receivedPacket;
            continue;
        }

        // NOTE(review): deviceId is used as-is from the wire here; downstream
        // code stores per-device files keyed by it — confirm it is validated
        // (non-empty, safe characters) before reaching persistent storage.
        int tcpPort = receivedPacket->get<int>(QStringLiteral("tcpPort"));

        //qCDebug(KDECONNECT_CORE) << "Received Udp identity packet from" << sender << " asking for a tcp connection on port " << tcpPort;

        if (m_receivedIdentityPackets.size() > MAX_REMEMBERED_IDENTITY_PACKETS) {
            qCWarning(KDECONNECT_CORE) << "Too many remembered identities, ignoring" << receivedPacket->get<QString>(QStringLiteral("deviceId")) << "received via UDP";
            delete receivedPacket;
            continue;
        }

        QSslSocket* socket = new QSslSocket(this);
        socket->setProxy(QNetworkProxy::NoProxy);
        m_receivedIdentityPackets[socket].np = receivedPacket;
        m_receivedIdentityPackets[socket].sender = sender;
        connect(socket, &QAbstractSocket::connected, this, &LanLinkProvider::tcpSocketConnected);
#if QT_VERSION < QT_VERSION_CHECK(5,15,0)
        connect(socket, QOverload<QAbstractSocket::SocketError>::of(&QAbstractSocket::error), this, &LanLinkProvider::connectError);
#else
        connect(socket, &QAbstractSocket::errorOccurred, this, &LanLinkProvider::connectError);
#endif
        socket->connectToHost(sender, tcpPort);
    }
}

// Our outgoing TCP connection failed; fall back to announcing ourselves via
// UDP so the peer can connect to us instead, then clean up the cached state.
void LanLinkProvider::connectError(QAbstractSocket::SocketError socketError)
{
    QSslSocket* socket = qobject_cast<QSslSocket*>(sender());
    if (!socket) return;

    qCDebug(KDECONNECT_CORE) << "Socket error" << socketError;
    qCDebug(KDECONNECT_CORE) << "Fallback (1), try reverse connection (send udp packet)" << socket->errorString();
    NetworkPacket np(QLatin1String(""));
    NetworkPacket::createIdentityPacket(&np);
    np.set(QStringLiteral("tcpPort"), m_tcpPort);
    m_udpSocket.writeDatagram(np.serialize(), m_receivedIdentityPackets[socket].sender, m_udpBroadcastPort);

    //The socket we created didn't work, and we didn't manage
    //to create a LanDeviceLink from it, deleting everything.
    delete m_receivedIdentityPackets.take(socket).np;
    socket->deleteLater();
}

//We received a UDP packet and answered by connecting to them by TCP. This gets called on a successful connection.
void LanLinkProvider::tcpSocketConnected()
{
    QSslSocket* socket = qobject_cast<QSslSocket*>(sender());

    if (!socket) return;
    // TODO Delete me?
#if QT_VERSION < QT_VERSION_CHECK(5,15,0)
    disconnect(socket, QOverload<QAbstractSocket::SocketError>::of(&QAbstractSocket::error), this, &LanLinkProvider::connectError);
#else
    disconnect(socket, &QAbstractSocket::errorOccurred, this, &LanLinkProvider::connectError);
#endif

    configureSocket(socket);

    // If socket disconnects due to any reason after connection, link on ssl failure
    connect(socket, &QAbstractSocket::disconnected, socket, &QObject::deleteLater);

    NetworkPacket* receivedPacket = m_receivedIdentityPackets[socket].np;
    const QString& deviceId = receivedPacket->get<QString>(QStringLiteral("deviceId"));
    //qCDebug(KDECONNECT_CORE) << "tcpSocketConnected" << socket->isWritable();

    // If network is on ssl, do not believe when they are connected, believe when handshake is completed
    NetworkPacket np2(QLatin1String(""));
    NetworkPacket::createIdentityPacket(&np2);
    socket->write(np2.serialize());
    bool success = socket->waitForBytesWritten();

    if (success) {

        qCDebug(KDECONNECT_CORE) << "TCP connection done (i'm the existing device)";

        // if ssl supported
        if (receivedPacket->get<int>(QStringLiteral("protocolVersion")) >= MIN_VERSION_WITH_SSL_SUPPORT) {

            bool isDeviceTrusted = KdeConnectConfig::instance().trustedDevices().contains(deviceId);
            configureSslSocket(socket, deviceId, isDeviceTrusted);

            qCDebug(KDECONNECT_CORE) << "Starting server ssl (I'm the client TCP socket)";

            connect(socket, &QSslSocket::encrypted, this, &LanLinkProvider::encrypted);

            connect(socket, QOverload<const QList<QSslError> &>::of(&QSslSocket::sslErrors), this, &LanLinkProvider::sslErrors);

            socket->startServerEncryption();

            return; // Return statement prevents from deleting received packet, needed in slot "encrypted"
        } else {
            qWarning() << receivedPacket->get<QString>(QStringLiteral("deviceName")) << "uses an old protocol version, this won't work";
            //addLink(deviceId, socket, receivedPacket, LanDeviceLink::Remotely);
        }

    } else {
        //I think this will never happen, but if it happens the deviceLink
        //(or the socket that is now inside it) might not be valid. Delete them.
        qCDebug(KDECONNECT_CORE) << "Fallback (2), try reverse connection (send udp packet)";
        m_udpSocket.writeDatagram(np2.serialize(), m_receivedIdentityPackets[socket].sender, m_udpBroadcastPort);
    }

    delete m_receivedIdentityPackets.take(socket).np;
    //We don't delete the socket because now it's owned by the LanDeviceLink
}

// TLS handshake completed (either direction); promote the socket into a
// LanDeviceLink and drop the cached identity packet.
void LanLinkProvider::encrypted()
{
    qCDebug(KDECONNECT_CORE) << "Socket successfully established an SSL connection";

    QSslSocket* socket = qobject_cast<QSslSocket*>(sender());
    if (!socket) return;

    Q_ASSERT(socket->mode() != QSslSocket::UnencryptedMode);
    LanDeviceLink::ConnectionStarted connectionOrigin = (socket->mode() == QSslSocket::SslClientMode)? LanDeviceLink::Locally : LanDeviceLink::Remotely;

    NetworkPacket* receivedPacket = m_receivedIdentityPackets[socket].np;
    const QString& deviceId = receivedPacket->get<QString>(QStringLiteral("deviceId"));

    addLink(deviceId, socket, receivedPacket, connectionOrigin);

    // Copied from tcpSocketConnected slot, now delete received packet
    delete m_receivedIdentityPackets.take(socket).np;
}

// Any SSL error other than SelfSignedCertificate is fatal: disconnect and
// discard the cached identity packet for that socket.
void LanLinkProvider::sslErrors(const QList<QSslError>& errors)
{
    QSslSocket* socket = qobject_cast<QSslSocket*>(sender());
    if (!socket) return;

    bool fatal = false;
    for (const QSslError& error : errors) {
        if (error.error() != QSslError::SelfSignedCertificate) {
            qCCritical(KDECONNECT_CORE) << "Disconnecting due to fatal SSL Error: " << error;
            fatal = true;
        } else {
            qCDebug(KDECONNECT_CORE) << "Ignoring self-signed cert error";
        }
    }

    if (fatal) {
        socket->disconnectFromHost();
        delete m_receivedIdentityPackets.take(socket).np;
    }
}

//I'm the new device and this is the answer to my UDP identity packet (no data received yet). They are connecting to us through TCP, and they should send an identity.
void LanLinkProvider::newConnection()
{
    qCDebug(KDECONNECT_CORE) << "LanLinkProvider newConnection";

    while (m_server->hasPendingConnections()) {
        QSslSocket* socket = m_server->nextPendingConnection();
        configureSocket(socket);
        //This socket is still managed by us (and child of the QTcpServer), if
        //it disconnects before we manage to pass it to a LanDeviceLink, it's
        //our responsibility to delete it. We do so with this connection.
        connect(socket, &QAbstractSocket::disconnected, socket, &QObject::deleteLater);
        connect(socket, &QIODevice::readyRead, this, &LanLinkProvider::dataReceived);

        // Drop connections that never send an identity within 1s so idle
        // sockets cannot be parked on us indefinitely.
        QTimer* timer = new QTimer(socket);
        timer->setSingleShot(true);
        timer->setInterval(1000);
        connect(socket, &QSslSocket::encrypted, timer, &QObject::deleteLater);
        connect(timer, &QTimer::timeout, socket, [socket] {
            qCWarning(KDECONNECT_CORE) << "LanLinkProvider/newConnection: Host timed out without sending any identity." << socket->peerAddress();
            socket->disconnectFromHost();
        });
        timer->start();
    }
}

//I'm the new device and this is the answer to my UDP identity packet (data received)
void LanLinkProvider::dataReceived()
{
    QSslSocket* socket = qobject_cast<QSslSocket*>(sender());
    //the size here is arbitrary and is now at 8192 bytes. It needs to be considerably long as it includes the capabilities but there needs to be a limit
    //Tested between my systems and I get around 2000 per identity package.
    if (socket->bytesAvailable() > 8192) {
        qCWarning(KDECONNECT_CORE) << "LanLinkProvider/newConnection: Suspiciously long identity package received. Closing connection." << socket->peerAddress() << socket->bytesAvailable();
        socket->disconnectFromHost();
        return;
    }

#if QT_VERSION < QT_VERSION_CHECK(5,7,0)
    if (!socket->canReadLine())
        return;
#else
    // Transaction lets us roll back if only a partial line has arrived yet.
    socket->startTransaction();
#endif

    const QByteArray data = socket->readLine();

    qCDebug(KDECONNECT_CORE) << "LanLinkProvider received reply:" << data;

    NetworkPacket* np = new NetworkPacket(QLatin1String(""));
    bool success = NetworkPacket::unserialize(data, np);

#if QT_VERSION < QT_VERSION_CHECK(5,7,0)
    if (!success) {
        delete np;
        return;
    }
#else
    if (!success) {
        delete np;
        socket->rollbackTransaction();
        return;
    }
    socket->commitTransaction();
#endif

    if (np->type() != PACKET_TYPE_IDENTITY) {
        qCWarning(KDECONNECT_CORE) << "LanLinkProvider/newConnection: Expected identity, received " << np->type();
        delete np;
        return;
    }

    if (m_receivedIdentityPackets.size() > MAX_REMEMBERED_IDENTITY_PACKETS) {
        qCWarning(KDECONNECT_CORE) << "Too many remembered identities, ignoring" << np->get<QString>(QStringLiteral("deviceId")) << "received via TCP";
        delete np;
        return;
    }

    // Needed in "encrypted" if ssl is used, similar to "tcpSocketConnected"
    m_receivedIdentityPackets[socket].np = np;

    // NOTE(review): deviceId comes straight off the wire without validation
    // here — confirm the callee path sanitizes it before it is used as a key.
    const QString& deviceId = np->get<QString>(QStringLiteral("deviceId"));
    //qCDebug(KDECONNECT_CORE) << "Handshaking done (i'm the new device)";

    //This socket will now be owned by the LanDeviceLink or we don't want more data to be received, forget about it
    disconnect(socket, &QIODevice::readyRead, this, &LanLinkProvider::dataReceived);

    if (np->get<int>(QStringLiteral("protocolVersion")) >= MIN_VERSION_WITH_SSL_SUPPORT) {

        bool isDeviceTrusted = KdeConnectConfig::instance().trustedDevices().contains(deviceId);
        configureSslSocket(socket, deviceId, isDeviceTrusted);

        qCDebug(KDECONNECT_CORE) << "Starting client ssl (but I'm the server TCP socket)";

        connect(socket, &QSslSocket::encrypted, this, &LanLinkProvider::encrypted);

        if (isDeviceTrusted) {
            connect(socket, QOverload<const QList<QSslError> &>::of(&QSslSocket::sslErrors), this, &LanLinkProvider::sslErrors);
        }

        socket->startClientEncryption();

    } else {
        qWarning() << np->get<QString>(QStringLiteral("deviceName")) << "uses an old protocol version, this won't work";
        //addLink(deviceId, socket, np, LanDeviceLink::Locally);
        delete m_receivedIdentityPackets.take(socket).np;
    }
}

// Bookkeeping when a LanDeviceLink is destroyed: remove it from the link map
// and retire the pairing handler associated with the same device.
void LanLinkProvider::deviceLinkDestroyed(QObject* destroyedDeviceLink)
{
    const QString id = destroyedDeviceLink->property("deviceId").toString();
    //qCDebug(KDECONNECT_CORE) << "deviceLinkDestroyed" << id;

    QMap< QString, LanDeviceLink* >::iterator linkIterator = m_links.find(id);
    Q_ASSERT(linkIterator != m_links.end());
    if (linkIterator != m_links.end()) {
        Q_ASSERT(linkIterator.value() == destroyedDeviceLink);
        m_links.erase(linkIterator);
        auto pairingHandler = m_pairingHandlers.take(id);
        if (pairingHandler) {
            pairingHandler->deleteLater();
        }
    }
}

// Builds the per-connection TLS configuration: our cert/key, a fixed cipher
// list matching the Android client, and a verify mode based on pairing state.
void LanLinkProvider::configureSslSocket(QSslSocket* socket, const QString& deviceId, bool isDeviceTrusted)
{
    // Setting supported ciphers manually, to match those on Android (FIXME: Test if this can be left unconfigured and still works for Android 4)
    QList<QSslCipher> socketCiphers;
    socketCiphers.append(QSslCipher(QStringLiteral("ECDHE-ECDSA-AES256-GCM-SHA384")));
    socketCiphers.append(QSslCipher(QStringLiteral("ECDHE-ECDSA-AES128-GCM-SHA256")));
    socketCiphers.append(QSslCipher(QStringLiteral("ECDHE-RSA-AES128-SHA")));

    // Configure for ssl
    QSslConfiguration sslConfig;
    sslConfig.setCiphers(socketCiphers);
    sslConfig.setLocalCertificate(KdeConnectConfig::instance().certificate());

    QFile privateKeyFile(KdeConnectConfig::instance().privateKeyPath());
    QSslKey privateKey;
    if (privateKeyFile.open(QIODevice::ReadOnly)) {
        privateKey = QSslKey(privateKeyFile.readAll(), QSsl::Rsa);
    }
    privateKeyFile.close();
    sslConfig.setPrivateKey(privateKey);

    if (isDeviceTrusted) {
        // Paired device: pin its stored certificate and require verification.
        QString certString = KdeConnectConfig::instance().getDeviceProperty(deviceId, QStringLiteral("certificate"), QString());
        sslConfig.setCaCertificates({QSslCertificate(certString.toLatin1())});
        sslConfig.setPeerVerifyMode(QSslSocket::VerifyPeer);
    } else {
        // Unpaired: request a certificate but continue even if unverifiable.
        sslConfig.setPeerVerifyMode(QSslSocket::QueryPeer);
    }
    socket->setSslConfiguration(sslConfig);
    socket->setPeerVerifyName(deviceId);

    //Usually SSL errors are only bad for trusted devices. Uncomment this section to log errors in any case, for debugging.
    //QObject::connect(socket, static_cast<void (QSslSocket::*)(const QList<QSslError>&)>(&QSslSocket::sslErrors), [](const QList<QSslError>& errors)
    //{
    //    Q_FOREACH (const QSslError& error, errors) {
    //        qCDebug(KDECONNECT_CORE) << "SSL Error:" << error.errorString();
    //    }
    //});
}

// Shared transport tuning: no proxy, TCP keep-alive with platform knobs.
void LanLinkProvider::configureSocket(QSslSocket* socket) {

    socket->setProxy(QNetworkProxy::NoProxy);

    socket->setSocketOption(QAbstractSocket::KeepAliveOption, QVariant(1));

    #ifdef TCP_KEEPIDLE
    // time to start sending keepalive packets (seconds)
    int maxIdle = 10;
    setsockopt(socket->socketDescriptor(), IPPROTO_TCP, TCP_KEEPIDLE, &maxIdle, sizeof(maxIdle));
    #endif

    #ifdef TCP_KEEPINTVL
    // interval between keepalive packets after the initial period (seconds)
    int interval = 5;
    setsockopt(socket->socketDescriptor(), IPPROTO_TCP, TCP_KEEPINTVL, &interval, sizeof(interval));
    #endif

    #ifdef TCP_KEEPCNT
    // number of missed keepalive packets before disconnecting
    int count = 3;
    setsockopt(socket->socketDescriptor(), IPPROTO_TCP, TCP_KEEPCNT, &count, sizeof(count));
    #endif

}

// Registers (or refreshes) the LanDeviceLink for deviceId and announces the
// connection. Enforces the MAX_UNPAIRED_CONNECTIONS cap for untrusted peers.
void LanLinkProvider::addLink(const QString& deviceId, QSslSocket* socket, NetworkPacket* receivedPacket, LanDeviceLink::ConnectionStarted connectionOrigin)
{
    // Socket disconnection will now be handled by LanDeviceLink
    disconnect(socket, &QAbstractSocket::disconnected, socket, &QObject::deleteLater);

    LanDeviceLink* deviceLink;
    //Do we have a link for this device already?
    QMap< QString, LanDeviceLink* >::iterator linkIterator = m_links.find(deviceId);
    if (linkIterator != m_links.end()) {
        //qCDebug(KDECONNECT_CORE) << "Reusing link to" << deviceId;
        deviceLink = linkIterator.value();
        deviceLink->reset(socket, connectionOrigin);
    } else {
        deviceLink = new LanDeviceLink(deviceId, this, socket, connectionOrigin);
        // Socket disconnection will now be handled by LanDeviceLink
        disconnect(socket, &QAbstractSocket::disconnected, socket, &QObject::deleteLater);
        bool isDeviceTrusted = KdeConnectConfig::instance().trustedDevices().contains(deviceId);
        if (!isDeviceTrusted && m_links.size() > MAX_UNPAIRED_CONNECTIONS) {
            // NOTE(review): deviceLink is not explicitly deleted on this
            // path — presumably reclaimed via its QObject parent; verify.
            qCWarning(KDECONNECT_CORE) << "Too many unpaired devices to remember them all. Ignoring " << deviceId;
            socket->disconnectFromHost();
            socket->deleteLater();
            return;
        }
        connect(deviceLink, &QObject::destroyed, this, &LanLinkProvider::deviceLinkDestroyed);
        m_links[deviceId] = deviceLink;
        if (m_pairingHandlers.contains(deviceId)) {
            //We shouldn't have a pairinghandler if we didn't have a link.
            //Crash if debug, recover if release (by setting the new devicelink to the old pairinghandler)
            Q_ASSERT(m_pairingHandlers.contains(deviceId));
            m_pairingHandlers[deviceId]->setDeviceLink(deviceLink);
        }
    }
    Q_EMIT onConnectionReceived(*receivedPacket, deviceLink);
}

// Lazily creates (and caches) the pairing handler for the link's device.
LanPairingHandler* LanLinkProvider::createPairingHandler(DeviceLink* link)
{
    LanPairingHandler* ph = m_pairingHandlers.value(link->deviceId());
    if (!ph) {
        ph = new LanPairingHandler(link);
        qCDebug(KDECONNECT_CORE) << "creating pairing handler for" << link->deviceId();
        connect (ph, &LanPairingHandler::pairingError, link, &DeviceLink::pairingError);
        m_pairingHandlers[link->deviceId()] = ph;
    }
    return ph;
}

// UI entry point: start pairing with a device we already have a link to.
void LanLinkProvider::userRequestsPair(const QString& deviceId)
{
    LanPairingHandler* ph = createPairingHandler(m_links.value(deviceId));
    ph->requestPairing();
}

// UI entry point: revoke pairing with a device.
void LanLinkProvider::userRequestsUnpair(const QString& deviceId)
{
    LanPairingHandler* ph = createPairingHandler(m_links.value(deviceId));
    ph->unpair();
}

// Routes an incoming pair/unpair packet to the device's pairing handler.
void LanLinkProvider::incomingPairPacket(DeviceLink* deviceLink, const NetworkPacket& np)
{
    LanPairingHandler* ph = createPairingHandler(deviceLink);
    ph->packetReceived(np);
}
./CrossVul/dataset_final_sorted/CWE-400/cpp/bad_4324_0
crossvul-cpp_data_good_843_0
/*
 * Copyright (c) 2018-present, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under the BSD-style license found in the
 * LICENSE file in the root directory of this source tree.
 */

#include <fizz/record/EncryptedRecordLayer.h>

#include <fizz/crypto/aead/IOBufUtil.h>

namespace fizz {

using ContentTypeType = typename std::underlying_type<ContentType>::type;
using ProtocolVersionType = typename std::underlying_type<ProtocolVersion>::type;

// TLS 1.3 bounds: ciphertext may exceed 2^14 plaintext by at most 256 bytes.
static constexpr uint16_t kMaxEncryptedRecordSize = 0x4000 + 256; // 16k + 256
// Record header: opaque_type (1) + legacy_record_version (2) + length (2).
static constexpr size_t kEncryptedHeaderSize =
    sizeof(ContentType) + sizeof(ProtocolVersion) + sizeof(uint16_t);

EncryptedReadRecordLayer::EncryptedReadRecordLayer(
    EncryptionLevel encryptionLevel)
    : encryptionLevel_(encryptionLevel) {}

// Parses one complete encrypted record from `buf` and returns its decrypted
// payload (padding still attached). Returns folly::none when the queue does
// not yet hold a full record; throws on malformed or oversized records.
// Unencrypted change_cipher_spec records (middlebox compatibility) are
// consumed and skipped.
folly::Optional<Buf> EncryptedReadRecordLayer::getDecryptedBuf(
    folly::IOBufQueue& buf) {
  while (true) {
    // Cache the front buffer, calling front may invoke and update
    // of the tail cache.
    auto frontBuf = buf.front();
    folly::io::Cursor cursor(frontBuf);

    if (buf.empty() || !cursor.canAdvance(kEncryptedHeaderSize)) {
      return folly::none;
    }

    // The 5-byte record header doubles as AEAD additional data.
    std::array<uint8_t, kEncryptedHeaderSize> ad;
    folly::io::Cursor adCursor(cursor);
    adCursor.pull(ad.data(), ad.size());
    folly::IOBuf adBuf{folly::IOBuf::wrapBufferAsValue(folly::range(ad))};

    auto contentType =
        static_cast<ContentType>(cursor.readBE<ContentTypeType>());
    cursor.skip(sizeof(ProtocolVersion));

    auto length = cursor.readBE<uint16_t>();
    if (length == 0) {
      throw std::runtime_error("received 0 length encrypted record");
    }
    if (length > kMaxEncryptedRecordSize) {
      throw std::runtime_error("received too long encrypted record");
    }

    auto consumedBytes = cursor - frontBuf;
    if (buf.chainLength() < consumedBytes + length) {
      // Not enough bytes buffered yet for the full record body.
      return folly::none;
    }

    if (contentType == ContentType::alert && length == 2) {
      auto alert = decode<Alert>(cursor);
      throw std::runtime_error(folly::to<std::string>(
          "received plaintext alert in encrypted record: ",
          toString(alert.description)));
    }

    // If we already know that the length of the buffer is the
    // same as the number of bytes we need, move the entire buffer.
    std::unique_ptr<folly::IOBuf> encrypted;
    if (buf.chainLength() == consumedBytes + length) {
      encrypted = buf.move();
    } else {
      encrypted = buf.split(consumedBytes + length);
    }
    trimStart(*encrypted, consumedBytes);

    if (contentType == ContentType::change_cipher_spec) {
      encrypted->coalesce();
      if (encrypted->length() == 1 && *encrypted->data() == 0x01) {
        // Compatibility CCS — ignored per RFC 8446.
        continue;
      } else {
        throw FizzException(
            "received ccs", AlertDescription::illegal_parameter);
      }
    }

    TLSMessage msg;
    if (seqNum_ == std::numeric_limits<uint64_t>::max()) {
      throw std::runtime_error("max read seq num");
    }
    if (skipFailedDecryption_) {
      // Trial decryption (e.g. for early data): drop records that fail to
      // authenticate until one succeeds, then stop skipping.
      auto decryptAttempt = aead_->tryDecrypt(
          std::move(encrypted),
          useAdditionalData_ ? &adBuf : nullptr,
          seqNum_);
      if (decryptAttempt) {
        seqNum_++;
        skipFailedDecryption_ = false;
        return decryptAttempt;
      } else {
        continue;
      }
    } else {
      return aead_->decrypt(
          std::move(encrypted),
          useAdditionalData_ ? &adBuf : nullptr,
          seqNum_++);
    }
  }
}

// Strips TLS 1.3 record padding (trailing zeros), extracts the real content
// type (last non-zero octet), and validates it. Throws if the record was all
// padding, if the content type is unknown, or if a non-application-data
// record carries an empty fragment.
folly::Optional<TLSMessage> EncryptedReadRecordLayer::read(
    folly::IOBufQueue& buf) {
  auto decryptedBuf = getDecryptedBuf(buf);
  if (!decryptedBuf) {
    return folly::none;
  }

  TLSMessage msg;
  // Iterate over the buffers while trying to find
  // the first non-zero octet. This is much faster than
  // first iterating and then trimming.
  auto currentBuf = decryptedBuf->get();
  bool nonZeroFound = false;
  do {
    // Walk the chain backwards, starting from the tail buffer.
    currentBuf = currentBuf->prev();
    size_t i = currentBuf->length();
    while (i > 0 && !nonZeroFound) {
      nonZeroFound = (currentBuf->data()[i - 1] != 0);
      i--;
    }
    if (nonZeroFound) {
      msg.type = static_cast<ContentType>(currentBuf->data()[i]);
    }
    // Trim the padding (and the content-type octet) off this buffer.
    currentBuf->trimEnd(currentBuf->length() - i);
  } while (!nonZeroFound && currentBuf != decryptedBuf->get());

  if (!nonZeroFound) {
    throw std::runtime_error("No content type found");
  }
  msg.fragment = std::move(*decryptedBuf);

  switch (msg.type) {
    case ContentType::handshake:
    case ContentType::alert:
    case ContentType::application_data:
      break;
    default:
      throw std::runtime_error(folly::to<std::string>(
          "received encrypted content type ",
          static_cast<ContentTypeType>(msg.type)));
  }

  // Empty handshake/alert fragments are rejected outright; they would
  // otherwise let a peer feed us no-progress records. Empty application
  // data is legal and normalized to a zero-length buffer.
  if (!msg.fragment || msg.fragment->empty()) {
    if (msg.type == ContentType::application_data) {
      msg.fragment = folly::IOBuf::create(0);
    } else {
      throw std::runtime_error("received empty fragment");
    }
  }

  return msg;
}

EncryptionLevel EncryptedReadRecordLayer::getEncryptionLevel() const {
  return encryptionLevel_;
}

EncryptedWriteRecordLayer::EncryptedWriteRecordLayer(
    EncryptionLevel encryptionLevel)
    : encryptionLevel_(encryptionLevel) {}

// Splits the message into records, appends the inner content-type octet,
// encrypts each chunk with the record header as additional data, and
// prepends the plaintext record header to each ciphertext.
TLSContent EncryptedWriteRecordLayer::write(TLSMessage&& msg) const {
  folly::IOBufQueue queue;
  queue.append(std::move(msg.fragment));
  std::unique_ptr<folly::IOBuf> outBuf;
  std::array<uint8_t, kEncryptedHeaderSize> headerBuf;
  auto header = folly::IOBuf::wrapBufferAsValue(folly::range(headerBuf));
  aead_->setEncryptedBufferHeadroom(kEncryptedHeaderSize);
  while (!queue.empty()) {
    auto dataBuf = getBufToEncrypt(queue);
    // Currently we never send padding.

    // check if we have enough room to add the encrypted footer.
    if (!dataBuf->isShared() &&
        dataBuf->prev()->tailroom() >= sizeof(ContentType)) {
      // extend it and add it
      folly::io::Appender appender(dataBuf.get(), 0);
      appender.writeBE(static_cast<ContentTypeType>(msg.type));
    } else {
      // not enough or shared - let's add enough for the tag as well
      auto encryptedFooter = folly::IOBuf::create(
          sizeof(ContentType) + aead_->getCipherOverhead());
      folly::io::Appender appender(encryptedFooter.get(), 0);
      appender.writeBE(static_cast<ContentTypeType>(msg.type));
      dataBuf->prependChain(std::move(encryptedFooter));
    }

    if (seqNum_ == std::numeric_limits<uint64_t>::max()) {
      throw std::runtime_error("max write seq num");
    }

    // we will either be able to memcpy directly into the ciphertext or
    // need to create a new buf to insert before the ciphertext but we need
    // it for additional data
    header.clear();
    folly::io::Appender appender(&header, 0);
    appender.writeBE(
        static_cast<ContentTypeType>(ContentType::application_data));
    appender.writeBE(
        static_cast<ProtocolVersionType>(ProtocolVersion::tls_1_2));
    auto ciphertextLength =
        dataBuf->computeChainDataLength() + aead_->getCipherOverhead();
    appender.writeBE<uint16_t>(ciphertextLength);

    auto cipherText = aead_->encrypt(
        std::move(dataBuf),
        useAdditionalData_ ? &header : nullptr,
        seqNum_++);

    std::unique_ptr<folly::IOBuf> record;
    if (!cipherText->isShared() &&
        cipherText->headroom() >= kEncryptedHeaderSize) {
      // prepend and then write it in
      cipherText->prepend(kEncryptedHeaderSize);
      memcpy(cipherText->writableData(), header.data(), header.length());
      record = std::move(cipherText);
    } else {
      record = folly::IOBuf::copyBuffer(header.data(), header.length());
      record->prependChain(std::move(cipherText));
    }

    if (!outBuf) {
      outBuf = std::move(record);
    } else {
      outBuf->prependChain(std::move(record));
    }
  }

  if (!outBuf) {
    outBuf = folly::IOBuf::create(0);
  }

  TLSContent content;
  content.data = std::move(outBuf);
  content.contentType = msg.type;
  content.encryptionLevel = encryptionLevel_;
  return content;
}

// Chooses the next chunk to encrypt: cap at maxRecord_, but coalesce small
// queued buffers up to desiredMinRecord_ to avoid tiny records.
Buf EncryptedWriteRecordLayer::getBufToEncrypt(folly::IOBufQueue& queue) const {
  if (queue.front()->length() > maxRecord_) {
    return queue.splitAtMost(maxRecord_);
  } else if (queue.front()->length() >= desiredMinRecord_) {
    return queue.pop_front();
  } else {
    return queue.splitAtMost(desiredMinRecord_);
  }
}

EncryptionLevel EncryptedWriteRecordLayer::getEncryptionLevel() const {
  return encryptionLevel_;
}
} // namespace fizz
./CrossVul/dataset_final_sorted/CWE-400/cpp/good_843_0
crossvul-cpp_data_bad_843_0
/*
 * Copyright (c) 2018-present, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under the BSD-style license found in the
 * LICENSE file in the root directory of this source tree.
 */

#include <fizz/record/EncryptedRecordLayer.h>

#include <fizz/crypto/aead/IOBufUtil.h>

namespace fizz {

using ContentTypeType = typename std::underlying_type<ContentType>::type;
using ProtocolVersionType = typename std::underlying_type<ProtocolVersion>::type;

// TLS 1.3 bounds: ciphertext may exceed 2^14 plaintext by at most 256 bytes.
static constexpr uint16_t kMaxEncryptedRecordSize = 0x4000 + 256; // 16k + 256
// Record header: opaque_type (1) + legacy_record_version (2) + length (2).
static constexpr size_t kEncryptedHeaderSize =
    sizeof(ContentType) + sizeof(ProtocolVersion) + sizeof(uint16_t);

EncryptedReadRecordLayer::EncryptedReadRecordLayer(
    EncryptionLevel encryptionLevel)
    : encryptionLevel_(encryptionLevel) {}

// Parses one complete encrypted record from `buf` and returns its decrypted
// payload (padding still attached). Returns folly::none when the queue does
// not yet hold a full record; throws on malformed or oversized records.
folly::Optional<Buf> EncryptedReadRecordLayer::getDecryptedBuf(
    folly::IOBufQueue& buf) {
  while (true) {
    // Cache the front buffer, calling front may invoke and update
    // of the tail cache.
    auto frontBuf = buf.front();
    folly::io::Cursor cursor(frontBuf);

    if (buf.empty() || !cursor.canAdvance(kEncryptedHeaderSize)) {
      return folly::none;
    }

    // The 5-byte record header doubles as AEAD additional data.
    std::array<uint8_t, kEncryptedHeaderSize> ad;
    folly::io::Cursor adCursor(cursor);
    adCursor.pull(ad.data(), ad.size());
    folly::IOBuf adBuf{folly::IOBuf::wrapBufferAsValue(folly::range(ad))};

    auto contentType =
        static_cast<ContentType>(cursor.readBE<ContentTypeType>());
    cursor.skip(sizeof(ProtocolVersion));

    auto length = cursor.readBE<uint16_t>();
    if (length == 0) {
      throw std::runtime_error("received 0 length encrypted record");
    }
    if (length > kMaxEncryptedRecordSize) {
      throw std::runtime_error("received too long encrypted record");
    }

    auto consumedBytes = cursor - frontBuf;
    if (buf.chainLength() < consumedBytes + length) {
      // Not enough bytes buffered yet for the full record body.
      return folly::none;
    }

    if (contentType == ContentType::alert && length == 2) {
      auto alert = decode<Alert>(cursor);
      throw std::runtime_error(folly::to<std::string>(
          "received plaintext alert in encrypted record: ",
          toString(alert.description)));
    }

    // If we already know that the length of the buffer is the
    // same as the number of bytes we need, move the entire buffer.
    std::unique_ptr<folly::IOBuf> encrypted;
    if (buf.chainLength() == consumedBytes + length) {
      encrypted = buf.move();
    } else {
      encrypted = buf.split(consumedBytes + length);
    }
    trimStart(*encrypted, consumedBytes);

    if (contentType == ContentType::change_cipher_spec) {
      encrypted->coalesce();
      if (encrypted->length() == 1 && *encrypted->data() == 0x01) {
        // Compatibility CCS — ignored per RFC 8446.
        continue;
      } else {
        throw FizzException(
            "received ccs", AlertDescription::illegal_parameter);
      }
    }

    TLSMessage msg;
    if (seqNum_ == std::numeric_limits<uint64_t>::max()) {
      throw std::runtime_error("max read seq num");
    }
    if (skipFailedDecryption_) {
      // Trial decryption (e.g. for early data): drop records that fail to
      // authenticate until one succeeds, then stop skipping.
      auto decryptAttempt = aead_->tryDecrypt(
          std::move(encrypted),
          useAdditionalData_ ? &adBuf : nullptr,
          seqNum_);
      if (decryptAttempt) {
        seqNum_++;
        skipFailedDecryption_ = false;
        return decryptAttempt;
      } else {
        continue;
      }
    } else {
      return aead_->decrypt(
          std::move(encrypted),
          useAdditionalData_ ? &adBuf : nullptr,
          seqNum_++);
    }
  }
}

// Strips TLS 1.3 record padding (trailing zeros), extracts the real content
// type (last non-zero octet), and validates it. Throws if the record was all
// padding, if the content type is unknown, or if a non-application-data
// record carries an empty fragment.
folly::Optional<TLSMessage> EncryptedReadRecordLayer::read(
    folly::IOBufQueue& buf) {
  auto decryptedBuf = getDecryptedBuf(buf);
  if (!decryptedBuf) {
    return folly::none;
  }

  TLSMessage msg;
  // Iterate over the buffers while trying to find
  // the first non-zero octet. This is much faster than
  // first iterating and then trimming.
  auto currentBuf = decryptedBuf->get();
  bool nonZeroFound = false;
  do {
    currentBuf = currentBuf->prev();
    size_t i = currentBuf->length();
    while (i > 0 && !nonZeroFound) {
      nonZeroFound = (currentBuf->data()[i - 1] != 0);
      i--;
    }
    if (nonZeroFound) {
      msg.type = static_cast<ContentType>(currentBuf->data()[i]);
    }
    currentBuf->trimEnd(currentBuf->length() - i);
  } while (!nonZeroFound && currentBuf != decryptedBuf->get());

  if (!nonZeroFound) {
    throw std::runtime_error("No content type found");
  }
  msg.fragment = std::move(*decryptedBuf);

  switch (msg.type) {
    case ContentType::handshake:
    case ContentType::alert:
    case ContentType::application_data:
      break;
    default:
      throw std::runtime_error(folly::to<std::string>(
          "received encrypted content type ",
          static_cast<ContentTypeType>(msg.type)));
  }

  // FIX: also reject *empty* (not just null) fragments. Previously only
  // `!msg.fragment` was checked, so a record whose payload was stripped down
  // to a zero-length handshake/alert fragment was passed through instead of
  // being rejected — a peer could feed such no-progress records to make the
  // upper layers spin without consuming input (resource-exhaustion DoS).
  // Empty application data remains legal and is normalized to a 0-byte buf.
  if (!msg.fragment || msg.fragment->empty()) {
    if (msg.type == ContentType::application_data) {
      msg.fragment = folly::IOBuf::create(0);
    } else {
      throw std::runtime_error("received empty fragment");
    }
  }

  return msg;
}

EncryptionLevel EncryptedReadRecordLayer::getEncryptionLevel() const {
  return encryptionLevel_;
}

EncryptedWriteRecordLayer::EncryptedWriteRecordLayer(
    EncryptionLevel encryptionLevel)
    : encryptionLevel_(encryptionLevel) {}

// Splits the message into records, appends the inner content-type octet,
// and encrypts each chunk with the record header as additional data.
TLSContent EncryptedWriteRecordLayer::write(TLSMessage&& msg) const {
  folly::IOBufQueue queue;
  queue.append(std::move(msg.fragment));
  std::unique_ptr<folly::IOBuf> outBuf;
  std::array<uint8_t, kEncryptedHeaderSize> headerBuf;
  auto header = folly::IOBuf::wrapBufferAsValue(folly::range(headerBuf));
  aead_->setEncryptedBufferHeadroom(kEncryptedHeaderSize);
  while (!queue.empty()) {
    auto dataBuf = getBufToEncrypt(queue);
    // Currently we never send padding.

    // check if we have enough room to add the encrypted footer.
if (!dataBuf->isShared() && dataBuf->prev()->tailroom() >= sizeof(ContentType)) { // extend it and add it folly::io::Appender appender(dataBuf.get(), 0); appender.writeBE(static_cast<ContentTypeType>(msg.type)); } else { // not enough or shared - let's add enough for the tag as well auto encryptedFooter = folly::IOBuf::create( sizeof(ContentType) + aead_->getCipherOverhead()); folly::io::Appender appender(encryptedFooter.get(), 0); appender.writeBE(static_cast<ContentTypeType>(msg.type)); dataBuf->prependChain(std::move(encryptedFooter)); } if (seqNum_ == std::numeric_limits<uint64_t>::max()) { throw std::runtime_error("max write seq num"); } // we will either be able to memcpy directly into the ciphertext or // need to create a new buf to insert before the ciphertext but we need // it for additional data header.clear(); folly::io::Appender appender(&header, 0); appender.writeBE( static_cast<ContentTypeType>(ContentType::application_data)); appender.writeBE( static_cast<ProtocolVersionType>(ProtocolVersion::tls_1_2)); auto ciphertextLength = dataBuf->computeChainDataLength() + aead_->getCipherOverhead(); appender.writeBE<uint16_t>(ciphertextLength); auto cipherText = aead_->encrypt( std::move(dataBuf), useAdditionalData_ ? 
&header : nullptr, seqNum_++); std::unique_ptr<folly::IOBuf> record; if (!cipherText->isShared() && cipherText->headroom() >= kEncryptedHeaderSize) { // prepend and then write it in cipherText->prepend(kEncryptedHeaderSize); memcpy(cipherText->writableData(), header.data(), header.length()); record = std::move(cipherText); } else { record = folly::IOBuf::copyBuffer(header.data(), header.length()); record->prependChain(std::move(cipherText)); } if (!outBuf) { outBuf = std::move(record); } else { outBuf->prependChain(std::move(record)); } } if (!outBuf) { outBuf = folly::IOBuf::create(0); } TLSContent content; content.data = std::move(outBuf); content.contentType = msg.type; content.encryptionLevel = encryptionLevel_; return content; } Buf EncryptedWriteRecordLayer::getBufToEncrypt(folly::IOBufQueue& queue) const { if (queue.front()->length() > maxRecord_) { return queue.splitAtMost(maxRecord_); } else if (queue.front()->length() >= desiredMinRecord_) { return queue.pop_front(); } else { return queue.splitAtMost(desiredMinRecord_); } } EncryptionLevel EncryptedWriteRecordLayer::getEncryptionLevel() const { return encryptionLevel_; } } // namespace fizz
./CrossVul/dataset_final_sorted/CWE-400/cpp/bad_843_0
crossvul-cpp_data_bad_842_1
/* * Copyright (c) 2018-present, Facebook, Inc. * All rights reserved. * * This source code is licensed under the BSD-style license found in the * LICENSE file in the root directory of this source tree. */ #include <fizz/record/RecordLayer.h> namespace fizz { using HandshakeTypeType = typename std::underlying_type<HandshakeType>::type; static constexpr size_t kHandshakeHeaderSize = sizeof(HandshakeType) + detail::bits24::size; folly::Optional<Param> ReadRecordLayer::readEvent( folly::IOBufQueue& socketBuf) { if (!unparsedHandshakeData_.empty()) { auto param = decodeHandshakeMessage(unparsedHandshakeData_); if (param) { VLOG(8) << "Received handshake message " << toString(boost::apply_visitor(EventVisitor(), *param)); return param; } } while (true) { // Read one record. We read one record at a time since records could cause // a change in the record layer. auto message = read(socketBuf); if (!message) { return folly::none; } if (!unparsedHandshakeData_.empty() && message->type != ContentType::handshake) { throw std::runtime_error("spliced handshake data"); } switch (message->type) { case ContentType::alert: { auto alert = decode<Alert>(std::move(message->fragment)); if (alert.description == AlertDescription::close_notify) { return Param(CloseNotify(socketBuf.move())); } else { return Param(std::move(alert)); } } case ContentType::handshake: { unparsedHandshakeData_.append(std::move(message->fragment)); auto param = decodeHandshakeMessage(unparsedHandshakeData_); if (param) { VLOG(8) << "Received handshake message " << toString(boost::apply_visitor(EventVisitor(), *param)); return param; } else { // If we read handshake data but didn't have enough to get a full // message we immediately try to read another record. 
// TODO: add limits on number of records we buffer continue; } } case ContentType::application_data: return Param(AppData(std::move(message->fragment))); default: throw std::runtime_error("unknown content type"); } } } template <typename T> static Param parse(Buf handshakeMsg, Buf original) { auto msg = decode<T>(std::move(handshakeMsg)); msg.originalEncoding = std::move(original); return std::move(msg); } template <> Param parse<ServerHello>(Buf handshakeMsg, Buf original) { auto shlo = decode<ServerHello>(std::move(handshakeMsg)); if (shlo.random == HelloRetryRequest::HrrRandom) { HelloRetryRequest hrr; hrr.legacy_version = shlo.legacy_version; hrr.legacy_session_id_echo = std::move(shlo.legacy_session_id_echo); hrr.cipher_suite = shlo.cipher_suite; hrr.legacy_compression_method = shlo.legacy_compression_method; hrr.extensions = std::move(shlo.extensions); hrr.originalEncoding = std::move(original); return std::move(hrr); } else { shlo.originalEncoding = std::move(original); return std::move(shlo); } } folly::Optional<Param> ReadRecordLayer::decodeHandshakeMessage( folly::IOBufQueue& buf) { folly::io::Cursor cursor(buf.front()); if (!cursor.canAdvance(kHandshakeHeaderSize)) { return folly::none; } auto handshakeType = static_cast<HandshakeType>(cursor.readBE<HandshakeTypeType>()); auto length = detail::readBits24(cursor); if (length > kMaxHandshakeSize) { throw std::runtime_error("handshake record too big"); } if (buf.chainLength() < (cursor - buf.front()) + length) { return folly::none; } Buf handshakeMsg; cursor.clone(handshakeMsg, length); auto original = buf.split(kHandshakeHeaderSize + length); switch (handshakeType) { case HandshakeType::client_hello: return parse<ClientHello>(std::move(handshakeMsg), std::move(original)); case HandshakeType::server_hello: return parse<ServerHello>(std::move(handshakeMsg), std::move(original)); case HandshakeType::end_of_early_data: return parse<EndOfEarlyData>( std::move(handshakeMsg), std::move(original)); case 
HandshakeType::new_session_ticket: return parse<NewSessionTicket>( std::move(handshakeMsg), std::move(original)); case HandshakeType::encrypted_extensions: return parse<EncryptedExtensions>( std::move(handshakeMsg), std::move(original)); case HandshakeType::certificate: return parse<CertificateMsg>( std::move(handshakeMsg), std::move(original)); case HandshakeType::compressed_certificate: return parse<CompressedCertificate>( std::move(handshakeMsg), std::move(original)); case HandshakeType::certificate_request: return parse<CertificateRequest>( std::move(handshakeMsg), std::move(original)); case HandshakeType::certificate_verify: return parse<CertificateVerify>( std::move(handshakeMsg), std::move(original)); case HandshakeType::finished: return parse<Finished>(std::move(handshakeMsg), std::move(original)); case HandshakeType::key_update: return parse<KeyUpdate>(std::move(handshakeMsg), std::move(original)); default: throw std::runtime_error("unknown handshake type"); }; } bool ReadRecordLayer::hasUnparsedHandshakeData() const { return !unparsedHandshakeData_.empty(); } } // namespace fizz
./CrossVul/dataset_final_sorted/CWE-400/cpp/bad_842_1
crossvul-cpp_data_good_4319_0
/** * SPDX-FileCopyrightText: 2013 Albert Vaca <albertvaka@gmail.com> * * SPDX-License-Identifier: GPL-2.0-only OR GPL-3.0-only OR LicenseRef-KDE-Accepted-GPL */ #include "lanlinkprovider.h" #include "core_debug.h" #ifndef Q_OS_WIN #include <sys/socket.h> #include <netinet/in.h> #include <netinet/tcp.h> #include <netdb.h> #endif #include <QHostInfo> #include <QTcpServer> #include <QMetaEnum> #include <QNetworkProxy> #include <QUdpSocket> #include <QNetworkSession> #include <QNetworkConfigurationManager> #include <QSslCipher> #include <QSslConfiguration> #include <QSslKey> #include "daemon.h" #include "landevicelink.h" #include "lanpairinghandler.h" #include "kdeconnectconfig.h" #include "qtcompat_p.h" #define MIN_VERSION_WITH_SSL_SUPPORT 6 LanLinkProvider::LanLinkProvider( bool testMode, quint16 udpBroadcastPort, quint16 udpListenPort ) : m_server(new Server(this)) , m_udpSocket(this) , m_tcpPort(0) , m_udpBroadcastPort(udpBroadcastPort) , m_udpListenPort(udpListenPort) , m_testMode(testMode) , m_combineBroadcastsTimer(this) { m_combineBroadcastsTimer.setInterval(0); // increase this if waiting a single event-loop iteration is not enough m_combineBroadcastsTimer.setSingleShot(true); connect(&m_combineBroadcastsTimer, &QTimer::timeout, this, &LanLinkProvider::broadcastToNetwork); connect(&m_udpSocket, &QIODevice::readyRead, this, &LanLinkProvider::udpBroadcastReceived); m_server->setProxy(QNetworkProxy::NoProxy); connect(m_server, &QTcpServer::newConnection, this, &LanLinkProvider::newConnection); m_udpSocket.setProxy(QNetworkProxy::NoProxy); //Detect when a network interface changes status, so we announce ourselves in the new network QNetworkConfigurationManager* networkManager = new QNetworkConfigurationManager(this); connect(networkManager, &QNetworkConfigurationManager::configurationChanged, this, &LanLinkProvider::onNetworkConfigurationChanged); } void LanLinkProvider::onNetworkConfigurationChanged(const QNetworkConfiguration& config) { if (m_lastConfig != 
config && config.state() == QNetworkConfiguration::Active) { m_lastConfig = config; onNetworkChange(); } } LanLinkProvider::~LanLinkProvider() { } void LanLinkProvider::onStart() { const QHostAddress bindAddress = m_testMode? QHostAddress::LocalHost : QHostAddress::Any; bool success = m_udpSocket.bind(bindAddress, m_udpListenPort, QUdpSocket::ShareAddress); if (!success) { QAbstractSocket::SocketError sockErr = m_udpSocket.error(); // Refer to https://doc.qt.io/qt-5/qabstractsocket.html#SocketError-enum to decode socket error number QString errorMessage = QString::fromLatin1(QMetaEnum::fromType<QAbstractSocket::SocketError>().valueToKey(sockErr)); qCritical(KDECONNECT_CORE) << QLatin1String("Failed to bind UDP socket on port") << m_udpListenPort << QLatin1String("with error") << errorMessage; } Q_ASSERT(success); m_tcpPort = MIN_TCP_PORT; while (!m_server->listen(bindAddress, m_tcpPort)) { m_tcpPort++; if (m_tcpPort > MAX_TCP_PORT) { //No ports available? qCritical(KDECONNECT_CORE) << "Error opening a port in range" << MIN_TCP_PORT << "-" << MAX_TCP_PORT; m_tcpPort = 0; return; } } onNetworkChange(); qCDebug(KDECONNECT_CORE) << "LanLinkProvider started"; } void LanLinkProvider::onStop() { m_udpSocket.close(); m_server->close(); qCDebug(KDECONNECT_CORE) << "LanLinkProvider stopped"; } void LanLinkProvider::onNetworkChange() { if (m_combineBroadcastsTimer.isActive()) { qCDebug(KDECONNECT_CORE) << "Preventing duplicate broadcasts"; return; } m_combineBroadcastsTimer.start(); } //I'm in a new network, let's be polite and introduce myself void LanLinkProvider::broadcastToNetwork() { if (!m_server->isListening()) { //Not started return; } Q_ASSERT(m_tcpPort != 0); qCDebug(KDECONNECT_CORE()) << "Broadcasting identity packet"; QList<QHostAddress> destinations = getBroadcastAddresses(); NetworkPacket np; NetworkPacket::createIdentityPacket(&np); np.set(QStringLiteral("tcpPort"), m_tcpPort); #ifdef Q_OS_WIN //On Windows we need to broadcast from every local IP address to 
reach all networks QUdpSocket sendSocket; sendSocket.setProxy(QNetworkProxy::NoProxy); for (const QNetworkInterface& iface : QNetworkInterface::allInterfaces()) { if ( (iface.flags() & QNetworkInterface::IsUp) && (iface.flags() & QNetworkInterface::IsRunning) && (iface.flags() & QNetworkInterface::CanBroadcast)) { for (const QNetworkAddressEntry& ifaceAddress : iface.addressEntries()) { QHostAddress sourceAddress = ifaceAddress.ip(); if (sourceAddress.protocol() == QAbstractSocket::IPv4Protocol && sourceAddress != QHostAddress::LocalHost) { qCDebug(KDECONNECT_CORE()) << "Broadcasting as" << sourceAddress; sendBroadcasts(sendSocket, np, destinations); sendSocket.close(); } } } } #else sendBroadcasts(m_udpSocket, np, destinations); #endif } QList<QHostAddress> LanLinkProvider::getBroadcastAddresses() { const QStringList customDevices = KdeConnectConfig::instance().customDevices(); QList<QHostAddress> destinations; destinations.reserve(customDevices.length() + 1); // Default broadcast address destinations.append(m_testMode ? QHostAddress::LocalHost : QHostAddress::Broadcast); // Custom device addresses for (auto& customDevice : customDevices) { QHostAddress address(customDevice); if (address.isNull()) { qCWarning(KDECONNECT_CORE) << "Invalid custom device address" << customDevice; } else { destinations.append(address); } } return destinations; } void LanLinkProvider::sendBroadcasts( QUdpSocket& socket, const NetworkPacket& np, const QList<QHostAddress>& addresses) { const QByteArray payload = np.serialize(); for (auto& address : addresses) { socket.writeDatagram(payload, address, m_udpBroadcastPort); } } //I'm the existing device, a new device is kindly introducing itself. //I will create a TcpSocket and try to connect. This can result in either tcpSocketConnected() or connectError(). 
void LanLinkProvider::udpBroadcastReceived() { while (m_udpSocket.hasPendingDatagrams()) { QByteArray datagram; datagram.resize(m_udpSocket.pendingDatagramSize()); QHostAddress sender; m_udpSocket.readDatagram(datagram.data(), datagram.size(), &sender); if (sender.isLoopback() && !m_testMode) continue; NetworkPacket* receivedPacket = new NetworkPacket(QLatin1String("")); bool success = NetworkPacket::unserialize(datagram, receivedPacket); //qCDebug(KDECONNECT_CORE) << "udp connection from " << receivedPacket->; //qCDebug(KDECONNECT_CORE) << "Datagram " << datagram.data() ; if (!success) { qCDebug(KDECONNECT_CORE) << "Could not unserialize UDP packet"; delete receivedPacket; continue; } if (receivedPacket->type() != PACKET_TYPE_IDENTITY) { qCDebug(KDECONNECT_CORE) << "Received a UDP packet of wrong type" << receivedPacket->type(); delete receivedPacket; continue; } if (receivedPacket->get<QString>(QStringLiteral("deviceId")) == KdeConnectConfig::instance().deviceId()) { //qCDebug(KDECONNECT_CORE) << "Ignoring my own broadcast"; delete receivedPacket; continue; } int tcpPort = receivedPacket->get<int>(QStringLiteral("tcpPort")); //qCDebug(KDECONNECT_CORE) << "Received Udp identity packet from" << sender << " asking for a tcp connection on port " << tcpPort; QSslSocket* socket = new QSslSocket(this); socket->setProxy(QNetworkProxy::NoProxy); m_receivedIdentityPackets[socket].np = receivedPacket; m_receivedIdentityPackets[socket].sender = sender; connect(socket, &QAbstractSocket::connected, this, &LanLinkProvider::tcpSocketConnected); #if QT_VERSION < QT_VERSION_CHECK(5,15,0) connect(socket, QOverload<QAbstractSocket::SocketError>::of(&QAbstractSocket::error), this, &LanLinkProvider::connectError); #else connect(socket, &QAbstractSocket::errorOccurred, this, &LanLinkProvider::connectError); #endif socket->connectToHost(sender, tcpPort); } } void LanLinkProvider::connectError(QAbstractSocket::SocketError socketError) { QSslSocket* socket = 
qobject_cast<QSslSocket*>(sender()); if (!socket) return; qCDebug(KDECONNECT_CORE) << "Socket error" << socketError; qCDebug(KDECONNECT_CORE) << "Fallback (1), try reverse connection (send udp packet)" << socket->errorString(); NetworkPacket np(QLatin1String("")); NetworkPacket::createIdentityPacket(&np); np.set(QStringLiteral("tcpPort"), m_tcpPort); m_udpSocket.writeDatagram(np.serialize(), m_receivedIdentityPackets[socket].sender, m_udpBroadcastPort); //The socket we created didn't work, and we didn't manage //to create a LanDeviceLink from it, deleting everything. delete m_receivedIdentityPackets.take(socket).np; socket->deleteLater(); } //We received a UDP packet and answered by connecting to them by TCP. This gets called on a successful connection. void LanLinkProvider::tcpSocketConnected() { QSslSocket* socket = qobject_cast<QSslSocket*>(sender()); if (!socket) return; // TODO Delete me? #if QT_VERSION < QT_VERSION_CHECK(5,15,0) disconnect(socket, QOverload<QAbstractSocket::SocketError>::of(&QAbstractSocket::error), this, &LanLinkProvider::connectError); #else disconnect(socket, &QAbstractSocket::errorOccurred, this, &LanLinkProvider::connectError); #endif configureSocket(socket); // If socket disconnects due to any reason after connection, link on ssl failure connect(socket, &QAbstractSocket::disconnected, socket, &QObject::deleteLater); NetworkPacket* receivedPacket = m_receivedIdentityPackets[socket].np; const QString& deviceId = receivedPacket->get<QString>(QStringLiteral("deviceId")); //qCDebug(KDECONNECT_CORE) << "tcpSocketConnected" << socket->isWritable(); // If network is on ssl, do not believe when they are connected, believe when handshake is completed NetworkPacket np2(QLatin1String("")); NetworkPacket::createIdentityPacket(&np2); socket->write(np2.serialize()); bool success = socket->waitForBytesWritten(); if (success) { qCDebug(KDECONNECT_CORE) << "TCP connection done (i'm the existing device)"; // if ssl supported if 
(receivedPacket->get<int>(QStringLiteral("protocolVersion")) >= MIN_VERSION_WITH_SSL_SUPPORT) { bool isDeviceTrusted = KdeConnectConfig::instance().trustedDevices().contains(deviceId); configureSslSocket(socket, deviceId, isDeviceTrusted); qCDebug(KDECONNECT_CORE) << "Starting server ssl (I'm the client TCP socket)"; connect(socket, &QSslSocket::encrypted, this, &LanLinkProvider::encrypted); connect(socket, QOverload<const QList<QSslError> &>::of(&QSslSocket::sslErrors), this, &LanLinkProvider::sslErrors); socket->startServerEncryption(); return; // Return statement prevents from deleting received packet, needed in slot "encrypted" } else { qWarning() << receivedPacket->get<QString>(QStringLiteral("deviceName")) << "uses an old protocol version, this won't work"; //addLink(deviceId, socket, receivedPacket, LanDeviceLink::Remotely); } } else { //I think this will never happen, but if it happens the deviceLink //(or the socket that is now inside it) might not be valid. Delete them. qCDebug(KDECONNECT_CORE) << "Fallback (2), try reverse connection (send udp packet)"; m_udpSocket.writeDatagram(np2.serialize(), m_receivedIdentityPackets[socket].sender, m_udpBroadcastPort); } delete m_receivedIdentityPackets.take(socket).np; //We don't delete the socket because now it's owned by the LanDeviceLink } void LanLinkProvider::encrypted() { qCDebug(KDECONNECT_CORE) << "Socket successfully established an SSL connection"; QSslSocket* socket = qobject_cast<QSslSocket*>(sender()); if (!socket) return; Q_ASSERT(socket->mode() != QSslSocket::UnencryptedMode); LanDeviceLink::ConnectionStarted connectionOrigin = (socket->mode() == QSslSocket::SslClientMode)? 
LanDeviceLink::Locally : LanDeviceLink::Remotely; NetworkPacket* receivedPacket = m_receivedIdentityPackets[socket].np; const QString& deviceId = receivedPacket->get<QString>(QStringLiteral("deviceId")); addLink(deviceId, socket, receivedPacket, connectionOrigin); // Copied from tcpSocketConnected slot, now delete received packet delete m_receivedIdentityPackets.take(socket).np; } void LanLinkProvider::sslErrors(const QList<QSslError>& errors) { QSslSocket* socket = qobject_cast<QSslSocket*>(sender()); if (!socket) return; bool fatal = false; for (const QSslError& error : errors) { if (error.error() != QSslError::SelfSignedCertificate) { qCCritical(KDECONNECT_CORE) << "Disconnecting due to fatal SSL Error: " << error; fatal = true; } else { qCDebug(KDECONNECT_CORE) << "Ignoring self-signed cert error"; } } if (fatal) { socket->disconnectFromHost(); delete m_receivedIdentityPackets.take(socket).np; } } //I'm the new device and this is the answer to my UDP identity packet (no data received yet). They are connecting to us through TCP, and they should send an identity. void LanLinkProvider::newConnection() { qCDebug(KDECONNECT_CORE) << "LanLinkProvider newConnection"; while (m_server->hasPendingConnections()) { QSslSocket* socket = m_server->nextPendingConnection(); configureSocket(socket); //This socket is still managed by us (and child of the QTcpServer), if //it disconnects before we manage to pass it to a LanDeviceLink, it's //our responsibility to delete it. We do so with this connection. connect(socket, &QAbstractSocket::disconnected, socket, &QObject::deleteLater); connect(socket, &QIODevice::readyRead, this, &LanLinkProvider::dataReceived); QTimer* timer = new QTimer(socket); timer->setSingleShot(true); timer->setInterval(1000); connect(socket, &QSslSocket::encrypted, timer, &QObject::deleteLater); connect(timer, &QTimer::timeout, socket, [socket] { qCWarning(KDECONNECT_CORE) << "LanLinkProvider/newConnection: Host timed out without sending any identity." 
<< socket->peerAddress(); socket->disconnectFromHost(); }); timer->start(); } } //I'm the new device and this is the answer to my UDP identity packet (data received) void LanLinkProvider::dataReceived() { QSslSocket* socket = qobject_cast<QSslSocket*>(sender()); //the size here is arbitrary and is now at 8192 bytes. It needs to be considerably long as it includes the capabilities but there needs to be a limit //Tested between my systems and I get around 2000 per identity package. if (socket->bytesAvailable() > 8192) { qCWarning(KDECONNECT_CORE) << "LanLinkProvider/newConnection: Suspiciously long identity package received. Closing connection." << socket->peerAddress() << socket->bytesAvailable(); socket->disconnectFromHost(); return; } #if QT_VERSION < QT_VERSION_CHECK(5,7,0) if (!socket->canReadLine()) return; #else socket->startTransaction(); #endif const QByteArray data = socket->readLine(); qCDebug(KDECONNECT_CORE) << "LanLinkProvider received reply:" << data; NetworkPacket* np = new NetworkPacket(QLatin1String("")); bool success = NetworkPacket::unserialize(data, np); #if QT_VERSION < QT_VERSION_CHECK(5,7,0) if (!success) { delete np; return; } #else if (!success) { delete np; socket->rollbackTransaction(); return; } socket->commitTransaction(); #endif if (np->type() != PACKET_TYPE_IDENTITY) { qCWarning(KDECONNECT_CORE) << "LanLinkProvider/newConnection: Expected identity, received " << np->type(); delete np; return; } // Needed in "encrypted" if ssl is used, similar to "tcpSocketConnected" m_receivedIdentityPackets[socket].np = np; const QString& deviceId = np->get<QString>(QStringLiteral("deviceId")); //qCDebug(KDECONNECT_CORE) << "Handshaking done (i'm the new device)"; //This socket will now be owned by the LanDeviceLink or we don't want more data to be received, forget about it disconnect(socket, &QIODevice::readyRead, this, &LanLinkProvider::dataReceived); if (np->get<int>(QStringLiteral("protocolVersion")) >= MIN_VERSION_WITH_SSL_SUPPORT) { bool 
isDeviceTrusted = KdeConnectConfig::instance().trustedDevices().contains(deviceId); configureSslSocket(socket, deviceId, isDeviceTrusted); qCDebug(KDECONNECT_CORE) << "Starting client ssl (but I'm the server TCP socket)"; connect(socket, &QSslSocket::encrypted, this, &LanLinkProvider::encrypted); if (isDeviceTrusted) { connect(socket, QOverload<const QList<QSslError> &>::of(&QSslSocket::sslErrors), this, &LanLinkProvider::sslErrors); } socket->startClientEncryption(); } else { qWarning() << np->get<QString>(QStringLiteral("deviceName")) << "uses an old protocol version, this won't work"; //addLink(deviceId, socket, np, LanDeviceLink::Locally); delete m_receivedIdentityPackets.take(socket).np; } } void LanLinkProvider::deviceLinkDestroyed(QObject* destroyedDeviceLink) { const QString id = destroyedDeviceLink->property("deviceId").toString(); //qCDebug(KDECONNECT_CORE) << "deviceLinkDestroyed" << id; QMap< QString, LanDeviceLink* >::iterator linkIterator = m_links.find(id); Q_ASSERT(linkIterator != m_links.end()); if (linkIterator != m_links.end()) { Q_ASSERT(linkIterator.value() == destroyedDeviceLink); m_links.erase(linkIterator); auto pairingHandler = m_pairingHandlers.take(id); if (pairingHandler) { pairingHandler->deleteLater(); } } } void LanLinkProvider::configureSslSocket(QSslSocket* socket, const QString& deviceId, bool isDeviceTrusted) { // Setting supported ciphers manually, to match those on Android (FIXME: Test if this can be left unconfigured and still works for Android 4) QList<QSslCipher> socketCiphers; socketCiphers.append(QSslCipher(QStringLiteral("ECDHE-ECDSA-AES256-GCM-SHA384"))); socketCiphers.append(QSslCipher(QStringLiteral("ECDHE-ECDSA-AES128-GCM-SHA256"))); socketCiphers.append(QSslCipher(QStringLiteral("ECDHE-RSA-AES128-SHA"))); // Configure for ssl QSslConfiguration sslConfig; sslConfig.setCiphers(socketCiphers); sslConfig.setLocalCertificate(KdeConnectConfig::instance().certificate()); QFile 
privateKeyFile(KdeConnectConfig::instance().privateKeyPath()); QSslKey privateKey; if (privateKeyFile.open(QIODevice::ReadOnly)) { privateKey = QSslKey(privateKeyFile.readAll(), QSsl::Rsa); } privateKeyFile.close(); sslConfig.setPrivateKey(privateKey); if (isDeviceTrusted) { QString certString = KdeConnectConfig::instance().getDeviceProperty(deviceId, QStringLiteral("certificate"), QString()); sslConfig.setCaCertificates({QSslCertificate(certString.toLatin1())}); sslConfig.setPeerVerifyMode(QSslSocket::VerifyPeer); } else { sslConfig.setPeerVerifyMode(QSslSocket::QueryPeer); } socket->setSslConfiguration(sslConfig); socket->setPeerVerifyName(deviceId); //Usually SSL errors are only bad for trusted devices. Uncomment this section to log errors in any case, for debugging. //QObject::connect(socket, static_cast<void (QSslSocket::*)(const QList<QSslError>&)>(&QSslSocket::sslErrors), [](const QList<QSslError>& errors) //{ // Q_FOREACH (const QSslError& error, errors) { // qCDebug(KDECONNECT_CORE) << "SSL Error:" << error.errorString(); // } //}); } void LanLinkProvider::configureSocket(QSslSocket* socket) { socket->setProxy(QNetworkProxy::NoProxy); socket->setSocketOption(QAbstractSocket::KeepAliveOption, QVariant(1)); #ifdef TCP_KEEPIDLE // time to start sending keepalive packets (seconds) int maxIdle = 10; setsockopt(socket->socketDescriptor(), IPPROTO_TCP, TCP_KEEPIDLE, &maxIdle, sizeof(maxIdle)); #endif #ifdef TCP_KEEPINTVL // interval between keepalive packets after the initial period (seconds) int interval = 5; setsockopt(socket->socketDescriptor(), IPPROTO_TCP, TCP_KEEPINTVL, &interval, sizeof(interval)); #endif #ifdef TCP_KEEPCNT // number of missed keepalive packets before disconnecting int count = 3; setsockopt(socket->socketDescriptor(), IPPROTO_TCP, TCP_KEEPCNT, &count, sizeof(count)); #endif } void LanLinkProvider::addLink(const QString& deviceId, QSslSocket* socket, NetworkPacket* receivedPacket, LanDeviceLink::ConnectionStarted connectionOrigin) { // 
Socket disconnection will now be handled by LanDeviceLink disconnect(socket, &QAbstractSocket::disconnected, socket, &QObject::deleteLater); LanDeviceLink* deviceLink; //Do we have a link for this device already? QMap< QString, LanDeviceLink* >::iterator linkIterator = m_links.find(deviceId); if (linkIterator != m_links.end()) { //qCDebug(KDECONNECT_CORE) << "Reusing link to" << deviceId; deviceLink = linkIterator.value(); deviceLink->reset(socket, connectionOrigin); } else { deviceLink = new LanDeviceLink(deviceId, this, socket, connectionOrigin); connect(deviceLink, &QObject::destroyed, this, &LanLinkProvider::deviceLinkDestroyed); m_links[deviceId] = deviceLink; if (m_pairingHandlers.contains(deviceId)) { //We shouldn't have a pairinghandler if we didn't have a link. //Crash if debug, recover if release (by setting the new devicelink to the old pairinghandler) Q_ASSERT(m_pairingHandlers.contains(deviceId)); m_pairingHandlers[deviceId]->setDeviceLink(deviceLink); } } Q_EMIT onConnectionReceived(*receivedPacket, deviceLink); } LanPairingHandler* LanLinkProvider::createPairingHandler(DeviceLink* link) { LanPairingHandler* ph = m_pairingHandlers.value(link->deviceId()); if (!ph) { ph = new LanPairingHandler(link); qCDebug(KDECONNECT_CORE) << "creating pairing handler for" << link->deviceId(); connect (ph, &LanPairingHandler::pairingError, link, &DeviceLink::pairingError); m_pairingHandlers[link->deviceId()] = ph; } return ph; } void LanLinkProvider::userRequestsPair(const QString& deviceId) { LanPairingHandler* ph = createPairingHandler(m_links.value(deviceId)); ph->requestPairing(); } void LanLinkProvider::userRequestsUnpair(const QString& deviceId) { LanPairingHandler* ph = createPairingHandler(m_links.value(deviceId)); ph->unpair(); } void LanLinkProvider::incomingPairPacket(DeviceLink* deviceLink, const NetworkPacket& np) { LanPairingHandler* ph = createPairingHandler(deviceLink); ph->packetReceived(np); }
./CrossVul/dataset_final_sorted/CWE-400/cpp/good_4319_0
crossvul-cpp_data_bad_4322_0
/**
 * SPDX-FileCopyrightText: 2013 Albert Vaca <albertvaka@gmail.com>
 *
 * SPDX-License-Identifier: GPL-2.0-only OR GPL-3.0-only OR LicenseRef-KDE-Accepted-GPL
 */

#include "lanlinkprovider.h"
#include "core_debug.h"

#ifndef Q_OS_WIN
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <netdb.h>
#endif

#include <QHostInfo>
#include <QTcpServer>
#include <QMetaEnum>
#include <QNetworkProxy>
#include <QUdpSocket>
#include <QNetworkSession>
#include <QNetworkConfigurationManager>
#include <QSslCipher>
#include <QSslConfiguration>
#include <QSslKey>

#include "daemon.h"
#include "landevicelink.h"
#include "lanpairinghandler.h"
#include "kdeconnectconfig.h"
#include "qtcompat_p.h"

// Protocol versions below this do not support TLS; such peers are rejected.
#define MIN_VERSION_WITH_SSL_SUPPORT 6

// Cap on remembered links to devices that are not (yet) paired, so an
// attacker flooding us with identity packets cannot grow m_links unboundedly.
static const int MAX_UNPAIRED_CONNECTIONS = 42;

/**
 * Link provider that discovers peers via UDP broadcast and talks to them
 * over TLS-upgraded TCP connections.
 *
 * @param testMode         when true, bind/broadcast only on localhost
 * @param udpBroadcastPort destination port for our identity broadcasts
 * @param udpListenPort    port we bind to receive peers' broadcasts
 */
LanLinkProvider::LanLinkProvider(
        bool testMode,
        quint16 udpBroadcastPort,
        quint16 udpListenPort)
    : m_server(new Server(this))
    , m_udpSocket(this)
    , m_tcpPort(0)
    , m_udpBroadcastPort(udpBroadcastPort)
    , m_udpListenPort(udpListenPort)
    , m_testMode(testMode)
    , m_combineBroadcastsTimer(this)
{
    m_combineBroadcastsTimer.setInterval(0); // increase this if waiting a single event-loop iteration is not enough
    m_combineBroadcastsTimer.setSingleShot(true);
    connect(&m_combineBroadcastsTimer, &QTimer::timeout, this, &LanLinkProvider::broadcastToNetwork);

    connect(&m_udpSocket, &QIODevice::readyRead, this, &LanLinkProvider::udpBroadcastReceived);

    m_server->setProxy(QNetworkProxy::NoProxy);
    connect(m_server, &QTcpServer::newConnection, this, &LanLinkProvider::newConnection);

    m_udpSocket.setProxy(QNetworkProxy::NoProxy);

    //Detect when a network interface changes status, so we announce ourselves in the new network
    QNetworkConfigurationManager* networkManager = new QNetworkConfigurationManager(this);
    connect(networkManager, &QNetworkConfigurationManager::configurationChanged, this, &LanLinkProvider::onNetworkConfigurationChanged);
}

// Re-announce ourselves whenever a network configuration becomes active
// (e.g. Wi-Fi reconnect). m_lastConfig guards against duplicate signals
// for the same configuration.
void LanLinkProvider::onNetworkConfigurationChanged(const QNetworkConfiguration& config)
{
    if (m_lastConfig != config && config.state() == QNetworkConfiguration::Active) {
        m_lastConfig = config;
        onNetworkChange();
    }
}

LanLinkProvider::~LanLinkProvider()
{
}

// Bind the UDP listener and find a free TCP port in [MIN_TCP_PORT, MAX_TCP_PORT];
// then broadcast our identity. Leaves m_tcpPort at 0 if no TCP port is available.
void LanLinkProvider::onStart()
{
    const QHostAddress bindAddress = m_testMode? QHostAddress::LocalHost : QHostAddress::Any;

    bool success = m_udpSocket.bind(bindAddress, m_udpListenPort, QUdpSocket::ShareAddress);
    if (!success) {
        QAbstractSocket::SocketError sockErr = m_udpSocket.error();
        // Refer to https://doc.qt.io/qt-5/qabstractsocket.html#SocketError-enum to decode socket error number
        QString errorMessage = QString::fromLatin1(QMetaEnum::fromType<QAbstractSocket::SocketError>().valueToKey(sockErr));
        qCritical(KDECONNECT_CORE)
            << QLatin1String("Failed to bind UDP socket on port")
            << m_udpListenPort
            << QLatin1String("with error")
            << errorMessage;
    }
    Q_ASSERT(success);

    m_tcpPort = MIN_TCP_PORT;
    while (!m_server->listen(bindAddress, m_tcpPort)) {
        m_tcpPort++;
        if (m_tcpPort > MAX_TCP_PORT) { //No ports available?
            qCritical(KDECONNECT_CORE) << "Error opening a port in range" << MIN_TCP_PORT << "-" << MAX_TCP_PORT;
            m_tcpPort = 0;
            return;
        }
    }

    onNetworkChange();
    qCDebug(KDECONNECT_CORE) << "LanLinkProvider started";
}

void LanLinkProvider::onStop()
{
    m_udpSocket.close();
    m_server->close();
    qCDebug(KDECONNECT_CORE) << "LanLinkProvider stopped";
}

// Coalesce multiple network-change notifications into a single broadcast
// by funnelling them through a zero-interval single-shot timer.
void LanLinkProvider::onNetworkChange()
{
    if (m_combineBroadcastsTimer.isActive()) {
        qCDebug(KDECONNECT_CORE) << "Preventing duplicate broadcasts";
        return;
    }
    m_combineBroadcastsTimer.start();
}

//I'm in a new network, let's be polite and introduce myself
void LanLinkProvider::broadcastToNetwork()
{
    if (!m_server->isListening()) {
        //Not started
        return;
    }

    Q_ASSERT(m_tcpPort != 0);

    qCDebug(KDECONNECT_CORE()) << "Broadcasting identity packet";

    QList<QHostAddress> destinations = getBroadcastAddresses();

    NetworkPacket np;
    NetworkPacket::createIdentityPacket(&np);
    np.set(QStringLiteral("tcpPort"), m_tcpPort);

#ifdef Q_OS_WIN
    //On Windows we need to broadcast from every local IP address to reach all networks
    QUdpSocket sendSocket;
    sendSocket.setProxy(QNetworkProxy::NoProxy);
    for (const QNetworkInterface& iface : QNetworkInterface::allInterfaces()) {
        if ( (iface.flags() & QNetworkInterface::IsUp)
          && (iface.flags() & QNetworkInterface::IsRunning)
          && (iface.flags() & QNetworkInterface::CanBroadcast)) {
            for (const QNetworkAddressEntry& ifaceAddress : iface.addressEntries()) {
                QHostAddress sourceAddress = ifaceAddress.ip();
                if (sourceAddress.protocol() == QAbstractSocket::IPv4Protocol && sourceAddress != QHostAddress::LocalHost) {
                    qCDebug(KDECONNECT_CORE()) << "Broadcasting as" << sourceAddress;
                    sendBroadcasts(sendSocket, np, destinations);
                    sendSocket.close();
                }
            }
        }
    }
#else
    sendBroadcasts(m_udpSocket, np, destinations);
#endif
}

// Build the list of broadcast targets: the global broadcast address
// (localhost in test mode) plus any user-configured custom device addresses.
QList<QHostAddress> LanLinkProvider::getBroadcastAddresses()
{
    const QStringList customDevices = KdeConnectConfig::instance().customDevices();

    QList<QHostAddress> destinations;
    destinations.reserve(customDevices.length() + 1);

    // Default broadcast address
    destinations.append(m_testMode ? QHostAddress::LocalHost : QHostAddress::Broadcast);

    // Custom device addresses
    for (auto& customDevice : customDevices) {
        QHostAddress address(customDevice);
        if (address.isNull()) {
            qCWarning(KDECONNECT_CORE) << "Invalid custom device address" << customDevice;
        } else {
            destinations.append(address);
        }
    }

    return destinations;
}

// Serialize the packet once and send it to every target address.
void LanLinkProvider::sendBroadcasts(
        QUdpSocket& socket, const NetworkPacket& np, const QList<QHostAddress>& addresses)
{
    const QByteArray payload = np.serialize();

    for (auto& address : addresses) {
        socket.writeDatagram(payload, address, m_udpBroadcastPort);
    }
}

//I'm the existing device, a new device is kindly introducing itself.
//I will create a TcpSocket and try to connect. This can result in either tcpSocketConnected() or connectError().
void LanLinkProvider::udpBroadcastReceived()
{
    while (m_udpSocket.hasPendingDatagrams()) {

        QByteArray datagram;
        datagram.resize(m_udpSocket.pendingDatagramSize());
        QHostAddress sender;

        m_udpSocket.readDatagram(datagram.data(), datagram.size(), &sender);

        if (sender.isLoopback() && !m_testMode)
            continue;

        NetworkPacket* receivedPacket = new NetworkPacket(QLatin1String(""));
        bool success = NetworkPacket::unserialize(datagram, receivedPacket);

        //qCDebug(KDECONNECT_CORE) << "udp connection from " << receivedPacket->;
        //qCDebug(KDECONNECT_CORE) << "Datagram " << datagram.data() ;

        if (!success) {
            qCDebug(KDECONNECT_CORE) << "Could not unserialize UDP packet";
            delete receivedPacket;
            continue;
        }

        if (receivedPacket->type() != PACKET_TYPE_IDENTITY) {
            qCDebug(KDECONNECT_CORE) << "Received a UDP packet of wrong type" << receivedPacket->type();
            delete receivedPacket;
            continue;
        }

        if (receivedPacket->get<QString>(QStringLiteral("deviceId")) == KdeConnectConfig::instance().deviceId()) {
            //qCDebug(KDECONNECT_CORE) << "Ignoring my own broadcast";
            delete receivedPacket;
            continue;
        }

        int tcpPort = receivedPacket->get<int>(QStringLiteral("tcpPort"));

        //qCDebug(KDECONNECT_CORE) << "Received Udp identity packet from" << sender << " asking for a tcp connection on port " << tcpPort;

        QSslSocket* socket = new QSslSocket(this);
        socket->setProxy(QNetworkProxy::NoProxy);
        // Park the identity packet until the TCP handshake finishes; the
        // slots below take ownership of (and eventually delete) it.
        m_receivedIdentityPackets[socket].np = receivedPacket;
        m_receivedIdentityPackets[socket].sender = sender;
        connect(socket, &QAbstractSocket::connected, this, &LanLinkProvider::tcpSocketConnected);
#if QT_VERSION < QT_VERSION_CHECK(5,15,0)
        connect(socket, QOverload<QAbstractSocket::SocketError>::of(&QAbstractSocket::error), this, &LanLinkProvider::connectError);
#else
        connect(socket, &QAbstractSocket::errorOccurred, this, &LanLinkProvider::connectError);
#endif
        socket->connectToHost(sender, tcpPort);
    }
}

// Our outgoing TCP connection failed; fall back to asking the peer to
// connect to us instead (reverse connection via UDP), and clean up.
void LanLinkProvider::connectError(QAbstractSocket::SocketError socketError)
{
    QSslSocket* socket = qobject_cast<QSslSocket*>(sender());
    if (!socket) return;

    qCDebug(KDECONNECT_CORE) << "Socket error" << socketError;
    qCDebug(KDECONNECT_CORE) << "Fallback (1), try reverse connection (send udp packet)" << socket->errorString();
    NetworkPacket np(QLatin1String(""));
    NetworkPacket::createIdentityPacket(&np);
    np.set(QStringLiteral("tcpPort"), m_tcpPort);
    m_udpSocket.writeDatagram(np.serialize(), m_receivedIdentityPackets[socket].sender, m_udpBroadcastPort);

    //The socket we created didn't work, and we didn't manage
    //to create a LanDeviceLink from it, deleting everything.
    delete m_receivedIdentityPackets.take(socket).np;
    socket->deleteLater();
}

//We received a UDP packet and answered by connecting to them by TCP. This gets called on a successful connection.
void LanLinkProvider::tcpSocketConnected()
{
    QSslSocket* socket = qobject_cast<QSslSocket*>(sender());

    if (!socket) return;
    // TODO Delete me?
#if QT_VERSION < QT_VERSION_CHECK(5,15,0)
    disconnect(socket, QOverload<QAbstractSocket::SocketError>::of(&QAbstractSocket::error),
               this, &LanLinkProvider::connectError);
#else
    disconnect(socket, &QAbstractSocket::errorOccurred, this, &LanLinkProvider::connectError);
#endif

    configureSocket(socket);

    // If socket disconnects due to any reason after connection, link on ssl failure
    connect(socket, &QAbstractSocket::disconnected, socket, &QObject::deleteLater);

    NetworkPacket* receivedPacket = m_receivedIdentityPackets[socket].np;
    const QString& deviceId = receivedPacket->get<QString>(QStringLiteral("deviceId"));
    //qCDebug(KDECONNECT_CORE) << "tcpSocketConnected" << socket->isWritable();

    // If network is on ssl, do not believe when they are connected, believe when handshake is completed
    NetworkPacket np2(QLatin1String(""));
    NetworkPacket::createIdentityPacket(&np2);
    socket->write(np2.serialize());
    bool success = socket->waitForBytesWritten();

    if (success) {

        qCDebug(KDECONNECT_CORE) << "TCP connection done (i'm the existing device)";

        // if ssl supported
        if (receivedPacket->get<int>(QStringLiteral("protocolVersion")) >= MIN_VERSION_WITH_SSL_SUPPORT) {

            bool isDeviceTrusted = KdeConnectConfig::instance().trustedDevices().contains(deviceId);
            configureSslSocket(socket, deviceId, isDeviceTrusted);

            qCDebug(KDECONNECT_CORE) << "Starting server ssl (I'm the client TCP socket)";

            connect(socket, &QSslSocket::encrypted, this, &LanLinkProvider::encrypted);
            connect(socket, QOverload<const QList<QSslError> &>::of(&QSslSocket::sslErrors), this, &LanLinkProvider::sslErrors);

            socket->startServerEncryption();

            return; // Return statement prevents from deleting received packet, needed in slot "encrypted"
        } else {
            qWarning() << receivedPacket->get<QString>(QStringLiteral("deviceName")) << "uses an old protocol version, this won't work";
            //addLink(deviceId, socket, receivedPacket, LanDeviceLink::Remotely);
        }

    } else {
        //I think this will never happen, but if it happens the deviceLink
        //(or the socket that is now inside it) might not be valid. Delete them.
        qCDebug(KDECONNECT_CORE) << "Fallback (2), try reverse connection (send udp packet)";
        m_udpSocket.writeDatagram(np2.serialize(), m_receivedIdentityPackets[socket].sender, m_udpBroadcastPort);
    }

    delete m_receivedIdentityPackets.take(socket).np;
    //We don't delete the socket because now it's owned by the LanDeviceLink
}

// TLS handshake completed successfully on either side; promote the
// connection to a LanDeviceLink.
void LanLinkProvider::encrypted()
{
    qCDebug(KDECONNECT_CORE) << "Socket successfully established an SSL connection";

    QSslSocket* socket = qobject_cast<QSslSocket*>(sender());
    if (!socket) return;

    Q_ASSERT(socket->mode() != QSslSocket::UnencryptedMode);
    LanDeviceLink::ConnectionStarted connectionOrigin = (socket->mode() == QSslSocket::SslClientMode)? LanDeviceLink::Locally : LanDeviceLink::Remotely;

    NetworkPacket* receivedPacket = m_receivedIdentityPackets[socket].np;
    const QString& deviceId = receivedPacket->get<QString>(QStringLiteral("deviceId"));

    addLink(deviceId, socket, receivedPacket, connectionOrigin);

    // Copied from tcpSocketConnected slot, now delete received packet
    delete m_receivedIdentityPackets.take(socket).np;
}

// Any SSL error other than SelfSignedCertificate is fatal: drop the
// connection and discard the pending identity packet.
void LanLinkProvider::sslErrors(const QList<QSslError>& errors)
{
    QSslSocket* socket = qobject_cast<QSslSocket*>(sender());
    if (!socket) return;

    bool fatal = false;
    for (const QSslError& error : errors) {
        if (error.error() != QSslError::SelfSignedCertificate) {
            qCCritical(KDECONNECT_CORE) << "Disconnecting due to fatal SSL Error: " << error;
            fatal = true;
        } else {
            qCDebug(KDECONNECT_CORE) << "Ignoring self-signed cert error";
        }
    }

    if (fatal) {
        socket->disconnectFromHost();
        delete m_receivedIdentityPackets.take(socket).np;
    }
}

//I'm the new device and this is the answer to my UDP identity packet (no data received yet). They are connecting to us through TCP, and they should send an identity.
void LanLinkProvider::newConnection()
{
    qCDebug(KDECONNECT_CORE) << "LanLinkProvider newConnection";

    while (m_server->hasPendingConnections()) {
        QSslSocket* socket = m_server->nextPendingConnection();
        configureSocket(socket);
        //This socket is still managed by us (and child of the QTcpServer), if
        //it disconnects before we manage to pass it to a LanDeviceLink, it's
        //our responsibility to delete it. We do so with this connection.
        connect(socket, &QAbstractSocket::disconnected,
                socket, &QObject::deleteLater);
        connect(socket, &QIODevice::readyRead,
                this, &LanLinkProvider::dataReceived);

        // Defensive timeout: if the peer never sends its identity, drop the
        // connection so idle sockets cannot accumulate. The timer is parented
        // to the socket and deleted once encryption succeeds.
        QTimer* timer = new QTimer(socket);
        timer->setSingleShot(true);
        timer->setInterval(1000);
        connect(socket, &QSslSocket::encrypted,
                timer, &QObject::deleteLater);
        connect(timer, &QTimer::timeout, socket, [socket] {
            qCWarning(KDECONNECT_CORE) << "LanLinkProvider/newConnection: Host timed out without sending any identity." << socket->peerAddress();
            socket->disconnectFromHost();
        });
        timer->start();
    }
}

//I'm the new device and this is the answer to my UDP identity packet (data received)
void LanLinkProvider::dataReceived()
{
    QSslSocket* socket = qobject_cast<QSslSocket*>(sender());
    // FIX: guard against a null cast like every other slot in this class does.
    if (!socket) return;

    //the size here is arbitrary and is now at 8192 bytes. It needs to be considerably long as it includes the capabilities but there needs to be a limit
    //Tested between my systems and I get around 2000 per identity package.
    if (socket->bytesAvailable() > 8192) {
        qCWarning(KDECONNECT_CORE) << "LanLinkProvider/newConnection: Suspiciously long identity package received. Closing connection." << socket->peerAddress() << socket->bytesAvailable();
        socket->disconnectFromHost();
        return;
    }

#if QT_VERSION < QT_VERSION_CHECK(5,7,0)
    if (!socket->canReadLine())
        return;
#else
    socket->startTransaction();
#endif

    const QByteArray data = socket->readLine();

    qCDebug(KDECONNECT_CORE) << "LanLinkProvider received reply:" << data;

    NetworkPacket* np = new NetworkPacket(QLatin1String(""));
    bool success = NetworkPacket::unserialize(data, np);

#if QT_VERSION < QT_VERSION_CHECK(5,7,0)
    if (!success) {
        delete np;
        return;
    }
#else
    if (!success) {
        delete np;
        socket->rollbackTransaction();
        return;
    }
    socket->commitTransaction();
#endif

    if (np->type() != PACKET_TYPE_IDENTITY) {
        qCWarning(KDECONNECT_CORE) << "LanLinkProvider/newConnection: Expected identity, received " << np->type();
        delete np;
        return;
    }

    // Needed in "encrypted" if ssl is used, similar to "tcpSocketConnected"
    m_receivedIdentityPackets[socket].np = np;

    const QString& deviceId = np->get<QString>(QStringLiteral("deviceId"));
    //qCDebug(KDECONNECT_CORE) << "Handshaking done (i'm the new device)";

    //This socket will now be owned by the LanDeviceLink or we don't want more data to be received, forget about it
    disconnect(socket, &QIODevice::readyRead, this, &LanLinkProvider::dataReceived);

    if (np->get<int>(QStringLiteral("protocolVersion")) >= MIN_VERSION_WITH_SSL_SUPPORT) {

        bool isDeviceTrusted = KdeConnectConfig::instance().trustedDevices().contains(deviceId);
        configureSslSocket(socket, deviceId, isDeviceTrusted);

        qCDebug(KDECONNECT_CORE) << "Starting client ssl (but I'm the server TCP socket)";

        connect(socket, &QSslSocket::encrypted, this, &LanLinkProvider::encrypted);

        if (isDeviceTrusted) {
            connect(socket, QOverload<const QList<QSslError> &>::of(&QSslSocket::sslErrors), this, &LanLinkProvider::sslErrors);
        }

        socket->startClientEncryption();

    } else {
        qWarning() << np->get<QString>(QStringLiteral("deviceName")) << "uses an old protocol version, this won't work";
        //addLink(deviceId, socket, np, LanDeviceLink::Locally);
        delete m_receivedIdentityPackets.take(socket).np;
    }
}

// A LanDeviceLink was destroyed: drop it from m_links and retire the
// pairing handler that belonged to it.
void LanLinkProvider::deviceLinkDestroyed(QObject* destroyedDeviceLink)
{
    const QString id = destroyedDeviceLink->property("deviceId").toString();
    //qCDebug(KDECONNECT_CORE) << "deviceLinkDestroyed" << id;

    QMap< QString, LanDeviceLink* >::iterator linkIterator = m_links.find(id);
    Q_ASSERT(linkIterator != m_links.end());
    if (linkIterator != m_links.end()) {
        Q_ASSERT(linkIterator.value() == destroyedDeviceLink);
        m_links.erase(linkIterator);
        auto pairingHandler = m_pairingHandlers.take(id);
        if (pairingHandler) {
            pairingHandler->deleteLater();
        }
    }
}

// Install our certificate/private key and the cipher list on the socket.
// For trusted devices the peer certificate is pinned and fully verified;
// for unknown devices we only query the peer cert without failing on it.
void LanLinkProvider::configureSslSocket(QSslSocket* socket, const QString& deviceId, bool isDeviceTrusted)
{
    // Setting supported ciphers manually, to match those on Android (FIXME: Test if this can be left unconfigured and still works for Android 4)
    QList<QSslCipher> socketCiphers;
    socketCiphers.append(QSslCipher(QStringLiteral("ECDHE-ECDSA-AES256-GCM-SHA384")));
    socketCiphers.append(QSslCipher(QStringLiteral("ECDHE-ECDSA-AES128-GCM-SHA256")));
    socketCiphers.append(QSslCipher(QStringLiteral("ECDHE-RSA-AES128-SHA")));

    // Configure for ssl
    QSslConfiguration sslConfig;
    sslConfig.setCiphers(socketCiphers);
    sslConfig.setLocalCertificate(KdeConnectConfig::instance().certificate());

    QFile privateKeyFile(KdeConnectConfig::instance().privateKeyPath());
    QSslKey privateKey;
    if (privateKeyFile.open(QIODevice::ReadOnly)) {
        privateKey = QSslKey(privateKeyFile.readAll(), QSsl::Rsa);
    }
    privateKeyFile.close();
    sslConfig.setPrivateKey(privateKey);

    if (isDeviceTrusted) {
        QString certString = KdeConnectConfig::instance().getDeviceProperty(deviceId, QStringLiteral("certificate"), QString());
        sslConfig.setCaCertificates({QSslCertificate(certString.toLatin1())});
        sslConfig.setPeerVerifyMode(QSslSocket::VerifyPeer);
    } else {
        sslConfig.setPeerVerifyMode(QSslSocket::QueryPeer);
    }
    socket->setSslConfiguration(sslConfig);
    socket->setPeerVerifyName(deviceId);

    //Usually SSL errors are only bad for trusted devices. Uncomment this section to log errors in any case, for debugging.
    //QObject::connect(socket, static_cast<void (QSslSocket::*)(const QList<QSslError>&)>(&QSslSocket::sslErrors), [](const QList<QSslError>& errors)
    //{
    //    Q_FOREACH (const QSslError& error, errors) {
    //        qCDebug(KDECONNECT_CORE) << "SSL Error:" << error.errorString();
    //    }
    //});
}

// Enable TCP keepalive on the socket so dead peers are detected; the
// TCP_KEEP* tunables are platform-dependent and guarded by #ifdef.
void LanLinkProvider::configureSocket(QSslSocket* socket) {

    socket->setProxy(QNetworkProxy::NoProxy);

    socket->setSocketOption(QAbstractSocket::KeepAliveOption, QVariant(1));

#ifdef TCP_KEEPIDLE
    // time to start sending keepalive packets (seconds)
    int maxIdle = 10;
    setsockopt(socket->socketDescriptor(), IPPROTO_TCP, TCP_KEEPIDLE, &maxIdle, sizeof(maxIdle));
#endif

#ifdef TCP_KEEPINTVL
    // interval between keepalive packets after the initial period (seconds)
    int interval = 5;
    setsockopt(socket->socketDescriptor(), IPPROTO_TCP, TCP_KEEPINTVL, &interval, sizeof(interval));
#endif

#ifdef TCP_KEEPCNT
    // number of missed keepalive packets before disconnecting
    int count = 3;
    setsockopt(socket->socketDescriptor(), IPPROTO_TCP, TCP_KEEPCNT, &count, sizeof(count));
#endif

}

// Register (or refresh) the link for deviceId and emit onConnectionReceived.
// Untrusted devices are rejected once m_links exceeds MAX_UNPAIRED_CONNECTIONS.
void LanLinkProvider::addLink(const QString& deviceId, QSslSocket* socket, NetworkPacket* receivedPacket, LanDeviceLink::ConnectionStarted connectionOrigin)
{
    // Socket disconnection will now be handled by LanDeviceLink
    disconnect(socket, &QAbstractSocket::disconnected, socket, &QObject::deleteLater);

    LanDeviceLink* deviceLink;
    //Do we have a link for this device already?
    QMap< QString, LanDeviceLink* >::iterator linkIterator = m_links.find(deviceId);
    if (linkIterator != m_links.end()) {
        //qCDebug(KDECONNECT_CORE) << "Reusing link to" << deviceId;
        deviceLink = linkIterator.value();
        deviceLink->reset(socket, connectionOrigin);
    } else {
        // FIX: check the unpaired-connection limit *before* allocating the
        // LanDeviceLink; the previous code created it first and leaked it
        // when the connection was rejected.
        bool isDeviceTrusted = KdeConnectConfig::instance().trustedDevices().contains(deviceId);
        if (!isDeviceTrusted && m_links.size() > MAX_UNPAIRED_CONNECTIONS) {
            qCWarning(KDECONNECT_CORE) << "Too many unpaired devices to remember them all. Ignoring " << deviceId;
            socket->disconnectFromHost();
            socket->deleteLater();
            return;
        }

        deviceLink = new LanDeviceLink(deviceId, this, socket, connectionOrigin);

        connect(deviceLink, &QObject::destroyed, this, &LanLinkProvider::deviceLinkDestroyed);
        m_links[deviceId] = deviceLink;
        if (m_pairingHandlers.contains(deviceId)) {
            //We shouldn't have a pairinghandler if we didn't have a link.
            //Crash if debug, recover if release (by setting the new devicelink to the old pairinghandler)
            Q_ASSERT(m_pairingHandlers.contains(deviceId));
            m_pairingHandlers[deviceId]->setDeviceLink(deviceLink);
        }
    }
    Q_EMIT onConnectionReceived(*receivedPacket, deviceLink);
}

// Lazily create (and cache) the pairing handler for a link's device.
LanPairingHandler* LanLinkProvider::createPairingHandler(DeviceLink* link)
{
    LanPairingHandler* ph = m_pairingHandlers.value(link->deviceId());
    if (!ph) {
        ph = new LanPairingHandler(link);
        qCDebug(KDECONNECT_CORE) << "creating pairing handler for" << link->deviceId();
        connect (ph, &LanPairingHandler::pairingError, link, &DeviceLink::pairingError);
        m_pairingHandlers[link->deviceId()] = ph;
    }
    return ph;
}

void LanLinkProvider::userRequestsPair(const QString& deviceId)
{
    LanPairingHandler* ph = createPairingHandler(m_links.value(deviceId));
    ph->requestPairing();
}

void LanLinkProvider::userRequestsUnpair(const QString& deviceId)
{
    LanPairingHandler* ph = createPairingHandler(m_links.value(deviceId));
    ph->unpair();
}

void LanLinkProvider::incomingPairPacket(DeviceLink* deviceLink, const NetworkPacket& np)
{
    LanPairingHandler* ph = createPairingHandler(deviceLink);
    ph->packetReceived(np);
}
./CrossVul/dataset_final_sorted/CWE-400/cpp/bad_4322_0
crossvul-cpp_data_good_4320_0
/** * SPDX-FileCopyrightText: 2013 Albert Vaca <albertvaka@gmail.com> * * SPDX-License-Identifier: GPL-2.0-only OR GPL-3.0-only OR LicenseRef-KDE-Accepted-GPL */ #include "lanlinkprovider.h" #include "core_debug.h" #ifndef Q_OS_WIN #include <sys/socket.h> #include <netinet/in.h> #include <netinet/tcp.h> #include <netdb.h> #endif #include <QHostInfo> #include <QTcpServer> #include <QMetaEnum> #include <QNetworkProxy> #include <QUdpSocket> #include <QNetworkSession> #include <QNetworkConfigurationManager> #include <QSslCipher> #include <QSslConfiguration> #include <QSslKey> #include "daemon.h" #include "landevicelink.h" #include "lanpairinghandler.h" #include "kdeconnectconfig.h" #include "qtcompat_p.h" #define MIN_VERSION_WITH_SSL_SUPPORT 6 LanLinkProvider::LanLinkProvider( bool testMode, quint16 udpBroadcastPort, quint16 udpListenPort ) : m_server(new Server(this)) , m_udpSocket(this) , m_tcpPort(0) , m_udpBroadcastPort(udpBroadcastPort) , m_udpListenPort(udpListenPort) , m_testMode(testMode) , m_combineBroadcastsTimer(this) { m_combineBroadcastsTimer.setInterval(0); // increase this if waiting a single event-loop iteration is not enough m_combineBroadcastsTimer.setSingleShot(true); connect(&m_combineBroadcastsTimer, &QTimer::timeout, this, &LanLinkProvider::broadcastToNetwork); connect(&m_udpSocket, &QIODevice::readyRead, this, &LanLinkProvider::udpBroadcastReceived); m_server->setProxy(QNetworkProxy::NoProxy); connect(m_server, &QTcpServer::newConnection, this, &LanLinkProvider::newConnection); m_udpSocket.setProxy(QNetworkProxy::NoProxy); //Detect when a network interface changes status, so we announce ourselves in the new network QNetworkConfigurationManager* networkManager = new QNetworkConfigurationManager(this); connect(networkManager, &QNetworkConfigurationManager::configurationChanged, this, &LanLinkProvider::onNetworkConfigurationChanged); } void LanLinkProvider::onNetworkConfigurationChanged(const QNetworkConfiguration& config) { if (m_lastConfig != 
config && config.state() == QNetworkConfiguration::Active) { m_lastConfig = config; onNetworkChange(); } } LanLinkProvider::~LanLinkProvider() { } void LanLinkProvider::onStart() { const QHostAddress bindAddress = m_testMode? QHostAddress::LocalHost : QHostAddress::Any; bool success = m_udpSocket.bind(bindAddress, m_udpListenPort, QUdpSocket::ShareAddress); if (!success) { QAbstractSocket::SocketError sockErr = m_udpSocket.error(); // Refer to https://doc.qt.io/qt-5/qabstractsocket.html#SocketError-enum to decode socket error number QString errorMessage = QString::fromLatin1(QMetaEnum::fromType<QAbstractSocket::SocketError>().valueToKey(sockErr)); qCritical(KDECONNECT_CORE) << QLatin1String("Failed to bind UDP socket on port") << m_udpListenPort << QLatin1String("with error") << errorMessage; } Q_ASSERT(success); m_tcpPort = MIN_TCP_PORT; while (!m_server->listen(bindAddress, m_tcpPort)) { m_tcpPort++; if (m_tcpPort > MAX_TCP_PORT) { //No ports available? qCritical(KDECONNECT_CORE) << "Error opening a port in range" << MIN_TCP_PORT << "-" << MAX_TCP_PORT; m_tcpPort = 0; return; } } onNetworkChange(); qCDebug(KDECONNECT_CORE) << "LanLinkProvider started"; } void LanLinkProvider::onStop() { m_udpSocket.close(); m_server->close(); qCDebug(KDECONNECT_CORE) << "LanLinkProvider stopped"; } void LanLinkProvider::onNetworkChange() { if (m_combineBroadcastsTimer.isActive()) { qCDebug(KDECONNECT_CORE) << "Preventing duplicate broadcasts"; return; } m_combineBroadcastsTimer.start(); } //I'm in a new network, let's be polite and introduce myself void LanLinkProvider::broadcastToNetwork() { if (!m_server->isListening()) { //Not started return; } Q_ASSERT(m_tcpPort != 0); qCDebug(KDECONNECT_CORE()) << "Broadcasting identity packet"; QList<QHostAddress> destinations = getBroadcastAddresses(); NetworkPacket np; NetworkPacket::createIdentityPacket(&np); np.set(QStringLiteral("tcpPort"), m_tcpPort); #ifdef Q_OS_WIN //On Windows we need to broadcast from every local IP address to 
reach all networks QUdpSocket sendSocket; sendSocket.setProxy(QNetworkProxy::NoProxy); for (const QNetworkInterface& iface : QNetworkInterface::allInterfaces()) { if ( (iface.flags() & QNetworkInterface::IsUp) && (iface.flags() & QNetworkInterface::IsRunning) && (iface.flags() & QNetworkInterface::CanBroadcast)) { for (const QNetworkAddressEntry& ifaceAddress : iface.addressEntries()) { QHostAddress sourceAddress = ifaceAddress.ip(); if (sourceAddress.protocol() == QAbstractSocket::IPv4Protocol && sourceAddress != QHostAddress::LocalHost) { qCDebug(KDECONNECT_CORE()) << "Broadcasting as" << sourceAddress; sendBroadcasts(sendSocket, np, destinations); sendSocket.close(); } } } } #else sendBroadcasts(m_udpSocket, np, destinations); #endif } QList<QHostAddress> LanLinkProvider::getBroadcastAddresses() { const QStringList customDevices = KdeConnectConfig::instance().customDevices(); QList<QHostAddress> destinations; destinations.reserve(customDevices.length() + 1); // Default broadcast address destinations.append(m_testMode ? QHostAddress::LocalHost : QHostAddress::Broadcast); // Custom device addresses for (auto& customDevice : customDevices) { QHostAddress address(customDevice); if (address.isNull()) { qCWarning(KDECONNECT_CORE) << "Invalid custom device address" << customDevice; } else { destinations.append(address); } } return destinations; } void LanLinkProvider::sendBroadcasts( QUdpSocket& socket, const NetworkPacket& np, const QList<QHostAddress>& addresses) { const QByteArray payload = np.serialize(); for (auto& address : addresses) { socket.writeDatagram(payload, address, m_udpBroadcastPort); } } //I'm the existing device, a new device is kindly introducing itself. //I will create a TcpSocket and try to connect. This can result in either tcpSocketConnected() or connectError(). 
void LanLinkProvider::udpBroadcastReceived() { while (m_udpSocket.hasPendingDatagrams()) { QByteArray datagram; datagram.resize(m_udpSocket.pendingDatagramSize()); QHostAddress sender; m_udpSocket.readDatagram(datagram.data(), datagram.size(), &sender); if (sender.isLoopback() && !m_testMode) continue; NetworkPacket* receivedPacket = new NetworkPacket(QLatin1String("")); bool success = NetworkPacket::unserialize(datagram, receivedPacket); //qCDebug(KDECONNECT_CORE) << "udp connection from " << receivedPacket->; //qCDebug(KDECONNECT_CORE) << "Datagram " << datagram.data() ; if (!success) { qCDebug(KDECONNECT_CORE) << "Could not unserialize UDP packet"; delete receivedPacket; continue; } if (receivedPacket->type() != PACKET_TYPE_IDENTITY) { qCDebug(KDECONNECT_CORE) << "Received a UDP packet of wrong type" << receivedPacket->type(); delete receivedPacket; continue; } if (receivedPacket->get<QString>(QStringLiteral("deviceId")) == KdeConnectConfig::instance().deviceId()) { //qCDebug(KDECONNECT_CORE) << "Ignoring my own broadcast"; delete receivedPacket; continue; } int tcpPort = receivedPacket->get<int>(QStringLiteral("tcpPort")); //qCDebug(KDECONNECT_CORE) << "Received Udp identity packet from" << sender << " asking for a tcp connection on port " << tcpPort; QSslSocket* socket = new QSslSocket(this); socket->setProxy(QNetworkProxy::NoProxy); m_receivedIdentityPackets[socket].np = receivedPacket; m_receivedIdentityPackets[socket].sender = sender; connect(socket, &QAbstractSocket::connected, this, &LanLinkProvider::tcpSocketConnected); #if QT_VERSION < QT_VERSION_CHECK(5,15,0) connect(socket, QOverload<QAbstractSocket::SocketError>::of(&QAbstractSocket::error), this, &LanLinkProvider::connectError); #else connect(socket, &QAbstractSocket::errorOccurred, this, &LanLinkProvider::connectError); #endif socket->connectToHost(sender, tcpPort); } } void LanLinkProvider::connectError(QAbstractSocket::SocketError socketError) { QSslSocket* socket = 
qobject_cast<QSslSocket*>(sender()); if (!socket) return; qCDebug(KDECONNECT_CORE) << "Socket error" << socketError; qCDebug(KDECONNECT_CORE) << "Fallback (1), try reverse connection (send udp packet)" << socket->errorString(); NetworkPacket np(QLatin1String("")); NetworkPacket::createIdentityPacket(&np); np.set(QStringLiteral("tcpPort"), m_tcpPort); m_udpSocket.writeDatagram(np.serialize(), m_receivedIdentityPackets[socket].sender, m_udpBroadcastPort); //The socket we created didn't work, and we didn't manage //to create a LanDeviceLink from it, deleting everything. delete m_receivedIdentityPackets.take(socket).np; socket->deleteLater(); } //We received a UDP packet and answered by connecting to them by TCP. This gets called on a successful connection. void LanLinkProvider::tcpSocketConnected() { QSslSocket* socket = qobject_cast<QSslSocket*>(sender()); if (!socket) return; // TODO Delete me? #if QT_VERSION < QT_VERSION_CHECK(5,15,0) disconnect(socket, QOverload<QAbstractSocket::SocketError>::of(&QAbstractSocket::error), this, &LanLinkProvider::connectError); #else disconnect(socket, &QAbstractSocket::errorOccurred, this, &LanLinkProvider::connectError); #endif configureSocket(socket); // If socket disconnects due to any reason after connection, link on ssl failure connect(socket, &QAbstractSocket::disconnected, socket, &QObject::deleteLater); NetworkPacket* receivedPacket = m_receivedIdentityPackets[socket].np; const QString& deviceId = receivedPacket->get<QString>(QStringLiteral("deviceId")); //qCDebug(KDECONNECT_CORE) << "tcpSocketConnected" << socket->isWritable(); // If network is on ssl, do not believe when they are connected, believe when handshake is completed NetworkPacket np2(QLatin1String("")); NetworkPacket::createIdentityPacket(&np2); socket->write(np2.serialize()); bool success = socket->waitForBytesWritten(); if (success) { qCDebug(KDECONNECT_CORE) << "TCP connection done (i'm the existing device)"; // if ssl supported if 
(receivedPacket->get<int>(QStringLiteral("protocolVersion")) >= MIN_VERSION_WITH_SSL_SUPPORT) {
            bool isDeviceTrusted = KdeConnectConfig::instance().trustedDevices().contains(deviceId);
            configureSslSocket(socket, deviceId, isDeviceTrusted);
            qCDebug(KDECONNECT_CORE) << "Starting server ssl (I'm the client TCP socket)";
            connect(socket, &QSslSocket::encrypted, this, &LanLinkProvider::encrypted);
            connect(socket, QOverload<const QList<QSslError> &>::of(&QSslSocket::sslErrors), this, &LanLinkProvider::sslErrors);
            socket->startServerEncryption();
            return; // Return statement prevents from deleting received packet, needed in slot "encrypted"
        } else {
            qWarning() << receivedPacket->get<QString>(QStringLiteral("deviceName")) << "uses an old protocol version, this won't work";
            //addLink(deviceId, socket, receivedPacket, LanDeviceLink::Remotely);
        }
    } else {
        //I think this will never happen, but if it happens the deviceLink
        //(or the socket that is now inside it) might not be valid. Delete them.
        qCDebug(KDECONNECT_CORE) << "Fallback (2), try reverse connection (send udp packet)";
        m_udpSocket.writeDatagram(np2.serialize(), m_receivedIdentityPackets[socket].sender, m_udpBroadcastPort);
    }
    delete m_receivedIdentityPackets.take(socket).np;
    //We don't delete the socket because now it's owned by the LanDeviceLink
}

// Slot invoked when the TLS handshake on a socket completes successfully.
// Creates (or reuses) the LanDeviceLink for the peer and then frees the
// identity packet that was stashed in m_receivedIdentityPackets.
void LanLinkProvider::encrypted()
{
    qCDebug(KDECONNECT_CORE) << "Socket successfully established an SSL connection";

    QSslSocket* socket = qobject_cast<QSslSocket*>(sender());
    if (!socket) return;
    Q_ASSERT(socket->mode() != QSslSocket::UnencryptedMode);

    // SslClientMode here means we were the TCP client, i.e. the connection
    // was initiated locally; otherwise the remote side connected to us.
    LanDeviceLink::ConnectionStarted connectionOrigin = (socket->mode() == QSslSocket::SslClientMode)? LanDeviceLink::Locally : LanDeviceLink::Remotely;

    NetworkPacket* receivedPacket = m_receivedIdentityPackets[socket].np;
    const QString& deviceId = receivedPacket->get<QString>(QStringLiteral("deviceId"));

    addLink(deviceId, socket, receivedPacket, connectionOrigin);

    // Copied from tcpSocketConnected slot, now delete received packet
    delete m_receivedIdentityPackets.take(socket).np;
}

// Slot for QSslSocket::sslErrors. Self-signed certificate errors are
// tolerated (peers use self-signed certs); any other SSL error aborts the
// connection and discards the pending identity packet.
void LanLinkProvider::sslErrors(const QList<QSslError>& errors)
{
    QSslSocket* socket = qobject_cast<QSslSocket*>(sender());
    if (!socket) return;

    bool fatal = false;
    for (const QSslError& error : errors) {
        if (error.error() != QSslError::SelfSignedCertificate) {
            qCCritical(KDECONNECT_CORE) << "Disconnecting due to fatal SSL Error: " << error;
            fatal = true;
        } else {
            qCDebug(KDECONNECT_CORE) << "Ignoring self-signed cert error";
        }
    }

    if (fatal) {
        socket->disconnectFromHost();
        delete m_receivedIdentityPackets.take(socket).np;
    }
}

//I'm the new device and this is the answer to my UDP identity packet (no data received yet). They are connecting to us through TCP, and they should send an identity.
void LanLinkProvider::newConnection()
{
    qCDebug(KDECONNECT_CORE) << "LanLinkProvider newConnection";

    while (m_server->hasPendingConnections()) {
        QSslSocket* socket = m_server->nextPendingConnection();
        configureSocket(socket);
        //This socket is still managed by us (and child of the QTcpServer), if
        //it disconnects before we manage to pass it to a LanDeviceLink, it's
        //our responsibility to delete it. We do so with this connection.
        connect(socket, &QAbstractSocket::disconnected, socket, &QObject::deleteLater);
        connect(socket, &QIODevice::readyRead, this, &LanLinkProvider::dataReceived);
    }
}

//I'm the new device and this is the answer to my UDP identity packet (data received)
void LanLinkProvider::dataReceived()
{
    QSslSocket* socket = qobject_cast<QSslSocket*>(sender());
    //the size here is arbitrary and is now at 8192 bytes. It needs to be considerably long as it includes the capabilities but there needs to be a limit
    //Tested between my systems and I get around 2000 per identity package.
    if (socket->bytesAvailable() > 8192) {
        qCWarning(KDECONNECT_CORE) << "LanLinkProvider/newConnection: Suspiciously long identity package received. Closing connection." << socket->peerAddress() << socket->bytesAvailable();
        socket->disconnectFromHost();
        return;
    }

#if QT_VERSION < QT_VERSION_CHECK(5,7,0)
    // Older Qt has no read transactions: just wait until a full line arrived.
    if (!socket->canReadLine()) return;
#else
    // Use a transaction so a partial line can be rolled back and re-read later.
    socket->startTransaction();
#endif

    const QByteArray data = socket->readLine();
    qCDebug(KDECONNECT_CORE) << "LanLinkProvider received reply:" << data;

    NetworkPacket* np = new NetworkPacket(QLatin1String(""));
    bool success = NetworkPacket::unserialize(data, np);

#if QT_VERSION < QT_VERSION_CHECK(5,7,0)
    if (!success) {
        delete np;
        return;
    }
#else
    if (!success) {
        delete np;
        socket->rollbackTransaction();
        return;
    }
    socket->commitTransaction();
#endif

    if (np->type() != PACKET_TYPE_IDENTITY) {
        qCWarning(KDECONNECT_CORE) << "LanLinkProvider/newConnection: Expected identity, received " << np->type();
        delete np;
        return;
    }

    // Needed in "encrypted" if ssl is used, similar to "tcpSocketConnected"
    m_receivedIdentityPackets[socket].np = np;

    const QString& deviceId = np->get<QString>(QStringLiteral("deviceId"));
    //qCDebug(KDECONNECT_CORE) << "Handshaking done (i'm the new device)";

    //This socket will now be owned by the LanDeviceLink or we don't want more data to be received, forget about it
    disconnect(socket, &QIODevice::readyRead, this, &LanLinkProvider::dataReceived);

    if (np->get<int>(QStringLiteral("protocolVersion")) >= MIN_VERSION_WITH_SSL_SUPPORT) {
        bool isDeviceTrusted = KdeConnectConfig::instance().trustedDevices().contains(deviceId);
        configureSslSocket(socket, deviceId, isDeviceTrusted);
        qCDebug(KDECONNECT_CORE) << "Starting client ssl (but I'm the server TCP socket)";
        connect(socket, &QSslSocket::encrypted, this, &LanLinkProvider::encrypted);
        // Only watch for SSL errors on trusted devices: for untrusted peers
        // certificate problems are expected (pairing not done yet).
        if (isDeviceTrusted) {
            connect(socket, QOverload<const QList<QSslError> &>::of(&QSslSocket::sslErrors), this, &LanLinkProvider::sslErrors);
        }
        socket->startClientEncryption();
    } else {
        qWarning() << np->get<QString>(QStringLiteral("deviceName")) << "uses an old protocol version, this won't work";
        //addLink(deviceId, socket, np, LanDeviceLink::Locally);
        delete m_receivedIdentityPackets.take(socket).np;
    }
}

// Called when a LanDeviceLink is destroyed: removes it from m_links and
// disposes of the pairing handler associated with the same device id.
void LanLinkProvider::deviceLinkDestroyed(QObject* destroyedDeviceLink)
{
    const QString id = destroyedDeviceLink->property("deviceId").toString();
    //qCDebug(KDECONNECT_CORE) << "deviceLinkDestroyed" << id;

    QMap< QString, LanDeviceLink* >::iterator linkIterator = m_links.find(id);
    Q_ASSERT(linkIterator != m_links.end());
    if (linkIterator != m_links.end()) {
        Q_ASSERT(linkIterator.value() == destroyedDeviceLink);
        m_links.erase(linkIterator);
        auto pairingHandler = m_pairingHandlers.take(id);
        if (pairingHandler) {
            pairingHandler->deleteLater();
        }
    }
}

// Prepares a socket for the TLS handshake: fixed cipher list, our own
// certificate + RSA private key, and peer verification that is strict for
// already-trusted devices (pinned CA cert) but lenient otherwise.
void LanLinkProvider::configureSslSocket(QSslSocket* socket, const QString& deviceId, bool isDeviceTrusted)
{
    // Setting supported ciphers manually, to match those on Android (FIXME: Test if this can be left unconfigured and still works for Android 4)
    QList<QSslCipher> socketCiphers;
    socketCiphers.append(QSslCipher(QStringLiteral("ECDHE-ECDSA-AES256-GCM-SHA384")));
    socketCiphers.append(QSslCipher(QStringLiteral("ECDHE-ECDSA-AES128-GCM-SHA256")));
    socketCiphers.append(QSslCipher(QStringLiteral("ECDHE-RSA-AES128-SHA")));

    // Configure for ssl
    QSslConfiguration sslConfig;
    sslConfig.setCiphers(socketCiphers);
    sslConfig.setLocalCertificate(KdeConnectConfig::instance().certificate());

    QFile privateKeyFile(KdeConnectConfig::instance().privateKeyPath());
    QSslKey privateKey;
    if (privateKeyFile.open(QIODevice::ReadOnly)) {
        privateKey = QSslKey(privateKeyFile.readAll(), QSsl::Rsa);
    }
    privateKeyFile.close();
    sslConfig.setPrivateKey(privateKey);

    if (isDeviceTrusted) {
        // Pin the certificate stored at pairing time as the only accepted CA.
        QString certString = KdeConnectConfig::instance().getDeviceProperty(deviceId, QStringLiteral("certificate"), QString());
        sslConfig.setCaCertificates({QSslCertificate(certString.toLatin1())});
        sslConfig.setPeerVerifyMode(QSslSocket::VerifyPeer);
    } else {
        sslConfig.setPeerVerifyMode(QSslSocket::QueryPeer);
    }
    socket->setSslConfiguration(sslConfig);
    socket->setPeerVerifyName(deviceId);

    //Usually SSL errors are only bad for trusted devices. Uncomment this section to log errors in any case, for debugging.
    //QObject::connect(socket, static_cast<void (QSslSocket::*)(const QList<QSslError>&)>(&QSslSocket::sslErrors), [](const QList<QSslError>& errors)
    //{
    //    Q_FOREACH (const QSslError& error, errors) {
    //        qCDebug(KDECONNECT_CORE) << "SSL Error:" << error.errorString();
    //    }
    //});
}

// Common TCP socket setup: bypass any proxy and enable TCP keepalive so dead
// peers are detected. The TCP_KEEP* options are platform-dependent, hence the
// #ifdef guards around each setsockopt call.
void LanLinkProvider::configureSocket(QSslSocket* socket) {
    socket->setProxy(QNetworkProxy::NoProxy);

    socket->setSocketOption(QAbstractSocket::KeepAliveOption, QVariant(1));

#ifdef TCP_KEEPIDLE
    // time to start sending keepalive packets (seconds)
    int maxIdle = 10;
    setsockopt(socket->socketDescriptor(), IPPROTO_TCP, TCP_KEEPIDLE, &maxIdle, sizeof(maxIdle));
#endif

#ifdef TCP_KEEPINTVL
    // interval between keepalive packets after the initial period (seconds)
    int interval = 5;
    setsockopt(socket->socketDescriptor(), IPPROTO_TCP, TCP_KEEPINTVL, &interval, sizeof(interval));
#endif

#ifdef TCP_KEEPCNT
    // number of missed keepalive packets before disconnecting
    int count = 3;
    setsockopt(socket->socketDescriptor(), IPPROTO_TCP, TCP_KEEPCNT, &count, sizeof(count));
#endif
}

// Registers (or refreshes) the LanDeviceLink for deviceId using the now
// fully-established socket, then emits onConnectionReceived so Daemon can
// route the identity packet.
void LanLinkProvider::addLink(const QString& deviceId, QSslSocket* socket, NetworkPacket* receivedPacket, LanDeviceLink::ConnectionStarted connectionOrigin)
{
    // Socket disconnection will now be handled by LanDeviceLink
    disconnect(socket, &QAbstractSocket::disconnected, socket, &QObject::deleteLater);

    LanDeviceLink* deviceLink;
    //Do we have a link for this device already?
    QMap< QString, LanDeviceLink* >::iterator linkIterator = m_links.find(deviceId);
    if (linkIterator != m_links.end()) {
        //qCDebug(KDECONNECT_CORE) << "Reusing link to" << deviceId;
        deviceLink = linkIterator.value();
        deviceLink->reset(socket, connectionOrigin);
    } else {
        deviceLink = new LanDeviceLink(deviceId, this, socket, connectionOrigin);
        connect(deviceLink, &QObject::destroyed, this, &LanLinkProvider::deviceLinkDestroyed);
        m_links[deviceId] = deviceLink;
        if (m_pairingHandlers.contains(deviceId)) {
            //We shouldn't have a pairinghandler if we didn't have a link.
            //Crash if debug, recover if release (by setting the new devicelink to the old pairinghandler)
            Q_ASSERT(m_pairingHandlers.contains(deviceId));
            m_pairingHandlers[deviceId]->setDeviceLink(deviceLink);
        }
    }
    Q_EMIT onConnectionReceived(*receivedPacket, deviceLink);
}

// Lazily creates the per-device LanPairingHandler (one per device id) and
// wires its pairingError signal through to the DeviceLink.
LanPairingHandler* LanLinkProvider::createPairingHandler(DeviceLink* link)
{
    LanPairingHandler* ph = m_pairingHandlers.value(link->deviceId());
    if (!ph) {
        ph = new LanPairingHandler(link);
        qCDebug(KDECONNECT_CORE) << "creating pairing handler for" << link->deviceId();
        connect (ph, &LanPairingHandler::pairingError, link, &DeviceLink::pairingError);
        m_pairingHandlers[link->deviceId()] = ph;
    }
    return ph;
}

// User-initiated pairing request for a known device id.
void LanLinkProvider::userRequestsPair(const QString& deviceId)
{
    LanPairingHandler* ph = createPairingHandler(m_links.value(deviceId));
    ph->requestPairing();
}

// User-initiated unpair for a known device id.
void LanLinkProvider::userRequestsUnpair(const QString& deviceId)
{
    LanPairingHandler* ph = createPairingHandler(m_links.value(deviceId));
    ph->unpair();
}

// Forwards an incoming pair/unpair packet to the device's pairing handler.
void LanLinkProvider::incomingPairPacket(DeviceLink* deviceLink, const NetworkPacket& np)
{
    LanPairingHandler* ph = createPairingHandler(deviceLink);
    ph->packetReceived(np);
}
./CrossVul/dataset_final_sorted/CWE-400/cpp/good_4320_0
crossvul-cpp_data_bad_843_1
/*
 *  Copyright (c) 2018-present, Facebook, Inc.
 *  All rights reserved.
 *
 *  This source code is licensed under the BSD-style license found in the
 *  LICENSE file in the root directory of this source tree.
 */

#include <folly/portability/GMock.h>
#include <folly/portability/GTest.h>

#include <fizz/record/EncryptedRecordLayer.h>

#include <fizz/crypto/aead/test/Mocks.h>
#include <folly/String.h>

using namespace folly;
using namespace folly::io;

using testing::_;
using namespace testing;

namespace fizz {
namespace test {

// Unit tests for the TLS 1.3 encrypted record layer. A MockAead stands in for
// the real cipher on both the read and write side, so each test controls the
// exact plaintext/ciphertext mapping and can inspect the sequence numbers the
// record layer passes to the AEAD.
class EncryptedRecordTest : public testing::Test {
  void SetUp() override {
    auto readAead = std::make_unique<MockAead>();
    readAead_ = readAead.get();
    read_.setAead(folly::ByteRange(), std::move(readAead));
    auto writeAead = std::make_unique<MockAead>();
    writeAead_ = writeAead.get();
    write_.setAead(folly::ByteRange(), std::move(writeAead));
  }

 protected:
  EncryptedReadRecordLayer read_{EncryptionLevel::AppTraffic};
  EncryptedWriteRecordLayer write_{EncryptionLevel::AppTraffic};
  MockAead* readAead_;   // owned by read_; kept for setting expectations
  MockAead* writeAead_;  // owned by write_; kept for setting expectations
  IOBufQueue queue_{IOBufQueue::cacheChainLength()};
  IOBufEqualTo eq_;

  // Builds an IOBuf from a hex string, optionally with head/tail room.
  static Buf getBuf(const std::string& hex, size_t headroom = 0, size_t tailroom = 0) {
    auto data = unhexlify(hex);
    return IOBuf::copyBuffer(data.data(), data.size(), headroom, tailroom);
  }

  // Appends hex-encoded wire bytes to the input queue the read layer consumes.
  void addToQueue(const std::string& hex) {
    queue_.append(getBuf(hex));
  }

  // Asserts that buf's contents equal the given hex string.
  static void expectSame(const Buf& buf, const std::string& hex) {
    auto str = buf->moveToFbString().toStdString();
    EXPECT_EQ(hexlify(str), hex);
  }
};

// Reading from an empty queue yields no message.
TEST_F(EncryptedRecordTest, TestReadEmpty) {
  EXPECT_FALSE(read_.read(queue_).hasValue());
}

// A record whose decrypted payload ends in content-type 0x16 is a handshake message.
TEST_F(EncryptedRecordTest, TestReadHandshake) {
  addToQueue("17030100050123456789");
  EXPECT_CALL(*readAead_, _decrypt(_, _, 0))
      .WillOnce(Invoke([](std::unique_ptr<IOBuf>& buf, const IOBuf*, uint64_t) {
        expectSame(buf, "0123456789");
        return getBuf("abcdef16");
      }));
  auto msg = read_.read(queue_);
  EXPECT_EQ(msg->type, ContentType::handshake);
  expectSame(msg->fragment, "abcdef");
  EXPECT_TRUE(queue_.empty());
}

// Content-type byte 0x15 maps to an alert message.
TEST_F(EncryptedRecordTest, TestReadAlert) {
  addToQueue("17030100050123456789");
  EXPECT_CALL(*readAead_, _decrypt(_, _, 0))
      .WillOnce(Invoke([](std::unique_ptr<IOBuf>& buf, const IOBuf*, uint64_t) {
        expectSame(buf, "0123456789");
        return getBuf("020215");
      }));
  auto msg = read_.read(queue_);
  EXPECT_EQ(msg->type, ContentType::alert);
  expectSame(msg->fragment, "0202");
  EXPECT_TRUE(queue_.empty());
}

// Content-type byte 0x17 maps to application data; the ciphertext handed to
// the AEAD must not be shared (so in-place decryption is possible).
TEST_F(EncryptedRecordTest, TestReadAppData) {
  addToQueue("17030100050123456789");
  EXPECT_CALL(*readAead_, _decrypt(_, _, 0))
      .WillOnce(Invoke([](std::unique_ptr<IOBuf>& buf, const IOBuf*, uint64_t) {
        EXPECT_FALSE(buf->isShared());
        expectSame(buf, "0123456789");
        return getBuf("1234abcd17");
      }));
  auto msg = read_.read(queue_);
  EXPECT_EQ(msg->type, ContentType::application_data);
  expectSame(msg->fragment, "1234abcd");
  EXPECT_TRUE(queue_.empty());
}

// An unknown inner content type (0x20) must be rejected.
TEST_F(EncryptedRecordTest, TestReadUnknown) {
  addToQueue("17030100050123456789");
  EXPECT_CALL(*readAead_, _decrypt(_, _, 0))
      .WillOnce(Invoke([](std::unique_ptr<IOBuf>& buf, const IOBuf*, uint64_t) {
        expectSame(buf, "0123456789");
        return getBuf("1234abcd20");
      }));
  EXPECT_ANY_THROW(read_.read(queue_));
}

// Header promises 0x10 bytes but only 3 arrived: no message, data kept buffered.
TEST_F(EncryptedRecordTest, TestWaitForData) {
  addToQueue("1703010010012345");
  EXPECT_FALSE(read_.read(queue_).hasValue());
  EXPECT_EQ(queue_.chainLength(), 8);
}

// Less than a full 5-byte record header: wait for more bytes.
TEST_F(EncryptedRecordTest, TestWaitForHeader) {
  addToQueue("16030102");
  EXPECT_FALSE(read_.read(queue_).hasValue());
  EXPECT_EQ(queue_.chainLength(), 4);
}

// 0x4100 is exactly the maximum allowed ciphertext length: accepted (waits for body).
TEST_F(EncryptedRecordTest, TestMaxSize) {
  addToQueue("1603014100");
  EXPECT_FALSE(read_.read(queue_).hasValue());
  EXPECT_EQ(queue_.chainLength(), 5);
}

// 0x5000 exceeds the maximum record length and must throw immediately.
TEST_F(EncryptedRecordTest, TestOverSize) {
  addToQueue("1603015000");
  EXPECT_ANY_THROW(read_.read(queue_));
}

// Bytes beyond the record boundary stay in the queue for the next read.
TEST_F(EncryptedRecordTest, TestDataRemaining) {
  addToQueue("17030100050123456789aa");
  EXPECT_CALL(*readAead_, _decrypt(_, _, 0))
      .WillOnce(Invoke([](std::unique_ptr<IOBuf>& buf, const IOBuf*, uint64_t) {
        expectSame(buf, "0123456789");
        return getBuf("abcdef16");
      }));
  read_.read(queue_);
  EXPECT_EQ(queue_.chainLength(), 1);
}

// Trailing zero padding after the content-type byte is stripped.
TEST_F(EncryptedRecordTest, TestPadding) {
  addToQueue("17030100050123456789");
  EXPECT_CALL(*readAead_, _decrypt(_, _, 0))
      .WillOnce(Invoke([](std::unique_ptr<IOBuf>& buf, const IOBuf*, uint64_t) {
        expectSame(buf, "0123456789");
        return getBuf("1234abcd17000000");
      }));
  auto msg = read_.read(queue_);
  EXPECT_EQ(msg->type, ContentType::application_data);
  expectSame(msg->fragment, "1234abcd");
  EXPECT_TRUE(queue_.empty());
}

// A record that is all padding plus the app-data type yields an empty fragment.
TEST_F(EncryptedRecordTest, TestAllPaddingAppData) {
  addToQueue("17030100050123456789");
  EXPECT_CALL(*readAead_, _decrypt(_, _, 0))
      .WillOnce(Invoke([](std::unique_ptr<IOBuf>& buf, const IOBuf*, uint64_t) {
        expectSame(buf, "0123456789");
        return getBuf("17000000");
      }));
  auto msg = read_.read(queue_);
  EXPECT_EQ(msg->type, ContentType::application_data);
  EXPECT_TRUE(msg->fragment->empty());
  EXPECT_TRUE(queue_.empty());
}

// An empty handshake record (only padding + type 0x16) is tolerated.
TEST_F(EncryptedRecordTest, TestAllPaddingHandshake) {
  addToQueue("17030100050123456789");
  EXPECT_CALL(*readAead_, _decrypt(_, _, 0))
      .WillOnce(Invoke([](std::unique_ptr<IOBuf>& buf, const IOBuf*, uint64_t) {
        expectSame(buf, "0123456789");
        return getBuf("16000000");
      }));
  EXPECT_NO_THROW(read_.read(queue_));
}

// All-zero plaintext has no content-type byte at all: must throw.
TEST_F(EncryptedRecordTest, TestNoContentType) {
  addToQueue("17030100050123456789");
  EXPECT_CALL(*readAead_, _decrypt(_, _, 0))
      .WillOnce(Invoke([](std::unique_ptr<IOBuf>& buf, const IOBuf*, uint64_t) {
        expectSame(buf, "0123456789");
        return getBuf("00000000");
      }));
  EXPECT_ANY_THROW(read_.read(queue_));
}

// The read sequence number must increment by one per record.
TEST_F(EncryptedRecordTest, TestReadSeqNum) {
  for (int i = 0; i < 10; i++) {
    addToQueue("17030100050123456789");
    EXPECT_CALL(*readAead_, _decrypt(_, _, i))
        .WillOnce(
            Invoke([](std::unique_ptr<IOBuf>& buf, const IOBuf*, uint64_t) {
              expectSame(buf, "0123456789");
              return getBuf("1234abcd17");
            }));
    read_.read(queue_);
  }
}

// With skip-failed-decryption on, an undecryptable record is silently dropped.
TEST_F(EncryptedRecordTest, TestSkipAndWait) {
  read_.setSkipFailedDecryption(true);
  addToQueue("17030100050123456789");
  EXPECT_CALL(*readAead_, _tryDecrypt(_, _, 0))
      .WillOnce(
          Invoke([](std::unique_ptr<IOBuf>& /*buf*/, const IOBuf*, uint64_t) {
            return folly::none;
          }));
  EXPECT_FALSE(read_.read(queue_).hasValue());
  EXPECT_TRUE(queue_.empty());
}

// After skipping one failed record, the next record decrypts at the same
// sequence number (0); once one succeeds, skipping is disabled and the
// following record uses plain _decrypt at sequence 1.
TEST_F(EncryptedRecordTest, TestSkipAndRead) {
  Sequence s;
  read_.setSkipFailedDecryption(true);
  addToQueue("1703010005012345678917030100050123456789170301000501234567aa");
  EXPECT_CALL(*readAead_, _tryDecrypt(_, _, 0))
      .InSequence(s)
      .WillOnce(
          Invoke([](std::unique_ptr<IOBuf>& /*buf*/, const IOBuf*, uint64_t) {
            return folly::none;
          }));
  EXPECT_CALL(*readAead_, _tryDecrypt(_, _, 0))
      .InSequence(s)
      .WillOnce(Invoke([](std::unique_ptr<IOBuf>& buf, const IOBuf*, uint64_t) {
        expectSame(buf, "0123456789");
        return getBuf("1234abcd17");
      }));
  auto msg = read_.read(queue_);
  EXPECT_EQ(msg->type, ContentType::application_data);
  expectSame(msg->fragment, "1234abcd");
  EXPECT_EQ(queue_.chainLength(), 10);
  EXPECT_CALL(*readAead_, _decrypt(_, _, 1))
      .InSequence(s)
      .WillOnce(Invoke([](std::unique_ptr<IOBuf>& buf, const IOBuf*, uint64_t) {
        expectSame(buf, "01234567aa");
        return getBuf("1234abaa17");
      }));
  msg = read_.read(queue_);
  EXPECT_EQ(msg->type, ContentType::application_data);
  expectSame(msg->fragment, "1234abaa");
  EXPECT_TRUE(queue_.empty());
}

// Writing a handshake message appends the 0x16 footer before encryption and
// frames the ciphertext as an outer application_data record.
TEST_F(EncryptedRecordTest, TestWriteHandshake) {
  TLSMessage msg{ContentType::handshake, getBuf("1234567890")};
  EXPECT_CALL(*writeAead_, _encrypt(_, _, 0))
      .WillOnce(Invoke([](std::unique_ptr<IOBuf>& buf, const IOBuf*, uint64_t) {
        expectSame(buf, "123456789016");
        return getBuf("abcd1234abcd");
      }));
  auto buf = write_.write(std::move(msg));
  expectSame(buf.data, "1703030006abcd1234abcd");
}

// Same as above for application data (0x17 footer).
TEST_F(EncryptedRecordTest, TestWriteAppData) {
  TLSMessage msg{ContentType::application_data, getBuf("1234567890")};
  EXPECT_CALL(*writeAead_, _encrypt(_, _, 0))
      .WillOnce(Invoke([](std::unique_ptr<IOBuf>& buf, const IOBuf*, uint64_t) {
        expectSame(buf, "123456789017");
        return getBuf("abcd1234abcd");
      }));
  auto buf = write_.write(std::move(msg));
  expectSame(buf.data, "1703030006abcd1234abcd");
}

// When the input buffer has spare head/tail room, the footer and record
// header are written in place without chaining extra buffers.
TEST_F(EncryptedRecordTest, TestWriteAppDataInPlace) {
  TLSMessage msg{ContentType::application_data, getBuf("1234567890", 5, 17)};
  EXPECT_CALL(*writeAead_, _encrypt(_, _, 0))
      .WillOnce(Invoke([](std::unique_ptr<IOBuf>& buf, const IOBuf*, uint64_t) {
        // footer should have been written w/o chaining
        EXPECT_FALSE(buf->isChained());
        expectSame(buf, "123456789017");
        // we need to return room for the header
        return getBuf("abcd1234abcd", 5, 0);
      }));
  auto buf = write_.write(std::move(msg));
  EXPECT_FALSE(buf.data->isChained());
  expectSame(buf.data, "1703030006abcd1234abcd");
}

// A payload larger than one record (0x4a00 bytes) is split into two records
// with consecutive sequence numbers.
TEST_F(EncryptedRecordTest, TestFragmentedWrite) {
  TLSMessage msg{ContentType::application_data, IOBuf::create(0x4a00)};
  msg.fragment->append(0x4a00);
  memset(msg.fragment->writableData(), 0x1, msg.fragment->length());
  Sequence s;
  EXPECT_CALL(*writeAead_, _encrypt(_, _, 0))
      .InSequence(s)
      .WillOnce(
          Invoke([](std::unique_ptr<IOBuf>& /*buf*/, const IOBuf*, uint64_t) {
            return getBuf("aaaa");
          }));
  EXPECT_CALL(*writeAead_, _encrypt(_, _, 1))
      .InSequence(s)
      .WillOnce(
          Invoke([](std::unique_ptr<IOBuf>& /*buf*/, const IOBuf*, uint64_t) {
            return getBuf("bbbb");
          }));
  auto outBuf = write_.write(std::move(msg));
  expectSame(outBuf.data, "1703034001aaaa1703030a01bbbb");
}

// A chained input whose first buffer fills a record splits into two encrypts.
TEST_F(EncryptedRecordTest, TestWriteSplittingWholeBuf) {
  TLSMessage msg{ContentType::application_data, IOBuf::create(2000)};
  msg.fragment->append(2000);
  memset(msg.fragment->writableData(), 0x1, msg.fragment->length());
  msg.fragment->prependChain(IOBuf::copyBuffer("moredata"));
  Sequence s;
  EXPECT_CALL(*writeAead_, _encrypt(_, _, _))
      .Times(2)
      .WillRepeatedly(
          Invoke([](std::unique_ptr<IOBuf>& /*buf*/, const IOBuf*, uint64_t) {
            return getBuf("aaaa");
          }));
  write_.write(std::move(msg));
}

// Small chained buffers that fit in one record are combined into one encrypt.
TEST_F(EncryptedRecordTest, TestWriteSplittingCombineSmall) {
  TLSMessage msg{ContentType::application_data, IOBuf::create(500)};
  msg.fragment->append(500);
  memset(msg.fragment->writableData(), 0x1, msg.fragment->length());
  msg.fragment->prependChain(IOBuf::copyBuffer("moredata"));
  Sequence s;
  EXPECT_CALL(*writeAead_, _encrypt(_, _, _))
      .Times(1)
      .WillRepeatedly(
          Invoke([](std::unique_ptr<IOBuf>& /*buf*/, const IOBuf*, uint64_t) {
            return getBuf("aaaa");
          }));
  write_.write(std::move(msg));
}

// The write sequence number increments by one per record written.
TEST_F(EncryptedRecordTest, TestWriteSeqNum) {
  for (int i = 0; i < 10; i++) {
    TLSMessage msg{ContentType::application_data, getBuf("1234567890")};
    EXPECT_CALL(*writeAead_, _encrypt(_, _, i))
        .WillOnce(
            Invoke([](std::unique_ptr<IOBuf>& buf, const IOBuf*, uint64_t) {
              expectSame(buf, "123456789017");
              return getBuf("abcd1234abcd");
            }));
    write_.write(std::move(msg));
  }
}

// Writing an empty message produces no output and no AEAD call.
TEST_F(EncryptedRecordTest, TestWriteEmpty) {
  TLSMessage msg{ContentType::application_data, folly::IOBuf::create(0)};
  auto outBuf = write_.write(std::move(msg));
  EXPECT_TRUE(outBuf.data->empty());
}

// A configured max record size below the payload size forces a split.
TEST_F(EncryptedRecordTest, TestWriteMaxSize) {
  write_.setMaxRecord(1900);
  TLSMessage msg{ContentType::application_data, IOBuf::create(2000)};
  msg.fragment->append(2000);
  memset(msg.fragment->writableData(), 0x1, msg.fragment->length());
  Sequence s;
  EXPECT_CALL(*writeAead_, _encrypt(_, _, _))
      .Times(2)
      .WillRepeatedly(
          Invoke([](std::unique_ptr<IOBuf>& /*buf*/, const IOBuf*, uint64_t) {
            return getBuf("aaaa");
          }));
  write_.write(std::move(msg));
}

// A min desired record size coalesces chained buffers up to that size
// (+1 footer byte) before splitting the remainder into a second record.
TEST_F(EncryptedRecordTest, TestWriteMinSize) {
  write_.setMinDesiredRecord(1700);
  TLSMessage msg{ContentType::application_data, IOBuf::create(1000)};
  msg.fragment->append(1000);
  memset(msg.fragment->writableData(), 0x1, msg.fragment->length());
  auto next = IOBuf::create(1000);
  next->append(1000);
  memset(next->writableData(), 0x2, next->length());
  msg.fragment->prependChain(std::move(next));
  Sequence s;
  EXPECT_CALL(*writeAead_, _encrypt(_, _, _))
      .WillOnce(Invoke([](std::unique_ptr<IOBuf>& buf, const IOBuf*, uint64_t) {
        // one byte for footer
        EXPECT_EQ(buf->computeChainDataLength(), 1701);
        return getBuf("aaaa");
      }))
      .WillOnce(Invoke([](std::unique_ptr<IOBuf>& buf, const IOBuf*, uint64_t) {
        // one byte for footer
        EXPECT_EQ(buf->computeChainDataLength(), 301);
        return getBuf("bbbb");
      }));
  write_.write(std::move(msg));
}
} // namespace test
} // namespace fizz
./CrossVul/dataset_final_sorted/CWE-400/cpp/bad_843_1
crossvul-cpp_data_bad_4320_0
/**
 * SPDX-FileCopyrightText: 2013 Albert Vaca <albertvaka@gmail.com>
 *
 * SPDX-License-Identifier: GPL-2.0-only OR GPL-3.0-only OR LicenseRef-KDE-Accepted-GPL
 */

#include "lanlinkprovider.h"
#include "core_debug.h"

#ifndef Q_OS_WIN
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <netdb.h>
#endif

#include <QHostInfo>
#include <QTcpServer>
#include <QMetaEnum>
#include <QNetworkProxy>
#include <QUdpSocket>
#include <QNetworkSession>
#include <QNetworkConfigurationManager>
#include <QSslCipher>
#include <QSslConfiguration>
#include <QSslKey>

#include "daemon.h"
#include "landevicelink.h"
#include "lanpairinghandler.h"
#include "kdeconnectconfig.h"
#include "qtcompat_p.h"

#define MIN_VERSION_WITH_SSL_SUPPORT 6

LanLinkProvider::LanLinkProvider(
        bool testMode,
        quint16 udpBroadcastPort,
        quint16 udpListenPort)
    : m_server(new Server(this))
    , m_udpSocket(this)
    , m_tcpPort(0)
    , m_udpBroadcastPort(udpBroadcastPort)
    , m_udpListenPort(udpListenPort)
    , m_testMode(testMode)
    , m_combineBroadcastsTimer(this)
{
    m_combineBroadcastsTimer.setInterval(0); // increase this if waiting a single event-loop iteration is not enough
    m_combineBroadcastsTimer.setSingleShot(true);
    connect(&m_combineBroadcastsTimer, &QTimer::timeout, this, &LanLinkProvider::broadcastToNetwork);

    connect(&m_udpSocket, &QIODevice::readyRead, this, &LanLinkProvider::udpBroadcastReceived);

    m_server->setProxy(QNetworkProxy::NoProxy);
    connect(m_server, &QTcpServer::newConnection, this, &LanLinkProvider::newConnection);

    m_udpSocket.setProxy(QNetworkProxy::NoProxy);

    //Detect when a network interface changes status, so we announce ourselves in the new network
    QNetworkConfigurationManager* networkManager = new QNetworkConfigurationManager(this);
    connect(networkManager, &QNetworkConfigurationManager::configurationChanged, this, &LanLinkProvider::onNetworkConfigurationChanged);
}

// Re-announces ourselves when a network interface becomes active (deduplicated
// against the previously seen configuration to avoid broadcast storms).
void LanLinkProvider::onNetworkConfigurationChanged(const QNetworkConfiguration& config)
{
    if (m_lastConfig != config && config.state() == QNetworkConfiguration::Active) {
        m_lastConfig = config;
        onNetworkChange();
    }
}

LanLinkProvider::~LanLinkProvider()
{
}

// Binds the UDP discovery socket and finds a free TCP port in
// [MIN_TCP_PORT, MAX_TCP_PORT] for incoming links, then broadcasts identity.
void LanLinkProvider::onStart()
{
    const QHostAddress bindAddress = m_testMode? QHostAddress::LocalHost : QHostAddress::Any;

    bool success = m_udpSocket.bind(bindAddress, m_udpListenPort, QUdpSocket::ShareAddress);
    if (!success) {
        QAbstractSocket::SocketError sockErr = m_udpSocket.error();
        // Refer to https://doc.qt.io/qt-5/qabstractsocket.html#SocketError-enum to decode socket error number
        QString errorMessage = QString::fromLatin1(QMetaEnum::fromType<QAbstractSocket::SocketError>().valueToKey(sockErr));
        qCritical(KDECONNECT_CORE)
                << QLatin1String("Failed to bind UDP socket on port")
                << m_udpListenPort
                << QLatin1String("with error")
                << errorMessage;
    }
    Q_ASSERT(success);

    m_tcpPort = MIN_TCP_PORT;
    while (!m_server->listen(bindAddress, m_tcpPort)) {
        m_tcpPort++;
        if (m_tcpPort > MAX_TCP_PORT) { //No ports available?
            qCritical(KDECONNECT_CORE) << "Error opening a port in range" << MIN_TCP_PORT << "-" << MAX_TCP_PORT;
            m_tcpPort = 0;
            return;
        }
    }

    onNetworkChange();
    qCDebug(KDECONNECT_CORE) << "LanLinkProvider started";
}

void LanLinkProvider::onStop()
{
    m_udpSocket.close();
    m_server->close();
    qCDebug(KDECONNECT_CORE) << "LanLinkProvider stopped";
}

// Coalesces rapid-fire network change notifications into one broadcast via a
// single-shot zero-interval timer.
void LanLinkProvider::onNetworkChange()
{
    if (m_combineBroadcastsTimer.isActive()) {
        qCDebug(KDECONNECT_CORE) << "Preventing duplicate broadcasts";
        return;
    }
    m_combineBroadcastsTimer.start();
}

//I'm in a new network, let's be polite and introduce myself
void LanLinkProvider::broadcastToNetwork()
{
    if (!m_server->isListening()) {
        //Not started
        return;
    }

    Q_ASSERT(m_tcpPort != 0);

    qCDebug(KDECONNECT_CORE()) << "Broadcasting identity packet";

    QList<QHostAddress> destinations = getBroadcastAddresses();

    NetworkPacket np;
    NetworkPacket::createIdentityPacket(&np);
    np.set(QStringLiteral("tcpPort"), m_tcpPort);

#ifdef Q_OS_WIN
    //On Windows we need to broadcast from every local IP address to reach all networks
    QUdpSocket sendSocket;
    sendSocket.setProxy(QNetworkProxy::NoProxy);
    for (const QNetworkInterface& iface : QNetworkInterface::allInterfaces()) {
        if ((iface.flags() & QNetworkInterface::IsUp)
                && (iface.flags() & QNetworkInterface::IsRunning)
                && (iface.flags() & QNetworkInterface::CanBroadcast)) {
            for (const QNetworkAddressEntry& ifaceAddress : iface.addressEntries()) {
                QHostAddress sourceAddress = ifaceAddress.ip();
                if (sourceAddress.protocol() == QAbstractSocket::IPv4Protocol && sourceAddress != QHostAddress::LocalHost) {
                    qCDebug(KDECONNECT_CORE()) << "Broadcasting as" << sourceAddress;
                    sendBroadcasts(sendSocket, np, destinations);
                    sendSocket.close();
                }
            }
        }
    }
#else
    sendBroadcasts(m_udpSocket, np, destinations);
#endif
}

// Returns the default broadcast address plus any user-configured custom
// device addresses (invalid entries are logged and skipped).
QList<QHostAddress> LanLinkProvider::getBroadcastAddresses()
{
    const QStringList customDevices = KdeConnectConfig::instance().customDevices();

    QList<QHostAddress> destinations;
    destinations.reserve(customDevices.length() + 1);

    // Default broadcast address
    destinations.append(m_testMode ? QHostAddress::LocalHost : QHostAddress::Broadcast);

    // Custom device addresses
    for (auto& customDevice : customDevices) {
        QHostAddress address(customDevice);
        if (address.isNull()) {
            qCWarning(KDECONNECT_CORE) << "Invalid custom device address" << customDevice;
        } else {
            destinations.append(address);
        }
    }

    return destinations;
}

// Sends one serialized copy of the identity packet to every destination.
void LanLinkProvider::sendBroadcasts(QUdpSocket& socket, const NetworkPacket& np, const QList<QHostAddress>& addresses)
{
    const QByteArray payload = np.serialize();

    for (auto& address : addresses) {
        socket.writeDatagram(payload, address, m_udpBroadcastPort);
    }
}

//I'm the existing device, a new device is kindly introducing itself.
//I will create a TcpSocket and try to connect. This can result in either tcpSocketConnected() or connectError().
void LanLinkProvider::udpBroadcastReceived()
{
    while (m_udpSocket.hasPendingDatagrams()) {

        QByteArray datagram;
        datagram.resize(m_udpSocket.pendingDatagramSize());
        QHostAddress sender;

        m_udpSocket.readDatagram(datagram.data(), datagram.size(), &sender);

        if (sender.isLoopback() && !m_testMode)
            continue;

        NetworkPacket* receivedPacket = new NetworkPacket(QLatin1String(""));
        bool success = NetworkPacket::unserialize(datagram, receivedPacket);

        //qCDebug(KDECONNECT_CORE) << "udp connection from " << receivedPacket->;
        //qCDebug(KDECONNECT_CORE) << "Datagram " << datagram.data() ;

        if (!success) {
            qCDebug(KDECONNECT_CORE) << "Could not unserialize UDP packet";
            delete receivedPacket;
            continue;
        }

        if (receivedPacket->type() != PACKET_TYPE_IDENTITY) {
            qCDebug(KDECONNECT_CORE) << "Received a UDP packet of wrong type" << receivedPacket->type();
            delete receivedPacket;
            continue;
        }

        if (receivedPacket->get<QString>(QStringLiteral("deviceId")) == KdeConnectConfig::instance().deviceId()) {
            //qCDebug(KDECONNECT_CORE) << "Ignoring my own broadcast";
            delete receivedPacket;
            continue;
        }

        int tcpPort = receivedPacket->get<int>(QStringLiteral("tcpPort"));

        //qCDebug(KDECONNECT_CORE) << "Received Udp identity packet from" << sender << " asking for a tcp connection on port " << tcpPort;

        QSslSocket* socket = new QSslSocket(this);
        socket->setProxy(QNetworkProxy::NoProxy);
        // Stash the identity packet and sender until the TCP connect resolves.
        m_receivedIdentityPackets[socket].np = receivedPacket;
        m_receivedIdentityPackets[socket].sender = sender;
        connect(socket, &QAbstractSocket::connected, this, &LanLinkProvider::tcpSocketConnected);
#if QT_VERSION < QT_VERSION_CHECK(5,15,0)
        connect(socket, QOverload<QAbstractSocket::SocketError>::of(&QAbstractSocket::error), this, &LanLinkProvider::connectError);
#else
        connect(socket, &QAbstractSocket::errorOccurred, this, &LanLinkProvider::connectError);
#endif
        socket->connectToHost(sender, tcpPort);
    }
}

// Outgoing TCP connect failed: answer by UDP instead so the peer can connect
// back to us (reverse connection), then clean up the stashed packet/socket.
void LanLinkProvider::connectError(QAbstractSocket::SocketError socketError)
{
    QSslSocket* socket = qobject_cast<QSslSocket*>(sender());
    if (!socket) return;

    qCDebug(KDECONNECT_CORE) << "Socket error" << socketError;
    qCDebug(KDECONNECT_CORE) << "Fallback (1), try reverse connection (send udp packet)" << socket->errorString();
    NetworkPacket np(QLatin1String(""));
    NetworkPacket::createIdentityPacket(&np);
    np.set(QStringLiteral("tcpPort"), m_tcpPort);
    m_udpSocket.writeDatagram(np.serialize(), m_receivedIdentityPackets[socket].sender, m_udpBroadcastPort);

    //The socket we created didn't work, and we didn't manage
    //to create a LanDeviceLink from it, deleting everything.
    delete m_receivedIdentityPackets.take(socket).np;
    socket->deleteLater();
}

//We received a UDP packet and answered by connecting to them by TCP. This gets called on a successful connection.
void LanLinkProvider::tcpSocketConnected()
{
    QSslSocket* socket = qobject_cast<QSslSocket*>(sender());

    if (!socket) return;
    // TODO Delete me?
#if QT_VERSION < QT_VERSION_CHECK(5,15,0)
    disconnect(socket, QOverload<QAbstractSocket::SocketError>::of(&QAbstractSocket::error), this, &LanLinkProvider::connectError);
#else
    disconnect(socket, &QAbstractSocket::errorOccurred, this, &LanLinkProvider::connectError);
#endif

    configureSocket(socket);

    // If socket disconnects due to any reason after connection, link on ssl failure
    connect(socket, &QAbstractSocket::disconnected, socket, &QObject::deleteLater);

    NetworkPacket* receivedPacket = m_receivedIdentityPackets[socket].np;
    const QString& deviceId = receivedPacket->get<QString>(QStringLiteral("deviceId"));
    //qCDebug(KDECONNECT_CORE) << "tcpSocketConnected" << socket->isWritable();

    // If network is on ssl, do not believe when they are connected, believe when handshake is completed
    NetworkPacket np2(QLatin1String(""));
    NetworkPacket::createIdentityPacket(&np2);
    socket->write(np2.serialize());
    bool success = socket->waitForBytesWritten();

    if (success) {

        qCDebug(KDECONNECT_CORE) << "TCP connection done (i'm the existing device)";

        // if ssl supported
        if (receivedPacket->get<int>(QStringLiteral("protocolVersion")) >= MIN_VERSION_WITH_SSL_SUPPORT) {
            bool isDeviceTrusted = KdeConnectConfig::instance().trustedDevices().contains(deviceId);
            configureSslSocket(socket, deviceId, isDeviceTrusted);

            qCDebug(KDECONNECT_CORE) << "Starting server ssl (I'm the client TCP socket)";

            connect(socket, &QSslSocket::encrypted, this, &LanLinkProvider::encrypted);
            connect(socket, QOverload<const QList<QSslError> &>::of(&QSslSocket::sslErrors), this, &LanLinkProvider::sslErrors);
            socket->startServerEncryption();

            return; // Return statement prevents from deleting received packet, needed in slot "encrypted"
        } else {
            qWarning() << receivedPacket->get<QString>(QStringLiteral("deviceName")) << "uses an old protocol version, this won't work";
            //addLink(deviceId, socket, receivedPacket, LanDeviceLink::Remotely);
        }

    } else {
        //I think this will never happen, but if it happens the deviceLink
        //(or the socket that is now inside it) might not be valid. Delete them.
        qCDebug(KDECONNECT_CORE) << "Fallback (2), try reverse connection (send udp packet)";
        m_udpSocket.writeDatagram(np2.serialize(), m_receivedIdentityPackets[socket].sender, m_udpBroadcastPort);
    }

    delete m_receivedIdentityPackets.take(socket).np;
    //We don't delete the socket because now it's owned by the LanDeviceLink
}

// Slot invoked once the TLS handshake completes: create/reuse the device link
// and free the stashed identity packet.
void LanLinkProvider::encrypted()
{
    qCDebug(KDECONNECT_CORE) << "Socket successfully established an SSL connection";

    QSslSocket* socket = qobject_cast<QSslSocket*>(sender());
    if (!socket) return;
    Q_ASSERT(socket->mode() != QSslSocket::UnencryptedMode);

    LanDeviceLink::ConnectionStarted connectionOrigin = (socket->mode() == QSslSocket::SslClientMode)? LanDeviceLink::Locally : LanDeviceLink::Remotely;

    NetworkPacket* receivedPacket = m_receivedIdentityPackets[socket].np;
    const QString& deviceId = receivedPacket->get<QString>(QStringLiteral("deviceId"));

    addLink(deviceId, socket, receivedPacket, connectionOrigin);

    // Copied from tcpSocketConnected slot, now delete received packet
    delete m_receivedIdentityPackets.take(socket).np;
}

// Tolerate self-signed certificates; any other SSL error is fatal and aborts
// the connection, discarding the pending identity packet.
void LanLinkProvider::sslErrors(const QList<QSslError>& errors)
{
    QSslSocket* socket = qobject_cast<QSslSocket*>(sender());
    if (!socket) return;

    bool fatal = false;
    for (const QSslError& error : errors) {
        if (error.error() != QSslError::SelfSignedCertificate) {
            qCCritical(KDECONNECT_CORE) << "Disconnecting due to fatal SSL Error: " << error;
            fatal = true;
        } else {
            qCDebug(KDECONNECT_CORE) << "Ignoring self-signed cert error";
        }
    }

    if (fatal) {
        socket->disconnectFromHost();
        delete m_receivedIdentityPackets.take(socket).np;
    }
}

//I'm the new device and this is the answer to my UDP identity packet (no data received yet). They are connecting to us through TCP, and they should send an identity.
void LanLinkProvider::newConnection()
{
    qCDebug(KDECONNECT_CORE) << "LanLinkProvider newConnection";

    while (m_server->hasPendingConnections()) {
        QSslSocket* socket = m_server->nextPendingConnection();
        configureSocket(socket);
        //This socket is still managed by us (and child of the QTcpServer), if
        //it disconnects before we manage to pass it to a LanDeviceLink, it's
        //our responsibility to delete it. We do so with this connection.
        connect(socket, &QAbstractSocket::disconnected, socket, &QObject::deleteLater);
        connect(socket, &QIODevice::readyRead, this, &LanLinkProvider::dataReceived);
    }
}

//I'm the new device and this is the answer to my UDP identity packet (data received)
void LanLinkProvider::dataReceived()
{
    QSslSocket* socket = qobject_cast<QSslSocket*>(sender());
    // SECURITY FIX (CWE-400): bound the size of the incoming identity packet.
    // Without this cap a remote peer could stream an unbounded "identity line"
    // and exhaust memory before a single readLine() ever succeeds.
    //the size here is arbitrary and is now at 8192 bytes. It needs to be considerably long as it includes the capabilities but there needs to be a limit
    //Tested between my systems and I get around 2000 per identity package.
    if (socket->bytesAvailable() > 8192) {
        qCWarning(KDECONNECT_CORE) << "LanLinkProvider/newConnection: Suspiciously long identity package received. Closing connection." << socket->peerAddress() << socket->bytesAvailable();
        socket->disconnectFromHost();
        return;
    }

#if QT_VERSION < QT_VERSION_CHECK(5,7,0)
    if (!socket->canReadLine()) return;
#else
    socket->startTransaction();
#endif

    const QByteArray data = socket->readLine();

    qCDebug(KDECONNECT_CORE) << "LanLinkProvider received reply:" << data;

    NetworkPacket* np = new NetworkPacket(QLatin1String(""));
    bool success = NetworkPacket::unserialize(data, np);

#if QT_VERSION < QT_VERSION_CHECK(5,7,0)
    if (!success) {
        delete np;
        return;
    }
#else
    if (!success) {
        delete np;
        socket->rollbackTransaction();
        return;
    }
    socket->commitTransaction();
#endif

    if (np->type() != PACKET_TYPE_IDENTITY) {
        qCWarning(KDECONNECT_CORE) << "LanLinkProvider/newConnection: Expected identity, received " << np->type();
        delete np;
        return;
    }

    // Needed in "encrypted" if ssl is used, similar to "tcpSocketConnected"
    m_receivedIdentityPackets[socket].np = np;

    const QString& deviceId = np->get<QString>(QStringLiteral("deviceId"));
    //qCDebug(KDECONNECT_CORE) << "Handshaking done (i'm the new device)";

    //This socket will now be owned by the LanDeviceLink or we don't want more data to be received, forget about it
    disconnect(socket, &QIODevice::readyRead, this, &LanLinkProvider::dataReceived);

    if (np->get<int>(QStringLiteral("protocolVersion")) >= MIN_VERSION_WITH_SSL_SUPPORT) {
        bool isDeviceTrusted = KdeConnectConfig::instance().trustedDevices().contains(deviceId);
        configureSslSocket(socket, deviceId, isDeviceTrusted);

        qCDebug(KDECONNECT_CORE) << "Starting client ssl (but I'm the server TCP socket)";

        connect(socket, &QSslSocket::encrypted, this, &LanLinkProvider::encrypted);

        if (isDeviceTrusted) {
            connect(socket, QOverload<const QList<QSslError> &>::of(&QSslSocket::sslErrors), this,
&LanLinkProvider::sslErrors); } socket->startClientEncryption(); } else { qWarning() << np->get<QString>(QStringLiteral("deviceName")) << "uses an old protocol version, this won't work"; //addLink(deviceId, socket, np, LanDeviceLink::Locally); delete m_receivedIdentityPackets.take(socket).np; } } void LanLinkProvider::deviceLinkDestroyed(QObject* destroyedDeviceLink) { const QString id = destroyedDeviceLink->property("deviceId").toString(); //qCDebug(KDECONNECT_CORE) << "deviceLinkDestroyed" << id; QMap< QString, LanDeviceLink* >::iterator linkIterator = m_links.find(id); Q_ASSERT(linkIterator != m_links.end()); if (linkIterator != m_links.end()) { Q_ASSERT(linkIterator.value() == destroyedDeviceLink); m_links.erase(linkIterator); auto pairingHandler = m_pairingHandlers.take(id); if (pairingHandler) { pairingHandler->deleteLater(); } } } void LanLinkProvider::configureSslSocket(QSslSocket* socket, const QString& deviceId, bool isDeviceTrusted) { // Setting supported ciphers manually, to match those on Android (FIXME: Test if this can be left unconfigured and still works for Android 4) QList<QSslCipher> socketCiphers; socketCiphers.append(QSslCipher(QStringLiteral("ECDHE-ECDSA-AES256-GCM-SHA384"))); socketCiphers.append(QSslCipher(QStringLiteral("ECDHE-ECDSA-AES128-GCM-SHA256"))); socketCiphers.append(QSslCipher(QStringLiteral("ECDHE-RSA-AES128-SHA"))); // Configure for ssl QSslConfiguration sslConfig; sslConfig.setCiphers(socketCiphers); sslConfig.setLocalCertificate(KdeConnectConfig::instance().certificate()); QFile privateKeyFile(KdeConnectConfig::instance().privateKeyPath()); QSslKey privateKey; if (privateKeyFile.open(QIODevice::ReadOnly)) { privateKey = QSslKey(privateKeyFile.readAll(), QSsl::Rsa); } privateKeyFile.close(); sslConfig.setPrivateKey(privateKey); if (isDeviceTrusted) { QString certString = KdeConnectConfig::instance().getDeviceProperty(deviceId, QStringLiteral("certificate"), QString()); 
sslConfig.setCaCertificates({QSslCertificate(certString.toLatin1())}); sslConfig.setPeerVerifyMode(QSslSocket::VerifyPeer); } else { sslConfig.setPeerVerifyMode(QSslSocket::QueryPeer); } socket->setSslConfiguration(sslConfig); socket->setPeerVerifyName(deviceId); //Usually SSL errors are only bad for trusted devices. Uncomment this section to log errors in any case, for debugging. //QObject::connect(socket, static_cast<void (QSslSocket::*)(const QList<QSslError>&)>(&QSslSocket::sslErrors), [](const QList<QSslError>& errors) //{ // Q_FOREACH (const QSslError& error, errors) { // qCDebug(KDECONNECT_CORE) << "SSL Error:" << error.errorString(); // } //}); } void LanLinkProvider::configureSocket(QSslSocket* socket) { socket->setProxy(QNetworkProxy::NoProxy); socket->setSocketOption(QAbstractSocket::KeepAliveOption, QVariant(1)); #ifdef TCP_KEEPIDLE // time to start sending keepalive packets (seconds) int maxIdle = 10; setsockopt(socket->socketDescriptor(), IPPROTO_TCP, TCP_KEEPIDLE, &maxIdle, sizeof(maxIdle)); #endif #ifdef TCP_KEEPINTVL // interval between keepalive packets after the initial period (seconds) int interval = 5; setsockopt(socket->socketDescriptor(), IPPROTO_TCP, TCP_KEEPINTVL, &interval, sizeof(interval)); #endif #ifdef TCP_KEEPCNT // number of missed keepalive packets before disconnecting int count = 3; setsockopt(socket->socketDescriptor(), IPPROTO_TCP, TCP_KEEPCNT, &count, sizeof(count)); #endif } void LanLinkProvider::addLink(const QString& deviceId, QSslSocket* socket, NetworkPacket* receivedPacket, LanDeviceLink::ConnectionStarted connectionOrigin) { // Socket disconnection will now be handled by LanDeviceLink disconnect(socket, &QAbstractSocket::disconnected, socket, &QObject::deleteLater); LanDeviceLink* deviceLink; //Do we have a link for this device already? 
QMap< QString, LanDeviceLink* >::iterator linkIterator = m_links.find(deviceId); if (linkIterator != m_links.end()) { //qCDebug(KDECONNECT_CORE) << "Reusing link to" << deviceId; deviceLink = linkIterator.value(); deviceLink->reset(socket, connectionOrigin); } else { deviceLink = new LanDeviceLink(deviceId, this, socket, connectionOrigin); connect(deviceLink, &QObject::destroyed, this, &LanLinkProvider::deviceLinkDestroyed); m_links[deviceId] = deviceLink; if (m_pairingHandlers.contains(deviceId)) { //We shouldn't have a pairinghandler if we didn't have a link. //Crash if debug, recover if release (by setting the new devicelink to the old pairinghandler) Q_ASSERT(m_pairingHandlers.contains(deviceId)); m_pairingHandlers[deviceId]->setDeviceLink(deviceLink); } } Q_EMIT onConnectionReceived(*receivedPacket, deviceLink); } LanPairingHandler* LanLinkProvider::createPairingHandler(DeviceLink* link) { LanPairingHandler* ph = m_pairingHandlers.value(link->deviceId()); if (!ph) { ph = new LanPairingHandler(link); qCDebug(KDECONNECT_CORE) << "creating pairing handler for" << link->deviceId(); connect (ph, &LanPairingHandler::pairingError, link, &DeviceLink::pairingError); m_pairingHandlers[link->deviceId()] = ph; } return ph; } void LanLinkProvider::userRequestsPair(const QString& deviceId) { LanPairingHandler* ph = createPairingHandler(m_links.value(deviceId)); ph->requestPairing(); } void LanLinkProvider::userRequestsUnpair(const QString& deviceId) { LanPairingHandler* ph = createPairingHandler(m_links.value(deviceId)); ph->unpair(); } void LanLinkProvider::incomingPairPacket(DeviceLink* deviceLink, const NetworkPacket& np) { LanPairingHandler* ph = createPairingHandler(deviceLink); ph->packetReceived(np); }
// (stray non-code artifact, commented out to keep the translation unit valid): ./CrossVul/dataset_final_sorted/CWE-400/cpp/bad_4320_0