cwe_id
stringclasses 158
values | cwe_description
stringclasses 158
values | language
stringclasses 21
values | vulnerable_code
stringlengths 54
508k
| fixed_code
stringlengths 54
508k
| file_pair_id
stringlengths 3
7
| source
stringclasses 1
value | language_dir
stringclasses 29
values |
|---|---|---|---|---|---|---|---|
CWE-1021
|
Improper Restriction of Rendered UI Layers or Frames - A web application is expected to place restrictions on whether it is allowed to be rendered within frames, iframes, objects, embed or applet elements.
|
go
|
package controllers
import (
"compress/gzip"
"context"
"crypto/tls"
"html/template"
"net/http"
"net/url"
"time"
"github.com/NYTimes/gziphandler"
"github.com/gophish/gophish/auth"
"github.com/gophish/gophish/config"
ctx "github.com/gophish/gophish/context"
"github.com/gophish/gophish/controllers/api"
log "github.com/gophish/gophish/logger"
mid "github.com/gophish/gophish/middleware"
"github.com/gophish/gophish/middleware/ratelimit"
"github.com/gophish/gophish/models"
"github.com/gophish/gophish/util"
"github.com/gophish/gophish/worker"
"github.com/gorilla/csrf"
"github.com/gorilla/handlers"
"github.com/gorilla/mux"
"github.com/gorilla/sessions"
"github.com/jordan-wright/unindexed"
)
// AdminServerOption is a functional option that is used to configure the
// admin server
type AdminServerOption func(*AdminServer)

// AdminServer is an HTTP server that implements the administrative Gophish
// handlers, including the dashboard and REST API.
type AdminServer struct {
    server  *http.Server           // underlying HTTP(S) server; handler chain installed by registerRoutes
    worker  worker.Worker          // background worker started alongside the server (may be nil)
    config  config.AdminServer     // listen address, TLS paths, CSRF key
    limiter *ratelimit.PostLimiter // POST rate limiter applied to the login route
}

// defaultTLSConfig restricts the admin server to TLS 1.2+ with a modern,
// AEAD-only cipher list (server preference honored).
var defaultTLSConfig = &tls.Config{
    PreferServerCipherSuites: true,
    CurvePreferences: []tls.CurveID{
        tls.X25519,
        tls.CurveP256,
    },
    MinVersion: tls.VersionTLS12,
    CipherSuites: []uint16{
        tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
        tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
        tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,
        tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,
        tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
        tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
        // Kept for backwards compatibility with some clients
        tls.TLS_RSA_WITH_AES_256_GCM_SHA384,
        tls.TLS_RSA_WITH_AES_128_GCM_SHA256,
    },
}
// WithWorker is an option that sets the background worker.
func WithWorker(w worker.Worker) AdminServerOption {
    return func(server *AdminServer) { server.worker = w }
}
// NewAdminServer returns a new instance of the AdminServer with the
// provided config and options applied.
//
// Defaults: a fresh worker, an HTTP server with a 10s read timeout bound
// to config.ListenURL, and a new POST rate limiter. Options (e.g.
// WithWorker) may override the defaults. Routes are registered before
// the server is returned.
func NewAdminServer(config config.AdminServer, options ...AdminServerOption) *AdminServer {
    // NOTE(review): the error from worker.New() is discarded; on failure
    // as.worker is nil, which Start() tolerates (it checks for nil).
    defaultWorker, _ := worker.New()
    defaultServer := &http.Server{
        ReadTimeout: 10 * time.Second,
        Addr:        config.ListenURL,
    }
    defaultLimiter := ratelimit.NewPostLimiter()
    as := &AdminServer{
        worker:  defaultWorker,
        server:  defaultServer,
        limiter: defaultLimiter,
        config:  config,
    }
    for _, opt := range options {
        opt(as)
    }
    as.registerRoutes()
    return as
}
// Start launches the admin server, listening on the configured address.
// When UseTLS is set, only TLS 1.2+ is accepted (see defaultTLSConfig) and
// util.CheckAndCreateSSL is called first — presumably to ensure the
// cert/key files exist (TODO confirm). This call blocks; server exit goes
// through log.Fatal.
func (as *AdminServer) Start() {
    if as.worker != nil {
        // Run the background worker concurrently with the HTTP server.
        go as.worker.Start()
    }
    if as.config.UseTLS {
        // Only support TLS 1.2 and above - ref #1691, #1689
        as.server.TLSConfig = defaultTLSConfig
        err := util.CheckAndCreateSSL(as.config.CertPath, as.config.KeyPath)
        if err != nil {
            log.Fatal(err)
        }
        log.Infof("Starting admin server at https://%s", as.config.ListenURL)
        log.Fatal(as.server.ListenAndServeTLS(as.config.CertPath, as.config.KeyPath))
    }
    // If TLS isn't configured, just listen on HTTP
    log.Infof("Starting admin server at http://%s", as.config.ListenURL)
    log.Fatal(as.server.ListenAndServe())
}
// Shutdown attempts to gracefully shutdown the server, allowing up to ten
// seconds for in-flight requests to finish before giving up.
func (as *AdminServer) Shutdown() error {
    // Named shutdownCtx to avoid shadowing the `ctx` package alias
    // (github.com/gophish/gophish/context) imported at the top of the file.
    shutdownCtx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
    defer cancel()
    return as.server.Shutdown(shutdownCtx)
}
// registerRoutes creates the routes for handling requests to the web
// interface and installs the full middleware chain
// (CSRF -> context -> security headers -> gzip -> logging) on as.server.
func (as *AdminServer) registerRoutes() {
    router := mux.NewRouter()
    // Base Front-end routes
    router.HandleFunc("/", mid.Use(as.Base, mid.RequireLogin))
    // The login route is rate limited instead of login-protected.
    router.HandleFunc("/login", mid.Use(as.Login, as.limiter.Limit))
    router.HandleFunc("/logout", mid.Use(as.Logout, mid.RequireLogin))
    router.HandleFunc("/reset_password", mid.Use(as.ResetPassword, mid.RequireLogin))
    router.HandleFunc("/campaigns", mid.Use(as.Campaigns, mid.RequireLogin))
    router.HandleFunc("/campaigns/{id:[0-9]+}", mid.Use(as.CampaignID, mid.RequireLogin))
    router.HandleFunc("/templates", mid.Use(as.Templates, mid.RequireLogin))
    router.HandleFunc("/groups", mid.Use(as.Groups, mid.RequireLogin))
    router.HandleFunc("/landing_pages", mid.Use(as.LandingPages, mid.RequireLogin))
    router.HandleFunc("/sending_profiles", mid.Use(as.SendingProfiles, mid.RequireLogin))
    router.HandleFunc("/settings", mid.Use(as.Settings, mid.RequireLogin))
    // Admin-only routes additionally require the ModifySystem permission.
    router.HandleFunc("/users", mid.Use(as.UserManagement, mid.RequirePermission(models.PermissionModifySystem), mid.RequireLogin))
    router.HandleFunc("/webhooks", mid.Use(as.Webhooks, mid.RequirePermission(models.PermissionModifySystem), mid.RequireLogin))
    router.HandleFunc("/impersonate", mid.Use(as.Impersonate, mid.RequirePermission(models.PermissionModifySystem), mid.RequireLogin))
    // Create the API routes
    api := api.NewServer(
        api.WithWorker(as.worker),
        api.WithLimiter(as.limiter),
    )
    router.PathPrefix("/api/").Handler(api)
    // Setup static file serving
    router.PathPrefix("/").Handler(http.FileServer(unindexed.Dir("./static/")))
    // Setup CSRF Protection
    csrfKey := []byte(as.config.CSRFKey)
    if len(csrfKey) == 0 {
        // No configured key — fall back to a random per-process key.
        csrfKey = []byte(auth.GenerateSecureKey(auth.APIKeyLength))
    }
    csrfHandler := csrf.Protect(csrfKey,
        csrf.FieldName("csrf_token"),
        csrf.Secure(as.config.UseTLS))
    adminHandler := csrfHandler(router)
    // FIX (CWE-1021): ApplySecurityHeaders sets anti-framing response headers
    // (e.g. X-Frame-Options / CSP frame-ancestors) so the admin UI cannot be
    // embedded in a frame on another origin (clickjacking). Without it the
    // admin interface was served with no framing restrictions.
    adminHandler = mid.Use(adminHandler.ServeHTTP, mid.CSRFExceptions, mid.GetContext, mid.ApplySecurityHeaders)
    // Setup GZIP compression
    gzipWrapper, _ := gziphandler.NewGzipLevelHandler(gzip.BestCompression)
    adminHandler = gzipWrapper(adminHandler)
    // Setup logging
    adminHandler = handlers.CombinedLoggingHandler(log.Writer(), adminHandler)
    as.server.Handler = adminHandler
}
// templateParams holds the data passed to every admin page template.
type templateParams struct {
    Title        string        // page title set per handler
    Flashes      []interface{} // pending session flash messages
    User         models.User   // currently authenticated user
    Token        string        // CSRF token for forms
    Version      string        // Gophish build version
    ModifySystem bool          // whether the user holds PermissionModifySystem
}

// newTemplateParams returns the default template parameters for a user and
// the CSRF token. It expects "user" and "session" to already be present in
// the request context (set by the GetContext middleware).
func newTemplateParams(r *http.Request) templateParams {
    user := ctx.Get(r, "user").(models.User)
    session := ctx.Get(r, "session").(*sessions.Session)
    // Permission lookup error is ignored; modifySystem defaults to false.
    modifySystem, _ := user.HasPermission(models.PermissionModifySystem)
    return templateParams{
        Token:        csrf.Token(r),
        User:         user,
        ModifySystem: modifySystem,
        Version:      config.Version,
        Flashes:      session.Flashes(),
    }
}
// Base renders the dashboard page ("/").
func (as *AdminServer) Base(w http.ResponseWriter, r *http.Request) {
    params := newTemplateParams(r)
    params.Title = "Dashboard"
    // Render error is discarded here; getTemplate logs parse errors.
    getTemplate(w, "dashboard").ExecuteTemplate(w, "base", params)
}

// Campaigns renders the campaign list page.
func (as *AdminServer) Campaigns(w http.ResponseWriter, r *http.Request) {
    params := newTemplateParams(r)
    params.Title = "Campaigns"
    getTemplate(w, "campaigns").ExecuteTemplate(w, "base", params)
}

// CampaignID renders the results page for a single campaign.
func (as *AdminServer) CampaignID(w http.ResponseWriter, r *http.Request) {
    params := newTemplateParams(r)
    params.Title = "Campaign Results"
    getTemplate(w, "campaign_results").ExecuteTemplate(w, "base", params)
}

// Templates renders the email templates page.
func (as *AdminServer) Templates(w http.ResponseWriter, r *http.Request) {
    params := newTemplateParams(r)
    params.Title = "Email Templates"
    getTemplate(w, "templates").ExecuteTemplate(w, "base", params)
}

// Groups renders the users & groups page.
func (as *AdminServer) Groups(w http.ResponseWriter, r *http.Request) {
    params := newTemplateParams(r)
    params.Title = "Users & Groups"
    getTemplate(w, "groups").ExecuteTemplate(w, "base", params)
}

// LandingPages renders the landing pages page.
func (as *AdminServer) LandingPages(w http.ResponseWriter, r *http.Request) {
    params := newTemplateParams(r)
    params.Title = "Landing Pages"
    getTemplate(w, "landing_pages").ExecuteTemplate(w, "base", params)
}

// SendingProfiles renders the sending profiles page.
func (as *AdminServer) SendingProfiles(w http.ResponseWriter, r *http.Request) {
    params := newTemplateParams(r)
    params.Title = "Sending Profiles"
    getTemplate(w, "sending_profiles").ExecuteTemplate(w, "base", params)
}
// Settings renders the settings page on GET and processes a password
// change on POST, replying with a JSON models.Response in the POST case.
func (as *AdminServer) Settings(w http.ResponseWriter, r *http.Request) {
    switch {
    case r.Method == "GET":
        params := newTemplateParams(r)
        params.Title = "Settings"
        session := ctx.Get(r, "session").(*sessions.Session)
        session.Save(r, w)
        getTemplate(w, "settings").ExecuteTemplate(w, "base", params)
    case r.Method == "POST":
        u := ctx.Get(r, "user").(models.User)
        currentPw := r.FormValue("current_password")
        newPassword := r.FormValue("new_password")
        confirmPassword := r.FormValue("confirm_new_password")
        // Check the current password
        err := auth.ValidatePassword(currentPw, u.Hash)
        msg := models.Response{Success: true, Message: "Settings Updated Successfully"}
        if err != nil {
            msg.Message = err.Error()
            msg.Success = false
            api.JSONResponse(w, msg, http.StatusBadRequest)
            return
        }
        // Validate the new password against its confirmation and hash it.
        newHash, err := auth.ValidatePasswordChange(u.Hash, newPassword, confirmPassword)
        if err != nil {
            msg.Message = err.Error()
            msg.Success = false
            api.JSONResponse(w, msg, http.StatusBadRequest)
            return
        }
        u.Hash = string(newHash)
        // Persist the updated hash.
        if err = models.PutUser(&u); err != nil {
            msg.Message = err.Error()
            msg.Success = false
            api.JSONResponse(w, msg, http.StatusInternalServerError)
            return
        }
        api.JSONResponse(w, msg, http.StatusOK)
    }
    // Other methods fall through with no response body written.
}

// UserManagement is an admin-only handler that allows for the registration
// and management of user accounts within Gophish.
func (as *AdminServer) UserManagement(w http.ResponseWriter, r *http.Request) {
    params := newTemplateParams(r)
    params.Title = "User Management"
    getTemplate(w, "users").ExecuteTemplate(w, "base", params)
}
// nextOrIndex redirects to the path given by the "next" form value,
// falling back to the index page ("/").
//
// Only the path component of the parsed value is used, and values that
// browsers would interpret as protocol-relative URLs ("//evil.com",
// "/\evil.com", "\\evil.com") are rejected, closing an open-redirect
// hole that the plain path check left open (url.Parse("////x") yields
// Path "//x", which http.Redirect would emit verbatim).
func (as *AdminServer) nextOrIndex(w http.ResponseWriter, r *http.Request) {
    next := "/"
    u, err := url.Parse(r.FormValue("next"))
    if err == nil {
        path := u.Path
        if path != "" && path[0] != '\\' &&
            !(len(path) > 1 && path[0] == '/' && (path[1] == '/' || path[1] == '\\')) {
            next = path
        }
    }
    http.Redirect(w, r, next, http.StatusFound)
}
// handleInvalidLogin re-renders the login page with a 401 status and a
// generic "Invalid Username/Password" flash (the same message for unknown
// users and wrong passwords, so the response does not reveal which failed).
func (as *AdminServer) handleInvalidLogin(w http.ResponseWriter, r *http.Request) {
    session := ctx.Get(r, "session").(*sessions.Session)
    Flash(w, r, "danger", "Invalid Username/Password")
    params := struct {
        User    models.User
        Title   string
        Flashes []interface{}
        Token   string
    }{Title: "Login", Token: csrf.Token(r)}
    params.Flashes = session.Flashes()
    session.Save(r, w)
    // The login page is parsed directly rather than via getTemplate since
    // it does not use the base/nav layout templates.
    templates := template.New("template")
    _, err := templates.ParseFiles("templates/login.html", "templates/flashes.html")
    if err != nil {
        log.Error(err)
    }
    // w.Header().Set("Content-Type", "text/html; charset=utf-8")
    w.WriteHeader(http.StatusUnauthorized)
    // NOTE(review): template.Must panics if the parse above failed.
    template.Must(templates, err).ExecuteTemplate(w, "base", params)
}
// Webhooks is an admin-only handler that renders the webhooks page.
func (as *AdminServer) Webhooks(w http.ResponseWriter, r *http.Request) {
    params := newTemplateParams(r)
    params.Title = "Webhooks"
    getTemplate(w, "webhooks").ExecuteTemplate(w, "base", params)
}

// Impersonate allows an admin to login to a user account without needing the password.
// On POST it rewrites the session's user id to the target user's id; any
// other method simply redirects to the dashboard. Route registration gates
// this behind RequireLogin + PermissionModifySystem.
func (as *AdminServer) Impersonate(w http.ResponseWriter, r *http.Request) {
    if r.Method == "POST" {
        username := r.FormValue("username")
        u, err := models.GetUserByUsername(username)
        if err != nil {
            log.Error(err)
            http.Error(w, err.Error(), http.StatusNotFound)
            return
        }
        // Swap the session over to the impersonated user.
        session := ctx.Get(r, "session").(*sessions.Session)
        session.Values["id"] = u.Id
        session.Save(r, w)
    }
    http.Redirect(w, r, "/", http.StatusFound)
}
// Login handles the authentication flow for a user. If credentials are valid,
// a session is created.
//
// GET renders the standalone login page; POST validates the submitted
// username/password and, on success, stores the user id in the session and
// redirects via nextOrIndex.
func (as *AdminServer) Login(w http.ResponseWriter, r *http.Request) {
    params := struct {
        User    models.User
        Title   string
        Flashes []interface{}
        Token   string
    }{Title: "Login", Token: csrf.Token(r)}
    session := ctx.Get(r, "session").(*sessions.Session)
    switch {
    case r.Method == "GET":
        params.Flashes = session.Flashes()
        session.Save(r, w)
        templates := template.New("template")
        _, err := templates.ParseFiles("templates/login.html", "templates/flashes.html")
        if err != nil {
            log.Error(err)
        }
        template.Must(templates, err).ExecuteTemplate(w, "base", params)
    case r.Method == "POST":
        // Find the user with the provided username
        username, password := r.FormValue("username"), r.FormValue("password")
        u, err := models.GetUserByUsername(username)
        if err != nil {
            // NOTE(review): unknown-user and wrong-password paths return the
            // same response, but skip the password check here — timing may
            // differ, a possible user-enumeration vector; confirm.
            log.Error(err)
            as.handleInvalidLogin(w, r)
            return
        }
        // Validate the user's password
        err = auth.ValidatePassword(password, u.Hash)
        if err != nil {
            log.Error(err)
            as.handleInvalidLogin(w, r)
            return
        }
        // If we've logged in, save the session and redirect to the dashboard
        session.Values["id"] = u.Id
        session.Save(r, w)
        as.nextOrIndex(w, r)
    }
}
// Logout destroys the current user session
func (as *AdminServer) Logout(w http.ResponseWriter, r *http.Request) {
    sess := ctx.Get(r, "session").(*sessions.Session)
    // Drop the stored user id, queue a confirmation flash, persist the
    // session, then send the user back to the login page.
    delete(sess.Values, "id")
    Flash(w, r, "success", "You have successfully logged out")
    sess.Save(r, w)
    http.Redirect(w, r, "/login", http.StatusFound)
}
// ResetPassword handles the password reset flow when a password change is
// required either by the Gophish system or an administrator.
//
// This handler is meant to be used when a user is required to reset their
// password, not just when they want to.
//
// This is an important distinction since in this handler we don't require
// the user to re-enter their current password, as opposed to the flow
// through the settings handler.
//
// To that end, if the user doesn't require a password change, we will
// redirect them to the settings page.
func (as *AdminServer) ResetPassword(w http.ResponseWriter, r *http.Request) {
    u := ctx.Get(r, "user").(models.User)
    session := ctx.Get(r, "session").(*sessions.Session)
    if !u.PasswordChangeRequired {
        // Voluntary changes go through /settings, which re-checks the
        // current password.
        Flash(w, r, "info", "Please reset your password through the settings page")
        session.Save(r, w)
        http.Redirect(w, r, "/settings", http.StatusTemporaryRedirect)
        return
    }
    params := newTemplateParams(r)
    params.Title = "Reset Password"
    switch {
    case r.Method == http.MethodGet:
        params.Flashes = session.Flashes()
        session.Save(r, w)
        getTemplate(w, "reset_password").ExecuteTemplate(w, "base", params)
        return
    case r.Method == http.MethodPost:
        newPassword := r.FormValue("password")
        confirmPassword := r.FormValue("confirm_password")
        // Validate the new password against its confirmation and hash it.
        newHash, err := auth.ValidatePasswordChange(u.Hash, newPassword, confirmPassword)
        if err != nil {
            Flash(w, r, "danger", err.Error())
            params.Flashes = session.Flashes()
            session.Save(r, w)
            w.WriteHeader(http.StatusBadRequest)
            getTemplate(w, "reset_password").ExecuteTemplate(w, "base", params)
            return
        }
        // Clear the forced-change flag and persist the new hash.
        u.PasswordChangeRequired = false
        u.Hash = newHash
        if err = models.PutUser(&u); err != nil {
            Flash(w, r, "danger", err.Error())
            params.Flashes = session.Flashes()
            session.Save(r, w)
            w.WriteHeader(http.StatusInternalServerError)
            getTemplate(w, "reset_password").ExecuteTemplate(w, "base", params)
            return
        }
        // TODO: We probably want to flash a message here that the password was
        // changed successfully. The problem is that when the user resets their
        // password on first use, they will see two flashes on the dashboard-
        // one for their password reset, and one for the "no campaigns created".
        //
        // The solution to this is to revamp the empty page to be more useful,
        // like a wizard or something.
        as.nextOrIndex(w, r)
    }
}
// TODO: Make this execute the template, too

// getTemplate parses the base layout, navigation, flashes, and the named
// page template, returning the parsed set. Parse errors are logged — but
// note that template.Must then panics on the same non-nil error.
func getTemplate(w http.ResponseWriter, tmpl string) *template.Template {
    templates := template.New("template")
    _, err := templates.ParseFiles("templates/base.html", "templates/nav.html", "templates/"+tmpl+".html", "templates/flashes.html")
    if err != nil {
        log.Error(err)
    }
    return template.Must(templates, err)
}

// Flash queues a flash message of the given type (e.g. "danger", "success")
// on the request's session; it is rendered on the next page that drains
// session.Flashes().
func Flash(w http.ResponseWriter, r *http.Request, t string, m string) {
    session := ctx.Get(r, "session").(*sessions.Session)
    session.AddFlash(models.Flash{
        Type:    t,
        Message: m,
    })
}
|
package controllers
import (
"compress/gzip"
"context"
"crypto/tls"
"html/template"
"net/http"
"net/url"
"time"
"github.com/NYTimes/gziphandler"
"github.com/gophish/gophish/auth"
"github.com/gophish/gophish/config"
ctx "github.com/gophish/gophish/context"
"github.com/gophish/gophish/controllers/api"
log "github.com/gophish/gophish/logger"
mid "github.com/gophish/gophish/middleware"
"github.com/gophish/gophish/middleware/ratelimit"
"github.com/gophish/gophish/models"
"github.com/gophish/gophish/util"
"github.com/gophish/gophish/worker"
"github.com/gorilla/csrf"
"github.com/gorilla/handlers"
"github.com/gorilla/mux"
"github.com/gorilla/sessions"
"github.com/jordan-wright/unindexed"
)
// AdminServerOption is a functional option that is used to configure the
// admin server
type AdminServerOption func(*AdminServer)

// AdminServer is an HTTP server that implements the administrative Gophish
// handlers, including the dashboard and REST API.
type AdminServer struct {
    server  *http.Server           // underlying HTTP(S) server; handler chain installed by registerRoutes
    worker  worker.Worker          // background worker started alongside the server (may be nil)
    config  config.AdminServer     // listen address, TLS paths, CSRF key
    limiter *ratelimit.PostLimiter // POST rate limiter applied to the login route
}

// defaultTLSConfig restricts the admin server to TLS 1.2+ with a modern,
// AEAD-only cipher list (server preference honored).
var defaultTLSConfig = &tls.Config{
    PreferServerCipherSuites: true,
    CurvePreferences: []tls.CurveID{
        tls.X25519,
        tls.CurveP256,
    },
    MinVersion: tls.VersionTLS12,
    CipherSuites: []uint16{
        tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
        tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
        tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,
        tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,
        tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
        tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
        // Kept for backwards compatibility with some clients
        tls.TLS_RSA_WITH_AES_256_GCM_SHA384,
        tls.TLS_RSA_WITH_AES_128_GCM_SHA256,
    },
}

// WithWorker is an option that sets the background worker.
func WithWorker(w worker.Worker) AdminServerOption {
    return func(as *AdminServer) {
        as.worker = w
    }
}
// NewAdminServer returns a new instance of the AdminServer with the
// provided config and options applied.
//
// Defaults: a fresh worker, an HTTP server with a 10s read timeout bound
// to config.ListenURL, and a new POST rate limiter. Routes are registered
// before the server is returned.
func NewAdminServer(config config.AdminServer, options ...AdminServerOption) *AdminServer {
    // NOTE(review): the error from worker.New() is discarded; on failure
    // as.worker is nil, which Start() tolerates (it checks for nil).
    defaultWorker, _ := worker.New()
    defaultServer := &http.Server{
        ReadTimeout: 10 * time.Second,
        Addr:        config.ListenURL,
    }
    defaultLimiter := ratelimit.NewPostLimiter()
    as := &AdminServer{
        worker:  defaultWorker,
        server:  defaultServer,
        limiter: defaultLimiter,
        config:  config,
    }
    for _, opt := range options {
        opt(as)
    }
    as.registerRoutes()
    return as
}
// Start launches the admin server, listening on the configured address.
// When UseTLS is set, only TLS 1.2+ is accepted (see defaultTLSConfig);
// util.CheckAndCreateSSL is called first — presumably to ensure the
// cert/key files exist (TODO confirm). This call blocks; server exit goes
// through log.Fatal.
func (as *AdminServer) Start() {
    if as.worker != nil {
        // Run the background worker concurrently with the HTTP server.
        go as.worker.Start()
    }
    if as.config.UseTLS {
        // Only support TLS 1.2 and above - ref #1691, #1689
        as.server.TLSConfig = defaultTLSConfig
        err := util.CheckAndCreateSSL(as.config.CertPath, as.config.KeyPath)
        if err != nil {
            log.Fatal(err)
        }
        log.Infof("Starting admin server at https://%s", as.config.ListenURL)
        log.Fatal(as.server.ListenAndServeTLS(as.config.CertPath, as.config.KeyPath))
    }
    // If TLS isn't configured, just listen on HTTP
    log.Infof("Starting admin server at http://%s", as.config.ListenURL)
    log.Fatal(as.server.ListenAndServe())
}

// Shutdown attempts to gracefully shutdown the server within ten seconds.
// NOTE(review): the local `ctx` shadows the package alias
// ctx ("github.com/gophish/gophish/context") inside this function.
func (as *AdminServer) Shutdown() error {
    ctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
    defer cancel()
    return as.server.Shutdown(ctx)
}
// registerRoutes creates the routes for handling requests to the web
// interface and installs the full middleware chain
// (CSRF -> context -> security headers -> gzip -> logging) on as.server.
func (as *AdminServer) registerRoutes() {
    router := mux.NewRouter()
    // Base Front-end routes
    router.HandleFunc("/", mid.Use(as.Base, mid.RequireLogin))
    // The login route is rate limited instead of login-protected.
    router.HandleFunc("/login", mid.Use(as.Login, as.limiter.Limit))
    router.HandleFunc("/logout", mid.Use(as.Logout, mid.RequireLogin))
    router.HandleFunc("/reset_password", mid.Use(as.ResetPassword, mid.RequireLogin))
    router.HandleFunc("/campaigns", mid.Use(as.Campaigns, mid.RequireLogin))
    router.HandleFunc("/campaigns/{id:[0-9]+}", mid.Use(as.CampaignID, mid.RequireLogin))
    router.HandleFunc("/templates", mid.Use(as.Templates, mid.RequireLogin))
    router.HandleFunc("/groups", mid.Use(as.Groups, mid.RequireLogin))
    router.HandleFunc("/landing_pages", mid.Use(as.LandingPages, mid.RequireLogin))
    router.HandleFunc("/sending_profiles", mid.Use(as.SendingProfiles, mid.RequireLogin))
    router.HandleFunc("/settings", mid.Use(as.Settings, mid.RequireLogin))
    // Admin-only routes additionally require the ModifySystem permission.
    router.HandleFunc("/users", mid.Use(as.UserManagement, mid.RequirePermission(models.PermissionModifySystem), mid.RequireLogin))
    router.HandleFunc("/webhooks", mid.Use(as.Webhooks, mid.RequirePermission(models.PermissionModifySystem), mid.RequireLogin))
    router.HandleFunc("/impersonate", mid.Use(as.Impersonate, mid.RequirePermission(models.PermissionModifySystem), mid.RequireLogin))
    // Create the API routes
    api := api.NewServer(
        api.WithWorker(as.worker),
        api.WithLimiter(as.limiter),
    )
    router.PathPrefix("/api/").Handler(api)
    // Setup static file serving
    router.PathPrefix("/").Handler(http.FileServer(unindexed.Dir("./static/")))
    // Setup CSRF Protection
    csrfKey := []byte(as.config.CSRFKey)
    if len(csrfKey) == 0 {
        // No configured key — fall back to a random per-process key.
        csrfKey = []byte(auth.GenerateSecureKey(auth.APIKeyLength))
    }
    csrfHandler := csrf.Protect(csrfKey,
        csrf.FieldName("csrf_token"),
        csrf.Secure(as.config.UseTLS))
    adminHandler := csrfHandler(router)
    // ApplySecurityHeaders adds the anti-framing response headers (CWE-1021
    // mitigation) so the admin UI cannot be embedded on another origin.
    adminHandler = mid.Use(adminHandler.ServeHTTP, mid.CSRFExceptions, mid.GetContext, mid.ApplySecurityHeaders)
    // Setup GZIP compression
    gzipWrapper, _ := gziphandler.NewGzipLevelHandler(gzip.BestCompression)
    adminHandler = gzipWrapper(adminHandler)
    // Setup logging
    adminHandler = handlers.CombinedLoggingHandler(log.Writer(), adminHandler)
    as.server.Handler = adminHandler
}
// templateParams holds the data passed to every admin page template.
type templateParams struct {
    Title        string        // page title set per handler
    Flashes      []interface{} // pending session flash messages
    User         models.User   // currently authenticated user
    Token        string        // CSRF token for forms
    Version      string        // Gophish build version
    ModifySystem bool          // whether the user holds PermissionModifySystem
}

// newTemplateParams returns the default template parameters for a user and
// the CSRF token. It expects "user" and "session" to already be present in
// the request context (set by the GetContext middleware).
func newTemplateParams(r *http.Request) templateParams {
    user := ctx.Get(r, "user").(models.User)
    session := ctx.Get(r, "session").(*sessions.Session)
    // Permission lookup error is ignored; modifySystem defaults to false.
    modifySystem, _ := user.HasPermission(models.PermissionModifySystem)
    return templateParams{
        Token:        csrf.Token(r),
        User:         user,
        ModifySystem: modifySystem,
        Version:      config.Version,
        Flashes:      session.Flashes(),
    }
}
// Base renders the dashboard page ("/").
func (as *AdminServer) Base(w http.ResponseWriter, r *http.Request) {
    params := newTemplateParams(r)
    params.Title = "Dashboard"
    // Render error is discarded here; getTemplate logs parse errors.
    getTemplate(w, "dashboard").ExecuteTemplate(w, "base", params)
}

// Campaigns renders the campaign list page.
func (as *AdminServer) Campaigns(w http.ResponseWriter, r *http.Request) {
    params := newTemplateParams(r)
    params.Title = "Campaigns"
    getTemplate(w, "campaigns").ExecuteTemplate(w, "base", params)
}

// CampaignID renders the results page for a single campaign.
func (as *AdminServer) CampaignID(w http.ResponseWriter, r *http.Request) {
    params := newTemplateParams(r)
    params.Title = "Campaign Results"
    getTemplate(w, "campaign_results").ExecuteTemplate(w, "base", params)
}

// Templates renders the email templates page.
func (as *AdminServer) Templates(w http.ResponseWriter, r *http.Request) {
    params := newTemplateParams(r)
    params.Title = "Email Templates"
    getTemplate(w, "templates").ExecuteTemplate(w, "base", params)
}

// Groups renders the users & groups page.
func (as *AdminServer) Groups(w http.ResponseWriter, r *http.Request) {
    params := newTemplateParams(r)
    params.Title = "Users & Groups"
    getTemplate(w, "groups").ExecuteTemplate(w, "base", params)
}

// LandingPages renders the landing pages page.
func (as *AdminServer) LandingPages(w http.ResponseWriter, r *http.Request) {
    params := newTemplateParams(r)
    params.Title = "Landing Pages"
    getTemplate(w, "landing_pages").ExecuteTemplate(w, "base", params)
}

// SendingProfiles renders the sending profiles page.
func (as *AdminServer) SendingProfiles(w http.ResponseWriter, r *http.Request) {
    params := newTemplateParams(r)
    params.Title = "Sending Profiles"
    getTemplate(w, "sending_profiles").ExecuteTemplate(w, "base", params)
}
// Settings renders the settings page on GET and processes a password
// change on POST, replying with a JSON models.Response in the POST case.
func (as *AdminServer) Settings(w http.ResponseWriter, r *http.Request) {
    switch {
    case r.Method == "GET":
        params := newTemplateParams(r)
        params.Title = "Settings"
        session := ctx.Get(r, "session").(*sessions.Session)
        session.Save(r, w)
        getTemplate(w, "settings").ExecuteTemplate(w, "base", params)
    case r.Method == "POST":
        u := ctx.Get(r, "user").(models.User)
        currentPw := r.FormValue("current_password")
        newPassword := r.FormValue("new_password")
        confirmPassword := r.FormValue("confirm_new_password")
        // Check the current password
        err := auth.ValidatePassword(currentPw, u.Hash)
        msg := models.Response{Success: true, Message: "Settings Updated Successfully"}
        if err != nil {
            msg.Message = err.Error()
            msg.Success = false
            api.JSONResponse(w, msg, http.StatusBadRequest)
            return
        }
        // Validate the new password against its confirmation and hash it.
        newHash, err := auth.ValidatePasswordChange(u.Hash, newPassword, confirmPassword)
        if err != nil {
            msg.Message = err.Error()
            msg.Success = false
            api.JSONResponse(w, msg, http.StatusBadRequest)
            return
        }
        u.Hash = string(newHash)
        // Persist the updated hash.
        if err = models.PutUser(&u); err != nil {
            msg.Message = err.Error()
            msg.Success = false
            api.JSONResponse(w, msg, http.StatusInternalServerError)
            return
        }
        api.JSONResponse(w, msg, http.StatusOK)
    }
}

// UserManagement is an admin-only handler that allows for the registration
// and management of user accounts within Gophish.
func (as *AdminServer) UserManagement(w http.ResponseWriter, r *http.Request) {
    params := newTemplateParams(r)
    params.Title = "User Management"
    getTemplate(w, "users").ExecuteTemplate(w, "base", params)
}
// nextOrIndex redirects to the path given by the "next" form value,
// falling back to the index page ("/").
//
// Only the path component of the parsed value is used, and values that
// browsers would interpret as protocol-relative URLs ("//evil.com",
// "/\evil.com", "\\evil.com") are rejected, closing an open-redirect
// hole that the plain path check left open (url.Parse("////x") yields
// Path "//x", which http.Redirect would emit verbatim).
func (as *AdminServer) nextOrIndex(w http.ResponseWriter, r *http.Request) {
    next := "/"
    u, err := url.Parse(r.FormValue("next"))
    if err == nil {
        path := u.Path
        if path != "" && path[0] != '\\' &&
            !(len(path) > 1 && path[0] == '/' && (path[1] == '/' || path[1] == '\\')) {
            next = path
        }
    }
    http.Redirect(w, r, next, http.StatusFound)
}
// handleInvalidLogin re-renders the login page with a 401 status and a
// generic "Invalid Username/Password" flash (the same message for unknown
// users and wrong passwords, so the response does not reveal which failed).
func (as *AdminServer) handleInvalidLogin(w http.ResponseWriter, r *http.Request) {
    session := ctx.Get(r, "session").(*sessions.Session)
    Flash(w, r, "danger", "Invalid Username/Password")
    params := struct {
        User    models.User
        Title   string
        Flashes []interface{}
        Token   string
    }{Title: "Login", Token: csrf.Token(r)}
    params.Flashes = session.Flashes()
    session.Save(r, w)
    templates := template.New("template")
    _, err := templates.ParseFiles("templates/login.html", "templates/flashes.html")
    if err != nil {
        log.Error(err)
    }
    // w.Header().Set("Content-Type", "text/html; charset=utf-8")
    w.WriteHeader(http.StatusUnauthorized)
    // NOTE(review): template.Must panics if the parse above failed.
    template.Must(templates, err).ExecuteTemplate(w, "base", params)
}

// Webhooks is an admin-only handler that renders the webhooks page.
func (as *AdminServer) Webhooks(w http.ResponseWriter, r *http.Request) {
    params := newTemplateParams(r)
    params.Title = "Webhooks"
    getTemplate(w, "webhooks").ExecuteTemplate(w, "base", params)
}

// Impersonate allows an admin to login to a user account without needing the password.
// On POST it rewrites the session's user id to the target user's id; any
// other method simply redirects to the dashboard.
func (as *AdminServer) Impersonate(w http.ResponseWriter, r *http.Request) {
    if r.Method == "POST" {
        username := r.FormValue("username")
        u, err := models.GetUserByUsername(username)
        if err != nil {
            log.Error(err)
            http.Error(w, err.Error(), http.StatusNotFound)
            return
        }
        // Swap the session over to the impersonated user.
        session := ctx.Get(r, "session").(*sessions.Session)
        session.Values["id"] = u.Id
        session.Save(r, w)
    }
    http.Redirect(w, r, "/", http.StatusFound)
}
// Login handles the authentication flow for a user. If credentials are valid,
// a session is created.
//
// GET renders the standalone login page; POST validates the submitted
// username/password and, on success, stores the user id in the session and
// redirects via nextOrIndex.
func (as *AdminServer) Login(w http.ResponseWriter, r *http.Request) {
    params := struct {
        User    models.User
        Title   string
        Flashes []interface{}
        Token   string
    }{Title: "Login", Token: csrf.Token(r)}
    session := ctx.Get(r, "session").(*sessions.Session)
    switch {
    case r.Method == "GET":
        params.Flashes = session.Flashes()
        session.Save(r, w)
        templates := template.New("template")
        _, err := templates.ParseFiles("templates/login.html", "templates/flashes.html")
        if err != nil {
            log.Error(err)
        }
        template.Must(templates, err).ExecuteTemplate(w, "base", params)
    case r.Method == "POST":
        // Find the user with the provided username
        username, password := r.FormValue("username"), r.FormValue("password")
        u, err := models.GetUserByUsername(username)
        if err != nil {
            // NOTE(review): unknown-user and wrong-password paths return the
            // same response but skip the password check here — timing may
            // differ, a possible user-enumeration vector; confirm.
            log.Error(err)
            as.handleInvalidLogin(w, r)
            return
        }
        // Validate the user's password
        err = auth.ValidatePassword(password, u.Hash)
        if err != nil {
            log.Error(err)
            as.handleInvalidLogin(w, r)
            return
        }
        // If we've logged in, save the session and redirect to the dashboard
        session.Values["id"] = u.Id
        session.Save(r, w)
        as.nextOrIndex(w, r)
    }
}

// Logout destroys the current user session by removing the stored user id,
// then redirects to the login page with a confirmation flash.
func (as *AdminServer) Logout(w http.ResponseWriter, r *http.Request) {
    session := ctx.Get(r, "session").(*sessions.Session)
    delete(session.Values, "id")
    Flash(w, r, "success", "You have successfully logged out")
    session.Save(r, w)
    http.Redirect(w, r, "/login", http.StatusFound)
}
// ResetPassword handles the password reset flow when a password change is
// required either by the Gophish system or an administrator.
//
// This handler is meant to be used when a user is required to reset their
// password, not just when they want to.
//
// This is an important distinction since in this handler we don't require
// the user to re-enter their current password, as opposed to the flow
// through the settings handler.
//
// To that end, if the user doesn't require a password change, we will
// redirect them to the settings page.
func (as *AdminServer) ResetPassword(w http.ResponseWriter, r *http.Request) {
    u := ctx.Get(r, "user").(models.User)
    session := ctx.Get(r, "session").(*sessions.Session)
    if !u.PasswordChangeRequired {
        // Voluntary changes go through /settings, which re-checks the
        // current password.
        Flash(w, r, "info", "Please reset your password through the settings page")
        session.Save(r, w)
        http.Redirect(w, r, "/settings", http.StatusTemporaryRedirect)
        return
    }
    params := newTemplateParams(r)
    params.Title = "Reset Password"
    switch {
    case r.Method == http.MethodGet:
        params.Flashes = session.Flashes()
        session.Save(r, w)
        getTemplate(w, "reset_password").ExecuteTemplate(w, "base", params)
        return
    case r.Method == http.MethodPost:
        newPassword := r.FormValue("password")
        confirmPassword := r.FormValue("confirm_password")
        // Validate the new password against its confirmation and hash it.
        newHash, err := auth.ValidatePasswordChange(u.Hash, newPassword, confirmPassword)
        if err != nil {
            Flash(w, r, "danger", err.Error())
            params.Flashes = session.Flashes()
            session.Save(r, w)
            w.WriteHeader(http.StatusBadRequest)
            getTemplate(w, "reset_password").ExecuteTemplate(w, "base", params)
            return
        }
        // Clear the forced-change flag and persist the new hash.
        u.PasswordChangeRequired = false
        u.Hash = newHash
        if err = models.PutUser(&u); err != nil {
            Flash(w, r, "danger", err.Error())
            params.Flashes = session.Flashes()
            session.Save(r, w)
            w.WriteHeader(http.StatusInternalServerError)
            getTemplate(w, "reset_password").ExecuteTemplate(w, "base", params)
            return
        }
        // TODO: We probably want to flash a message here that the password was
        // changed successfully. The problem is that when the user resets their
        // password on first use, they will see two flashes on the dashboard-
        // one for their password reset, and one for the "no campaigns created".
        //
        // The solution to this is to revamp the empty page to be more useful,
        // like a wizard or something.
        as.nextOrIndex(w, r)
    }
}
// TODO: Make this execute the template, too

// getTemplate parses the base layout, navigation, flashes, and the named
// page template, returning the parsed set. Parse errors are logged — but
// note that template.Must then panics on the same non-nil error.
func getTemplate(w http.ResponseWriter, tmpl string) *template.Template {
    templates := template.New("template")
    _, err := templates.ParseFiles("templates/base.html", "templates/nav.html", "templates/"+tmpl+".html", "templates/flashes.html")
    if err != nil {
        log.Error(err)
    }
    return template.Must(templates, err)
}

// Flash queues a flash message of the given type (e.g. "danger", "success")
// on the request's session; it is rendered on the next page that drains
// session.Flashes().
func Flash(w http.ResponseWriter, r *http.Request, t string, m string) {
    session := ctx.Get(r, "session").(*sessions.Session)
    session.AddFlash(models.Flash{
        Type:    t,
        Message: m,
    })
}
|
4282_0
|
crossvul
|
go
|
CWE-1021
|
Improper Restriction of Rendered UI Layers or Frames - A web application is expected to place restrictions on whether it is allowed to be rendered within frames, iframes, objects, embed or applet elements.
|
go
|
package middleware
import (
"encoding/json"
"fmt"
"net/http"
"strings"
ctx "github.com/gophish/gophish/context"
"github.com/gophish/gophish/models"
"github.com/gorilla/csrf"
)
// CSRFExemptPrefixes are a list of routes that are exempt from CSRF protection.
// Matching is by URL-path prefix (see CSRFExceptions), so "/api" also exempts
// every route underneath it.
var CSRFExemptPrefixes = []string{
	"/api",
}
// CSRFExceptions is a middleware that prevents CSRF checks on routes listed in
// CSRFExemptPrefixes.
func CSRFExceptions(handler http.Handler) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		path := r.URL.Path
		for _, prefix := range CSRFExemptPrefixes {
			if !strings.HasPrefix(path, prefix) {
				continue
			}
			// Exempt route: tell gorilla/csrf to skip verification.
			r = csrf.UnsafeSkipCheck(r)
			break
		}
		handler.ServeHTTP(w, r)
	}
}
// Use allows us to stack middleware to process the request
// Example taken from https://github.com/gorilla/mux/pull/36#issuecomment-25849172
func Use(handler http.HandlerFunc, mid ...func(http.Handler) http.HandlerFunc) http.HandlerFunc {
for _, m := range mid {
handler = m(handler)
}
return handler
}
// GetContext wraps each request in a function which fills in the context for a given request.
// This includes setting the User and Session keys and values as necessary for use in later functions.
func GetContext(handler http.Handler) http.HandlerFunc {
	// Set the context here
	return func(w http.ResponseWriter, r *http.Request) {
		// Parse the request form
		err := r.ParseForm()
		if err != nil {
			http.Error(w, "Error parsing request", http.StatusInternalServerError)
			// NOTE(review): execution intentionally falls through after the
			// 500 is written here — confirm this best-effort behavior is
			// what is wanted.
		}
		// Set the context appropriately here.
		// Set the session
		session, _ := Store.Get(r, "gophish")
		// Put the session in the context so that we can
		// reuse the values in different handlers
		r = ctx.Set(r, "session", session)
		if id, ok := session.Values["id"]; ok {
			// A session id exists; resolve it to a user record. A failed
			// lookup stores a nil user instead of failing the request.
			u, err := models.GetUser(id.(int64))
			if err != nil {
				r = ctx.Set(r, "user", nil)
			} else {
				r = ctx.Set(r, "user", u)
			}
		} else {
			r = ctx.Set(r, "user", nil)
		}
		handler.ServeHTTP(w, r)
		// Remove context contents once the handler chain has completed.
		ctx.Clear(r)
	}
}
// RequireAPIKey ensures that a valid API key is set as either the api_key GET
// parameter, or a Bearer token.
func RequireAPIKey(handler http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// NOTE(review): wildcard CORS origin exposes the API to any site —
		// confirm this is intentional for a token-authenticated JSON API.
		w.Header().Set("Access-Control-Allow-Origin", "*")
		// CORS preflight: answer with the allowed methods/headers and stop.
		if r.Method == "OPTIONS" {
			w.Header().Set("Access-Control-Allow-Methods", "POST, GET, OPTIONS")
			w.Header().Set("Access-Control-Max-Age", "1000")
			w.Header().Set("Access-Control-Allow-Headers", "Origin, X-Requested-With, Content-Type, Accept")
			return
		}
		// Parse error ignored: an unparseable form simply yields an empty
		// api_key and falls through to the header check below.
		r.ParseForm()
		ak := r.Form.Get("api_key")
		// If we can't get the API key, we'll also check for the
		// Authorization Bearer token
		if ak == "" {
			tokens, ok := r.Header["Authorization"]
			if ok && len(tokens) >= 1 {
				ak = tokens[0]
				ak = strings.TrimPrefix(ak, "Bearer ")
			}
		}
		if ak == "" {
			JSONError(w, http.StatusUnauthorized, "API Key not set")
			return
		}
		// Resolve the key to a user; unknown keys are rejected with 401.
		u, err := models.GetUserByAPIKey(ak)
		if err != nil {
			JSONError(w, http.StatusUnauthorized, "Invalid API Key")
			return
		}
		// Stash the authentication results for downstream handlers.
		r = ctx.Set(r, "user", u)
		r = ctx.Set(r, "user_id", u.Id)
		r = ctx.Set(r, "api_key", ak)
		handler.ServeHTTP(w, r)
	})
}
// RequireLogin checks to see if the user is currently logged in.
// If not, the function returns a 302 redirect to the login page.
func RequireLogin(handler http.Handler) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		u := ctx.Get(r, "user")
		if u == nil {
			// Not authenticated: bounce to the login page, preserving the
			// requested path so the user can be returned after login.
			q := r.URL.Query()
			q.Set("next", r.URL.Path)
			http.Redirect(w, r, fmt.Sprintf("/login?%s", q.Encode()), http.StatusTemporaryRedirect)
			return
		}
		currentUser := u.(models.User)
		// A user with a pending forced password change may only reach the
		// reset page itself.
		if currentUser.PasswordChangeRequired && r.URL.Path != "/reset_password" {
			q := r.URL.Query()
			q.Set("next", r.URL.Path)
			http.Redirect(w, r, fmt.Sprintf("/reset_password?%s", q.Encode()), http.StatusTemporaryRedirect)
			return
		}
		handler.ServeHTTP(w, r)
	}
}
// EnforceViewOnly is a global middleware that limits the ability to edit
// objects to accounts with the PermissionModifyObjects permission.
func EnforceViewOnly(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// Read-only verbs pass straight through; any verb that can mutate
		// state requires the modify-objects permission.
		readOnly := r.Method == http.MethodGet ||
			r.Method == http.MethodHead ||
			r.Method == http.MethodOptions
		if !readOnly {
			currentUser := ctx.Get(r, "user").(models.User)
			allowed, err := currentUser.HasPermission(models.PermissionModifyObjects)
			if err != nil {
				http.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)
				return
			}
			if !allowed {
				http.Error(w, http.StatusText(http.StatusForbidden), http.StatusForbidden)
				return
			}
		}
		next.ServeHTTP(w, r)
	})
}
// RequirePermission checks to see if the user has the requested permission
// before executing the handler. If the request is unauthorized, a JSONError
// is returned.
func RequirePermission(perm string) func(http.Handler) http.HandlerFunc {
	return func(next http.Handler) http.HandlerFunc {
		return func(w http.ResponseWriter, r *http.Request) {
			currentUser := ctx.Get(r, "user").(models.User)
			allowed, err := currentUser.HasPermission(perm)
			switch {
			case err != nil:
				JSONError(w, http.StatusInternalServerError, err.Error())
			case !allowed:
				JSONError(w, http.StatusForbidden, http.StatusText(http.StatusForbidden))
			default:
				next.ServeHTTP(w, r)
			}
		}
	}
}
// JSONError returns an error in JSON format with the given
// status code and message
func JSONError(w http.ResponseWriter, c int, m string) {
	payload := models.Response{Success: false, Message: m}
	body, _ := json.MarshalIndent(payload, "", " ")
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(c)
	w.Write(body)
}
|
package middleware
import (
"encoding/json"
"fmt"
"net/http"
"strings"
ctx "github.com/gophish/gophish/context"
"github.com/gophish/gophish/models"
"github.com/gorilla/csrf"
)
// CSRFExemptPrefixes are a list of routes that are exempt from CSRF protection.
// Matching is by URL-path prefix, so "/api" covers everything underneath it.
var CSRFExemptPrefixes = []string{
	"/api",
}

// CSRFExceptions is a middleware that prevents CSRF checks on routes listed in
// CSRFExemptPrefixes.
func CSRFExceptions(handler http.Handler) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		for _, prefix := range CSRFExemptPrefixes {
			if strings.HasPrefix(r.URL.Path, prefix) {
				// Exempt route: tell gorilla/csrf to skip verification.
				r = csrf.UnsafeSkipCheck(r)
				break
			}
		}
		handler.ServeHTTP(w, r)
	}
}
// Use allows us to stack middleware to process the request
// Example taken from https://github.com/gorilla/mux/pull/36#issuecomment-25849172
//
// Wrappers are applied in order, so the last middleware supplied becomes the
// outermost layer and runs first at request time.
func Use(handler http.HandlerFunc, mid ...func(http.Handler) http.HandlerFunc) http.HandlerFunc {
	for _, m := range mid {
		handler = m(handler)
	}
	return handler
}

// GetContext wraps each request in a function which fills in the context for a given request.
// This includes setting the User and Session keys and values as necessary for use in later functions.
func GetContext(handler http.Handler) http.HandlerFunc {
	// Set the context here
	return func(w http.ResponseWriter, r *http.Request) {
		// Parse the request form
		err := r.ParseForm()
		if err != nil {
			http.Error(w, "Error parsing request", http.StatusInternalServerError)
			// NOTE(review): execution continues after the 500 is written —
			// confirm this best-effort behavior is intended.
		}
		// Set the context appropriately here.
		// Set the session
		session, _ := Store.Get(r, "gophish")
		// Put the session in the context so that we can
		// reuse the values in different handlers
		r = ctx.Set(r, "session", session)
		if id, ok := session.Values["id"]; ok {
			// Resolve the session id to a user; a failed lookup stores a
			// nil user instead of failing the request.
			u, err := models.GetUser(id.(int64))
			if err != nil {
				r = ctx.Set(r, "user", nil)
			} else {
				r = ctx.Set(r, "user", u)
			}
		} else {
			r = ctx.Set(r, "user", nil)
		}
		handler.ServeHTTP(w, r)
		// Remove context contents
		ctx.Clear(r)
	}
}
// RequireAPIKey ensures that a valid API key is set as either the api_key GET
// parameter, or a Bearer token.
func RequireAPIKey(handler http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// NOTE(review): wildcard CORS origin exposes the API to any site —
		// confirm this is intentional.
		w.Header().Set("Access-Control-Allow-Origin", "*")
		// CORS preflight: answer with allowed methods/headers and stop.
		if r.Method == "OPTIONS" {
			w.Header().Set("Access-Control-Allow-Methods", "POST, GET, OPTIONS")
			w.Header().Set("Access-Control-Max-Age", "1000")
			w.Header().Set("Access-Control-Allow-Headers", "Origin, X-Requested-With, Content-Type, Accept")
			return
		}
		// Parse error ignored: a bad form just yields an empty api_key and
		// falls through to the header check below.
		r.ParseForm()
		ak := r.Form.Get("api_key")
		// If we can't get the API key, we'll also check for the
		// Authorization Bearer token
		if ak == "" {
			tokens, ok := r.Header["Authorization"]
			if ok && len(tokens) >= 1 {
				ak = tokens[0]
				ak = strings.TrimPrefix(ak, "Bearer ")
			}
		}
		if ak == "" {
			JSONError(w, http.StatusUnauthorized, "API Key not set")
			return
		}
		// Resolve the key to a user; unknown keys are rejected with 401.
		u, err := models.GetUserByAPIKey(ak)
		if err != nil {
			JSONError(w, http.StatusUnauthorized, "Invalid API Key")
			return
		}
		// Stash authentication results for downstream handlers.
		r = ctx.Set(r, "user", u)
		r = ctx.Set(r, "user_id", u.Id)
		r = ctx.Set(r, "api_key", ak)
		handler.ServeHTTP(w, r)
	})
}

// RequireLogin checks to see if the user is currently logged in.
// If not, the function returns a 302 redirect to the login page.
func RequireLogin(handler http.Handler) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		if u := ctx.Get(r, "user"); u != nil {
			// If a password change is required for the user, then redirect them
			// to the login page
			currentUser := u.(models.User)
			if currentUser.PasswordChangeRequired && r.URL.Path != "/reset_password" {
				q := r.URL.Query()
				q.Set("next", r.URL.Path)
				http.Redirect(w, r, fmt.Sprintf("/reset_password?%s", q.Encode()), http.StatusTemporaryRedirect)
				return
			}
			handler.ServeHTTP(w, r)
			return
		}
		// Not authenticated: preserve the requested path in "next" so the
		// user can be returned after logging in.
		q := r.URL.Query()
		q.Set("next", r.URL.Path)
		http.Redirect(w, r, fmt.Sprintf("/login?%s", q.Encode()), http.StatusTemporaryRedirect)
	}
}
// EnforceViewOnly is a global middleware that limits the ability to edit
// objects to accounts with the PermissionModifyObjects permission.
func EnforceViewOnly(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// If the request is for any non-GET HTTP method, e.g. POST, PUT,
		// or DELETE, we need to ensure the user has the appropriate
		// permission.
		if r.Method != http.MethodGet && r.Method != http.MethodHead && r.Method != http.MethodOptions {
			user := ctx.Get(r, "user").(models.User)
			access, err := user.HasPermission(models.PermissionModifyObjects)
			if err != nil {
				http.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)
				return
			}
			if !access {
				http.Error(w, http.StatusText(http.StatusForbidden), http.StatusForbidden)
				return
			}
		}
		next.ServeHTTP(w, r)
	})
}

// RequirePermission checks to see if the user has the requested permission
// before executing the handler. If the request is unauthorized, a JSONError
// is returned.
func RequirePermission(perm string) func(http.Handler) http.HandlerFunc {
	return func(next http.Handler) http.HandlerFunc {
		return func(w http.ResponseWriter, r *http.Request) {
			user := ctx.Get(r, "user").(models.User)
			access, err := user.HasPermission(perm)
			if err != nil {
				JSONError(w, http.StatusInternalServerError, err.Error())
				return
			}
			if !access {
				JSONError(w, http.StatusForbidden, http.StatusText(http.StatusForbidden))
				return
			}
			next.ServeHTTP(w, r)
		}
	}
}
// ApplySecurityHeaders applies various security headers according to best-
// practices.
func ApplySecurityHeaders(next http.Handler) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
csp := "frame-ancestors 'none';"
w.Header().Set("Content-Security-Policy", csp)
w.Header().Set("X-Frame-Options", "DENY")
next.ServeHTTP(w, r)
}
}
// JSONError returns an error in JSON format with the given
// status code and message
func JSONError(w http.ResponseWriter, c int, m string) {
	// Marshal error discarded; a Response built only from these fields is
	// expected to encode cleanly — TODO(review): confirm.
	cj, _ := json.MarshalIndent(models.Response{Success: false, Message: m}, "", " ")
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(c)
	fmt.Fprintf(w, "%s", cj)
}
|
4282_1
|
crossvul
|
go
|
CWE-1021
|
Improper Restriction of Rendered UI Layers or Frames - A web application is expected to place restrictions on whether it is allowed to be rendered within frames, iframes, objects, embed or applet elements.
|
php
|
<?php
/*
 * LimeSurvey
 * Copyright (C) 2007-2016 The LimeSurvey Project Team / Carsten Schmitz
 * All rights reserved.
 * License: GNU/GPL License v3 or later, see LICENSE.php
 * LimeSurvey is free software. This version may have been modified pursuant
 * to the GNU General Public License, and as distributed it includes or
 * is derivative of works licensed under the GNU General Public License or
 * other free or open source software licenses.
 * See COPYRIGHT.php for copyright notices and details.
 */

// Release metadata; the populated array is handed back to whatever includes
// this file.
$config['versionnumber'] = '3.17.13'; // application release number
$config['dbversionnumber'] = 359; // database schema revision for this release
$config['buildnumber'] = ''; // build identifier (empty here)
$config['updatable'] = true; // presumably gates the in-app updater — confirm
$config['assetsversionnumber'] = '30095'; // asset bundle version (cache busting, presumably)
return $config;
<?php
/*
 * LimeSurvey
 * Copyright (C) 2007-2016 The LimeSurvey Project Team / Carsten Schmitz
 * All rights reserved.
 * License: GNU/GPL License v3 or later, see LICENSE.php
 * LimeSurvey is free software. This version may have been modified pursuant
 * to the GNU General Public License, and as distributed it includes or
 * is derivative of works licensed under the GNU General Public License or
 * other free or open source software licenses.
 * See COPYRIGHT.php for copyright notices and details.
 */

// Release metadata; the populated array is handed back to whatever includes
// this file.
$config['versionnumber'] = '3.17.14'; // application release number
$config['dbversionnumber'] = 359; // database schema revision for this release
$config['buildnumber'] = ''; // build identifier (empty here)
$config['updatable'] = true; // presumably gates the in-app updater — confirm
$config['assetsversionnumber'] = '30096'; // asset bundle version (cache busting, presumably)
return $config;
1075_0
|
crossvul
|
php
|
CWE-113
|
Improper Neutralization of CRLF Sequences in HTTP Headers ('HTTP Request/Response Splitting') - HTTP agents or components may include a web server, load balancer, reverse proxy, web caching proxy, application firewall, web browser, etc.
|
javascript
|
'use strict';
const util = require('util');
const net = require('net');
const HTTPParser = process.binding('http_parser').HTTPParser;
const assert = require('assert').ok;
const common = require('_http_common');
const parsers = common.parsers;
const freeParser = common.freeParser;
const debug = common.debug;
const CRLF = common.CRLF;
const continueExpression = common.continueExpression;
const chunkExpression = common.chunkExpression;
const httpSocketSetup = common.httpSocketSetup;
const OutgoingMessage = require('_http_outgoing').OutgoingMessage;
// Map of HTTP status code -> standard reason phrase (RFC sources noted
// inline). Exported for users and consulted by writeHead() when no explicit
// reason phrase is supplied.
const STATUS_CODES = exports.STATUS_CODES = {
  100: 'Continue',
  101: 'Switching Protocols',
  102: 'Processing',                 // RFC 2518, obsoleted by RFC 4918
  200: 'OK',
  201: 'Created',
  202: 'Accepted',
  203: 'Non-Authoritative Information',
  204: 'No Content',
  205: 'Reset Content',
  206: 'Partial Content',
  207: 'Multi-Status',               // RFC 4918
  208: 'Already Reported',
  226: 'IM Used',
  300: 'Multiple Choices',
  301: 'Moved Permanently',
  302: 'Found',
  303: 'See Other',
  304: 'Not Modified',
  305: 'Use Proxy',
  307: 'Temporary Redirect',
  308: 'Permanent Redirect',         // RFC 7238
  400: 'Bad Request',
  401: 'Unauthorized',
  402: 'Payment Required',
  403: 'Forbidden',
  404: 'Not Found',
  405: 'Method Not Allowed',
  406: 'Not Acceptable',
  407: 'Proxy Authentication Required',
  408: 'Request Timeout',
  409: 'Conflict',
  410: 'Gone',
  411: 'Length Required',
  412: 'Precondition Failed',
  413: 'Payload Too Large',
  414: 'URI Too Long',
  415: 'Unsupported Media Type',
  416: 'Range Not Satisfiable',
  417: 'Expectation Failed',
  418: 'I\'m a teapot',              // RFC 2324
  421: 'Misdirected Request',
  422: 'Unprocessable Entity',       // RFC 4918
  423: 'Locked',                     // RFC 4918
  424: 'Failed Dependency',          // RFC 4918
  425: 'Unordered Collection',       // RFC 4918
  426: 'Upgrade Required',           // RFC 2817
  428: 'Precondition Required',      // RFC 6585
  429: 'Too Many Requests',          // RFC 6585
  431: 'Request Header Fields Too Large', // RFC 6585
  451: 'Unavailable For Legal Reasons',
  500: 'Internal Server Error',
  501: 'Not Implemented',
  502: 'Bad Gateway',
  503: 'Service Unavailable',
  504: 'Gateway Timeout',
  505: 'HTTP Version Not Supported',
  506: 'Variant Also Negotiates',    // RFC 2295
  507: 'Insufficient Storage',       // RFC 4918
  508: 'Loop Detected',
  509: 'Bandwidth Limit Exceeded',
  510: 'Not Extended',               // RFC 2774
  511: 'Network Authentication Required' // RFC 6585
};

// Integer index of the parser's kOnExecute callback slot.
const kOnExecute = HTTPParser.kOnExecute | 0;
// Outgoing message representing the server's reply to a single request.
function ServerResponse(req) {
  OutgoingMessage.call(this);

  // HEAD responses never carry a body.
  if (req.method === 'HEAD') this._hasBody = false;

  this.sendDate = true;

  // Pre-HTTP/1.1 peers: no keep-alive, and only chunk the body if the
  // client advertised chunked support in its TE header.
  if (req.httpVersionMajor < 1 || req.httpVersionMinor < 1) {
    this.useChunkedEncodingByDefault = chunkExpression.test(req.headers.te);
    this.shouldKeepAlive = false;
  }
}
util.inherits(ServerResponse, OutgoingMessage);

ServerResponse.prototype._finish = function() {
  // Fire tracing/metrics probes before the base-class finish logic runs.
  DTRACE_HTTP_SERVER_RESPONSE(this.connection);
  LTTNG_HTTP_SERVER_RESPONSE(this.connection);
  COUNTER_HTTP_SERVER_RESPONSE();
  OutgoingMessage.prototype._finish.call(this);
};

exports.ServerResponse = ServerResponse;

// Defaults used until writeHead() (or the implicit header) overrides them.
ServerResponse.prototype.statusCode = 200;
ServerResponse.prototype.statusMessage = undefined;

function onServerResponseClose() {
  // EventEmitter.emit makes a copy of the 'close' listeners array before
  // calling the listeners. detachSocket() unregisters onServerResponseClose
  // but if detachSocket() is called, directly or indirectly, by a 'close'
  // listener, onServerResponseClose is still in that copy of the listeners
  // array. That is, in the example below, b still gets called even though
  // it's been removed by a:
  //
  //   var EventEmitter = require('events');
  //   var obj = new EventEmitter();
  //   obj.on('event', a);
  //   obj.on('event', b);
  //   function a() { obj.removeListener('event', b) }
  //   function b() { throw "BAM!" }
  //   obj.emit('event');  // throws
  //
  // Ergo, we need to deal with stale 'close' events and handle the case
  // where the ServerResponse object has already been deconstructed.
  // Fortunately, that requires only a single if check. :-)
  if (this._httpMessage) this._httpMessage.emit('close');
}

// Binds this response to `socket` (which must be free) and flushes any
// output buffered before the socket was available.
ServerResponse.prototype.assignSocket = function(socket) {
  assert(!socket._httpMessage);
  socket._httpMessage = this;
  socket.on('close', onServerResponseClose);
  this.socket = socket;
  this.connection = socket;
  this.emit('socket', socket);
  this._flush();
};

// Reverses assignSocket(): releases the socket for the next response.
ServerResponse.prototype.detachSocket = function(socket) {
  assert(socket._httpMessage === this);
  socket.removeListener('close', onServerResponseClose);
  socket._httpMessage = null;
  this.socket = this.connection = null;
};

// Sends the interim "100 Continue" status line directly to the wire.
ServerResponse.prototype.writeContinue = function(cb) {
  this._writeRaw('HTTP/1.1 100 Continue' + CRLF + CRLF, 'ascii', cb);
  this._sent100 = true;
};

// Invoked when body output begins without an explicit writeHead() call.
ServerResponse.prototype._implicitHeader = function() {
  this.writeHead(this.statusCode);
};
// Writes the response status line and headers.
//
//   writeHead(statusCode[, headers])
//   writeHead(statusCode, reasonPhrase[, headers])
//
// The reason phrase defaults to the standard text for the code (or
// 'unknown'). Throws a RangeError for status codes outside 100-999 and an
// Error if the reason phrase contains characters that would corrupt the
// status line.
ServerResponse.prototype.writeHead = function(statusCode, reason, obj) {
  var headers;

  if (typeof reason === 'string') {
    // writeHead(statusCode, reasonPhrase[, headers])
    this.statusMessage = reason;
  } else {
    // writeHead(statusCode[, headers])
    this.statusMessage =
        this.statusMessage || STATUS_CODES[statusCode] || 'unknown';
    obj = reason;
  }
  this.statusCode = statusCode;

  if (this._headers) {
    // Slow-case: when progressive API and header fields are passed.
    if (obj) {
      var keys = Object.keys(obj);
      for (var i = 0; i < keys.length; i++) {
        var k = keys[i];
        if (k) this.setHeader(k, obj[k]);
      }
    }
    // only progressive api is used
    headers = this._renderHeaders();
  } else {
    // only writeHead() called
    headers = obj;
  }

  statusCode |= 0;
  if (statusCode < 100 || statusCode > 999)
    throw new RangeError(`Invalid status code: ${statusCode}`);

  // [SECURITY FIX] The reason phrase is spliced verbatim into the status
  // line below. A CR/LF (or other control character) in a caller-supplied
  // phrase would terminate the status line early and let the caller inject
  // arbitrary headers or an entire second response (HTTP response
  // splitting, CWE-113). Reject anything outside TAB / printable ASCII /
  // obs-text before it reaches the wire.
  if (/[^\t\x20-\x7e\x80-\xff]/.test(this.statusMessage))
    throw new Error('Invalid character in statusMessage.');

  var statusLine = 'HTTP/1.1 ' + statusCode.toString() + ' ' +
                   this.statusMessage + CRLF;

  if (statusCode === 204 || statusCode === 304 ||
      (100 <= statusCode && statusCode <= 199)) {
    // RFC 2616, 10.2.5:
    // The 204 response MUST NOT include a message-body, and thus is always
    // terminated by the first empty line after the header fields.
    // RFC 2616, 10.3.5:
    // The 304 response MUST NOT contain a message-body, and thus is always
    // terminated by the first empty line after the header fields.
    // RFC 2616, 10.1 Informational 1xx:
    // This class of status code indicates a provisional response,
    // consisting only of the Status-Line and optional headers, and is
    // terminated by an empty line.
    this._hasBody = false;
  }

  // don't keep alive connections where the client expects 100 Continue
  // but we sent a final status; they may put extra bytes on the wire.
  if (this._expect_continue && !this._sent100) {
    this.shouldKeepAlive = false;
  }

  this._storeHeader(statusLine, headers);
};
// Legacy alias for writeHead().
ServerResponse.prototype.writeHeader = function() {
  this.writeHead.apply(this, arguments);
};

// HTTP server built on net.Server; each accepted socket is wired up by
// connectionListener, and `requestListener` (if given) handles 'request'.
function Server(requestListener) {
  if (!(this instanceof Server)) return new Server(requestListener);
  net.Server.call(this, { allowHalfOpen: true });

  if (requestListener) {
    this.addListener('request', requestListener);
  }

  /* eslint-disable max-len */
  // Similar option to this. Too lazy to write my own docs.
  // http://www.squid-cache.org/Doc/config/half_closed_clients/
  // http://wiki.squid-cache.org/SquidFaq/InnerWorkings#What_is_a_half-closed_filedescriptor.3F
  /* eslint-enable max-len */
  this.httpAllowHalfOpen = false;

  this.addListener('connection', connectionListener);

  // Default idle timeout (ms) applied to each socket in connectionListener.
  this.timeout = 2 * 60 * 1000;

  this._pendingResponseData = 0;
}
util.inherits(Server, net.Server);

// Sets the per-socket idle timeout; an optional callback is attached as a
// 'timeout' listener. Returns the server for chaining.
Server.prototype.setTimeout = function(msecs, callback) {
  this.timeout = msecs;
  if (callback)
    this.on('timeout', callback);
  return this;
};

exports.Server = Server;
// Per-connection setup. Wires an HTTP parser to `socket`, tracks pipelined
// requests (`incoming`) and their queued responses (`outgoing`), applies
// back-pressure when buffered response data exceeds the socket's high
// watermark, and dispatches 'request' / 'upgrade' / 'connect' events on the
// server (`this`).
function connectionListener(socket) {
  var self = this;
  var outgoing = [];
  var incoming = [];
  var outgoingData = 0;

  function updateOutgoingData(delta) {
    // `outgoingData` is an approximate amount of bytes queued through all
    // inactive responses. If more data than the high watermark is queued - we
    // need to pause TCP socket/HTTP parser, and wait until the data will be
    // sent to the client.
    outgoingData += delta;
    if (socket._paused && outgoingData < socket._writableState.highWaterMark)
      return socketOnDrain();
  }

  // Drops every request still queued for this connection, notifying each.
  function abortIncoming() {
    while (incoming.length) {
      var req = incoming.shift();
      req.emit('aborted');
      req.emit('close');
    }
    // abort socket._httpMessage ?
  }

  function serverSocketCloseListener() {
    debug('server socket close');
    // mark this parser as reusable
    if (this.parser) {
      freeParser(this.parser, null, this);
    }

    abortIncoming();
  }

  debug('SERVER new http connection');

  httpSocketSetup(socket);

  // If the user has added a listener to the server,
  // request, or response, then it's their responsibility.
  // otherwise, destroy on timeout by default
  if (self.timeout)
    socket.setTimeout(self.timeout);
  socket.on('timeout', function() {
    var req = socket.parser && socket.parser.incoming;
    var reqTimeout = req && !req.complete && req.emit('timeout', socket);
    var res = socket._httpMessage;
    var resTimeout = res && res.emit('timeout', socket);
    var serverTimeout = self.emit('timeout', socket);

    // Only destroy the socket if nobody claimed the timeout event.
    if (!reqTimeout && !resTimeout && !serverTimeout)
      socket.destroy();
  });

  var parser = parsers.alloc();
  parser.reinitialize(HTTPParser.REQUEST);
  parser.socket = socket;
  socket.parser = parser;
  parser.incoming = null;

  // Propagate headers limit from server instance to parser
  if (typeof this.maxHeadersCount === 'number') {
    parser.maxHeaderPairs = this.maxHeadersCount << 1;
  } else {
    // Set default value because parser may be reused from FreeList
    parser.maxHeaderPairs = 2000;
  }

  socket.addListener('error', socketOnError);
  socket.addListener('close', serverSocketCloseListener);
  parser.onIncoming = parserOnIncoming;
  socket.on('end', socketOnEnd);
  socket.on('data', socketOnData);

  // We are consuming socket, so it won't get any actual data
  socket.on('resume', onSocketResume);
  socket.on('pause', onSocketPause);
  socket.on('drain', socketOnDrain);

  // Override on to unconsume on `data`, `readable` listeners
  socket.on = socketOnWrap;

  // When available, feed the raw stream handle to the parser directly and
  // bypass the JS 'data' path.
  var external = socket._handle._externalStream;
  if (external) {
    parser._consumed = true;
    parser.consume(external);
  }
  external = null;
  parser[kOnExecute] = onParserExecute;

  // TODO(isaacs): Move all these functions out of here

  function socketOnError(e) {
    // Ignore further errors
    this.removeListener('error', socketOnError);
    this.on('error', () => {});

    if (!self.emit('clientError', e, this))
      this.destroy(e);
  }

  function socketOnData(d) {
    assert(!socket._paused);
    debug('SERVER socketOnData %d', d.length);

    var ret = parser.execute(d);
    onParserExecuteCommon(ret, d);
  }

  // Native-side counterpart of socketOnData, used while the parser has
  // consumed the stream handle.
  function onParserExecute(ret, d) {
    socket._unrefTimer();
    debug('SERVER socketOnParserExecute %d', ret);
    onParserExecuteCommon(ret, undefined);
  }

  function onParserExecuteCommon(ret, d) {
    if (ret instanceof Error) {
      debug('parse error');
      socketOnError.call(socket, ret);
    } else if (parser.incoming && parser.incoming.upgrade) {
      // Upgrade or CONNECT
      var bytesParsed = ret;
      var req = parser.incoming;
      debug('SERVER upgrade or connect', req.method);

      if (!d)
        d = parser.getCurrentBuffer();

      // Hand the raw socket over: detach all HTTP machinery first.
      socket.removeListener('data', socketOnData);
      socket.removeListener('end', socketOnEnd);
      socket.removeListener('close', serverSocketCloseListener);
      unconsume(parser, socket);
      parser.finish();
      freeParser(parser, req, null);
      parser = null;

      var eventName = req.method === 'CONNECT' ? 'connect' : 'upgrade';
      if (self.listenerCount(eventName) > 0) {
        debug('SERVER have listener for %s', eventName);
        var bodyHead = d.slice(bytesParsed, d.length);

        // TODO(isaacs): Need a way to reset a stream to fresh state
        // IE, not flowing, and not explicitly paused.
        socket._readableState.flowing = null;
        self.emit(eventName, req, socket, bodyHead);
      } else {
        // Got upgrade header or CONNECT method, but have no handler.
        socket.destroy();
      }
    }

    if (socket._paused && socket.parser) {
      // onIncoming paused the socket, we should pause the parser as well
      debug('pause parser');
      socket.parser.pause();
    }
  }

  function socketOnEnd() {
    var socket = this;
    var ret = parser.finish();

    if (ret instanceof Error) {
      debug('parse error');
      socketOnError.call(socket, ret);
      return;
    }

    if (!self.httpAllowHalfOpen) {
      abortIncoming();
      if (socket.writable) socket.end();
    } else if (outgoing.length) {
      // Half-open allowed: mark the last queued response as final.
      outgoing[outgoing.length - 1]._last = true;
    } else if (socket._httpMessage) {
      socket._httpMessage._last = true;
    } else {
      if (socket.writable) socket.end();
    }
  }

  // The following callback is issued after the headers have been read on a
  // new message. In this callback we setup the response object and pass it
  // to the user.

  socket._paused = false;
  function socketOnDrain() {
    var needPause = outgoingData > socket._writableState.highWaterMark;

    // If we previously paused, then start reading again.
    if (socket._paused && !needPause) {
      socket._paused = false;
      if (socket.parser)
        socket.parser.resume();
      socket.resume();
    }
  }

  function parserOnIncoming(req, shouldKeepAlive) {
    incoming.push(req);

    // If the writable end isn't consuming, then stop reading
    // so that we don't become overwhelmed by a flood of
    // pipelined requests that may never be resolved.
    if (!socket._paused) {
      var needPause = socket._writableState.needDrain ||
          outgoingData >= socket._writableState.highWaterMark;
      if (needPause) {
        socket._paused = true;
        // We also need to pause the parser, but don't do that until after
        // the call to execute, because we may still be processing the last
        // chunk.
        socket.pause();
      }
    }

    var res = new ServerResponse(req);
    res._onPendingData = updateOutgoingData;

    res.shouldKeepAlive = shouldKeepAlive;
    DTRACE_HTTP_SERVER_REQUEST(req, socket);
    LTTNG_HTTP_SERVER_REQUEST(req, socket);
    COUNTER_HTTP_SERVER_REQUEST();

    if (socket._httpMessage) {
      // There are already pending outgoing res, append.
      outgoing.push(res);
    } else {
      res.assignSocket(socket);
    }

    // When we're finished writing the response, check if this is the last
    // response, if so destroy the socket.
    res.on('finish', resOnFinish);
    function resOnFinish() {
      // Usually the first incoming element should be our request.  it may
      // be that in the case abortIncoming() was called that the incoming
      // array will be empty.
      assert(incoming.length === 0 || incoming[0] === req);

      incoming.shift();

      // if the user never called req.read(), and didn't pipe() or
      // .resume() or .on('data'), then we call req._dump() so that the
      // bytes will be pulled off the wire.
      if (!req._consuming && !req._readableState.resumeScheduled)
        req._dump();

      res.detachSocket(socket);

      if (res._last) {
        socket.destroySoon();
      } else {
        // start sending the next message
        var m = outgoing.shift();
        if (m) {
          m.assignSocket(socket);
        }
      }
    }

    // Expect: 100-continue handling applies only to HTTP/1.1 requests.
    if (req.headers.expect !== undefined &&
        (req.httpVersionMajor == 1 && req.httpVersionMinor == 1)) {
      if (continueExpression.test(req.headers.expect)) {
        res._expect_continue = true;

        if (self.listenerCount('checkContinue') > 0) {
          self.emit('checkContinue', req, res);
        } else {
          res.writeContinue();
          self.emit('request', req, res);
        }
      } else {
        if (self.listenerCount('checkExpectation') > 0) {
          self.emit('checkExpectation', req, res);
        } else {
          res.writeHead(417);
          res.end();
        }
      }
    } else {
      self.emit('request', req, res);
    }
    return false; // Not a HEAD response. (Not even a response!)
  }
}
exports._connectionListener = connectionListener;
function onSocketResume() {
  // It may seem that the socket is resumed, but this is an enemy's trick to
  // deceive us! `resume` is emitted asynchronously, and may be called from
  // `incoming.readStart()`. Stop the socket again here, just to preserve the
  // state.
  //
  // We don't care about stream semantics for the consumed socket anyway.
  if (this._paused) {
    this.pause();
    return;
  }

  if (this._handle && !this._handle.reading) {
    this._handle.reading = true;
    this._handle.readStart();
  }
}

function onSocketPause() {
  if (this._handle && this._handle.reading) {
    this._handle.reading = false;
    this._handle.readStop();
  }
}

// Undoes parser.consume(): hands the raw stream back to normal socket reads
// and removes the consumed-mode pause/resume hooks.
function unconsume(parser, socket) {
  if (socket._handle) {
    if (parser._consumed)
      parser.unconsume(socket._handle._externalStream);
    parser._consumed = false;
    socket.removeListener('pause', onSocketPause);
    socket.removeListener('resume', onSocketResume);
  }
}

// Replacement for socket.on installed while the parser has consumed the
// socket: attaching a 'data'/'readable' listener switches the socket back to
// unconsumed mode so those events can actually fire.
function socketOnWrap(ev, fn) {
  var res = net.Socket.prototype.on.call(this, ev, fn);
  if (!this.parser) {
    this.on = net.Socket.prototype.on;
    return res;
  }

  if (ev === 'data' || ev === 'readable')
    unconsume(this.parser, this);

  return res;
}
|
'use strict';
const util = require('util');
const net = require('net');
const HTTPParser = process.binding('http_parser').HTTPParser;
const assert = require('assert').ok;
const common = require('_http_common');
const parsers = common.parsers;
const freeParser = common.freeParser;
const debug = common.debug;
const CRLF = common.CRLF;
const continueExpression = common.continueExpression;
const chunkExpression = common.chunkExpression;
const httpSocketSetup = common.httpSocketSetup;
const OutgoingMessage = require('_http_outgoing').OutgoingMessage;
// Map of HTTP status code -> standard reason phrase (RFC sources noted
// inline). Exported for users and consulted by writeHead() when no explicit
// reason phrase is supplied.
const STATUS_CODES = exports.STATUS_CODES = {
  100: 'Continue',
  101: 'Switching Protocols',
  102: 'Processing',                 // RFC 2518, obsoleted by RFC 4918
  200: 'OK',
  201: 'Created',
  202: 'Accepted',
  203: 'Non-Authoritative Information',
  204: 'No Content',
  205: 'Reset Content',
  206: 'Partial Content',
  207: 'Multi-Status',               // RFC 4918
  208: 'Already Reported',
  226: 'IM Used',
  300: 'Multiple Choices',
  301: 'Moved Permanently',
  302: 'Found',
  303: 'See Other',
  304: 'Not Modified',
  305: 'Use Proxy',
  307: 'Temporary Redirect',
  308: 'Permanent Redirect',         // RFC 7238
  400: 'Bad Request',
  401: 'Unauthorized',
  402: 'Payment Required',
  403: 'Forbidden',
  404: 'Not Found',
  405: 'Method Not Allowed',
  406: 'Not Acceptable',
  407: 'Proxy Authentication Required',
  408: 'Request Timeout',
  409: 'Conflict',
  410: 'Gone',
  411: 'Length Required',
  412: 'Precondition Failed',
  413: 'Payload Too Large',
  414: 'URI Too Long',
  415: 'Unsupported Media Type',
  416: 'Range Not Satisfiable',
  417: 'Expectation Failed',
  418: 'I\'m a teapot',              // RFC 2324
  421: 'Misdirected Request',
  422: 'Unprocessable Entity',       // RFC 4918
  423: 'Locked',                     // RFC 4918
  424: 'Failed Dependency',          // RFC 4918
  425: 'Unordered Collection',       // RFC 4918
  426: 'Upgrade Required',           // RFC 2817
  428: 'Precondition Required',      // RFC 6585
  429: 'Too Many Requests',          // RFC 6585
  431: 'Request Header Fields Too Large', // RFC 6585
  451: 'Unavailable For Legal Reasons',
  500: 'Internal Server Error',
  501: 'Not Implemented',
  502: 'Bad Gateway',
  503: 'Service Unavailable',
  504: 'Gateway Timeout',
  505: 'HTTP Version Not Supported',
  506: 'Variant Also Negotiates',    // RFC 2295
  507: 'Insufficient Storage',       // RFC 4918
  508: 'Loop Detected',
  509: 'Bandwidth Limit Exceeded',
  510: 'Not Extended',               // RFC 2774
  511: 'Network Authentication Required' // RFC 6585
};

// Integer index of the parser's kOnExecute callback slot.
const kOnExecute = HTTPParser.kOnExecute | 0;
// Server-side outgoing message, constructed for each parsed request.
// `req` is the matching IncomingMessage; its method and HTTP version
// determine body suppression and default framing for this response.
function ServerResponse(req) {
  OutgoingMessage.call(this);

  // A HEAD response carries headers only — never emit a body for it.
  if (req.method === 'HEAD')
    this._hasBody = false;

  // Servers send a Date header by default (RFC 7231 requires one).
  this.sendDate = true;

  const preHttp11 = req.httpVersionMajor < 1 || req.httpVersionMinor < 1;
  if (preHttp11) {
    // HTTP/1.0-and-older clients understand chunked framing only if they
    // advertised it in the TE header, and cannot reliably keep-alive.
    this.useChunkedEncodingByDefault = chunkExpression.test(req.headers.te);
    this.shouldKeepAlive = false;
  }
}
// ServerResponse extends OutgoingMessage with server-side specifics.
util.inherits(ServerResponse, OutgoingMessage);

// Fires the DTrace/LTTng/perf-counter probes for a completed response,
// then delegates to the base-class _finish().
ServerResponse.prototype._finish = function() {
  DTRACE_HTTP_SERVER_RESPONSE(this.connection);
  LTTNG_HTTP_SERVER_RESPONSE(this.connection);
  COUNTER_HTTP_SERVER_RESPONSE();
  OutgoingMessage.prototype._finish.call(this);
};

exports.ServerResponse = ServerResponse;

// Defaults until writeHead() runs: 200 with no explicit reason phrase.
ServerResponse.prototype.statusCode = 200;
ServerResponse.prototype.statusMessage = undefined;
// 'close' listener installed on the socket by assignSocket(). Forwards the
// close to the response object still attached to the socket, if any.
//
// EventEmitter.emit copies the 'close' listener array before invoking it,
// so even after detachSocket() removed this listener it can still run for
// an in-flight emit. That means we may fire after the ServerResponse has
// already been torn down — hence the guard on _httpMessage.
function onServerResponseClose() {
  var res = this._httpMessage;
  if (res)
    res.emit('close');
}
// Attaches this response to `socket` and starts flushing any buffered
// output. A socket may carry at most one active response at a time.
ServerResponse.prototype.assignSocket = function(socket) {
  assert(!socket._httpMessage);

  // Cross-link the pair and watch for premature socket closure.
  socket._httpMessage = this;
  socket.on('close', onServerResponseClose);
  this.connection = socket;
  this.socket = socket;

  this.emit('socket', socket);
  this._flush();
};
// Reverses assignSocket(): unlinks this response from `socket` so the next
// pipelined response can take over the connection.
ServerResponse.prototype.detachSocket = function(socket) {
  assert(socket._httpMessage === this);

  socket.removeListener('close', onServerResponseClose);
  socket._httpMessage = null;
  this.connection = null;
  this.socket = null;
};
// Writes an interim "100 Continue" status line straight to the socket and
// records that it was sent; writeHead() consults _sent100 when deciding
// whether the connection may be kept alive.
ServerResponse.prototype.writeContinue = function(cb) {
  this._writeRaw('HTTP/1.1 100 Continue' + CRLF + CRLF, 'ascii', cb);
  this._sent100 = true;
};

// Invoked by OutgoingMessage when body data is written before any explicit
// writeHead(); emits the header block using the current statusCode.
ServerResponse.prototype._implicitHeader = function() {
  this.writeHead(this.statusCode);
};
// Writes the response status line and headers.
//
//   writeHead(statusCode[, headers])
//   writeHead(statusCode, reasonPhrase[, headers])
//
// Throws RangeError for a status code outside 100-999 and Error for a
// reason phrase containing invalid (header-injection-capable) characters.
//
// Fix vs. the previous version: the status code is now coerced and
// range-checked, and the reason phrase validated, BEFORE any state on the
// response (statusCode, statusMessage, headers) is mutated. Previously an
// invalid code was first assigned to this.statusCode and headers were
// rendered, and only then did validation throw — leaving the response in a
// half-updated state. Validating first also stops a malicious reason
// phrase before any header side effects occur (response splitting).
ServerResponse.prototype.writeHead = function(statusCode, reason, obj) {
  var headers;

  // Coerce to an integer and validate up front, before mutating anything.
  statusCode |= 0;
  if (statusCode < 100 || statusCode > 999)
    throw new RangeError(`Invalid status code: ${statusCode}`);

  if (typeof reason === 'string') {
    // writeHead(statusCode, reasonPhrase[, headers])
    this.statusMessage = reason;
  } else {
    // writeHead(statusCode[, headers])
    this.statusMessage =
        this.statusMessage || STATUS_CODES[statusCode] || 'unknown';
    obj = reason;
  }

  // Reject control characters in the reason phrase; a crafted message
  // could otherwise inject extra headers into the status line.
  if (common._checkInvalidHeaderChar(this.statusMessage))
    throw new Error('Invalid character in statusMessage.');

  this.statusCode = statusCode;

  if (this._headers) {
    // Slow-case: when progressive API and header fields are passed.
    if (obj) {
      var keys = Object.keys(obj);
      for (var i = 0; i < keys.length; i++) {
        var k = keys[i];
        if (k) this.setHeader(k, obj[k]);
      }
    }
    // only progressive api is used
    headers = this._renderHeaders();
  } else {
    // only writeHead() called
    headers = obj;
  }

  var statusLine = 'HTTP/1.1 ' + statusCode.toString() + ' ' +
                   this.statusMessage + CRLF;

  if (statusCode === 204 || statusCode === 304 ||
      (100 <= statusCode && statusCode <= 199)) {
    // RFC 2616, 10.2.5:
    // The 204 response MUST NOT include a message-body, and thus is always
    // terminated by the first empty line after the header fields.
    // RFC 2616, 10.3.5:
    // The 304 response MUST NOT contain a message-body, and thus is always
    // terminated by the first empty line after the header fields.
    // RFC 2616, 10.1 Informational 1xx:
    // This class of status code indicates a provisional response,
    // consisting only of the Status-Line and optional headers, and is
    // terminated by an empty line.
    this._hasBody = false;
  }

  // don't keep alive connections where the client expects 100 Continue
  // but we sent a final status; they may put extra bytes on the wire.
  if (this._expect_continue && !this._sent100) {
    this.shouldKeepAlive = false;
  }

  this._storeHeader(statusLine, headers);
};
// Legacy alias for writeHead(); forwards every argument unchanged.
ServerResponse.prototype.writeHeader = function() {
  this.writeHead(...arguments);
};
// HTTP server constructor; callable with or without `new`. Builds on
// net.Server with allowHalfOpen so request data can still arrive after the
// client half-closes, and installs the per-connection HTTP handler.
function Server(requestListener) {
  if (!(this instanceof Server)) return new Server(requestListener);
  net.Server.call(this, { allowHalfOpen: true });

  // Optional convenience: register the 'request' listener at construction.
  if (requestListener) {
    this.addListener('request', requestListener);
  }

  /* eslint-disable max-len */
  // Similar option to this. Too lazy to write my own docs.
  // http://www.squid-cache.org/Doc/config/half_closed_clients/
  // http://wiki.squid-cache.org/SquidFaq/InnerWorkings#What_is_a_half-closed_filedescriptor.3F
  /* eslint-enable max-len */
  this.httpAllowHalfOpen = false;

  this.addListener('connection', connectionListener);

  // Default per-socket inactivity timeout: two minutes.
  this.timeout = 2 * 60 * 1000;

  // Approximate bytes buffered across this server's pending responses.
  this._pendingResponseData = 0;
}
util.inherits(Server, net.Server);

// Sets the inactivity timeout (in milliseconds) applied to each incoming
// connection, optionally registering `callback` as a 'timeout' listener.
// Returns `this` for chaining. Affects sockets accepted after the call.
Server.prototype.setTimeout = function(msecs, callback) {
  this.timeout = msecs;
  if (callback)
    this.on('timeout', callback);
  return this;
};

exports.Server = Server;
// Invoked for each accepted TCP connection. Allocates and wires up an HTTP
// parser for the socket, installs all socket/parser event handlers, and
// manages the FIFO queues of pipelined incoming requests and outgoing
// responses for this connection. NOTE(review): statement order here is
// load-bearing (parser consumption, listener installation, the socket.on
// override) — do not reorder.
function connectionListener(socket) {
  var self = this;
  var outgoing = [];   // responses waiting for the socket to free up
  var incoming = [];   // requests parsed but not yet fully answered
  var outgoingData = 0;

  // Backpressure accounting: called by each response as it queues/drains
  // data while not attached to the socket.
  function updateOutgoingData(delta) {
    // `outgoingData` is an approximate amount of bytes queued through all
    // inactive responses. If more data than the high watermark is queued - we
    // need to pause TCP socket/HTTP parser, and wait until the data will be
    // sent to the client.
    outgoingData += delta;
    if (socket._paused && outgoingData < socket._writableState.highWaterMark)
      return socketOnDrain();
  }

  // Abort every request that has been parsed but not yet completed.
  function abortIncoming() {
    while (incoming.length) {
      var req = incoming.shift();
      req.emit('aborted');
      req.emit('close');
    }
    // abort socket._httpMessage ?
  }

  function serverSocketCloseListener() {
    debug('server socket close');
    // mark this parser as reusable
    if (this.parser) {
      freeParser(this.parser, null, this);
    }
    abortIncoming();
  }

  debug('SERVER new http connection');

  httpSocketSetup(socket);

  // If the user has added a listener to the server,
  // request, or response, then it's their responsibility.
  // otherwise, destroy on timeout by default
  if (self.timeout)
    socket.setTimeout(self.timeout);
  socket.on('timeout', function() {
    var req = socket.parser && socket.parser.incoming;
    var reqTimeout = req && !req.complete && req.emit('timeout', socket);
    var res = socket._httpMessage;
    var resTimeout = res && res.emit('timeout', socket);
    var serverTimeout = self.emit('timeout', socket);
    // Only destroy when nobody handled the timeout on req, res or server.
    if (!reqTimeout && !resTimeout && !serverTimeout)
      socket.destroy();
  });

  var parser = parsers.alloc();
  parser.reinitialize(HTTPParser.REQUEST);
  parser.socket = socket;
  socket.parser = parser;
  parser.incoming = null;

  // Propagate headers limit from server instance to parser
  if (typeof this.maxHeadersCount === 'number') {
    parser.maxHeaderPairs = this.maxHeadersCount << 1;
  } else {
    // Set default value because parser may be reused from FreeList
    parser.maxHeaderPairs = 2000;
  }

  socket.addListener('error', socketOnError);
  socket.addListener('close', serverSocketCloseListener);
  parser.onIncoming = parserOnIncoming;
  socket.on('end', socketOnEnd);
  socket.on('data', socketOnData);

  // We are consuming socket, so it won't get any actual data
  socket.on('resume', onSocketResume);
  socket.on('pause', onSocketPause);
  socket.on('drain', socketOnDrain);

  // Override on to unconsume on `data`, `readable` listeners
  socket.on = socketOnWrap;

  // Hand the raw stream to the native parser when the handle supports it;
  // bytes then bypass JS-land entirely until unconsume() is called.
  var external = socket._handle._externalStream;
  if (external) {
    parser._consumed = true;
    parser.consume(external);
  }
  external = null;
  parser[kOnExecute] = onParserExecute;

  // TODO(isaacs): Move all these functions out of here

  function socketOnError(e) {
    // Ignore further errors
    this.removeListener('error', socketOnError);
    this.on('error', () => {});
    if (!self.emit('clientError', e, this))
      this.destroy(e);
  }

  // JS-land data path (used when the parser has not consumed the handle).
  function socketOnData(d) {
    assert(!socket._paused);
    debug('SERVER socketOnData %d', d.length);
    var ret = parser.execute(d);
    onParserExecuteCommon(ret, d);
  }

  // Native data path: the parser executed directly on the consumed stream.
  function onParserExecute(ret, d) {
    socket._unrefTimer();
    debug('SERVER socketOnParserExecute %d', ret);
    onParserExecuteCommon(ret, undefined);
  }

  function onParserExecuteCommon(ret, d) {
    if (ret instanceof Error) {
      debug('parse error');
      socketOnError.call(socket, ret);
    } else if (parser.incoming && parser.incoming.upgrade) {
      // Upgrade or CONNECT
      var bytesParsed = ret;
      var req = parser.incoming;
      debug('SERVER upgrade or connect', req.method);

      if (!d)
        d = parser.getCurrentBuffer();

      // Detach HTTP machinery from the socket; from here on the socket is
      // a raw duplex stream owned by the upgrade/connect handler.
      socket.removeListener('data', socketOnData);
      socket.removeListener('end', socketOnEnd);
      socket.removeListener('close', serverSocketCloseListener);
      unconsume(parser, socket);
      parser.finish();
      freeParser(parser, req, null);
      parser = null;

      var eventName = req.method === 'CONNECT' ? 'connect' : 'upgrade';
      if (self.listenerCount(eventName) > 0) {
        debug('SERVER have listener for %s', eventName);
        var bodyHead = d.slice(bytesParsed, d.length);

        // TODO(isaacs): Need a way to reset a stream to fresh state
        // IE, not flowing, and not explicitly paused.
        socket._readableState.flowing = null;
        self.emit(eventName, req, socket, bodyHead);
      } else {
        // Got upgrade header or CONNECT method, but have no handler.
        socket.destroy();
      }
    }

    if (socket._paused && socket.parser) {
      // onIncoming paused the socket, we should pause the parser as well
      debug('pause parser');
      socket.parser.pause();
    }
  }

  function socketOnEnd() {
    var socket = this;
    var ret = parser.finish();

    if (ret instanceof Error) {
      debug('parse error');
      socketOnError.call(socket, ret);
      return;
    }

    if (!self.httpAllowHalfOpen) {
      abortIncoming();
      if (socket.writable) socket.end();
    } else if (outgoing.length) {
      // Half-open allowed: flag the last queued response as final.
      outgoing[outgoing.length - 1]._last = true;
    } else if (socket._httpMessage) {
      socket._httpMessage._last = true;
    } else {
      if (socket.writable) socket.end();
    }
  }

  // The following callback is issued after the headers have been read on a
  // new message. In this callback we setup the response object and pass it
  // to the user.

  socket._paused = false;
  function socketOnDrain() {
    var needPause = outgoingData > socket._writableState.highWaterMark;

    // If we previously paused, then start reading again.
    if (socket._paused && !needPause) {
      socket._paused = false;
      if (socket.parser)
        socket.parser.resume();
      socket.resume();
    }
  }

  function parserOnIncoming(req, shouldKeepAlive) {
    incoming.push(req);

    // If the writable end isn't consuming, then stop reading
    // so that we don't become overwhelmed by a flood of
    // pipelined requests that may never be resolved.
    if (!socket._paused) {
      var needPause = socket._writableState.needDrain ||
          outgoingData >= socket._writableState.highWaterMark;
      if (needPause) {
        socket._paused = true;
        // We also need to pause the parser, but don't do that until after
        // the call to execute, because we may still be processing the last
        // chunk.
        socket.pause();
      }
    }

    var res = new ServerResponse(req);
    res._onPendingData = updateOutgoingData;

    res.shouldKeepAlive = shouldKeepAlive;
    DTRACE_HTTP_SERVER_REQUEST(req, socket);
    LTTNG_HTTP_SERVER_REQUEST(req, socket);
    COUNTER_HTTP_SERVER_REQUEST();

    if (socket._httpMessage) {
      // There are already pending outgoing res, append.
      outgoing.push(res);
    } else {
      res.assignSocket(socket);
    }

    // When we're finished writing the response, check if this is the last
    // response, if so destroy the socket.
    res.on('finish', resOnFinish);
    function resOnFinish() {
      // Usually the first incoming element should be our request. it may
      // be that in the case abortIncoming() was called that the incoming
      // array will be empty.
      assert(incoming.length === 0 || incoming[0] === req);

      incoming.shift();

      // if the user never called req.read(), and didn't pipe() or
      // .resume() or .on('data'), then we call req._dump() so that the
      // bytes will be pulled off the wire.
      if (!req._consuming && !req._readableState.resumeScheduled)
        req._dump();

      res.detachSocket(socket);

      if (res._last) {
        socket.destroySoon();
      } else {
        // start sending the next message
        var m = outgoing.shift();
        if (m) {
          m.assignSocket(socket);
        }
      }
    }

    // Expect: 100-continue handling, only defined for HTTP/1.1 requests.
    if (req.headers.expect !== undefined &&
        (req.httpVersionMajor == 1 && req.httpVersionMinor == 1)) {
      if (continueExpression.test(req.headers.expect)) {
        res._expect_continue = true;

        if (self.listenerCount('checkContinue') > 0) {
          self.emit('checkContinue', req, res);
        } else {
          res.writeContinue();
          self.emit('request', req, res);
        }
      } else {
        // Unrecognized Expect value: 417 unless the user handles it.
        if (self.listenerCount('checkExpectation') > 0) {
          self.emit('checkExpectation', req, res);
        } else {
          res.writeHead(417);
          res.end();
        }
      }
    } else {
      self.emit('request', req, res);
    }
    return false; // Not a HEAD response. (Not even a response!)
  }
}
exports._connectionListener = connectionListener;
// 'resume' handler for a socket whose data stream the parser has consumed.
// `resume` is emitted asynchronously (e.g. from incoming.readStart()), so
// it can arrive while we are logically paused — in that case immediately
// pause again to keep the externally visible state consistent. Stream
// semantics do not matter for a consumed socket.
function onSocketResume() {
  if (this._paused) {
    this.pause();
    return;
  }

  var handle = this._handle;
  if (handle && !handle.reading) {
    handle.reading = true;
    handle.readStart();
  }
}
// 'pause' handler for a consumed socket: stop the native read loop if it
// is currently running; no-op otherwise (or if the handle is gone).
function onSocketPause() {
  var handle = this._handle;
  if (!handle || !handle.reading)
    return;
  handle.reading = false;
  handle.readStop();
}
// Returns a consumed socket to normal JS-land streaming: hands the raw
// stream back from the native parser and removes the pause/resume shims.
// No-op when the socket's handle is already gone.
function unconsume(parser, socket) {
  var handle = socket._handle;
  if (!handle)
    return;

  if (parser._consumed)
    parser.unconsume(handle._externalStream);
  parser._consumed = false;

  socket.removeListener('pause', onSocketPause);
  socket.removeListener('resume', onSocketResume);
}
// Replacement for Socket#on installed by connectionListener. Registers the
// listener normally, but watches for 'data'/'readable' subscriptions: user
// code asking for the raw stream means the parser must unconsume it.
function socketOnWrap(ev, fn) {
  var ret = net.Socket.prototype.on.call(this, ev, fn);

  // Parser already released (upgrade/teardown): restore the plain method
  // and stop intercepting.
  if (!this.parser) {
    this.on = net.Socket.prototype.on;
    return ret;
  }

  if (ev === 'data' || ev === 'readable')
    unconsume(this.parser, this);

  return ret;
}
|
5103_0
|
crossvul
|
js
|
CWE-116
|
Improper Encoding or Escaping of Output - Improper encoding or escaping can allow attackers to change the commands that are sent to another component, inserting malicious commands instead.
|
python
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Yanis Guenane <yanis+ansible@guenane.org>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
module: openssl_csr
short_description: Generate OpenSSL Certificate Signing Request (CSR)
description:
- This module allows one to (re)generate OpenSSL certificate signing requests.
- It uses the pyOpenSSL python library to interact with openssl. This module supports
the subjectAltName, keyUsage, extendedKeyUsage, basicConstraints and OCSP Must Staple
extensions.
- "Please note that the module regenerates existing CSR if it doesn't match the module's
options, or if it seems to be corrupt. If you are concerned that this could overwrite
your existing CSR, consider using the I(backup) option."
- "The module can use the cryptography Python library, or the pyOpenSSL Python
library. By default, it tries to detect which one is available. This can be
overridden with the I(select_crypto_backend) option. Please note that the
PyOpenSSL backend was deprecated in Ansible 2.9 and will be removed in community.crypto 2.0.0."
requirements:
- Either cryptography >= 1.3
- Or pyOpenSSL >= 0.15
author:
- Yanis Guenane (@Spredzy)
options:
state:
description:
- Whether the certificate signing request should exist or not, taking action if the state is different from what is stated.
type: str
default: present
choices: [ absent, present ]
digest:
description:
- The digest used when signing the certificate signing request with the private key.
type: str
default: sha256
privatekey_path:
description:
- The path to the private key to use when signing the certificate signing request.
- Either I(privatekey_path) or I(privatekey_content) must be specified if I(state) is C(present), but not both.
type: path
privatekey_content:
description:
- The content of the private key to use when signing the certificate signing request.
- Either I(privatekey_path) or I(privatekey_content) must be specified if I(state) is C(present), but not both.
type: str
version_added: "1.0.0"
privatekey_passphrase:
description:
- The passphrase for the private key.
- This is required if the private key is password protected.
type: str
version:
description:
- The version of the certificate signing request.
- "The only allowed value according to L(RFC 2986,https://tools.ietf.org/html/rfc2986#section-4.1)
is 1."
- This option will no longer accept unsupported values from Ansible 2.14 on.
type: int
default: 1
force:
description:
- Should the certificate signing request be forced regenerated by this ansible module.
type: bool
default: no
path:
description:
- The name of the file into which the generated OpenSSL certificate signing request will be written.
type: path
required: true
subject:
description:
- Key/value pairs that will be present in the subject name field of the certificate signing request.
- If you need to specify more than one value with the same key, use a list as value.
type: dict
country_name:
description:
- The countryName field of the certificate signing request subject.
type: str
aliases: [ C, countryName ]
state_or_province_name:
description:
- The stateOrProvinceName field of the certificate signing request subject.
type: str
aliases: [ ST, stateOrProvinceName ]
locality_name:
description:
- The localityName field of the certificate signing request subject.
type: str
aliases: [ L, localityName ]
organization_name:
description:
- The organizationName field of the certificate signing request subject.
type: str
aliases: [ O, organizationName ]
organizational_unit_name:
description:
- The organizationalUnitName field of the certificate signing request subject.
type: str
aliases: [ OU, organizationalUnitName ]
common_name:
description:
- The commonName field of the certificate signing request subject.
type: str
aliases: [ CN, commonName ]
email_address:
description:
- The emailAddress field of the certificate signing request subject.
type: str
aliases: [ E, emailAddress ]
subject_alt_name:
description:
- SAN extension to attach to the certificate signing request.
- This can either be a 'comma separated string' or a YAML list.
- Values must be prefixed by their options. (i.e., C(email), C(URI), C(DNS), C(RID), C(IP), C(dirName),
C(otherName) and the ones specific to your CA).
- Note that if no SAN is specified, but a common name, the common
name will be added as a SAN except if C(useCommonNameForSAN) is
set to I(false).
- More at U(https://tools.ietf.org/html/rfc5280#section-4.2.1.6).
type: list
elements: str
aliases: [ subjectAltName ]
subject_alt_name_critical:
description:
- Should the subjectAltName extension be considered as critical.
type: bool
aliases: [ subjectAltName_critical ]
use_common_name_for_san:
description:
- If set to C(yes), the module will fill the common name in for
C(subject_alt_name) with C(DNS:) prefix if no SAN is specified.
type: bool
default: yes
aliases: [ useCommonNameForSAN ]
key_usage:
description:
- This defines the purpose (e.g. encipherment, signature, certificate signing)
of the key contained in the certificate.
type: list
elements: str
aliases: [ keyUsage ]
key_usage_critical:
description:
- Should the keyUsage extension be considered as critical.
type: bool
aliases: [ keyUsage_critical ]
extended_key_usage:
description:
- Additional restrictions (e.g. client authentication, server authentication)
on the allowed purposes for which the public key may be used.
type: list
elements: str
aliases: [ extKeyUsage, extendedKeyUsage ]
extended_key_usage_critical:
description:
- Should the extkeyUsage extension be considered as critical.
type: bool
aliases: [ extKeyUsage_critical, extendedKeyUsage_critical ]
basic_constraints:
description:
- Indicates basic constraints, such as if the certificate is a CA.
type: list
elements: str
aliases: [ basicConstraints ]
basic_constraints_critical:
description:
- Should the basicConstraints extension be considered as critical.
type: bool
aliases: [ basicConstraints_critical ]
ocsp_must_staple:
description:
- Indicates that the certificate should contain the OCSP Must Staple
extension (U(https://tools.ietf.org/html/rfc7633)).
type: bool
aliases: [ ocspMustStaple ]
ocsp_must_staple_critical:
description:
- Should the OCSP Must Staple extension be considered as critical.
- Note that according to the RFC, this extension should not be marked
as critical, as old clients not knowing about OCSP Must Staple
are required to reject such certificates
(see U(https://tools.ietf.org/html/rfc7633#section-4)).
type: bool
aliases: [ ocspMustStaple_critical ]
name_constraints_permitted:
description:
- For CA certificates, this specifies a list of identifiers which describe
subtrees of names that this CA is allowed to issue certificates for.
- Values must be prefixed by their options. (i.e., C(email), C(URI), C(DNS), C(RID), C(IP), C(dirName),
C(otherName) and the ones specific to your CA).
type: list
elements: str
version_added: 1.1.0
name_constraints_excluded:
description:
- For CA certificates, this specifies a list of identifiers which describe
subtrees of names that this CA is *not* allowed to issue certificates for.
- Values must be prefixed by their options. (i.e., C(email), C(URI), C(DNS), C(RID), C(IP), C(dirName),
C(otherName) and the ones specific to your CA).
type: list
elements: str
version_added: 1.1.0
name_constraints_critical:
description:
- Should the Name Constraints extension be considered as critical.
type: bool
version_added: 1.1.0
select_crypto_backend:
description:
- Determines which crypto backend to use.
- The default choice is C(auto), which tries to use C(cryptography) if available, and falls back to C(pyopenssl).
- If set to C(pyopenssl), will try to use the L(pyOpenSSL,https://pypi.org/project/pyOpenSSL/) library.
- If set to C(cryptography), will try to use the L(cryptography,https://cryptography.io/) library.
- Please note that the C(pyopenssl) backend has been deprecated in Ansible 2.9, and will be removed in community.crypto 2.0.0.
From that point on, only the C(cryptography) backend will be available.
type: str
default: auto
choices: [ auto, cryptography, pyopenssl ]
backup:
description:
- Create a backup file including a timestamp so you can get the original
CSR back if you overwrote it with a new one by accident.
type: bool
default: no
create_subject_key_identifier:
description:
- Create the Subject Key Identifier from the public key.
- "Please note that commercial CAs can ignore the value, respectively use a value of
their own choice instead. Specifying this option is mostly useful for self-signed
certificates or for own CAs."
- Note that this is only supported if the C(cryptography) backend is used!
type: bool
default: no
subject_key_identifier:
description:
- The subject key identifier as a hex string, where two bytes are separated by colons.
- "Example: C(00:11:22:33:44:55:66:77:88:99:aa:bb:cc:dd:ee:ff:00:11:22:33)"
- "Please note that commercial CAs ignore this value, respectively use a value of their
own choice. Specifying this option is mostly useful for self-signed certificates
or for own CAs."
- Note that this option can only be used if I(create_subject_key_identifier) is C(no).
- Note that this is only supported if the C(cryptography) backend is used!
type: str
authority_key_identifier:
description:
- The authority key identifier as a hex string, where two bytes are separated by colons.
- "Example: C(00:11:22:33:44:55:66:77:88:99:aa:bb:cc:dd:ee:ff:00:11:22:33)"
- If specified, I(authority_cert_issuer) must also be specified.
- "Please note that commercial CAs ignore this value, respectively use a value of their
own choice. Specifying this option is mostly useful for self-signed certificates
or for own CAs."
- Note that this is only supported if the C(cryptography) backend is used!
- The C(AuthorityKeyIdentifier) will only be added if at least one of I(authority_key_identifier),
I(authority_cert_issuer) and I(authority_cert_serial_number) is specified.
type: str
authority_cert_issuer:
description:
- Names that will be present in the authority cert issuer field of the certificate signing request.
- Values must be prefixed by their options. (i.e., C(email), C(URI), C(DNS), C(RID), C(IP), C(dirName),
C(otherName) and the ones specific to your CA)
- "Example: C(DNS:ca.example.org)"
- If specified, I(authority_key_identifier) must also be specified.
- "Please note that commercial CAs ignore this value, respectively use a value of their
own choice. Specifying this option is mostly useful for self-signed certificates
or for own CAs."
- Note that this is only supported if the C(cryptography) backend is used!
- The C(AuthorityKeyIdentifier) will only be added if at least one of I(authority_key_identifier),
I(authority_cert_issuer) and I(authority_cert_serial_number) is specified.
type: list
elements: str
authority_cert_serial_number:
description:
- The authority cert serial number.
- Note that this is only supported if the C(cryptography) backend is used!
- "Please note that commercial CAs ignore this value, respectively use a value of their
own choice. Specifying this option is mostly useful for self-signed certificates
or for own CAs."
- The C(AuthorityKeyIdentifier) will only be added if at least one of I(authority_key_identifier),
I(authority_cert_issuer) and I(authority_cert_serial_number) is specified.
type: int
return_content:
description:
- If set to C(yes), will return the (current or generated) CSR's content as I(csr).
type: bool
default: no
version_added: "1.0.0"
extends_documentation_fragment:
- files
notes:
- If the certificate signing request already exists it will be checked whether subjectAltName,
keyUsage, extendedKeyUsage and basicConstraints only contain the requested values, whether
OCSP Must Staple is as requested, and if the request was signed by the given private key.
seealso:
- module: community.crypto.x509_certificate
- module: community.crypto.openssl_dhparam
- module: community.crypto.openssl_pkcs12
- module: community.crypto.openssl_privatekey
- module: community.crypto.openssl_publickey
'''
EXAMPLES = r'''
- name: Generate an OpenSSL Certificate Signing Request
community.crypto.openssl_csr:
path: /etc/ssl/csr/www.ansible.com.csr
privatekey_path: /etc/ssl/private/ansible.com.pem
common_name: www.ansible.com
- name: Generate an OpenSSL Certificate Signing Request with an inline key
community.crypto.openssl_csr:
path: /etc/ssl/csr/www.ansible.com.csr
privatekey_content: "{{ private_key_content }}"
common_name: www.ansible.com
- name: Generate an OpenSSL Certificate Signing Request with a passphrase protected private key
community.crypto.openssl_csr:
path: /etc/ssl/csr/www.ansible.com.csr
privatekey_path: /etc/ssl/private/ansible.com.pem
privatekey_passphrase: ansible
common_name: www.ansible.com
- name: Generate an OpenSSL Certificate Signing Request with Subject information
community.crypto.openssl_csr:
path: /etc/ssl/csr/www.ansible.com.csr
privatekey_path: /etc/ssl/private/ansible.com.pem
country_name: FR
organization_name: Ansible
email_address: jdoe@ansible.com
common_name: www.ansible.com
- name: Generate an OpenSSL Certificate Signing Request with subjectAltName extension
community.crypto.openssl_csr:
path: /etc/ssl/csr/www.ansible.com.csr
privatekey_path: /etc/ssl/private/ansible.com.pem
subject_alt_name: 'DNS:www.ansible.com,DNS:m.ansible.com'
- name: Generate an OpenSSL CSR with subjectAltName extension with dynamic list
community.crypto.openssl_csr:
path: /etc/ssl/csr/www.ansible.com.csr
privatekey_path: /etc/ssl/private/ansible.com.pem
subject_alt_name: "{{ item.value | map('regex_replace', '^', 'DNS:') | list }}"
with_dict:
dns_server:
- www.ansible.com
- m.ansible.com
- name: Force regenerate an OpenSSL Certificate Signing Request
community.crypto.openssl_csr:
path: /etc/ssl/csr/www.ansible.com.csr
privatekey_path: /etc/ssl/private/ansible.com.pem
force: yes
common_name: www.ansible.com
- name: Generate an OpenSSL Certificate Signing Request with special key usages
community.crypto.openssl_csr:
path: /etc/ssl/csr/www.ansible.com.csr
privatekey_path: /etc/ssl/private/ansible.com.pem
common_name: www.ansible.com
key_usage:
- digitalSignature
- keyAgreement
extended_key_usage:
- clientAuth
- name: Generate an OpenSSL Certificate Signing Request with OCSP Must Staple
community.crypto.openssl_csr:
path: /etc/ssl/csr/www.ansible.com.csr
privatekey_path: /etc/ssl/private/ansible.com.pem
common_name: www.ansible.com
ocsp_must_staple: yes
- name: Generate an OpenSSL Certificate Signing Request for WinRM Certificate authentication
community.crypto.openssl_csr:
path: /etc/ssl/csr/winrm.auth.csr
privatekey_path: /etc/ssl/private/winrm.auth.pem
common_name: username
extended_key_usage:
- clientAuth
subject_alt_name: otherName:1.3.6.1.4.1.311.20.2.3;UTF8:username@localhost
'''
RETURN = r'''
privatekey:
description:
- Path to the TLS/SSL private key the CSR was generated for
- Will be C(none) if the private key has been provided in I(privatekey_content).
returned: changed or success
type: str
sample: /etc/ssl/private/ansible.com.pem
filename:
description: Path to the generated Certificate Signing Request
returned: changed or success
type: str
sample: /etc/ssl/csr/www.ansible.com.csr
subject:
description: A list of the subject tuples attached to the CSR
returned: changed or success
type: list
elements: list
sample: "[('CN', 'www.ansible.com'), ('O', 'Ansible')]"
subjectAltName:
description: The alternative names this CSR is valid for
returned: changed or success
type: list
elements: str
sample: [ 'DNS:www.ansible.com', 'DNS:m.ansible.com' ]
keyUsage:
description: Purpose for which the public key may be used
returned: changed or success
type: list
elements: str
sample: [ 'digitalSignature', 'keyAgreement' ]
extendedKeyUsage:
description: Additional restriction on the public key purposes
returned: changed or success
type: list
elements: str
sample: [ 'clientAuth' ]
basicConstraints:
description: Indicates if the certificate belongs to a CA
returned: changed or success
type: list
elements: str
sample: ['CA:TRUE', 'pathLenConstraint:0']
ocsp_must_staple:
description: Indicates whether the certificate has the OCSP
Must Staple feature enabled
returned: changed or success
type: bool
sample: false
name_constraints_permitted:
description: List of permitted subtrees to sign certificates for.
returned: changed or success
type: list
elements: str
sample: ['email:.somedomain.com']
version_added: 1.1.0
name_constraints_excluded:
description: List of excluded subtrees the CA cannot sign certificates for.
returned: changed or success
type: list
elements: str
sample: ['email:.com']
version_added: 1.1.0
backup_file:
description: Name of backup file created.
returned: changed and if I(backup) is C(yes)
type: str
sample: /path/to/www.ansible.com.csr.2019-03-09@11:22~
csr:
description: The (current or generated) CSR's content.
returned: if I(state) is C(present) and I(return_content) is C(yes)
type: str
version_added: "1.0.0"
'''
import abc
import binascii
import os
import traceback
from distutils.version import LooseVersion
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils._text import to_native, to_bytes, to_text
from ansible_collections.community.crypto.plugins.module_utils.io import (
load_file_if_exists,
write_file,
)
from ansible_collections.community.crypto.plugins.module_utils.crypto.basic import (
OpenSSLObjectError,
OpenSSLBadPassphraseError,
)
from ansible_collections.community.crypto.plugins.module_utils.crypto.support import (
OpenSSLObject,
load_privatekey,
load_certificate_request,
parse_name_field,
)
from ansible_collections.community.crypto.plugins.module_utils.crypto.cryptography_support import (
cryptography_get_basic_constraints,
cryptography_get_name,
cryptography_name_to_oid,
cryptography_key_needs_digest_for_signing,
cryptography_parse_key_usage_params,
)
from ansible_collections.community.crypto.plugins.module_utils.crypto.pyopenssl_support import (
pyopenssl_normalize_name_attribute,
pyopenssl_parse_name_constraints,
)
# Minimum versions of the two supported crypto backends.
MINIMAL_PYOPENSSL_VERSION = '0.15'
MINIMAL_CRYPTOGRAPHY_VERSION = '1.3'

# Feature-detect pyOpenSSL; keep the import traceback so a helpful
# error can be reported later via missing_required_lib().
PYOPENSSL_IMP_ERR = None
try:
    import OpenSSL
    from OpenSSL import crypto
    PYOPENSSL_VERSION = LooseVersion(OpenSSL.__version__)
except ImportError:
    PYOPENSSL_IMP_ERR = traceback.format_exc()
    PYOPENSSL_FOUND = False
else:
    PYOPENSSL_FOUND = True
    if OpenSSL.SSL.OPENSSL_VERSION_NUMBER >= 0x10100000:
        # OpenSSL 1.1.0 or newer: the TLS Feature ("OCSP Must Staple")
        # extension is known by name.
        OPENSSL_MUST_STAPLE_NAME = b"tlsfeature"
        OPENSSL_MUST_STAPLE_VALUE = b"status_request"
    else:
        # OpenSSL 1.0.x or older: fall back to the raw OID and the
        # DER-encoded extension value.
        OPENSSL_MUST_STAPLE_NAME = b"1.3.6.1.5.5.7.1.24"
        OPENSSL_MUST_STAPLE_VALUE = b"DER:30:03:02:01:05"

# Feature-detect the cryptography library in the same fashion.
CRYPTOGRAPHY_IMP_ERR = None
try:
    import cryptography
    import cryptography.x509
    import cryptography.x509.oid
    import cryptography.exceptions
    import cryptography.hazmat.backends
    import cryptography.hazmat.primitives.serialization
    import cryptography.hazmat.primitives.hashes
    CRYPTOGRAPHY_VERSION = LooseVersion(cryptography.__version__)
except ImportError:
    CRYPTOGRAPHY_IMP_ERR = traceback.format_exc()
    CRYPTOGRAPHY_FOUND = False
else:
    CRYPTOGRAPHY_FOUND = True
    # OID and raw DER value of the "OCSP Must Staple" (TLS Feature) extension,
    # used when the installed cryptography version has no TLSFeature class.
    CRYPTOGRAPHY_MUST_STAPLE_NAME = cryptography.x509.oid.ObjectIdentifier("1.3.6.1.5.5.7.1.24")
    CRYPTOGRAPHY_MUST_STAPLE_VALUE = b"\x30\x03\x02\x01\x05"
class CertificateSigningRequestError(OpenSSLObjectError):
    """Raised for any error while parsing options for, generating, or
    validating a certificate signing request."""
    pass
class CertificateSigningRequestBase(OpenSSLObject):
    """Backend-independent CSR life-cycle implementation.

    Reads all module options in __init__ and drives generate/check/remove.
    Backend-specific behavior is supplied by subclasses through the
    abstract methods _generate_csr(), _load_private_key() and _check_csr().
    """

    def __init__(self, module):
        super(CertificateSigningRequestBase, self).__init__(
            module.params['path'],
            module.params['state'],
            module.params['force'],
            module.check_mode
        )
        self.digest = module.params['digest']
        self.privatekey_path = module.params['privatekey_path']
        self.privatekey_content = module.params['privatekey_content']
        # Key content is handled as bytes everywhere downstream.
        if self.privatekey_content is not None:
            self.privatekey_content = self.privatekey_content.encode('utf-8')
        self.privatekey_passphrase = module.params['privatekey_passphrase']
        self.version = module.params['version']
        self.subjectAltName = module.params['subject_alt_name']
        self.subjectAltName_critical = module.params['subject_alt_name_critical']
        self.keyUsage = module.params['key_usage']
        self.keyUsage_critical = module.params['key_usage_critical']
        self.extendedKeyUsage = module.params['extended_key_usage']
        self.extendedKeyUsage_critical = module.params['extended_key_usage_critical']
        self.basicConstraints = module.params['basic_constraints']
        self.basicConstraints_critical = module.params['basic_constraints_critical']
        self.ocspMustStaple = module.params['ocsp_must_staple']
        self.ocspMustStaple_critical = module.params['ocsp_must_staple_critical']
        # Normalize "not given" to empty lists so set comparisons work later.
        self.name_constraints_permitted = module.params['name_constraints_permitted'] or []
        self.name_constraints_excluded = module.params['name_constraints_excluded'] or []
        self.name_constraints_critical = module.params['name_constraints_critical']
        self.create_subject_key_identifier = module.params['create_subject_key_identifier']
        self.subject_key_identifier = module.params['subject_key_identifier']
        self.authority_key_identifier = module.params['authority_key_identifier']
        self.authority_cert_issuer = module.params['authority_cert_issuer']
        self.authority_cert_serial_number = module.params['authority_cert_serial_number']
        self.request = None
        self.privatekey = None
        self.csr_bytes = None
        self.return_content = module.params['return_content']
        # The two SKI options are mutually exclusive by design.
        if self.create_subject_key_identifier and self.subject_key_identifier is not None:
            module.fail_json(msg='subject_key_identifier cannot be specified if create_subject_key_identifier is true')
        self.backup = module.params['backup']
        self.backup_file = None

        # Assemble the subject from the individual convenience options first ...
        self.subject = [
            ('C', module.params['country_name']),
            ('ST', module.params['state_or_province_name']),
            ('L', module.params['locality_name']),
            ('O', module.params['organization_name']),
            ('OU', module.params['organizational_unit_name']),
            ('CN', module.params['common_name']),
            ('emailAddress', module.params['email_address']),
        ]
        # ... then append the free-form subject dict, and finally drop
        # entries without a value.
        if module.params['subject']:
            self.subject = self.subject + parse_name_field(module.params['subject'])
        self.subject = [(entry[0], entry[1]) for entry in self.subject if entry[1]]

        # If no SAN was given, optionally derive one from the common name.
        self.using_common_name_for_san = False
        if not self.subjectAltName and module.params['use_common_name_for_san']:
            for sub in self.subject:
                if sub[0] in ('commonName', 'CN'):
                    self.subjectAltName = ['DNS:%s' % sub[1]]
                    self.using_common_name_for_san = True
                    break

        # Key identifiers are accepted as (possibly colon-separated) hex
        # strings and converted to raw bytes here.
        if self.subject_key_identifier is not None:
            try:
                self.subject_key_identifier = binascii.unhexlify(self.subject_key_identifier.replace(':', ''))
            except Exception as e:
                raise CertificateSigningRequestError('Cannot parse subject_key_identifier: {0}'.format(e))
        if self.authority_key_identifier is not None:
            try:
                self.authority_key_identifier = binascii.unhexlify(self.authority_key_identifier.replace(':', ''))
            except Exception as e:
                raise CertificateSigningRequestError('Cannot parse authority_key_identifier: {0}'.format(e))

    @abc.abstractmethod
    def _generate_csr(self):
        """Build and sign the CSR; return its PEM serialization as bytes."""
        pass

    def generate(self, module):
        '''Generate the certificate signing request.'''
        # Only (re)generate when the existing file does not match the
        # requested options, or when force=true.
        if not self.check(module, perms_required=False) or self.force:
            result = self._generate_csr()
            if self.backup:
                self.backup_file = module.backup_local(self.path)
            if self.return_content:
                self.csr_bytes = result
            write_file(module, result)
            self.changed = True

        # Apply owner/group/mode etc. from the common file arguments.
        file_args = module.load_file_common_arguments(module.params)
        if module.set_fs_attributes_if_different(file_args, False):
            self.changed = True

    @abc.abstractmethod
    def _load_private_key(self):
        """Load the signing private key into self.privatekey."""
        pass

    @abc.abstractmethod
    def _check_csr(self):
        """Return True if the on-disk CSR matches all requested options."""
        pass

    def check(self, module, perms_required=True):
        """Ensure the resource is in its desired state."""
        state_and_perms = super(CertificateSigningRequestBase, self).check(module, perms_required)
        # The key is loaded unconditionally so that _check_csr() can
        # verify the CSR's signature/public key against it.
        self._load_private_key()
        if not state_and_perms:
            return False
        return self._check_csr()

    def remove(self, module):
        # Preserve the old file before deletion when backup=true.
        if self.backup:
            self.backup_file = module.backup_local(self.path)
        super(CertificateSigningRequestBase, self).remove(module)

    def dump(self):
        '''Serialize the object into a dictionary.'''
        result = {
            'privatekey': self.privatekey_path,
            'filename': self.path,
            'subject': self.subject,
            'subjectAltName': self.subjectAltName,
            'keyUsage': self.keyUsage,
            'extendedKeyUsage': self.extendedKeyUsage,
            'basicConstraints': self.basicConstraints,
            'ocspMustStaple': self.ocspMustStaple,
            'changed': self.changed,
            'name_constraints_permitted': self.name_constraints_permitted,
            'name_constraints_excluded': self.name_constraints_excluded,
        }
        if self.backup_file:
            result['backup_file'] = self.backup_file
        if self.return_content:
            # Fall back to reading the file if the CSR was not generated
            # during this run (e.g. unchanged or check mode).
            if self.csr_bytes is None:
                self.csr_bytes = load_file_if_exists(self.path, ignore_errors=True)
            result['csr'] = self.csr_bytes.decode('utf-8') if self.csr_bytes else None
        return result
class CertificateSigningRequestPyOpenSSL(CertificateSigningRequestBase):
    """CSR backend based on pyOpenSSL (deprecated backend).

    Does not support subject/authority key identifier options; those are
    rejected in __init__.
    """

    def __init__(self, module):
        # These options are only implemented by the cryptography backend.
        if module.params['create_subject_key_identifier']:
            module.fail_json(msg='You cannot use create_subject_key_identifier with the pyOpenSSL backend!')
        for o in ('subject_key_identifier', 'authority_key_identifier', 'authority_cert_issuer', 'authority_cert_serial_number'):
            if module.params[o] is not None:
                module.fail_json(msg='You cannot use {0} with the pyOpenSSL backend!'.format(o))
        super(CertificateSigningRequestPyOpenSSL, self).__init__(module)

    def _generate_csr(self):
        """Build, sign and PEM-serialize the CSR with pyOpenSSL."""
        req = crypto.X509Req()
        # X509Req versions are zero-based internally.
        req.set_version(self.version - 1)
        subject = req.get_subject()
        for entry in self.subject:
            if entry[1] is not None:
                # Workaround for https://github.com/pyca/pyopenssl/issues/165
                nid = OpenSSL._util.lib.OBJ_txt2nid(to_bytes(entry[0]))
                if nid == 0:
                    raise CertificateSigningRequestError('Unknown subject field identifier "{0}"'.format(entry[0]))
                res = OpenSSL._util.lib.X509_NAME_add_entry_by_NID(subject._name, nid, OpenSSL._util.lib.MBSTRING_UTF8, to_bytes(entry[1]), -1, -1, 0)
                if res == 0:
                    raise CertificateSigningRequestError('Invalid value for subject field identifier "{0}": {1}'.format(entry[0], entry[1]))

        extensions = []
        if self.subjectAltName:
            altnames = ', '.join(self.subjectAltName)
            try:
                extensions.append(crypto.X509Extension(b"subjectAltName", self.subjectAltName_critical, altnames.encode('ascii')))
            except OpenSSL.crypto.Error as e:
                raise CertificateSigningRequestError(
                    'Error while parsing Subject Alternative Names {0} (check for missing type prefix, such as "DNS:"!): {1}'.format(
                        ', '.join(["{0}".format(san) for san in self.subjectAltName]), str(e)
                    )
                )
        if self.keyUsage:
            usages = ', '.join(self.keyUsage)
            extensions.append(crypto.X509Extension(b"keyUsage", self.keyUsage_critical, usages.encode('ascii')))
        if self.extendedKeyUsage:
            usages = ', '.join(self.extendedKeyUsage)
            extensions.append(crypto.X509Extension(b"extendedKeyUsage", self.extendedKeyUsage_critical, usages.encode('ascii')))
        if self.basicConstraints:
            usages = ', '.join(self.basicConstraints)
            extensions.append(crypto.X509Extension(b"basicConstraints", self.basicConstraints_critical, usages.encode('ascii')))
        if self.name_constraints_permitted or self.name_constraints_excluded:
            # nameConstraints uses "permitted;NAME" / "excluded;NAME" syntax.
            usages = ', '.join(
                ['permitted;{0}'.format(name) for name in self.name_constraints_permitted] +
                ['excluded;{0}'.format(name) for name in self.name_constraints_excluded]
            )
            extensions.append(crypto.X509Extension(b"nameConstraints", self.name_constraints_critical, usages.encode('ascii')))
        if self.ocspMustStaple:
            extensions.append(crypto.X509Extension(OPENSSL_MUST_STAPLE_NAME, self.ocspMustStaple_critical, OPENSSL_MUST_STAPLE_VALUE))
        if extensions:
            req.add_extensions(extensions)

        req.set_pubkey(self.privatekey)
        req.sign(self.privatekey, self.digest)
        self.request = req
        return crypto.dump_certificate_request(crypto.FILETYPE_PEM, self.request)

    def _load_private_key(self):
        """Load the signing key from path or content, handling bad passphrases."""
        try:
            self.privatekey = load_privatekey(
                path=self.privatekey_path,
                content=self.privatekey_content,
                passphrase=self.privatekey_passphrase
            )
        except OpenSSLBadPassphraseError as exc:
            raise CertificateSigningRequestError(exc)

    def _check_csr(self):
        """Compare the on-disk CSR against all requested options."""

        def _check_subject(csr):
            # Compare subjects as sets of (NID, value) pairs so ordering
            # and attribute-name aliases do not matter.
            subject = [(OpenSSL._util.lib.OBJ_txt2nid(to_bytes(sub[0])), to_bytes(sub[1])) for sub in self.subject]
            current_subject = [(OpenSSL._util.lib.OBJ_txt2nid(to_bytes(sub[0])), to_bytes(sub[1])) for sub in csr.get_subject().get_components()]
            if not set(subject) == set(current_subject):
                return False
            return True

        def _check_subjectAltName(extensions):
            altnames_ext = next((ext for ext in extensions if ext.get_short_name() == b'subjectAltName'), '')
            altnames = [pyopenssl_normalize_name_attribute(altname.strip()) for altname in
                        to_text(altnames_ext, errors='surrogate_or_strict').split(',') if altname.strip()]
            if self.subjectAltName:
                if (set(altnames) != set([pyopenssl_normalize_name_attribute(to_text(name)) for name in self.subjectAltName]) or
                        altnames_ext.get_critical() != self.subjectAltName_critical):
                    return False
            else:
                if altnames:
                    return False
            return True

        def _check_keyUsage_(extensions, extName, expected, critical):
            # Generic NID-set comparison used for extendedKeyUsage and
            # basicConstraints.
            usages_ext = [ext for ext in extensions if ext.get_short_name() == extName]
            if (not usages_ext and expected) or (usages_ext and not expected):
                return False
            elif not usages_ext and not expected:
                return True
            else:
                current = [OpenSSL._util.lib.OBJ_txt2nid(to_bytes(usage.strip())) for usage in str(usages_ext[0]).split(',')]
                expected = [OpenSSL._util.lib.OBJ_txt2nid(to_bytes(usage)) for usage in expected]
                return set(current) == set(expected) and usages_ext[0].get_critical() == critical

        def _check_keyUsage(extensions):
            usages_ext = [ext for ext in extensions if ext.get_short_name() == b'keyUsage']
            if (not usages_ext and self.keyUsage) or (usages_ext and not self.keyUsage):
                return False
            elif not usages_ext and not self.keyUsage:
                return True
            else:
                # OpenSSL._util.lib.OBJ_txt2nid() always returns 0 for all keyUsage values
                # (since keyUsage has a fixed bitfield for these values and is not extensible).
                # Therefore, we create an extension for the wanted values, and compare the
                # data of the extensions (which is the serialized bitfield).
                expected_ext = crypto.X509Extension(b"keyUsage", False, ', '.join(self.keyUsage).encode('ascii'))
                return usages_ext[0].get_data() == expected_ext.get_data() and usages_ext[0].get_critical() == self.keyUsage_critical

        def _check_extenededKeyUsage(extensions):
            return _check_keyUsage_(extensions, b'extendedKeyUsage', self.extendedKeyUsage, self.extendedKeyUsage_critical)

        def _check_basicConstraints(extensions):
            return _check_keyUsage_(extensions, b'basicConstraints', self.basicConstraints, self.basicConstraints_critical)

        def _check_nameConstraints(extensions):
            nc_ext = next((ext for ext in extensions if ext.get_short_name() == b'nameConstraints'), '')
            permitted, excluded = pyopenssl_parse_name_constraints(nc_ext)
            if self.name_constraints_permitted or self.name_constraints_excluded:
                if set(permitted) != set([pyopenssl_normalize_name_attribute(to_text(name)) for name in self.name_constraints_permitted]):
                    return False
                if set(excluded) != set([pyopenssl_normalize_name_attribute(to_text(name)) for name in self.name_constraints_excluded]):
                    return False
                if nc_ext.get_critical() != self.name_constraints_critical:
                    return False
            else:
                if permitted or excluded:
                    return False
            return True

        def _check_ocspMustStaple(extensions):
            oms_ext = [ext for ext in extensions if to_bytes(ext.get_short_name()) == OPENSSL_MUST_STAPLE_NAME and to_bytes(ext) == OPENSSL_MUST_STAPLE_VALUE]
            if OpenSSL.SSL.OPENSSL_VERSION_NUMBER < 0x10100000:
                # Older versions of libssl don't know about OCSP Must Staple
                oms_ext.extend([ext for ext in extensions if ext.get_short_name() == b'UNDEF' and ext.get_data() == b'\x30\x03\x02\x01\x05'])
            if self.ocspMustStaple:
                return len(oms_ext) > 0 and oms_ext[0].get_critical() == self.ocspMustStaple_critical
            else:
                return len(oms_ext) == 0

        def _check_extensions(csr):
            extensions = csr.get_extensions()
            return (_check_subjectAltName(extensions) and _check_keyUsage(extensions) and
                    _check_extenededKeyUsage(extensions) and _check_basicConstraints(extensions) and
                    _check_ocspMustStaple(extensions) and _check_nameConstraints(extensions))

        def _check_signature(csr):
            try:
                return csr.verify(self.privatekey)
            except crypto.Error:
                return False

        # An unreadable/corrupt CSR simply counts as "does not match" so
        # that generate() regenerates it.
        try:
            csr = load_certificate_request(self.path, backend='pyopenssl')
        except Exception as dummy:
            return False

        return _check_subject(csr) and _check_extensions(csr) and _check_signature(csr)
class CertificateSigningRequestCryptography(CertificateSigningRequestBase):
    """CSR backend based on the cryptography library (preferred backend)."""

    def __init__(self, module):
        super(CertificateSigningRequestCryptography, self).__init__(module)
        self.cryptography_backend = cryptography.hazmat.backends.default_backend()
        self.module = module
        if self.version != 1:
            module.warn('The cryptography backend only supports version 1. (The only valid value according to RFC 2986.)')

    def _generate_csr(self):
        """Build, sign and PEM-serialize the CSR with cryptography."""
        csr = cryptography.x509.CertificateSigningRequestBuilder()
        try:
            csr = csr.subject_name(cryptography.x509.Name([
                cryptography.x509.NameAttribute(cryptography_name_to_oid(entry[0]), to_text(entry[1])) for entry in self.subject
            ]))
        except ValueError as e:
            raise CertificateSigningRequestError(e)

        if self.subjectAltName:
            csr = csr.add_extension(cryptography.x509.SubjectAlternativeName([
                cryptography_get_name(name) for name in self.subjectAltName
            ]), critical=self.subjectAltName_critical)

        if self.keyUsage:
            params = cryptography_parse_key_usage_params(self.keyUsage)
            csr = csr.add_extension(cryptography.x509.KeyUsage(**params), critical=self.keyUsage_critical)

        if self.extendedKeyUsage:
            usages = [cryptography_name_to_oid(usage) for usage in self.extendedKeyUsage]
            csr = csr.add_extension(cryptography.x509.ExtendedKeyUsage(usages), critical=self.extendedKeyUsage_critical)

        if self.basicConstraints:
            params = {}
            ca, path_length = cryptography_get_basic_constraints(self.basicConstraints)
            csr = csr.add_extension(cryptography.x509.BasicConstraints(ca, path_length), critical=self.basicConstraints_critical)

        if self.ocspMustStaple:
            try:
                # This only works with cryptography >= 2.1
                csr = csr.add_extension(cryptography.x509.TLSFeature([cryptography.x509.TLSFeatureType.status_request]), critical=self.ocspMustStaple_critical)
            except AttributeError as dummy:
                # Older cryptography: add the raw DER-encoded extension instead.
                csr = csr.add_extension(
                    cryptography.x509.UnrecognizedExtension(CRYPTOGRAPHY_MUST_STAPLE_NAME, CRYPTOGRAPHY_MUST_STAPLE_VALUE),
                    critical=self.ocspMustStaple_critical
                )

        if self.name_constraints_permitted or self.name_constraints_excluded:
            try:
                csr = csr.add_extension(cryptography.x509.NameConstraints(
                    [cryptography_get_name(name) for name in self.name_constraints_permitted],
                    [cryptography_get_name(name) for name in self.name_constraints_excluded],
                ), critical=self.name_constraints_critical)
            except TypeError as e:
                raise OpenSSLObjectError('Error while parsing name constraint: {0}'.format(e))

        if self.create_subject_key_identifier:
            # Derive the SKI from the public key of the signing key.
            csr = csr.add_extension(
                cryptography.x509.SubjectKeyIdentifier.from_public_key(self.privatekey.public_key()),
                critical=False
            )
        elif self.subject_key_identifier is not None:
            csr = csr.add_extension(cryptography.x509.SubjectKeyIdentifier(self.subject_key_identifier), critical=False)

        if self.authority_key_identifier is not None or self.authority_cert_issuer is not None or self.authority_cert_serial_number is not None:
            issuers = None
            if self.authority_cert_issuer is not None:
                issuers = [cryptography_get_name(n) for n in self.authority_cert_issuer]
            csr = csr.add_extension(
                cryptography.x509.AuthorityKeyIdentifier(self.authority_key_identifier, issuers, self.authority_cert_serial_number),
                critical=False
            )

        # Ed25519/Ed448 keys sign without a separate digest; everything
        # else needs an explicit hash algorithm.
        digest = None
        if cryptography_key_needs_digest_for_signing(self.privatekey):
            if self.digest == 'sha256':
                digest = cryptography.hazmat.primitives.hashes.SHA256()
            elif self.digest == 'sha384':
                digest = cryptography.hazmat.primitives.hashes.SHA384()
            elif self.digest == 'sha512':
                digest = cryptography.hazmat.primitives.hashes.SHA512()
            elif self.digest == 'sha1':
                digest = cryptography.hazmat.primitives.hashes.SHA1()
            elif self.digest == 'md5':
                digest = cryptography.hazmat.primitives.hashes.MD5()
            # FIXME: extend this mapping if more digests should be supported.
            else:
                raise CertificateSigningRequestError('Unsupported digest "{0}"'.format(self.digest))
        try:
            self.request = csr.sign(self.privatekey, digest, self.cryptography_backend)
        except TypeError as e:
            if str(e) == 'Algorithm must be a registered hash algorithm.' and digest is None:
                self.module.fail_json(msg='Signing with Ed25519 and Ed448 keys requires cryptography 2.8 or newer.')
            raise
        except UnicodeError as e:
            # This catches IDNAErrors, which happens when a bad name is passed as a SAN
            # (https://github.com/ansible-collections/community.crypto/issues/105).
            # For older cryptography versions, this is handled by idna, which raises
            # an idna.core.IDNAError. Later versions of cryptography deprecated and stopped
            # requiring idna, whence we cannot easily handle this error. Fortunately, in
            # most versions of idna, IDNAError extends UnicodeError. There is only version
            # 2.3 where it extends Exception instead (see
            # https://github.com/kjd/idna/commit/ebefacd3134d0f5da4745878620a6a1cba86d130
            # and then
            # https://github.com/kjd/idna/commit/ea03c7b5db7d2a99af082e0239da2b68aeea702a).
            msg = 'Error while creating CSR: {0}\n'.format(e)
            if self.using_common_name_for_san:
                self.module.fail_json(msg=msg + 'This is probably caused because the Common Name is used as a SAN.'
                                          ' Specifying use_common_name_for_san=false might fix this.')
            self.module.fail_json(msg=msg + 'This is probably caused by an invalid Subject Alternative DNS Name.')

        return self.request.public_bytes(cryptography.hazmat.primitives.serialization.Encoding.PEM)

    def _load_private_key(self):
        """Load the PEM private key from content or path into self.privatekey."""
        try:
            if self.privatekey_content is not None:
                content = self.privatekey_content
            else:
                with open(self.privatekey_path, 'rb') as f:
                    content = f.read()
            self.privatekey = cryptography.hazmat.primitives.serialization.load_pem_private_key(
                content,
                None if self.privatekey_passphrase is None else to_bytes(self.privatekey_passphrase),
                backend=self.cryptography_backend
            )
        except Exception as e:
            raise CertificateSigningRequestError(e)

    def _check_csr(self):
        """Compare the on-disk CSR against all requested options."""

        def _check_subject(csr):
            # Order-insensitive comparison of (OID, value) pairs.
            subject = [(cryptography_name_to_oid(entry[0]), entry[1]) for entry in self.subject]
            current_subject = [(sub.oid, sub.value) for sub in csr.subject]
            return set(subject) == set(current_subject)

        def _find_extension(extensions, exttype):
            return next(
                (ext for ext in extensions if isinstance(ext.value, exttype)),
                None
            )

        def _check_subjectAltName(extensions):
            current_altnames_ext = _find_extension(extensions, cryptography.x509.SubjectAlternativeName)
            current_altnames = [str(altname) for altname in current_altnames_ext.value] if current_altnames_ext else []
            altnames = [str(cryptography_get_name(altname)) for altname in self.subjectAltName] if self.subjectAltName else []
            if set(altnames) != set(current_altnames):
                return False
            if altnames:
                if current_altnames_ext.critical != self.subjectAltName_critical:
                    return False
            return True

        def _check_keyUsage(extensions):
            current_keyusage_ext = _find_extension(extensions, cryptography.x509.KeyUsage)
            if not self.keyUsage:
                return current_keyusage_ext is None
            elif current_keyusage_ext is None:
                return False
            # Compare each key-usage flag via the extension's private
            # attributes (public accessors raise for some combinations).
            params = cryptography_parse_key_usage_params(self.keyUsage)
            for param in params:
                if getattr(current_keyusage_ext.value, '_' + param) != params[param]:
                    return False
            if current_keyusage_ext.critical != self.keyUsage_critical:
                return False
            return True

        def _check_extenededKeyUsage(extensions):
            current_usages_ext = _find_extension(extensions, cryptography.x509.ExtendedKeyUsage)
            current_usages = [str(usage) for usage in current_usages_ext.value] if current_usages_ext else []
            usages = [str(cryptography_name_to_oid(usage)) for usage in self.extendedKeyUsage] if self.extendedKeyUsage else []
            if set(current_usages) != set(usages):
                return False
            if usages:
                if current_usages_ext.critical != self.extendedKeyUsage_critical:
                    return False
            return True

        def _check_basicConstraints(extensions):
            bc_ext = _find_extension(extensions, cryptography.x509.BasicConstraints)
            current_ca = bc_ext.value.ca if bc_ext else False
            current_path_length = bc_ext.value.path_length if bc_ext else None
            ca, path_length = cryptography_get_basic_constraints(self.basicConstraints)
            # Check CA flag
            if ca != current_ca:
                return False
            # Check path length
            if path_length != current_path_length:
                return False
            # Check criticality
            if self.basicConstraints:
                if bc_ext.critical != self.basicConstraints_critical:
                    return False
            return True

        def _check_ocspMustStaple(extensions):
            try:
                # This only works with cryptography >= 2.1
                tlsfeature_ext = _find_extension(extensions, cryptography.x509.TLSFeature)
                has_tlsfeature = True
            except AttributeError as dummy:
                # Older cryptography: look for the raw OID instead.
                tlsfeature_ext = next(
                    (ext for ext in extensions if ext.value.oid == CRYPTOGRAPHY_MUST_STAPLE_NAME),
                    None
                )
                has_tlsfeature = False
            if self.ocspMustStaple:
                if not tlsfeature_ext or tlsfeature_ext.critical != self.ocspMustStaple_critical:
                    return False
                if has_tlsfeature:
                    return cryptography.x509.TLSFeatureType.status_request in tlsfeature_ext.value
                else:
                    return tlsfeature_ext.value.value == CRYPTOGRAPHY_MUST_STAPLE_VALUE
            else:
                return tlsfeature_ext is None

        def _check_nameConstraints(extensions):
            current_nc_ext = _find_extension(extensions, cryptography.x509.NameConstraints)
            current_nc_perm = [str(altname) for altname in current_nc_ext.value.permitted_subtrees] if current_nc_ext else []
            current_nc_excl = [str(altname) for altname in current_nc_ext.value.excluded_subtrees] if current_nc_ext else []
            nc_perm = [str(cryptography_get_name(altname)) for altname in self.name_constraints_permitted]
            nc_excl = [str(cryptography_get_name(altname)) for altname in self.name_constraints_excluded]
            if set(nc_perm) != set(current_nc_perm) or set(nc_excl) != set(current_nc_excl):
                return False
            if nc_perm or nc_excl:
                if current_nc_ext.critical != self.name_constraints_critical:
                    return False
            return True

        def _check_subject_key_identifier(extensions):
            ext = _find_extension(extensions, cryptography.x509.SubjectKeyIdentifier)
            if self.create_subject_key_identifier or self.subject_key_identifier is not None:
                # The extension must exist and must not be critical.
                if not ext or ext.critical:
                    return False
                if self.create_subject_key_identifier:
                    digest = cryptography.x509.SubjectKeyIdentifier.from_public_key(self.privatekey.public_key()).digest
                    return ext.value.digest == digest
                else:
                    return ext.value.digest == self.subject_key_identifier
            else:
                return ext is None

        def _check_authority_key_identifier(extensions):
            ext = _find_extension(extensions, cryptography.x509.AuthorityKeyIdentifier)
            if self.authority_key_identifier is not None or self.authority_cert_issuer is not None or self.authority_cert_serial_number is not None:
                if not ext or ext.critical:
                    return False
                aci = None
                csr_aci = None
                if self.authority_cert_issuer is not None:
                    aci = [str(cryptography_get_name(n)) for n in self.authority_cert_issuer]
                if ext.value.authority_cert_issuer is not None:
                    csr_aci = [str(n) for n in ext.value.authority_cert_issuer]
                return (ext.value.key_identifier == self.authority_key_identifier
                        and csr_aci == aci
                        and ext.value.authority_cert_serial_number == self.authority_cert_serial_number)
            else:
                return ext is None

        def _check_extensions(csr):
            extensions = csr.extensions
            return (_check_subjectAltName(extensions) and _check_keyUsage(extensions) and
                    _check_extenededKeyUsage(extensions) and _check_basicConstraints(extensions) and
                    _check_ocspMustStaple(extensions) and _check_subject_key_identifier(extensions) and
                    _check_authority_key_identifier(extensions) and _check_nameConstraints(extensions))

        def _check_signature(csr):
            if not csr.is_signature_valid:
                return False
            # To check whether public key of CSR belongs to private key,
            # encode both public keys and compare PEMs.
            key_a = csr.public_key().public_bytes(
                cryptography.hazmat.primitives.serialization.Encoding.PEM,
                cryptography.hazmat.primitives.serialization.PublicFormat.SubjectPublicKeyInfo
            )
            key_b = self.privatekey.public_key().public_bytes(
                cryptography.hazmat.primitives.serialization.Encoding.PEM,
                cryptography.hazmat.primitives.serialization.PublicFormat.SubjectPublicKeyInfo
            )
            return key_a == key_b

        # An unreadable/corrupt CSR counts as "does not match" so that
        # generate() regenerates it.
        try:
            csr = load_certificate_request(self.path, backend='cryptography')
        except Exception as dummy:
            return False

        return _check_subject(csr) and _check_extensions(csr) and _check_signature(csr)
def main():
    """Module entry point.

    Parses the module arguments, picks a crypto backend (cryptography
    preferred, pyOpenSSL as deprecated fallback) and ensures the CSR on
    disk matches the requested state, exiting via module.exit_json() /
    module.fail_json().
    """
    module = AnsibleModule(
        argument_spec=dict(
            state=dict(type='str', default='present', choices=['absent', 'present']),
            digest=dict(type='str', default='sha256'),
            privatekey_path=dict(type='path'),
            # SECURITY: no_log=True keeps the raw private key material out of
            # Ansible logs and module invocation output (CWE-532).
            privatekey_content=dict(type='str', no_log=True),
            privatekey_passphrase=dict(type='str', no_log=True),
            version=dict(type='int', default=1),
            force=dict(type='bool', default=False),
            path=dict(type='path', required=True),
            subject=dict(type='dict'),
            country_name=dict(type='str', aliases=['C', 'countryName']),
            state_or_province_name=dict(type='str', aliases=['ST', 'stateOrProvinceName']),
            locality_name=dict(type='str', aliases=['L', 'localityName']),
            organization_name=dict(type='str', aliases=['O', 'organizationName']),
            organizational_unit_name=dict(type='str', aliases=['OU', 'organizationalUnitName']),
            common_name=dict(type='str', aliases=['CN', 'commonName']),
            email_address=dict(type='str', aliases=['E', 'emailAddress']),
            subject_alt_name=dict(type='list', elements='str', aliases=['subjectAltName']),
            subject_alt_name_critical=dict(type='bool', default=False, aliases=['subjectAltName_critical']),
            use_common_name_for_san=dict(type='bool', default=True, aliases=['useCommonNameForSAN']),
            key_usage=dict(type='list', elements='str', aliases=['keyUsage']),
            key_usage_critical=dict(type='bool', default=False, aliases=['keyUsage_critical']),
            extended_key_usage=dict(type='list', elements='str', aliases=['extKeyUsage', 'extendedKeyUsage']),
            extended_key_usage_critical=dict(type='bool', default=False, aliases=['extKeyUsage_critical', 'extendedKeyUsage_critical']),
            basic_constraints=dict(type='list', elements='str', aliases=['basicConstraints']),
            basic_constraints_critical=dict(type='bool', default=False, aliases=['basicConstraints_critical']),
            ocsp_must_staple=dict(type='bool', default=False, aliases=['ocspMustStaple']),
            ocsp_must_staple_critical=dict(type='bool', default=False, aliases=['ocspMustStaple_critical']),
            name_constraints_permitted=dict(type='list', elements='str'),
            name_constraints_excluded=dict(type='list', elements='str'),
            name_constraints_critical=dict(type='bool', default=False),
            backup=dict(type='bool', default=False),
            create_subject_key_identifier=dict(type='bool', default=False),
            subject_key_identifier=dict(type='str'),
            authority_key_identifier=dict(type='str'),
            authority_cert_issuer=dict(type='list', elements='str'),
            authority_cert_serial_number=dict(type='int'),
            select_crypto_backend=dict(type='str', default='auto', choices=['auto', 'cryptography', 'pyopenssl']),
            return_content=dict(type='bool', default=False),
        ),
        required_together=[('authority_cert_issuer', 'authority_cert_serial_number')],
        required_if=[('state', 'present', ['privatekey_path', 'privatekey_content'], True)],
        mutually_exclusive=(
            ['privatekey_path', 'privatekey_content'],
        ),
        add_file_common_args=True,
        supports_check_mode=True,
    )

    # RFC 2986 only defines version 1; other values are deprecated.
    if module.params['version'] != 1:
        module.deprecate('The version option will only support allowed values from community.crypto 2.0.0 on. '
                         'Currently, only the value 1 is allowed by RFC 2986',
                         version='2.0.0', collection_name='community.crypto')

    base_dir = os.path.dirname(module.params['path']) or '.'
    if not os.path.isdir(base_dir):
        module.fail_json(name=base_dir, msg='The directory %s does not exist or the file is not a directory' % base_dir)

    backend = module.params['select_crypto_backend']
    if backend == 'auto':
        # Detection what is possible
        can_use_cryptography = CRYPTOGRAPHY_FOUND and CRYPTOGRAPHY_VERSION >= LooseVersion(MINIMAL_CRYPTOGRAPHY_VERSION)
        can_use_pyopenssl = PYOPENSSL_FOUND and PYOPENSSL_VERSION >= LooseVersion(MINIMAL_PYOPENSSL_VERSION)

        # First try cryptography, then pyOpenSSL
        if can_use_cryptography:
            backend = 'cryptography'
        elif can_use_pyopenssl:
            backend = 'pyopenssl'

        # Success?
        if backend == 'auto':
            module.fail_json(msg=("Can't detect any of the required Python libraries "
                                  "cryptography (>= {0}) or PyOpenSSL (>= {1})").format(
                                      MINIMAL_CRYPTOGRAPHY_VERSION,
                                      MINIMAL_PYOPENSSL_VERSION))

    try:
        if backend == 'pyopenssl':
            if not PYOPENSSL_FOUND:
                module.fail_json(msg=missing_required_lib('pyOpenSSL >= {0}'.format(MINIMAL_PYOPENSSL_VERSION)),
                                 exception=PYOPENSSL_IMP_ERR)
            try:
                getattr(crypto.X509Req, 'get_extensions')
            except AttributeError:
                module.fail_json(msg='You need to have PyOpenSSL>=0.15 to generate CSRs')
            module.deprecate('The module is using the PyOpenSSL backend. This backend has been deprecated',
                             version='2.0.0', collection_name='community.crypto')
            csr = CertificateSigningRequestPyOpenSSL(module)
        elif backend == 'cryptography':
            if not CRYPTOGRAPHY_FOUND:
                module.fail_json(msg=missing_required_lib('cryptography >= {0}'.format(MINIMAL_CRYPTOGRAPHY_VERSION)),
                                 exception=CRYPTOGRAPHY_IMP_ERR)
            csr = CertificateSigningRequestCryptography(module)

        if module.params['state'] == 'present':
            if module.check_mode:
                result = csr.dump()
                result['changed'] = module.params['force'] or not csr.check(module)
                module.exit_json(**result)

            csr.generate(module)
        else:
            if module.check_mode:
                result = csr.dump()
                result['changed'] = os.path.exists(module.params['path'])
                module.exit_json(**result)

            csr.remove(module)

        result = csr.dump()
        module.exit_json(**result)
    except OpenSSLObjectError as exc:
        module.fail_json(msg=to_native(exc))
# Standard Ansible module entry point: run only when executed as a script.
if __name__ == "__main__":
    main()
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Yanis Guenane <yanis+ansible@guenane.org>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
module: openssl_csr
short_description: Generate OpenSSL Certificate Signing Request (CSR)
description:
- This module allows one to (re)generate OpenSSL certificate signing requests.
- It uses the pyOpenSSL python library to interact with openssl. This module supports
the subjectAltName, keyUsage, extendedKeyUsage, basicConstraints and OCSP Must Staple
extensions.
- "Please note that the module regenerates existing CSR if it doesn't match the module's
options, or if it seems to be corrupt. If you are concerned that this could overwrite
your existing CSR, consider using the I(backup) option."
- "The module can use the cryptography Python library, or the pyOpenSSL Python
library. By default, it tries to detect which one is available. This can be
overridden with the I(select_crypto_backend) option. Please note that the
PyOpenSSL backend was deprecated in Ansible 2.9 and will be removed in community.crypto 2.0.0."
requirements:
- Either cryptography >= 1.3
- Or pyOpenSSL >= 0.15
author:
- Yanis Guenane (@Spredzy)
options:
state:
description:
- Whether the certificate signing request should exist or not, taking action if the state is different from what is stated.
type: str
default: present
choices: [ absent, present ]
digest:
description:
- The digest used when signing the certificate signing request with the private key.
type: str
default: sha256
privatekey_path:
description:
- The path to the private key to use when signing the certificate signing request.
- Either I(privatekey_path) or I(privatekey_content) must be specified if I(state) is C(present), but not both.
type: path
privatekey_content:
description:
- The content of the private key to use when signing the certificate signing request.
- Either I(privatekey_path) or I(privatekey_content) must be specified if I(state) is C(present), but not both.
type: str
version_added: "1.0.0"
privatekey_passphrase:
description:
- The passphrase for the private key.
- This is required if the private key is password protected.
type: str
version:
description:
- The version of the certificate signing request.
- "The only allowed value according to L(RFC 2986,https://tools.ietf.org/html/rfc2986#section-4.1)
is 1."
- This option will no longer accept unsupported values from Ansible 2.14 on.
type: int
default: 1
force:
description:
- Should the certificate signing request be forced regenerated by this ansible module.
type: bool
default: no
path:
description:
- The name of the file into which the generated OpenSSL certificate signing request will be written.
type: path
required: true
subject:
description:
- Key/value pairs that will be present in the subject name field of the certificate signing request.
- If you need to specify more than one value with the same key, use a list as value.
type: dict
country_name:
description:
- The countryName field of the certificate signing request subject.
type: str
aliases: [ C, countryName ]
state_or_province_name:
description:
- The stateOrProvinceName field of the certificate signing request subject.
type: str
aliases: [ ST, stateOrProvinceName ]
locality_name:
description:
- The localityName field of the certificate signing request subject.
type: str
aliases: [ L, localityName ]
organization_name:
description:
- The organizationName field of the certificate signing request subject.
type: str
aliases: [ O, organizationName ]
organizational_unit_name:
description:
- The organizationalUnitName field of the certificate signing request subject.
type: str
aliases: [ OU, organizationalUnitName ]
common_name:
description:
- The commonName field of the certificate signing request subject.
type: str
aliases: [ CN, commonName ]
email_address:
description:
- The emailAddress field of the certificate signing request subject.
type: str
aliases: [ E, emailAddress ]
subject_alt_name:
description:
- SAN extension to attach to the certificate signing request.
- This can either be a 'comma separated string' or a YAML list.
- Values must be prefixed by their options. (i.e., C(email), C(URI), C(DNS), C(RID), C(IP), C(dirName),
C(otherName) and the ones specific to your CA).
      - Note that if no SAN is specified, but a common name, the common
        name will be added as a SAN except if I(use_common_name_for_san) is
        set to C(false).
- More at U(https://tools.ietf.org/html/rfc5280#section-4.2.1.6).
type: list
elements: str
aliases: [ subjectAltName ]
subject_alt_name_critical:
description:
- Should the subjectAltName extension be considered as critical.
type: bool
aliases: [ subjectAltName_critical ]
use_common_name_for_san:
description:
- If set to C(yes), the module will fill the common name in for
C(subject_alt_name) with C(DNS:) prefix if no SAN is specified.
type: bool
default: yes
aliases: [ useCommonNameForSAN ]
key_usage:
description:
- This defines the purpose (e.g. encipherment, signature, certificate signing)
of the key contained in the certificate.
type: list
elements: str
aliases: [ keyUsage ]
key_usage_critical:
description:
- Should the keyUsage extension be considered as critical.
type: bool
aliases: [ keyUsage_critical ]
extended_key_usage:
description:
- Additional restrictions (e.g. client authentication, server authentication)
on the allowed purposes for which the public key may be used.
type: list
elements: str
aliases: [ extKeyUsage, extendedKeyUsage ]
extended_key_usage_critical:
description:
- Should the extkeyUsage extension be considered as critical.
type: bool
aliases: [ extKeyUsage_critical, extendedKeyUsage_critical ]
basic_constraints:
description:
- Indicates basic constraints, such as if the certificate is a CA.
type: list
elements: str
aliases: [ basicConstraints ]
basic_constraints_critical:
description:
- Should the basicConstraints extension be considered as critical.
type: bool
aliases: [ basicConstraints_critical ]
ocsp_must_staple:
description:
- Indicates that the certificate should contain the OCSP Must Staple
extension (U(https://tools.ietf.org/html/rfc7633)).
type: bool
aliases: [ ocspMustStaple ]
ocsp_must_staple_critical:
description:
- Should the OCSP Must Staple extension be considered as critical.
- Note that according to the RFC, this extension should not be marked
as critical, as old clients not knowing about OCSP Must Staple
are required to reject such certificates
(see U(https://tools.ietf.org/html/rfc7633#section-4)).
type: bool
aliases: [ ocspMustStaple_critical ]
name_constraints_permitted:
description:
- For CA certificates, this specifies a list of identifiers which describe
subtrees of names that this CA is allowed to issue certificates for.
- Values must be prefixed by their options. (i.e., C(email), C(URI), C(DNS), C(RID), C(IP), C(dirName),
C(otherName) and the ones specific to your CA).
type: list
elements: str
version_added: 1.1.0
name_constraints_excluded:
description:
- For CA certificates, this specifies a list of identifiers which describe
subtrees of names that this CA is *not* allowed to issue certificates for.
- Values must be prefixed by their options. (i.e., C(email), C(URI), C(DNS), C(RID), C(IP), C(dirName),
C(otherName) and the ones specific to your CA).
type: list
elements: str
version_added: 1.1.0
name_constraints_critical:
description:
- Should the Name Constraints extension be considered as critical.
type: bool
version_added: 1.1.0
select_crypto_backend:
description:
- Determines which crypto backend to use.
- The default choice is C(auto), which tries to use C(cryptography) if available, and falls back to C(pyopenssl).
- If set to C(pyopenssl), will try to use the L(pyOpenSSL,https://pypi.org/project/pyOpenSSL/) library.
- If set to C(cryptography), will try to use the L(cryptography,https://cryptography.io/) library.
- Please note that the C(pyopenssl) backend has been deprecated in Ansible 2.9, and will be removed in community.crypto 2.0.0.
From that point on, only the C(cryptography) backend will be available.
type: str
default: auto
choices: [ auto, cryptography, pyopenssl ]
backup:
description:
- Create a backup file including a timestamp so you can get the original
CSR back if you overwrote it with a new one by accident.
type: bool
default: no
create_subject_key_identifier:
description:
- Create the Subject Key Identifier from the public key.
- "Please note that commercial CAs can ignore the value, respectively use a value of
their own choice instead. Specifying this option is mostly useful for self-signed
certificates or for own CAs."
- Note that this is only supported if the C(cryptography) backend is used!
type: bool
default: no
subject_key_identifier:
description:
- The subject key identifier as a hex string, where two bytes are separated by colons.
- "Example: C(00:11:22:33:44:55:66:77:88:99:aa:bb:cc:dd:ee:ff:00:11:22:33)"
- "Please note that commercial CAs ignore this value, respectively use a value of their
own choice. Specifying this option is mostly useful for self-signed certificates
or for own CAs."
- Note that this option can only be used if I(create_subject_key_identifier) is C(no).
- Note that this is only supported if the C(cryptography) backend is used!
type: str
authority_key_identifier:
description:
- The authority key identifier as a hex string, where two bytes are separated by colons.
- "Example: C(00:11:22:33:44:55:66:77:88:99:aa:bb:cc:dd:ee:ff:00:11:22:33)"
- If specified, I(authority_cert_issuer) must also be specified.
- "Please note that commercial CAs ignore this value, respectively use a value of their
own choice. Specifying this option is mostly useful for self-signed certificates
or for own CAs."
- Note that this is only supported if the C(cryptography) backend is used!
- The C(AuthorityKeyIdentifier) will only be added if at least one of I(authority_key_identifier),
I(authority_cert_issuer) and I(authority_cert_serial_number) is specified.
type: str
authority_cert_issuer:
description:
- Names that will be present in the authority cert issuer field of the certificate signing request.
- Values must be prefixed by their options. (i.e., C(email), C(URI), C(DNS), C(RID), C(IP), C(dirName),
C(otherName) and the ones specific to your CA)
- "Example: C(DNS:ca.example.org)"
- If specified, I(authority_key_identifier) must also be specified.
- "Please note that commercial CAs ignore this value, respectively use a value of their
own choice. Specifying this option is mostly useful for self-signed certificates
or for own CAs."
- Note that this is only supported if the C(cryptography) backend is used!
- The C(AuthorityKeyIdentifier) will only be added if at least one of I(authority_key_identifier),
I(authority_cert_issuer) and I(authority_cert_serial_number) is specified.
type: list
elements: str
authority_cert_serial_number:
description:
- The authority cert serial number.
- Note that this is only supported if the C(cryptography) backend is used!
- "Please note that commercial CAs ignore this value, respectively use a value of their
own choice. Specifying this option is mostly useful for self-signed certificates
or for own CAs."
- The C(AuthorityKeyIdentifier) will only be added if at least one of I(authority_key_identifier),
I(authority_cert_issuer) and I(authority_cert_serial_number) is specified.
type: int
return_content:
description:
- If set to C(yes), will return the (current or generated) CSR's content as I(csr).
type: bool
default: no
version_added: "1.0.0"
extends_documentation_fragment:
- files
notes:
- If the certificate signing request already exists it will be checked whether subjectAltName,
keyUsage, extendedKeyUsage and basicConstraints only contain the requested values, whether
OCSP Must Staple is as requested, and if the request was signed by the given private key.
seealso:
- module: community.crypto.x509_certificate
- module: community.crypto.openssl_dhparam
- module: community.crypto.openssl_pkcs12
- module: community.crypto.openssl_privatekey
- module: community.crypto.openssl_publickey
'''
EXAMPLES = r'''
- name: Generate an OpenSSL Certificate Signing Request
community.crypto.openssl_csr:
path: /etc/ssl/csr/www.ansible.com.csr
privatekey_path: /etc/ssl/private/ansible.com.pem
common_name: www.ansible.com
- name: Generate an OpenSSL Certificate Signing Request with an inline key
community.crypto.openssl_csr:
path: /etc/ssl/csr/www.ansible.com.csr
privatekey_content: "{{ private_key_content }}"
common_name: www.ansible.com
- name: Generate an OpenSSL Certificate Signing Request with a passphrase protected private key
community.crypto.openssl_csr:
path: /etc/ssl/csr/www.ansible.com.csr
privatekey_path: /etc/ssl/private/ansible.com.pem
privatekey_passphrase: ansible
common_name: www.ansible.com
- name: Generate an OpenSSL Certificate Signing Request with Subject information
community.crypto.openssl_csr:
path: /etc/ssl/csr/www.ansible.com.csr
privatekey_path: /etc/ssl/private/ansible.com.pem
country_name: FR
organization_name: Ansible
email_address: jdoe@ansible.com
common_name: www.ansible.com
- name: Generate an OpenSSL Certificate Signing Request with subjectAltName extension
community.crypto.openssl_csr:
path: /etc/ssl/csr/www.ansible.com.csr
privatekey_path: /etc/ssl/private/ansible.com.pem
subject_alt_name: 'DNS:www.ansible.com,DNS:m.ansible.com'
- name: Generate an OpenSSL CSR with subjectAltName extension with dynamic list
community.crypto.openssl_csr:
path: /etc/ssl/csr/www.ansible.com.csr
privatekey_path: /etc/ssl/private/ansible.com.pem
subject_alt_name: "{{ item.value | map('regex_replace', '^', 'DNS:') | list }}"
with_dict:
dns_server:
- www.ansible.com
- m.ansible.com
- name: Force regenerate an OpenSSL Certificate Signing Request
community.crypto.openssl_csr:
path: /etc/ssl/csr/www.ansible.com.csr
privatekey_path: /etc/ssl/private/ansible.com.pem
force: yes
common_name: www.ansible.com
- name: Generate an OpenSSL Certificate Signing Request with special key usages
community.crypto.openssl_csr:
path: /etc/ssl/csr/www.ansible.com.csr
privatekey_path: /etc/ssl/private/ansible.com.pem
common_name: www.ansible.com
key_usage:
- digitalSignature
- keyAgreement
extended_key_usage:
- clientAuth
- name: Generate an OpenSSL Certificate Signing Request with OCSP Must Staple
community.crypto.openssl_csr:
path: /etc/ssl/csr/www.ansible.com.csr
privatekey_path: /etc/ssl/private/ansible.com.pem
common_name: www.ansible.com
ocsp_must_staple: yes
- name: Generate an OpenSSL Certificate Signing Request for WinRM Certificate authentication
community.crypto.openssl_csr:
path: /etc/ssl/csr/winrm.auth.csr
privatekey_path: /etc/ssl/private/winrm.auth.pem
common_name: username
extended_key_usage:
- clientAuth
subject_alt_name: otherName:1.3.6.1.4.1.311.20.2.3;UTF8:username@localhost
'''
RETURN = r'''
privatekey:
description:
- Path to the TLS/SSL private key the CSR was generated for
- Will be C(none) if the private key has been provided in I(privatekey_content).
returned: changed or success
type: str
sample: /etc/ssl/private/ansible.com.pem
filename:
description: Path to the generated Certificate Signing Request
returned: changed or success
type: str
sample: /etc/ssl/csr/www.ansible.com.csr
subject:
description: A list of the subject tuples attached to the CSR
returned: changed or success
type: list
elements: list
sample: "[('CN', 'www.ansible.com'), ('O', 'Ansible')]"
subjectAltName:
description: The alternative names this CSR is valid for
returned: changed or success
type: list
elements: str
sample: [ 'DNS:www.ansible.com', 'DNS:m.ansible.com' ]
keyUsage:
description: Purpose for which the public key may be used
returned: changed or success
type: list
elements: str
sample: [ 'digitalSignature', 'keyAgreement' ]
extendedKeyUsage:
description: Additional restriction on the public key purposes
returned: changed or success
type: list
elements: str
sample: [ 'clientAuth' ]
basicConstraints:
description: Indicates if the certificate belongs to a CA
returned: changed or success
type: list
elements: str
sample: ['CA:TRUE', 'pathLenConstraint:0']
ocsp_must_staple:
description: Indicates whether the certificate has the OCSP
Must Staple feature enabled
returned: changed or success
type: bool
sample: false
name_constraints_permitted:
description: List of permitted subtrees to sign certificates for.
returned: changed or success
type: list
elements: str
sample: ['email:.somedomain.com']
version_added: 1.1.0
name_constraints_excluded:
description: List of excluded subtrees the CA cannot sign certificates for.
returned: changed or success
type: list
elements: str
sample: ['email:.com']
version_added: 1.1.0
backup_file:
description: Name of backup file created.
returned: changed and if I(backup) is C(yes)
type: str
sample: /path/to/www.ansible.com.csr.2019-03-09@11:22~
csr:
description: The (current or generated) CSR's content.
returned: if I(state) is C(present) and I(return_content) is C(yes)
type: str
version_added: "1.0.0"
'''
import abc
import binascii
import os
import traceback
from distutils.version import LooseVersion
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils._text import to_native, to_bytes, to_text
from ansible_collections.community.crypto.plugins.module_utils.io import (
load_file_if_exists,
write_file,
)
from ansible_collections.community.crypto.plugins.module_utils.crypto.basic import (
OpenSSLObjectError,
OpenSSLBadPassphraseError,
)
from ansible_collections.community.crypto.plugins.module_utils.crypto.support import (
OpenSSLObject,
load_privatekey,
load_certificate_request,
parse_name_field,
)
from ansible_collections.community.crypto.plugins.module_utils.crypto.cryptography_support import (
cryptography_get_basic_constraints,
cryptography_get_name,
cryptography_name_to_oid,
cryptography_key_needs_digest_for_signing,
cryptography_parse_key_usage_params,
)
from ansible_collections.community.crypto.plugins.module_utils.crypto.pyopenssl_support import (
pyopenssl_normalize_name_attribute,
pyopenssl_parse_name_constraints,
)
# Minimum supported versions of the two crypto backends.
MINIMAL_PYOPENSSL_VERSION = '0.15'
MINIMAL_CRYPTOGRAPHY_VERSION = '1.3'

# Probe for pyOpenSSL. On import failure the traceback is preserved so it can
# be reported via module.fail_json() later; on success, pick the OCSP Must
# Staple extension identifiers appropriate for the linked OpenSSL version.
PYOPENSSL_IMP_ERR = None
try:
    import OpenSSL
    from OpenSSL import crypto
    PYOPENSSL_VERSION = LooseVersion(OpenSSL.__version__)
except ImportError:
    PYOPENSSL_IMP_ERR = traceback.format_exc()
    PYOPENSSL_FOUND = False
else:
    PYOPENSSL_FOUND = True
    if OpenSSL.SSL.OPENSSL_VERSION_NUMBER >= 0x10100000:
        # OpenSSL 1.1.0 or newer knows the "tlsfeature" short name.
        OPENSSL_MUST_STAPLE_NAME = b"tlsfeature"
        OPENSSL_MUST_STAPLE_VALUE = b"status_request"
    else:
        # OpenSSL 1.0.x or older: fall back to the raw OID and DER encoding.
        OPENSSL_MUST_STAPLE_NAME = b"1.3.6.1.5.5.7.1.24"
        OPENSSL_MUST_STAPLE_VALUE = b"DER:30:03:02:01:05"

# Probe for cryptography, mirroring the pyOpenSSL probe above.
CRYPTOGRAPHY_IMP_ERR = None
try:
    import cryptography
    import cryptography.x509
    import cryptography.x509.oid
    import cryptography.exceptions
    import cryptography.hazmat.backends
    import cryptography.hazmat.primitives.serialization
    import cryptography.hazmat.primitives.hashes
    CRYPTOGRAPHY_VERSION = LooseVersion(cryptography.__version__)
except ImportError:
    CRYPTOGRAPHY_IMP_ERR = traceback.format_exc()
    CRYPTOGRAPHY_FOUND = False
else:
    CRYPTOGRAPHY_FOUND = True
    # OCSP Must Staple (TLS Feature, RFC 7633) OID and its DER-encoded value.
    CRYPTOGRAPHY_MUST_STAPLE_NAME = cryptography.x509.oid.ObjectIdentifier("1.3.6.1.5.5.7.1.24")
    CRYPTOGRAPHY_MUST_STAPLE_VALUE = b"\x30\x03\x02\x01\x05"
class CertificateSigningRequestError(OpenSSLObjectError):
    """Raised for CSR-specific failures (e.g. unparsable subject fields or key identifiers)."""
    pass
class CertificateSigningRequestBase(OpenSSLObject):
    """Backend-agnostic CSR handling.

    Reads and normalizes all module parameters, and implements the
    generate/check/remove/dump lifecycle. Subclasses supply the backend
    specifics via _generate_csr(), _load_private_key() and _check_csr().
    """

    def __init__(self, module):
        super(CertificateSigningRequestBase, self).__init__(
            module.params['path'],
            module.params['state'],
            module.params['force'],
            module.check_mode
        )
        self.digest = module.params['digest']
        self.privatekey_path = module.params['privatekey_path']
        self.privatekey_content = module.params['privatekey_content']
        # Inline key material arrives as text; the loaders expect bytes.
        if self.privatekey_content is not None:
            self.privatekey_content = self.privatekey_content.encode('utf-8')
        self.privatekey_passphrase = module.params['privatekey_passphrase']
        self.version = module.params['version']
        self.subjectAltName = module.params['subject_alt_name']
        self.subjectAltName_critical = module.params['subject_alt_name_critical']
        self.keyUsage = module.params['key_usage']
        self.keyUsage_critical = module.params['key_usage_critical']
        self.extendedKeyUsage = module.params['extended_key_usage']
        self.extendedKeyUsage_critical = module.params['extended_key_usage_critical']
        self.basicConstraints = module.params['basic_constraints']
        self.basicConstraints_critical = module.params['basic_constraints_critical']
        self.ocspMustStaple = module.params['ocsp_must_staple']
        self.ocspMustStaple_critical = module.params['ocsp_must_staple_critical']
        self.name_constraints_permitted = module.params['name_constraints_permitted'] or []
        self.name_constraints_excluded = module.params['name_constraints_excluded'] or []
        self.name_constraints_critical = module.params['name_constraints_critical']
        self.create_subject_key_identifier = module.params['create_subject_key_identifier']
        self.subject_key_identifier = module.params['subject_key_identifier']
        self.authority_key_identifier = module.params['authority_key_identifier']
        self.authority_cert_issuer = module.params['authority_cert_issuer']
        self.authority_cert_serial_number = module.params['authority_cert_serial_number']
        self.request = None
        self.privatekey = None
        self.csr_bytes = None
        self.return_content = module.params['return_content']
        # The two ways of specifying the SKI are mutually exclusive.
        if self.create_subject_key_identifier and self.subject_key_identifier is not None:
            module.fail_json(msg='subject_key_identifier cannot be specified if create_subject_key_identifier is true')
        self.backup = module.params['backup']
        self.backup_file = None

        # Assemble the subject from the individual convenience options first;
        # entries from the free-form `subject` dict are appended after, and
        # entries with an empty value are dropped.
        self.subject = [
            ('C', module.params['country_name']),
            ('ST', module.params['state_or_province_name']),
            ('L', module.params['locality_name']),
            ('O', module.params['organization_name']),
            ('OU', module.params['organizational_unit_name']),
            ('CN', module.params['common_name']),
            ('emailAddress', module.params['email_address']),
        ]

        if module.params['subject']:
            self.subject = self.subject + parse_name_field(module.params['subject'])
        self.subject = [(entry[0], entry[1]) for entry in self.subject if entry[1]]

        # If no SAN was given, optionally fall back to DNS:<common name>.
        self.using_common_name_for_san = False
        if not self.subjectAltName and module.params['use_common_name_for_san']:
            for sub in self.subject:
                if sub[0] in ('commonName', 'CN'):
                    self.subjectAltName = ['DNS:%s' % sub[1]]
                    self.using_common_name_for_san = True
                    break

        # Key identifiers are given as colon-separated hex; convert to raw bytes.
        if self.subject_key_identifier is not None:
            try:
                self.subject_key_identifier = binascii.unhexlify(self.subject_key_identifier.replace(':', ''))
            except Exception as e:
                raise CertificateSigningRequestError('Cannot parse subject_key_identifier: {0}'.format(e))

        if self.authority_key_identifier is not None:
            try:
                self.authority_key_identifier = binascii.unhexlify(self.authority_key_identifier.replace(':', ''))
            except Exception as e:
                raise CertificateSigningRequestError('Cannot parse authority_key_identifier: {0}'.format(e))

    @abc.abstractmethod
    def _generate_csr(self):
        # Backend hook: build, sign and serialize the CSR; return PEM bytes.
        pass

    def generate(self, module):
        '''Generate the certificate signing request (regenerating only if needed or forced).'''
        if not self.check(module, perms_required=False) or self.force:
            result = self._generate_csr()
            if self.backup:
                self.backup_file = module.backup_local(self.path)
            if self.return_content:
                self.csr_bytes = result
            write_file(module, result)
            self.changed = True

        # Apply ownership/permission arguments even when content is unchanged.
        file_args = module.load_file_common_arguments(module.params)
        if module.set_fs_attributes_if_different(file_args, False):
            self.changed = True

    @abc.abstractmethod
    def _load_private_key(self):
        # Backend hook: load the signing key into self.privatekey.
        pass

    @abc.abstractmethod
    def _check_csr(self):
        # Backend hook: return True if the on-disk CSR matches the requested options.
        pass

    def check(self, module, perms_required=True):
        """Ensure the resource is in its desired state."""
        state_and_perms = super(CertificateSigningRequestBase, self).check(module, perms_required)

        # The key is loaded unconditionally so a bad key/passphrase is
        # reported even when the CSR file itself is missing.
        self._load_private_key()

        if not state_and_perms:
            return False

        return self._check_csr()

    def remove(self, module):
        # Back up before deletion if requested, then delegate to the base class.
        if self.backup:
            self.backup_file = module.backup_local(self.path)
        super(CertificateSigningRequestBase, self).remove(module)

    def dump(self):
        '''Serialize the object into a dictionary.'''
        result = {
            'privatekey': self.privatekey_path,
            'filename': self.path,
            'subject': self.subject,
            'subjectAltName': self.subjectAltName,
            'keyUsage': self.keyUsage,
            'extendedKeyUsage': self.extendedKeyUsage,
            'basicConstraints': self.basicConstraints,
            'ocspMustStaple': self.ocspMustStaple,
            'changed': self.changed,
            'name_constraints_permitted': self.name_constraints_permitted,
            'name_constraints_excluded': self.name_constraints_excluded,
        }
        if self.backup_file:
            result['backup_file'] = self.backup_file
        if self.return_content:
            # Fall back to reading the file if the CSR was not (re)generated
            # in this run; ignore_errors keeps check-mode runs from failing.
            if self.csr_bytes is None:
                self.csr_bytes = load_file_if_exists(self.path, ignore_errors=True)
            result['csr'] = self.csr_bytes.decode('utf-8') if self.csr_bytes else None

        return result
class CertificateSigningRequestPyOpenSSL(CertificateSigningRequestBase):
    """CSR implementation backed by the (deprecated) pyOpenSSL library.

    Fix: renamed the internal helper `_check_extenededKeyUsage` to
    `_check_extendedKeyUsage` (typo); the helper is local to _check_csr(),
    so no external interface changes.
    """

    def __init__(self, module):
        # Several options are only implemented in the cryptography backend;
        # reject them up front with a clear message.
        if module.params['create_subject_key_identifier']:
            module.fail_json(msg='You cannot use create_subject_key_identifier with the pyOpenSSL backend!')
        for o in ('subject_key_identifier', 'authority_key_identifier', 'authority_cert_issuer', 'authority_cert_serial_number'):
            if module.params[o] is not None:
                module.fail_json(msg='You cannot use {0} with the pyOpenSSL backend!'.format(o))
        super(CertificateSigningRequestPyOpenSSL, self).__init__(module)

    def _generate_csr(self):
        """Build, sign and PEM-serialize the CSR; returns PEM bytes."""
        req = crypto.X509Req()
        # pyOpenSSL uses zero-based versions; module option is one-based.
        req.set_version(self.version - 1)
        subject = req.get_subject()
        for entry in self.subject:
            if entry[1] is not None:
                # Workaround for https://github.com/pyca/pyopenssl/issues/165
                nid = OpenSSL._util.lib.OBJ_txt2nid(to_bytes(entry[0]))
                if nid == 0:
                    raise CertificateSigningRequestError('Unknown subject field identifier "{0}"'.format(entry[0]))
                res = OpenSSL._util.lib.X509_NAME_add_entry_by_NID(subject._name, nid, OpenSSL._util.lib.MBSTRING_UTF8, to_bytes(entry[1]), -1, -1, 0)
                if res == 0:
                    raise CertificateSigningRequestError('Invalid value for subject field identifier "{0}": {1}'.format(entry[0], entry[1]))

        extensions = []
        if self.subjectAltName:
            altnames = ', '.join(self.subjectAltName)
            try:
                extensions.append(crypto.X509Extension(b"subjectAltName", self.subjectAltName_critical, altnames.encode('ascii')))
            except OpenSSL.crypto.Error as e:
                raise CertificateSigningRequestError(
                    'Error while parsing Subject Alternative Names {0} (check for missing type prefix, such as "DNS:"!): {1}'.format(
                        ', '.join(["{0}".format(san) for san in self.subjectAltName]), str(e)
                    )
                )

        if self.keyUsage:
            usages = ', '.join(self.keyUsage)
            extensions.append(crypto.X509Extension(b"keyUsage", self.keyUsage_critical, usages.encode('ascii')))

        if self.extendedKeyUsage:
            usages = ', '.join(self.extendedKeyUsage)
            extensions.append(crypto.X509Extension(b"extendedKeyUsage", self.extendedKeyUsage_critical, usages.encode('ascii')))

        if self.basicConstraints:
            usages = ', '.join(self.basicConstraints)
            extensions.append(crypto.X509Extension(b"basicConstraints", self.basicConstraints_critical, usages.encode('ascii')))

        if self.name_constraints_permitted or self.name_constraints_excluded:
            usages = ', '.join(
                ['permitted;{0}'.format(name) for name in self.name_constraints_permitted] +
                ['excluded;{0}'.format(name) for name in self.name_constraints_excluded]
            )
            extensions.append(crypto.X509Extension(b"nameConstraints", self.name_constraints_critical, usages.encode('ascii')))

        if self.ocspMustStaple:
            extensions.append(crypto.X509Extension(OPENSSL_MUST_STAPLE_NAME, self.ocspMustStaple_critical, OPENSSL_MUST_STAPLE_VALUE))

        if extensions:
            req.add_extensions(extensions)

        req.set_pubkey(self.privatekey)
        req.sign(self.privatekey, self.digest)
        self.request = req

        return crypto.dump_certificate_request(crypto.FILETYPE_PEM, self.request)

    def _load_private_key(self):
        # Re-raise passphrase problems as the module's own error type.
        try:
            self.privatekey = load_privatekey(
                path=self.privatekey_path,
                content=self.privatekey_content,
                passphrase=self.privatekey_passphrase
            )
        except OpenSSLBadPassphraseError as exc:
            raise CertificateSigningRequestError(exc)

    def _check_csr(self):
        """Return True if the existing CSR matches all requested options and is signed by the key."""

        def _check_subject(csr):
            # Compare subjects as sets of (NID, bytes value) pairs, ignoring order.
            subject = [(OpenSSL._util.lib.OBJ_txt2nid(to_bytes(sub[0])), to_bytes(sub[1])) for sub in self.subject]
            current_subject = [(OpenSSL._util.lib.OBJ_txt2nid(to_bytes(sub[0])), to_bytes(sub[1])) for sub in csr.get_subject().get_components()]
            if not set(subject) == set(current_subject):
                return False
            return True

        def _check_subjectAltName(extensions):
            altnames_ext = next((ext for ext in extensions if ext.get_short_name() == b'subjectAltName'), '')
            altnames = [pyopenssl_normalize_name_attribute(altname.strip()) for altname in
                        to_text(altnames_ext, errors='surrogate_or_strict').split(',') if altname.strip()]
            if self.subjectAltName:
                if (set(altnames) != set([pyopenssl_normalize_name_attribute(to_text(name)) for name in self.subjectAltName]) or
                        altnames_ext.get_critical() != self.subjectAltName_critical):
                    return False
            else:
                if altnames:
                    return False
            return True

        def _check_keyUsage_(extensions, extName, expected, critical):
            # Generic comparison for extensions whose value is a comma-separated
            # list of OBJ_txt2nid-resolvable names.
            usages_ext = [ext for ext in extensions if ext.get_short_name() == extName]
            if (not usages_ext and expected) or (usages_ext and not expected):
                return False
            elif not usages_ext and not expected:
                return True
            else:
                current = [OpenSSL._util.lib.OBJ_txt2nid(to_bytes(usage.strip())) for usage in str(usages_ext[0]).split(',')]
                expected = [OpenSSL._util.lib.OBJ_txt2nid(to_bytes(usage)) for usage in expected]
                return set(current) == set(expected) and usages_ext[0].get_critical() == critical

        def _check_keyUsage(extensions):
            usages_ext = [ext for ext in extensions if ext.get_short_name() == b'keyUsage']
            if (not usages_ext and self.keyUsage) or (usages_ext and not self.keyUsage):
                return False
            elif not usages_ext and not self.keyUsage:
                return True
            else:
                # OpenSSL._util.lib.OBJ_txt2nid() always returns 0 for all keyUsage values
                # (since keyUsage has a fixed bitfield for these values and is not extensible).
                # Therefore, we create an extension for the wanted values, and compare the
                # data of the extensions (which is the serialized bitfield).
                expected_ext = crypto.X509Extension(b"keyUsage", False, ', '.join(self.keyUsage).encode('ascii'))
                return usages_ext[0].get_data() == expected_ext.get_data() and usages_ext[0].get_critical() == self.keyUsage_critical

        def _check_extendedKeyUsage(extensions):
            return _check_keyUsage_(extensions, b'extendedKeyUsage', self.extendedKeyUsage, self.extendedKeyUsage_critical)

        def _check_basicConstraints(extensions):
            return _check_keyUsage_(extensions, b'basicConstraints', self.basicConstraints, self.basicConstraints_critical)

        def _check_nameConstraints(extensions):
            nc_ext = next((ext for ext in extensions if ext.get_short_name() == b'nameConstraints'), '')
            permitted, excluded = pyopenssl_parse_name_constraints(nc_ext)
            if self.name_constraints_permitted or self.name_constraints_excluded:
                if set(permitted) != set([pyopenssl_normalize_name_attribute(to_text(name)) for name in self.name_constraints_permitted]):
                    return False
                if set(excluded) != set([pyopenssl_normalize_name_attribute(to_text(name)) for name in self.name_constraints_excluded]):
                    return False
                if nc_ext.get_critical() != self.name_constraints_critical:
                    return False
            else:
                if permitted or excluded:
                    return False
            return True

        def _check_ocspMustStaple(extensions):
            oms_ext = [ext for ext in extensions if to_bytes(ext.get_short_name()) == OPENSSL_MUST_STAPLE_NAME and to_bytes(ext) == OPENSSL_MUST_STAPLE_VALUE]
            if OpenSSL.SSL.OPENSSL_VERSION_NUMBER < 0x10100000:
                # Older versions of libssl don't know about OCSP Must Staple
                oms_ext.extend([ext for ext in extensions if ext.get_short_name() == b'UNDEF' and ext.get_data() == b'\x30\x03\x02\x01\x05'])
            if self.ocspMustStaple:
                return len(oms_ext) > 0 and oms_ext[0].get_critical() == self.ocspMustStaple_critical
            else:
                return len(oms_ext) == 0

        def _check_extensions(csr):
            extensions = csr.get_extensions()
            return (_check_subjectAltName(extensions) and _check_keyUsage(extensions) and
                    _check_extendedKeyUsage(extensions) and _check_basicConstraints(extensions) and
                    _check_ocspMustStaple(extensions) and _check_nameConstraints(extensions))

        def _check_signature(csr):
            try:
                return csr.verify(self.privatekey)
            except crypto.Error:
                return False

        try:
            csr = load_certificate_request(self.path, backend='pyopenssl')
        except Exception as dummy:
            # A missing or corrupt CSR simply means "needs regeneration".
            return False

        return _check_subject(csr) and _check_extensions(csr) and _check_signature(csr)
class CertificateSigningRequestCryptography(CertificateSigningRequestBase):
    """CSR backend built on the `cryptography` library.

    Generates a CSR from the module options (subject, SAN, key usage,
    extended key usage, basic constraints, OCSP Must Staple, name
    constraints, SKI/AKI) and checks an existing CSR file for idempotence.
    """

    def __init__(self, module):
        super(CertificateSigningRequestCryptography, self).__init__(module)
        self.cryptography_backend = cryptography.hazmat.backends.default_backend()
        self.module = module
        if self.version != 1:
            # cryptography can only emit version 1 CSRs (the only value RFC 2986 allows).
            module.warn('The cryptography backend only supports version 1. (The only valid value according to RFC 2986.)')

    def _generate_csr(self):
        """Build, sign and return the CSR as PEM bytes."""
        csr = cryptography.x509.CertificateSigningRequestBuilder()
        try:
            csr = csr.subject_name(cryptography.x509.Name([
                cryptography.x509.NameAttribute(cryptography_name_to_oid(entry[0]), to_text(entry[1])) for entry in self.subject
            ]))
        except ValueError as e:
            raise CertificateSigningRequestError(e)
        if self.subjectAltName:
            csr = csr.add_extension(cryptography.x509.SubjectAlternativeName([
                cryptography_get_name(name) for name in self.subjectAltName
            ]), critical=self.subjectAltName_critical)
        if self.keyUsage:
            params = cryptography_parse_key_usage_params(self.keyUsage)
            csr = csr.add_extension(cryptography.x509.KeyUsage(**params), critical=self.keyUsage_critical)
        if self.extendedKeyUsage:
            usages = [cryptography_name_to_oid(usage) for usage in self.extendedKeyUsage]
            csr = csr.add_extension(cryptography.x509.ExtendedKeyUsage(usages), critical=self.extendedKeyUsage_critical)
        if self.basicConstraints:
            # (Removed a dead `params = {}` assignment that was never read in this branch.)
            ca, path_length = cryptography_get_basic_constraints(self.basicConstraints)
            csr = csr.add_extension(cryptography.x509.BasicConstraints(ca, path_length), critical=self.basicConstraints_critical)
        if self.ocspMustStaple:
            try:
                # This only works with cryptography >= 2.1
                csr = csr.add_extension(cryptography.x509.TLSFeature([cryptography.x509.TLSFeatureType.status_request]), critical=self.ocspMustStaple_critical)
            except AttributeError as dummy:
                # Older cryptography: emit the raw extension bytes instead.
                csr = csr.add_extension(
                    cryptography.x509.UnrecognizedExtension(CRYPTOGRAPHY_MUST_STAPLE_NAME, CRYPTOGRAPHY_MUST_STAPLE_VALUE),
                    critical=self.ocspMustStaple_critical
                )
        if self.name_constraints_permitted or self.name_constraints_excluded:
            try:
                csr = csr.add_extension(cryptography.x509.NameConstraints(
                    [cryptography_get_name(name) for name in self.name_constraints_permitted],
                    [cryptography_get_name(name) for name in self.name_constraints_excluded],
                ), critical=self.name_constraints_critical)
            except TypeError as e:
                raise OpenSSLObjectError('Error while parsing name constraint: {0}'.format(e))
        if self.create_subject_key_identifier:
            # Derive the SKI from the public key; always non-critical.
            csr = csr.add_extension(
                cryptography.x509.SubjectKeyIdentifier.from_public_key(self.privatekey.public_key()),
                critical=False
            )
        elif self.subject_key_identifier is not None:
            csr = csr.add_extension(cryptography.x509.SubjectKeyIdentifier(self.subject_key_identifier), critical=False)
        if self.authority_key_identifier is not None or self.authority_cert_issuer is not None or self.authority_cert_serial_number is not None:
            issuers = None
            if self.authority_cert_issuer is not None:
                issuers = [cryptography_get_name(n) for n in self.authority_cert_issuer]
            csr = csr.add_extension(
                cryptography.x509.AuthorityKeyIdentifier(self.authority_key_identifier, issuers, self.authority_cert_serial_number),
                critical=False
            )
        digest = None
        if cryptography_key_needs_digest_for_signing(self.privatekey):
            if self.digest == 'sha256':
                digest = cryptography.hazmat.primitives.hashes.SHA256()
            elif self.digest == 'sha384':
                digest = cryptography.hazmat.primitives.hashes.SHA384()
            elif self.digest == 'sha512':
                digest = cryptography.hazmat.primitives.hashes.SHA512()
            elif self.digest == 'sha1':
                digest = cryptography.hazmat.primitives.hashes.SHA1()
            elif self.digest == 'md5':
                digest = cryptography.hazmat.primitives.hashes.MD5()
            # FIXME: SHA-1/MD5 are kept only for legacy compatibility;
            # presumably FIPS builds of OpenSSL will reject them — TODO confirm.
            else:
                raise CertificateSigningRequestError('Unsupported digest "{0}"'.format(self.digest))
        try:
            self.request = csr.sign(self.privatekey, digest, self.cryptography_backend)
        except TypeError as e:
            if str(e) == 'Algorithm must be a registered hash algorithm.' and digest is None:
                self.module.fail_json(msg='Signing with Ed25519 and Ed448 keys requires cryptography 2.8 or newer.')
            raise
        except UnicodeError as e:
            # This catches IDNAErrors, which happens when a bad name is passed as a SAN
            # (https://github.com/ansible-collections/community.crypto/issues/105).
            # For older cryptography versions, this is handled by idna, which raises
            # an idna.core.IDNAError. Later versions of cryptography deprecated and stopped
            # requiring idna, whence we cannot easily handle this error. Fortunately, in
            # most versions of idna, IDNAError extends UnicodeError. There is only version
            # 2.3 where it extends Exception instead (see
            # https://github.com/kjd/idna/commit/ebefacd3134d0f5da4745878620a6a1cba86d130
            # and then
            # https://github.com/kjd/idna/commit/ea03c7b5db7d2a99af082e0239da2b68aeea702a).
            msg = 'Error while creating CSR: {0}\n'.format(e)
            if self.using_common_name_for_san:
                self.module.fail_json(msg=msg + 'This is probably caused because the Common Name is used as a SAN.'
                                            ' Specifying use_common_name_for_san=false might fix this.')
            self.module.fail_json(msg=msg + 'This is probably caused by an invalid Subject Alternative DNS Name.')
        return self.request.public_bytes(cryptography.hazmat.primitives.serialization.Encoding.PEM)

    def _load_private_key(self):
        """Load the private key from `privatekey_content` or `privatekey_path`."""
        try:
            if self.privatekey_content is not None:
                content = self.privatekey_content
            else:
                with open(self.privatekey_path, 'rb') as f:
                    content = f.read()
            self.privatekey = cryptography.hazmat.primitives.serialization.load_pem_private_key(
                content,
                None if self.privatekey_passphrase is None else to_bytes(self.privatekey_passphrase),
                backend=self.cryptography_backend
            )
        except Exception as e:
            raise CertificateSigningRequestError(e)

    def _check_csr(self):
        """Return True if the on-disk CSR matches all module options."""
        def _check_subject(csr):
            # Subject attribute sets must be identical (order-insensitive).
            subject = [(cryptography_name_to_oid(entry[0]), entry[1]) for entry in self.subject]
            current_subject = [(sub.oid, sub.value) for sub in csr.subject]
            return set(subject) == set(current_subject)
        def _find_extension(extensions, exttype):
            # First extension whose parsed value has the given type, or None.
            return next(
                (ext for ext in extensions if isinstance(ext.value, exttype)),
                None
            )
        def _check_subjectAltName(extensions):
            current_altnames_ext = _find_extension(extensions, cryptography.x509.SubjectAlternativeName)
            current_altnames = [str(altname) for altname in current_altnames_ext.value] if current_altnames_ext else []
            altnames = [str(cryptography_get_name(altname)) for altname in self.subjectAltName] if self.subjectAltName else []
            if set(altnames) != set(current_altnames):
                return False
            if altnames:
                # Criticality is only compared when SANs are expected at all.
                if current_altnames_ext.critical != self.subjectAltName_critical:
                    return False
            return True
        def _check_keyUsage(extensions):
            current_keyusage_ext = _find_extension(extensions, cryptography.x509.KeyUsage)
            if not self.keyUsage:
                return current_keyusage_ext is None
            elif current_keyusage_ext is None:
                return False
            params = cryptography_parse_key_usage_params(self.keyUsage)
            for param in params:
                # KeyUsage stores the flags in private attributes named '_<flag>'.
                if getattr(current_keyusage_ext.value, '_' + param) != params[param]:
                    return False
            if current_keyusage_ext.critical != self.keyUsage_critical:
                return False
            return True
        def _check_extenededKeyUsage(extensions):
            current_usages_ext = _find_extension(extensions, cryptography.x509.ExtendedKeyUsage)
            current_usages = [str(usage) for usage in current_usages_ext.value] if current_usages_ext else []
            usages = [str(cryptography_name_to_oid(usage)) for usage in self.extendedKeyUsage] if self.extendedKeyUsage else []
            if set(current_usages) != set(usages):
                return False
            if usages:
                if current_usages_ext.critical != self.extendedKeyUsage_critical:
                    return False
            return True
        def _check_basicConstraints(extensions):
            bc_ext = _find_extension(extensions, cryptography.x509.BasicConstraints)
            current_ca = bc_ext.value.ca if bc_ext else False
            current_path_length = bc_ext.value.path_length if bc_ext else None
            ca, path_length = cryptography_get_basic_constraints(self.basicConstraints)
            # Check CA flag
            if ca != current_ca:
                return False
            # Check path length
            if path_length != current_path_length:
                return False
            # Check criticality
            if self.basicConstraints:
                if bc_ext.critical != self.basicConstraints_critical:
                    return False
            return True
        def _check_ocspMustStaple(extensions):
            try:
                # This only works with cryptography >= 2.1
                tlsfeature_ext = _find_extension(extensions, cryptography.x509.TLSFeature)
                has_tlsfeature = True
            except AttributeError as dummy:
                # Older cryptography: look up the extension by its OID instead.
                tlsfeature_ext = next(
                    (ext for ext in extensions if ext.value.oid == CRYPTOGRAPHY_MUST_STAPLE_NAME),
                    None
                )
                has_tlsfeature = False
            if self.ocspMustStaple:
                if not tlsfeature_ext or tlsfeature_ext.critical != self.ocspMustStaple_critical:
                    return False
                if has_tlsfeature:
                    return cryptography.x509.TLSFeatureType.status_request in tlsfeature_ext.value
                else:
                    return tlsfeature_ext.value.value == CRYPTOGRAPHY_MUST_STAPLE_VALUE
            else:
                return tlsfeature_ext is None
        def _check_nameConstraints(extensions):
            current_nc_ext = _find_extension(extensions, cryptography.x509.NameConstraints)
            current_nc_perm = [str(altname) for altname in current_nc_ext.value.permitted_subtrees] if current_nc_ext else []
            current_nc_excl = [str(altname) for altname in current_nc_ext.value.excluded_subtrees] if current_nc_ext else []
            nc_perm = [str(cryptography_get_name(altname)) for altname in self.name_constraints_permitted]
            nc_excl = [str(cryptography_get_name(altname)) for altname in self.name_constraints_excluded]
            if set(nc_perm) != set(current_nc_perm) or set(nc_excl) != set(current_nc_excl):
                return False
            if nc_perm or nc_excl:
                if current_nc_ext.critical != self.name_constraints_critical:
                    return False
            return True
        def _check_subject_key_identifier(extensions):
            ext = _find_extension(extensions, cryptography.x509.SubjectKeyIdentifier)
            if self.create_subject_key_identifier or self.subject_key_identifier is not None:
                # SKI must exist and be non-critical (it is always added non-critical).
                if not ext or ext.critical:
                    return False
                if self.create_subject_key_identifier:
                    digest = cryptography.x509.SubjectKeyIdentifier.from_public_key(self.privatekey.public_key()).digest
                    return ext.value.digest == digest
                else:
                    return ext.value.digest == self.subject_key_identifier
            else:
                return ext is None
        def _check_authority_key_identifier(extensions):
            ext = _find_extension(extensions, cryptography.x509.AuthorityKeyIdentifier)
            if self.authority_key_identifier is not None or self.authority_cert_issuer is not None or self.authority_cert_serial_number is not None:
                if not ext or ext.critical:
                    return False
                aci = None
                csr_aci = None
                if self.authority_cert_issuer is not None:
                    aci = [str(cryptography_get_name(n)) for n in self.authority_cert_issuer]
                if ext.value.authority_cert_issuer is not None:
                    csr_aci = [str(n) for n in ext.value.authority_cert_issuer]
                return (ext.value.key_identifier == self.authority_key_identifier
                        and csr_aci == aci
                        and ext.value.authority_cert_serial_number == self.authority_cert_serial_number)
            else:
                return ext is None
        def _check_extensions(csr):
            extensions = csr.extensions
            return (_check_subjectAltName(extensions) and _check_keyUsage(extensions) and
                    _check_extenededKeyUsage(extensions) and _check_basicConstraints(extensions) and
                    _check_ocspMustStaple(extensions) and _check_subject_key_identifier(extensions) and
                    _check_authority_key_identifier(extensions) and _check_nameConstraints(extensions))
        def _check_signature(csr):
            if not csr.is_signature_valid:
                return False
            # To check whether public key of CSR belongs to private key,
            # encode both public keys and compare PEMs.
            key_a = csr.public_key().public_bytes(
                cryptography.hazmat.primitives.serialization.Encoding.PEM,
                cryptography.hazmat.primitives.serialization.PublicFormat.SubjectPublicKeyInfo
            )
            key_b = self.privatekey.public_key().public_bytes(
                cryptography.hazmat.primitives.serialization.Encoding.PEM,
                cryptography.hazmat.primitives.serialization.PublicFormat.SubjectPublicKeyInfo
            )
            return key_a == key_b
        try:
            csr = load_certificate_request(self.path, backend='cryptography')
        except Exception as dummy:
            # Unreadable/broken file counts as "does not match" and triggers regeneration.
            return False
        return _check_subject(csr) and _check_extensions(csr) and _check_signature(csr)
def main():
    """Module entry point: parse options, pick a crypto backend, and ensure
    the CSR file is present (and matching the options) or absent."""
    module = AnsibleModule(
        argument_spec=dict(
            state=dict(type='str', default='present', choices=['absent', 'present']),
            digest=dict(type='str', default='sha256'),
            privatekey_path=dict(type='path'),
            privatekey_content=dict(type='str', no_log=True),
            privatekey_passphrase=dict(type='str', no_log=True),
            version=dict(type='int', default=1),
            force=dict(type='bool', default=False),
            path=dict(type='path', required=True),
            subject=dict(type='dict'),
            country_name=dict(type='str', aliases=['C', 'countryName']),
            state_or_province_name=dict(type='str', aliases=['ST', 'stateOrProvinceName']),
            locality_name=dict(type='str', aliases=['L', 'localityName']),
            organization_name=dict(type='str', aliases=['O', 'organizationName']),
            organizational_unit_name=dict(type='str', aliases=['OU', 'organizationalUnitName']),
            common_name=dict(type='str', aliases=['CN', 'commonName']),
            email_address=dict(type='str', aliases=['E', 'emailAddress']),
            subject_alt_name=dict(type='list', elements='str', aliases=['subjectAltName']),
            subject_alt_name_critical=dict(type='bool', default=False, aliases=['subjectAltName_critical']),
            use_common_name_for_san=dict(type='bool', default=True, aliases=['useCommonNameForSAN']),
            key_usage=dict(type='list', elements='str', aliases=['keyUsage']),
            key_usage_critical=dict(type='bool', default=False, aliases=['keyUsage_critical']),
            extended_key_usage=dict(type='list', elements='str', aliases=['extKeyUsage', 'extendedKeyUsage']),
            extended_key_usage_critical=dict(type='bool', default=False, aliases=['extKeyUsage_critical', 'extendedKeyUsage_critical']),
            basic_constraints=dict(type='list', elements='str', aliases=['basicConstraints']),
            basic_constraints_critical=dict(type='bool', default=False, aliases=['basicConstraints_critical']),
            ocsp_must_staple=dict(type='bool', default=False, aliases=['ocspMustStaple']),
            ocsp_must_staple_critical=dict(type='bool', default=False, aliases=['ocspMustStaple_critical']),
            name_constraints_permitted=dict(type='list', elements='str'),
            name_constraints_excluded=dict(type='list', elements='str'),
            name_constraints_critical=dict(type='bool', default=False),
            backup=dict(type='bool', default=False),
            create_subject_key_identifier=dict(type='bool', default=False),
            subject_key_identifier=dict(type='str'),
            authority_key_identifier=dict(type='str'),
            authority_cert_issuer=dict(type='list', elements='str'),
            authority_cert_serial_number=dict(type='int'),
            select_crypto_backend=dict(type='str', default='auto', choices=['auto', 'cryptography', 'pyopenssl']),
            return_content=dict(type='bool', default=False),
        ),
        required_together=[('authority_cert_issuer', 'authority_cert_serial_number')],
        required_if=[('state', 'present', ['privatekey_path', 'privatekey_content'], True)],
        mutually_exclusive=(
            ['privatekey_path', 'privatekey_content'],
        ),
        add_file_common_args=True,
        supports_check_mode=True,
    )
    if module.params['version'] != 1:
        # RFC 2986 only defines version 1; other values are deprecated.
        module.deprecate('The version option will only support allowed values from community.crypto 2.0.0 on. '
                         'Currently, only the value 1 is allowed by RFC 2986',
                         version='2.0.0', collection_name='community.crypto')
    base_dir = os.path.dirname(module.params['path']) or '.'
    if not os.path.isdir(base_dir):
        module.fail_json(name=base_dir, msg='The directory %s does not exist or the file is not a directory' % base_dir)
    backend = module.params['select_crypto_backend']
    if backend == 'auto':
        # Detect which backends are importable and new enough.
        can_use_cryptography = CRYPTOGRAPHY_FOUND and CRYPTOGRAPHY_VERSION >= LooseVersion(MINIMAL_CRYPTOGRAPHY_VERSION)
        can_use_pyopenssl = PYOPENSSL_FOUND and PYOPENSSL_VERSION >= LooseVersion(MINIMAL_PYOPENSSL_VERSION)
        # First try cryptography, then pyOpenSSL
        if can_use_cryptography:
            backend = 'cryptography'
        elif can_use_pyopenssl:
            backend = 'pyopenssl'
        # Still 'auto' means neither backend is usable.
        if backend == 'auto':
            module.fail_json(msg=("Can't detect any of the required Python libraries "
                                  "cryptography (>= {0}) or PyOpenSSL (>= {1})").format(
                MINIMAL_CRYPTOGRAPHY_VERSION,
                MINIMAL_PYOPENSSL_VERSION))
    try:
        if backend == 'pyopenssl':
            if not PYOPENSSL_FOUND:
                module.fail_json(msg=missing_required_lib('pyOpenSSL >= {0}'.format(MINIMAL_PYOPENSSL_VERSION)),
                                 exception=PYOPENSSL_IMP_ERR)
            try:
                # get_extensions() only exists in pyOpenSSL >= 0.15.
                getattr(crypto.X509Req, 'get_extensions')
            except AttributeError:
                module.fail_json(msg='You need to have PyOpenSSL>=0.15 to generate CSRs')
            module.deprecate('The module is using the PyOpenSSL backend. This backend has been deprecated',
                             version='2.0.0', collection_name='community.crypto')
            csr = CertificateSigningRequestPyOpenSSL(module)
        elif backend == 'cryptography':
            if not CRYPTOGRAPHY_FOUND:
                module.fail_json(msg=missing_required_lib('cryptography >= {0}'.format(MINIMAL_CRYPTOGRAPHY_VERSION)),
                                 exception=CRYPTOGRAPHY_IMP_ERR)
            csr = CertificateSigningRequestCryptography(module)
        if module.params['state'] == 'present':
            if module.check_mode:
                # In check mode only report whether a (re)generation would happen.
                result = csr.dump()
                result['changed'] = module.params['force'] or not csr.check(module)
                module.exit_json(**result)
            csr.generate(module)
        else:
            if module.check_mode:
                result = csr.dump()
                result['changed'] = os.path.exists(module.params['path'])
                module.exit_json(**result)
            csr.remove(module)
        result = csr.dump()
        module.exit_json(**result)
    except OpenSSLObjectError as exc:
        module.fail_json(msg=to_native(exc))
if __name__ == "__main__":
    main()
|
4295_1
|
crossvul
|
py
|
CWE-116
|
Improper Encoding or Escaping of Output - Improper encoding or escaping can allow attackers to change the commands that are sent to another component, inserting malicious commands instead.
|
python
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2016, Yanis Guenane <yanis+ansible@guenane.org>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
module: openssl_privatekey
short_description: Generate OpenSSL private keys
description:
- This module allows one to (re)generate OpenSSL private keys.
- One can generate L(RSA,https://en.wikipedia.org/wiki/RSA_%28cryptosystem%29),
L(DSA,https://en.wikipedia.org/wiki/Digital_Signature_Algorithm),
L(ECC,https://en.wikipedia.org/wiki/Elliptic-curve_cryptography) or
L(EdDSA,https://en.wikipedia.org/wiki/EdDSA) private keys.
- Keys are generated in PEM format.
- "Please note that the module regenerates private keys if they don't match
the module's options. In particular, if you provide another passphrase
(or specify none), change the keysize, etc., the private key will be
regenerated. If you are concerned that this could **overwrite your private key**,
consider using the I(backup) option."
- "The module can use the cryptography Python library, or the pyOpenSSL Python
library. By default, it tries to detect which one is available. This can be
overridden with the I(select_crypto_backend) option. Please note that the
PyOpenSSL backend was deprecated in Ansible 2.9 and will be removed in Ansible 2.13."
requirements:
- Either cryptography >= 1.2.3 (older versions might work as well)
- Or pyOpenSSL
author:
- Yanis Guenane (@Spredzy)
- Felix Fontein (@felixfontein)
options:
state:
description:
- Whether the private key should exist or not, taking action if the state is different from what is stated.
type: str
default: present
choices: [ absent, present ]
size:
description:
- Size (in bits) of the TLS/SSL key to generate.
type: int
default: 4096
type:
description:
- The algorithm used to generate the TLS/SSL private key.
- Note that C(ECC), C(X25519), C(X448), C(Ed25519) and C(Ed448) require the C(cryptography) backend.
C(X25519) needs cryptography 2.5 or newer, while C(X448), C(Ed25519) and C(Ed448) require
cryptography 2.6 or newer. For C(ECC), the minimal cryptography version required depends on the
I(curve) option.
type: str
default: RSA
choices: [ DSA, ECC, Ed25519, Ed448, RSA, X25519, X448 ]
curve:
description:
- Note that not all curves are supported by all versions of C(cryptography).
- For maximal interoperability, C(secp384r1) or C(secp256r1) should be used.
- We use the curve names as defined in the
L(IANA registry for TLS,https://www.iana.org/assignments/tls-parameters/tls-parameters.xhtml#tls-parameters-8).
type: str
choices:
- secp384r1
- secp521r1
- secp224r1
- secp192r1
- secp256r1
- secp256k1
- brainpoolP256r1
- brainpoolP384r1
- brainpoolP512r1
- sect571k1
- sect409k1
- sect283k1
- sect233k1
- sect163k1
- sect571r1
- sect409r1
- sect283r1
- sect233r1
- sect163r2
force:
description:
- Should the key be regenerated even if it already exists.
type: bool
default: no
path:
description:
- Name of the file in which the generated TLS/SSL private key will be written. It will have 0600 mode.
type: path
required: true
passphrase:
description:
- The passphrase for the private key.
type: str
cipher:
description:
- The cipher to encrypt the private key. (Valid values can be found by
running `openssl list -cipher-algorithms` or `openssl list-cipher-algorithms`,
depending on your OpenSSL version.)
- When using the C(cryptography) backend, use C(auto).
type: str
select_crypto_backend:
description:
- Determines which crypto backend to use.
- The default choice is C(auto), which tries to use C(cryptography) if available, and falls back to C(pyopenssl).
- If set to C(pyopenssl), will try to use the L(pyOpenSSL,https://pypi.org/project/pyOpenSSL/) library.
- If set to C(cryptography), will try to use the L(cryptography,https://cryptography.io/) library.
- Please note that the C(pyopenssl) backend has been deprecated in Ansible 2.9, and will be removed in community.crypto 2.0.0.
From that point on, only the C(cryptography) backend will be available.
type: str
default: auto
choices: [ auto, cryptography, pyopenssl ]
format:
description:
- Determines which format the private key is written in. By default, PKCS1 (traditional OpenSSL format)
is used for all keys which support it. Please note that not every key can be exported in any format.
- The value C(auto) selects a format based on the key format. The value C(auto_ignore) does the same,
but for existing private key files, it will not force a regenerate when its format is not the automatically
selected one for generation.
- Note that if the format for an existing private key mismatches, the key is *regenerated* by default.
To change this behavior, use the I(format_mismatch) option.
- The I(format) option is only supported by the C(cryptography) backend. The C(pyopenssl) backend will
fail if a value different from C(auto_ignore) is used.
type: str
default: auto_ignore
choices: [ pkcs1, pkcs8, raw, auto, auto_ignore ]
version_added: '1.0.0'
format_mismatch:
description:
- Determines behavior of the module if the format of a private key does not match the expected format, but all
other parameters are as expected.
- If set to C(regenerate) (default), generates a new private key.
- If set to C(convert), the key will be converted to the new format instead.
- Only supported by the C(cryptography) backend.
type: str
default: regenerate
choices: [ regenerate, convert ]
version_added: '1.0.0'
backup:
description:
- Create a backup file including a timestamp so you can get
the original private key back if you overwrote it with a new one by accident.
type: bool
default: no
return_content:
description:
- If set to C(yes), will return the (current or generated) private key's content as I(privatekey).
- Note that especially if the private key is not encrypted, you have to make sure that the returned
value is treated appropriately and not accidentally written to logs etc.! Use with care!
type: bool
default: no
version_added: '1.0.0'
regenerate:
description:
- Allows to configure in which situations the module is allowed to regenerate private keys.
The module will always generate a new key if the destination file does not exist.
- By default, the key will be regenerated when it doesn't match the module's options,
except when the key cannot be read or the passphrase does not match. Please note that
this B(changed) for Ansible 2.10. For Ansible 2.9, the behavior was as if C(full_idempotence)
is specified.
- If set to C(never), the module will fail if the key cannot be read or the passphrase
isn't matching, and will never regenerate an existing key.
- If set to C(fail), the module will fail if the key does not correspond to the module's
options.
- If set to C(partial_idempotence), the key will be regenerated if it does not conform to
the module's options. The key is B(not) regenerated if it cannot be read (broken file),
the key is protected by an unknown passphrase, or when the key is not protected by a
passphrase, but a passphrase is specified.
- If set to C(full_idempotence), the key will be regenerated if it does not conform to the
module's options. This is also the case if the key cannot be read (broken file), the key
is protected by an unknown passphrase, or when the key is not protected by a passphrase,
but a passphrase is specified. Make sure you have a B(backup) when using this option!
- If set to C(always), the module will always regenerate the key. This is equivalent to
setting I(force) to C(yes).
- Note that if I(format_mismatch) is set to C(convert) and everything matches except the
format, the key will always be converted, except if I(regenerate) is set to C(always).
type: str
choices:
- never
- fail
- partial_idempotence
- full_idempotence
- always
default: full_idempotence
version_added: '1.0.0'
extends_documentation_fragment:
- files
seealso:
- module: community.crypto.x509_certificate
- module: community.crypto.openssl_csr
- module: community.crypto.openssl_dhparam
- module: community.crypto.openssl_pkcs12
- module: community.crypto.openssl_publickey
'''
EXAMPLES = r'''
- name: Generate an OpenSSL private key with the default values (4096 bits, RSA)
community.crypto.openssl_privatekey:
path: /etc/ssl/private/ansible.com.pem
- name: Generate an OpenSSL private key with the default values (4096 bits, RSA) and a passphrase
community.crypto.openssl_privatekey:
path: /etc/ssl/private/ansible.com.pem
passphrase: ansible
cipher: aes256
- name: Generate an OpenSSL private key with a different size (2048 bits)
community.crypto.openssl_privatekey:
path: /etc/ssl/private/ansible.com.pem
size: 2048
- name: Force regenerate an OpenSSL private key if it already exists
community.crypto.openssl_privatekey:
path: /etc/ssl/private/ansible.com.pem
force: yes
- name: Generate an OpenSSL private key with a different algorithm (DSA)
community.crypto.openssl_privatekey:
path: /etc/ssl/private/ansible.com.pem
type: DSA
'''
RETURN = r'''
size:
description: Size (in bits) of the TLS/SSL private key.
returned: changed or success
type: int
sample: 4096
type:
description: Algorithm used to generate the TLS/SSL private key.
returned: changed or success
type: str
sample: RSA
curve:
description: Elliptic curve used to generate the TLS/SSL private key.
returned: changed or success, and I(type) is C(ECC)
type: str
sample: secp256r1
filename:
description: Path to the generated TLS/SSL private key file.
returned: changed or success
type: str
sample: /etc/ssl/private/ansible.com.pem
fingerprint:
description:
- The fingerprint of the public key. Fingerprint will be generated for each C(hashlib.algorithms) available.
- The PyOpenSSL backend requires PyOpenSSL >= 16.0 for meaningful output.
returned: changed or success
type: dict
sample:
md5: "84:75:71:72:8d:04:b5:6c:4d:37:6d:66:83:f5:4c:29"
sha1: "51:cc:7c:68:5d:eb:41:43:88:7e:1a:ae:c7:f8:24:72:ee:71:f6:10"
sha224: "b1:19:a6:6c:14:ac:33:1d:ed:18:50:d3:06:5c:b2:32:91:f1:f1:52:8c:cb:d5:75:e9:f5:9b:46"
sha256: "41:ab:c7:cb:d5:5f:30:60:46:99:ac:d4:00:70:cf:a1:76:4f:24:5d:10:24:57:5d:51:6e:09:97:df:2f:de:c7"
sha384: "85:39:50:4e:de:d9:19:33:40:70:ae:10:ab:59:24:19:51:c3:a2:e4:0b:1c:b1:6e:dd:b3:0c:d9:9e:6a:46:af:da:18:f8:ef:ae:2e:c0:9a:75:2c:9b:b3:0f:3a:5f:3d"
sha512: "fd:ed:5e:39:48:5f:9f:fe:7f:25:06:3f:79:08:cd:ee:a5:e7:b3:3d:13:82:87:1f:84:e1:f5:c7:28:77:53:94:86:56:38:69:f0:d9:35:22:01:1e:a6:60:...:0f:9b"
backup_file:
description: Name of backup file created.
returned: changed and if I(backup) is C(yes)
type: str
sample: /path/to/privatekey.pem.2019-03-09@11:22~
privatekey:
description:
- The (current or generated) private key's content.
- Will be Base64-encoded if the key is in raw format.
returned: if I(state) is C(present) and I(return_content) is C(yes)
type: str
version_added: '1.0.0'
'''
import abc
import base64
import os
import traceback
from distutils.version import LooseVersion
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils._text import to_native, to_bytes
from ansible_collections.community.crypto.plugins.module_utils.io import (
load_file_if_exists,
write_file,
)
from ansible_collections.community.crypto.plugins.module_utils.crypto.basic import (
CRYPTOGRAPHY_HAS_X25519,
CRYPTOGRAPHY_HAS_X25519_FULL,
CRYPTOGRAPHY_HAS_X448,
CRYPTOGRAPHY_HAS_ED25519,
CRYPTOGRAPHY_HAS_ED448,
OpenSSLObjectError,
OpenSSLBadPassphraseError,
)
from ansible_collections.community.crypto.plugins.module_utils.crypto.support import (
OpenSSLObject,
load_privatekey,
get_fingerprint,
get_fingerprint_of_bytes,
)
from ansible_collections.community.crypto.plugins.module_utils.crypto.identify import (
identify_private_key_format,
)
# Minimal backend versions this module supports.
MINIMAL_PYOPENSSL_VERSION = '0.6'
MINIMAL_CRYPTOGRAPHY_VERSION = '1.2.3'
# Probe for pyOpenSSL; record the import error for later reporting via fail_json.
PYOPENSSL_IMP_ERR = None
try:
    import OpenSSL
    from OpenSSL import crypto
    PYOPENSSL_VERSION = LooseVersion(OpenSSL.__version__)
except ImportError:
    PYOPENSSL_IMP_ERR = traceback.format_exc()
    PYOPENSSL_FOUND = False
else:
    PYOPENSSL_FOUND = True
# Probe for cryptography the same way.
CRYPTOGRAPHY_IMP_ERR = None
try:
    import cryptography
    import cryptography.exceptions
    import cryptography.hazmat.backends
    import cryptography.hazmat.primitives.serialization
    import cryptography.hazmat.primitives.asymmetric.rsa
    import cryptography.hazmat.primitives.asymmetric.dsa
    import cryptography.hazmat.primitives.asymmetric.ec
    import cryptography.hazmat.primitives.asymmetric.utils
    CRYPTOGRAPHY_VERSION = LooseVersion(cryptography.__version__)
except ImportError:
    CRYPTOGRAPHY_IMP_ERR = traceback.format_exc()
    CRYPTOGRAPHY_FOUND = False
else:
    CRYPTOGRAPHY_FOUND = True
class PrivateKeyError(OpenSSLObjectError):
    """Error raised by the private key handling code of this module."""
    pass
class PrivateKeyBase(OpenSSLObject):
def __init__(self, module):
    """Capture the module options that drive key generation and idempotence checks."""
    super(PrivateKeyBase, self).__init__(
        module.params['path'],
        module.params['state'],
        module.params['force'],
        module.check_mode
    )
    self.size = module.params['size']
    self.passphrase = module.params['passphrase']
    self.cipher = module.params['cipher']
    self.privatekey = None
    self.fingerprint = {}
    self.format = module.params['format']
    self.format_mismatch = module.params['format_mismatch']
    self.privatekey_bytes = None
    self.return_content = module.params['return_content']
    self.regenerate = module.params['regenerate']
    if self.regenerate == 'always':
        # regenerate=always is equivalent to force=yes
        self.force = True
    self.backup = module.params['backup']
    self.backup_file = None
    if module.params['mode'] is None:
        # Private keys default to owner-only file permissions.
        module.params['mode'] = '0600'
@abc.abstractmethod
def _generate_private_key(self):
    """(Re-)Generate private key."""
    pass
@abc.abstractmethod
def _ensure_private_key_loaded(self):
    """Make sure that the private key has been loaded."""
    pass
@abc.abstractmethod
def _get_private_key_data(self):
    """Return bytes for self.privatekey"""
    pass
@abc.abstractmethod
def _get_fingerprint(self):
    """Return the fingerprint dict for the key's public part."""
    pass
def generate(self, module):
    """Generate a keypair."""
    if not self.check(module, perms_required=False, ignore_conversion=True) or self.force:
        # Regenerate: key missing, mismatching, or force requested.
        if self.backup:
            self.backup_file = module.backup_local(self.path)
        self._generate_private_key()
        privatekey_data = self._get_private_key_data()
        if self.return_content:
            self.privatekey_bytes = privatekey_data
        write_file(module, privatekey_data, 0o600)
        self.changed = True
    elif not self.check(module, perms_required=False, ignore_conversion=False):
        # Convert: only the on-disk format differs (format_mismatch=convert);
        # re-serialize the existing key instead of generating a new one.
        if self.backup:
            self.backup_file = module.backup_local(self.path)
        self._ensure_private_key_loaded()
        privatekey_data = self._get_private_key_data()
        if self.return_content:
            self.privatekey_bytes = privatekey_data
        write_file(module, privatekey_data, 0o600)
        self.changed = True
    self.fingerprint = self._get_fingerprint()
    file_args = module.load_file_common_arguments(module.params)
    if module.set_fs_attributes_if_different(file_args, False):
        self.changed = True
    def remove(self, module):
        """Remove the private key file, backing it up first when requested."""
        if self.backup:
            self.backup_file = module.backup_local(self.path)
        super(PrivateKeyBase, self).remove(module)
    @abc.abstractmethod
    def _check_passphrase(self):
        # Returns a truthy value when the key on disk can be read with the
        # configured passphrase (or without one if none is configured).
        pass
    @abc.abstractmethod
    def _check_size_and_type(self):
        # Returns True when the existing key matches the requested
        # algorithm type and size/curve.
        pass
    @abc.abstractmethod
    def _check_format(self):
        # Returns True when the on-disk serialization format matches the
        # wanted format (backends without format support always return True).
        pass
def check(self, module, perms_required=True, ignore_conversion=True):
"""Ensure the resource is in its desired state."""
state_and_perms = super(PrivateKeyBase, self).check(module, perms_required=False)
if not state_and_perms:
# key does not exist
return False
if not self._check_passphrase():
if self.regenerate in ('full_idempotence', 'always'):
return False
module.fail_json(msg='Unable to read the key. The key is protected with a another passphrase / no passphrase or broken.'
' Will not proceed. To force regeneration, call the module with `generate`'
' set to `full_idempotence` or `always`, or with `force=yes`.')
if self.regenerate != 'never':
if not self._check_size_and_type():
if self.regenerate in ('partial_idempotence', 'full_idempotence', 'always'):
return False
module.fail_json(msg='Key has wrong type and/or size.'
' Will not proceed. To force regeneration, call the module with `generate`'
' set to `partial_idempotence`, `full_idempotence` or `always`, or with `force=yes`.')
if not self._check_format():
# During conversion step, convert if format does not match and format_mismatch == 'convert'
if not ignore_conversion and self.format_mismatch == 'convert':
return False
# During generation step, regenerate if format does not match and format_mismatch == 'regenerate'
if ignore_conversion and self.format_mismatch == 'regenerate' and self.regenerate != 'never':
if not ignore_conversion or self.regenerate in ('partial_idempotence', 'full_idempotence', 'always'):
return False
module.fail_json(msg='Key has wrong format.'
' Will not proceed. To force regeneration, call the module with `generate`'
' set to `partial_idempotence`, `full_idempotence` or `always`, or with `force=yes`.'
' To convert the key, set `format_mismatch` to `convert`.')
# check whether permissions are correct (in case that needs to be checked)
return not perms_required or super(PrivateKeyBase, self).check(module, perms_required=perms_required)
    def dump(self):
        """Serialize the object into a dictionary.

        Always includes size/filename/changed/fingerprint; adds backup_file
        when a backup was made and privatekey when return_content is set.
        """
        result = {
            'size': self.size,
            'filename': self.path,
            'changed': self.changed,
            'fingerprint': self.fingerprint,
        }
        if self.backup_file:
            result['backup_file'] = self.backup_file
        if self.return_content:
            # Fall back to reading the file if no in-memory copy was kept.
            if self.privatekey_bytes is None:
                self.privatekey_bytes = load_file_if_exists(self.path, ignore_errors=True)
            if self.privatekey_bytes:
                if identify_private_key_format(self.privatekey_bytes) == 'raw':
                    # Raw keys are binary, so they are transported Base64-encoded.
                    # NOTE(review): base64.b64encode() returns bytes on Python 3;
                    # presumably Ansible's result serialization handles this — confirm.
                    result['privatekey'] = base64.b64encode(self.privatekey_bytes)
                else:
                    result['privatekey'] = self.privatekey_bytes.decode('utf-8')
            else:
                result['privatekey'] = None
        return result
# Implementation with using pyOpenSSL
class PrivateKeyPyOpenSSL(PrivateKeyBase):
    """Private key implementation backed by the pyOpenSSL library.

    Only RSA and DSA keys and the 'auto_ignore' format are supported;
    anything else is rejected in the constructor.
    """
    def __init__(self, module):
        super(PrivateKeyPyOpenSSL, self).__init__(module)
        if module.params['type'] == 'RSA':
            self.type = crypto.TYPE_RSA
        elif module.params['type'] == 'DSA':
            self.type = crypto.TYPE_DSA
        else:
            module.fail_json(msg="PyOpenSSL backend only supports RSA and DSA keys.")
        if self.format != 'auto_ignore':
            module.fail_json(msg="PyOpenSSL backend only supports auto_ignore format.")
    def _generate_private_key(self):
        """(Re-)Generate private key."""
        self.privatekey = crypto.PKey()
        try:
            self.privatekey.generate_key(self.type, self.size)
        except (TypeError, ValueError) as exc:
            raise PrivateKeyError(exc)
    def _ensure_private_key_loaded(self):
        """Make sure that the private key has been loaded."""
        if self.privatekey is None:
            try:
                # (The redundant chained local alias from the original code
                # has been removed; it was never used.)
                self.privatekey = load_privatekey(self.path, self.passphrase)
            except OpenSSLBadPassphraseError as exc:
                raise PrivateKeyError(exc)
    def _get_private_key_data(self):
        """Return PEM bytes for self.privatekey, encrypted when cipher and passphrase are set."""
        if self.cipher and self.passphrase:
            return crypto.dump_privatekey(crypto.FILETYPE_PEM, self.privatekey,
                                          self.cipher, to_bytes(self.passphrase))
        else:
            return crypto.dump_privatekey(crypto.FILETYPE_PEM, self.privatekey)
    def _get_fingerprint(self):
        """Return fingerprints of the public key belonging to the key on disk."""
        return get_fingerprint(self.path, self.passphrase)
    def _check_passphrase(self):
        """Return True when the key file can be loaded with the configured passphrase."""
        try:
            load_privatekey(self.path, self.passphrase)
            return True
        except Exception as dummy:
            return False
    def _check_size_and_type(self):
        """Return True when the existing key matches the requested size and type."""
        def _check_size(privatekey):
            return self.size == privatekey.bits()
        def _check_type(privatekey):
            return self.type == privatekey.type()
        self._ensure_private_key_loaded()
        return _check_size(self.privatekey) and _check_type(self.privatekey)
    def _check_format(self):
        # Not supported by this backend
        return True
    def dump(self):
        """Serialize the object into a dictionary, adding the key type."""
        result = super(PrivateKeyPyOpenSSL, self).dump()
        if self.type == crypto.TYPE_RSA:
            result['type'] = 'RSA'
        else:
            result['type'] = 'DSA'
        return result
# Implementation with using cryptography
class PrivateKeyCryptography(PrivateKeyBase):
    """Private key implementation backed by the cryptography library.

    Supports RSA, DSA, ECC and (where the installed cryptography version
    allows) X25519/X448/Ed25519/Ed448 keys, as well as PKCS#1/PKCS#8/raw
    output formats.
    """
    def _get_ec_class(self, ectype):
        # Look up the curve class by name in the ec module; fail the module
        # run if this cryptography version does not provide it.
        ecclass = cryptography.hazmat.primitives.asymmetric.ec.__dict__.get(ectype)
        if ecclass is None:
            self.module.fail_json(msg='Your cryptography version does not support {0}'.format(ectype))
        return ecclass
    def _add_curve(self, name, ectype, deprecated=False):
        # Register a curve under its IANA name with lazy create/verify closures.
        def create(size):
            # NOTE(review): `size` is ignored — the curve itself determines
            # the key size; confirm this is intentional.
            ecclass = self._get_ec_class(ectype)
            return ecclass()
        def verify(privatekey):
            # True when the loaded key uses this curve.
            ecclass = self._get_ec_class(ectype)
            return isinstance(privatekey.private_numbers().public_numbers.curve, ecclass)
        self.curves[name] = {
            'create': create,
            'verify': verify,
            'deprecated': deprecated,
        }
    def __init__(self, module):
        super(PrivateKeyCryptography, self).__init__(module)
        # Known curves; 'deprecated' ones trigger a warning on generation.
        self.curves = dict()
        self._add_curve('secp384r1', 'SECP384R1')
        self._add_curve('secp521r1', 'SECP521R1')
        self._add_curve('secp224r1', 'SECP224R1')
        self._add_curve('secp192r1', 'SECP192R1')
        self._add_curve('secp256r1', 'SECP256R1')
        self._add_curve('secp256k1', 'SECP256K1')
        self._add_curve('brainpoolP256r1', 'BrainpoolP256R1', deprecated=True)
        self._add_curve('brainpoolP384r1', 'BrainpoolP384R1', deprecated=True)
        self._add_curve('brainpoolP512r1', 'BrainpoolP512R1', deprecated=True)
        self._add_curve('sect571k1', 'SECT571K1', deprecated=True)
        self._add_curve('sect409k1', 'SECT409K1', deprecated=True)
        self._add_curve('sect283k1', 'SECT283K1', deprecated=True)
        self._add_curve('sect233k1', 'SECT233K1', deprecated=True)
        self._add_curve('sect163k1', 'SECT163K1', deprecated=True)
        self._add_curve('sect571r1', 'SECT571R1', deprecated=True)
        self._add_curve('sect409r1', 'SECT409R1', deprecated=True)
        self._add_curve('sect283r1', 'SECT283R1', deprecated=True)
        self._add_curve('sect233r1', 'SECT233R1', deprecated=True)
        self._add_curve('sect163r2', 'SECT163R2', deprecated=True)
        self.module = module
        self.cryptography_backend = cryptography.hazmat.backends.default_backend()
        self.type = module.params['type']
        self.curve = module.params['curve']
        # Fail early for key types the installed cryptography version cannot handle.
        if not CRYPTOGRAPHY_HAS_X25519 and self.type == 'X25519':
            self.module.fail_json(msg='Your cryptography version does not support X25519')
        if not CRYPTOGRAPHY_HAS_X25519_FULL and self.type == 'X25519':
            self.module.fail_json(msg='Your cryptography version does not support X25519 serialization')
        if not CRYPTOGRAPHY_HAS_X448 and self.type == 'X448':
            self.module.fail_json(msg='Your cryptography version does not support X448')
        if not CRYPTOGRAPHY_HAS_ED25519 and self.type == 'Ed25519':
            self.module.fail_json(msg='Your cryptography version does not support Ed25519')
        if not CRYPTOGRAPHY_HAS_ED448 and self.type == 'Ed448':
            self.module.fail_json(msg='Your cryptography version does not support Ed448')
    def _get_wanted_format(self):
        # Resolve 'auto'/'auto_ignore' to a concrete format: the modern key
        # types only serialize as PKCS#8, everything else defaults to PKCS#1.
        if self.format not in ('auto', 'auto_ignore'):
            return self.format
        if self.type in ('X25519', 'X448', 'Ed25519', 'Ed448'):
            return 'pkcs8'
        else:
            return 'pkcs1'
    def _generate_private_key(self):
        """(Re-)Generate private key."""
        try:
            if self.type == 'RSA':
                self.privatekey = cryptography.hazmat.primitives.asymmetric.rsa.generate_private_key(
                    public_exponent=65537, # OpenSSL always uses this
                    key_size=self.size,
                    backend=self.cryptography_backend
                )
            if self.type == 'DSA':
                self.privatekey = cryptography.hazmat.primitives.asymmetric.dsa.generate_private_key(
                    key_size=self.size,
                    backend=self.cryptography_backend
                )
            # The modern key types have fixed sizes; size/curve do not apply.
            if CRYPTOGRAPHY_HAS_X25519_FULL and self.type == 'X25519':
                self.privatekey = cryptography.hazmat.primitives.asymmetric.x25519.X25519PrivateKey.generate()
            if CRYPTOGRAPHY_HAS_X448 and self.type == 'X448':
                self.privatekey = cryptography.hazmat.primitives.asymmetric.x448.X448PrivateKey.generate()
            if CRYPTOGRAPHY_HAS_ED25519 and self.type == 'Ed25519':
                self.privatekey = cryptography.hazmat.primitives.asymmetric.ed25519.Ed25519PrivateKey.generate()
            if CRYPTOGRAPHY_HAS_ED448 and self.type == 'Ed448':
                self.privatekey = cryptography.hazmat.primitives.asymmetric.ed448.Ed448PrivateKey.generate()
            if self.type == 'ECC' and self.curve in self.curves:
                if self.curves[self.curve]['deprecated']:
                    self.module.warn('Elliptic curves of type {0} should not be used for new keys!'.format(self.curve))
                self.privatekey = cryptography.hazmat.primitives.asymmetric.ec.generate_private_key(
                    curve=self.curves[self.curve]['create'](self.size),
                    backend=self.cryptography_backend
                )
        except cryptography.exceptions.UnsupportedAlgorithm as dummy:
            self.module.fail_json(msg='Cryptography backend does not support the algorithm required for {0}'.format(self.type))
    def _ensure_private_key_loaded(self):
        """Make sure that the private key has been loaded."""
        if self.privatekey is None:
            self.privatekey = self._load_privatekey()
    def _get_private_key_data(self):
        """Return bytes for self.privatekey in the wanted format/encoding."""
        # Select export format and encoding
        try:
            export_format = self._get_wanted_format()
            export_encoding = cryptography.hazmat.primitives.serialization.Encoding.PEM
            if export_format == 'pkcs1':
                # "TraditionalOpenSSL" format is PKCS1
                export_format = cryptography.hazmat.primitives.serialization.PrivateFormat.TraditionalOpenSSL
            elif export_format == 'pkcs8':
                export_format = cryptography.hazmat.primitives.serialization.PrivateFormat.PKCS8
            elif export_format == 'raw':
                export_format = cryptography.hazmat.primitives.serialization.PrivateFormat.Raw
                export_encoding = cryptography.hazmat.primitives.serialization.Encoding.Raw
        except AttributeError:
            # Older cryptography versions may lack PrivateFormat.Raw etc.
            self.module.fail_json(msg='Cryptography backend does not support the selected output format "{0}"'.format(self.format))
        # Select key encryption
        encryption_algorithm = cryptography.hazmat.primitives.serialization.NoEncryption()
        if self.cipher and self.passphrase:
            if self.cipher == 'auto':
                encryption_algorithm = cryptography.hazmat.primitives.serialization.BestAvailableEncryption(to_bytes(self.passphrase))
            else:
                self.module.fail_json(msg='Cryptography backend can only use "auto" for cipher option.')
        # Serialize key
        try:
            return self.privatekey.private_bytes(
                encoding=export_encoding,
                format=export_format,
                encryption_algorithm=encryption_algorithm
            )
        except ValueError as dummy:
            self.module.fail_json(
                msg='Cryptography backend cannot serialize the private key in the required format "{0}"'.format(self.format)
            )
        except Exception as dummy:
            self.module.fail_json(
                msg='Error while serializing the private key in the required format "{0}"'.format(self.format),
                exception=traceback.format_exc()
            )
    def _load_privatekey(self):
        """Load and return the key from self.path; raises PrivateKeyError on failure."""
        try:
            # Read bytes
            with open(self.path, 'rb') as f:
                data = f.read()
            # Interpret bytes depending on format.
            # (local name shadows the format() builtin; kept as-is)
            format = identify_private_key_format(data)
            if format == 'raw':
                # Raw keys carry no metadata: disambiguate by length first
                # (X448 = 56 bytes, Ed448 = 57), then by requested type for
                # the ambiguous 32-byte X25519/Ed25519 case.
                if len(data) == 56 and CRYPTOGRAPHY_HAS_X448:
                    return cryptography.hazmat.primitives.asymmetric.x448.X448PrivateKey.from_private_bytes(data)
                if len(data) == 57 and CRYPTOGRAPHY_HAS_ED448:
                    return cryptography.hazmat.primitives.asymmetric.ed448.Ed448PrivateKey.from_private_bytes(data)
                if len(data) == 32:
                    if CRYPTOGRAPHY_HAS_X25519 and (self.type == 'X25519' or not CRYPTOGRAPHY_HAS_ED25519):
                        return cryptography.hazmat.primitives.asymmetric.x25519.X25519PrivateKey.from_private_bytes(data)
                    if CRYPTOGRAPHY_HAS_ED25519 and (self.type == 'Ed25519' or not CRYPTOGRAPHY_HAS_X25519):
                        return cryptography.hazmat.primitives.asymmetric.ed25519.Ed25519PrivateKey.from_private_bytes(data)
                    if CRYPTOGRAPHY_HAS_X25519 and CRYPTOGRAPHY_HAS_ED25519:
                        # Last resort: try X25519, fall back to Ed25519.
                        try:
                            return cryptography.hazmat.primitives.asymmetric.x25519.X25519PrivateKey.from_private_bytes(data)
                        except Exception:
                            return cryptography.hazmat.primitives.asymmetric.ed25519.Ed25519PrivateKey.from_private_bytes(data)
                raise PrivateKeyError('Cannot load raw key')
            else:
                return cryptography.hazmat.primitives.serialization.load_pem_private_key(
                    data,
                    None if self.passphrase is None else to_bytes(self.passphrase),
                    backend=self.cryptography_backend
                )
        except Exception as e:
            raise PrivateKeyError(e)
    def _get_fingerprint(self):
        # Get bytes of public key
        private_key = self._load_privatekey()
        public_key = private_key.public_key()
        public_key_bytes = public_key.public_bytes(
            cryptography.hazmat.primitives.serialization.Encoding.DER,
            cryptography.hazmat.primitives.serialization.PublicFormat.SubjectPublicKeyInfo
        )
        # Get fingerprints of public_key_bytes
        return get_fingerprint_of_bytes(public_key_bytes)
    def _check_passphrase(self):
        # Returns a truthy value when the key can be read with the configured
        # passphrase. NOTE(review): the PEM branch returns the loaded key
        # object (truthy) rather than True; callers only test truthiness.
        try:
            with open(self.path, 'rb') as f:
                data = f.read()
            format = identify_private_key_format(data)
            if format == 'raw':
                # Raw keys cannot be encrypted. To avoid incompatibilities, we try to
                # actually load the key (and return False when this fails).
                self._load_privatekey()
                # Loading the key succeeded. Only return True when no passphrase was
                # provided.
                return self.passphrase is None
            else:
                return cryptography.hazmat.primitives.serialization.load_pem_private_key(
                    data,
                    None if self.passphrase is None else to_bytes(self.passphrase),
                    backend=self.cryptography_backend
                )
        except Exception as dummy:
            return False
    def _check_size_and_type(self):
        # Compare the loaded key's concrete class (and size/curve where
        # applicable) against the requested type.
        self._ensure_private_key_loaded()
        if isinstance(self.privatekey, cryptography.hazmat.primitives.asymmetric.rsa.RSAPrivateKey):
            return self.type == 'RSA' and self.size == self.privatekey.key_size
        if isinstance(self.privatekey, cryptography.hazmat.primitives.asymmetric.dsa.DSAPrivateKey):
            return self.type == 'DSA' and self.size == self.privatekey.key_size
        if CRYPTOGRAPHY_HAS_X25519 and isinstance(self.privatekey, cryptography.hazmat.primitives.asymmetric.x25519.X25519PrivateKey):
            return self.type == 'X25519'
        if CRYPTOGRAPHY_HAS_X448 and isinstance(self.privatekey, cryptography.hazmat.primitives.asymmetric.x448.X448PrivateKey):
            return self.type == 'X448'
        if CRYPTOGRAPHY_HAS_ED25519 and isinstance(self.privatekey, cryptography.hazmat.primitives.asymmetric.ed25519.Ed25519PrivateKey):
            return self.type == 'Ed25519'
        if CRYPTOGRAPHY_HAS_ED448 and isinstance(self.privatekey, cryptography.hazmat.primitives.asymmetric.ed448.Ed448PrivateKey):
            return self.type == 'Ed448'
        if isinstance(self.privatekey, cryptography.hazmat.primitives.asymmetric.ec.EllipticCurvePrivateKey):
            if self.type != 'ECC':
                return False
            if self.curve not in self.curves:
                return False
            return self.curves[self.curve]['verify'](self.privatekey)
        return False
    def _check_format(self):
        # 'auto_ignore' means: never regenerate/convert because of format.
        if self.format == 'auto_ignore':
            return True
        try:
            with open(self.path, 'rb') as f:
                content = f.read()
            format = identify_private_key_format(content)
            return format == self._get_wanted_format()
        except Exception as dummy:
            return False
    def dump(self):
        """Serialize the object into a dictionary, adding type (and curve for ECC)."""
        result = super(PrivateKeyCryptography, self).dump()
        result['type'] = self.type
        if self.type == 'ECC':
            result['curve'] = self.curve
        return result
def main():
    """Module entry point: parse options, pick a backend, and apply the state.

    Backend selection ('auto'): a non-'auto' cipher is only supported by
    pyOpenSSL, so that combination prefers pyOpenSSL; otherwise cryptography
    is preferred.
    """
    module = AnsibleModule(
        argument_spec=dict(
            state=dict(type='str', default='present', choices=['present', 'absent']),
            size=dict(type='int', default=4096),
            type=dict(type='str', default='RSA', choices=[
                'DSA', 'ECC', 'Ed25519', 'Ed448', 'RSA', 'X25519', 'X448'
            ]),
            curve=dict(type='str', choices=[
                'secp384r1', 'secp521r1', 'secp224r1', 'secp192r1', 'secp256r1',
                'secp256k1', 'brainpoolP256r1', 'brainpoolP384r1', 'brainpoolP512r1',
                'sect571k1', 'sect409k1', 'sect283k1', 'sect233k1', 'sect163k1',
                'sect571r1', 'sect409r1', 'sect283r1', 'sect233r1', 'sect163r2',
            ]),
            force=dict(type='bool', default=False),
            path=dict(type='path', required=True),
            passphrase=dict(type='str', no_log=True),
            cipher=dict(type='str'),
            backup=dict(type='bool', default=False),
            format=dict(type='str', default='auto_ignore', choices=['pkcs1', 'pkcs8', 'raw', 'auto', 'auto_ignore']),
            format_mismatch=dict(type='str', default='regenerate', choices=['regenerate', 'convert']),
            select_crypto_backend=dict(type='str', choices=['auto', 'pyopenssl', 'cryptography'], default='auto'),
            return_content=dict(type='bool', default=False),
            regenerate=dict(
                type='str',
                default='full_idempotence',
                choices=['never', 'fail', 'partial_idempotence', 'full_idempotence', 'always']
            ),
        ),
        supports_check_mode=True,
        add_file_common_args=True,
        required_together=[
            ['cipher', 'passphrase']
        ],
        required_if=[
            ['type', 'ECC', ['curve']],
        ],
    )
    # The target directory must already exist; the module does not create it.
    base_dir = os.path.dirname(module.params['path']) or '.'
    if not os.path.isdir(base_dir):
        module.fail_json(
            name=base_dir,
            msg='The directory %s does not exist or the file is not a directory' % base_dir
        )
    backend = module.params['select_crypto_backend']
    if backend == 'auto':
        # Detection what is possible
        can_use_cryptography = CRYPTOGRAPHY_FOUND and CRYPTOGRAPHY_VERSION >= LooseVersion(MINIMAL_CRYPTOGRAPHY_VERSION)
        can_use_pyopenssl = PYOPENSSL_FOUND and PYOPENSSL_VERSION >= LooseVersion(MINIMAL_PYOPENSSL_VERSION)
        # Decision
        if module.params['cipher'] and module.params['passphrase'] and module.params['cipher'] != 'auto':
            # First try pyOpenSSL, then cryptography
            if can_use_pyopenssl:
                backend = 'pyopenssl'
            elif can_use_cryptography:
                backend = 'cryptography'
        else:
            # First try cryptography, then pyOpenSSL
            if can_use_cryptography:
                backend = 'cryptography'
            elif can_use_pyopenssl:
                backend = 'pyopenssl'
        # Success?
        if backend == 'auto':
            module.fail_json(msg=("Can't detect any of the required Python libraries "
                                  "cryptography (>= {0}) or PyOpenSSL (>= {1})").format(
                MINIMAL_CRYPTOGRAPHY_VERSION,
                MINIMAL_PYOPENSSL_VERSION))
    try:
        if backend == 'pyopenssl':
            if not PYOPENSSL_FOUND:
                module.fail_json(msg=missing_required_lib('pyOpenSSL >= {0}'.format(MINIMAL_PYOPENSSL_VERSION)),
                                 exception=PYOPENSSL_IMP_ERR)
            module.deprecate('The module is using the PyOpenSSL backend. This backend has been deprecated',
                             version='2.0.0', collection_name='community.crypto')
            private_key = PrivateKeyPyOpenSSL(module)
        elif backend == 'cryptography':
            if not CRYPTOGRAPHY_FOUND:
                module.fail_json(msg=missing_required_lib('cryptography >= {0}'.format(MINIMAL_CRYPTOGRAPHY_VERSION)),
                                 exception=CRYPTOGRAPHY_IMP_ERR)
            private_key = PrivateKeyCryptography(module)
        if private_key.state == 'present':
            if module.check_mode:
                # Check mode: report whether generate() WOULD change anything.
                result = private_key.dump()
                result['changed'] = private_key.force \
                    or not private_key.check(module, ignore_conversion=True) \
                    or not private_key.check(module, ignore_conversion=False)
                module.exit_json(**result)
            private_key.generate(module)
        else:
            if module.check_mode:
                result = private_key.dump()
                result['changed'] = os.path.exists(module.params['path'])
                module.exit_json(**result)
            private_key.remove(module)
        result = private_key.dump()
        module.exit_json(**result)
    except OpenSSLObjectError as exc:
        module.fail_json(msg=to_native(exc))
# Standard module boilerplate: run main() when executed directly (as Ansible does).
if __name__ == '__main__':
    main()
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2016, Yanis Guenane <yanis+ansible@guenane.org>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
module: openssl_privatekey
short_description: Generate OpenSSL private keys
description:
- This module allows one to (re)generate OpenSSL private keys.
- One can generate L(RSA,https://en.wikipedia.org/wiki/RSA_%28cryptosystem%29),
L(DSA,https://en.wikipedia.org/wiki/Digital_Signature_Algorithm),
L(ECC,https://en.wikipedia.org/wiki/Elliptic-curve_cryptography) or
L(EdDSA,https://en.wikipedia.org/wiki/EdDSA) private keys.
- Keys are generated in PEM format.
- "Please note that the module regenerates private keys if they don't match
the module's options. In particular, if you provide another passphrase
(or specify none), change the keysize, etc., the private key will be
regenerated. If you are concerned that this could **overwrite your private key**,
consider using the I(backup) option."
- "The module can use the cryptography Python library, or the pyOpenSSL Python
library. By default, it tries to detect which one is available. This can be
overridden with the I(select_crypto_backend) option. Please note that the
PyOpenSSL backend was deprecated in Ansible 2.9 and will be removed in Ansible 2.13."
requirements:
- Either cryptography >= 1.2.3 (older versions might work as well)
- Or pyOpenSSL
author:
- Yanis Guenane (@Spredzy)
- Felix Fontein (@felixfontein)
options:
state:
description:
- Whether the private key should exist or not, taking action if the state is different from what is stated.
type: str
default: present
choices: [ absent, present ]
size:
description:
- Size (in bits) of the TLS/SSL key to generate.
type: int
default: 4096
type:
description:
- The algorithm used to generate the TLS/SSL private key.
- Note that C(ECC), C(X25519), C(X448), C(Ed25519) and C(Ed448) require the C(cryptography) backend.
C(X25519) needs cryptography 2.5 or newer, while C(X448), C(Ed25519) and C(Ed448) require
cryptography 2.6 or newer. For C(ECC), the minimal cryptography version required depends on the
I(curve) option.
type: str
default: RSA
choices: [ DSA, ECC, Ed25519, Ed448, RSA, X25519, X448 ]
curve:
description:
- Note that not all curves are supported by all versions of C(cryptography).
- For maximal interoperability, C(secp384r1) or C(secp256r1) should be used.
- We use the curve names as defined in the
L(IANA registry for TLS,https://www.iana.org/assignments/tls-parameters/tls-parameters.xhtml#tls-parameters-8).
type: str
choices:
- secp384r1
- secp521r1
- secp224r1
- secp192r1
- secp256r1
- secp256k1
- brainpoolP256r1
- brainpoolP384r1
- brainpoolP512r1
- sect571k1
- sect409k1
- sect283k1
- sect233k1
- sect163k1
- sect571r1
- sect409r1
- sect283r1
- sect233r1
- sect163r2
force:
description:
- Should the key be regenerated even if it already exists.
type: bool
default: no
path:
description:
- Name of the file in which the generated TLS/SSL private key will be written. It will have 0600 mode.
type: path
required: true
passphrase:
description:
- The passphrase for the private key.
type: str
cipher:
description:
- The cipher to encrypt the private key. (Valid values can be found by
running `openssl list -cipher-algorithms` or `openssl list-cipher-algorithms`,
depending on your OpenSSL version.)
- When using the C(cryptography) backend, use C(auto).
type: str
select_crypto_backend:
description:
- Determines which crypto backend to use.
- The default choice is C(auto), which tries to use C(cryptography) if available, and falls back to C(pyopenssl).
- If set to C(pyopenssl), will try to use the L(pyOpenSSL,https://pypi.org/project/pyOpenSSL/) library.
- If set to C(cryptography), will try to use the L(cryptography,https://cryptography.io/) library.
- Please note that the C(pyopenssl) backend has been deprecated in Ansible 2.9, and will be removed in community.crypto 2.0.0.
From that point on, only the C(cryptography) backend will be available.
type: str
default: auto
choices: [ auto, cryptography, pyopenssl ]
format:
description:
- Determines which format the private key is written in. By default, PKCS1 (traditional OpenSSL format)
is used for all keys which support it. Please note that not every key can be exported in any format.
            - The value C(auto) selects a format based on the key format. The value C(auto_ignore) does the same,
but for existing private key files, it will not force a regenerate when its format is not the automatically
selected one for generation.
- Note that if the format for an existing private key mismatches, the key is *regenerated* by default.
To change this behavior, use the I(format_mismatch) option.
- The I(format) option is only supported by the C(cryptography) backend. The C(pyopenssl) backend will
fail if a value different from C(auto_ignore) is used.
type: str
default: auto_ignore
choices: [ pkcs1, pkcs8, raw, auto, auto_ignore ]
version_added: '1.0.0'
format_mismatch:
description:
- Determines behavior of the module if the format of a private key does not match the expected format, but all
other parameters are as expected.
- If set to C(regenerate) (default), generates a new private key.
- If set to C(convert), the key will be converted to the new format instead.
- Only supported by the C(cryptography) backend.
type: str
default: regenerate
choices: [ regenerate, convert ]
version_added: '1.0.0'
backup:
description:
- Create a backup file including a timestamp so you can get
the original private key back if you overwrote it with a new one by accident.
type: bool
default: no
return_content:
description:
- If set to C(yes), will return the (current or generated) private key's content as I(privatekey).
- Note that especially if the private key is not encrypted, you have to make sure that the returned
value is treated appropriately and not accidentally written to logs etc.! Use with care!
- Use Ansible's I(no_log) task option to avoid the output being shown. See also
U(https://docs.ansible.com/ansible/latest/reference_appendices/faq.html#how-do-i-keep-secret-data-in-my-playbook).
type: bool
default: no
version_added: '1.0.0'
regenerate:
description:
- Allows to configure in which situations the module is allowed to regenerate private keys.
The module will always generate a new key if the destination file does not exist.
- By default, the key will be regenerated when it doesn't match the module's options,
except when the key cannot be read or the passphrase does not match. Please note that
this B(changed) for Ansible 2.10. For Ansible 2.9, the behavior was as if C(full_idempotence)
is specified.
- If set to C(never), the module will fail if the key cannot be read or the passphrase
isn't matching, and will never regenerate an existing key.
- If set to C(fail), the module will fail if the key does not correspond to the module's
options.
- If set to C(partial_idempotence), the key will be regenerated if it does not conform to
the module's options. The key is B(not) regenerated if it cannot be read (broken file),
              the key is protected by an unknown passphrase, or when the key is not protected by a
passphrase, but a passphrase is specified.
- If set to C(full_idempotence), the key will be regenerated if it does not conform to the
module's options. This is also the case if the key cannot be read (broken file), the key
              is protected by an unknown passphrase, or when the key is not protected by a passphrase,
but a passphrase is specified. Make sure you have a B(backup) when using this option!
- If set to C(always), the module will always regenerate the key. This is equivalent to
setting I(force) to C(yes).
- Note that if I(format_mismatch) is set to C(convert) and everything matches except the
format, the key will always be converted, except if I(regenerate) is set to C(always).
type: str
choices:
- never
- fail
- partial_idempotence
- full_idempotence
- always
default: full_idempotence
version_added: '1.0.0'
extends_documentation_fragment:
- files
seealso:
- module: community.crypto.x509_certificate
- module: community.crypto.openssl_csr
- module: community.crypto.openssl_dhparam
- module: community.crypto.openssl_pkcs12
- module: community.crypto.openssl_publickey
'''
EXAMPLES = r'''
- name: Generate an OpenSSL private key with the default values (4096 bits, RSA)
community.crypto.openssl_privatekey:
path: /etc/ssl/private/ansible.com.pem
- name: Generate an OpenSSL private key with the default values (4096 bits, RSA) and a passphrase
community.crypto.openssl_privatekey:
path: /etc/ssl/private/ansible.com.pem
passphrase: ansible
cipher: aes256
- name: Generate an OpenSSL private key with a different size (2048 bits)
community.crypto.openssl_privatekey:
path: /etc/ssl/private/ansible.com.pem
size: 2048
- name: Force regenerate an OpenSSL private key if it already exists
community.crypto.openssl_privatekey:
path: /etc/ssl/private/ansible.com.pem
force: yes
- name: Generate an OpenSSL private key with a different algorithm (DSA)
community.crypto.openssl_privatekey:
path: /etc/ssl/private/ansible.com.pem
type: DSA
'''
RETURN = r'''
size:
description: Size (in bits) of the TLS/SSL private key.
returned: changed or success
type: int
sample: 4096
type:
description: Algorithm used to generate the TLS/SSL private key.
returned: changed or success
type: str
sample: RSA
curve:
description: Elliptic curve used to generate the TLS/SSL private key.
returned: changed or success, and I(type) is C(ECC)
type: str
sample: secp256r1
filename:
description: Path to the generated TLS/SSL private key file.
returned: changed or success
type: str
sample: /etc/ssl/private/ansible.com.pem
fingerprint:
description:
- The fingerprint of the public key. Fingerprint will be generated for each C(hashlib.algorithms) available.
- The PyOpenSSL backend requires PyOpenSSL >= 16.0 for meaningful output.
returned: changed or success
type: dict
sample:
md5: "84:75:71:72:8d:04:b5:6c:4d:37:6d:66:83:f5:4c:29"
sha1: "51:cc:7c:68:5d:eb:41:43:88:7e:1a:ae:c7:f8:24:72:ee:71:f6:10"
sha224: "b1:19:a6:6c:14:ac:33:1d:ed:18:50:d3:06:5c:b2:32:91:f1:f1:52:8c:cb:d5:75:e9:f5:9b:46"
sha256: "41:ab:c7:cb:d5:5f:30:60:46:99:ac:d4:00:70:cf:a1:76:4f:24:5d:10:24:57:5d:51:6e:09:97:df:2f:de:c7"
sha384: "85:39:50:4e:de:d9:19:33:40:70:ae:10:ab:59:24:19:51:c3:a2:e4:0b:1c:b1:6e:dd:b3:0c:d9:9e:6a:46:af:da:18:f8:ef:ae:2e:c0:9a:75:2c:9b:b3:0f:3a:5f:3d"
sha512: "fd:ed:5e:39:48:5f:9f:fe:7f:25:06:3f:79:08:cd:ee:a5:e7:b3:3d:13:82:87:1f:84:e1:f5:c7:28:77:53:94:86:56:38:69:f0:d9:35:22:01:1e:a6:60:...:0f:9b"
backup_file:
description: Name of backup file created.
returned: changed and if I(backup) is C(yes)
type: str
sample: /path/to/privatekey.pem.2019-03-09@11:22~
privatekey:
description:
- The (current or generated) private key's content.
- Will be Base64-encoded if the key is in raw format.
returned: if I(state) is C(present) and I(return_content) is C(yes)
type: str
version_added: '1.0.0'
'''
import abc
import base64
import os
import traceback
from distutils.version import LooseVersion
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils._text import to_native, to_bytes
from ansible_collections.community.crypto.plugins.module_utils.io import (
load_file_if_exists,
write_file,
)
from ansible_collections.community.crypto.plugins.module_utils.crypto.basic import (
CRYPTOGRAPHY_HAS_X25519,
CRYPTOGRAPHY_HAS_X25519_FULL,
CRYPTOGRAPHY_HAS_X448,
CRYPTOGRAPHY_HAS_ED25519,
CRYPTOGRAPHY_HAS_ED448,
OpenSSLObjectError,
OpenSSLBadPassphraseError,
)
from ansible_collections.community.crypto.plugins.module_utils.crypto.support import (
OpenSSLObject,
load_privatekey,
get_fingerprint,
get_fingerprint_of_bytes,
)
from ansible_collections.community.crypto.plugins.module_utils.crypto.identify import (
identify_private_key_format,
)
# Minimal versions of the two supported crypto backends.
MINIMAL_PYOPENSSL_VERSION = '0.6'
MINIMAL_CRYPTOGRAPHY_VERSION = '1.2.3'
# Try importing pyOpenSSL; keep the traceback so it can be reported later.
PYOPENSSL_IMP_ERR = None
try:
    import OpenSSL
    from OpenSSL import crypto
    PYOPENSSL_VERSION = LooseVersion(OpenSSL.__version__)
except ImportError:
    PYOPENSSL_IMP_ERR = traceback.format_exc()
    PYOPENSSL_FOUND = False
else:
    PYOPENSSL_FOUND = True
# Try importing cryptography; keep the traceback so it can be reported later.
CRYPTOGRAPHY_IMP_ERR = None
try:
    import cryptography
    import cryptography.exceptions
    import cryptography.hazmat.backends
    import cryptography.hazmat.primitives.serialization
    import cryptography.hazmat.primitives.asymmetric.rsa
    import cryptography.hazmat.primitives.asymmetric.dsa
    import cryptography.hazmat.primitives.asymmetric.ec
    import cryptography.hazmat.primitives.asymmetric.utils
    CRYPTOGRAPHY_VERSION = LooseVersion(cryptography.__version__)
except ImportError:
    CRYPTOGRAPHY_IMP_ERR = traceback.format_exc()
    CRYPTOGRAPHY_FOUND = False
else:
    CRYPTOGRAPHY_FOUND = True
class PrivateKeyError(OpenSSLObjectError):
    """Raised when generating, loading, or serializing a private key fails."""
    pass
class PrivateKeyBase(OpenSSLObject):
    """Abstract base class driving the private key lifecycle.

    Concrete backends (pyOpenSSL / cryptography) implement key generation,
    loading, serialization, and the individual consistency checks via the
    abstract hooks below; this class implements the shared idempotence,
    regeneration, conversion, backup, and result-serialization logic.
    """

    def __init__(self, module):
        super(PrivateKeyBase, self).__init__(
            module.params['path'],
            module.params['state'],
            module.params['force'],
            module.check_mode
        )
        # Plain copies of the relevant module parameters.
        self.size = module.params['size']
        self.passphrase = module.params['passphrase']
        self.cipher = module.params['cipher']
        self.privatekey = None
        self.fingerprint = {}
        self.format = module.params['format']
        self.format_mismatch = module.params['format_mismatch']
        self.privatekey_bytes = None
        self.return_content = module.params['return_content']
        self.regenerate = module.params['regenerate']
        if self.regenerate == 'always':
            # regenerate=always behaves exactly like force=yes.
            self.force = True
        self.backup = module.params['backup']
        self.backup_file = None
        if module.params['mode'] is None:
            # Private key files default to owner-only permissions.
            module.params['mode'] = '0600'

    @abc.abstractmethod
    def _generate_private_key(self):
        """(Re-)Generate private key."""
        pass

    @abc.abstractmethod
    def _ensure_private_key_loaded(self):
        """Make sure that the private key has been loaded."""
        pass

    @abc.abstractmethod
    def _get_private_key_data(self):
        """Return bytes for self.privatekey"""
        pass

    @abc.abstractmethod
    def _get_fingerprint(self):
        """Return a dict of fingerprints of the key's public key."""
        pass

    def generate(self, module):
        """Generate a keypair."""
        # Phase 1: (re-)generate when the key is missing/invalid or forced.
        # ignore_conversion=True means a pure format mismatch only triggers
        # regeneration here when format_mismatch == 'regenerate'.
        if not self.check(module, perms_required=False, ignore_conversion=True) or self.force:
            # Regenerate
            if self.backup:
                self.backup_file = module.backup_local(self.path)
            self._generate_private_key()
            privatekey_data = self._get_private_key_data()
            if self.return_content:
                self.privatekey_bytes = privatekey_data
            write_file(module, privatekey_data, 0o600)
            self.changed = True
        # Phase 2: key itself is acceptable, but the on-disk format differs
        # and format_mismatch == 'convert' — rewrite it in the wanted format.
        elif not self.check(module, perms_required=False, ignore_conversion=False):
            # Convert
            if self.backup:
                self.backup_file = module.backup_local(self.path)
            self._ensure_private_key_loaded()
            privatekey_data = self._get_private_key_data()
            if self.return_content:
                self.privatekey_bytes = privatekey_data
            write_file(module, privatekey_data, 0o600)
            self.changed = True
        self.fingerprint = self._get_fingerprint()
        # Apply owner/group/mode/etc. from the common file arguments.
        file_args = module.load_file_common_arguments(module.params)
        if module.set_fs_attributes_if_different(file_args, False):
            self.changed = True

    def remove(self, module):
        # Optionally back up the key file before deleting it.
        if self.backup:
            self.backup_file = module.backup_local(self.path)
        super(PrivateKeyBase, self).remove(module)

    @abc.abstractmethod
    def _check_passphrase(self):
        """Return True when the key can be read with the given passphrase."""
        pass

    @abc.abstractmethod
    def _check_size_and_type(self):
        """Return True when the existing key matches the requested size/type."""
        pass

    @abc.abstractmethod
    def _check_format(self):
        """Return True when the existing key is stored in the wanted format."""
        pass

    def check(self, module, perms_required=True, ignore_conversion=True):
        """Ensure the resource is in its desired state.

        Returns False when the key must be (re)generated or converted.
        Depending on the `regenerate` policy, it may instead call
        module.fail_json() when regeneration would be required but is
        not allowed.
        """
        state_and_perms = super(PrivateKeyBase, self).check(module, perms_required=False)
        if not state_and_perms:
            # key does not exist
            return False
        # NOTE(review): the fail messages below say 'generate' while the
        # module option is called 'regenerate' — confirm intended wording.
        if not self._check_passphrase():
            if self.regenerate in ('full_idempotence', 'always'):
                return False
            module.fail_json(msg='Unable to read the key. The key is protected with a another passphrase / no passphrase or broken.'
                                 ' Will not proceed. To force regeneration, call the module with `generate`'
                                 ' set to `full_idempotence` or `always`, or with `force=yes`.')
        if self.regenerate != 'never':
            if not self._check_size_and_type():
                if self.regenerate in ('partial_idempotence', 'full_idempotence', 'always'):
                    return False
                module.fail_json(msg='Key has wrong type and/or size.'
                                     ' Will not proceed. To force regeneration, call the module with `generate`'
                                     ' set to `partial_idempotence`, `full_idempotence` or `always`, or with `force=yes`.')
        if not self._check_format():
            # During conversion step, convert if format does not match and format_mismatch == 'convert'
            if not ignore_conversion and self.format_mismatch == 'convert':
                return False
            # During generation step, regenerate if format does not match and format_mismatch == 'regenerate'
            if ignore_conversion and self.format_mismatch == 'regenerate' and self.regenerate != 'never':
                # With ignore_conversion True here, this reduces to the
                # regenerate-policy membership test; regenerate='fail' falls
                # through to the fail_json below.
                if not ignore_conversion or self.regenerate in ('partial_idempotence', 'full_idempotence', 'always'):
                    return False
                module.fail_json(msg='Key has wrong format.'
                                     ' Will not proceed. To force regeneration, call the module with `generate`'
                                     ' set to `partial_idempotence`, `full_idempotence` or `always`, or with `force=yes`.'
                                     ' To convert the key, set `format_mismatch` to `convert`.')
        # check whether permissions are correct (in case that needs to be checked)
        return not perms_required or super(PrivateKeyBase, self).check(module, perms_required=perms_required)

    def dump(self):
        """Serialize the object into a dictionary."""
        result = {
            'size': self.size,
            'filename': self.path,
            'changed': self.changed,
            'fingerprint': self.fingerprint,
        }
        if self.backup_file:
            result['backup_file'] = self.backup_file
        if self.return_content:
            # Re-read the key from disk if it was not captured during
            # generation/conversion (e.g. nothing changed this run).
            if self.privatekey_bytes is None:
                self.privatekey_bytes = load_file_if_exists(self.path, ignore_errors=True)
            if self.privatekey_bytes:
                # Raw-format keys are binary, so return them base64-encoded.
                # NOTE(review): b64encode returns bytes, not text — Ansible's
                # JSON encoder appears to cope, but confirm.
                if identify_private_key_format(self.privatekey_bytes) == 'raw':
                    result['privatekey'] = base64.b64encode(self.privatekey_bytes)
                else:
                    result['privatekey'] = self.privatekey_bytes.decode('utf-8')
            else:
                result['privatekey'] = None
        return result
# Implementation using pyOpenSSL
class PrivateKeyPyOpenSSL(PrivateKeyBase):
    """Private key backend based on pyOpenSSL (deprecated upstream).

    Only supports RSA and DSA keys, PEM serialization, and the
    ``auto_ignore`` format setting.
    """

    def __init__(self, module):
        super(PrivateKeyPyOpenSSL, self).__init__(module)
        # Translate the module's type name into pyOpenSSL's constant.
        if module.params['type'] == 'RSA':
            self.type = crypto.TYPE_RSA
        elif module.params['type'] == 'DSA':
            self.type = crypto.TYPE_DSA
        else:
            module.fail_json(msg="PyOpenSSL backend only supports RSA and DSA keys.")
        if self.format != 'auto_ignore':
            module.fail_json(msg="PyOpenSSL backend only supports auto_ignore format.")

    def _generate_private_key(self):
        """(Re-)Generate private key."""
        self.privatekey = crypto.PKey()
        try:
            self.privatekey.generate_key(self.type, self.size)
        except (TypeError, ValueError) as exc:
            raise PrivateKeyError(exc)

    def _ensure_private_key_loaded(self):
        """Make sure that the private key has been loaded."""
        if self.privatekey is None:
            try:
                # Fixed: dropped the redundant local alias that was also
                # bound here (`self.privatekey = privatekey = ...`).
                self.privatekey = load_privatekey(self.path, self.passphrase)
            except OpenSSLBadPassphraseError as exc:
                raise PrivateKeyError(exc)

    def _get_private_key_data(self):
        """Return bytes for self.privatekey, encrypted when requested."""
        if self.cipher and self.passphrase:
            return crypto.dump_privatekey(crypto.FILETYPE_PEM, self.privatekey,
                                          self.cipher, to_bytes(self.passphrase))
        else:
            return crypto.dump_privatekey(crypto.FILETYPE_PEM, self.privatekey)

    def _get_fingerprint(self):
        """Return fingerprints computed from the key on disk."""
        return get_fingerprint(self.path, self.passphrase)

    def _check_passphrase(self):
        # The key is considered readable iff loading with the configured
        # passphrase does not raise.
        try:
            load_privatekey(self.path, self.passphrase)
            return True
        except Exception as dummy:
            return False

    def _check_size_and_type(self):
        def _check_size(privatekey):
            return self.size == privatekey.bits()

        def _check_type(privatekey):
            return self.type == privatekey.type()

        self._ensure_private_key_loaded()
        return _check_size(self.privatekey) and _check_type(self.privatekey)

    def _check_format(self):
        # Not supported by this backend
        return True

    def dump(self):
        """Serialize the object into a dictionary."""
        result = super(PrivateKeyPyOpenSSL, self).dump()
        # Map pyOpenSSL's type constant back to the module's type name.
        if self.type == crypto.TYPE_RSA:
            result['type'] = 'RSA'
        else:
            result['type'] = 'DSA'
        return result
# Implementation using cryptography
class PrivateKeyCryptography(PrivateKeyBase):
    """Private key backend based on the ``cryptography`` library.

    Supports RSA, DSA, ECC and — where the installed cryptography version
    allows — X25519/X448/Ed25519/Ed448 keys, plus PKCS#1, PKCS#8 and raw
    serialization formats.
    """

    def _get_ec_class(self, ectype):
        # Resolve a curve class by name from cryptography's ec module;
        # fail the whole module run when this version does not ship it.
        ecclass = cryptography.hazmat.primitives.asymmetric.ec.__dict__.get(ectype)
        if ecclass is None:
            self.module.fail_json(msg='Your cryptography version does not support {0}'.format(ectype))
        return ecclass

    def _add_curve(self, name, ectype, deprecated=False):
        # Register a curve under its OpenSSL-style name with lazy factory
        # and verification callables.
        def create(size):
            # `size` is accepted for interface symmetry; the curve itself
            # determines the key size.
            ecclass = self._get_ec_class(ectype)
            return ecclass()

        def verify(privatekey):
            ecclass = self._get_ec_class(ectype)
            return isinstance(privatekey.private_numbers().public_numbers.curve, ecclass)

        self.curves[name] = {
            'create': create,
            'verify': verify,
            'deprecated': deprecated,
        }

    def __init__(self, module):
        super(PrivateKeyCryptography, self).__init__(module)
        # Table of supported curves; deprecated ones trigger a warning when
        # used to generate new keys.
        self.curves = dict()
        self._add_curve('secp384r1', 'SECP384R1')
        self._add_curve('secp521r1', 'SECP521R1')
        self._add_curve('secp224r1', 'SECP224R1')
        self._add_curve('secp192r1', 'SECP192R1')
        self._add_curve('secp256r1', 'SECP256R1')
        self._add_curve('secp256k1', 'SECP256K1')
        self._add_curve('brainpoolP256r1', 'BrainpoolP256R1', deprecated=True)
        self._add_curve('brainpoolP384r1', 'BrainpoolP384R1', deprecated=True)
        self._add_curve('brainpoolP512r1', 'BrainpoolP512R1', deprecated=True)
        self._add_curve('sect571k1', 'SECT571K1', deprecated=True)
        self._add_curve('sect409k1', 'SECT409K1', deprecated=True)
        self._add_curve('sect283k1', 'SECT283K1', deprecated=True)
        self._add_curve('sect233k1', 'SECT233K1', deprecated=True)
        self._add_curve('sect163k1', 'SECT163K1', deprecated=True)
        self._add_curve('sect571r1', 'SECT571R1', deprecated=True)
        self._add_curve('sect409r1', 'SECT409R1', deprecated=True)
        self._add_curve('sect283r1', 'SECT283R1', deprecated=True)
        self._add_curve('sect233r1', 'SECT233R1', deprecated=True)
        self._add_curve('sect163r2', 'SECT163R2', deprecated=True)
        self.module = module
        self.cryptography_backend = cryptography.hazmat.backends.default_backend()
        self.type = module.params['type']
        self.curve = module.params['curve']
        # Fail early when the requested key type needs a newer cryptography.
        if not CRYPTOGRAPHY_HAS_X25519 and self.type == 'X25519':
            self.module.fail_json(msg='Your cryptography version does not support X25519')
        if not CRYPTOGRAPHY_HAS_X25519_FULL and self.type == 'X25519':
            self.module.fail_json(msg='Your cryptography version does not support X25519 serialization')
        if not CRYPTOGRAPHY_HAS_X448 and self.type == 'X448':
            self.module.fail_json(msg='Your cryptography version does not support X448')
        if not CRYPTOGRAPHY_HAS_ED25519 and self.type == 'Ed25519':
            self.module.fail_json(msg='Your cryptography version does not support Ed25519')
        if not CRYPTOGRAPHY_HAS_ED448 and self.type == 'Ed448':
            self.module.fail_json(msg='Your cryptography version does not support Ed448')

    def _get_wanted_format(self):
        # Resolve 'auto'/'auto_ignore' to a concrete serialization format:
        # modern curve keys default to PKCS#8, everything else to PKCS#1.
        if self.format not in ('auto', 'auto_ignore'):
            return self.format
        if self.type in ('X25519', 'X448', 'Ed25519', 'Ed448'):
            return 'pkcs8'
        else:
            return 'pkcs1'

    def _generate_private_key(self):
        """(Re-)Generate private key."""
        try:
            if self.type == 'RSA':
                self.privatekey = cryptography.hazmat.primitives.asymmetric.rsa.generate_private_key(
                    public_exponent=65537,  # OpenSSL always uses this
                    key_size=self.size,
                    backend=self.cryptography_backend
                )
            if self.type == 'DSA':
                self.privatekey = cryptography.hazmat.primitives.asymmetric.dsa.generate_private_key(
                    key_size=self.size,
                    backend=self.cryptography_backend
                )
            if CRYPTOGRAPHY_HAS_X25519_FULL and self.type == 'X25519':
                self.privatekey = cryptography.hazmat.primitives.asymmetric.x25519.X25519PrivateKey.generate()
            if CRYPTOGRAPHY_HAS_X448 and self.type == 'X448':
                self.privatekey = cryptography.hazmat.primitives.asymmetric.x448.X448PrivateKey.generate()
            if CRYPTOGRAPHY_HAS_ED25519 and self.type == 'Ed25519':
                self.privatekey = cryptography.hazmat.primitives.asymmetric.ed25519.Ed25519PrivateKey.generate()
            if CRYPTOGRAPHY_HAS_ED448 and self.type == 'Ed448':
                self.privatekey = cryptography.hazmat.primitives.asymmetric.ed448.Ed448PrivateKey.generate()
            if self.type == 'ECC' and self.curve in self.curves:
                if self.curves[self.curve]['deprecated']:
                    self.module.warn('Elliptic curves of type {0} should not be used for new keys!'.format(self.curve))
                self.privatekey = cryptography.hazmat.primitives.asymmetric.ec.generate_private_key(
                    curve=self.curves[self.curve]['create'](self.size),
                    backend=self.cryptography_backend
                )
        except cryptography.exceptions.UnsupportedAlgorithm as dummy:
            self.module.fail_json(msg='Cryptography backend does not support the algorithm required for {0}'.format(self.type))

    def _ensure_private_key_loaded(self):
        """Make sure that the private key has been loaded."""
        if self.privatekey is None:
            self.privatekey = self._load_privatekey()

    def _get_private_key_data(self):
        """Return bytes for self.privatekey"""
        # Select export format and encoding
        try:
            export_format = self._get_wanted_format()
            export_encoding = cryptography.hazmat.primitives.serialization.Encoding.PEM
            if export_format == 'pkcs1':
                # "TraditionalOpenSSL" format is PKCS1
                export_format = cryptography.hazmat.primitives.serialization.PrivateFormat.TraditionalOpenSSL
            elif export_format == 'pkcs8':
                export_format = cryptography.hazmat.primitives.serialization.PrivateFormat.PKCS8
            elif export_format == 'raw':
                export_format = cryptography.hazmat.primitives.serialization.PrivateFormat.Raw
                export_encoding = cryptography.hazmat.primitives.serialization.Encoding.Raw
        except AttributeError:
            # Old cryptography versions lack some PrivateFormat members.
            self.module.fail_json(msg='Cryptography backend does not support the selected output format "{0}"'.format(self.format))
        # Select key encryption
        encryption_algorithm = cryptography.hazmat.primitives.serialization.NoEncryption()
        if self.cipher and self.passphrase:
            if self.cipher == 'auto':
                encryption_algorithm = cryptography.hazmat.primitives.serialization.BestAvailableEncryption(to_bytes(self.passphrase))
            else:
                self.module.fail_json(msg='Cryptography backend can only use "auto" for cipher option.')
        # Serialize key
        try:
            return self.privatekey.private_bytes(
                encoding=export_encoding,
                format=export_format,
                encryption_algorithm=encryption_algorithm
            )
        except ValueError as dummy:
            self.module.fail_json(
                msg='Cryptography backend cannot serialize the private key in the required format "{0}"'.format(self.format)
            )
        except Exception as dummy:
            self.module.fail_json(
                msg='Error while serializing the private key in the required format "{0}"'.format(self.format),
                exception=traceback.format_exc()
            )

    def _load_privatekey(self):
        """Load the on-disk key, handling both PEM and raw formats."""
        try:
            # Read bytes
            with open(self.path, 'rb') as f:
                data = f.read()
            # Interpret bytes depending on format.
            format = identify_private_key_format(data)
            if format == 'raw':
                # Raw keys carry no type information; disambiguate by
                # length (56 = X448, 57 = Ed448, 32 = X25519 or Ed25519).
                if len(data) == 56 and CRYPTOGRAPHY_HAS_X448:
                    return cryptography.hazmat.primitives.asymmetric.x448.X448PrivateKey.from_private_bytes(data)
                if len(data) == 57 and CRYPTOGRAPHY_HAS_ED448:
                    return cryptography.hazmat.primitives.asymmetric.ed448.Ed448PrivateKey.from_private_bytes(data)
                if len(data) == 32:
                    # 32-byte keys are ambiguous: prefer the requested type,
                    # otherwise try X25519 first and fall back to Ed25519.
                    if CRYPTOGRAPHY_HAS_X25519 and (self.type == 'X25519' or not CRYPTOGRAPHY_HAS_ED25519):
                        return cryptography.hazmat.primitives.asymmetric.x25519.X25519PrivateKey.from_private_bytes(data)
                    if CRYPTOGRAPHY_HAS_ED25519 and (self.type == 'Ed25519' or not CRYPTOGRAPHY_HAS_X25519):
                        return cryptography.hazmat.primitives.asymmetric.ed25519.Ed25519PrivateKey.from_private_bytes(data)
                    if CRYPTOGRAPHY_HAS_X25519 and CRYPTOGRAPHY_HAS_ED25519:
                        try:
                            return cryptography.hazmat.primitives.asymmetric.x25519.X25519PrivateKey.from_private_bytes(data)
                        except Exception:
                            return cryptography.hazmat.primitives.asymmetric.ed25519.Ed25519PrivateKey.from_private_bytes(data)
                raise PrivateKeyError('Cannot load raw key')
            else:
                return cryptography.hazmat.primitives.serialization.load_pem_private_key(
                    data,
                    None if self.passphrase is None else to_bytes(self.passphrase),
                    backend=self.cryptography_backend
                )
        except Exception as e:
            raise PrivateKeyError(e)

    def _get_fingerprint(self):
        # Get bytes of public key
        private_key = self._load_privatekey()
        public_key = private_key.public_key()
        public_key_bytes = public_key.public_bytes(
            cryptography.hazmat.primitives.serialization.Encoding.DER,
            cryptography.hazmat.primitives.serialization.PublicFormat.SubjectPublicKeyInfo
        )
        # Get fingerprints of public_key_bytes
        return get_fingerprint_of_bytes(public_key_bytes)

    def _check_passphrase(self):
        try:
            with open(self.path, 'rb') as f:
                data = f.read()
            format = identify_private_key_format(data)
            if format == 'raw':
                # Raw keys cannot be encrypted. To avoid incompatibilities, we try to
                # actually load the key (and return False when this fails).
                self._load_privatekey()
                # Loading the key succeeded. Only return True when no passphrase was
                # provided.
                return self.passphrase is None
            else:
                # NOTE(review): this returns the loaded key object (truthy),
                # not a bool; callers only use it in boolean context.
                return cryptography.hazmat.primitives.serialization.load_pem_private_key(
                    data,
                    None if self.passphrase is None else to_bytes(self.passphrase),
                    backend=self.cryptography_backend
                )
        except Exception as dummy:
            return False

    def _check_size_and_type(self):
        # Match the loaded key object against the requested type (and, for
        # RSA/DSA, the requested size; for ECC, the requested curve).
        self._ensure_private_key_loaded()
        if isinstance(self.privatekey, cryptography.hazmat.primitives.asymmetric.rsa.RSAPrivateKey):
            return self.type == 'RSA' and self.size == self.privatekey.key_size
        if isinstance(self.privatekey, cryptography.hazmat.primitives.asymmetric.dsa.DSAPrivateKey):
            return self.type == 'DSA' and self.size == self.privatekey.key_size
        if CRYPTOGRAPHY_HAS_X25519 and isinstance(self.privatekey, cryptography.hazmat.primitives.asymmetric.x25519.X25519PrivateKey):
            return self.type == 'X25519'
        if CRYPTOGRAPHY_HAS_X448 and isinstance(self.privatekey, cryptography.hazmat.primitives.asymmetric.x448.X448PrivateKey):
            return self.type == 'X448'
        if CRYPTOGRAPHY_HAS_ED25519 and isinstance(self.privatekey, cryptography.hazmat.primitives.asymmetric.ed25519.Ed25519PrivateKey):
            return self.type == 'Ed25519'
        if CRYPTOGRAPHY_HAS_ED448 and isinstance(self.privatekey, cryptography.hazmat.primitives.asymmetric.ed448.Ed448PrivateKey):
            return self.type == 'Ed448'
        if isinstance(self.privatekey, cryptography.hazmat.primitives.asymmetric.ec.EllipticCurvePrivateKey):
            if self.type != 'ECC':
                return False
            if self.curve not in self.curves:
                return False
            return self.curves[self.curve]['verify'](self.privatekey)
        return False

    def _check_format(self):
        # 'auto_ignore' means: never consider the on-disk format a mismatch.
        if self.format == 'auto_ignore':
            return True
        try:
            with open(self.path, 'rb') as f:
                content = f.read()
            format = identify_private_key_format(content)
            return format == self._get_wanted_format()
        except Exception as dummy:
            return False

    def dump(self):
        """Serialize the object into a dictionary."""
        result = super(PrivateKeyCryptography, self).dump()
        result['type'] = self.type
        if self.type == 'ECC':
            result['curve'] = self.curve
        return result
def main():
    """Module entry point: parse arguments, pick a backend, apply state."""
    module = AnsibleModule(
        argument_spec=dict(
            state=dict(type='str', default='present', choices=['present', 'absent']),
            size=dict(type='int', default=4096),
            type=dict(type='str', default='RSA', choices=[
                'DSA', 'ECC', 'Ed25519', 'Ed448', 'RSA', 'X25519', 'X448'
            ]),
            curve=dict(type='str', choices=[
                'secp384r1', 'secp521r1', 'secp224r1', 'secp192r1', 'secp256r1',
                'secp256k1', 'brainpoolP256r1', 'brainpoolP384r1', 'brainpoolP512r1',
                'sect571k1', 'sect409k1', 'sect283k1', 'sect233k1', 'sect163k1',
                'sect571r1', 'sect409r1', 'sect283r1', 'sect233r1', 'sect163r2',
            ]),
            force=dict(type='bool', default=False),
            path=dict(type='path', required=True),
            passphrase=dict(type='str', no_log=True),
            cipher=dict(type='str'),
            backup=dict(type='bool', default=False),
            format=dict(type='str', default='auto_ignore', choices=['pkcs1', 'pkcs8', 'raw', 'auto', 'auto_ignore']),
            format_mismatch=dict(type='str', default='regenerate', choices=['regenerate', 'convert']),
            select_crypto_backend=dict(type='str', choices=['auto', 'pyopenssl', 'cryptography'], default='auto'),
            return_content=dict(type='bool', default=False),
            regenerate=dict(
                type='str',
                default='full_idempotence',
                choices=['never', 'fail', 'partial_idempotence', 'full_idempotence', 'always']
            ),
        ),
        supports_check_mode=True,
        add_file_common_args=True,
        required_together=[
            ['cipher', 'passphrase']
        ],
        required_if=[
            ['type', 'ECC', ['curve']],
        ],
    )
    # The destination directory must already exist; this module only
    # manages the key file itself.
    base_dir = os.path.dirname(module.params['path']) or '.'
    if not os.path.isdir(base_dir):
        module.fail_json(
            name=base_dir,
            msg='The directory %s does not exist or the file is not a directory' % base_dir
        )
    backend = module.params['select_crypto_backend']
    if backend == 'auto':
        # Detection what is possible
        can_use_cryptography = CRYPTOGRAPHY_FOUND and CRYPTOGRAPHY_VERSION >= LooseVersion(MINIMAL_CRYPTOGRAPHY_VERSION)
        can_use_pyopenssl = PYOPENSSL_FOUND and PYOPENSSL_VERSION >= LooseVersion(MINIMAL_PYOPENSSL_VERSION)
        # Decision
        if module.params['cipher'] and module.params['passphrase'] and module.params['cipher'] != 'auto':
            # A specific (non-'auto') cipher is only supported by pyOpenSSL.
            # First try pyOpenSSL, then cryptography
            if can_use_pyopenssl:
                backend = 'pyopenssl'
            elif can_use_cryptography:
                backend = 'cryptography'
        else:
            # First try cryptography, then pyOpenSSL
            if can_use_cryptography:
                backend = 'cryptography'
            elif can_use_pyopenssl:
                backend = 'pyopenssl'
        # Success?
        if backend == 'auto':
            module.fail_json(msg=("Can't detect any of the required Python libraries "
                                  "cryptography (>= {0}) or PyOpenSSL (>= {1})").format(
                MINIMAL_CRYPTOGRAPHY_VERSION,
                MINIMAL_PYOPENSSL_VERSION))
    try:
        if backend == 'pyopenssl':
            if not PYOPENSSL_FOUND:
                module.fail_json(msg=missing_required_lib('pyOpenSSL >= {0}'.format(MINIMAL_PYOPENSSL_VERSION)),
                                 exception=PYOPENSSL_IMP_ERR)
            module.deprecate('The module is using the PyOpenSSL backend. This backend has been deprecated',
                             version='2.0.0', collection_name='community.crypto')
            private_key = PrivateKeyPyOpenSSL(module)
        elif backend == 'cryptography':
            if not CRYPTOGRAPHY_FOUND:
                module.fail_json(msg=missing_required_lib('cryptography >= {0}'.format(MINIMAL_CRYPTOGRAPHY_VERSION)),
                                 exception=CRYPTOGRAPHY_IMP_ERR)
            private_key = PrivateKeyCryptography(module)
        if private_key.state == 'present':
            if module.check_mode:
                # Predict change: forced, or either the generation-phase or
                # the conversion-phase check reports a mismatch.
                result = private_key.dump()
                result['changed'] = private_key.force \
                    or not private_key.check(module, ignore_conversion=True) \
                    or not private_key.check(module, ignore_conversion=False)
                module.exit_json(**result)
            private_key.generate(module)
        else:
            if module.check_mode:
                # Removal changes something iff the file currently exists.
                result = private_key.dump()
                result['changed'] = os.path.exists(module.params['path'])
                module.exit_json(**result)
            private_key.remove(module)
        result = private_key.dump()
        module.exit_json(**result)
    except OpenSSLObjectError as exc:
        module.fail_json(msg=to_native(exc))
# Standard Ansible module entry point.
if __name__ == '__main__':
    main()
|
4295_2
|
crossvul
|
py
|
CWE-116
|
Improper Encoding or Escaping of Output - Improper encoding or escaping can allow attackers to change the commands that are sent to another component, inserting malicious commands instead.
|
python
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2016-2017, Yanis Guenane <yanis+ansible@guenane.org>
# Copyright: (c) 2017, Markus Teufelberger <mteufelberger+ansible@mgit.at>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
module: openssl_privatekey_info
short_description: Provide information for OpenSSL private keys
description:
- This module allows one to query information on OpenSSL private keys.
- In case the key consistency checks fail, the module will fail as this indicates a faked
private key. In this case, all return variables are still returned. Note that key consistency
checks are not available all key types; if none is available, C(none) is returned for
C(key_is_consistent).
- It uses the pyOpenSSL or cryptography python library to interact with OpenSSL. If both the
cryptography and PyOpenSSL libraries are available (and meet the minimum version requirements)
cryptography will be preferred as a backend over PyOpenSSL (unless the backend is forced with
C(select_crypto_backend)). Please note that the PyOpenSSL backend was deprecated in Ansible 2.9
and will be removed in community.crypto 2.0.0.
requirements:
- PyOpenSSL >= 0.15 or cryptography >= 1.2.3
author:
- Felix Fontein (@felixfontein)
- Yanis Guenane (@Spredzy)
options:
path:
description:
- Remote absolute path where the private key file is loaded from.
type: path
content:
description:
- Content of the private key file.
- Either I(path) or I(content) must be specified, but not both.
type: str
version_added: '1.0.0'
passphrase:
description:
- The passphrase for the private key.
type: str
return_private_key_data:
description:
- Whether to return private key data.
- Only set this to C(yes) when you want private information about this key to
leave the remote machine.
- "WARNING: you have to make sure that private key data isn't accidentally logged!"
type: bool
default: no
select_crypto_backend:
description:
- Determines which crypto backend to use.
- The default choice is C(auto), which tries to use C(cryptography) if available, and falls back to C(pyopenssl).
- If set to C(pyopenssl), will try to use the L(pyOpenSSL,https://pypi.org/project/pyOpenSSL/) library.
- If set to C(cryptography), will try to use the L(cryptography,https://cryptography.io/) library.
- Please note that the C(pyopenssl) backend has been deprecated in Ansible 2.9, and will be removed in community.crypto 2.0.0.
From that point on, only the C(cryptography) backend will be available.
type: str
default: auto
choices: [ auto, cryptography, pyopenssl ]
seealso:
- module: community.crypto.openssl_privatekey
'''
EXAMPLES = r'''
- name: Generate an OpenSSL private key with the default values (4096 bits, RSA)
community.crypto.openssl_privatekey:
path: /etc/ssl/private/ansible.com.pem
- name: Get information on generated key
community.crypto.openssl_privatekey_info:
path: /etc/ssl/private/ansible.com.pem
register: result
- name: Dump information
debug:
var: result
'''
RETURN = r'''
can_load_key:
description: Whether the module was able to load the private key from disk
returned: always
type: bool
can_parse_key:
description: Whether the module was able to parse the private key
returned: always
type: bool
key_is_consistent:
description:
- Whether the key is consistent. Can also return C(none) next to C(yes) and
C(no), to indicate that consistency couldn't be checked.
- In case the check returns C(no), the module will fail.
returned: always
type: bool
public_key:
description: Private key's public key in PEM format
returned: success
type: str
sample: "-----BEGIN PUBLIC KEY-----\nMIICIjANBgkqhkiG9w0BAQEFAAOCAg8A..."
public_key_fingerprints:
description:
- Fingerprints of private key's public key.
- For every hash algorithm available, the fingerprint is computed.
returned: success
type: dict
sample: "{'sha256': 'd4:b3:aa:6d:c8:04:ce:4e:ba:f6:29:4d:92:a3:94:b0:c2:ff:bd:bf:33:63:11:43:34:0f:51:b0:95:09:2f:63',
'sha512': 'f7:07:4a:f0:b0:f0:e6:8b:95:5f:f9:e6:61:0a:32:68:f1..."
type:
description:
- The key's type.
- One of C(RSA), C(DSA), C(ECC), C(Ed25519), C(X25519), C(Ed448), or C(X448).
- Will start with C(unknown) if the key type cannot be determined.
returned: success
type: str
sample: RSA
public_data:
description:
- Public key data. Depends on key type.
returned: success
type: dict
private_data:
description:
- Private key data. Depends on key type.
returned: success and when I(return_private_key_data) is set to C(yes)
type: dict
'''
import abc
import os
import traceback
from distutils.version import LooseVersion
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils._text import to_native, to_bytes
from ansible_collections.community.crypto.plugins.module_utils.crypto.basic import (
CRYPTOGRAPHY_HAS_X25519,
CRYPTOGRAPHY_HAS_X448,
CRYPTOGRAPHY_HAS_ED25519,
CRYPTOGRAPHY_HAS_ED448,
OpenSSLObjectError,
)
from ansible_collections.community.crypto.plugins.module_utils.crypto.support import (
OpenSSLObject,
load_privatekey,
get_fingerprint_of_bytes,
)
from ansible_collections.community.crypto.plugins.module_utils.crypto.math import (
binary_exp_mod,
quick_is_not_prime,
)
# Minimum backend library versions supported by this module.
MINIMAL_CRYPTOGRAPHY_VERSION = '1.2.3'
MINIMAL_PYOPENSSL_VERSION = '0.15'
# Probe for the optional pyOpenSSL backend; the import traceback is kept
# for missing_required_lib() error reporting.
PYOPENSSL_IMP_ERR = None
try:
    import OpenSSL
    from OpenSSL import crypto
    PYOPENSSL_VERSION = LooseVersion(OpenSSL.__version__)
except ImportError:
    PYOPENSSL_IMP_ERR = traceback.format_exc()
    PYOPENSSL_FOUND = False
else:
    PYOPENSSL_FOUND = True
# Probe for the optional cryptography backend.
CRYPTOGRAPHY_IMP_ERR = None
try:
    import cryptography
    from cryptography.hazmat.primitives import serialization
    CRYPTOGRAPHY_VERSION = LooseVersion(cryptography.__version__)
except ImportError:
    CRYPTOGRAPHY_IMP_ERR = traceback.format_exc()
    CRYPTOGRAPHY_FOUND = False
else:
    CRYPTOGRAPHY_FOUND = True
# Fixed payload signed/verified during key consistency checks.
SIGNATURE_TEST_DATA = b'1234'
def _get_cryptography_key_info(key):
    """Extract type name, public data and private data from a key object.

    Returns a tuple ``(key_type, key_public_data, key_private_data)``.
    The isinstance-chain order matters: the X/Ed key checks come before
    the generic EllipticCurvePrivateKey check.
    """
    key_public_data = dict()
    key_private_data = dict()
    if isinstance(key, cryptography.hazmat.primitives.asymmetric.rsa.RSAPrivateKey):
        key_type = 'RSA'
        key_public_data['size'] = key.key_size
        key_public_data['modulus'] = key.public_key().public_numbers().n
        key_public_data['exponent'] = key.public_key().public_numbers().e
        # RSA private components: primes p, q and private exponent d.
        key_private_data['p'] = key.private_numbers().p
        key_private_data['q'] = key.private_numbers().q
        key_private_data['exponent'] = key.private_numbers().d
    elif isinstance(key, cryptography.hazmat.primitives.asymmetric.dsa.DSAPrivateKey):
        key_type = 'DSA'
        key_public_data['size'] = key.key_size
        # DSA domain parameters (p, q, g) and public value y.
        key_public_data['p'] = key.parameters().parameter_numbers().p
        key_public_data['q'] = key.parameters().parameter_numbers().q
        key_public_data['g'] = key.parameters().parameter_numbers().g
        key_public_data['y'] = key.public_key().public_numbers().y
        key_private_data['x'] = key.private_numbers().x
    elif CRYPTOGRAPHY_HAS_X25519 and isinstance(key, cryptography.hazmat.primitives.asymmetric.x25519.X25519PrivateKey):
        key_type = 'X25519'
    elif CRYPTOGRAPHY_HAS_X448 and isinstance(key, cryptography.hazmat.primitives.asymmetric.x448.X448PrivateKey):
        key_type = 'X448'
    elif CRYPTOGRAPHY_HAS_ED25519 and isinstance(key, cryptography.hazmat.primitives.asymmetric.ed25519.Ed25519PrivateKey):
        key_type = 'Ed25519'
    elif CRYPTOGRAPHY_HAS_ED448 and isinstance(key, cryptography.hazmat.primitives.asymmetric.ed448.Ed448PrivateKey):
        key_type = 'Ed448'
    elif isinstance(key, cryptography.hazmat.primitives.asymmetric.ec.EllipticCurvePrivateKey):
        key_type = 'ECC'
        key_public_data['curve'] = key.public_key().curve.name
        key_public_data['x'] = key.public_key().public_numbers().x
        key_public_data['y'] = key.public_key().public_numbers().y
        key_public_data['exponent_size'] = key.public_key().curve.key_size
        key_private_data['multiplier'] = key.private_numbers().private_value
    else:
        # Unknown key class: report its Python type for diagnostics.
        key_type = 'unknown ({0})'.format(type(key))
    return key_type, key_public_data, key_private_data
def _check_dsa_consistency(key_public_data, key_private_data):
    """Verify that the DSA domain parameters and key pair fit together.

    Returns True when all checks pass, False when any check fails, and
    None when a required component is missing from the input dicts.
    """
    p, q, g, y = (key_public_data.get(k) for k in ('p', 'q', 'g', 'y'))
    x = key_private_data.get('x')
    if None in (p, q, g, y, x):
        return None
    # g must not be 0, 1 or -1 in Z/pZ.
    if not (2 <= g <= p - 2):
        return False
    # The private exponent must lie in [1, q).
    if not (1 <= x <= q - 1):
        return False
    # q has to divide p - 1.
    if (p - 1) % q:
        return False
    # g must generate a subgroup of order q: g**q mod p == 1.
    if binary_exp_mod(g, q, p) != 1:
        return False
    # The public value has to match the private exponent: g**x mod p == y.
    if binary_exp_mod(g, x, p) != y:
        return False
    # Fast (incomplete) primality screening of q and p.
    if quick_is_not_prime(q) or quick_is_not_prime(p):
        return False
    return True
def _is_cryptography_key_consistent(key, key_public_data, key_private_data):
    """Check whether a private key is internally consistent.

    Returns True/False, or None when no consistency check is available
    for this key type (X25519/X448, or cryptography too old for sign()).
    """
    if isinstance(key, cryptography.hazmat.primitives.asymmetric.rsa.RSAPrivateKey):
        # NOTE(review): relies on cryptography's private backend handle
        # (key._backend._lib) — breaks on newer cryptography releases that
        # removed the OpenSSL binding attributes; confirm supported versions.
        return bool(key._backend._lib.RSA_check_key(key._rsa_cdata))
    if isinstance(key, cryptography.hazmat.primitives.asymmetric.dsa.DSAPrivateKey):
        # Prefer the arithmetic check; fall back to sign/verify if some
        # component needed for it is unavailable.
        result = _check_dsa_consistency(key_public_data, key_private_data)
        if result is not None:
            return result
        try:
            signature = key.sign(SIGNATURE_TEST_DATA, cryptography.hazmat.primitives.hashes.SHA256())
        except AttributeError:
            # sign() was added in cryptography 1.5, but we support older versions
            return None
        try:
            key.public_key().verify(
                signature,
                SIGNATURE_TEST_DATA,
                cryptography.hazmat.primitives.hashes.SHA256()
            )
            return True
        except cryptography.exceptions.InvalidSignature:
            return False
    if isinstance(key, cryptography.hazmat.primitives.asymmetric.ec.EllipticCurvePrivateKey):
        try:
            signature = key.sign(
                SIGNATURE_TEST_DATA,
                cryptography.hazmat.primitives.asymmetric.ec.ECDSA(cryptography.hazmat.primitives.hashes.SHA256())
            )
        except AttributeError:
            # sign() was added in cryptography 1.5, but we support older versions
            return None
        try:
            key.public_key().verify(
                signature,
                SIGNATURE_TEST_DATA,
                cryptography.hazmat.primitives.asymmetric.ec.ECDSA(cryptography.hazmat.primitives.hashes.SHA256())
            )
            return True
        except cryptography.exceptions.InvalidSignature:
            return False
    # Ed25519/Ed448 sign without an explicit hash algorithm argument.
    has_simple_sign_function = False
    if CRYPTOGRAPHY_HAS_ED25519 and isinstance(key, cryptography.hazmat.primitives.asymmetric.ed25519.Ed25519PrivateKey):
        has_simple_sign_function = True
    if CRYPTOGRAPHY_HAS_ED448 and isinstance(key, cryptography.hazmat.primitives.asymmetric.ed448.Ed448PrivateKey):
        has_simple_sign_function = True
    if has_simple_sign_function:
        signature = key.sign(SIGNATURE_TEST_DATA)
        try:
            key.public_key().verify(signature, SIGNATURE_TEST_DATA)
            return True
        except cryptography.exceptions.InvalidSignature:
            return False
    # For X25519 and X448, there's no test yet.
    return None
class PrivateKeyInfo(OpenSSLObject):
    """Abstract base for gathering information about a private key.

    Subclasses implement the backend-specific hooks _get_public_key(),
    _get_key_info() and _is_key_consistent(); get_info() drives the process
    and assembles the result dict.
    """
    def __init__(self, module, backend):
        super(PrivateKeyInfo, self).__init__(
            module.params['path'] or '',
            'present',
            False,
            module.check_mode,
        )
        self.backend = backend
        self.module = module
        # Inline key material (mutually exclusive with 'path' per argument_spec).
        self.content = module.params['content']
        self.passphrase = module.params['passphrase']
        # Whether private numbers should be included in the result.
        self.return_private_key_data = module.params['return_private_key_data']
    def generate(self):
        # Empty method because OpenSSLObject wants this
        pass
    def dump(self):
        # Empty method because OpenSSLObject wants this
        pass
    @abc.abstractmethod
    def _get_public_key(self, binary):
        # Return the public key serialized as DER (binary=True) or PEM.
        pass
    @abc.abstractmethod
    def _get_key_info(self):
        # Return a (key_type, public_data, private_data) tuple.
        pass
    @abc.abstractmethod
    def _is_key_consistent(self, key_public_data, key_private_data):
        # Return True/False for a verdict, or None when unknown.
        pass
    def get_info(self):
        """Load, parse and analyze the key; fail the module on any error.

        Note: result flags are updated as each stage succeeds so that
        fail_json() always reports how far processing got.
        """
        result = dict(
            can_load_key=False,
            can_parse_key=False,
            key_is_consistent=None,
        )
        if self.content is not None:
            priv_key_detail = self.content.encode('utf-8')
            result['can_load_key'] = True
        else:
            try:
                with open(self.path, 'rb') as b_priv_key_fh:
                    priv_key_detail = b_priv_key_fh.read()
                result['can_load_key'] = True
            except (IOError, OSError) as exc:
                self.module.fail_json(msg=to_native(exc), **result)
        try:
            self.key = load_privatekey(
                path=None,
                content=priv_key_detail,
                passphrase=to_bytes(self.passphrase) if self.passphrase is not None else self.passphrase,
                backend=self.backend
            )
            result['can_parse_key'] = True
        except OpenSSLObjectError as exc:
            self.module.fail_json(msg=to_native(exc), **result)
        result['public_key'] = self._get_public_key(binary=False)
        pk = self._get_public_key(binary=True)
        # _get_public_key may return None on very old pyOpenSSL versions.
        result['public_key_fingerprints'] = get_fingerprint_of_bytes(pk) if pk is not None else dict()
        key_type, key_public_data, key_private_data = self._get_key_info()
        result['type'] = key_type
        result['public_data'] = key_public_data
        if self.return_private_key_data:
            result['private_data'] = key_private_data
        result['key_is_consistent'] = self._is_key_consistent(key_public_data, key_private_data)
        if result['key_is_consistent'] is False:
            # Only fail when it is False, to avoid to fail on None (which means "we don't know")
            result['key_is_consistent'] = False
            self.module.fail_json(
                msg="Private key is not consistent! (See "
                    "https://blog.hboeck.de/archives/888-How-I-tricked-Symantec-with-a-Fake-Private-Key.html)",
                **result
            )
        return result
class PrivateKeyInfoCryptography(PrivateKeyInfo):
    """Query private key information using the cryptography backend."""
    def __init__(self, module):
        super(PrivateKeyInfoCryptography, self).__init__(module, 'cryptography')
    def _get_public_key(self, binary):
        # Serialize the derived public key as DER (binary) or PEM (text).
        if binary:
            encoding = serialization.Encoding.DER
        else:
            encoding = serialization.Encoding.PEM
        public_key = self.key.public_key()
        return public_key.public_bytes(encoding, serialization.PublicFormat.SubjectPublicKeyInfo)
    def _get_key_info(self):
        # Shared helper extracts type and public/private numbers.
        return _get_cryptography_key_info(self.key)
    def _is_key_consistent(self, key_public_data, key_private_data):
        # Shared helper performs the backend-specific consistency check.
        return _is_cryptography_key_consistent(self.key, key_public_data, key_private_data)
class PrivateKeyInfoPyOpenSSL(PrivateKeyInfo):
    """Query private key information using the (deprecated) pyOpenSSL backend.

    Where pyOpenSSL itself is too old to expose the needed APIs, this class
    reaches into OpenSSL through pyOpenSSL's cffi bindings, and as a last
    resort falls back to the cryptography backend helpers.
    """
    def __init__(self, module):
        super(PrivateKeyInfoPyOpenSSL, self).__init__(module, 'pyopenssl')
    def _get_public_key(self, binary):
        # Dump the public key as ASN.1/DER (binary=True) or PEM.
        try:
            return crypto.dump_publickey(
                crypto.FILETYPE_ASN1 if binary else crypto.FILETYPE_PEM,
                self.key
            )
        except AttributeError:
            try:
                # pyOpenSSL < 16.0:
                bio = crypto._new_mem_buf()
                if binary:
                    rc = crypto._lib.i2d_PUBKEY_bio(bio, self.key._pkey)
                else:
                    rc = crypto._lib.PEM_write_bio_PUBKEY(bio, self.key._pkey)
                if rc != 1:
                    crypto._raise_current_error()
                return crypto._bio_to_string(bio)
            except AttributeError:
                # No way to dump the public key at all; caller tolerates None.
                self.module.warn('Your pyOpenSSL version does not support dumping public keys. '
                                 'Please upgrade to version 16.0 or newer, or use the cryptography backend.')
    def bigint_to_int(self, bn):
        '''Convert OpenSSL BIGINT to Python integer'''
        if bn == OpenSSL._util.ffi.NULL:
            return None
        hexstr = OpenSSL._util.lib.BN_bn2hex(bn)
        try:
            return int(OpenSSL._util.ffi.string(hexstr), 16)
        finally:
            # BN_bn2hex allocates; always release the buffer.
            OpenSSL._util.lib.OPENSSL_free(hexstr)
    def _get_key_info(self):
        """Extract key type plus public/private parameters via OpenSSL FFI."""
        key_public_data = dict()
        key_private_data = dict()
        openssl_key_type = self.key.type()
        try_fallback = True
        if crypto.TYPE_RSA == openssl_key_type:
            key_type = 'RSA'
            key_public_data['size'] = self.key.bits()
            try:
                # Use OpenSSL directly to extract key data
                key = OpenSSL._util.lib.EVP_PKEY_get1_RSA(self.key._pkey)
                key = OpenSSL._util.ffi.gc(key, OpenSSL._util.lib.RSA_free)
                # OpenSSL 1.1 and newer have functions to extract the parameters
                # from the EVP PKEY data structures. Older versions didn't have
                # these getters, and it was common use to simply access the values
                # directly. Since there's no guarantee that these data structures
                # will still be accessible in the future, we use the getters for
                # 1.1 and later, and directly access the values for 1.0.x and
                # earlier.
                if OpenSSL.SSL.OPENSSL_VERSION_NUMBER >= 0x10100000:
                    # Get modulus and exponents
                    n = OpenSSL._util.ffi.new("BIGNUM **")
                    e = OpenSSL._util.ffi.new("BIGNUM **")
                    d = OpenSSL._util.ffi.new("BIGNUM **")
                    OpenSSL._util.lib.RSA_get0_key(key, n, e, d)
                    key_public_data['modulus'] = self.bigint_to_int(n[0])
                    key_public_data['exponent'] = self.bigint_to_int(e[0])
                    key_private_data['exponent'] = self.bigint_to_int(d[0])
                    # Get factors
                    p = OpenSSL._util.ffi.new("BIGNUM **")
                    q = OpenSSL._util.ffi.new("BIGNUM **")
                    OpenSSL._util.lib.RSA_get0_factors(key, p, q)
                    key_private_data['p'] = self.bigint_to_int(p[0])
                    key_private_data['q'] = self.bigint_to_int(q[0])
                else:
                    # Get modulus and exponents
                    key_public_data['modulus'] = self.bigint_to_int(key.n)
                    key_public_data['exponent'] = self.bigint_to_int(key.e)
                    key_private_data['exponent'] = self.bigint_to_int(key.d)
                    # Get factors
                    key_private_data['p'] = self.bigint_to_int(key.p)
                    key_private_data['q'] = self.bigint_to_int(key.q)
                try_fallback = False
            except AttributeError:
                # Use fallback if available
                pass
        elif crypto.TYPE_DSA == openssl_key_type:
            key_type = 'DSA'
            key_public_data['size'] = self.key.bits()
            try:
                # Use OpenSSL directly to extract key data
                key = OpenSSL._util.lib.EVP_PKEY_get1_DSA(self.key._pkey)
                key = OpenSSL._util.ffi.gc(key, OpenSSL._util.lib.DSA_free)
                # OpenSSL 1.1 and newer have functions to extract the parameters
                # from the EVP PKEY data structures. Older versions didn't have
                # these getters, and it was common use to simply access the values
                # directly. Since there's no guarantee that these data structures
                # will still be accessible in the future, we use the getters for
                # 1.1 and later, and directly access the values for 1.0.x and
                # earlier.
                if OpenSSL.SSL.OPENSSL_VERSION_NUMBER >= 0x10100000:
                    # Get public parameters (primes and group element)
                    p = OpenSSL._util.ffi.new("BIGNUM **")
                    q = OpenSSL._util.ffi.new("BIGNUM **")
                    g = OpenSSL._util.ffi.new("BIGNUM **")
                    OpenSSL._util.lib.DSA_get0_pqg(key, p, q, g)
                    key_public_data['p'] = self.bigint_to_int(p[0])
                    key_public_data['q'] = self.bigint_to_int(q[0])
                    key_public_data['g'] = self.bigint_to_int(g[0])
                    # Get public and private key exponents
                    y = OpenSSL._util.ffi.new("BIGNUM **")
                    x = OpenSSL._util.ffi.new("BIGNUM **")
                    OpenSSL._util.lib.DSA_get0_key(key, y, x)
                    key_public_data['y'] = self.bigint_to_int(y[0])
                    key_private_data['x'] = self.bigint_to_int(x[0])
                else:
                    # Get public parameters (primes and group element)
                    key_public_data['p'] = self.bigint_to_int(key.p)
                    key_public_data['q'] = self.bigint_to_int(key.q)
                    key_public_data['g'] = self.bigint_to_int(key.g)
                    # Get public and private key exponents
                    key_public_data['y'] = self.bigint_to_int(key.pub_key)
                    key_private_data['x'] = self.bigint_to_int(key.priv_key)
                try_fallback = False
            except AttributeError:
                # Use fallback if available
                pass
        else:
            # Return 'unknown'
            key_type = 'unknown ({0})'.format(self.key.type())
        # If needed and if possible, fall back to cryptography
        if try_fallback and PYOPENSSL_VERSION >= LooseVersion('16.1.0') and CRYPTOGRAPHY_FOUND:
            return _get_cryptography_key_info(self.key.to_cryptography_key())
        return key_type, key_public_data, key_private_data
    def _is_key_consistent(self, key_public_data, key_private_data):
        """Check key consistency; True/False for a verdict, None when unknown."""
        openssl_key_type = self.key.type()
        if crypto.TYPE_RSA == openssl_key_type:
            try:
                return self.key.check()
            except crypto.Error:
                # OpenSSL error means that key is not consistent
                return False
        if crypto.TYPE_DSA == openssl_key_type:
            result = _check_dsa_consistency(key_public_data, key_private_data)
            if result is not None:
                return result
            # Fall back to a sign/verify round trip.
            signature = crypto.sign(self.key, SIGNATURE_TEST_DATA, 'sha256')
            # Verify wants a cert (where it can get the public key from)
            cert = crypto.X509()
            cert.set_pubkey(self.key)
            try:
                crypto.verify(cert, signature, SIGNATURE_TEST_DATA, 'sha256')
                return True
            except crypto.Error:
                return False
        # If needed and if possible, fall back to cryptography
        if PYOPENSSL_VERSION >= LooseVersion('16.1.0') and CRYPTOGRAPHY_FOUND:
            return _is_cryptography_key_consistent(self.key.to_cryptography_key(), key_public_data, key_private_data)
        return None
def main():
    """Entry point: parse arguments, select a crypto backend, return key info."""
    module = AnsibleModule(
        argument_spec=dict(
            path=dict(type='path'),
            # SECURITY FIX: 'content' carries raw private-key material and must
            # be marked no_log so it can never appear in Ansible logs or in
            # verbose/invocation output (matches passphrase handling below).
            content=dict(type='str', no_log=True),
            passphrase=dict(type='str', no_log=True),
            return_private_key_data=dict(type='bool', default=False),
            select_crypto_backend=dict(type='str', default='auto', choices=['auto', 'cryptography', 'pyopenssl']),
        ),
        required_one_of=(
            ['path', 'content'],
        ),
        mutually_exclusive=(
            ['path', 'content'],
        ),
        supports_check_mode=True,
    )
    try:
        # Fail early with a clear message when the containing directory is absent.
        if module.params['path'] is not None:
            base_dir = os.path.dirname(module.params['path']) or '.'
            if not os.path.isdir(base_dir):
                module.fail_json(
                    name=base_dir,
                    msg='The directory %s does not exist or the file is not a directory' % base_dir
                )
        backend = module.params['select_crypto_backend']
        if backend == 'auto':
            # Detect what backend we can use
            can_use_cryptography = CRYPTOGRAPHY_FOUND and CRYPTOGRAPHY_VERSION >= LooseVersion(MINIMAL_CRYPTOGRAPHY_VERSION)
            can_use_pyopenssl = PYOPENSSL_FOUND and PYOPENSSL_VERSION >= LooseVersion(MINIMAL_PYOPENSSL_VERSION)
            # If cryptography is available we'll use it
            if can_use_cryptography:
                backend = 'cryptography'
            elif can_use_pyopenssl:
                backend = 'pyopenssl'
            # Fail if no backend has been found
            if backend == 'auto':
                module.fail_json(msg=("Can't detect any of the required Python libraries "
                                      "cryptography (>= {0}) or PyOpenSSL (>= {1})").format(
                    MINIMAL_CRYPTOGRAPHY_VERSION,
                    MINIMAL_PYOPENSSL_VERSION))
        if backend == 'pyopenssl':
            if not PYOPENSSL_FOUND:
                module.fail_json(msg=missing_required_lib('pyOpenSSL >= {0}'.format(MINIMAL_PYOPENSSL_VERSION)),
                                 exception=PYOPENSSL_IMP_ERR)
            module.deprecate('The module is using the PyOpenSSL backend. This backend has been deprecated',
                             version='2.0.0', collection_name='community.crypto')
            privatekey = PrivateKeyInfoPyOpenSSL(module)
        elif backend == 'cryptography':
            if not CRYPTOGRAPHY_FOUND:
                module.fail_json(msg=missing_required_lib('cryptography >= {0}'.format(MINIMAL_CRYPTOGRAPHY_VERSION)),
                                 exception=CRYPTOGRAPHY_IMP_ERR)
            privatekey = PrivateKeyInfoCryptography(module)
        result = privatekey.get_info()
        module.exit_json(**result)
    except OpenSSLObjectError as exc:
        module.fail_json(msg=to_native(exc))
if __name__ == "__main__":
    main()
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2016-2017, Yanis Guenane <yanis+ansible@guenane.org>
# Copyright: (c) 2017, Markus Teufelberger <mteufelberger+ansible@mgit.at>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
module: openssl_privatekey_info
short_description: Provide information for OpenSSL private keys
description:
- This module allows one to query information on OpenSSL private keys.
- In case the key consistency checks fail, the module will fail as this indicates a faked
private key. In this case, all return variables are still returned. Note that key consistency
checks are not available for all key types; if none is available, C(none) is returned for
C(key_is_consistent).
- It uses the pyOpenSSL or cryptography python library to interact with OpenSSL. If both the
cryptography and PyOpenSSL libraries are available (and meet the minimum version requirements)
cryptography will be preferred as a backend over PyOpenSSL (unless the backend is forced with
C(select_crypto_backend)). Please note that the PyOpenSSL backend was deprecated in Ansible 2.9
and will be removed in community.crypto 2.0.0.
requirements:
- PyOpenSSL >= 0.15 or cryptography >= 1.2.3
author:
- Felix Fontein (@felixfontein)
- Yanis Guenane (@Spredzy)
options:
path:
description:
- Remote absolute path where the private key file is loaded from.
type: path
content:
description:
- Content of the private key file.
- Either I(path) or I(content) must be specified, but not both.
type: str
version_added: '1.0.0'
passphrase:
description:
- The passphrase for the private key.
type: str
return_private_key_data:
description:
- Whether to return private key data.
- Only set this to C(yes) when you want private information about this key to
leave the remote machine.
- "WARNING: you have to make sure that private key data isn't accidentally logged!"
type: bool
default: no
select_crypto_backend:
description:
- Determines which crypto backend to use.
- The default choice is C(auto), which tries to use C(cryptography) if available, and falls back to C(pyopenssl).
- If set to C(pyopenssl), will try to use the L(pyOpenSSL,https://pypi.org/project/pyOpenSSL/) library.
- If set to C(cryptography), will try to use the L(cryptography,https://cryptography.io/) library.
- Please note that the C(pyopenssl) backend has been deprecated in Ansible 2.9, and will be removed in community.crypto 2.0.0.
From that point on, only the C(cryptography) backend will be available.
type: str
default: auto
choices: [ auto, cryptography, pyopenssl ]
seealso:
- module: community.crypto.openssl_privatekey
'''
EXAMPLES = r'''
- name: Generate an OpenSSL private key with the default values (4096 bits, RSA)
community.crypto.openssl_privatekey:
path: /etc/ssl/private/ansible.com.pem
- name: Get information on generated key
community.crypto.openssl_privatekey_info:
path: /etc/ssl/private/ansible.com.pem
register: result
- name: Dump information
debug:
var: result
'''
RETURN = r'''
can_load_key:
description: Whether the module was able to load the private key from disk
returned: always
type: bool
can_parse_key:
description: Whether the module was able to parse the private key
returned: always
type: bool
key_is_consistent:
description:
- Whether the key is consistent. Can also return C(none) next to C(yes) and
C(no), to indicate that consistency couldn't be checked.
- In case the check returns C(no), the module will fail.
returned: always
type: bool
public_key:
description: Private key's public key in PEM format
returned: success
type: str
sample: "-----BEGIN PUBLIC KEY-----\nMIICIjANBgkqhkiG9w0BAQEFAAOCAg8A..."
public_key_fingerprints:
description:
- Fingerprints of private key's public key.
- For every hash algorithm available, the fingerprint is computed.
returned: success
type: dict
sample: "{'sha256': 'd4:b3:aa:6d:c8:04:ce:4e:ba:f6:29:4d:92:a3:94:b0:c2:ff:bd:bf:33:63:11:43:34:0f:51:b0:95:09:2f:63',
'sha512': 'f7:07:4a:f0:b0:f0:e6:8b:95:5f:f9:e6:61:0a:32:68:f1..."
type:
description:
- The key's type.
- One of C(RSA), C(DSA), C(ECC), C(Ed25519), C(X25519), C(Ed448), or C(X448).
- Will start with C(unknown) if the key type cannot be determined.
returned: success
type: str
sample: RSA
public_data:
description:
- Public key data. Depends on key type.
returned: success
type: dict
private_data:
description:
- Private key data. Depends on key type.
returned: success and when I(return_private_key_data) is set to C(yes)
type: dict
'''
import abc
import os
import traceback
from distutils.version import LooseVersion
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils._text import to_native, to_bytes
from ansible_collections.community.crypto.plugins.module_utils.crypto.basic import (
CRYPTOGRAPHY_HAS_X25519,
CRYPTOGRAPHY_HAS_X448,
CRYPTOGRAPHY_HAS_ED25519,
CRYPTOGRAPHY_HAS_ED448,
OpenSSLObjectError,
)
from ansible_collections.community.crypto.plugins.module_utils.crypto.support import (
OpenSSLObject,
load_privatekey,
get_fingerprint_of_bytes,
)
from ansible_collections.community.crypto.plugins.module_utils.crypto.math import (
binary_exp_mod,
quick_is_not_prime,
)
# Minimum library versions supported by this module.
MINIMAL_CRYPTOGRAPHY_VERSION = '1.2.3'
MINIMAL_PYOPENSSL_VERSION = '0.15'
# Guarded import of pyOpenSSL; keep the traceback for error reporting.
PYOPENSSL_IMP_ERR = None
try:
    import OpenSSL
    from OpenSSL import crypto
    PYOPENSSL_VERSION = LooseVersion(OpenSSL.__version__)
except ImportError:
    PYOPENSSL_IMP_ERR = traceback.format_exc()
    PYOPENSSL_FOUND = False
else:
    PYOPENSSL_FOUND = True
# Guarded import of cryptography; keep the traceback for error reporting.
CRYPTOGRAPHY_IMP_ERR = None
try:
    import cryptography
    from cryptography.hazmat.primitives import serialization
    CRYPTOGRAPHY_VERSION = LooseVersion(cryptography.__version__)
except ImportError:
    CRYPTOGRAPHY_IMP_ERR = traceback.format_exc()
    CRYPTOGRAPHY_FOUND = False
else:
    CRYPTOGRAPHY_FOUND = True
# Fixed payload used for sign/verify consistency checks.
SIGNATURE_TEST_DATA = b'1234'
def _get_cryptography_key_info(key):
    """Extract key type and public/private numbers from a cryptography key.

    Returns a (key_type, public_data, private_data) tuple; key classes that
    are not recognized yield a type string starting with 'unknown'.
    """
    public_data = dict()
    private_data = dict()
    asym = cryptography.hazmat.primitives.asymmetric
    if isinstance(key, asym.rsa.RSAPrivateKey):
        key_type = 'RSA'
        public_numbers = key.public_key().public_numbers()
        private_numbers = key.private_numbers()
        public_data['size'] = key.key_size
        public_data['modulus'] = public_numbers.n
        public_data['exponent'] = public_numbers.e
        private_data['p'] = private_numbers.p
        private_data['q'] = private_numbers.q
        private_data['exponent'] = private_numbers.d
    elif isinstance(key, asym.dsa.DSAPrivateKey):
        key_type = 'DSA'
        parameter_numbers = key.parameters().parameter_numbers()
        public_data['size'] = key.key_size
        public_data['p'] = parameter_numbers.p
        public_data['q'] = parameter_numbers.q
        public_data['g'] = parameter_numbers.g
        public_data['y'] = key.public_key().public_numbers().y
        private_data['x'] = key.private_numbers().x
    elif CRYPTOGRAPHY_HAS_X25519 and isinstance(key, asym.x25519.X25519PrivateKey):
        key_type = 'X25519'
    elif CRYPTOGRAPHY_HAS_X448 and isinstance(key, asym.x448.X448PrivateKey):
        key_type = 'X448'
    elif CRYPTOGRAPHY_HAS_ED25519 and isinstance(key, asym.ed25519.Ed25519PrivateKey):
        key_type = 'Ed25519'
    elif CRYPTOGRAPHY_HAS_ED448 and isinstance(key, asym.ed448.Ed448PrivateKey):
        key_type = 'Ed448'
    elif isinstance(key, asym.ec.EllipticCurvePrivateKey):
        key_type = 'ECC'
        public_key = key.public_key()
        public_data['curve'] = public_key.curve.name
        public_data['x'] = public_key.public_numbers().x
        public_data['y'] = public_key.public_numbers().y
        public_data['exponent_size'] = public_key.curve.key_size
        private_data['multiplier'] = key.private_numbers().private_value
    else:
        key_type = 'unknown ({0})'.format(type(key))
    return key_type, public_data, private_data
def _check_dsa_consistency(key_public_data, key_private_data):
    """Best-effort arithmetic consistency check for a DSA key.

    Returns True/False when all parameters are present, or None when a
    parameter is missing and no verdict can be reached.
    """
    p = key_public_data.get('p')
    q = key_public_data.get('q')
    g = key_public_data.get('g')
    y = key_public_data.get('y')
    x = key_private_data.get('x')
    # Without the full parameter set no verdict is possible.
    if any(value is None for value in (p, q, g, y, x)):
        return None
    # g must not be 0, 1 or -1 in Z/pZ.
    if not 2 <= g < p - 1:
        return False
    # The private exponent x must lie in [1, q).
    if not 1 <= x < q:
        return False
    # q must divide the group order p - 1.
    if (p - 1) % q != 0:
        return False
    # g must generate a subgroup of order q: g**q == 1 (mod p).
    if binary_exp_mod(g, q, p) != 1:
        return False
    # Public key must match the private one: y == g**x (mod p).
    if binary_exp_mod(g, x, p) != y:
        return False
    # Quick compositeness screens for q and p.
    if quick_is_not_prime(q) or quick_is_not_prime(p):
        return False
    return True
def _is_cryptography_key_consistent(key, key_public_data, key_private_data):
    """Check whether a cryptography-backend private key is internally consistent.

    Returns True/False for a definite verdict, or None when consistency
    cannot be determined for this key type or library version.
    """
    if isinstance(key, cryptography.hazmat.primitives.asymmetric.rsa.RSAPrivateKey):
        # Delegate to OpenSSL's own RSA_check_key via the backend's FFI.
        # NOTE(review): relies on private attributes (_backend, _rsa_cdata) of
        # the cryptography library; may break with newer cryptography releases.
        return bool(key._backend._lib.RSA_check_key(key._rsa_cdata))
    if isinstance(key, cryptography.hazmat.primitives.asymmetric.dsa.DSAPrivateKey):
        # Prefer the arithmetic check; fall back to a sign/verify round trip
        # when it cannot reach a verdict (returns None).
        result = _check_dsa_consistency(key_public_data, key_private_data)
        if result is not None:
            return result
        try:
            signature = key.sign(SIGNATURE_TEST_DATA, cryptography.hazmat.primitives.hashes.SHA256())
        except AttributeError:
            # sign() was added in cryptography 1.5, but we support older versions
            return None
        try:
            key.public_key().verify(
                signature,
                SIGNATURE_TEST_DATA,
                cryptography.hazmat.primitives.hashes.SHA256()
            )
            return True
        except cryptography.exceptions.InvalidSignature:
            return False
    if isinstance(key, cryptography.hazmat.primitives.asymmetric.ec.EllipticCurvePrivateKey):
        # ECC keys: sign/verify round trip using ECDSA over SHA-256.
        try:
            signature = key.sign(
                SIGNATURE_TEST_DATA,
                cryptography.hazmat.primitives.asymmetric.ec.ECDSA(cryptography.hazmat.primitives.hashes.SHA256())
            )
        except AttributeError:
            # sign() was added in cryptography 1.5, but we support older versions
            return None
        try:
            key.public_key().verify(
                signature,
                SIGNATURE_TEST_DATA,
                cryptography.hazmat.primitives.asymmetric.ec.ECDSA(cryptography.hazmat.primitives.hashes.SHA256())
            )
            return True
        except cryptography.exceptions.InvalidSignature:
            return False
    # Ed25519/Ed448 expose a simple sign(data) API without an algorithm argument.
    has_simple_sign_function = False
    if CRYPTOGRAPHY_HAS_ED25519 and isinstance(key, cryptography.hazmat.primitives.asymmetric.ed25519.Ed25519PrivateKey):
        has_simple_sign_function = True
    if CRYPTOGRAPHY_HAS_ED448 and isinstance(key, cryptography.hazmat.primitives.asymmetric.ed448.Ed448PrivateKey):
        has_simple_sign_function = True
    if has_simple_sign_function:
        signature = key.sign(SIGNATURE_TEST_DATA)
        try:
            key.public_key().verify(signature, SIGNATURE_TEST_DATA)
            return True
        except cryptography.exceptions.InvalidSignature:
            return False
    # For X25519 and X448, there's no test yet.
    return None
class PrivateKeyInfo(OpenSSLObject):
    """Abstract base for gathering information about a private key.

    Subclasses implement the backend-specific hooks _get_public_key(),
    _get_key_info() and _is_key_consistent(); get_info() drives the process
    and assembles the result dict.
    """
    def __init__(self, module, backend):
        super(PrivateKeyInfo, self).__init__(
            module.params['path'] or '',
            'present',
            False,
            module.check_mode,
        )
        self.backend = backend
        self.module = module
        # Inline key material (mutually exclusive with 'path' per argument_spec).
        self.content = module.params['content']
        self.passphrase = module.params['passphrase']
        # Whether private numbers should be included in the result.
        self.return_private_key_data = module.params['return_private_key_data']
    def generate(self):
        # Empty method because OpenSSLObject wants this
        pass
    def dump(self):
        # Empty method because OpenSSLObject wants this
        pass
    @abc.abstractmethod
    def _get_public_key(self, binary):
        # Return the public key serialized as DER (binary=True) or PEM.
        pass
    @abc.abstractmethod
    def _get_key_info(self):
        # Return a (key_type, public_data, private_data) tuple.
        pass
    @abc.abstractmethod
    def _is_key_consistent(self, key_public_data, key_private_data):
        # Return True/False for a verdict, or None when unknown.
        pass
    def get_info(self):
        """Load, parse and analyze the key; fail the module on any error.

        Note: result flags are updated as each stage succeeds so that
        fail_json() always reports how far processing got.
        """
        result = dict(
            can_load_key=False,
            can_parse_key=False,
            key_is_consistent=None,
        )
        if self.content is not None:
            priv_key_detail = self.content.encode('utf-8')
            result['can_load_key'] = True
        else:
            try:
                with open(self.path, 'rb') as b_priv_key_fh:
                    priv_key_detail = b_priv_key_fh.read()
                result['can_load_key'] = True
            except (IOError, OSError) as exc:
                self.module.fail_json(msg=to_native(exc), **result)
        try:
            self.key = load_privatekey(
                path=None,
                content=priv_key_detail,
                passphrase=to_bytes(self.passphrase) if self.passphrase is not None else self.passphrase,
                backend=self.backend
            )
            result['can_parse_key'] = True
        except OpenSSLObjectError as exc:
            self.module.fail_json(msg=to_native(exc), **result)
        result['public_key'] = self._get_public_key(binary=False)
        pk = self._get_public_key(binary=True)
        # _get_public_key may return None on very old pyOpenSSL versions.
        result['public_key_fingerprints'] = get_fingerprint_of_bytes(pk) if pk is not None else dict()
        key_type, key_public_data, key_private_data = self._get_key_info()
        result['type'] = key_type
        result['public_data'] = key_public_data
        if self.return_private_key_data:
            result['private_data'] = key_private_data
        result['key_is_consistent'] = self._is_key_consistent(key_public_data, key_private_data)
        if result['key_is_consistent'] is False:
            # Only fail when it is False, to avoid to fail on None (which means "we don't know")
            result['key_is_consistent'] = False
            self.module.fail_json(
                msg="Private key is not consistent! (See "
                    "https://blog.hboeck.de/archives/888-How-I-tricked-Symantec-with-a-Fake-Private-Key.html)",
                **result
            )
        return result
class PrivateKeyInfoCryptography(PrivateKeyInfo):
    """Query private key information using the cryptography backend."""
    def __init__(self, module):
        super(PrivateKeyInfoCryptography, self).__init__(module, 'cryptography')
    def _get_public_key(self, binary):
        # Serialize the derived public key as DER (binary) or PEM (text).
        if binary:
            encoding = serialization.Encoding.DER
        else:
            encoding = serialization.Encoding.PEM
        public_key = self.key.public_key()
        return public_key.public_bytes(encoding, serialization.PublicFormat.SubjectPublicKeyInfo)
    def _get_key_info(self):
        # Shared helper extracts type and public/private numbers.
        return _get_cryptography_key_info(self.key)
    def _is_key_consistent(self, key_public_data, key_private_data):
        # Shared helper performs the backend-specific consistency check.
        return _is_cryptography_key_consistent(self.key, key_public_data, key_private_data)
class PrivateKeyInfoPyOpenSSL(PrivateKeyInfo):
    """Query private key information using the (deprecated) pyOpenSSL backend.

    Where pyOpenSSL itself is too old to expose the needed APIs, this class
    reaches into OpenSSL through pyOpenSSL's cffi bindings, and as a last
    resort falls back to the cryptography backend helpers.
    """
    def __init__(self, module):
        super(PrivateKeyInfoPyOpenSSL, self).__init__(module, 'pyopenssl')
    def _get_public_key(self, binary):
        # Dump the public key as ASN.1/DER (binary=True) or PEM.
        try:
            return crypto.dump_publickey(
                crypto.FILETYPE_ASN1 if binary else crypto.FILETYPE_PEM,
                self.key
            )
        except AttributeError:
            try:
                # pyOpenSSL < 16.0:
                bio = crypto._new_mem_buf()
                if binary:
                    rc = crypto._lib.i2d_PUBKEY_bio(bio, self.key._pkey)
                else:
                    rc = crypto._lib.PEM_write_bio_PUBKEY(bio, self.key._pkey)
                if rc != 1:
                    crypto._raise_current_error()
                return crypto._bio_to_string(bio)
            except AttributeError:
                # No way to dump the public key at all; caller tolerates None.
                self.module.warn('Your pyOpenSSL version does not support dumping public keys. '
                                 'Please upgrade to version 16.0 or newer, or use the cryptography backend.')
    def bigint_to_int(self, bn):
        '''Convert OpenSSL BIGINT to Python integer'''
        if bn == OpenSSL._util.ffi.NULL:
            return None
        hexstr = OpenSSL._util.lib.BN_bn2hex(bn)
        try:
            return int(OpenSSL._util.ffi.string(hexstr), 16)
        finally:
            # BN_bn2hex allocates; always release the buffer.
            OpenSSL._util.lib.OPENSSL_free(hexstr)
    def _get_key_info(self):
        """Extract key type plus public/private parameters via OpenSSL FFI."""
        key_public_data = dict()
        key_private_data = dict()
        openssl_key_type = self.key.type()
        try_fallback = True
        if crypto.TYPE_RSA == openssl_key_type:
            key_type = 'RSA'
            key_public_data['size'] = self.key.bits()
            try:
                # Use OpenSSL directly to extract key data
                key = OpenSSL._util.lib.EVP_PKEY_get1_RSA(self.key._pkey)
                key = OpenSSL._util.ffi.gc(key, OpenSSL._util.lib.RSA_free)
                # OpenSSL 1.1 and newer have functions to extract the parameters
                # from the EVP PKEY data structures. Older versions didn't have
                # these getters, and it was common use to simply access the values
                # directly. Since there's no guarantee that these data structures
                # will still be accessible in the future, we use the getters for
                # 1.1 and later, and directly access the values for 1.0.x and
                # earlier.
                if OpenSSL.SSL.OPENSSL_VERSION_NUMBER >= 0x10100000:
                    # Get modulus and exponents
                    n = OpenSSL._util.ffi.new("BIGNUM **")
                    e = OpenSSL._util.ffi.new("BIGNUM **")
                    d = OpenSSL._util.ffi.new("BIGNUM **")
                    OpenSSL._util.lib.RSA_get0_key(key, n, e, d)
                    key_public_data['modulus'] = self.bigint_to_int(n[0])
                    key_public_data['exponent'] = self.bigint_to_int(e[0])
                    key_private_data['exponent'] = self.bigint_to_int(d[0])
                    # Get factors
                    p = OpenSSL._util.ffi.new("BIGNUM **")
                    q = OpenSSL._util.ffi.new("BIGNUM **")
                    OpenSSL._util.lib.RSA_get0_factors(key, p, q)
                    key_private_data['p'] = self.bigint_to_int(p[0])
                    key_private_data['q'] = self.bigint_to_int(q[0])
                else:
                    # Get modulus and exponents
                    key_public_data['modulus'] = self.bigint_to_int(key.n)
                    key_public_data['exponent'] = self.bigint_to_int(key.e)
                    key_private_data['exponent'] = self.bigint_to_int(key.d)
                    # Get factors
                    key_private_data['p'] = self.bigint_to_int(key.p)
                    key_private_data['q'] = self.bigint_to_int(key.q)
                try_fallback = False
            except AttributeError:
                # Use fallback if available
                pass
        elif crypto.TYPE_DSA == openssl_key_type:
            key_type = 'DSA'
            key_public_data['size'] = self.key.bits()
            try:
                # Use OpenSSL directly to extract key data
                key = OpenSSL._util.lib.EVP_PKEY_get1_DSA(self.key._pkey)
                key = OpenSSL._util.ffi.gc(key, OpenSSL._util.lib.DSA_free)
                # OpenSSL 1.1 and newer have functions to extract the parameters
                # from the EVP PKEY data structures. Older versions didn't have
                # these getters, and it was common use to simply access the values
                # directly. Since there's no guarantee that these data structures
                # will still be accessible in the future, we use the getters for
                # 1.1 and later, and directly access the values for 1.0.x and
                # earlier.
                if OpenSSL.SSL.OPENSSL_VERSION_NUMBER >= 0x10100000:
                    # Get public parameters (primes and group element)
                    p = OpenSSL._util.ffi.new("BIGNUM **")
                    q = OpenSSL._util.ffi.new("BIGNUM **")
                    g = OpenSSL._util.ffi.new("BIGNUM **")
                    OpenSSL._util.lib.DSA_get0_pqg(key, p, q, g)
                    key_public_data['p'] = self.bigint_to_int(p[0])
                    key_public_data['q'] = self.bigint_to_int(q[0])
                    key_public_data['g'] = self.bigint_to_int(g[0])
                    # Get public and private key exponents
                    y = OpenSSL._util.ffi.new("BIGNUM **")
                    x = OpenSSL._util.ffi.new("BIGNUM **")
                    OpenSSL._util.lib.DSA_get0_key(key, y, x)
                    key_public_data['y'] = self.bigint_to_int(y[0])
                    key_private_data['x'] = self.bigint_to_int(x[0])
                else:
                    # Get public parameters (primes and group element)
                    key_public_data['p'] = self.bigint_to_int(key.p)
                    key_public_data['q'] = self.bigint_to_int(key.q)
                    key_public_data['g'] = self.bigint_to_int(key.g)
                    # Get public and private key exponents
                    key_public_data['y'] = self.bigint_to_int(key.pub_key)
                    key_private_data['x'] = self.bigint_to_int(key.priv_key)
                try_fallback = False
            except AttributeError:
                # Use fallback if available
                pass
        else:
            # Return 'unknown'
            key_type = 'unknown ({0})'.format(self.key.type())
        # If needed and if possible, fall back to cryptography
        if try_fallback and PYOPENSSL_VERSION >= LooseVersion('16.1.0') and CRYPTOGRAPHY_FOUND:
            return _get_cryptography_key_info(self.key.to_cryptography_key())
        return key_type, key_public_data, key_private_data
    def _is_key_consistent(self, key_public_data, key_private_data):
        """Check key consistency; True/False for a verdict, None when unknown."""
        openssl_key_type = self.key.type()
        if crypto.TYPE_RSA == openssl_key_type:
            try:
                return self.key.check()
            except crypto.Error:
                # OpenSSL error means that key is not consistent
                return False
        if crypto.TYPE_DSA == openssl_key_type:
            result = _check_dsa_consistency(key_public_data, key_private_data)
            if result is not None:
                return result
            # Fall back to a sign/verify round trip.
            signature = crypto.sign(self.key, SIGNATURE_TEST_DATA, 'sha256')
            # Verify wants a cert (where it can get the public key from)
            cert = crypto.X509()
            cert.set_pubkey(self.key)
            try:
                crypto.verify(cert, signature, SIGNATURE_TEST_DATA, 'sha256')
                return True
            except crypto.Error:
                return False
        # If needed and if possible, fall back to cryptography
        if PYOPENSSL_VERSION >= LooseVersion('16.1.0') and CRYPTOGRAPHY_FOUND:
            return _is_cryptography_key_consistent(self.key.to_cryptography_key(), key_public_data, key_private_data)
        return None
def main():
    """Entry point: parse arguments, select a crypto backend, return key info."""
    module = AnsibleModule(
        argument_spec=dict(
            path=dict(type='path'),
            # no_log: 'content' carries raw private-key material and must not
            # be written to Ansible's logs or verbose output.
            content=dict(type='str', no_log=True),
            passphrase=dict(type='str', no_log=True),
            return_private_key_data=dict(type='bool', default=False),
            select_crypto_backend=dict(type='str', default='auto', choices=['auto', 'cryptography', 'pyopenssl']),
        ),
        required_one_of=(
            ['path', 'content'],
        ),
        mutually_exclusive=(
            ['path', 'content'],
        ),
        supports_check_mode=True,
    )
    try:
        # Fail early with a clear message when the containing directory is absent.
        if module.params['path'] is not None:
            base_dir = os.path.dirname(module.params['path']) or '.'
            if not os.path.isdir(base_dir):
                module.fail_json(
                    name=base_dir,
                    msg='The directory %s does not exist or the file is not a directory' % base_dir
                )
        backend = module.params['select_crypto_backend']
        if backend == 'auto':
            # Detect what backend we can use
            can_use_cryptography = CRYPTOGRAPHY_FOUND and CRYPTOGRAPHY_VERSION >= LooseVersion(MINIMAL_CRYPTOGRAPHY_VERSION)
            can_use_pyopenssl = PYOPENSSL_FOUND and PYOPENSSL_VERSION >= LooseVersion(MINIMAL_PYOPENSSL_VERSION)
            # If cryptography is available we'll use it
            if can_use_cryptography:
                backend = 'cryptography'
            elif can_use_pyopenssl:
                backend = 'pyopenssl'
            # Fail if no backend has been found
            if backend == 'auto':
                module.fail_json(msg=("Can't detect any of the required Python libraries "
                                      "cryptography (>= {0}) or PyOpenSSL (>= {1})").format(
                    MINIMAL_CRYPTOGRAPHY_VERSION,
                    MINIMAL_PYOPENSSL_VERSION))
        if backend == 'pyopenssl':
            if not PYOPENSSL_FOUND:
                module.fail_json(msg=missing_required_lib('pyOpenSSL >= {0}'.format(MINIMAL_PYOPENSSL_VERSION)),
                                 exception=PYOPENSSL_IMP_ERR)
            module.deprecate('The module is using the PyOpenSSL backend. This backend has been deprecated',
                             version='2.0.0', collection_name='community.crypto')
            privatekey = PrivateKeyInfoPyOpenSSL(module)
        elif backend == 'cryptography':
            if not CRYPTOGRAPHY_FOUND:
                module.fail_json(msg=missing_required_lib('cryptography >= {0}'.format(MINIMAL_CRYPTOGRAPHY_VERSION)),
                                 exception=CRYPTOGRAPHY_IMP_ERR)
            privatekey = PrivateKeyInfoCryptography(module)
        result = privatekey.get_info()
        module.exit_json(**result)
    except OpenSSLObjectError as exc:
        module.fail_json(msg=to_native(exc))
if __name__ == "__main__":
    main()
|
4295_3
|
crossvul
|
py
|
CWE-116
|
Improper Encoding or Escaping of Output - Improper encoding or escaping can allow attackers to change the commands that are sent to another component, inserting malicious commands instead.
|
python
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2016, Yanis Guenane <yanis+ansible@guenane.org>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
module: openssl_publickey
short_description: Generate an OpenSSL public key from its private key.
description:
- This module allows one to (re)generate OpenSSL public keys from their private keys.
- Keys are generated in PEM or OpenSSH format.
- "The module can use the cryptography Python library, or the pyOpenSSL Python
library. By default, it tries to detect which one is available. This can be
overridden with the I(select_crypto_backend) option. When I(format) is C(OpenSSH),
the C(cryptography) backend has to be used. Please note that the PyOpenSSL backend
was deprecated in Ansible 2.9 and will be removed in community.crypto 2.0.0."
requirements:
- Either cryptography >= 1.2.3 (older versions might work as well)
- Or pyOpenSSL >= 16.0.0
- Needs cryptography >= 1.4 if I(format) is C(OpenSSH)
author:
- Yanis Guenane (@Spredzy)
- Felix Fontein (@felixfontein)
options:
state:
description:
- Whether the public key should exist or not, taking action if the state is different from what is stated.
type: str
default: present
choices: [ absent, present ]
force:
description:
- Should the key be regenerated even if it already exists.
type: bool
default: no
format:
description:
- The format of the public key.
type: str
default: PEM
choices: [ OpenSSH, PEM ]
path:
description:
- Name of the file in which the generated TLS/SSL public key will be written.
type: path
required: true
privatekey_path:
description:
- Path to the TLS/SSL private key from which to generate the public key.
- Either I(privatekey_path) or I(privatekey_content) must be specified, but not both.
If I(state) is C(present), one of them is required.
type: path
privatekey_content:
description:
- The content of the TLS/SSL private key from which to generate the public key.
- Either I(privatekey_path) or I(privatekey_content) must be specified, but not both.
If I(state) is C(present), one of them is required.
type: str
version_added: '1.0.0'
privatekey_passphrase:
description:
- The passphrase for the private key.
type: str
backup:
description:
- Create a backup file including a timestamp so you can get the original
public key back if you overwrote it with a different one by accident.
type: bool
default: no
select_crypto_backend:
description:
- Determines which crypto backend to use.
- The default choice is C(auto), which tries to use C(cryptography) if available, and falls back to C(pyopenssl).
- If set to C(pyopenssl), will try to use the L(pyOpenSSL,https://pypi.org/project/pyOpenSSL/) library.
- If set to C(cryptography), will try to use the L(cryptography,https://cryptography.io/) library.
type: str
default: auto
choices: [ auto, cryptography, pyopenssl ]
return_content:
description:
- If set to C(yes), will return the (current or generated) public key's content as I(publickey).
type: bool
default: no
version_added: '1.0.0'
extends_documentation_fragment:
- files
seealso:
- module: community.crypto.x509_certificate
- module: community.crypto.openssl_csr
- module: community.crypto.openssl_dhparam
- module: community.crypto.openssl_pkcs12
- module: community.crypto.openssl_privatekey
'''
EXAMPLES = r'''
- name: Generate an OpenSSL public key in PEM format
community.crypto.openssl_publickey:
path: /etc/ssl/public/ansible.com.pem
privatekey_path: /etc/ssl/private/ansible.com.pem
- name: Generate an OpenSSL public key in PEM format from an inline key
community.crypto.openssl_publickey:
path: /etc/ssl/public/ansible.com.pem
privatekey_content: "{{ private_key_content }}"
- name: Generate an OpenSSL public key in OpenSSH v2 format
community.crypto.openssl_publickey:
path: /etc/ssl/public/ansible.com.pem
privatekey_path: /etc/ssl/private/ansible.com.pem
format: OpenSSH
- name: Generate an OpenSSL public key with a passphrase protected private key
community.crypto.openssl_publickey:
path: /etc/ssl/public/ansible.com.pem
privatekey_path: /etc/ssl/private/ansible.com.pem
privatekey_passphrase: ansible
- name: Force regenerate an OpenSSL public key if it already exists
community.crypto.openssl_publickey:
path: /etc/ssl/public/ansible.com.pem
privatekey_path: /etc/ssl/private/ansible.com.pem
force: yes
- name: Remove an OpenSSL public key
community.crypto.openssl_publickey:
path: /etc/ssl/public/ansible.com.pem
state: absent
'''
RETURN = r'''
privatekey:
description:
- Path to the TLS/SSL private key the public key was generated from.
- Will be C(none) if the private key has been provided in I(privatekey_content).
returned: changed or success
type: str
sample: /etc/ssl/private/ansible.com.pem
format:
description: The format of the public key (PEM, OpenSSH, ...).
returned: changed or success
type: str
sample: PEM
filename:
description: Path to the generated TLS/SSL public key file.
returned: changed or success
type: str
sample: /etc/ssl/public/ansible.com.pem
fingerprint:
description:
- The fingerprint of the public key. Fingerprint will be generated for each hashlib.algorithms available.
- Requires PyOpenSSL >= 16.0 for meaningful output.
returned: changed or success
type: dict
sample:
md5: "84:75:71:72:8d:04:b5:6c:4d:37:6d:66:83:f5:4c:29"
sha1: "51:cc:7c:68:5d:eb:41:43:88:7e:1a:ae:c7:f8:24:72:ee:71:f6:10"
sha224: "b1:19:a6:6c:14:ac:33:1d:ed:18:50:d3:06:5c:b2:32:91:f1:f1:52:8c:cb:d5:75:e9:f5:9b:46"
sha256: "41:ab:c7:cb:d5:5f:30:60:46:99:ac:d4:00:70:cf:a1:76:4f:24:5d:10:24:57:5d:51:6e:09:97:df:2f:de:c7"
sha384: "85:39:50:4e:de:d9:19:33:40:70:ae:10:ab:59:24:19:51:c3:a2:e4:0b:1c:b1:6e:dd:b3:0c:d9:9e:6a:46:af:da:18:f8:ef:ae:2e:c0:9a:75:2c:9b:b3:0f:3a:5f:3d"
sha512: "fd:ed:5e:39:48:5f:9f:fe:7f:25:06:3f:79:08:cd:ee:a5:e7:b3:3d:13:82:87:1f:84:e1:f5:c7:28:77:53:94:86:56:38:69:f0:d9:35:22:01:1e:a6:60:...:0f:9b"
backup_file:
description: Name of backup file created.
returned: changed and if I(backup) is C(yes)
type: str
sample: /path/to/publickey.pem.2019-03-09@11:22~
publickey:
description: The (current or generated) public key's content.
returned: if I(state) is C(present) and I(return_content) is C(yes)
type: str
version_added: '1.0.0'
'''
import os
import traceback
from distutils.version import LooseVersion
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils._text import to_native
from ansible_collections.community.crypto.plugins.module_utils.io import (
load_file_if_exists,
write_file,
)
from ansible_collections.community.crypto.plugins.module_utils.crypto.basic import (
OpenSSLObjectError,
OpenSSLBadPassphraseError,
)
from ansible_collections.community.crypto.plugins.module_utils.crypto.support import (
OpenSSLObject,
load_privatekey,
get_fingerprint,
)
# Minimum library versions this module supports; OpenSSH output needs a
# newer cryptography release than plain PEM output.
MINIMAL_PYOPENSSL_VERSION = '16.0.0'
MINIMAL_CRYPTOGRAPHY_VERSION = '1.2.3'
MINIMAL_CRYPTOGRAPHY_VERSION_OPENSSH = '1.4'
PYOPENSSL_IMP_ERR = None
try:
    import OpenSSL
    from OpenSSL import crypto
    PYOPENSSL_VERSION = LooseVersion(OpenSSL.__version__)
except ImportError:
    # Keep the traceback so main() can report it via fail_json(exception=...).
    PYOPENSSL_IMP_ERR = traceback.format_exc()
    PYOPENSSL_FOUND = False
else:
    PYOPENSSL_FOUND = True
CRYPTOGRAPHY_IMP_ERR = None
try:
    import cryptography
    from cryptography.hazmat.backends import default_backend
    from cryptography.hazmat.primitives import serialization as crypto_serialization
    CRYPTOGRAPHY_VERSION = LooseVersion(cryptography.__version__)
except ImportError:
    # Keep the traceback so main() can report it via fail_json(exception=...).
    CRYPTOGRAPHY_IMP_ERR = traceback.format_exc()
    CRYPTOGRAPHY_FOUND = False
else:
    CRYPTOGRAPHY_FOUND = True
class PublicKeyError(OpenSSLObjectError):
    """Raised for public-key specific failures (missing private key, backend errors, I/O)."""
    pass
class PublicKey(OpenSSLObject):
    """State machine for the public key file managed by this module.

    Derives the public key from a private key (file path or inline content)
    with either the ``cryptography`` or the ``pyopenssl`` backend, and knows
    how to generate, compare, remove and serialize it for module output.
    """
    def __init__(self, module, backend):
        super(PublicKey, self).__init__(
            module.params['path'],
            module.params['state'],
            module.params['force'],
            module.check_mode
        )
        self.format = module.params['format']
        self.privatekey_path = module.params['privatekey_path']
        self.privatekey_content = module.params['privatekey_content']
        # The crypto backends expect bytes, not text.
        if self.privatekey_content is not None:
            self.privatekey_content = self.privatekey_content.encode('utf-8')
        self.privatekey_passphrase = module.params['privatekey_passphrase']
        self.privatekey = None
        self.publickey_bytes = None
        self.return_content = module.params['return_content']
        self.fingerprint = {}
        self.backend = backend
        self.backup = module.params['backup']
        self.backup_file = None
    def _create_publickey(self, module):
        """Load the private key and return the serialized public key as bytes.

        The output encoding depends on ``self.format`` (OpenSSH or PEM) and on
        the selected backend; the pyOpenSSL backend can only produce PEM.
        """
        self.privatekey = load_privatekey(
            path=self.privatekey_path,
            content=self.privatekey_content,
            passphrase=self.privatekey_passphrase,
            backend=self.backend
        )
        if self.backend == 'cryptography':
            if self.format == 'OpenSSH':
                return self.privatekey.public_key().public_bytes(
                    crypto_serialization.Encoding.OpenSSH,
                    crypto_serialization.PublicFormat.OpenSSH
                )
            else:
                return self.privatekey.public_key().public_bytes(
                    crypto_serialization.Encoding.PEM,
                    crypto_serialization.PublicFormat.SubjectPublicKeyInfo
                )
        else:
            try:
                # dump_publickey() only exists in pyOpenSSL >= 16.0.0.
                return crypto.dump_publickey(crypto.FILETYPE_PEM, self.privatekey)
            except AttributeError as dummy:
                raise PublicKeyError('You need to have PyOpenSSL>=16.0.0 to generate public keys')
    def generate(self, module):
        """Generate the public key."""
        # Only a missing private key *file* is an error here; inline content
        # needs no existence check.
        if self.privatekey_content is None and not os.path.exists(self.privatekey_path):
            raise PublicKeyError(
                'The private key %s does not exist' % self.privatekey_path
            )
        if not self.check(module, perms_required=False) or self.force:
            try:
                publickey_content = self._create_publickey(module)
                if self.return_content:
                    self.publickey_bytes = publickey_content
                # Back up the current file (if any) before overwriting it.
                if self.backup:
                    self.backup_file = module.backup_local(self.path)
                write_file(module, publickey_content)
                self.changed = True
            except OpenSSLBadPassphraseError as exc:
                raise PublicKeyError(exc)
            except (IOError, OSError) as exc:
                raise PublicKeyError(exc)
        self.fingerprint = get_fingerprint(
            path=self.privatekey_path,
            content=self.privatekey_content,
            passphrase=self.privatekey_passphrase,
            backend=self.backend,
        )
        # Apply ownership/permissions from the common file arguments.
        file_args = module.load_file_common_arguments(module.params)
        if module.set_fs_attributes_if_different(file_args, False):
            self.changed = True
    def check(self, module, perms_required=True):
        """Ensure the resource is in its desired state."""
        state_and_perms = super(PublicKey, self).check(module, perms_required)
        def _check_privatekey():
            # Re-derive the desired public key and compare it with what is
            # currently on disk (normalized by round-tripping through the backend).
            if self.privatekey_content is None and not os.path.exists(self.privatekey_path):
                return False
            try:
                with open(self.path, 'rb') as public_key_fh:
                    publickey_content = public_key_fh.read()
                if self.return_content:
                    self.publickey_bytes = publickey_content
                if self.backend == 'cryptography':
                    if self.format == 'OpenSSH':
                        # Read and dump public key. Makes sure that the comment is stripped off.
                        current_publickey = crypto_serialization.load_ssh_public_key(publickey_content, backend=default_backend())
                        publickey_content = current_publickey.public_bytes(
                            crypto_serialization.Encoding.OpenSSH,
                            crypto_serialization.PublicFormat.OpenSSH
                        )
                    else:
                        current_publickey = crypto_serialization.load_pem_public_key(publickey_content, backend=default_backend())
                        publickey_content = current_publickey.public_bytes(
                            crypto_serialization.Encoding.PEM,
                            crypto_serialization.PublicFormat.SubjectPublicKeyInfo
                        )
                else:
                    publickey_content = crypto.dump_publickey(
                        crypto.FILETYPE_PEM,
                        crypto.load_publickey(crypto.FILETYPE_PEM, publickey_content)
                    )
            except Exception as dummy:
                # An unreadable or malformed file on disk means "needs regeneration".
                return False
            try:
                desired_publickey = self._create_publickey(module)
            except OpenSSLBadPassphraseError as exc:
                raise PublicKeyError(exc)
            return publickey_content == desired_publickey
        if not state_and_perms:
            return state_and_perms
        return _check_privatekey()
    def remove(self, module):
        # Take a backup copy before deleting, if requested.
        if self.backup:
            self.backup_file = module.backup_local(self.path)
        super(PublicKey, self).remove(module)
    def dump(self):
        """Serialize the object into a dictionary."""
        result = {
            'privatekey': self.privatekey_path,
            'filename': self.path,
            'format': self.format,
            'changed': self.changed,
            'fingerprint': self.fingerprint,
        }
        if self.backup_file:
            result['backup_file'] = self.backup_file
        if self.return_content:
            # Lazily read the key back from disk if it was not captured
            # during generate()/check().
            if self.publickey_bytes is None:
                self.publickey_bytes = load_file_if_exists(self.path, ignore_errors=True)
            result['publickey'] = self.publickey_bytes.decode('utf-8') if self.publickey_bytes else None
        return result
def main():
    """Module entry point: create or remove an OpenSSL public key.

    Selects a crypto backend (cryptography preferred, pyOpenSSL as the
    deprecated fallback), validates the destination directory, then delegates
    the actual work to PublicKey and exits with its dump() as the result.
    """
    module = AnsibleModule(
        argument_spec=dict(
            state=dict(type='str', default='present', choices=['present', 'absent']),
            force=dict(type='bool', default=False),
            path=dict(type='path', required=True),
            privatekey_path=dict(type='path'),
            # no_log=True keeps inline private key material out of logs and
            # invocation output (secret-disclosure fix; previously missing).
            privatekey_content=dict(type='str', no_log=True),
            format=dict(type='str', default='PEM', choices=['OpenSSH', 'PEM']),
            privatekey_passphrase=dict(type='str', no_log=True),
            backup=dict(type='bool', default=False),
            select_crypto_backend=dict(type='str', choices=['auto', 'pyopenssl', 'cryptography'], default='auto'),
            return_content=dict(type='bool', default=False),
        ),
        supports_check_mode=True,
        add_file_common_args=True,
        required_if=[('state', 'present', ['privatekey_path', 'privatekey_content'], True)],
        mutually_exclusive=(
            ['privatekey_path', 'privatekey_content'],
        ),
    )
    # OpenSSH output requires a newer cryptography release than PEM output.
    minimal_cryptography_version = MINIMAL_CRYPTOGRAPHY_VERSION
    if module.params['format'] == 'OpenSSH':
        minimal_cryptography_version = MINIMAL_CRYPTOGRAPHY_VERSION_OPENSSH
    backend = module.params['select_crypto_backend']
    if backend == 'auto':
        # Detect which backends are usable.
        can_use_cryptography = CRYPTOGRAPHY_FOUND and CRYPTOGRAPHY_VERSION >= LooseVersion(minimal_cryptography_version)
        can_use_pyopenssl = PYOPENSSL_FOUND and PYOPENSSL_VERSION >= LooseVersion(MINIMAL_PYOPENSSL_VERSION)
        # Prefer cryptography; pyOpenSSL cannot emit OpenSSH format.
        if can_use_cryptography:
            backend = 'cryptography'
        elif can_use_pyopenssl:
            if module.params['format'] == 'OpenSSH':
                module.fail_json(
                    msg=missing_required_lib('cryptography >= {0}'.format(MINIMAL_CRYPTOGRAPHY_VERSION_OPENSSH)),
                    exception=CRYPTOGRAPHY_IMP_ERR
                )
            backend = 'pyopenssl'
        # Still 'auto' here means no usable backend was found.
        if backend == 'auto':
            module.fail_json(msg=("Can't detect any of the required Python libraries "
                                  "cryptography (>= {0}) or PyOpenSSL (>= {1})").format(
                minimal_cryptography_version,
                MINIMAL_PYOPENSSL_VERSION))
    if module.params['format'] == 'OpenSSH' and backend != 'cryptography':
        module.fail_json(msg="Format OpenSSH requires the cryptography backend.")
    if backend == 'pyopenssl':
        if not PYOPENSSL_FOUND:
            module.fail_json(msg=missing_required_lib('pyOpenSSL >= {0}'.format(MINIMAL_PYOPENSSL_VERSION)),
                             exception=PYOPENSSL_IMP_ERR)
        module.deprecate('The module is using the PyOpenSSL backend. This backend has been deprecated',
                         version='2.0.0', collection_name='community.crypto')
    elif backend == 'cryptography':
        if not CRYPTOGRAPHY_FOUND:
            module.fail_json(msg=missing_required_lib('cryptography >= {0}'.format(minimal_cryptography_version)),
                             exception=CRYPTOGRAPHY_IMP_ERR)
    # The containing directory must already exist; the module does not create it.
    base_dir = os.path.dirname(module.params['path']) or '.'
    if not os.path.isdir(base_dir):
        module.fail_json(
            name=base_dir,
            msg="The directory '%s' does not exist or the file is not a directory" % base_dir
        )
    try:
        public_key = PublicKey(module, backend)
        if public_key.state == 'present':
            if module.check_mode:
                result = public_key.dump()
                result['changed'] = module.params['force'] or not public_key.check(module)
                module.exit_json(**result)
            public_key.generate(module)
        else:
            if module.check_mode:
                result = public_key.dump()
                result['changed'] = os.path.exists(module.params['path'])
                module.exit_json(**result)
            public_key.remove(module)
        result = public_key.dump()
        module.exit_json(**result)
    except OpenSSLObjectError as exc:
        module.fail_json(msg=to_native(exc))
if __name__ == '__main__':
    main()
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2016, Yanis Guenane <yanis+ansible@guenane.org>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
module: openssl_publickey
short_description: Generate an OpenSSL public key from its private key.
description:
- This module allows one to (re)generate OpenSSL public keys from their private keys.
- Keys are generated in PEM or OpenSSH format.
- "The module can use the cryptography Python library, or the pyOpenSSL Python
library. By default, it tries to detect which one is available. This can be
overridden with the I(select_crypto_backend) option. When I(format) is C(OpenSSH),
the C(cryptography) backend has to be used. Please note that the PyOpenSSL backend
was deprecated in Ansible 2.9 and will be removed in community.crypto 2.0.0."
requirements:
- Either cryptography >= 1.2.3 (older versions might work as well)
- Or pyOpenSSL >= 16.0.0
- Needs cryptography >= 1.4 if I(format) is C(OpenSSH)
author:
- Yanis Guenane (@Spredzy)
- Felix Fontein (@felixfontein)
options:
state:
description:
- Whether the public key should exist or not, taking action if the state is different from what is stated.
type: str
default: present
choices: [ absent, present ]
force:
description:
- Should the key be regenerated even if it already exists.
type: bool
default: no
format:
description:
- The format of the public key.
type: str
default: PEM
choices: [ OpenSSH, PEM ]
path:
description:
- Name of the file in which the generated TLS/SSL public key will be written.
type: path
required: true
privatekey_path:
description:
- Path to the TLS/SSL private key from which to generate the public key.
- Either I(privatekey_path) or I(privatekey_content) must be specified, but not both.
If I(state) is C(present), one of them is required.
type: path
privatekey_content:
description:
- The content of the TLS/SSL private key from which to generate the public key.
- Either I(privatekey_path) or I(privatekey_content) must be specified, but not both.
If I(state) is C(present), one of them is required.
type: str
version_added: '1.0.0'
privatekey_passphrase:
description:
- The passphrase for the private key.
type: str
backup:
description:
- Create a backup file including a timestamp so you can get the original
public key back if you overwrote it with a different one by accident.
type: bool
default: no
select_crypto_backend:
description:
- Determines which crypto backend to use.
- The default choice is C(auto), which tries to use C(cryptography) if available, and falls back to C(pyopenssl).
- If set to C(pyopenssl), will try to use the L(pyOpenSSL,https://pypi.org/project/pyOpenSSL/) library.
- If set to C(cryptography), will try to use the L(cryptography,https://cryptography.io/) library.
type: str
default: auto
choices: [ auto, cryptography, pyopenssl ]
return_content:
description:
- If set to C(yes), will return the (current or generated) public key's content as I(publickey).
type: bool
default: no
version_added: '1.0.0'
extends_documentation_fragment:
- files
seealso:
- module: community.crypto.x509_certificate
- module: community.crypto.openssl_csr
- module: community.crypto.openssl_dhparam
- module: community.crypto.openssl_pkcs12
- module: community.crypto.openssl_privatekey
'''
EXAMPLES = r'''
- name: Generate an OpenSSL public key in PEM format
community.crypto.openssl_publickey:
path: /etc/ssl/public/ansible.com.pem
privatekey_path: /etc/ssl/private/ansible.com.pem
- name: Generate an OpenSSL public key in PEM format from an inline key
community.crypto.openssl_publickey:
path: /etc/ssl/public/ansible.com.pem
privatekey_content: "{{ private_key_content }}"
- name: Generate an OpenSSL public key in OpenSSH v2 format
community.crypto.openssl_publickey:
path: /etc/ssl/public/ansible.com.pem
privatekey_path: /etc/ssl/private/ansible.com.pem
format: OpenSSH
- name: Generate an OpenSSL public key with a passphrase protected private key
community.crypto.openssl_publickey:
path: /etc/ssl/public/ansible.com.pem
privatekey_path: /etc/ssl/private/ansible.com.pem
privatekey_passphrase: ansible
- name: Force regenerate an OpenSSL public key if it already exists
community.crypto.openssl_publickey:
path: /etc/ssl/public/ansible.com.pem
privatekey_path: /etc/ssl/private/ansible.com.pem
force: yes
- name: Remove an OpenSSL public key
community.crypto.openssl_publickey:
path: /etc/ssl/public/ansible.com.pem
state: absent
'''
RETURN = r'''
privatekey:
description:
- Path to the TLS/SSL private key the public key was generated from.
- Will be C(none) if the private key has been provided in I(privatekey_content).
returned: changed or success
type: str
sample: /etc/ssl/private/ansible.com.pem
format:
description: The format of the public key (PEM, OpenSSH, ...).
returned: changed or success
type: str
sample: PEM
filename:
description: Path to the generated TLS/SSL public key file.
returned: changed or success
type: str
sample: /etc/ssl/public/ansible.com.pem
fingerprint:
description:
- The fingerprint of the public key. Fingerprint will be generated for each hashlib.algorithms available.
- Requires PyOpenSSL >= 16.0 for meaningful output.
returned: changed or success
type: dict
sample:
md5: "84:75:71:72:8d:04:b5:6c:4d:37:6d:66:83:f5:4c:29"
sha1: "51:cc:7c:68:5d:eb:41:43:88:7e:1a:ae:c7:f8:24:72:ee:71:f6:10"
sha224: "b1:19:a6:6c:14:ac:33:1d:ed:18:50:d3:06:5c:b2:32:91:f1:f1:52:8c:cb:d5:75:e9:f5:9b:46"
sha256: "41:ab:c7:cb:d5:5f:30:60:46:99:ac:d4:00:70:cf:a1:76:4f:24:5d:10:24:57:5d:51:6e:09:97:df:2f:de:c7"
sha384: "85:39:50:4e:de:d9:19:33:40:70:ae:10:ab:59:24:19:51:c3:a2:e4:0b:1c:b1:6e:dd:b3:0c:d9:9e:6a:46:af:da:18:f8:ef:ae:2e:c0:9a:75:2c:9b:b3:0f:3a:5f:3d"
sha512: "fd:ed:5e:39:48:5f:9f:fe:7f:25:06:3f:79:08:cd:ee:a5:e7:b3:3d:13:82:87:1f:84:e1:f5:c7:28:77:53:94:86:56:38:69:f0:d9:35:22:01:1e:a6:60:...:0f:9b"
backup_file:
description: Name of backup file created.
returned: changed and if I(backup) is C(yes)
type: str
sample: /path/to/publickey.pem.2019-03-09@11:22~
publickey:
description: The (current or generated) public key's content.
returned: if I(state) is C(present) and I(return_content) is C(yes)
type: str
version_added: '1.0.0'
'''
import os
import traceback
from distutils.version import LooseVersion
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils._text import to_native
from ansible_collections.community.crypto.plugins.module_utils.io import (
load_file_if_exists,
write_file,
)
from ansible_collections.community.crypto.plugins.module_utils.crypto.basic import (
OpenSSLObjectError,
OpenSSLBadPassphraseError,
)
from ansible_collections.community.crypto.plugins.module_utils.crypto.support import (
OpenSSLObject,
load_privatekey,
get_fingerprint,
)
# Minimum library versions this module supports; OpenSSH output needs a
# newer cryptography release than plain PEM output.
MINIMAL_PYOPENSSL_VERSION = '16.0.0'
MINIMAL_CRYPTOGRAPHY_VERSION = '1.2.3'
MINIMAL_CRYPTOGRAPHY_VERSION_OPENSSH = '1.4'
PYOPENSSL_IMP_ERR = None
try:
    import OpenSSL
    from OpenSSL import crypto
    PYOPENSSL_VERSION = LooseVersion(OpenSSL.__version__)
except ImportError:
    # Keep the traceback so main() can report it via fail_json(exception=...).
    PYOPENSSL_IMP_ERR = traceback.format_exc()
    PYOPENSSL_FOUND = False
else:
    PYOPENSSL_FOUND = True
CRYPTOGRAPHY_IMP_ERR = None
try:
    import cryptography
    from cryptography.hazmat.backends import default_backend
    from cryptography.hazmat.primitives import serialization as crypto_serialization
    CRYPTOGRAPHY_VERSION = LooseVersion(cryptography.__version__)
except ImportError:
    # Keep the traceback so main() can report it via fail_json(exception=...).
    CRYPTOGRAPHY_IMP_ERR = traceback.format_exc()
    CRYPTOGRAPHY_FOUND = False
else:
    CRYPTOGRAPHY_FOUND = True
class PublicKeyError(OpenSSLObjectError):
    """Raised for public-key specific failures (missing private key, backend errors, I/O)."""
    pass
class PublicKey(OpenSSLObject):
    """State machine for the public key file managed by this module.

    Derives the public key from a private key (file path or inline content)
    with either the ``cryptography`` or the ``pyopenssl`` backend, and knows
    how to generate, compare, remove and serialize it for module output.
    """
    def __init__(self, module, backend):
        super(PublicKey, self).__init__(
            module.params['path'],
            module.params['state'],
            module.params['force'],
            module.check_mode
        )
        self.format = module.params['format']
        self.privatekey_path = module.params['privatekey_path']
        self.privatekey_content = module.params['privatekey_content']
        # The crypto backends expect bytes, not text.
        if self.privatekey_content is not None:
            self.privatekey_content = self.privatekey_content.encode('utf-8')
        self.privatekey_passphrase = module.params['privatekey_passphrase']
        self.privatekey = None
        self.publickey_bytes = None
        self.return_content = module.params['return_content']
        self.fingerprint = {}
        self.backend = backend
        self.backup = module.params['backup']
        self.backup_file = None
    def _create_publickey(self, module):
        """Load the private key and return the serialized public key as bytes.

        The output encoding depends on ``self.format`` (OpenSSH or PEM) and on
        the selected backend; the pyOpenSSL backend can only produce PEM.
        """
        self.privatekey = load_privatekey(
            path=self.privatekey_path,
            content=self.privatekey_content,
            passphrase=self.privatekey_passphrase,
            backend=self.backend
        )
        if self.backend == 'cryptography':
            if self.format == 'OpenSSH':
                return self.privatekey.public_key().public_bytes(
                    crypto_serialization.Encoding.OpenSSH,
                    crypto_serialization.PublicFormat.OpenSSH
                )
            else:
                return self.privatekey.public_key().public_bytes(
                    crypto_serialization.Encoding.PEM,
                    crypto_serialization.PublicFormat.SubjectPublicKeyInfo
                )
        else:
            try:
                # dump_publickey() only exists in pyOpenSSL >= 16.0.0.
                return crypto.dump_publickey(crypto.FILETYPE_PEM, self.privatekey)
            except AttributeError as dummy:
                raise PublicKeyError('You need to have PyOpenSSL>=16.0.0 to generate public keys')
    def generate(self, module):
        """Generate the public key."""
        # Only a missing private key *file* is an error here; inline content
        # needs no existence check.
        if self.privatekey_content is None and not os.path.exists(self.privatekey_path):
            raise PublicKeyError(
                'The private key %s does not exist' % self.privatekey_path
            )
        if not self.check(module, perms_required=False) or self.force:
            try:
                publickey_content = self._create_publickey(module)
                if self.return_content:
                    self.publickey_bytes = publickey_content
                # Back up the current file (if any) before overwriting it.
                if self.backup:
                    self.backup_file = module.backup_local(self.path)
                write_file(module, publickey_content)
                self.changed = True
            except OpenSSLBadPassphraseError as exc:
                raise PublicKeyError(exc)
            except (IOError, OSError) as exc:
                raise PublicKeyError(exc)
        self.fingerprint = get_fingerprint(
            path=self.privatekey_path,
            content=self.privatekey_content,
            passphrase=self.privatekey_passphrase,
            backend=self.backend,
        )
        # Apply ownership/permissions from the common file arguments.
        file_args = module.load_file_common_arguments(module.params)
        if module.set_fs_attributes_if_different(file_args, False):
            self.changed = True
    def check(self, module, perms_required=True):
        """Ensure the resource is in its desired state."""
        state_and_perms = super(PublicKey, self).check(module, perms_required)
        def _check_privatekey():
            # Re-derive the desired public key and compare it with what is
            # currently on disk (normalized by round-tripping through the backend).
            if self.privatekey_content is None and not os.path.exists(self.privatekey_path):
                return False
            try:
                with open(self.path, 'rb') as public_key_fh:
                    publickey_content = public_key_fh.read()
                if self.return_content:
                    self.publickey_bytes = publickey_content
                if self.backend == 'cryptography':
                    if self.format == 'OpenSSH':
                        # Read and dump public key. Makes sure that the comment is stripped off.
                        current_publickey = crypto_serialization.load_ssh_public_key(publickey_content, backend=default_backend())
                        publickey_content = current_publickey.public_bytes(
                            crypto_serialization.Encoding.OpenSSH,
                            crypto_serialization.PublicFormat.OpenSSH
                        )
                    else:
                        current_publickey = crypto_serialization.load_pem_public_key(publickey_content, backend=default_backend())
                        publickey_content = current_publickey.public_bytes(
                            crypto_serialization.Encoding.PEM,
                            crypto_serialization.PublicFormat.SubjectPublicKeyInfo
                        )
                else:
                    publickey_content = crypto.dump_publickey(
                        crypto.FILETYPE_PEM,
                        crypto.load_publickey(crypto.FILETYPE_PEM, publickey_content)
                    )
            except Exception as dummy:
                # An unreadable or malformed file on disk means "needs regeneration".
                return False
            try:
                desired_publickey = self._create_publickey(module)
            except OpenSSLBadPassphraseError as exc:
                raise PublicKeyError(exc)
            return publickey_content == desired_publickey
        if not state_and_perms:
            return state_and_perms
        return _check_privatekey()
    def remove(self, module):
        # Take a backup copy before deleting, if requested.
        if self.backup:
            self.backup_file = module.backup_local(self.path)
        super(PublicKey, self).remove(module)
    def dump(self):
        """Serialize the object into a dictionary."""
        result = {
            'privatekey': self.privatekey_path,
            'filename': self.path,
            'format': self.format,
            'changed': self.changed,
            'fingerprint': self.fingerprint,
        }
        if self.backup_file:
            result['backup_file'] = self.backup_file
        if self.return_content:
            # Lazily read the key back from disk if it was not captured
            # during generate()/check().
            if self.publickey_bytes is None:
                self.publickey_bytes = load_file_if_exists(self.path, ignore_errors=True)
            result['publickey'] = self.publickey_bytes.decode('utf-8') if self.publickey_bytes else None
        return result
def main():
    """Module entry point: create or remove an OpenSSL public key.

    Chooses a usable crypto backend (cryptography preferred; pyOpenSSL is a
    deprecated fallback that cannot produce OpenSSH output), validates the
    destination directory, and hands the work off to PublicKey.
    """
    module = AnsibleModule(
        argument_spec=dict(
            state=dict(type='str', default='present', choices=['present', 'absent']),
            force=dict(type='bool', default=False),
            path=dict(type='path', required=True),
            privatekey_path=dict(type='path'),
            privatekey_content=dict(type='str', no_log=True),
            format=dict(type='str', default='PEM', choices=['OpenSSH', 'PEM']),
            privatekey_passphrase=dict(type='str', no_log=True),
            backup=dict(type='bool', default=False),
            select_crypto_backend=dict(type='str', choices=['auto', 'pyopenssl', 'cryptography'], default='auto'),
            return_content=dict(type='bool', default=False),
        ),
        supports_check_mode=True,
        add_file_common_args=True,
        required_if=[('state', 'present', ['privatekey_path', 'privatekey_content'], True)],
        mutually_exclusive=(
            ['privatekey_path', 'privatekey_content'],
        ),
    )
    wants_openssh = module.params['format'] == 'OpenSSH'
    # OpenSSH output requires a newer cryptography release than PEM output.
    minimal_cryptography_version = (
        MINIMAL_CRYPTOGRAPHY_VERSION_OPENSSH if wants_openssh else MINIMAL_CRYPTOGRAPHY_VERSION
    )
    backend = module.params['select_crypto_backend']
    if backend == 'auto':
        # Probe for usable backends; cryptography wins when both are present.
        cryptography_usable = CRYPTOGRAPHY_FOUND and CRYPTOGRAPHY_VERSION >= LooseVersion(minimal_cryptography_version)
        pyopenssl_usable = PYOPENSSL_FOUND and PYOPENSSL_VERSION >= LooseVersion(MINIMAL_PYOPENSSL_VERSION)
        if cryptography_usable:
            backend = 'cryptography'
        elif pyopenssl_usable:
            # pyOpenSSL cannot emit OpenSSH-format keys.
            if wants_openssh:
                module.fail_json(
                    msg=missing_required_lib('cryptography >= {0}'.format(MINIMAL_CRYPTOGRAPHY_VERSION_OPENSSH)),
                    exception=CRYPTOGRAPHY_IMP_ERR
                )
            backend = 'pyopenssl'
        else:
            module.fail_json(msg=("Can't detect any of the required Python libraries "
                                  "cryptography (>= {0}) or PyOpenSSL (>= {1})").format(
                minimal_cryptography_version,
                MINIMAL_PYOPENSSL_VERSION))
    if wants_openssh and backend != 'cryptography':
        module.fail_json(msg="Format OpenSSH requires the cryptography backend.")
    if backend == 'pyopenssl':
        if not PYOPENSSL_FOUND:
            module.fail_json(msg=missing_required_lib('pyOpenSSL >= {0}'.format(MINIMAL_PYOPENSSL_VERSION)),
                             exception=PYOPENSSL_IMP_ERR)
        module.deprecate('The module is using the PyOpenSSL backend. This backend has been deprecated',
                         version='2.0.0', collection_name='community.crypto')
    elif backend == 'cryptography':
        if not CRYPTOGRAPHY_FOUND:
            module.fail_json(msg=missing_required_lib('cryptography >= {0}'.format(minimal_cryptography_version)),
                             exception=CRYPTOGRAPHY_IMP_ERR)
    # The containing directory must already exist; the module does not create it.
    key_dir = os.path.dirname(module.params['path']) or '.'
    if not os.path.isdir(key_dir):
        module.fail_json(
            name=key_dir,
            msg="The directory '%s' does not exist or the file is not a directory" % key_dir
        )
    try:
        public_key = PublicKey(module, backend)
        if module.check_mode:
            # Report what would change without touching the filesystem.
            result = public_key.dump()
            if public_key.state == 'present':
                result['changed'] = module.params['force'] or not public_key.check(module)
            else:
                result['changed'] = os.path.exists(module.params['path'])
            module.exit_json(**result)
        if public_key.state == 'present':
            public_key.generate(module)
        else:
            public_key.remove(module)
        module.exit_json(**public_key.dump())
    except OpenSSLObjectError as exc:
        module.fail_json(msg=to_native(exc))
if __name__ == '__main__':
    main()
|
4295_4
|
crossvul
|
py
|
CWE-116
|
Improper Encoding or Escaping of Output - Improper encoding or escaping can allow attackers to change the commands that are sent to another component, inserting malicious commands instead.
|
python
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2019, Patrick Pichler <ppichler+ansible@mgit.at>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
module: openssl_signature
version_added: 1.1.0
short_description: Sign data with openssl
description:
- This module allows one to sign data using a private key.
- The module can use the cryptography Python library, or the pyOpenSSL Python
library. By default, it tries to detect which one is available. This can be
overridden with the I(select_crypto_backend) option. Please note that the PyOpenSSL backend
was deprecated in Ansible 2.9 and will be removed in community.crypto 2.0.0.
requirements:
- Either cryptography >= 1.4 (some key types require newer versions)
- Or pyOpenSSL >= 0.11 (Ed25519 and Ed448 keys are not supported with this backend)
author:
- Patrick Pichler (@aveexy)
- Markus Teufelberger (@MarkusTeufelberger)
options:
privatekey_path:
description:
- The path to the private key to use when signing.
- Either I(privatekey_path) or I(privatekey_content) must be specified, but not both.
type: path
privatekey_content:
description:
- The content of the private key to use when signing the certificate signing request.
- Either I(privatekey_path) or I(privatekey_content) must be specified, but not both.
type: str
privatekey_passphrase:
description:
- The passphrase for the private key.
- This is required if the private key is password protected.
type: str
path:
description:
- The file to sign.
- This file will only be read and not modified.
type: path
required: true
select_crypto_backend:
description:
- Determines which crypto backend to use.
- The default choice is C(auto), which tries to use C(cryptography) if available, and falls back to C(pyopenssl).
- If set to C(pyopenssl), will try to use the L(pyOpenSSL,https://pypi.org/project/pyOpenSSL/) library.
- If set to C(cryptography), will try to use the L(cryptography,https://cryptography.io/) library.
type: str
default: auto
choices: [ auto, cryptography, pyopenssl ]
notes:
- |
When using the C(cryptography) backend, the following key types require at least the following C(cryptography) version:
RSA keys: C(cryptography) >= 1.4
DSA and ECDSA keys: C(cryptography) >= 1.5
ed448 and ed25519 keys: C(cryptography) >= 2.6
seealso:
- module: community.crypto.openssl_signature_info
- module: community.crypto.openssl_privatekey
'''
EXAMPLES = r'''
- name: Sign example file
community.crypto.openssl_signature:
privatekey_path: private.key
path: /tmp/example_file
register: sig
- name: Verify signature of example file
community.crypto.openssl_signature_info:
certificate_path: cert.pem
path: /tmp/example_file
signature: "{{ sig.signature }}"
register: verify
- name: Make sure the signature is valid
assert:
that:
- verify.valid
'''
RETURN = r'''
signature:
description: Base64 encoded signature.
returned: success
type: str
'''
import os
import traceback
from distutils.version import LooseVersion
import base64
MINIMAL_PYOPENSSL_VERSION = '0.11'
MINIMAL_CRYPTOGRAPHY_VERSION = '1.4'
PYOPENSSL_IMP_ERR = None
try:
import OpenSSL
from OpenSSL import crypto
PYOPENSSL_VERSION = LooseVersion(OpenSSL.__version__)
except ImportError:
PYOPENSSL_IMP_ERR = traceback.format_exc()
PYOPENSSL_FOUND = False
else:
PYOPENSSL_FOUND = True
CRYPTOGRAPHY_IMP_ERR = None
try:
import cryptography
import cryptography.hazmat.primitives.asymmetric.padding
import cryptography.hazmat.primitives.hashes
CRYPTOGRAPHY_VERSION = LooseVersion(cryptography.__version__)
except ImportError:
CRYPTOGRAPHY_IMP_ERR = traceback.format_exc()
CRYPTOGRAPHY_FOUND = False
else:
CRYPTOGRAPHY_FOUND = True
from ansible_collections.community.crypto.plugins.module_utils.crypto.basic import (
CRYPTOGRAPHY_HAS_DSA_SIGN,
CRYPTOGRAPHY_HAS_EC_SIGN,
CRYPTOGRAPHY_HAS_ED25519_SIGN,
CRYPTOGRAPHY_HAS_ED448_SIGN,
CRYPTOGRAPHY_HAS_RSA_SIGN,
OpenSSLObjectError,
)
from ansible_collections.community.crypto.plugins.module_utils.crypto.support import (
OpenSSLObject,
load_privatekey,
)
from ansible.module_utils._text import to_native, to_bytes
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
class SignatureBase(OpenSSLObject):
    """Shared state for the backend-specific signing implementations.

    Pulls the private-key related module parameters out of ``module.params``
    and stores them on the instance for the subclasses' ``run()`` methods.
    """

    def __init__(self, module, backend):
        super(SignatureBase, self).__init__(
            path=module.params['path'],
            state='present',
            force=False,
            check_mode=module.check_mode
        )
        params = module.params
        self.backend = backend
        self.privatekey_path = params['privatekey_path']
        key_content = params['privatekey_content']
        if key_content is not None:
            # load_privatekey() expects bytes for inline key content.
            key_content = key_content.encode('utf-8')
        self.privatekey_content = key_content
        self.privatekey_passphrase = params['privatekey_passphrase']

    def generate(self):
        # OpenSSLObject requires this method; signing generates no files.
        pass

    def dump(self):
        # OpenSSLObject requires this method; there is no state to dump.
        pass
# Implementation using pyOpenSSL
class SignaturePyOpenSSL(SignatureBase):
    """Sign a file with the (deprecated) pyOpenSSL backend."""

    def __init__(self, module, backend):
        super(SignaturePyOpenSSL, self).__init__(module, backend)

    def run(self):
        """Read ``self.path``, sign it with SHA-256 and return the result.

        Returns a dict with a single ``signature`` key holding the
        base64-encoded signature bytes. Any failure is re-raised as
        OpenSSLObjectError so main() reports it via fail_json.
        """
        try:
            with open(self.path, "rb") as f:
                payload = f.read()
            key = load_privatekey(
                path=self.privatekey_path,
                content=self.privatekey_content,
                passphrase=self.privatekey_passphrase,
                backend=self.backend,
            )
            raw = OpenSSL.crypto.sign(key, payload, "sha256")
            return dict(signature=base64.b64encode(raw))
        except Exception as e:
            raise OpenSSLObjectError(e)
# Implementation using cryptography
class SignatureCryptography(SignatureBase):
    """Sign a file with the cryptography backend.

    Supports DSA, ECDSA, Ed25519, Ed448 and RSA keys; availability of each
    type depends on the installed cryptography version (the
    CRYPTOGRAPHY_HAS_* flags imported above).
    """

    def __init__(self, module, backend):
        super(SignatureCryptography, self).__init__(module, backend)
        # Bug fix: run() calls self.module.fail_json() for unsupported key
        # types, but nothing in the visible code ever assigned self.module,
        # so that path raised AttributeError instead of failing cleanly.
        self.module = module

    def run(self):
        """Sign ``self.path``; return dict with base64 ``signature``.

        Raises OpenSSLObjectError on any failure; calls fail_json (which
        exits) when the key type is not supported by this cryptography
        version.
        """
        _padding = cryptography.hazmat.primitives.asymmetric.padding.PKCS1v15()
        _hash = cryptography.hazmat.primitives.hashes.SHA256()
        result = dict()
        try:
            with open(self.path, "rb") as f:
                _in = f.read()
            private_key = load_privatekey(
                path=self.privatekey_path,
                content=self.privatekey_content,
                passphrase=self.privatekey_passphrase,
                backend=self.backend,
            )
            signature = None
            # Each key type requires a different sign() signature; the
            # feature flags guard attribute access on older releases.
            if CRYPTOGRAPHY_HAS_DSA_SIGN:
                if isinstance(private_key, cryptography.hazmat.primitives.asymmetric.dsa.DSAPrivateKey):
                    signature = private_key.sign(_in, _hash)
            if CRYPTOGRAPHY_HAS_EC_SIGN:
                if isinstance(private_key, cryptography.hazmat.primitives.asymmetric.ec.EllipticCurvePrivateKey):
                    signature = private_key.sign(_in, cryptography.hazmat.primitives.asymmetric.ec.ECDSA(_hash))
            if CRYPTOGRAPHY_HAS_ED25519_SIGN:
                if isinstance(private_key, cryptography.hazmat.primitives.asymmetric.ed25519.Ed25519PrivateKey):
                    signature = private_key.sign(_in)
            if CRYPTOGRAPHY_HAS_ED448_SIGN:
                if isinstance(private_key, cryptography.hazmat.primitives.asymmetric.ed448.Ed448PrivateKey):
                    signature = private_key.sign(_in)
            if CRYPTOGRAPHY_HAS_RSA_SIGN:
                if isinstance(private_key, cryptography.hazmat.primitives.asymmetric.rsa.RSAPrivateKey):
                    signature = private_key.sign(_in, _padding, _hash)
            if signature is None:
                self.module.fail_json(
                    msg="Unsupported key type. Your cryptography version is {0}".format(CRYPTOGRAPHY_VERSION)
                )
            result['signature'] = base64.b64encode(signature)
            return result
        except Exception as e:
            raise OpenSSLObjectError(e)
def main():
    """Entry point: sign module.params['path'] with the selected backend.

    Exits via module.exit_json with the base64 signature on success, or
    module.fail_json on any error.
    """
    module = AnsibleModule(
        argument_spec=dict(
            privatekey_path=dict(type='path'),
            # Security fix: the inline private key is secret material and
            # must never appear in logs or the recorded module invocation,
            # so it needs no_log=True (credential/output-exposure issue).
            privatekey_content=dict(type='str', no_log=True),
            privatekey_passphrase=dict(type='str', no_log=True),
            path=dict(type='path', required=True),
            select_crypto_backend=dict(type='str', choices=['auto', 'pyopenssl', 'cryptography'], default='auto'),
        ),
        mutually_exclusive=(
            ['privatekey_path', 'privatekey_content'],
        ),
        required_one_of=(
            ['privatekey_path', 'privatekey_content'],
        ),
        supports_check_mode=True,
    )
    if not os.path.isfile(module.params['path']):
        module.fail_json(
            name=module.params['path'],
            msg='The file {0} does not exist'.format(module.params['path'])
        )
    backend = module.params['select_crypto_backend']
    if backend == 'auto':
        # Prefer cryptography; fall back to the deprecated pyOpenSSL backend.
        can_use_cryptography = CRYPTOGRAPHY_FOUND and CRYPTOGRAPHY_VERSION >= LooseVersion(MINIMAL_CRYPTOGRAPHY_VERSION)
        can_use_pyopenssl = PYOPENSSL_FOUND and PYOPENSSL_VERSION >= LooseVersion(MINIMAL_PYOPENSSL_VERSION)
        if can_use_cryptography:
            backend = 'cryptography'
        elif can_use_pyopenssl:
            backend = 'pyopenssl'
        # Still 'auto' means neither library is usable.
        if backend == 'auto':
            module.fail_json(msg=("Can't detect any of the required Python libraries "
                                  "cryptography (>= {0}) or PyOpenSSL (>= {1})").format(
                MINIMAL_CRYPTOGRAPHY_VERSION,
                MINIMAL_PYOPENSSL_VERSION))
    try:
        if backend == 'pyopenssl':
            if not PYOPENSSL_FOUND:
                module.fail_json(msg=missing_required_lib('pyOpenSSL >= {0}'.format(MINIMAL_PYOPENSSL_VERSION)),
                                 exception=PYOPENSSL_IMP_ERR)
            module.deprecate('The module is using the PyOpenSSL backend. This backend has been deprecated',
                             version='2.0.0', collection_name='community.crypto')
            _sign = SignaturePyOpenSSL(module, backend)
        elif backend == 'cryptography':
            if not CRYPTOGRAPHY_FOUND:
                module.fail_json(msg=missing_required_lib('cryptography >= {0}'.format(MINIMAL_CRYPTOGRAPHY_VERSION)),
                                 exception=CRYPTOGRAPHY_IMP_ERR)
            _sign = SignatureCryptography(module, backend)
        result = _sign.run()
        module.exit_json(**result)
    except OpenSSLObjectError as exc:
        module.fail_json(msg=to_native(exc))
# Standard Ansible module entry point: run main() when executed directly.
if __name__ == '__main__':
    main()
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2019, Patrick Pichler <ppichler+ansible@mgit.at>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
module: openssl_signature
version_added: 1.1.0
short_description: Sign data with openssl
description:
- This module allows one to sign data using a private key.
- The module can use the cryptography Python library, or the pyOpenSSL Python
library. By default, it tries to detect which one is available. This can be
overridden with the I(select_crypto_backend) option. Please note that the PyOpenSSL backend
was deprecated in Ansible 2.9 and will be removed in community.crypto 2.0.0.
requirements:
- Either cryptography >= 1.4 (some key types require newer versions)
- Or pyOpenSSL >= 0.11 (Ed25519 and Ed448 keys are not supported with this backend)
author:
- Patrick Pichler (@aveexy)
- Markus Teufelberger (@MarkusTeufelberger)
options:
privatekey_path:
description:
- The path to the private key to use when signing.
- Either I(privatekey_path) or I(privatekey_content) must be specified, but not both.
type: path
privatekey_content:
description:
- The content of the private key to use when signing the certificate signing request.
- Either I(privatekey_path) or I(privatekey_content) must be specified, but not both.
type: str
privatekey_passphrase:
description:
- The passphrase for the private key.
- This is required if the private key is password protected.
type: str
path:
description:
- The file to sign.
- This file will only be read and not modified.
type: path
required: true
select_crypto_backend:
description:
- Determines which crypto backend to use.
- The default choice is C(auto), which tries to use C(cryptography) if available, and falls back to C(pyopenssl).
- If set to C(pyopenssl), will try to use the L(pyOpenSSL,https://pypi.org/project/pyOpenSSL/) library.
- If set to C(cryptography), will try to use the L(cryptography,https://cryptography.io/) library.
type: str
default: auto
choices: [ auto, cryptography, pyopenssl ]
notes:
- |
When using the C(cryptography) backend, the following key types require at least the following C(cryptography) version:
RSA keys: C(cryptography) >= 1.4
DSA and ECDSA keys: C(cryptography) >= 1.5
ed448 and ed25519 keys: C(cryptography) >= 2.6
seealso:
- module: community.crypto.openssl_signature_info
- module: community.crypto.openssl_privatekey
'''
EXAMPLES = r'''
- name: Sign example file
community.crypto.openssl_signature:
privatekey_path: private.key
path: /tmp/example_file
register: sig
- name: Verify signature of example file
community.crypto.openssl_signature_info:
certificate_path: cert.pem
path: /tmp/example_file
signature: "{{ sig.signature }}"
register: verify
- name: Make sure the signature is valid
assert:
that:
- verify.valid
'''
RETURN = r'''
signature:
description: Base64 encoded signature.
returned: success
type: str
'''
import os
import traceback
from distutils.version import LooseVersion
import base64
MINIMAL_PYOPENSSL_VERSION = '0.11'
MINIMAL_CRYPTOGRAPHY_VERSION = '1.4'
PYOPENSSL_IMP_ERR = None
try:
import OpenSSL
from OpenSSL import crypto
PYOPENSSL_VERSION = LooseVersion(OpenSSL.__version__)
except ImportError:
PYOPENSSL_IMP_ERR = traceback.format_exc()
PYOPENSSL_FOUND = False
else:
PYOPENSSL_FOUND = True
CRYPTOGRAPHY_IMP_ERR = None
try:
import cryptography
import cryptography.hazmat.primitives.asymmetric.padding
import cryptography.hazmat.primitives.hashes
CRYPTOGRAPHY_VERSION = LooseVersion(cryptography.__version__)
except ImportError:
CRYPTOGRAPHY_IMP_ERR = traceback.format_exc()
CRYPTOGRAPHY_FOUND = False
else:
CRYPTOGRAPHY_FOUND = True
from ansible_collections.community.crypto.plugins.module_utils.crypto.basic import (
CRYPTOGRAPHY_HAS_DSA_SIGN,
CRYPTOGRAPHY_HAS_EC_SIGN,
CRYPTOGRAPHY_HAS_ED25519_SIGN,
CRYPTOGRAPHY_HAS_ED448_SIGN,
CRYPTOGRAPHY_HAS_RSA_SIGN,
OpenSSLObjectError,
)
from ansible_collections.community.crypto.plugins.module_utils.crypto.support import (
OpenSSLObject,
load_privatekey,
)
from ansible.module_utils._text import to_native, to_bytes
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
class SignatureBase(OpenSSLObject):
    """Shared state for the backend-specific signing implementations."""
    def __init__(self, module, backend):
        super(SignatureBase, self).__init__(
            path=module.params['path'],
            state='present',
            force=False,
            check_mode=module.check_mode
        )
        self.backend = backend
        self.privatekey_path = module.params['privatekey_path']
        self.privatekey_content = module.params['privatekey_content']
        # load_privatekey() expects bytes for inline key content.
        if self.privatekey_content is not None:
            self.privatekey_content = self.privatekey_content.encode('utf-8')
        self.privatekey_passphrase = module.params['privatekey_passphrase']
    def generate(self):
        # Empty method because OpenSSLObject wants this
        pass
    def dump(self):
        # Empty method because OpenSSLObject wants this
        pass
# Implementation using pyOpenSSL
class SignaturePyOpenSSL(SignatureBase):
    """Sign a file with the (deprecated) pyOpenSSL backend."""

    def __init__(self, module, backend):
        super(SignaturePyOpenSSL, self).__init__(module, backend)

    def run(self):
        """Read ``self.path``, sign it with SHA-256 and return the result.

        Returns a dict with a single ``signature`` key holding the
        base64-encoded signature bytes. Any failure is re-raised as
        OpenSSLObjectError so main() reports it via fail_json.
        """
        try:
            with open(self.path, "rb") as f:
                payload = f.read()
            key = load_privatekey(
                path=self.privatekey_path,
                content=self.privatekey_content,
                passphrase=self.privatekey_passphrase,
                backend=self.backend,
            )
            raw = OpenSSL.crypto.sign(key, payload, "sha256")
            return dict(signature=base64.b64encode(raw))
        except Exception as e:
            raise OpenSSLObjectError(e)
# Implementation using cryptography
class SignatureCryptography(SignatureBase):
    """Sign a file with the cryptography backend.

    Supports DSA, ECDSA, Ed25519, Ed448 and RSA keys; availability of each
    type depends on the installed cryptography version (the
    CRYPTOGRAPHY_HAS_* flags imported above).
    """

    def __init__(self, module, backend):
        super(SignatureCryptography, self).__init__(module, backend)
        # Bug fix: run() calls self.module.fail_json() for unsupported key
        # types, but nothing in the visible code ever assigned self.module,
        # so that path raised AttributeError instead of failing cleanly.
        self.module = module

    def run(self):
        """Sign ``self.path``; return dict with base64 ``signature``.

        Raises OpenSSLObjectError on any failure; calls fail_json (which
        exits) when the key type is not supported by this cryptography
        version.
        """
        _padding = cryptography.hazmat.primitives.asymmetric.padding.PKCS1v15()
        _hash = cryptography.hazmat.primitives.hashes.SHA256()
        result = dict()
        try:
            with open(self.path, "rb") as f:
                _in = f.read()
            private_key = load_privatekey(
                path=self.privatekey_path,
                content=self.privatekey_content,
                passphrase=self.privatekey_passphrase,
                backend=self.backend,
            )
            signature = None
            # Each key type requires a different sign() signature; the
            # feature flags guard attribute access on older releases.
            if CRYPTOGRAPHY_HAS_DSA_SIGN:
                if isinstance(private_key, cryptography.hazmat.primitives.asymmetric.dsa.DSAPrivateKey):
                    signature = private_key.sign(_in, _hash)
            if CRYPTOGRAPHY_HAS_EC_SIGN:
                if isinstance(private_key, cryptography.hazmat.primitives.asymmetric.ec.EllipticCurvePrivateKey):
                    signature = private_key.sign(_in, cryptography.hazmat.primitives.asymmetric.ec.ECDSA(_hash))
            if CRYPTOGRAPHY_HAS_ED25519_SIGN:
                if isinstance(private_key, cryptography.hazmat.primitives.asymmetric.ed25519.Ed25519PrivateKey):
                    signature = private_key.sign(_in)
            if CRYPTOGRAPHY_HAS_ED448_SIGN:
                if isinstance(private_key, cryptography.hazmat.primitives.asymmetric.ed448.Ed448PrivateKey):
                    signature = private_key.sign(_in)
            if CRYPTOGRAPHY_HAS_RSA_SIGN:
                if isinstance(private_key, cryptography.hazmat.primitives.asymmetric.rsa.RSAPrivateKey):
                    signature = private_key.sign(_in, _padding, _hash)
            if signature is None:
                self.module.fail_json(
                    msg="Unsupported key type. Your cryptography version is {0}".format(CRYPTOGRAPHY_VERSION)
                )
            result['signature'] = base64.b64encode(signature)
            return result
        except Exception as e:
            raise OpenSSLObjectError(e)
def main():
    """Entry point: sign module.params['path'] with the selected backend.

    Exits via module.exit_json with the base64 signature on success, or
    module.fail_json on any error.
    """
    argument_spec = dict(
        privatekey_path=dict(type='path'),
        # Secret material: keep it out of logs and the recorded invocation.
        privatekey_content=dict(type='str', no_log=True),
        privatekey_passphrase=dict(type='str', no_log=True),
        path=dict(type='path', required=True),
        select_crypto_backend=dict(type='str', choices=['auto', 'pyopenssl', 'cryptography'], default='auto'),
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        mutually_exclusive=(
            ['privatekey_path', 'privatekey_content'],
        ),
        required_one_of=(
            ['privatekey_path', 'privatekey_content'],
        ),
        supports_check_mode=True,
    )
    input_path = module.params['path']
    if not os.path.isfile(input_path):
        module.fail_json(
            name=input_path,
            msg='The file {0} does not exist'.format(input_path)
        )
    backend = module.params['select_crypto_backend']
    if backend == 'auto':
        # Prefer cryptography; fall back to the deprecated pyOpenSSL backend.
        if CRYPTOGRAPHY_FOUND and CRYPTOGRAPHY_VERSION >= LooseVersion(MINIMAL_CRYPTOGRAPHY_VERSION):
            backend = 'cryptography'
        elif PYOPENSSL_FOUND and PYOPENSSL_VERSION >= LooseVersion(MINIMAL_PYOPENSSL_VERSION):
            backend = 'pyopenssl'
        # Still 'auto' means neither library is usable.
        if backend == 'auto':
            module.fail_json(msg=("Can't detect any of the required Python libraries "
                                  "cryptography (>= {0}) or PyOpenSSL (>= {1})").format(
                MINIMAL_CRYPTOGRAPHY_VERSION,
                MINIMAL_PYOPENSSL_VERSION))
    try:
        if backend == 'pyopenssl':
            if not PYOPENSSL_FOUND:
                module.fail_json(msg=missing_required_lib('pyOpenSSL >= {0}'.format(MINIMAL_PYOPENSSL_VERSION)),
                                 exception=PYOPENSSL_IMP_ERR)
            module.deprecate('The module is using the PyOpenSSL backend. This backend has been deprecated',
                             version='2.0.0', collection_name='community.crypto')
            signer = SignaturePyOpenSSL(module, backend)
        elif backend == 'cryptography':
            if not CRYPTOGRAPHY_FOUND:
                module.fail_json(msg=missing_required_lib('cryptography >= {0}'.format(MINIMAL_CRYPTOGRAPHY_VERSION)),
                                 exception=CRYPTOGRAPHY_IMP_ERR)
            signer = SignatureCryptography(module, backend)
        module.exit_json(**signer.run())
    except OpenSSLObjectError as exc:
        module.fail_json(msg=to_native(exc))
# Standard Ansible module entry point: run main() when executed directly.
if __name__ == '__main__':
    main()
|
4295_5
|
crossvul
|
py
|
CWE-116
|
Improper Encoding or Escaping of Output - Improper encoding or escaping can allow attackers to change the commands that are sent to another component, inserting malicious commands instead.
|
python
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2016-2017, Yanis Guenane <yanis+ansible@guenane.org>
# Copyright: (c) 2017, Markus Teufelberger <mteufelberger+ansible@mgit.at>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
module: x509_certificate
short_description: Generate and/or check OpenSSL certificates
description:
- This module allows one to (re)generate OpenSSL certificates.
- It implements a notion of provider (ie. C(selfsigned), C(ownca), C(acme), C(assertonly), C(entrust))
for your certificate.
- The C(assertonly) provider is intended for use cases where one is only interested in
checking properties of a supplied certificate. Please note that this provider has been
deprecated in Ansible 2.9 and will be removed in community.crypto 2.0.0. See the examples on how
to emulate C(assertonly) usage with M(community.crypto.x509_certificate_info),
M(community.crypto.openssl_csr_info), M(community.crypto.openssl_privatekey_info) and
M(ansible.builtin.assert). This also allows more flexible checks than
the ones offered by the C(assertonly) provider.
- The C(ownca) provider is intended for generating OpenSSL certificate signed with your own
CA (Certificate Authority) certificate (self-signed certificate).
- Many properties that can be specified in this module are for validation of an
existing or newly generated certificate. The proper place to specify them, if you
want to receive a certificate with these properties is a CSR (Certificate Signing Request).
- "Please note that the module regenerates existing certificate if it doesn't match the module's
options, or if it seems to be corrupt. If you are concerned that this could overwrite
your existing certificate, consider using the I(backup) option."
- It uses the pyOpenSSL or cryptography python library to interact with OpenSSL.
- If both the cryptography and PyOpenSSL libraries are available (and meet the minimum version requirements)
cryptography will be preferred as a backend over PyOpenSSL (unless the backend is forced with C(select_crypto_backend)).
Please note that the PyOpenSSL backend was deprecated in Ansible 2.9 and will be removed in community.crypto 2.0.0.
- Note that this module was called C(openssl_certificate) when included directly in Ansible up to version 2.9.
When moved to the collection C(community.crypto), it was renamed to
M(community.crypto.x509_certificate). From Ansible 2.10 on, it can still be used by the
old short name (or by C(ansible.builtin.openssl_certificate)), which redirects to
C(community.crypto.x509_certificate). When using FQCNs or when using the
L(collections,https://docs.ansible.com/ansible/latest/user_guide/collections_using.html#using-collections-in-a-playbook)
keyword, the new name M(community.crypto.x509_certificate) should be used to avoid
a deprecation warning.
requirements:
- PyOpenSSL >= 0.15 or cryptography >= 1.6 (if using C(selfsigned) or C(assertonly) provider)
- acme-tiny >= 4.0.0 (if using the C(acme) provider)
author:
- Yanis Guenane (@Spredzy)
- Markus Teufelberger (@MarkusTeufelberger)
options:
state:
description:
- Whether the certificate should exist or not, taking action if the state is different from what is stated.
type: str
default: present
choices: [ absent, present ]
path:
description:
- Remote absolute path where the generated certificate file should be created or is already located.
type: path
required: true
provider:
description:
- Name of the provider to use to generate/retrieve the OpenSSL certificate.
- The C(assertonly) provider will not generate files and fail if the certificate file is missing.
- The C(assertonly) provider has been deprecated in Ansible 2.9 and will be removed in community.crypto 2.0.0.
Please see the examples on how to emulate it with
M(community.crypto.x509_certificate_info), M(community.crypto.openssl_csr_info),
M(community.crypto.openssl_privatekey_info) and M(ansible.builtin.assert).
- "The C(entrust) provider was added for Ansible 2.9 and requires credentials for the
L(Entrust Certificate Services,https://www.entrustdatacard.com/products/categories/ssl-certificates) (ECS) API."
- Required if I(state) is C(present).
type: str
choices: [ acme, assertonly, entrust, ownca, selfsigned ]
force:
description:
- Generate the certificate, even if it already exists.
type: bool
default: no
csr_path:
description:
- Path to the Certificate Signing Request (CSR) used to generate this certificate.
- This is not required in C(assertonly) mode.
- This is mutually exclusive with I(csr_content).
type: path
csr_content:
description:
- Content of the Certificate Signing Request (CSR) used to generate this certificate.
- This is not required in C(assertonly) mode.
- This is mutually exclusive with I(csr_path).
type: str
version_added: '1.0.0'
privatekey_path:
description:
- Path to the private key to use when signing the certificate.
- This is mutually exclusive with I(privatekey_content).
type: path
privatekey_content:
description:
- Path to the private key to use when signing the certificate.
- This is mutually exclusive with I(privatekey_path).
type: str
version_added: '1.0.0'
privatekey_passphrase:
description:
- The passphrase for the I(privatekey_path) resp. I(privatekey_content).
- This is required if the private key is password protected.
type: str
selfsigned_version:
description:
- Version of the C(selfsigned) certificate.
- Nowadays it should almost always be C(3).
- This is only used by the C(selfsigned) provider.
type: int
default: 3
selfsigned_digest:
description:
- Digest algorithm to be used when self-signing the certificate.
- This is only used by the C(selfsigned) provider.
type: str
default: sha256
selfsigned_not_before:
description:
- The point in time the certificate is valid from.
- Time can be specified either as relative time or as absolute timestamp.
- Time will always be interpreted as UTC.
- Valid format is C([+-]timespec | ASN.1 TIME) where timespec can be an integer
+ C([w | d | h | m | s]) (e.g. C(+32w1d2h).
- Note that if using relative time this module is NOT idempotent.
- If this value is not specified, the certificate will start being valid from now.
- This is only used by the C(selfsigned) provider.
type: str
default: +0s
aliases: [ selfsigned_notBefore ]
selfsigned_not_after:
description:
- The point in time at which the certificate stops being valid.
- Time can be specified either as relative time or as absolute timestamp.
- Time will always be interpreted as UTC.
- Valid format is C([+-]timespec | ASN.1 TIME) where timespec can be an integer
+ C([w | d | h | m | s]) (e.g. C(+32w1d2h).
- Note that if using relative time this module is NOT idempotent.
- If this value is not specified, the certificate will stop being valid 10 years from now.
- This is only used by the C(selfsigned) provider.
- On macOS 10.15 and onwards, TLS server certificates must have a validity period of 825 days or fewer.
Please see U(https://support.apple.com/en-us/HT210176) for more details.
type: str
default: +3650d
aliases: [ selfsigned_notAfter ]
selfsigned_create_subject_key_identifier:
description:
- Whether to create the Subject Key Identifier (SKI) from the public key.
- A value of C(create_if_not_provided) (default) only creates a SKI when the CSR does not
provide one.
- A value of C(always_create) always creates a SKI. If the CSR provides one, that one is
ignored.
- A value of C(never_create) never creates a SKI. If the CSR provides one, that one is used.
- This is only used by the C(selfsigned) provider.
- Note that this is only supported if the C(cryptography) backend is used!
type: str
choices: [create_if_not_provided, always_create, never_create]
default: create_if_not_provided
ownca_path:
description:
- Remote absolute path of the CA (Certificate Authority) certificate.
- This is only used by the C(ownca) provider.
- This is mutually exclusive with I(ownca_content).
type: path
ownca_content:
description:
- Content of the CA (Certificate Authority) certificate.
- This is only used by the C(ownca) provider.
- This is mutually exclusive with I(ownca_path).
type: str
version_added: '1.0.0'
ownca_privatekey_path:
description:
- Path to the CA (Certificate Authority) private key to use when signing the certificate.
- This is only used by the C(ownca) provider.
- This is mutually exclusive with I(ownca_privatekey_content).
type: path
ownca_privatekey_content:
description:
- Path to the CA (Certificate Authority) private key to use when signing the certificate.
- This is only used by the C(ownca) provider.
- This is mutually exclusive with I(ownca_privatekey_path).
type: str
version_added: '1.0.0'
ownca_privatekey_passphrase:
description:
- The passphrase for the I(ownca_privatekey_path) resp. I(ownca_privatekey_content).
- This is only used by the C(ownca) provider.
type: str
ownca_digest:
description:
- The digest algorithm to be used for the C(ownca) certificate.
- This is only used by the C(ownca) provider.
type: str
default: sha256
ownca_version:
description:
- The version of the C(ownca) certificate.
- Nowadays it should almost always be C(3).
- This is only used by the C(ownca) provider.
type: int
default: 3
ownca_not_before:
description:
- The point in time the certificate is valid from.
- Time can be specified either as relative time or as absolute timestamp.
- Time will always be interpreted as UTC.
- Valid format is C([+-]timespec | ASN.1 TIME) where timespec can be an integer
+ C([w | d | h | m | s]) (e.g. C(+32w1d2h).
- Note that if using relative time this module is NOT idempotent.
- If this value is not specified, the certificate will start being valid from now.
- This is only used by the C(ownca) provider.
type: str
default: +0s
ownca_not_after:
description:
- The point in time at which the certificate stops being valid.
- Time can be specified either as relative time or as absolute timestamp.
- Time will always be interpreted as UTC.
- Valid format is C([+-]timespec | ASN.1 TIME) where timespec can be an integer
+ C([w | d | h | m | s]) (e.g. C(+32w1d2h).
- Note that if using relative time this module is NOT idempotent.
- If this value is not specified, the certificate will stop being valid 10 years from now.
- This is only used by the C(ownca) provider.
- On macOS 10.15 and onwards, TLS server certificates must have a validity period of 825 days or fewer.
Please see U(https://support.apple.com/en-us/HT210176) for more details.
type: str
default: +3650d
ownca_create_subject_key_identifier:
description:
- Whether to create the Subject Key Identifier (SKI) from the public key.
- A value of C(create_if_not_provided) (default) only creates a SKI when the CSR does not
provide one.
- A value of C(always_create) always creates a SKI. If the CSR provides one, that one is
ignored.
- A value of C(never_create) never creates a SKI. If the CSR provides one, that one is used.
- This is only used by the C(ownca) provider.
- Note that this is only supported if the C(cryptography) backend is used!
type: str
choices: [create_if_not_provided, always_create, never_create]
default: create_if_not_provided
ownca_create_authority_key_identifier:
description:
- Create a Authority Key Identifier from the CA's certificate. If the CSR provided
a authority key identifier, it is ignored.
- The Authority Key Identifier is generated from the CA certificate's Subject Key Identifier,
if available. If it is not available, the CA certificate's public key will be used.
- This is only used by the C(ownca) provider.
- Note that this is only supported if the C(cryptography) backend is used!
type: bool
default: yes
acme_accountkey_path:
description:
- The path to the accountkey for the C(acme) provider.
- This is only used by the C(acme) provider.
type: path
acme_challenge_path:
description:
- The path to the ACME challenge directory that is served on U(http://<HOST>:80/.well-known/acme-challenge/)
- This is only used by the C(acme) provider.
type: path
acme_chain:
description:
- Include the intermediate certificate to the generated certificate
- This is only used by the C(acme) provider.
- Note that this is only available for older versions of C(acme-tiny).
New versions include the chain automatically, and setting I(acme_chain) to C(yes) results in an error.
type: bool
default: no
acme_directory:
description:
- "The ACME directory to use. You can use any directory that supports the ACME protocol, such as Buypass or Let's Encrypt."
- "Let's Encrypt recommends using their staging server while developing jobs. U(https://letsencrypt.org/docs/staging-environment/)."
type: str
default: https://acme-v02.api.letsencrypt.org/directory
version_added: '1.0.0'
signature_algorithms:
description:
- A list of algorithms that you would accept the certificate to be signed with
(e.g. ['sha256WithRSAEncryption', 'sha512WithRSAEncryption']).
- This is only used by the C(assertonly) provider.
- This option is deprecated since Ansible 2.9 and will be removed with the C(assertonly) provider in community.crypto 2.0.0.
For alternatives, see the example on replacing C(assertonly).
type: list
elements: str
issuer:
description:
- The key/value pairs that must be present in the issuer name field of the certificate.
- If you need to specify more than one value with the same key, use a list as value.
- This is only used by the C(assertonly) provider.
- This option is deprecated since Ansible 2.9 and will be removed with the C(assertonly) provider in community.crypto 2.0.0.
For alternatives, see the example on replacing C(assertonly).
type: dict
issuer_strict:
description:
- If set to C(yes), the I(issuer) field must contain only these values.
- This is only used by the C(assertonly) provider.
- This option is deprecated since Ansible 2.9 and will be removed with the C(assertonly) provider in community.crypto 2.0.0.
For alternatives, see the example on replacing C(assertonly).
type: bool
default: no
subject:
description:
- The key/value pairs that must be present in the subject name field of the certificate.
- If you need to specify more than one value with the same key, use a list as value.
- This is only used by the C(assertonly) provider.
- This option is deprecated since Ansible 2.9 and will be removed with the C(assertonly) provider in community.crypto 2.0.0.
For alternatives, see the example on replacing C(assertonly).
type: dict
subject_strict:
description:
- If set to C(yes), the I(subject) field must contain only these values.
- This is only used by the C(assertonly) provider.
- This option is deprecated since Ansible 2.9 and will be removed with the C(assertonly) provider in community.crypto 2.0.0.
For alternatives, see the example on replacing C(assertonly).
type: bool
default: no
has_expired:
description:
- Checks if the certificate is expired/not expired at the time the module is executed.
- This is only used by the C(assertonly) provider.
- This option is deprecated since Ansible 2.9 and will be removed with the C(assertonly) provider in community.crypto 2.0.0.
For alternatives, see the example on replacing C(assertonly).
type: bool
default: no
version:
description:
- The version of the certificate.
- Nowadays it should almost always be 3.
- This is only used by the C(assertonly) provider.
- This option is deprecated since Ansible 2.9 and will be removed with the C(assertonly) provider in community.crypto 2.0.0.
For alternatives, see the example on replacing C(assertonly).
type: int
valid_at:
description:
- The certificate must be valid at this point in time.
- The timestamp is formatted as an ASN.1 TIME.
- This is only used by the C(assertonly) provider.
- This option is deprecated since Ansible 2.9 and will be removed with the C(assertonly) provider in community.crypto 2.0.0.
For alternatives, see the example on replacing C(assertonly).
type: str
invalid_at:
description:
- The certificate must be invalid at this point in time.
- The timestamp is formatted as an ASN.1 TIME.
- This is only used by the C(assertonly) provider.
- This option is deprecated since Ansible 2.9 and will be removed with the C(assertonly) provider in community.crypto 2.0.0.
For alternatives, see the example on replacing C(assertonly).
type: str
not_before:
description:
- The certificate must start to become valid at this point in time.
- The timestamp is formatted as an ASN.1 TIME.
- This is only used by the C(assertonly) provider.
- This option is deprecated since Ansible 2.9 and will be removed with the C(assertonly) provider in community.crypto 2.0.0.
For alternatives, see the example on replacing C(assertonly).
type: str
aliases: [ notBefore ]
not_after:
description:
- The certificate must expire at this point in time.
- The timestamp is formatted as an ASN.1 TIME.
- This is only used by the C(assertonly) provider.
- This option is deprecated since Ansible 2.9 and will be removed with the C(assertonly) provider in community.crypto 2.0.0.
For alternatives, see the example on replacing C(assertonly).
type: str
aliases: [ notAfter ]
valid_in:
description:
- The certificate must still be valid at this relative time offset from now.
- Valid format is C([+-]timespec | number_of_seconds) where timespec can be an integer
      + C([w | d | h | m | s]) (e.g. C(+32w1d2h)).
- Note that if using this parameter, this module is NOT idempotent.
- This is only used by the C(assertonly) provider.
- This option is deprecated since Ansible 2.9 and will be removed with the C(assertonly) provider in community.crypto 2.0.0.
For alternatives, see the example on replacing C(assertonly).
type: str
key_usage:
description:
- The I(key_usage) extension field must contain all these values.
- This is only used by the C(assertonly) provider.
- This option is deprecated since Ansible 2.9 and will be removed with the C(assertonly) provider in community.crypto 2.0.0.
For alternatives, see the example on replacing C(assertonly).
type: list
elements: str
aliases: [ keyUsage ]
key_usage_strict:
description:
- If set to C(yes), the I(key_usage) extension field must contain only these values.
- This is only used by the C(assertonly) provider.
- This option is deprecated since Ansible 2.9 and will be removed with the C(assertonly) provider in community.crypto 2.0.0.
For alternatives, see the example on replacing C(assertonly).
type: bool
default: no
aliases: [ keyUsage_strict ]
extended_key_usage:
description:
- The I(extended_key_usage) extension field must contain all these values.
- This is only used by the C(assertonly) provider.
- This option is deprecated since Ansible 2.9 and will be removed with the C(assertonly) provider in community.crypto 2.0.0.
For alternatives, see the example on replacing C(assertonly).
type: list
elements: str
aliases: [ extendedKeyUsage ]
extended_key_usage_strict:
description:
- If set to C(yes), the I(extended_key_usage) extension field must contain only these values.
- This is only used by the C(assertonly) provider.
- This option is deprecated since Ansible 2.9 and will be removed with the C(assertonly) provider in community.crypto 2.0.0.
For alternatives, see the example on replacing C(assertonly).
type: bool
default: no
aliases: [ extendedKeyUsage_strict ]
subject_alt_name:
description:
- The I(subject_alt_name) extension field must contain these values.
- This is only used by the C(assertonly) provider.
- This option is deprecated since Ansible 2.9 and will be removed with the C(assertonly) provider in community.crypto 2.0.0.
For alternatives, see the example on replacing C(assertonly).
type: list
elements: str
aliases: [ subjectAltName ]
subject_alt_name_strict:
description:
- If set to C(yes), the I(subject_alt_name) extension field must contain only these values.
- This is only used by the C(assertonly) provider.
- This option is deprecated since Ansible 2.9 and will be removed with the C(assertonly) provider in community.crypto 2.0.0.
For alternatives, see the example on replacing C(assertonly).
type: bool
default: no
aliases: [ subjectAltName_strict ]
select_crypto_backend:
description:
- Determines which crypto backend to use.
- The default choice is C(auto), which tries to use C(cryptography) if available, and falls back to C(pyopenssl).
- If set to C(pyopenssl), will try to use the L(pyOpenSSL,https://pypi.org/project/pyOpenSSL/) library.
- If set to C(cryptography), will try to use the L(cryptography,https://cryptography.io/) library.
- Please note that the C(pyopenssl) backend has been deprecated in Ansible 2.9, and will be removed in community.crypto 2.0.0.
From that point on, only the C(cryptography) backend will be available.
type: str
default: auto
choices: [ auto, cryptography, pyopenssl ]
backup:
description:
- Create a backup file including a timestamp so you can get the original
certificate back if you overwrote it with a new one by accident.
- This is not used by the C(assertonly) provider.
- This option is deprecated since Ansible 2.9 and will be removed with the C(assertonly) provider in community.crypto 2.0.0.
For alternatives, see the example on replacing C(assertonly).
type: bool
default: no
entrust_cert_type:
description:
- Specify the type of certificate requested.
- This is only used by the C(entrust) provider.
type: str
default: STANDARD_SSL
choices: [ 'STANDARD_SSL', 'ADVANTAGE_SSL', 'UC_SSL', 'EV_SSL', 'WILDCARD_SSL', 'PRIVATE_SSL', 'PD_SSL', 'CDS_ENT_LITE', 'CDS_ENT_PRO', 'SMIME_ENT' ]
entrust_requester_email:
description:
- The email of the requester of the certificate (for tracking purposes).
- This is only used by the C(entrust) provider.
- This is required if the provider is C(entrust).
type: str
entrust_requester_name:
description:
- The name of the requester of the certificate (for tracking purposes).
- This is only used by the C(entrust) provider.
- This is required if the provider is C(entrust).
type: str
entrust_requester_phone:
description:
- The phone number of the requester of the certificate (for tracking purposes).
- This is only used by the C(entrust) provider.
- This is required if the provider is C(entrust).
type: str
entrust_api_user:
description:
- The username for authentication to the Entrust Certificate Services (ECS) API.
- This is only used by the C(entrust) provider.
- This is required if the provider is C(entrust).
type: str
entrust_api_key:
description:
- The key (password) for authentication to the Entrust Certificate Services (ECS) API.
- This is only used by the C(entrust) provider.
- This is required if the provider is C(entrust).
type: str
entrust_api_client_cert_path:
description:
- The path to the client certificate used to authenticate to the Entrust Certificate Services (ECS) API.
- This is only used by the C(entrust) provider.
- This is required if the provider is C(entrust).
type: path
entrust_api_client_cert_key_path:
description:
- The path to the private key of the client certificate used to authenticate to the Entrust Certificate Services (ECS) API.
- This is only used by the C(entrust) provider.
- This is required if the provider is C(entrust).
type: path
entrust_not_after:
description:
- The point in time at which the certificate stops being valid.
- Time can be specified either as relative time or as an absolute timestamp.
- A valid absolute time format is C(ASN.1 TIME) such as C(2019-06-18).
    - A valid relative time format is C([+-]timespec) where timespec can be an integer + C([w | d | h | m | s]), such as C(+365d) or C(+32w1d2h).
- Time will always be interpreted as UTC.
- Note that only the date (day, month, year) is supported for specifying the expiry date of the issued certificate.
- The full date-time is adjusted to EST (GMT -5:00) before issuance, which may result in a certificate with an expiration date one day
earlier than expected if a relative time is used.
- The minimum certificate lifetime is 90 days, and maximum is three years.
    - If this value is not specified, the certificate will stop being valid 365 days after the date of issue.
- This is only used by the C(entrust) provider.
type: str
default: +365d
entrust_api_specification_path:
description:
- The path to the specification file defining the Entrust Certificate Services (ECS) API configuration.
- You can use this to keep a local copy of the specification to avoid downloading it every time the module is used.
- This is only used by the C(entrust) provider.
type: path
default: https://cloud.entrust.net/EntrustCloud/documentation/cms-api-2.1.0.yaml
return_content:
description:
- If set to C(yes), will return the (current or generated) certificate's content as I(certificate).
type: bool
default: no
version_added: '1.0.0'
extends_documentation_fragment: files
notes:
- All ASN.1 TIME values should be specified following the YYYYMMDDHHMMSSZ pattern.
- Date specified should be UTC. Minutes and seconds are mandatory.
- For security reason, when you use C(ownca) provider, you should NOT run
M(community.crypto.x509_certificate) on a target machine, but on a dedicated CA machine. It
is recommended not to store the CA private key on the target machine. Once signed, the
certificate can be moved to the target machine.
seealso:
- module: community.crypto.openssl_csr
- module: community.crypto.openssl_dhparam
- module: community.crypto.openssl_pkcs12
- module: community.crypto.openssl_privatekey
- module: community.crypto.openssl_publickey
'''
EXAMPLES = r'''
- name: Generate a Self Signed OpenSSL certificate
community.crypto.x509_certificate:
path: /etc/ssl/crt/ansible.com.crt
privatekey_path: /etc/ssl/private/ansible.com.pem
csr_path: /etc/ssl/csr/ansible.com.csr
provider: selfsigned
- name: Generate an OpenSSL certificate signed with your own CA certificate
community.crypto.x509_certificate:
path: /etc/ssl/crt/ansible.com.crt
csr_path: /etc/ssl/csr/ansible.com.csr
ownca_path: /etc/ssl/crt/ansible_CA.crt
ownca_privatekey_path: /etc/ssl/private/ansible_CA.pem
provider: ownca
- name: Generate a Let's Encrypt Certificate
community.crypto.x509_certificate:
path: /etc/ssl/crt/ansible.com.crt
csr_path: /etc/ssl/csr/ansible.com.csr
provider: acme
acme_accountkey_path: /etc/ssl/private/ansible.com.pem
acme_challenge_path: /etc/ssl/challenges/ansible.com/
- name: Force (re-)generate a new Let's Encrypt Certificate
community.crypto.x509_certificate:
path: /etc/ssl/crt/ansible.com.crt
csr_path: /etc/ssl/csr/ansible.com.csr
provider: acme
acme_accountkey_path: /etc/ssl/private/ansible.com.pem
acme_challenge_path: /etc/ssl/challenges/ansible.com/
force: yes
- name: Generate an Entrust certificate via the Entrust Certificate Services (ECS) API
community.crypto.x509_certificate:
path: /etc/ssl/crt/ansible.com.crt
csr_path: /etc/ssl/csr/ansible.com.csr
provider: entrust
entrust_requester_name: Jo Doe
entrust_requester_email: jdoe@ansible.com
entrust_requester_phone: 555-555-5555
entrust_cert_type: STANDARD_SSL
entrust_api_user: apiusername
entrust_api_key: a^lv*32!cd9LnT
entrust_api_client_cert_path: /etc/ssl/entrust/ecs-client.crt
entrust_api_client_cert_key_path: /etc/ssl/entrust/ecs-key.crt
entrust_api_specification_path: /etc/ssl/entrust/api-docs/cms-api-2.1.0.yaml
# The following example shows one assertonly usage using all existing options for
# assertonly, and shows how to emulate the behavior with the x509_certificate_info,
# openssl_csr_info, openssl_privatekey_info and assert modules:
- community.crypto.x509_certificate:
provider: assertonly
path: /etc/ssl/crt/ansible.com.crt
csr_path: /etc/ssl/csr/ansible.com.csr
privatekey_path: /etc/ssl/csr/ansible.com.key
signature_algorithms:
- sha256WithRSAEncryption
- sha512WithRSAEncryption
subject:
commonName: ansible.com
subject_strict: yes
issuer:
commonName: ansible.com
issuer_strict: yes
has_expired: no
version: 3
key_usage:
- Data Encipherment
key_usage_strict: yes
extended_key_usage:
- DVCS
extended_key_usage_strict: yes
subject_alt_name:
- dns:ansible.com
subject_alt_name_strict: yes
not_before: 20190331202428Z
not_after: 20190413202428Z
valid_at: "+1d10h"
invalid_at: 20200331202428Z
valid_in: 10 # in ten seconds
- community.crypto.x509_certificate_info:
path: /etc/ssl/crt/ansible.com.crt
# for valid_at, invalid_at and valid_in
valid_at:
one_day_ten_hours: "+1d10h"
fixed_timestamp: 20200331202428Z
ten_seconds: "+10"
register: result
- community.crypto.openssl_csr_info:
# Verifies that the CSR signature is valid; module will fail if not
path: /etc/ssl/csr/ansible.com.csr
register: result_csr
- community.crypto.openssl_privatekey_info:
path: /etc/ssl/csr/ansible.com.key
register: result_privatekey
- assert:
that:
# When private key is specified for assertonly, this will be checked:
- result.public_key == result_privatekey.public_key
# When CSR is specified for assertonly, this will be checked:
- result.public_key == result_csr.public_key
- result.subject_ordered == result_csr.subject_ordered
- result.extensions_by_oid == result_csr.extensions_by_oid
# signature_algorithms check
- "result.signature_algorithm == 'sha256WithRSAEncryption' or result.signature_algorithm == 'sha512WithRSAEncryption'"
# subject and subject_strict
- "result.subject.commonName == 'ansible.com'"
- "result.subject | length == 1" # the number must be the number of entries you check for
# issuer and issuer_strict
- "result.issuer.commonName == 'ansible.com'"
- "result.issuer | length == 1" # the number must be the number of entries you check for
# has_expired
- not result.expired
# version
- result.version == 3
# key_usage and key_usage_strict
- "'Data Encipherment' in result.key_usage"
- "result.key_usage | length == 1" # the number must be the number of entries you check for
# extended_key_usage and extended_key_usage_strict
- "'DVCS' in result.extended_key_usage"
- "result.extended_key_usage | length == 1" # the number must be the number of entries you check for
# subject_alt_name and subject_alt_name_strict
- "'dns:ansible.com' in result.subject_alt_name"
- "result.subject_alt_name | length == 1" # the number must be the number of entries you check for
# not_before and not_after
- "result.not_before == '20190331202428Z'"
- "result.not_after == '20190413202428Z'"
# valid_at, invalid_at and valid_in
- "result.valid_at.one_day_ten_hours" # for valid_at
- "not result.valid_at.fixed_timestamp" # for invalid_at
- "result.valid_at.ten_seconds" # for valid_in
# Examples for some checks one could use the assertonly provider for:
# (Please note that assertonly has been deprecated!)
# How to use the assertonly provider to implement and trigger your own custom certificate generation workflow:
- name: Check if a certificate is currently still valid, ignoring failures
community.crypto.x509_certificate:
path: /etc/ssl/crt/example.com.crt
provider: assertonly
has_expired: no
ignore_errors: yes
register: validity_check
- name: Run custom task(s) to get a new, valid certificate in case the initial check failed
command: superspecialSSL recreate /etc/ssl/crt/example.com.crt
when: validity_check.failed
- name: Check the new certificate again for validity with the same parameters, this time failing the play if it is still invalid
community.crypto.x509_certificate:
path: /etc/ssl/crt/example.com.crt
provider: assertonly
has_expired: no
when: validity_check.failed
# Some other checks that assertonly could be used for:
- name: Verify that an existing certificate was issued by the Let's Encrypt CA and is currently still valid
community.crypto.x509_certificate:
path: /etc/ssl/crt/example.com.crt
provider: assertonly
issuer:
O: Let's Encrypt
has_expired: no
- name: Ensure that a certificate uses a modern signature algorithm (no SHA1, MD5 or DSA)
community.crypto.x509_certificate:
path: /etc/ssl/crt/example.com.crt
provider: assertonly
signature_algorithms:
- sha224WithRSAEncryption
- sha256WithRSAEncryption
- sha384WithRSAEncryption
- sha512WithRSAEncryption
- sha224WithECDSAEncryption
- sha256WithECDSAEncryption
- sha384WithECDSAEncryption
- sha512WithECDSAEncryption
- name: Ensure that the existing certificate belongs to the specified private key
community.crypto.x509_certificate:
path: /etc/ssl/crt/example.com.crt
privatekey_path: /etc/ssl/private/example.com.pem
provider: assertonly
- name: Ensure that the existing certificate is still valid at the winter solstice 2017
community.crypto.x509_certificate:
path: /etc/ssl/crt/example.com.crt
provider: assertonly
valid_at: 20171221162800Z
- name: Ensure that the existing certificate is still valid 2 weeks (1209600 seconds) from now
community.crypto.x509_certificate:
path: /etc/ssl/crt/example.com.crt
provider: assertonly
valid_in: 1209600
- name: Ensure that the existing certificate is only used for digital signatures and encrypting other keys
community.crypto.x509_certificate:
path: /etc/ssl/crt/example.com.crt
provider: assertonly
key_usage:
- digitalSignature
- keyEncipherment
key_usage_strict: true
- name: Ensure that the existing certificate can be used for client authentication
community.crypto.x509_certificate:
path: /etc/ssl/crt/example.com.crt
provider: assertonly
extended_key_usage:
- clientAuth
- name: Ensure that the existing certificate can only be used for client authentication and time stamping
community.crypto.x509_certificate:
path: /etc/ssl/crt/example.com.crt
provider: assertonly
extended_key_usage:
- clientAuth
- 1.3.6.1.5.5.7.3.8
extended_key_usage_strict: true
- name: Ensure that the existing certificate has a certain domain in its subjectAltName
community.crypto.x509_certificate:
path: /etc/ssl/crt/example.com.crt
provider: assertonly
subject_alt_name:
- www.example.com
- test.example.com
'''
RETURN = r'''
filename:
description: Path to the generated certificate.
returned: changed or success
type: str
sample: /etc/ssl/crt/www.ansible.com.crt
backup_file:
description: Name of backup file created.
returned: changed and if I(backup) is C(yes)
type: str
sample: /path/to/www.ansible.com.crt.2019-03-09@11:22~
certificate:
description: The (current or generated) certificate's content.
returned: if I(state) is C(present) and I(return_content) is C(yes)
type: str
version_added: '1.0.0'
'''
import abc
import datetime
import time
import os
import tempfile
import traceback
from distutils.version import LooseVersion
from random import randrange
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils._text import to_native, to_bytes, to_text
from ansible_collections.community.crypto.plugins.module_utils.compat import ipaddress as compat_ipaddress
from ansible_collections.community.crypto.plugins.module_utils.ecs.api import ECSClient, RestOperationException, SessionConfigurationException
from ansible_collections.community.crypto.plugins.module_utils.io import (
load_file_if_exists,
write_file,
)
from ansible_collections.community.crypto.plugins.module_utils.crypto.basic import (
OpenSSLObjectError,
OpenSSLBadPassphraseError,
)
from ansible_collections.community.crypto.plugins.module_utils.crypto.support import (
OpenSSLObject,
load_privatekey,
load_certificate,
load_certificate_request,
parse_name_field,
get_relative_time_option,
select_message_digest,
)
from ansible_collections.community.crypto.plugins.module_utils.crypto.cryptography_support import (
cryptography_compare_public_keys,
cryptography_get_name,
cryptography_name_to_oid,
cryptography_key_needs_digest_for_signing,
cryptography_parse_key_usage_params,
cryptography_serial_number_of_cert,
)
from ansible_collections.community.crypto.plugins.module_utils.crypto.pyopenssl_support import (
pyopenssl_normalize_name_attribute,
)
# Minimum supported versions of the two crypto backends.
MINIMAL_CRYPTOGRAPHY_VERSION = '1.6'
MINIMAL_PYOPENSSL_VERSION = '0.15'
# Optional import of pyOpenSSL: on failure the traceback is preserved so it
# can be reported via module.fail_json(), and the FOUND flag records whether
# the backend is usable.
PYOPENSSL_IMP_ERR = None
try:
    import OpenSSL
    from OpenSSL import crypto
    PYOPENSSL_VERSION = LooseVersion(OpenSSL.__version__)
except ImportError:
    PYOPENSSL_IMP_ERR = traceback.format_exc()
    PYOPENSSL_FOUND = False
else:
    PYOPENSSL_FOUND = True
# Optional import of cryptography, guarded the same way as pyOpenSSL above.
CRYPTOGRAPHY_IMP_ERR = None
try:
    import cryptography
    from cryptography import x509
    from cryptography.hazmat.backends import default_backend
    from cryptography.hazmat.primitives.serialization import Encoding
    from cryptography.x509 import NameAttribute, Name
    from cryptography.x509.oid import NameOID
    CRYPTOGRAPHY_VERSION = LooseVersion(cryptography.__version__)
except ImportError:
    CRYPTOGRAPHY_IMP_ERR = traceback.format_exc()
    CRYPTOGRAPHY_FOUND = False
else:
    CRYPTOGRAPHY_FOUND = True
class CertificateError(OpenSSLObjectError):
    """Raised when loading, validating or generating a certificate fails."""
    pass
class Certificate(OpenSSLObject):
    """Base class for all certificate providers.

    Reads the module parameters shared by every provider (private key, CSR,
    backup settings) and implements the idempotence check used to decide
    whether an existing certificate on disk still matches the request.
    """
    def __init__(self, module, backend):
        # OpenSSLObject handles path/state/force/check_mode bookkeeping.
        super(Certificate, self).__init__(
            module.params['path'],
            module.params['state'],
            module.params['force'],
            module.check_mode
        )
        self.provider = module.params['provider']
        self.privatekey_path = module.params['privatekey_path']
        self.privatekey_content = module.params['privatekey_content']
        # Key/CSR content given inline is normalized to bytes for the loaders.
        if self.privatekey_content is not None:
            self.privatekey_content = self.privatekey_content.encode('utf-8')
        self.privatekey_passphrase = module.params['privatekey_passphrase']
        self.csr_path = module.params['csr_path']
        self.csr_content = module.params['csr_content']
        if self.csr_content is not None:
            self.csr_content = self.csr_content.encode('utf-8')
        # Loaded lazily in check() / by subclasses.
        self.cert = None
        self.privatekey = None
        self.csr = None
        self.backend = backend
        self.module = module
        self.return_content = module.params['return_content']
        # The following are default values which make sure check() works as
        # before if providers do not explicitly change these properties.
        self.create_subject_key_identifier = 'never_create'
        self.create_authority_key_identifier = False
        self.backup = module.params['backup']
        self.backup_file = None
    def _validate_privatekey(self):
        """Return True if self.cert's public key matches self.privatekey."""
        if self.backend == 'pyopenssl':
            # Pair key and cert in an SSL context; check_privatekey() raises
            # if they do not belong together.
            ctx = OpenSSL.SSL.Context(OpenSSL.SSL.TLSv1_2_METHOD)
            ctx.use_privatekey(self.privatekey)
            ctx.use_certificate(self.cert)
            try:
                ctx.check_privatekey()
                return True
            except OpenSSL.SSL.Error:
                return False
        elif self.backend == 'cryptography':
            return cryptography_compare_public_keys(self.cert.public_key(), self.privatekey.public_key())
        # NOTE(review): any other backend value falls through and returns None.
    def _validate_csr(self):
        """Return True if self.cert matches self.csr (signature, subject, extensions)."""
        if self.backend == 'pyopenssl':
            # Verify that CSR is signed by certificate's private key
            try:
                self.csr.verify(self.cert.get_pubkey())
            except OpenSSL.crypto.Error:
                return False
            # Check subject
            if self.csr.get_subject() != self.cert.get_subject():
                return False
            # Check extensions
            csr_extensions = self.csr.get_extensions()
            cert_extension_count = self.cert.get_extension_count()
            if len(csr_extensions) != cert_extension_count:
                return False
            for extension_number in range(0, cert_extension_count):
                cert_extension = self.cert.get_extension(extension_number)
                # Match CSR extension by short name, then compare raw data.
                csr_extension = filter(lambda extension: extension.get_short_name() == cert_extension.get_short_name(), csr_extensions)
                if cert_extension.get_data() != list(csr_extension)[0].get_data():
                    return False
            return True
        elif self.backend == 'cryptography':
            # Verify that CSR is signed by certificate's private key
            if not self.csr.is_signature_valid:
                return False
            if not cryptography_compare_public_keys(self.csr.public_key(), self.cert.public_key()):
                return False
            # Check subject
            if self.csr.subject != self.cert.subject:
                return False
            # Check extensions
            cert_exts = list(self.cert.extensions)
            csr_exts = list(self.csr.extensions)
            if self.create_subject_key_identifier != 'never_create':
                # Filter out SubjectKeyIdentifier extension before comparison
                cert_exts = list(filter(lambda x: not isinstance(x.value, x509.SubjectKeyIdentifier), cert_exts))
                csr_exts = list(filter(lambda x: not isinstance(x.value, x509.SubjectKeyIdentifier), csr_exts))
            if self.create_authority_key_identifier:
                # Filter out AuthorityKeyIdentifier extension before comparison
                cert_exts = list(filter(lambda x: not isinstance(x.value, x509.AuthorityKeyIdentifier), cert_exts))
                csr_exts = list(filter(lambda x: not isinstance(x.value, x509.AuthorityKeyIdentifier), csr_exts))
            if len(cert_exts) != len(csr_exts):
                return False
            for cert_ext in cert_exts:
                try:
                    csr_ext = self.csr.extensions.get_extension_for_oid(cert_ext.oid)
                    if cert_ext != csr_ext:
                        return False
                except cryptography.x509.ExtensionNotFound as dummy:
                    return False
            return True
    def remove(self, module):
        """Remove the certificate file, creating a backup first if requested."""
        if self.backup:
            self.backup_file = module.backup_local(self.path)
        super(Certificate, self).remove(module)
    def check(self, module, perms_required=True):
        """Ensure the resource is in its desired state.

        Returns False whenever the existing certificate cannot be loaded or
        does not match the private key / CSR, which makes the provider
        regenerate it.
        """
        state_and_perms = super(Certificate, self).check(module, perms_required)
        if not state_and_perms:
            return False
        try:
            self.cert = load_certificate(self.path, backend=self.backend)
        except Exception as dummy:
            # Unreadable/invalid certificate on disk -> needs regeneration.
            return False
        if self.privatekey_path or self.privatekey_content:
            try:
                self.privatekey = load_privatekey(
                    path=self.privatekey_path,
                    content=self.privatekey_content,
                    passphrase=self.privatekey_passphrase,
                    backend=self.backend
                )
            except OpenSSLBadPassphraseError as exc:
                raise CertificateError(exc)
            if not self._validate_privatekey():
                return False
        if self.csr_path or self.csr_content:
            self.csr = load_certificate_request(
                path=self.csr_path,
                content=self.csr_content,
                backend=self.backend
            )
            if not self._validate_csr():
                return False
        # Check SubjectKeyIdentifier
        if self.backend == 'cryptography' and self.create_subject_key_identifier != 'never_create':
            # Get hold of certificate's SKI
            try:
                ext = self.cert.extensions.get_extension_for_class(x509.SubjectKeyIdentifier)
            except cryptography.x509.ExtensionNotFound as dummy:
                return False
            # Get hold of CSR's SKI for 'create_if_not_provided'
            csr_ext = None
            if self.create_subject_key_identifier == 'create_if_not_provided':
                try:
                    csr_ext = self.csr.extensions.get_extension_for_class(x509.SubjectKeyIdentifier)
                except cryptography.x509.ExtensionNotFound as dummy:
                    pass
            if csr_ext is None:
                # If CSR had no SKI, or we chose to ignore it ('always_create'), compare with created SKI
                if ext.value.digest != x509.SubjectKeyIdentifier.from_public_key(self.cert.public_key()).digest:
                    return False
            else:
                # If CSR had SKI and we didn't ignore it ('create_if_not_provided'), compare SKIs
                if ext.value.digest != csr_ext.value.digest:
                    return False
        return True
class CertificateAbsent(Certificate):
    """Provider used for state=absent; nothing is ever generated."""

    def __init__(self, module):
        # The backend choice is irrelevant when the certificate is removed.
        super(CertificateAbsent, self).__init__(module, 'cryptography')

    def generate(self, module):
        """No-op: an absent certificate has nothing to generate."""
        pass

    def dump(self, check_mode=False):
        """Return the module result for an absent certificate."""
        result = dict(
            changed=self.changed,
            filename=self.path,
            privatekey=self.privatekey_path,
            csr=self.csr_path,
        )
        if self.backup_file:
            result['backup_file'] = self.backup_file
        if self.return_content:
            # There is no certificate to return once it has been removed.
            result['certificate'] = None
        return result
class SelfSignedCertificateCryptography(Certificate):
"""Generate the self-signed certificate, using the cryptography backend"""
def __init__(self, module):
super(SelfSignedCertificateCryptography, self).__init__(module, 'cryptography')
self.create_subject_key_identifier = module.params['selfsigned_create_subject_key_identifier']
self.notBefore = get_relative_time_option(module.params['selfsigned_not_before'], 'selfsigned_not_before', backend=self.backend)
self.notAfter = get_relative_time_option(module.params['selfsigned_not_after'], 'selfsigned_not_after', backend=self.backend)
self.digest = select_message_digest(module.params['selfsigned_digest'])
self.version = module.params['selfsigned_version']
self.serial_number = x509.random_serial_number()
if self.csr_content is None and not os.path.exists(self.csr_path):
raise CertificateError(
'The certificate signing request file {0} does not exist'.format(self.csr_path)
)
if self.privatekey_content is None and not os.path.exists(self.privatekey_path):
raise CertificateError(
'The private key file {0} does not exist'.format(self.privatekey_path)
)
self.csr = load_certificate_request(
path=self.csr_path,
content=self.csr_content,
backend=self.backend
)
self._module = module
try:
self.privatekey = load_privatekey(
path=self.privatekey_path,
content=self.privatekey_content,
passphrase=self.privatekey_passphrase,
backend=self.backend
)
except OpenSSLBadPassphraseError as exc:
module.fail_json(msg=to_native(exc))
if cryptography_key_needs_digest_for_signing(self.privatekey):
if self.digest is None:
raise CertificateError(
'The digest %s is not supported with the cryptography backend' % module.params['selfsigned_digest']
)
else:
self.digest = None
def generate(self, module):
if self.privatekey_content is None and not os.path.exists(self.privatekey_path):
raise CertificateError(
'The private key %s does not exist' % self.privatekey_path
)
if self.csr_content is None and not os.path.exists(self.csr_path):
raise CertificateError(
'The certificate signing request file %s does not exist' % self.csr_path
)
if not self.check(module, perms_required=False) or self.force:
try:
cert_builder = x509.CertificateBuilder()
cert_builder = cert_builder.subject_name(self.csr.subject)
cert_builder = cert_builder.issuer_name(self.csr.subject)
cert_builder = cert_builder.serial_number(self.serial_number)
cert_builder = cert_builder.not_valid_before(self.notBefore)
cert_builder = cert_builder.not_valid_after(self.notAfter)
cert_builder = cert_builder.public_key(self.privatekey.public_key())
has_ski = False
for extension in self.csr.extensions:
if isinstance(extension.value, x509.SubjectKeyIdentifier):
if self.create_subject_key_identifier == 'always_create':
continue
has_ski = True
cert_builder = cert_builder.add_extension(extension.value, critical=extension.critical)
if not has_ski and self.create_subject_key_identifier != 'never_create':
cert_builder = cert_builder.add_extension(
x509.SubjectKeyIdentifier.from_public_key(self.privatekey.public_key()),
critical=False
)
except ValueError as e:
raise CertificateError(str(e))
try:
certificate = cert_builder.sign(
private_key=self.privatekey, algorithm=self.digest,
backend=default_backend()
)
except TypeError as e:
if str(e) == 'Algorithm must be a registered hash algorithm.' and self.digest is None:
module.fail_json(msg='Signing with Ed25519 and Ed448 keys requires cryptography 2.8 or newer.')
raise
self.cert = certificate
if self.backup:
self.backup_file = module.backup_local(self.path)
write_file(module, certificate.public_bytes(Encoding.PEM))
self.changed = True
else:
self.cert = load_certificate(self.path, backend=self.backend)
file_args = module.load_file_common_arguments(module.params)
if module.set_fs_attributes_if_different(file_args, False):
self.changed = True
def dump(self, check_mode=False):
    """Return a result dictionary describing the (to-be-)generated certificate.

    In check mode the requested validity window and serial number are
    reported; otherwise the values are read back from the generated
    certificate object.
    """
    result = dict(
        changed=self.changed,
        filename=self.path,
        privatekey=self.privatekey_path,
        csr=self.csr_path,
    )
    if self.backup_file:
        result['backup_file'] = self.backup_file
    if self.return_content:
        content = load_file_if_exists(self.path, ignore_errors=True)
        result['certificate'] = content.decode('utf-8') if content else None
    if check_mode:
        validity = {
            'notBefore': self.notBefore.strftime("%Y%m%d%H%M%SZ"),
            'notAfter': self.notAfter.strftime("%Y%m%d%H%M%SZ"),
            'serial_number': self.serial_number,
        }
    else:
        validity = {
            'notBefore': self.cert.not_valid_before.strftime("%Y%m%d%H%M%SZ"),
            'notAfter': self.cert.not_valid_after.strftime("%Y%m%d%H%M%SZ"),
            'serial_number': cryptography_serial_number_of_cert(self.cert),
        }
    result.update(validity)
    return result
def generate_serial_number():
    """Generate a random serial number for a certificate.

    Uses the operating system's CSPRNG (``random.SystemRandom``) instead of
    the default Mersenne Twister PRNG: certificate serial numbers must not be
    predictable (RFC 5280 recommends sufficient entropy in serial numbers).

    :return: an integer in ``[1000, 2**160)``; small values are rejected so
        the serial always has a reasonable length.
    """
    import random
    rng = random.SystemRandom()
    while True:
        result = rng.randrange(0, 1 << 160)
        if result >= 1000:
            return result
class SelfSignedCertificate(Certificate):
    """Generate the self-signed certificate."""
    # pyOpenSSL implementation: the certificate is signed with the private key
    # belonging to the CSR, and subject == issuer.

    def __init__(self, module):
        super(SelfSignedCertificate, self).__init__(module, 'pyopenssl')
        # pyOpenSSL cannot create a SubjectKeyIdentifier extension itself, so
        # any value other than the passthrough default is rejected.
        if module.params['selfsigned_create_subject_key_identifier'] != 'create_if_not_provided':
            module.fail_json(msg='selfsigned_create_subject_key_identifier cannot be used with the pyOpenSSL backend!')
        self.notBefore = get_relative_time_option(module.params['selfsigned_not_before'], 'selfsigned_not_before', backend=self.backend)
        self.notAfter = get_relative_time_option(module.params['selfsigned_not_after'], 'selfsigned_not_after', backend=self.backend)
        self.digest = module.params['selfsigned_digest']
        self.version = module.params['selfsigned_version']
        self.serial_number = generate_serial_number()
        # Fail early when the CSR or private key is missing.  Inline content,
        # when given, takes precedence over the path.
        if self.csr_content is None and not os.path.exists(self.csr_path):
            raise CertificateError(
                'The certificate signing request file {0} does not exist'.format(self.csr_path)
            )
        if self.privatekey_content is None and not os.path.exists(self.privatekey_path):
            raise CertificateError(
                'The private key file {0} does not exist'.format(self.privatekey_path)
            )
        self.csr = load_certificate_request(
            path=self.csr_path,
            content=self.csr_content,
        )
        try:
            self.privatekey = load_privatekey(
                path=self.privatekey_path,
                content=self.privatekey_content,
                passphrase=self.privatekey_passphrase,
            )
        except OpenSSLBadPassphraseError as exc:
            module.fail_json(msg=str(exc))

    def generate(self, module):
        # Create and write the certificate when it is missing, does not pass
        # check(), or regeneration is forced.
        if self.privatekey_content is None and not os.path.exists(self.privatekey_path):
            raise CertificateError(
                'The private key %s does not exist' % self.privatekey_path
            )
        if self.csr_content is None and not os.path.exists(self.csr_path):
            raise CertificateError(
                'The certificate signing request file %s does not exist' % self.csr_path
            )
        if not self.check(module, perms_required=False) or self.force:
            cert = crypto.X509()
            cert.set_serial_number(self.serial_number)
            cert.set_notBefore(to_bytes(self.notBefore))
            cert.set_notAfter(to_bytes(self.notAfter))
            # Self-signed: subject and issuer are both taken from the CSR.
            cert.set_subject(self.csr.get_subject())
            cert.set_issuer(self.csr.get_subject())
            # pyOpenSSL versions are zero-based (v1 == 0, v3 == 2).
            cert.set_version(self.version - 1)
            cert.set_pubkey(self.csr.get_pubkey())
            cert.add_extensions(self.csr.get_extensions())
            cert.sign(self.privatekey, self.digest)
            self.cert = cert
            if self.backup:
                self.backup_file = module.backup_local(self.path)
            write_file(module, crypto.dump_certificate(crypto.FILETYPE_PEM, self.cert))
            self.changed = True
        # Apply file attributes (owner/group/mode) even when no regeneration
        # happened; attribute drift alone also counts as a change.
        file_args = module.load_file_common_arguments(module.params)
        if module.set_fs_attributes_if_different(file_args, False):
            self.changed = True

    def dump(self, check_mode=False):
        # Build the module result.  In check mode the requested values are
        # reported; otherwise they are read from the generated certificate.
        result = {
            'changed': self.changed,
            'filename': self.path,
            'privatekey': self.privatekey_path,
            'csr': self.csr_path
        }
        if self.backup_file:
            result['backup_file'] = self.backup_file
        if self.return_content:
            content = load_file_if_exists(self.path, ignore_errors=True)
            result['certificate'] = content.decode('utf-8') if content else None
        if check_mode:
            result.update({
                'notBefore': self.notBefore,
                'notAfter': self.notAfter,
                'serial_number': self.serial_number,
            })
        else:
            result.update({
                'notBefore': self.cert.get_notBefore(),
                'notAfter': self.cert.get_notAfter(),
                'serial_number': self.cert.get_serial_number(),
            })
        return result
class OwnCACertificateCryptography(Certificate):
    """Generate the own CA certificate. Using the cryptography backend"""

    def __init__(self, module):
        super(OwnCACertificateCryptography, self).__init__(module, 'cryptography')
        self.create_subject_key_identifier = module.params['ownca_create_subject_key_identifier']
        self.create_authority_key_identifier = module.params['ownca_create_authority_key_identifier']
        self.notBefore = get_relative_time_option(module.params['ownca_not_before'], 'ownca_not_before', backend=self.backend)
        self.notAfter = get_relative_time_option(module.params['ownca_not_after'], 'ownca_not_after', backend=self.backend)
        self.digest = select_message_digest(module.params['ownca_digest'])
        self.version = module.params['ownca_version']
        self.serial_number = x509.random_serial_number()
        # The CA certificate and key may be given either as a path or as
        # inline content; inline content is normalized to bytes.
        self.ca_cert_path = module.params['ownca_path']
        self.ca_cert_content = module.params['ownca_content']
        if self.ca_cert_content is not None:
            self.ca_cert_content = self.ca_cert_content.encode('utf-8')
        self.ca_privatekey_path = module.params['ownca_privatekey_path']
        self.ca_privatekey_content = module.params['ownca_privatekey_content']
        if self.ca_privatekey_content is not None:
            self.ca_privatekey_content = self.ca_privatekey_content.encode('utf-8')
        self.ca_privatekey_passphrase = module.params['ownca_privatekey_passphrase']
        # Fail early when any of the required input files is missing.
        if self.csr_content is None and not os.path.exists(self.csr_path):
            raise CertificateError(
                'The certificate signing request file {0} does not exist'.format(self.csr_path)
            )
        if self.ca_cert_content is None and not os.path.exists(self.ca_cert_path):
            raise CertificateError(
                'The CA certificate file {0} does not exist'.format(self.ca_cert_path)
            )
        if self.ca_privatekey_content is None and not os.path.exists(self.ca_privatekey_path):
            raise CertificateError(
                'The CA private key file {0} does not exist'.format(self.ca_privatekey_path)
            )
        self.csr = load_certificate_request(
            path=self.csr_path,
            content=self.csr_content,
            backend=self.backend
        )
        self.ca_cert = load_certificate(
            path=self.ca_cert_path,
            content=self.ca_cert_content,
            backend=self.backend
        )
        try:
            self.ca_private_key = load_privatekey(
                path=self.ca_privatekey_path,
                content=self.ca_privatekey_content,
                passphrase=self.ca_privatekey_passphrase,
                backend=self.backend
            )
        except OpenSSLBadPassphraseError as exc:
            module.fail_json(msg=str(exc))
        # Keys that need a digest for signing must have a supported one;
        # other keys (e.g. Ed25519/Ed448) must be signed with digest=None.
        if cryptography_key_needs_digest_for_signing(self.ca_private_key):
            if self.digest is None:
                raise CertificateError(
                    'The digest %s is not supported with the cryptography backend' % module.params['ownca_digest']
                )
        else:
            self.digest = None

    def generate(self, module):
        # Create and write the CA-signed certificate when it is missing, does
        # not pass check(), or regeneration is forced.
        if self.ca_cert_content is None and not os.path.exists(self.ca_cert_path):
            raise CertificateError(
                'The CA certificate %s does not exist' % self.ca_cert_path
            )
        if self.ca_privatekey_content is None and not os.path.exists(self.ca_privatekey_path):
            raise CertificateError(
                'The CA private key %s does not exist' % self.ca_privatekey_path
            )
        if self.csr_content is None and not os.path.exists(self.csr_path):
            raise CertificateError(
                'The certificate signing request file %s does not exist' % self.csr_path
            )
        if not self.check(module, perms_required=False) or self.force:
            cert_builder = x509.CertificateBuilder()
            cert_builder = cert_builder.subject_name(self.csr.subject)
            cert_builder = cert_builder.issuer_name(self.ca_cert.subject)
            cert_builder = cert_builder.serial_number(self.serial_number)
            cert_builder = cert_builder.not_valid_before(self.notBefore)
            cert_builder = cert_builder.not_valid_after(self.notAfter)
            cert_builder = cert_builder.public_key(self.csr.public_key())
            has_ski = False
            # Copy the CSR extensions, except: a CSR-provided SKI is dropped
            # when we always create our own, and a CSR-provided AKI is dropped
            # when we derive it from the CA certificate below.
            for extension in self.csr.extensions:
                if isinstance(extension.value, x509.SubjectKeyIdentifier):
                    if self.create_subject_key_identifier == 'always_create':
                        continue
                    has_ski = True
                if self.create_authority_key_identifier and isinstance(extension.value, x509.AuthorityKeyIdentifier):
                    continue
                cert_builder = cert_builder.add_extension(extension.value, critical=extension.critical)
            if not has_ski and self.create_subject_key_identifier != 'never_create':
                cert_builder = cert_builder.add_extension(
                    x509.SubjectKeyIdentifier.from_public_key(self.csr.public_key()),
                    critical=False
                )
            if self.create_authority_key_identifier:
                # Prefer deriving the AKI from the CA's SKI; fall back to the
                # CA public key if the CA certificate has no SKI extension.
                try:
                    ext = self.ca_cert.extensions.get_extension_for_class(x509.SubjectKeyIdentifier)
                    cert_builder = cert_builder.add_extension(
                        x509.AuthorityKeyIdentifier.from_issuer_subject_key_identifier(ext.value)
                        if CRYPTOGRAPHY_VERSION >= LooseVersion('2.7') else
                        x509.AuthorityKeyIdentifier.from_issuer_subject_key_identifier(ext),
                        critical=False
                    )
                except cryptography.x509.ExtensionNotFound:
                    cert_builder = cert_builder.add_extension(
                        x509.AuthorityKeyIdentifier.from_issuer_public_key(self.ca_cert.public_key()),
                        critical=False
                    )
            try:
                certificate = cert_builder.sign(
                    private_key=self.ca_private_key, algorithm=self.digest,
                    backend=default_backend()
                )
            except TypeError as e:
                # cryptography < 2.8 rejects algorithm=None (needed for
                # Ed25519/Ed448 keys) with this exact TypeError.
                if str(e) == 'Algorithm must be a registered hash algorithm.' and self.digest is None:
                    module.fail_json(msg='Signing with Ed25519 and Ed448 keys requires cryptography 2.8 or newer.')
                raise
            self.cert = certificate
            if self.backup:
                self.backup_file = module.backup_local(self.path)
            write_file(module, certificate.public_bytes(Encoding.PEM))
            self.changed = True
        else:
            self.cert = load_certificate(self.path, backend=self.backend)
        # Apply file attributes even when no regeneration happened.
        file_args = module.load_file_common_arguments(module.params)
        if module.set_fs_attributes_if_different(file_args, False):
            self.changed = True

    def check(self, module, perms_required=True):
        """Ensure the resource is in its desired state."""
        if not super(OwnCACertificateCryptography, self).check(module, perms_required):
            return False
        # Check AuthorityKeyIdentifier
        if self.create_authority_key_identifier:
            # Recompute the expected AKI the same way generate() would and
            # compare it against the one in the existing certificate.
            try:
                ext = self.ca_cert.extensions.get_extension_for_class(x509.SubjectKeyIdentifier)
                expected_ext = (
                    x509.AuthorityKeyIdentifier.from_issuer_subject_key_identifier(ext.value)
                    if CRYPTOGRAPHY_VERSION >= LooseVersion('2.7') else
                    x509.AuthorityKeyIdentifier.from_issuer_subject_key_identifier(ext)
                )
            except cryptography.x509.ExtensionNotFound:
                expected_ext = x509.AuthorityKeyIdentifier.from_issuer_public_key(self.ca_cert.public_key())
            try:
                ext = self.cert.extensions.get_extension_for_class(x509.AuthorityKeyIdentifier)
                if ext.value != expected_ext:
                    return False
            except cryptography.x509.ExtensionNotFound as dummy:
                return False
        return True

    def dump(self, check_mode=False):
        # Build the module result.  In check mode the requested values are
        # reported; otherwise they are read from the generated certificate.
        result = {
            'changed': self.changed,
            'filename': self.path,
            'privatekey': self.privatekey_path,
            'csr': self.csr_path,
            'ca_cert': self.ca_cert_path,
            'ca_privatekey': self.ca_privatekey_path
        }
        if self.backup_file:
            result['backup_file'] = self.backup_file
        if self.return_content:
            content = load_file_if_exists(self.path, ignore_errors=True)
            result['certificate'] = content.decode('utf-8') if content else None
        if check_mode:
            result.update({
                'notBefore': self.notBefore.strftime("%Y%m%d%H%M%SZ"),
                'notAfter': self.notAfter.strftime("%Y%m%d%H%M%SZ"),
                'serial_number': self.serial_number,
            })
        else:
            result.update({
                'notBefore': self.cert.not_valid_before.strftime("%Y%m%d%H%M%SZ"),
                'notAfter': self.cert.not_valid_after.strftime("%Y%m%d%H%M%SZ"),
                'serial_number': cryptography_serial_number_of_cert(self.cert),
            })
        return result
class OwnCACertificate(Certificate):
    """Generate the own CA certificate."""
    # pyOpenSSL implementation: sign the CSR with a separate CA certificate
    # and CA private key.

    def __init__(self, module):
        super(OwnCACertificate, self).__init__(module, 'pyopenssl')
        self.notBefore = get_relative_time_option(module.params['ownca_not_before'], 'ownca_not_before', backend=self.backend)
        self.notAfter = get_relative_time_option(module.params['ownca_not_after'], 'ownca_not_after', backend=self.backend)
        self.digest = module.params['ownca_digest']
        self.version = module.params['ownca_version']
        self.serial_number = generate_serial_number()
        # pyOpenSSL cannot create SKI/AKI extensions itself: reject non-default
        # SKI handling, and only warn about the ignored AKI option.
        if module.params['ownca_create_subject_key_identifier'] != 'create_if_not_provided':
            module.fail_json(msg='ownca_create_subject_key_identifier cannot be used with the pyOpenSSL backend!')
        if module.params['ownca_create_authority_key_identifier']:
            module.warn('ownca_create_authority_key_identifier is ignored by the pyOpenSSL backend!')
        # The CA certificate and key may be given either as a path or as
        # inline content; inline content is normalized to bytes.
        self.ca_cert_path = module.params['ownca_path']
        self.ca_cert_content = module.params['ownca_content']
        if self.ca_cert_content is not None:
            self.ca_cert_content = self.ca_cert_content.encode('utf-8')
        self.ca_privatekey_path = module.params['ownca_privatekey_path']
        self.ca_privatekey_content = module.params['ownca_privatekey_content']
        if self.ca_privatekey_content is not None:
            self.ca_privatekey_content = self.ca_privatekey_content.encode('utf-8')
        self.ca_privatekey_passphrase = module.params['ownca_privatekey_passphrase']
        # Fail early when any of the required input files is missing.
        if self.csr_content is None and not os.path.exists(self.csr_path):
            raise CertificateError(
                'The certificate signing request file {0} does not exist'.format(self.csr_path)
            )
        if self.ca_cert_content is None and not os.path.exists(self.ca_cert_path):
            raise CertificateError(
                'The CA certificate file {0} does not exist'.format(self.ca_cert_path)
            )
        if self.ca_privatekey_content is None and not os.path.exists(self.ca_privatekey_path):
            raise CertificateError(
                'The CA private key file {0} does not exist'.format(self.ca_privatekey_path)
            )
        self.csr = load_certificate_request(
            path=self.csr_path,
            content=self.csr_content,
        )
        self.ca_cert = load_certificate(
            path=self.ca_cert_path,
            content=self.ca_cert_content,
        )
        try:
            self.ca_privatekey = load_privatekey(
                path=self.ca_privatekey_path,
                content=self.ca_privatekey_content,
                passphrase=self.ca_privatekey_passphrase
            )
        except OpenSSLBadPassphraseError as exc:
            module.fail_json(msg=str(exc))

    def generate(self, module):
        # Create and write the CA-signed certificate when it is missing, does
        # not pass check(), or regeneration is forced.
        if self.ca_cert_content is None and not os.path.exists(self.ca_cert_path):
            raise CertificateError(
                'The CA certificate %s does not exist' % self.ca_cert_path
            )
        if self.ca_privatekey_content is None and not os.path.exists(self.ca_privatekey_path):
            raise CertificateError(
                'The CA private key %s does not exist' % self.ca_privatekey_path
            )
        if self.csr_content is None and not os.path.exists(self.csr_path):
            raise CertificateError(
                'The certificate signing request file %s does not exist' % self.csr_path
            )
        if not self.check(module, perms_required=False) or self.force:
            cert = crypto.X509()
            cert.set_serial_number(self.serial_number)
            cert.set_notBefore(to_bytes(self.notBefore))
            cert.set_notAfter(to_bytes(self.notAfter))
            # Subject from the CSR, issuer from the CA certificate.
            cert.set_subject(self.csr.get_subject())
            cert.set_issuer(self.ca_cert.get_subject())
            # pyOpenSSL versions are zero-based (v1 == 0, v3 == 2).
            cert.set_version(self.version - 1)
            cert.set_pubkey(self.csr.get_pubkey())
            cert.add_extensions(self.csr.get_extensions())
            cert.sign(self.ca_privatekey, self.digest)
            self.cert = cert
            if self.backup:
                self.backup_file = module.backup_local(self.path)
            write_file(module, crypto.dump_certificate(crypto.FILETYPE_PEM, self.cert))
            self.changed = True
        # Apply file attributes even when no regeneration happened.
        file_args = module.load_file_common_arguments(module.params)
        if module.set_fs_attributes_if_different(file_args, False):
            self.changed = True

    def dump(self, check_mode=False):
        # Build the module result.  In check mode the requested values are
        # reported; otherwise they are read from the generated certificate.
        result = {
            'changed': self.changed,
            'filename': self.path,
            'privatekey': self.privatekey_path,
            'csr': self.csr_path,
            'ca_cert': self.ca_cert_path,
            'ca_privatekey': self.ca_privatekey_path
        }
        if self.backup_file:
            result['backup_file'] = self.backup_file
        if self.return_content:
            content = load_file_if_exists(self.path, ignore_errors=True)
            result['certificate'] = content.decode('utf-8') if content else None
        if check_mode:
            result.update({
                'notBefore': self.notBefore,
                'notAfter': self.notAfter,
                'serial_number': self.serial_number,
            })
        else:
            result.update({
                'notBefore': self.cert.get_notBefore(),
                'notAfter': self.cert.get_notAfter(),
                'serial_number': self.cert.get_serial_number(),
            })
        return result
def compare_sets(subset, superset, equality=False):
    """Return True when subset is contained in superset.

    With equality=True, both collections must contain exactly the same
    elements (compared as unordered sets).
    """
    if equality:
        return set(subset) == set(superset)
    # Containment: every requested element must be present in superset.
    for element in subset:
        if element not in superset:
            return False
    return True
def compare_dicts(subset, superset, equality=False):
    """Return True when every key in subset maps to the same value in superset.

    With equality=True, the two dictionaries must be exactly equal.
    """
    if equality:
        return subset == superset
    # Containment: each expected key/value pair must match superset
    # (missing keys compare as None via .get()).
    for key, value in subset.items():
        if superset.get(key) != value:
            return False
    return True
# Sentinel returned by the _validate_* helpers when the certificate lacks the
# requested extension entirely (as opposed to having it with wrong content).
NO_EXTENSION = 'no extension'
class AssertOnlyCertificateBase(Certificate):
    """Common logic for the assertonly provider.

    Loads the certificate plus (optionally) its private key and CSR, and
    validates the user-supplied assertions against the certificate.  The
    backend-specific inspection primitives (the _validate_* methods) are
    implemented by subclasses.
    """

    def __init__(self, module, backend):
        super(AssertOnlyCertificateBase, self).__init__(module, backend)
        self.signature_algorithms = module.params['signature_algorithms']
        if module.params['subject']:
            self.subject = parse_name_field(module.params['subject'])
        else:
            self.subject = []
        self.subject_strict = module.params['subject_strict']
        if module.params['issuer']:
            self.issuer = parse_name_field(module.params['issuer'])
        else:
            self.issuer = []
        self.issuer_strict = module.params['issuer_strict']
        self.has_expired = module.params['has_expired']
        self.version = module.params['version']
        self.key_usage = module.params['key_usage']
        self.key_usage_strict = module.params['key_usage_strict']
        self.extended_key_usage = module.params['extended_key_usage']
        self.extended_key_usage_strict = module.params['extended_key_usage_strict']
        self.subject_alt_name = module.params['subject_alt_name']
        self.subject_alt_name_strict = module.params['subject_alt_name_strict']
        self.not_before = module.params['not_before']
        self.not_after = module.params['not_after']
        self.valid_at = module.params['valid_at']
        self.invalid_at = module.params['invalid_at']
        self.valid_in = module.params['valid_in']
        # A bare integer for valid_in means "seconds from now"; normalize it
        # to the relative timespec form ("+<n>s") used by the time helpers.
        if self.valid_in and not self.valid_in.startswith("+") and not self.valid_in.startswith("-"):
            try:
                int(self.valid_in)
            except ValueError:
                module.fail_json(msg='The supplied value for "valid_in" (%s) is not an integer or a valid timespec' % self.valid_in)
            self.valid_in = "+" + self.valid_in + "s"
        # Load objects
        self.cert = load_certificate(self.path, backend=self.backend)
        if self.privatekey_path is not None or self.privatekey_content is not None:
            try:
                self.privatekey = load_privatekey(
                    path=self.privatekey_path,
                    content=self.privatekey_content,
                    passphrase=self.privatekey_passphrase,
                    backend=self.backend
                )
            except OpenSSLBadPassphraseError as exc:
                raise CertificateError(exc)
        if self.csr_path is not None or self.csr_content is not None:
            self.csr = load_certificate_request(
                path=self.csr_path,
                content=self.csr_content,
                backend=self.backend
            )

    # Backend-specific inspection primitives.  By convention each returns a
    # falsy value when the assertion holds and diagnostic data (or the
    # NO_EXTENSION sentinel) when it does not; the time-window helpers
    # (_validate_valid_at and friends) instead return the values that
    # assertonly() needs for the comparison.
    @abc.abstractmethod
    def _validate_privatekey(self):
        pass

    @abc.abstractmethod
    def _validate_csr_signature(self):
        pass

    @abc.abstractmethod
    def _validate_csr_subject(self):
        pass

    @abc.abstractmethod
    def _validate_csr_extensions(self):
        pass

    @abc.abstractmethod
    def _validate_signature_algorithms(self):
        pass

    @abc.abstractmethod
    def _validate_subject(self):
        pass

    @abc.abstractmethod
    def _validate_issuer(self):
        pass

    @abc.abstractmethod
    def _validate_has_expired(self):
        pass

    @abc.abstractmethod
    def _validate_version(self):
        pass

    @abc.abstractmethod
    def _validate_key_usage(self):
        pass

    @abc.abstractmethod
    def _validate_extended_key_usage(self):
        pass

    @abc.abstractmethod
    def _validate_subject_alt_name(self):
        pass

    @abc.abstractmethod
    def _validate_not_before(self):
        pass

    @abc.abstractmethod
    def _validate_not_after(self):
        pass

    @abc.abstractmethod
    def _validate_valid_at(self):
        pass

    @abc.abstractmethod
    def _validate_invalid_at(self):
        pass

    @abc.abstractmethod
    def _validate_valid_in(self):
        pass

    def assertonly(self, module):
        """Run every requested assertion; return the list of failure messages."""
        messages = []
        if self.privatekey_path is not None or self.privatekey_content is not None:
            if not self._validate_privatekey():
                messages.append(
                    'Certificate %s and private key %s do not match' %
                    (self.path, self.privatekey_path or '(provided in module options)')
                )
        if self.csr_path is not None or self.csr_content is not None:
            if not self._validate_csr_signature():
                messages.append(
                    'Certificate %s and CSR %s do not match: private key mismatch' %
                    (self.path, self.csr_path or '(provided in module options)')
                )
            if not self._validate_csr_subject():
                messages.append(
                    'Certificate %s and CSR %s do not match: subject mismatch' %
                    (self.path, self.csr_path or '(provided in module options)')
                )
            if not self._validate_csr_extensions():
                messages.append(
                    'Certificate %s and CSR %s do not match: extensions mismatch' %
                    (self.path, self.csr_path or '(provided in module options)')
                )
        if self.signature_algorithms is not None:
            wrong_alg = self._validate_signature_algorithms()
            if wrong_alg:
                messages.append(
                    'Invalid signature algorithm (got %s, expected one of %s)' %
                    (wrong_alg, self.signature_algorithms)
                )
        if self.subject is not None:
            failure = self._validate_subject()
            if failure:
                dummy, cert_subject = failure
                messages.append(
                    'Invalid subject component (got %s, expected all of %s to be present)' %
                    (cert_subject, self.subject)
                )
        if self.issuer is not None:
            failure = self._validate_issuer()
            if failure:
                dummy, cert_issuer = failure
                messages.append(
                    'Invalid issuer component (got %s, expected all of %s to be present)' % (cert_issuer, self.issuer)
                )
        if self.has_expired is not None:
            cert_expired = self._validate_has_expired()
            if cert_expired != self.has_expired:
                messages.append(
                    'Certificate expiration check failed (certificate expiration is %s, expected %s)' %
                    (cert_expired, self.has_expired)
                )
        if self.version is not None:
            cert_version = self._validate_version()
            if cert_version != self.version:
                messages.append(
                    'Invalid certificate version number (got %s, expected %s)' %
                    (cert_version, self.version)
                )
        if self.key_usage is not None:
            failure = self._validate_key_usage()
            if failure == NO_EXTENSION:
                messages.append('Found no keyUsage extension')
            elif failure:
                dummy, cert_key_usage = failure
                messages.append(
                    'Invalid keyUsage components (got %s, expected all of %s to be present)' %
                    (cert_key_usage, self.key_usage)
                )
        if self.extended_key_usage is not None:
            failure = self._validate_extended_key_usage()
            if failure == NO_EXTENSION:
                messages.append('Found no extendedKeyUsage extension')
            elif failure:
                dummy, ext_cert_key_usage = failure
                messages.append(
                    'Invalid extendedKeyUsage component (got %s, expected all of %s to be present)' % (ext_cert_key_usage, self.extended_key_usage)
                )
        if self.subject_alt_name is not None:
            failure = self._validate_subject_alt_name()
            if failure == NO_EXTENSION:
                messages.append('Found no subjectAltName extension')
            elif failure:
                dummy, cert_san = failure
                messages.append(
                    'Invalid subjectAltName component (got %s, expected all of %s to be present)' %
                    (cert_san, self.subject_alt_name)
                )
        if self.not_before is not None:
            cert_not_valid_before = self._validate_not_before()
            if cert_not_valid_before != get_relative_time_option(self.not_before, 'not_before', backend=self.backend):
                messages.append(
                    'Invalid not_before component (got %s, expected %s to be present)' %
                    (cert_not_valid_before, self.not_before)
                )
        if self.not_after is not None:
            cert_not_valid_after = self._validate_not_after()
            if cert_not_valid_after != get_relative_time_option(self.not_after, 'not_after', backend=self.backend):
                messages.append(
                    'Invalid not_after component (got %s, expected %s to be present)' %
                    (cert_not_valid_after, self.not_after)
                )
        if self.valid_at is not None:
            not_before, valid_at, not_after = self._validate_valid_at()
            if not (not_before <= valid_at <= not_after):
                messages.append(
                    'Certificate is not valid for the specified date (%s) - not_before: %s - not_after: %s' %
                    (self.valid_at, not_before, not_after)
                )
        if self.invalid_at is not None:
            not_before, invalid_at, not_after = self._validate_invalid_at()
            if not_before <= invalid_at <= not_after:
                messages.append(
                    'Certificate is not invalid for the specified date (%s) - not_before: %s - not_after: %s' %
                    (self.invalid_at, not_before, not_after)
                )
        if self.valid_in is not None:
            not_before, valid_in, not_after = self._validate_valid_in()
            if not not_before <= valid_in <= not_after:
                messages.append(
                    'Certificate is not valid in %s from now (that would be %s) - not_before: %s - not_after: %s' %
                    (self.valid_in, valid_in, not_before, not_after)
                )
        return messages

    def generate(self, module):
        """Don't generate anything - only assert"""
        messages = self.assertonly(module)
        if messages:
            module.fail_json(msg=' | '.join(messages))

    def check(self, module, perms_required=False):
        """Ensure the resource is in its desired state."""
        messages = self.assertonly(module)
        return len(messages) == 0

    def dump(self, check_mode=False):
        # Minimal result: assertonly never changes anything on disk.
        result = {
            'changed': self.changed,
            'filename': self.path,
            'privatekey': self.privatekey_path,
            'csr': self.csr_path,
        }
        if self.return_content:
            content = load_file_if_exists(self.path, ignore_errors=True)
            result['certificate'] = content.decode('utf-8') if content else None
        return result
class AssertOnlyCertificateCryptography(AssertOnlyCertificateBase):
    """Validate the supplied cert, using the cryptography backend"""

    def __init__(self, module):
        super(AssertOnlyCertificateCryptography, self).__init__(module, 'cryptography')

    def _validate_privatekey(self):
        # Certificate and private key match iff their public keys are equal.
        return cryptography_compare_public_keys(self.cert.public_key(), self.privatekey.public_key())

    def _validate_csr_signature(self):
        # The CSR must be self-consistent and signed by the key that the
        # certificate certifies.
        if not self.csr.is_signature_valid:
            return False
        return cryptography_compare_public_keys(self.csr.public_key(), self.cert.public_key())

    def _validate_csr_subject(self):
        return self.csr.subject == self.cert.subject

    def _validate_csr_extensions(self):
        # Certificate and CSR must carry exactly the same extension set.
        cert_exts = self.cert.extensions
        csr_exts = self.csr.extensions
        if len(cert_exts) != len(csr_exts):
            return False
        for cert_ext in cert_exts:
            try:
                csr_ext = csr_exts.get_extension_for_oid(cert_ext.oid)
                if cert_ext != csr_ext:
                    return False
            except cryptography.x509.ExtensionNotFound as dummy:
                return False
        return True

    def _validate_signature_algorithms(self):
        # Returns the offending algorithm name, or None when acceptable.
        if self.cert.signature_algorithm_oid._name not in self.signature_algorithms:
            return self.cert.signature_algorithm_oid._name

    def _validate_subject(self):
        expected_subject = Name([NameAttribute(oid=cryptography_name_to_oid(sub[0]), value=to_text(sub[1]))
                                 for sub in self.subject])
        cert_subject = self.cert.subject
        if not compare_sets(expected_subject, cert_subject, self.subject_strict):
            return expected_subject, cert_subject

    def _validate_issuer(self):
        expected_issuer = Name([NameAttribute(oid=cryptography_name_to_oid(iss[0]), value=to_text(iss[1]))
                                for iss in self.issuer])
        cert_issuer = self.cert.issuer
        if not compare_sets(expected_issuer, cert_issuer, self.issuer_strict):
            return self.issuer, cert_issuer

    def _validate_has_expired(self):
        cert_not_after = self.cert.not_valid_after
        cert_expired = cert_not_after < datetime.datetime.utcnow()
        return cert_expired

    def _validate_version(self):
        # Map the cryptography Version enum to the module's one-based number.
        if self.cert.version == x509.Version.v1:
            return 1
        if self.cert.version == x509.Version.v3:
            return 3
        return "unknown"

    def _validate_key_usage(self):
        try:
            current_key_usage = self.cert.extensions.get_extension_for_class(x509.KeyUsage).value
            test_key_usage = dict(
                digital_signature=current_key_usage.digital_signature,
                content_commitment=current_key_usage.content_commitment,
                key_encipherment=current_key_usage.key_encipherment,
                data_encipherment=current_key_usage.data_encipherment,
                key_agreement=current_key_usage.key_agreement,
                key_cert_sign=current_key_usage.key_cert_sign,
                crl_sign=current_key_usage.crl_sign,
                encipher_only=False,
                decipher_only=False
            )
            # encipher_only/decipher_only may only be read when key_agreement
            # is set; cryptography raises ValueError otherwise.
            if test_key_usage['key_agreement']:
                test_key_usage.update(dict(
                    encipher_only=current_key_usage.encipher_only,
                    decipher_only=current_key_usage.decipher_only
                ))
            key_usages = cryptography_parse_key_usage_params(self.key_usage)
            if not compare_dicts(key_usages, test_key_usage, self.key_usage_strict):
                return self.key_usage, [k for k, v in test_key_usage.items() if v is True]
        except cryptography.x509.ExtensionNotFound:
            # This is only bad if the user specified a non-empty list
            if self.key_usage:
                return NO_EXTENSION

    def _validate_extended_key_usage(self):
        try:
            current_ext_keyusage = self.cert.extensions.get_extension_for_class(x509.ExtendedKeyUsage).value
            usages = [cryptography_name_to_oid(usage) for usage in self.extended_key_usage]
            expected_ext_keyusage = x509.ExtendedKeyUsage(usages)
            if not compare_sets(expected_ext_keyusage, current_ext_keyusage, self.extended_key_usage_strict):
                return [eku.value for eku in expected_ext_keyusage], [eku.value for eku in current_ext_keyusage]
        except cryptography.x509.ExtensionNotFound:
            # This is only bad if the user specified a non-empty list
            if self.extended_key_usage:
                return NO_EXTENSION

    def _validate_subject_alt_name(self):
        try:
            current_san = self.cert.extensions.get_extension_for_class(x509.SubjectAlternativeName).value
            expected_san = [cryptography_get_name(san) for san in self.subject_alt_name]
            if not compare_sets(expected_san, current_san, self.subject_alt_name_strict):
                return self.subject_alt_name, current_san
        except cryptography.x509.ExtensionNotFound:
            # This is only bad if the user specified a non-empty list
            if self.subject_alt_name:
                return NO_EXTENSION

    def _validate_not_before(self):
        return self.cert.not_valid_before

    def _validate_not_after(self):
        return self.cert.not_valid_after

    def _validate_valid_at(self):
        rt = get_relative_time_option(self.valid_at, 'valid_at', backend=self.backend)
        return self.cert.not_valid_before, rt, self.cert.not_valid_after

    def _validate_invalid_at(self):
        rt = get_relative_time_option(self.invalid_at, 'invalid_at', backend=self.backend)
        return self.cert.not_valid_before, rt, self.cert.not_valid_after

    def _validate_valid_in(self):
        valid_in_date = get_relative_time_option(self.valid_in, "valid_in", backend=self.backend)
        return self.cert.not_valid_before, valid_in_date, self.cert.not_valid_after
class AssertOnlyCertificate(AssertOnlyCertificateBase):
"""validate the supplied certificate."""
def __init__(self, module):
    super(AssertOnlyCertificate, self).__init__(module, 'pyopenssl')
    # Ensure inputs are properly sanitized before comparison.
    # pyOpenSSL works with byte strings, so convert all text-valued assertion
    # parameters (and the elements of their list/tuple/dict forms) to bytes.
    for param in ['signature_algorithms', 'key_usage', 'extended_key_usage',
                  'subject_alt_name', 'subject', 'issuer', 'not_before',
                  'not_after', 'valid_at', 'invalid_at']:
        attr = getattr(self, param)
        if isinstance(attr, list) and attr:
            if isinstance(attr[0], str):
                setattr(self, param, [to_bytes(item) for item in attr])
            elif isinstance(attr[0], tuple):
                setattr(self, param, [(to_bytes(item[0]), to_bytes(item[1])) for item in attr])
        elif isinstance(attr, tuple):
            # NOTE(review): a tuple has no .items(); this branch would raise
            # AttributeError if ever taken — presumably dead code, confirm.
            setattr(self, param, dict((to_bytes(k), to_bytes(v)) for (k, v) in attr.items()))
        elif isinstance(attr, dict):
            setattr(self, param, dict((to_bytes(k), to_bytes(v)) for (k, v) in attr.items()))
        elif isinstance(attr, str):
            setattr(self, param, to_bytes(attr))
def _validate_privatekey(self):
    # Use a throwaway SSL context to verify that certificate and private key
    # belong together; check_privatekey() raises OpenSSL.SSL.Error on
    # mismatch.
    ctx = OpenSSL.SSL.Context(OpenSSL.SSL.TLSv1_2_METHOD)
    ctx.use_privatekey(self.privatekey)
    ctx.use_certificate(self.cert)
    try:
        ctx.check_privatekey()
        return True
    except OpenSSL.SSL.Error:
        return False
def _validate_csr_signature(self):
    """Return True when the CSR's signature verifies against the
    certificate's public key, False otherwise."""
    try:
        self.csr.verify(self.cert.get_pubkey())
    except OpenSSL.crypto.Error:
        return False
    # Bug fix: the original fell through and implicitly returned None on
    # success; None is falsy, so assertonly() reported a spurious
    # "private key mismatch" even for matching CSRs.  The cryptography
    # backend's counterpart returns a proper boolean.
    return True
def _validate_csr_subject(self):
    """Return True when the CSR subject equals the certificate subject."""
    if self.csr.get_subject() != self.cert.get_subject():
        return False
    # Bug fix: explicitly return True on match.  The original implicitly
    # returned None (falsy), so assertonly() reported a spurious
    # "subject mismatch" even when the subjects were equal.
    return True
def _validate_csr_extensions(self):
    # Compare the full extension sets of certificate and CSR: same count, and
    # identical DER payload for every extension with a matching short name.
    csr_extensions = self.csr.get_extensions()
    cert_extension_count = self.cert.get_extension_count()
    if len(csr_extensions) != cert_extension_count:
        return False
    for extension_number in range(0, cert_extension_count):
        cert_extension = self.cert.get_extension(extension_number)
        csr_extension = filter(lambda extension: extension.get_short_name() == cert_extension.get_short_name(), csr_extensions)
        # NOTE(review): if the CSR has no extension with this short name,
        # list(csr_extension)[0] raises IndexError instead of returning
        # False — confirm whether that situation can occur in practice.
        if cert_extension.get_data() != list(csr_extension)[0].get_data():
            return False
    return True
def _validate_signature_algorithms(self):
    """Return the certificate's signature algorithm if it is not among the
    allowed ones; implicitly return None when it is acceptable."""
    algorithm = self.cert.get_signature_algorithm()
    if algorithm not in self.signature_algorithms:
        return algorithm
def _validate_subject(self):
    # Map expected and actual subject components to numeric NIDs so that
    # different spellings of the same attribute type compare equal.
    expected_subject = [(OpenSSL._util.lib.OBJ_txt2nid(sub[0]), sub[1]) for sub in self.subject]
    cert_subject = self.cert.get_subject().get_components()
    current_subject = [(OpenSSL._util.lib.OBJ_txt2nid(sub[0]), sub[1]) for sub in cert_subject]
    # Falsy (None) means the assertion holds; otherwise return diagnostics.
    if not compare_sets(expected_subject, current_subject, self.subject_strict):
        return expected_subject, current_subject
def _validate_issuer(self):
    # Map expected and actual issuer components to numeric NIDs so that
    # different spellings of the same attribute type compare equal.
    expected_issuer = [(OpenSSL._util.lib.OBJ_txt2nid(iss[0]), iss[1]) for iss in self.issuer]
    cert_issuer = self.cert.get_issuer().get_components()
    current_issuer = [(OpenSSL._util.lib.OBJ_txt2nid(iss[0]), iss[1]) for iss in cert_issuer]
    # Falsy (None) means the assertion holds; otherwise return diagnostics.
    if not compare_sets(expected_issuer, current_issuer, self.issuer_strict):
        return self.issuer, cert_issuer
def _validate_has_expired(self):
    """Return True when the certificate's notAfter lies in the past (UTC).

    Deliberately reimplements cert.has_expired(): older PyOpenSSL
    releases shipped a buggy version of that method, so the logic from a
    newer release is inlined here.
    """
    raw_not_after = to_native(self.cert.get_notAfter())
    not_after = datetime.datetime.strptime(raw_not_after, "%Y%m%d%H%M%SZ")
    return not_after < datetime.datetime.utcnow()
def _validate_version(self):
# Version numbers in certs are off by one:
# v1: 0, v2: 1, v3: 2 ...
return self.cert.get_version() + 1
def _validate_key_usage(self):
    """Check the certificate's keyUsage extension against self.key_usage.

    Returns (expected, actual) on a mismatch, NO_EXTENSION when the
    extension is missing but usages were requested, and None on success.
    """
    seen = False
    for idx in range(self.cert.get_extension_count()):
        ext = self.cert.get_extension(idx)
        if ext.get_short_name() != b'keyUsage':
            continue
        seen = True
        # Round-trip the expected usages through an X509Extension so both
        # sides use OpenSSL's canonical spelling before comparison.
        expected_ext = crypto.X509Extension(b"keyUsage", False, b', '.join(self.key_usage))
        expected = [u.strip() for u in to_text(expected_ext, errors='surrogate_or_strict').split(',')]
        actual = [u.strip() for u in to_text(ext, errors='surrogate_or_strict').split(',')]
        if not compare_sets(expected, actual, self.key_usage_strict):
            return self.key_usage, str(ext).split(', ')
    if not seen and self.key_usage:
        # Missing extension is only a problem if usages were requested.
        return NO_EXTENSION
def _validate_extended_key_usage(self):
    """Check the extendedKeyUsage extension against self.extended_key_usage.

    Both sides are mapped to OpenSSL NIDs before comparison. Returns
    (expected, actual) on mismatch, NO_EXTENSION when the extension is
    missing but usages were requested, and None on success.
    """
    seen = False
    for idx in range(self.cert.get_extension_count()):
        ext = self.cert.get_extension(idx)
        if ext.get_short_name() != b'extendedKeyUsage':
            continue
        seen = True
        expected_nids = [OpenSSL._util.lib.OBJ_txt2nid(usage) for usage in self.extended_key_usage]
        actual_nids = [OpenSSL._util.lib.OBJ_txt2nid(usage.strip()) for usage in
                       to_bytes(ext, errors='surrogate_or_strict').split(b',')]
        if not compare_sets(expected_nids, actual_nids, self.extended_key_usage_strict):
            return self.extended_key_usage, str(ext).split(', ')
    if not seen and self.extended_key_usage:
        # Missing extension is only a problem if usages were requested.
        return NO_EXTENSION
def _validate_subject_alt_name(self):
    """Check the subjectAltName extension against self.subject_alt_name.

    Names on both sides are normalised with
    pyopenssl_normalize_name_attribute before comparison. Returns
    (expected, actual) on mismatch, NO_EXTENSION when the extension is
    missing but names were requested, and None on success.
    """
    seen = False
    for idx in range(self.cert.get_extension_count()):
        ext = self.cert.get_extension(idx)
        if ext.get_short_name() != b'subjectAltName':
            continue
        seen = True
        actual = [pyopenssl_normalize_name_attribute(name.strip()) for name in
                  to_text(ext, errors='surrogate_or_strict').split(', ')]
        expected = [pyopenssl_normalize_name_attribute(to_text(san, errors='surrogate_or_strict'))
                    for san in self.subject_alt_name]
        if not compare_sets(expected, actual, self.subject_alt_name_strict):
            return self.subject_alt_name, actual
    if not seen and self.subject_alt_name:
        # Missing extension is only a problem if names were requested.
        return NO_EXTENSION
def _validate_not_before(self):
    # Return the certificate's notBefore timestamp exactly as PyOpenSSL
    # reports it (raw ASN.1 GENERALIZEDTIME bytes) for the caller to check.
    return self.cert.get_notBefore()
def _validate_not_after(self):
    # Return the certificate's notAfter timestamp exactly as PyOpenSSL
    # reports it (raw ASN.1 GENERALIZEDTIME bytes) for the caller to check.
    return self.cert.get_notAfter()
def _validate_valid_at(self):
    """Resolve the 'valid_at' option to an ASN.1 timestamp and return it
    together with the certificate's validity window as
    (notBefore, point_in_time, notAfter) for the caller to evaluate."""
    point_in_time = to_bytes(
        get_relative_time_option(self.valid_at, "valid_at", backend=self.backend),
        errors='surrogate_or_strict',
    )
    return self.cert.get_notBefore(), point_in_time, self.cert.get_notAfter()
def _validate_invalid_at(self):
    """Resolve the 'invalid_at' option to an ASN.1 timestamp and return it
    together with the certificate's validity window as
    (notBefore, point_in_time, notAfter) for the caller to evaluate."""
    point_in_time = to_bytes(
        get_relative_time_option(self.invalid_at, "invalid_at", backend=self.backend),
        errors='surrogate_or_strict',
    )
    return self.cert.get_notBefore(), point_in_time, self.cert.get_notAfter()
def _validate_valid_in(self):
    """Resolve the 'valid_in' option to an ASN.1 timestamp and return it
    together with the certificate's validity window as
    (notBefore, point_in_time, notAfter) for the caller to evaluate."""
    point_in_time = to_bytes(
        get_relative_time_option(self.valid_in, "valid_in", backend=self.backend),
        errors='surrogate_or_strict',
    )
    return self.cert.get_notBefore(), point_in_time, self.cert.get_notAfter()
class EntrustCertificate(Certificate):
    """Retrieve a certificate using Entrust Certificate Services (ECS)."""

    def __init__(self, module, backend):
        super(EntrustCertificate, self).__init__(module, backend)
        self.trackingId = None
        self.notAfter = get_relative_time_option(module.params['entrust_not_after'], 'entrust_not_after', backend=self.backend)

        # A CSR must be supplied either inline (csr_content) or as a file
        # (csr_path). The previous check used 'or', which rejected a
        # csr_path-only invocation even when the file existed, and could
        # raise TypeError (os.path.exists(None)) for content-only
        # invocations. 'and' mirrors the equivalent check in
        # AcmeCertificate.generate().
        if self.csr_content is None and not os.path.exists(self.csr_path):
            raise CertificateError(
                'The certificate signing request file {0} does not exist'.format(self.csr_path)
            )

        self.csr = load_certificate_request(
            path=self.csr_path,
            content=self.csr_content,
            backend=self.backend,
        )

        # ECS API defaults to using the validated organization tied to the account.
        # We want to always force behavior of trying to use the organization provided in the CSR.
        # To that end we need to parse out the organization from the CSR.
        self.csr_org = None
        if self.backend == 'pyopenssl':
            csr_subject = self.csr.get_subject()
            csr_subject_components = csr_subject.get_components()
            # NOTE(review): pyOpenSSL returns component names as bytes on
            # Python 3, so k.upper() == 'O' may never match there — confirm
            # which Python versions this branch must support.
            for k, v in csr_subject_components:
                if k.upper() == 'O':
                    # Entrust does not support multiple validated organizations in a single certificate
                    if self.csr_org is not None:
                        module.fail_json(msg=("Entrust provider does not currently support multiple validated organizations. Multiple organizations found in "
                                              "Subject DN: '{0}'. ".format(csr_subject)))
                    else:
                        self.csr_org = v
        elif self.backend == 'cryptography':
            csr_subject_orgs = self.csr.subject.get_attributes_for_oid(NameOID.ORGANIZATION_NAME)
            if len(csr_subject_orgs) == 1:
                self.csr_org = csr_subject_orgs[0].value
            elif len(csr_subject_orgs) > 1:
                module.fail_json(msg=("Entrust provider does not currently support multiple validated organizations. Multiple organizations found in "
                                      "Subject DN: '{0}'. ".format(self.csr.subject)))
        # If no organization in the CSR, explicitly tell ECS that it should be blank in issued cert, not defaulted to
        # organization tied to the account.
        if self.csr_org is None:
            self.csr_org = ''

        try:
            self.ecs_client = ECSClient(
                entrust_api_user=module.params.get('entrust_api_user'),
                entrust_api_key=module.params.get('entrust_api_key'),
                entrust_api_cert=module.params.get('entrust_api_client_cert_path'),
                entrust_api_cert_key=module.params.get('entrust_api_client_cert_key_path'),
                entrust_api_specification_path=module.params.get('entrust_api_specification_path')
            )
        except SessionConfigurationException as e:
            module.fail_json(msg='Failed to initialize Entrust Provider: {0}'.format(to_native(e.message)))

    def generate(self, module):
        """Request a new certificate from ECS and write it to self.path.

        Only acts when the existing certificate fails self.check() or
        force is set; sets self.changed accordingly.
        """
        if not self.check(module, perms_required=False) or self.force:
            # Read the CSR that was generated for us
            body = {}
            if self.csr_content is not None:
                body['csr'] = self.csr_content
            else:
                with open(self.csr_path, 'r') as csr_file:
                    body['csr'] = csr_file.read()

            body['certType'] = module.params['entrust_cert_type']

            # Handle expiration (365 days if not specified).
            # (The comment previously said 30 days, contradicting the code.)
            expiry = self.notAfter
            if not expiry:
                # Round-trip through time.gmtime() to obtain "now" in UTC
                # as a naive datetime.
                gmt_now = datetime.datetime.fromtimestamp(time.mktime(time.gmtime()))
                expiry = gmt_now + datetime.timedelta(days=365)

            expiry_iso3339 = expiry.strftime("%Y-%m-%dT%H:%M:%S.00Z")
            body['certExpiryDate'] = expiry_iso3339
            body['org'] = self.csr_org
            body['tracking'] = {
                'requesterName': module.params['entrust_requester_name'],
                'requesterEmail': module.params['entrust_requester_email'],
                'requesterPhone': module.params['entrust_requester_phone'],
            }

            try:
                result = self.ecs_client.NewCertRequest(Body=body)
                self.trackingId = result.get('trackingId')
            except RestOperationException as e:
                module.fail_json(msg='Failed to request new certificate from Entrust Certificate Services (ECS): {0}'.format(to_native(e.message)))

            if self.backup:
                self.backup_file = module.backup_local(self.path)
            write_file(module, to_bytes(result.get('endEntityCert')))
            self.cert = load_certificate(self.path, backend=self.backend)
            self.changed = True

    def check(self, module, perms_required=True):
        """Ensure the resource is in its desired state."""
        parent_check = super(EntrustCertificate, self).check(module, perms_required)

        try:
            cert_details = self._get_cert_details()
        except RestOperationException as e:
            module.fail_json(msg='Failed to get status of existing certificate from Entrust Certificate Services (ECS): {0}.'.format(to_native(e.message)))

        # Always issue a new certificate if the certificate is expired, suspended or revoked
        status = cert_details.get('status', False)
        if status == 'EXPIRED' or status == 'SUSPENDED' or status == 'REVOKED':
            return False

        # If the requested cert type was specified and it is for a different certificate type than the initial certificate, a new one is needed
        if module.params['entrust_cert_type'] and cert_details.get('certType') and module.params['entrust_cert_type'] != cert_details.get('certType'):
            return False

        return parent_check

    def _get_cert_details(self):
        """Return a dict of ECS-side details for the current certificate.

        Combines locally computed data (serial number, expiry) with the
        record fetched from ECS via the tracking ID, when resolvable.
        """
        cert_details = {}
        if self.cert:
            serial_number = None
            expiry = None
            if self.backend == 'pyopenssl':
                serial_number = "{0:X}".format(self.cert.get_serial_number())
                time_string = to_native(self.cert.get_notAfter())
                expiry = datetime.datetime.strptime(time_string, "%Y%m%d%H%M%SZ")
            elif self.backend == 'cryptography':
                serial_number = "{0:X}".format(cryptography_serial_number_of_cert(self.cert))
                expiry = self.cert.not_valid_after

            # get some information about the expiry of this certificate
            expiry_iso3339 = expiry.strftime("%Y-%m-%dT%H:%M:%S.00Z")
            cert_details['expiresAfter'] = expiry_iso3339

            # If a trackingId is not already defined (from the result of a generate)
            # use the serial number to identify the tracking Id
            if self.trackingId is None and serial_number is not None:
                cert_results = self.ecs_client.GetCertificates(serialNumber=serial_number).get('certificates', {})

                # Finding 0 or more than 1 result is a very unlikely use case, it simply means we cannot perform additional checks
                # on the 'state' as returned by Entrust Certificate Services (ECS). The general certificate validity is
                # still checked as it is in the rest of the module.
                if len(cert_results) == 1:
                    self.trackingId = cert_results[0].get('trackingId')

        if self.trackingId is not None:
            cert_details.update(self.ecs_client.GetCertificate(trackingId=self.trackingId))

        return cert_details

    def dump(self, check_mode=False):
        """Return the module result dict, including ECS certificate details."""
        result = {
            'changed': self.changed,
            'filename': self.path,
            'privatekey': self.privatekey_path,
            'csr': self.csr_path,
        }
        if self.backup_file:
            result['backup_file'] = self.backup_file
        if self.return_content:
            content = load_file_if_exists(self.path, ignore_errors=True)
            result['certificate'] = content.decode('utf-8') if content else None

        result.update(self._get_cert_details())
        return result
class AcmeCertificate(Certificate):
    """Retrieve a certificate using the ACME protocol (via acme-tiny)."""

    # Since there's no real use of the backend,
    # other than the 'self.check' function, we just pass the backend to the constructor
    def __init__(self, module, backend):
        super(AcmeCertificate, self).__init__(module, backend)
        self.accountkey_path = module.params['acme_accountkey_path']
        self.challenge_path = module.params['acme_challenge_path']
        self.use_chain = module.params['acme_chain']
        self.acme_directory = module.params['acme_directory']

    def generate(self, module):
        """Obtain a certificate by invoking the acme-tiny CLI tool.

        Validates that the CSR, account key and challenge directory exist,
        builds the acme-tiny command line, runs it, and writes the
        resulting certificate to self.path.
        """
        if self.csr_content is None and not os.path.exists(self.csr_path):
            raise CertificateError(
                'The certificate signing request file %s does not exist' % self.csr_path
            )

        if not os.path.exists(self.accountkey_path):
            raise CertificateError(
                'The account key %s does not exist' % self.accountkey_path
            )

        if not os.path.exists(self.challenge_path):
            raise CertificateError(
                'The challenge path %s does not exist' % self.challenge_path
            )

        if not self.check(module, perms_required=False) or self.force:
            # Consistency: every other call in this method uses the
            # 'module' parameter, so use it here too (was self.module).
            acme_tiny_path = module.get_bin_path('acme-tiny', required=True)
            command = [acme_tiny_path]

            if self.use_chain:
                command.append('--chain')
            command.extend(['--account-key', self.accountkey_path])
            if self.csr_content is not None:
                # We need to temporarily write the CSR to disk
                fd, tmpsrc = tempfile.mkstemp()
                module.add_cleanup_file(tmpsrc)  # Ansible will delete the file on exit
                f = os.fdopen(fd, 'wb')
                try:
                    # NOTE(review): the handle is binary; assumes csr_content
                    # was converted to bytes upstream — confirm in base class.
                    f.write(self.csr_content)
                except Exception as err:
                    try:
                        f.close()
                    except Exception as dummy:
                        pass
                    module.fail_json(
                        msg="failed to create temporary CSR file: %s" % to_native(err),
                        exception=traceback.format_exc()
                    )
                f.close()
                command.extend(['--csr', tmpsrc])
            else:
                command.extend(['--csr', self.csr_path])
            command.extend(['--acme-dir', self.challenge_path])
            command.extend(['--directory-url', self.acme_directory])

            try:
                crt = module.run_command(command, check_rc=True)[1]
                if self.backup:
                    self.backup_file = module.backup_local(self.path)
                write_file(module, to_bytes(crt))
                self.changed = True
            except OSError as exc:
                raise CertificateError(exc)

        file_args = module.load_file_common_arguments(module.params)
        if module.set_fs_attributes_if_different(file_args, False):
            self.changed = True

    def dump(self, check_mode=False):
        """Return the module result dict for the ACME provider."""
        result = {
            'changed': self.changed,
            'filename': self.path,
            'privatekey': self.privatekey_path,
            'accountkey': self.accountkey_path,
            'csr': self.csr_path,
        }
        if self.backup_file:
            result['backup_file'] = self.backup_file
        if self.return_content:
            content = load_file_if_exists(self.path, ignore_errors=True)
            result['certificate'] = content.decode('utf-8') if content else None

        return result
def main():
    """Module entry point: parse arguments, pick a crypto backend and a
    provider implementation, then generate/validate/remove the certificate
    and exit with the resulting facts."""
    module = AnsibleModule(
        argument_spec=dict(
            state=dict(type='str', default='present', choices=['present', 'absent']),
            path=dict(type='path', required=True),
            provider=dict(type='str', choices=['acme', 'assertonly', 'entrust', 'ownca', 'selfsigned']),
            force=dict(type='bool', default=False,),
            csr_path=dict(type='path'),
            csr_content=dict(type='str'),
            backup=dict(type='bool', default=False),
            select_crypto_backend=dict(type='str', default='auto', choices=['auto', 'cryptography', 'pyopenssl']),
            return_content=dict(type='bool', default=False),

            # General properties of a certificate
            privatekey_path=dict(type='path'),
            privatekey_content=dict(type='str'),
            privatekey_passphrase=dict(type='str', no_log=True),

            # provider: assertonly
            # (all options below are deprecated and removed in community.crypto 2.0.0)
            signature_algorithms=dict(type='list', elements='str', removed_in_version='2.0.0', removed_from_collection='community.crypto'),
            subject=dict(type='dict', removed_in_version='2.0.0', removed_from_collection='community.crypto'),
            subject_strict=dict(type='bool', default=False, removed_in_version='2.0.0', removed_from_collection='community.crypto'),
            issuer=dict(type='dict', removed_in_version='2.0.0', removed_from_collection='community.crypto'),
            issuer_strict=dict(type='bool', default=False, removed_in_version='2.0.0', removed_from_collection='community.crypto'),
            has_expired=dict(type='bool', default=False, removed_in_version='2.0.0', removed_from_collection='community.crypto'),
            version=dict(type='int', removed_in_version='2.0.0', removed_from_collection='community.crypto'),
            key_usage=dict(type='list', elements='str', aliases=['keyUsage'],
                           removed_in_version='2.0.0', removed_from_collection='community.crypto'),
            key_usage_strict=dict(type='bool', default=False, aliases=['keyUsage_strict'],
                                  removed_in_version='2.0.0', removed_from_collection='community.crypto'),
            extended_key_usage=dict(type='list', elements='str', aliases=['extendedKeyUsage'],
                                    removed_in_version='2.0.0', removed_from_collection='community.crypto'),
            extended_key_usage_strict=dict(type='bool', default=False, aliases=['extendedKeyUsage_strict'],
                                           removed_in_version='2.0.0', removed_from_collection='community.crypto'),
            subject_alt_name=dict(type='list', elements='str', aliases=['subjectAltName'],
                                  removed_in_version='2.0.0', removed_from_collection='community.crypto'),
            subject_alt_name_strict=dict(type='bool', default=False, aliases=['subjectAltName_strict'],
                                         removed_in_version='2.0.0', removed_from_collection='community.crypto'),
            not_before=dict(type='str', aliases=['notBefore'], removed_in_version='2.0.0', removed_from_collection='community.crypto'),
            not_after=dict(type='str', aliases=['notAfter'], removed_in_version='2.0.0', removed_from_collection='community.crypto'),
            valid_at=dict(type='str', removed_in_version='2.0.0', removed_from_collection='community.crypto'),
            invalid_at=dict(type='str', removed_in_version='2.0.0', removed_from_collection='community.crypto'),
            valid_in=dict(type='str', removed_in_version='2.0.0', removed_from_collection='community.crypto'),

            # provider: selfsigned
            selfsigned_version=dict(type='int', default=3),
            selfsigned_digest=dict(type='str', default='sha256'),
            selfsigned_not_before=dict(type='str', default='+0s', aliases=['selfsigned_notBefore']),
            selfsigned_not_after=dict(type='str', default='+3650d', aliases=['selfsigned_notAfter']),
            selfsigned_create_subject_key_identifier=dict(
                type='str',
                default='create_if_not_provided',
                choices=['create_if_not_provided', 'always_create', 'never_create']
            ),

            # provider: ownca
            ownca_path=dict(type='path'),
            ownca_content=dict(type='str'),
            ownca_privatekey_path=dict(type='path'),
            ownca_privatekey_content=dict(type='str'),
            ownca_privatekey_passphrase=dict(type='str', no_log=True),
            ownca_digest=dict(type='str', default='sha256'),
            ownca_version=dict(type='int', default=3),
            ownca_not_before=dict(type='str', default='+0s'),
            ownca_not_after=dict(type='str', default='+3650d'),
            ownca_create_subject_key_identifier=dict(
                type='str',
                default='create_if_not_provided',
                choices=['create_if_not_provided', 'always_create', 'never_create']
            ),
            ownca_create_authority_key_identifier=dict(type='bool', default=True),

            # provider: acme
            acme_accountkey_path=dict(type='path'),
            acme_challenge_path=dict(type='path'),
            acme_chain=dict(type='bool', default=False),
            acme_directory=dict(type='str', default="https://acme-v02.api.letsencrypt.org/directory"),

            # provider: entrust
            entrust_cert_type=dict(type='str', default='STANDARD_SSL',
                                   choices=['STANDARD_SSL', 'ADVANTAGE_SSL', 'UC_SSL', 'EV_SSL', 'WILDCARD_SSL',
                                            'PRIVATE_SSL', 'PD_SSL', 'CDS_ENT_LITE', 'CDS_ENT_PRO', 'SMIME_ENT']),
            entrust_requester_email=dict(type='str'),
            entrust_requester_name=dict(type='str'),
            entrust_requester_phone=dict(type='str'),
            entrust_api_user=dict(type='str'),
            entrust_api_key=dict(type='str', no_log=True),
            entrust_api_client_cert_path=dict(type='path'),
            entrust_api_client_cert_key_path=dict(type='path', no_log=True),
            entrust_api_specification_path=dict(type='path', default='https://cloud.entrust.net/EntrustCloud/documentation/cms-api-2.1.0.yaml'),
            entrust_not_after=dict(type='str', default='+365d'),
        ),
        supports_check_mode=True,
        add_file_common_args=True,
        required_if=[
            ['state', 'present', ['provider']],
            ['provider', 'entrust', ['entrust_requester_email', 'entrust_requester_name', 'entrust_requester_phone',
                                     'entrust_api_user', 'entrust_api_key', 'entrust_api_client_cert_path',
                                     'entrust_api_client_cert_key_path']],
        ],
        mutually_exclusive=[
            ['csr_path', 'csr_content'],
            ['privatekey_path', 'privatekey_content'],
            ['ownca_path', 'ownca_content'],
            ['ownca_privatekey_path', 'ownca_privatekey_content'],
        ],
    )

    # The module was invoked under its legacy name; warn about the rename.
    if module._name == 'community.crypto.openssl_certificate':
        module.deprecate("The 'community.crypto.openssl_certificate' module has been renamed to 'community.crypto.x509_certificate'",
                         version='2.0.0', collection_name='community.crypto')

    try:
        if module.params['state'] == 'absent':
            certificate = CertificateAbsent(module)

        else:
            # Every provider except assertonly needs a CSR to work from.
            if module.params['provider'] != 'assertonly' and module.params['csr_path'] is None and module.params['csr_content'] is None:
                module.fail_json(msg='csr_path or csr_content is required when provider is not assertonly')

            base_dir = os.path.dirname(module.params['path']) or '.'
            if not os.path.isdir(base_dir):
                module.fail_json(
                    name=base_dir,
                    msg='The directory %s does not exist or the file is not a directory' % base_dir
                )

            provider = module.params['provider']
            # Per-provider option validation before any backend is selected.
            if provider == 'assertonly':
                module.deprecate("The 'assertonly' provider is deprecated; please see the examples of "
                                 "the 'x509_certificate' module on how to replace it with other modules",
                                 version='2.0.0', collection_name='community.crypto')
            elif provider == 'selfsigned':
                if module.params['privatekey_path'] is None and module.params['privatekey_content'] is None:
                    module.fail_json(msg='One of privatekey_path and privatekey_content must be specified for the selfsigned provider.')
            elif provider == 'acme':
                if module.params['acme_accountkey_path'] is None:
                    module.fail_json(msg='The acme_accountkey_path option must be specified for the acme provider.')
                if module.params['acme_challenge_path'] is None:
                    module.fail_json(msg='The acme_challenge_path option must be specified for the acme provider.')
            elif provider == 'ownca':
                if module.params['ownca_path'] is None and module.params['ownca_content'] is None:
                    module.fail_json(msg='One of ownca_path and ownca_content must be specified for the ownca provider.')
                if module.params['ownca_privatekey_path'] is None and module.params['ownca_privatekey_content'] is None:
                    module.fail_json(msg='One of ownca_privatekey_path and ownca_privatekey_content must be specified for the ownca provider.')

            backend = module.params['select_crypto_backend']
            if backend == 'auto':
                # Detect what backend we can use
                can_use_cryptography = CRYPTOGRAPHY_FOUND and CRYPTOGRAPHY_VERSION >= LooseVersion(MINIMAL_CRYPTOGRAPHY_VERSION)
                can_use_pyopenssl = PYOPENSSL_FOUND and PYOPENSSL_VERSION >= LooseVersion(MINIMAL_PYOPENSSL_VERSION)

                # If cryptography is available we'll use it
                if can_use_cryptography:
                    backend = 'cryptography'
                elif can_use_pyopenssl:
                    backend = 'pyopenssl'

                # v2 certificates are only supported by the pyopenssl backend.
                if module.params['selfsigned_version'] == 2 or module.params['ownca_version'] == 2:
                    module.warn('crypto backend forced to pyopenssl. The cryptography library does not support v2 certificates')
                    backend = 'pyopenssl'

                # Fail if no backend has been found
                if backend == 'auto':
                    module.fail_json(msg=("Can't detect any of the required Python libraries "
                                          "cryptography (>= {0}) or PyOpenSSL (>= {1})").format(
                                              MINIMAL_CRYPTOGRAPHY_VERSION,
                                              MINIMAL_PYOPENSSL_VERSION))

            if backend == 'pyopenssl':
                if not PYOPENSSL_FOUND:
                    module.fail_json(msg=missing_required_lib('pyOpenSSL >= {0}'.format(MINIMAL_PYOPENSSL_VERSION)),
                                     exception=PYOPENSSL_IMP_ERR)
                if module.params['provider'] in ['selfsigned', 'ownca', 'assertonly']:
                    # X509Req.get_extensions only exists in PyOpenSSL >= 0.15.
                    try:
                        getattr(crypto.X509Req, 'get_extensions')
                    except AttributeError:
                        module.fail_json(msg='You need to have PyOpenSSL>=0.15')

                module.deprecate('The module is using the PyOpenSSL backend. This backend has been deprecated',
                                 version='2.0.0', collection_name='community.crypto')
                if provider == 'selfsigned':
                    certificate = SelfSignedCertificate(module)
                elif provider == 'acme':
                    certificate = AcmeCertificate(module, 'pyopenssl')
                elif provider == 'ownca':
                    certificate = OwnCACertificate(module)
                elif provider == 'entrust':
                    certificate = EntrustCertificate(module, 'pyopenssl')
                else:
                    certificate = AssertOnlyCertificate(module)
            elif backend == 'cryptography':
                if not CRYPTOGRAPHY_FOUND:
                    module.fail_json(msg=missing_required_lib('cryptography >= {0}'.format(MINIMAL_CRYPTOGRAPHY_VERSION)),
                                     exception=CRYPTOGRAPHY_IMP_ERR)
                if module.params['selfsigned_version'] == 2 or module.params['ownca_version'] == 2:
                    module.fail_json(msg='The cryptography backend does not support v2 certificates, '
                                         'use select_crypto_backend=pyopenssl for v2 certificates')
                if provider == 'selfsigned':
                    certificate = SelfSignedCertificateCryptography(module)
                elif provider == 'acme':
                    certificate = AcmeCertificate(module, 'cryptography')
                elif provider == 'ownca':
                    certificate = OwnCACertificateCryptography(module)
                elif provider == 'entrust':
                    certificate = EntrustCertificate(module, 'cryptography')
                else:
                    certificate = AssertOnlyCertificateCryptography(module)

        if module.params['state'] == 'present':
            if module.check_mode:
                result = certificate.dump(check_mode=True)
                result['changed'] = module.params['force'] or not certificate.check(module)
                module.exit_json(**result)

            certificate.generate(module)
        else:
            if module.check_mode:
                result = certificate.dump(check_mode=True)
                result['changed'] = os.path.exists(module.params['path'])
                module.exit_json(**result)

            certificate.remove(module)

        result = certificate.dump()
        module.exit_json(**result)
    except OpenSSLObjectError as exc:
        module.fail_json(msg=to_native(exc))


if __name__ == "__main__":
    main()
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2016-2017, Yanis Guenane <yanis+ansible@guenane.org>
# Copyright: (c) 2017, Markus Teufelberger <mteufelberger+ansible@mgit.at>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
module: x509_certificate
short_description: Generate and/or check OpenSSL certificates
description:
- This module allows one to (re)generate OpenSSL certificates.
- It implements a notion of provider (ie. C(selfsigned), C(ownca), C(acme), C(assertonly), C(entrust))
for your certificate.
- The C(assertonly) provider is intended for use cases where one is only interested in
checking properties of a supplied certificate. Please note that this provider has been
deprecated in Ansible 2.9 and will be removed in community.crypto 2.0.0. See the examples on how
to emulate C(assertonly) usage with M(community.crypto.x509_certificate_info),
M(community.crypto.openssl_csr_info), M(community.crypto.openssl_privatekey_info) and
M(ansible.builtin.assert). This also allows more flexible checks than
the ones offered by the C(assertonly) provider.
- The C(ownca) provider is intended for generating OpenSSL certificate signed with your own
CA (Certificate Authority) certificate (self-signed certificate).
- Many properties that can be specified in this module are for validation of an
existing or newly generated certificate. The proper place to specify them, if you
want to receive a certificate with these properties is a CSR (Certificate Signing Request).
- "Please note that the module regenerates existing certificate if it doesn't match the module's
options, or if it seems to be corrupt. If you are concerned that this could overwrite
your existing certificate, consider using the I(backup) option."
- It uses the pyOpenSSL or cryptography python library to interact with OpenSSL.
- If both the cryptography and PyOpenSSL libraries are available (and meet the minimum version requirements)
cryptography will be preferred as a backend over PyOpenSSL (unless the backend is forced with C(select_crypto_backend)).
Please note that the PyOpenSSL backend was deprecated in Ansible 2.9 and will be removed in community.crypto 2.0.0.
- Note that this module was called C(openssl_certificate) when included directly in Ansible up to version 2.9.
When moved to the collection C(community.crypto), it was renamed to
M(community.crypto.x509_certificate). From Ansible 2.10 on, it can still be used by the
old short name (or by C(ansible.builtin.openssl_certificate)), which redirects to
C(community.crypto.x509_certificate). When using FQCNs or when using the
L(collections,https://docs.ansible.com/ansible/latest/user_guide/collections_using.html#using-collections-in-a-playbook)
keyword, the new name M(community.crypto.x509_certificate) should be used to avoid
a deprecation warning.
requirements:
- PyOpenSSL >= 0.15 or cryptography >= 1.6 (if using C(selfsigned) or C(assertonly) provider)
- acme-tiny >= 4.0.0 (if using the C(acme) provider)
author:
- Yanis Guenane (@Spredzy)
- Markus Teufelberger (@MarkusTeufelberger)
options:
state:
description:
- Whether the certificate should exist or not, taking action if the state is different from what is stated.
type: str
default: present
choices: [ absent, present ]
path:
description:
- Remote absolute path where the generated certificate file should be created or is already located.
type: path
required: true
provider:
description:
- Name of the provider to use to generate/retrieve the OpenSSL certificate.
- The C(assertonly) provider will not generate files and fail if the certificate file is missing.
- The C(assertonly) provider has been deprecated in Ansible 2.9 and will be removed in community.crypto 2.0.0.
Please see the examples on how to emulate it with
M(community.crypto.x509_certificate_info), M(community.crypto.openssl_csr_info),
M(community.crypto.openssl_privatekey_info) and M(ansible.builtin.assert).
- "The C(entrust) provider was added for Ansible 2.9 and requires credentials for the
L(Entrust Certificate Services,https://www.entrustdatacard.com/products/categories/ssl-certificates) (ECS) API."
- Required if I(state) is C(present).
type: str
choices: [ acme, assertonly, entrust, ownca, selfsigned ]
force:
description:
- Generate the certificate, even if it already exists.
type: bool
default: no
csr_path:
description:
- Path to the Certificate Signing Request (CSR) used to generate this certificate.
- This is not required in C(assertonly) mode.
- This is mutually exclusive with I(csr_content).
type: path
csr_content:
description:
- Content of the Certificate Signing Request (CSR) used to generate this certificate.
- This is not required in C(assertonly) mode.
- This is mutually exclusive with I(csr_path).
type: str
version_added: '1.0.0'
privatekey_path:
description:
- Path to the private key to use when signing the certificate.
- This is mutually exclusive with I(privatekey_content).
type: path
privatekey_content:
description:
- Path to the private key to use when signing the certificate.
- This is mutually exclusive with I(privatekey_path).
type: str
version_added: '1.0.0'
privatekey_passphrase:
description:
- The passphrase for the I(privatekey_path) resp. I(privatekey_content).
- This is required if the private key is password protected.
type: str
selfsigned_version:
description:
- Version of the C(selfsigned) certificate.
- Nowadays it should almost always be C(3).
- This is only used by the C(selfsigned) provider.
type: int
default: 3
selfsigned_digest:
description:
- Digest algorithm to be used when self-signing the certificate.
- This is only used by the C(selfsigned) provider.
type: str
default: sha256
selfsigned_not_before:
description:
- The point in time the certificate is valid from.
- Time can be specified either as relative time or as absolute timestamp.
- Time will always be interpreted as UTC.
- Valid format is C([+-]timespec | ASN.1 TIME) where timespec can be an integer
          + C([w | d | h | m | s]) (e.g. C(+32w1d2h)).
- Note that if using relative time this module is NOT idempotent.
- If this value is not specified, the certificate will start being valid from now.
- This is only used by the C(selfsigned) provider.
type: str
default: +0s
aliases: [ selfsigned_notBefore ]
selfsigned_not_after:
description:
- The point in time at which the certificate stops being valid.
- Time can be specified either as relative time or as absolute timestamp.
- Time will always be interpreted as UTC.
- Valid format is C([+-]timespec | ASN.1 TIME) where timespec can be an integer
          + C([w | d | h | m | s]) (e.g. C(+32w1d2h)).
- Note that if using relative time this module is NOT idempotent.
- If this value is not specified, the certificate will stop being valid 10 years from now.
- This is only used by the C(selfsigned) provider.
- On macOS 10.15 and onwards, TLS server certificates must have a validity period of 825 days or fewer.
Please see U(https://support.apple.com/en-us/HT210176) for more details.
type: str
default: +3650d
aliases: [ selfsigned_notAfter ]
selfsigned_create_subject_key_identifier:
description:
- Whether to create the Subject Key Identifier (SKI) from the public key.
- A value of C(create_if_not_provided) (default) only creates a SKI when the CSR does not
provide one.
- A value of C(always_create) always creates a SKI. If the CSR provides one, that one is
ignored.
- A value of C(never_create) never creates a SKI. If the CSR provides one, that one is used.
- This is only used by the C(selfsigned) provider.
- Note that this is only supported if the C(cryptography) backend is used!
type: str
choices: [create_if_not_provided, always_create, never_create]
default: create_if_not_provided
ownca_path:
description:
- Remote absolute path of the CA (Certificate Authority) certificate.
- This is only used by the C(ownca) provider.
- This is mutually exclusive with I(ownca_content).
type: path
ownca_content:
description:
- Content of the CA (Certificate Authority) certificate.
- This is only used by the C(ownca) provider.
- This is mutually exclusive with I(ownca_path).
type: str
version_added: '1.0.0'
ownca_privatekey_path:
description:
- Path to the CA (Certificate Authority) private key to use when signing the certificate.
- This is only used by the C(ownca) provider.
- This is mutually exclusive with I(ownca_privatekey_content).
type: path
ownca_privatekey_content:
description:
- Path to the CA (Certificate Authority) private key to use when signing the certificate.
- This is only used by the C(ownca) provider.
- This is mutually exclusive with I(ownca_privatekey_path).
type: str
version_added: '1.0.0'
ownca_privatekey_passphrase:
description:
- The passphrase for the I(ownca_privatekey_path) resp. I(ownca_privatekey_content).
- This is only used by the C(ownca) provider.
type: str
ownca_digest:
description:
- The digest algorithm to be used for the C(ownca) certificate.
- This is only used by the C(ownca) provider.
type: str
default: sha256
ownca_version:
description:
- The version of the C(ownca) certificate.
- Nowadays it should almost always be C(3).
- This is only used by the C(ownca) provider.
type: int
default: 3
ownca_not_before:
description:
- The point in time the certificate is valid from.
- Time can be specified either as relative time or as absolute timestamp.
- Time will always be interpreted as UTC.
- Valid format is C([+-]timespec | ASN.1 TIME) where timespec can be an integer
+ C([w | d | h | m | s]) (e.g. C(+32w1d2h).
- Note that if using relative time this module is NOT idempotent.
- If this value is not specified, the certificate will start being valid from now.
- This is only used by the C(ownca) provider.
type: str
default: +0s
ownca_not_after:
description:
- The point in time at which the certificate stops being valid.
- Time can be specified either as relative time or as absolute timestamp.
- Time will always be interpreted as UTC.
- Valid format is C([+-]timespec | ASN.1 TIME) where timespec can be an integer
+ C([w | d | h | m | s]) (e.g. C(+32w1d2h).
- Note that if using relative time this module is NOT idempotent.
- If this value is not specified, the certificate will stop being valid 10 years from now.
- This is only used by the C(ownca) provider.
- On macOS 10.15 and onwards, TLS server certificates must have a validity period of 825 days or fewer.
Please see U(https://support.apple.com/en-us/HT210176) for more details.
type: str
default: +3650d
ownca_create_subject_key_identifier:
description:
- Whether to create the Subject Key Identifier (SKI) from the public key.
- A value of C(create_if_not_provided) (default) only creates a SKI when the CSR does not
provide one.
- A value of C(always_create) always creates a SKI. If the CSR provides one, that one is
ignored.
- A value of C(never_create) never creates a SKI. If the CSR provides one, that one is used.
- This is only used by the C(ownca) provider.
- Note that this is only supported if the C(cryptography) backend is used!
type: str
choices: [create_if_not_provided, always_create, never_create]
default: create_if_not_provided
ownca_create_authority_key_identifier:
description:
- Create a Authority Key Identifier from the CA's certificate. If the CSR provided
a authority key identifier, it is ignored.
- The Authority Key Identifier is generated from the CA certificate's Subject Key Identifier,
if available. If it is not available, the CA certificate's public key will be used.
- This is only used by the C(ownca) provider.
- Note that this is only supported if the C(cryptography) backend is used!
type: bool
default: yes
acme_accountkey_path:
description:
- The path to the accountkey for the C(acme) provider.
- This is only used by the C(acme) provider.
type: path
acme_challenge_path:
description:
- The path to the ACME challenge directory that is served on U(http://<HOST>:80/.well-known/acme-challenge/)
- This is only used by the C(acme) provider.
type: path
acme_chain:
description:
- Include the intermediate certificate to the generated certificate
- This is only used by the C(acme) provider.
- Note that this is only available for older versions of C(acme-tiny).
New versions include the chain automatically, and setting I(acme_chain) to C(yes) results in an error.
type: bool
default: no
acme_directory:
description:
- "The ACME directory to use. You can use any directory that supports the ACME protocol, such as Buypass or Let's Encrypt."
- "Let's Encrypt recommends using their staging server while developing jobs. U(https://letsencrypt.org/docs/staging-environment/)."
type: str
default: https://acme-v02.api.letsencrypt.org/directory
version_added: '1.0.0'
signature_algorithms:
description:
- A list of algorithms that you would accept the certificate to be signed with
(e.g. ['sha256WithRSAEncryption', 'sha512WithRSAEncryption']).
- This is only used by the C(assertonly) provider.
- This option is deprecated since Ansible 2.9 and will be removed with the C(assertonly) provider in community.crypto 2.0.0.
For alternatives, see the example on replacing C(assertonly).
type: list
elements: str
issuer:
description:
- The key/value pairs that must be present in the issuer name field of the certificate.
- If you need to specify more than one value with the same key, use a list as value.
- This is only used by the C(assertonly) provider.
- This option is deprecated since Ansible 2.9 and will be removed with the C(assertonly) provider in community.crypto 2.0.0.
For alternatives, see the example on replacing C(assertonly).
type: dict
issuer_strict:
description:
- If set to C(yes), the I(issuer) field must contain only these values.
- This is only used by the C(assertonly) provider.
- This option is deprecated since Ansible 2.9 and will be removed with the C(assertonly) provider in community.crypto 2.0.0.
For alternatives, see the example on replacing C(assertonly).
type: bool
default: no
subject:
description:
- The key/value pairs that must be present in the subject name field of the certificate.
- If you need to specify more than one value with the same key, use a list as value.
- This is only used by the C(assertonly) provider.
- This option is deprecated since Ansible 2.9 and will be removed with the C(assertonly) provider in community.crypto 2.0.0.
For alternatives, see the example on replacing C(assertonly).
type: dict
subject_strict:
description:
- If set to C(yes), the I(subject) field must contain only these values.
- This is only used by the C(assertonly) provider.
- This option is deprecated since Ansible 2.9 and will be removed with the C(assertonly) provider in community.crypto 2.0.0.
For alternatives, see the example on replacing C(assertonly).
type: bool
default: no
has_expired:
description:
- Checks if the certificate is expired/not expired at the time the module is executed.
- This is only used by the C(assertonly) provider.
- This option is deprecated since Ansible 2.9 and will be removed with the C(assertonly) provider in community.crypto 2.0.0.
For alternatives, see the example on replacing C(assertonly).
type: bool
default: no
version:
description:
- The version of the certificate.
- Nowadays it should almost always be 3.
- This is only used by the C(assertonly) provider.
- This option is deprecated since Ansible 2.9 and will be removed with the C(assertonly) provider in community.crypto 2.0.0.
For alternatives, see the example on replacing C(assertonly).
type: int
valid_at:
description:
- The certificate must be valid at this point in time.
- The timestamp is formatted as an ASN.1 TIME.
- This is only used by the C(assertonly) provider.
- This option is deprecated since Ansible 2.9 and will be removed with the C(assertonly) provider in community.crypto 2.0.0.
For alternatives, see the example on replacing C(assertonly).
type: str
invalid_at:
description:
- The certificate must be invalid at this point in time.
- The timestamp is formatted as an ASN.1 TIME.
- This is only used by the C(assertonly) provider.
- This option is deprecated since Ansible 2.9 and will be removed with the C(assertonly) provider in community.crypto 2.0.0.
For alternatives, see the example on replacing C(assertonly).
type: str
not_before:
description:
- The certificate must start to become valid at this point in time.
- The timestamp is formatted as an ASN.1 TIME.
- This is only used by the C(assertonly) provider.
- This option is deprecated since Ansible 2.9 and will be removed with the C(assertonly) provider in community.crypto 2.0.0.
For alternatives, see the example on replacing C(assertonly).
type: str
aliases: [ notBefore ]
not_after:
description:
- The certificate must expire at this point in time.
- The timestamp is formatted as an ASN.1 TIME.
- This is only used by the C(assertonly) provider.
- This option is deprecated since Ansible 2.9 and will be removed with the C(assertonly) provider in community.crypto 2.0.0.
For alternatives, see the example on replacing C(assertonly).
type: str
aliases: [ notAfter ]
valid_in:
description:
- The certificate must still be valid at this relative time offset from now.
- Valid format is C([+-]timespec | number_of_seconds) where timespec can be an integer
+ C([w | d | h | m | s]) (e.g. C(+32w1d2h).
- Note that if using this parameter, this module is NOT idempotent.
- This is only used by the C(assertonly) provider.
- This option is deprecated since Ansible 2.9 and will be removed with the C(assertonly) provider in community.crypto 2.0.0.
For alternatives, see the example on replacing C(assertonly).
type: str
key_usage:
description:
- The I(key_usage) extension field must contain all these values.
- This is only used by the C(assertonly) provider.
- This option is deprecated since Ansible 2.9 and will be removed with the C(assertonly) provider in community.crypto 2.0.0.
For alternatives, see the example on replacing C(assertonly).
type: list
elements: str
aliases: [ keyUsage ]
key_usage_strict:
description:
- If set to C(yes), the I(key_usage) extension field must contain only these values.
- This is only used by the C(assertonly) provider.
- This option is deprecated since Ansible 2.9 and will be removed with the C(assertonly) provider in community.crypto 2.0.0.
For alternatives, see the example on replacing C(assertonly).
type: bool
default: no
aliases: [ keyUsage_strict ]
extended_key_usage:
description:
- The I(extended_key_usage) extension field must contain all these values.
- This is only used by the C(assertonly) provider.
- This option is deprecated since Ansible 2.9 and will be removed with the C(assertonly) provider in community.crypto 2.0.0.
For alternatives, see the example on replacing C(assertonly).
type: list
elements: str
aliases: [ extendedKeyUsage ]
extended_key_usage_strict:
description:
- If set to C(yes), the I(extended_key_usage) extension field must contain only these values.
- This is only used by the C(assertonly) provider.
- This option is deprecated since Ansible 2.9 and will be removed with the C(assertonly) provider in community.crypto 2.0.0.
For alternatives, see the example on replacing C(assertonly).
type: bool
default: no
aliases: [ extendedKeyUsage_strict ]
subject_alt_name:
description:
- The I(subject_alt_name) extension field must contain these values.
- This is only used by the C(assertonly) provider.
- This option is deprecated since Ansible 2.9 and will be removed with the C(assertonly) provider in community.crypto 2.0.0.
For alternatives, see the example on replacing C(assertonly).
type: list
elements: str
aliases: [ subjectAltName ]
subject_alt_name_strict:
description:
- If set to C(yes), the I(subject_alt_name) extension field must contain only these values.
- This is only used by the C(assertonly) provider.
- This option is deprecated since Ansible 2.9 and will be removed with the C(assertonly) provider in community.crypto 2.0.0.
For alternatives, see the example on replacing C(assertonly).
type: bool
default: no
aliases: [ subjectAltName_strict ]
select_crypto_backend:
description:
- Determines which crypto backend to use.
- The default choice is C(auto), which tries to use C(cryptography) if available, and falls back to C(pyopenssl).
- If set to C(pyopenssl), will try to use the L(pyOpenSSL,https://pypi.org/project/pyOpenSSL/) library.
- If set to C(cryptography), will try to use the L(cryptography,https://cryptography.io/) library.
- Please note that the C(pyopenssl) backend has been deprecated in Ansible 2.9, and will be removed in community.crypto 2.0.0.
From that point on, only the C(cryptography) backend will be available.
type: str
default: auto
choices: [ auto, cryptography, pyopenssl ]
backup:
description:
- Create a backup file including a timestamp so you can get the original
certificate back if you overwrote it with a new one by accident.
- This is not used by the C(assertonly) provider.
- This option is deprecated since Ansible 2.9 and will be removed with the C(assertonly) provider in community.crypto 2.0.0.
For alternatives, see the example on replacing C(assertonly).
type: bool
default: no
entrust_cert_type:
description:
- Specify the type of certificate requested.
- This is only used by the C(entrust) provider.
type: str
default: STANDARD_SSL
choices: [ 'STANDARD_SSL', 'ADVANTAGE_SSL', 'UC_SSL', 'EV_SSL', 'WILDCARD_SSL', 'PRIVATE_SSL', 'PD_SSL', 'CDS_ENT_LITE', 'CDS_ENT_PRO', 'SMIME_ENT' ]
entrust_requester_email:
description:
- The email of the requester of the certificate (for tracking purposes).
- This is only used by the C(entrust) provider.
- This is required if the provider is C(entrust).
type: str
entrust_requester_name:
description:
- The name of the requester of the certificate (for tracking purposes).
- This is only used by the C(entrust) provider.
- This is required if the provider is C(entrust).
type: str
entrust_requester_phone:
description:
- The phone number of the requester of the certificate (for tracking purposes).
- This is only used by the C(entrust) provider.
- This is required if the provider is C(entrust).
type: str
entrust_api_user:
description:
- The username for authentication to the Entrust Certificate Services (ECS) API.
- This is only used by the C(entrust) provider.
- This is required if the provider is C(entrust).
type: str
entrust_api_key:
description:
- The key (password) for authentication to the Entrust Certificate Services (ECS) API.
- This is only used by the C(entrust) provider.
- This is required if the provider is C(entrust).
type: str
entrust_api_client_cert_path:
description:
- The path to the client certificate used to authenticate to the Entrust Certificate Services (ECS) API.
- This is only used by the C(entrust) provider.
- This is required if the provider is C(entrust).
type: path
entrust_api_client_cert_key_path:
description:
- The path to the private key of the client certificate used to authenticate to the Entrust Certificate Services (ECS) API.
- This is only used by the C(entrust) provider.
- This is required if the provider is C(entrust).
type: path
entrust_not_after:
description:
- The point in time at which the certificate stops being valid.
- Time can be specified either as relative time or as an absolute timestamp.
- A valid absolute time format is C(ASN.1 TIME) such as C(2019-06-18).
- A valid relative time format is C([+-]timespec) where timespec can be an integer + C([w | d | h | m | s]), such as C(+365d) or C(+32w1d2h)).
- Time will always be interpreted as UTC.
- Note that only the date (day, month, year) is supported for specifying the expiry date of the issued certificate.
- The full date-time is adjusted to EST (GMT -5:00) before issuance, which may result in a certificate with an expiration date one day
earlier than expected if a relative time is used.
- The minimum certificate lifetime is 90 days, and maximum is three years.
      - If this value is not specified, the certificate will stop being valid 365 days after the date of issue.
- This is only used by the C(entrust) provider.
type: str
default: +365d
entrust_api_specification_path:
description:
- The path to the specification file defining the Entrust Certificate Services (ECS) API configuration.
- You can use this to keep a local copy of the specification to avoid downloading it every time the module is used.
- This is only used by the C(entrust) provider.
type: path
default: https://cloud.entrust.net/EntrustCloud/documentation/cms-api-2.1.0.yaml
return_content:
description:
- If set to C(yes), will return the (current or generated) certificate's content as I(certificate).
type: bool
default: no
version_added: '1.0.0'
extends_documentation_fragment: files
notes:
- All ASN.1 TIME values should be specified following the YYYYMMDDHHMMSSZ pattern.
- Date specified should be UTC. Minutes and seconds are mandatory.
- For security reason, when you use C(ownca) provider, you should NOT run
M(community.crypto.x509_certificate) on a target machine, but on a dedicated CA machine. It
is recommended not to store the CA private key on the target machine. Once signed, the
certificate can be moved to the target machine.
seealso:
- module: community.crypto.openssl_csr
- module: community.crypto.openssl_dhparam
- module: community.crypto.openssl_pkcs12
- module: community.crypto.openssl_privatekey
- module: community.crypto.openssl_publickey
'''
EXAMPLES = r'''
- name: Generate a Self Signed OpenSSL certificate
community.crypto.x509_certificate:
path: /etc/ssl/crt/ansible.com.crt
privatekey_path: /etc/ssl/private/ansible.com.pem
csr_path: /etc/ssl/csr/ansible.com.csr
provider: selfsigned
- name: Generate an OpenSSL certificate signed with your own CA certificate
community.crypto.x509_certificate:
path: /etc/ssl/crt/ansible.com.crt
csr_path: /etc/ssl/csr/ansible.com.csr
ownca_path: /etc/ssl/crt/ansible_CA.crt
ownca_privatekey_path: /etc/ssl/private/ansible_CA.pem
provider: ownca
- name: Generate a Let's Encrypt Certificate
community.crypto.x509_certificate:
path: /etc/ssl/crt/ansible.com.crt
csr_path: /etc/ssl/csr/ansible.com.csr
provider: acme
acme_accountkey_path: /etc/ssl/private/ansible.com.pem
acme_challenge_path: /etc/ssl/challenges/ansible.com/
- name: Force (re-)generate a new Let's Encrypt Certificate
community.crypto.x509_certificate:
path: /etc/ssl/crt/ansible.com.crt
csr_path: /etc/ssl/csr/ansible.com.csr
provider: acme
acme_accountkey_path: /etc/ssl/private/ansible.com.pem
acme_challenge_path: /etc/ssl/challenges/ansible.com/
force: yes
- name: Generate an Entrust certificate via the Entrust Certificate Services (ECS) API
community.crypto.x509_certificate:
path: /etc/ssl/crt/ansible.com.crt
csr_path: /etc/ssl/csr/ansible.com.csr
provider: entrust
entrust_requester_name: Jo Doe
entrust_requester_email: jdoe@ansible.com
entrust_requester_phone: 555-555-5555
entrust_cert_type: STANDARD_SSL
entrust_api_user: apiusername
entrust_api_key: a^lv*32!cd9LnT
entrust_api_client_cert_path: /etc/ssl/entrust/ecs-client.crt
entrust_api_client_cert_key_path: /etc/ssl/entrust/ecs-key.crt
entrust_api_specification_path: /etc/ssl/entrust/api-docs/cms-api-2.1.0.yaml
# The following example shows one assertonly usage using all existing options for
# assertonly, and shows how to emulate the behavior with the x509_certificate_info,
# openssl_csr_info, openssl_privatekey_info and assert modules:
- community.crypto.x509_certificate:
provider: assertonly
path: /etc/ssl/crt/ansible.com.crt
csr_path: /etc/ssl/csr/ansible.com.csr
privatekey_path: /etc/ssl/csr/ansible.com.key
signature_algorithms:
- sha256WithRSAEncryption
- sha512WithRSAEncryption
subject:
commonName: ansible.com
subject_strict: yes
issuer:
commonName: ansible.com
issuer_strict: yes
has_expired: no
version: 3
key_usage:
- Data Encipherment
key_usage_strict: yes
extended_key_usage:
- DVCS
extended_key_usage_strict: yes
subject_alt_name:
- dns:ansible.com
subject_alt_name_strict: yes
not_before: 20190331202428Z
not_after: 20190413202428Z
valid_at: "+1d10h"
invalid_at: 20200331202428Z
valid_in: 10 # in ten seconds
- community.crypto.x509_certificate_info:
path: /etc/ssl/crt/ansible.com.crt
# for valid_at, invalid_at and valid_in
valid_at:
one_day_ten_hours: "+1d10h"
fixed_timestamp: 20200331202428Z
ten_seconds: "+10"
register: result
- community.crypto.openssl_csr_info:
# Verifies that the CSR signature is valid; module will fail if not
path: /etc/ssl/csr/ansible.com.csr
register: result_csr
- community.crypto.openssl_privatekey_info:
path: /etc/ssl/csr/ansible.com.key
register: result_privatekey
- assert:
that:
# When private key is specified for assertonly, this will be checked:
- result.public_key == result_privatekey.public_key
# When CSR is specified for assertonly, this will be checked:
- result.public_key == result_csr.public_key
- result.subject_ordered == result_csr.subject_ordered
- result.extensions_by_oid == result_csr.extensions_by_oid
# signature_algorithms check
- "result.signature_algorithm == 'sha256WithRSAEncryption' or result.signature_algorithm == 'sha512WithRSAEncryption'"
# subject and subject_strict
- "result.subject.commonName == 'ansible.com'"
- "result.subject | length == 1" # the number must be the number of entries you check for
# issuer and issuer_strict
- "result.issuer.commonName == 'ansible.com'"
- "result.issuer | length == 1" # the number must be the number of entries you check for
# has_expired
- not result.expired
# version
- result.version == 3
# key_usage and key_usage_strict
- "'Data Encipherment' in result.key_usage"
- "result.key_usage | length == 1" # the number must be the number of entries you check for
# extended_key_usage and extended_key_usage_strict
- "'DVCS' in result.extended_key_usage"
- "result.extended_key_usage | length == 1" # the number must be the number of entries you check for
# subject_alt_name and subject_alt_name_strict
- "'dns:ansible.com' in result.subject_alt_name"
- "result.subject_alt_name | length == 1" # the number must be the number of entries you check for
# not_before and not_after
- "result.not_before == '20190331202428Z'"
- "result.not_after == '20190413202428Z'"
# valid_at, invalid_at and valid_in
- "result.valid_at.one_day_ten_hours" # for valid_at
- "not result.valid_at.fixed_timestamp" # for invalid_at
- "result.valid_at.ten_seconds" # for valid_in
# Examples for some checks one could use the assertonly provider for:
# (Please note that assertonly has been deprecated!)
# How to use the assertonly provider to implement and trigger your own custom certificate generation workflow:
- name: Check if a certificate is currently still valid, ignoring failures
community.crypto.x509_certificate:
path: /etc/ssl/crt/example.com.crt
provider: assertonly
has_expired: no
ignore_errors: yes
register: validity_check
- name: Run custom task(s) to get a new, valid certificate in case the initial check failed
command: superspecialSSL recreate /etc/ssl/crt/example.com.crt
when: validity_check.failed
- name: Check the new certificate again for validity with the same parameters, this time failing the play if it is still invalid
community.crypto.x509_certificate:
path: /etc/ssl/crt/example.com.crt
provider: assertonly
has_expired: no
when: validity_check.failed
# Some other checks that assertonly could be used for:
- name: Verify that an existing certificate was issued by the Let's Encrypt CA and is currently still valid
community.crypto.x509_certificate:
path: /etc/ssl/crt/example.com.crt
provider: assertonly
issuer:
O: Let's Encrypt
has_expired: no
- name: Ensure that a certificate uses a modern signature algorithm (no SHA1, MD5 or DSA)
community.crypto.x509_certificate:
path: /etc/ssl/crt/example.com.crt
provider: assertonly
signature_algorithms:
- sha224WithRSAEncryption
- sha256WithRSAEncryption
- sha384WithRSAEncryption
- sha512WithRSAEncryption
- sha224WithECDSAEncryption
- sha256WithECDSAEncryption
- sha384WithECDSAEncryption
- sha512WithECDSAEncryption
- name: Ensure that the existing certificate belongs to the specified private key
community.crypto.x509_certificate:
path: /etc/ssl/crt/example.com.crt
privatekey_path: /etc/ssl/private/example.com.pem
provider: assertonly
- name: Ensure that the existing certificate is still valid at the winter solstice 2017
community.crypto.x509_certificate:
path: /etc/ssl/crt/example.com.crt
provider: assertonly
valid_at: 20171221162800Z
- name: Ensure that the existing certificate is still valid 2 weeks (1209600 seconds) from now
community.crypto.x509_certificate:
path: /etc/ssl/crt/example.com.crt
provider: assertonly
valid_in: 1209600
- name: Ensure that the existing certificate is only used for digital signatures and encrypting other keys
community.crypto.x509_certificate:
path: /etc/ssl/crt/example.com.crt
provider: assertonly
key_usage:
- digitalSignature
- keyEncipherment
key_usage_strict: true
- name: Ensure that the existing certificate can be used for client authentication
community.crypto.x509_certificate:
path: /etc/ssl/crt/example.com.crt
provider: assertonly
extended_key_usage:
- clientAuth
- name: Ensure that the existing certificate can only be used for client authentication and time stamping
community.crypto.x509_certificate:
path: /etc/ssl/crt/example.com.crt
provider: assertonly
extended_key_usage:
- clientAuth
- 1.3.6.1.5.5.7.3.8
extended_key_usage_strict: true
- name: Ensure that the existing certificate has a certain domain in its subjectAltName
community.crypto.x509_certificate:
path: /etc/ssl/crt/example.com.crt
provider: assertonly
subject_alt_name:
- www.example.com
- test.example.com
'''
RETURN = r'''
filename:
description: Path to the generated certificate.
returned: changed or success
type: str
sample: /etc/ssl/crt/www.ansible.com.crt
backup_file:
description: Name of backup file created.
returned: changed and if I(backup) is C(yes)
type: str
sample: /path/to/www.ansible.com.crt.2019-03-09@11:22~
certificate:
description: The (current or generated) certificate's content.
returned: if I(state) is C(present) and I(return_content) is C(yes)
type: str
version_added: '1.0.0'
'''
import abc
import datetime
import os
import tempfile
import time
import traceback
from distutils.version import LooseVersion
from random import SystemRandom, randrange
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils._text import to_native, to_bytes, to_text
from ansible_collections.community.crypto.plugins.module_utils.compat import ipaddress as compat_ipaddress
from ansible_collections.community.crypto.plugins.module_utils.ecs.api import ECSClient, RestOperationException, SessionConfigurationException
from ansible_collections.community.crypto.plugins.module_utils.io import (
    load_file_if_exists,
    write_file,
)
from ansible_collections.community.crypto.plugins.module_utils.crypto.basic import (
    OpenSSLObjectError,
    OpenSSLBadPassphraseError,
)
from ansible_collections.community.crypto.plugins.module_utils.crypto.support import (
    OpenSSLObject,
    load_privatekey,
    load_certificate,
    load_certificate_request,
    parse_name_field,
    get_relative_time_option,
    select_message_digest,
)
from ansible_collections.community.crypto.plugins.module_utils.crypto.cryptography_support import (
    cryptography_compare_public_keys,
    cryptography_get_name,
    cryptography_name_to_oid,
    cryptography_key_needs_digest_for_signing,
    cryptography_parse_key_usage_params,
    cryptography_serial_number_of_cert,
)
from ansible_collections.community.crypto.plugins.module_utils.crypto.pyopenssl_support import (
    pyopenssl_normalize_name_attribute,
)
# Minimum versions of the two supported backend libraries.
MINIMAL_CRYPTOGRAPHY_VERSION = '1.6'
MINIMAL_PYOPENSSL_VERSION = '0.15'
# Feature-detect the pyOpenSSL backend.  The traceback is kept so the
# import failure can be reported later (presumably via
# missing_required_lib(), which is imported above — usage not in this chunk).
PYOPENSSL_IMP_ERR = None
try:
    import OpenSSL
    from OpenSSL import crypto
    PYOPENSSL_VERSION = LooseVersion(OpenSSL.__version__)
except ImportError:
    PYOPENSSL_IMP_ERR = traceback.format_exc()
    PYOPENSSL_FOUND = False
else:
    PYOPENSSL_FOUND = True
# Feature-detect the cryptography backend in the same fashion.
CRYPTOGRAPHY_IMP_ERR = None
try:
    import cryptography
    from cryptography import x509
    from cryptography.hazmat.backends import default_backend
    from cryptography.hazmat.primitives.serialization import Encoding
    from cryptography.x509 import NameAttribute, Name
    from cryptography.x509.oid import NameOID
    CRYPTOGRAPHY_VERSION = LooseVersion(cryptography.__version__)
except ImportError:
    CRYPTOGRAPHY_IMP_ERR = traceback.format_exc()
    CRYPTOGRAPHY_FOUND = False
else:
    CRYPTOGRAPHY_FOUND = True
class CertificateError(OpenSSLObjectError):
    """Raised for certificate-specific failures in this module."""
class Certificate(OpenSSLObject):
    """Base class for all certificate providers.

    Holds the certificate on disk (``path``) together with the optional
    private key and CSR it is validated against, and implements the
    idempotence check shared by every provider/backend combination.
    """
    def __init__(self, module, backend):
        # backend is 'pyopenssl' or 'cryptography'; it selects which library
        # the _validate_* helpers below operate with.
        super(Certificate, self).__init__(
            module.params['path'],
            module.params['state'],
            module.params['force'],
            module.check_mode
        )
        self.provider = module.params['provider']
        self.privatekey_path = module.params['privatekey_path']
        self.privatekey_content = module.params['privatekey_content']
        if self.privatekey_content is not None:
            self.privatekey_content = self.privatekey_content.encode('utf-8')
        self.privatekey_passphrase = module.params['privatekey_passphrase']
        self.csr_path = module.params['csr_path']
        self.csr_content = module.params['csr_content']
        if self.csr_content is not None:
            self.csr_content = self.csr_content.encode('utf-8')
        # Loaded lazily in check() and/or by subclasses.
        self.cert = None
        self.privatekey = None
        self.csr = None
        self.backend = backend
        self.module = module
        self.return_content = module.params['return_content']
        # The following are default values which make sure check() works as
        # before if providers do not explicitly change these properties.
        self.create_subject_key_identifier = 'never_create'
        self.create_authority_key_identifier = False
        self.backup = module.params['backup']
        self.backup_file = None
    def _validate_privatekey(self):
        """Return True if the loaded certificate matches the loaded private key."""
        if self.backend == 'pyopenssl':
            # Let OpenSSL itself decide whether key and certificate belong together.
            ctx = OpenSSL.SSL.Context(OpenSSL.SSL.TLSv1_2_METHOD)
            ctx.use_privatekey(self.privatekey)
            ctx.use_certificate(self.cert)
            try:
                ctx.check_privatekey()
                return True
            except OpenSSL.SSL.Error:
                return False
        elif self.backend == 'cryptography':
            return cryptography_compare_public_keys(self.cert.public_key(), self.privatekey.public_key())
    def _validate_csr(self):
        """Return True if the loaded certificate is consistent with the loaded
        CSR: signature, subject and extensions must all match."""
        if self.backend == 'pyopenssl':
            # Verify that CSR is signed by certificate's private key
            try:
                self.csr.verify(self.cert.get_pubkey())
            except OpenSSL.crypto.Error:
                return False
            # Check subject
            if self.csr.get_subject() != self.cert.get_subject():
                return False
            # Check extensions
            csr_extensions = self.csr.get_extensions()
            cert_extension_count = self.cert.get_extension_count()
            if len(csr_extensions) != cert_extension_count:
                return False
            for extension_number in range(0, cert_extension_count):
                cert_extension = self.cert.get_extension(extension_number)
                # Find the CSR extension with the same short name, then compare payloads.
                csr_extension = filter(lambda extension: extension.get_short_name() == cert_extension.get_short_name(), csr_extensions)
                if cert_extension.get_data() != list(csr_extension)[0].get_data():
                    return False
            return True
        elif self.backend == 'cryptography':
            # Verify that CSR is signed by certificate's private key
            if not self.csr.is_signature_valid:
                return False
            if not cryptography_compare_public_keys(self.csr.public_key(), self.cert.public_key()):
                return False
            # Check subject
            if self.csr.subject != self.cert.subject:
                return False
            # Check extensions
            cert_exts = list(self.cert.extensions)
            csr_exts = list(self.csr.extensions)
            if self.create_subject_key_identifier != 'never_create':
                # Filter out SubjectKeyIdentifier extension before comparison
                cert_exts = list(filter(lambda x: not isinstance(x.value, x509.SubjectKeyIdentifier), cert_exts))
                csr_exts = list(filter(lambda x: not isinstance(x.value, x509.SubjectKeyIdentifier), csr_exts))
            if self.create_authority_key_identifier:
                # Filter out AuthorityKeyIdentifier extension before comparison
                cert_exts = list(filter(lambda x: not isinstance(x.value, x509.AuthorityKeyIdentifier), cert_exts))
                csr_exts = list(filter(lambda x: not isinstance(x.value, x509.AuthorityKeyIdentifier), csr_exts))
            if len(cert_exts) != len(csr_exts):
                return False
            for cert_ext in cert_exts:
                try:
                    csr_ext = self.csr.extensions.get_extension_for_oid(cert_ext.oid)
                    if cert_ext != csr_ext:
                        return False
                except cryptography.x509.ExtensionNotFound as dummy:
                    return False
            return True
    def remove(self, module):
        """Remove the certificate file, creating a backup first if requested."""
        if self.backup:
            self.backup_file = module.backup_local(self.path)
        super(Certificate, self).remove(module)
    def check(self, module, perms_required=True):
        """Ensure the resource is in its desired state."""
        state_and_perms = super(Certificate, self).check(module, perms_required)
        if not state_and_perms:
            return False
        try:
            self.cert = load_certificate(self.path, backend=self.backend)
        except Exception as dummy:
            # An unreadable/unparsable certificate counts as "not in desired state".
            return False
        if self.privatekey_path or self.privatekey_content:
            try:
                self.privatekey = load_privatekey(
                    path=self.privatekey_path,
                    content=self.privatekey_content,
                    passphrase=self.privatekey_passphrase,
                    backend=self.backend
                )
            except OpenSSLBadPassphraseError as exc:
                raise CertificateError(exc)
            if not self._validate_privatekey():
                return False
        if self.csr_path or self.csr_content:
            self.csr = load_certificate_request(
                path=self.csr_path,
                content=self.csr_content,
                backend=self.backend
            )
            if not self._validate_csr():
                return False
        # Check SubjectKeyIdentifier
        if self.backend == 'cryptography' and self.create_subject_key_identifier != 'never_create':
            # Get hold of certificate's SKI
            try:
                ext = self.cert.extensions.get_extension_for_class(x509.SubjectKeyIdentifier)
            except cryptography.x509.ExtensionNotFound as dummy:
                return False
            # Get hold of CSR's SKI for 'create_if_not_provided'
            csr_ext = None
            if self.create_subject_key_identifier == 'create_if_not_provided':
                try:
                    csr_ext = self.csr.extensions.get_extension_for_class(x509.SubjectKeyIdentifier)
                except cryptography.x509.ExtensionNotFound as dummy:
                    pass
            if csr_ext is None:
                # If CSR had no SKI, or we chose to ignore it ('always_create'), compare with created SKI
                if ext.value.digest != x509.SubjectKeyIdentifier.from_public_key(self.cert.public_key()).digest:
                    return False
            else:
                # If CSR had SKI and we didn't ignore it ('create_if_not_provided'), compare SKIs
                if ext.value.digest != csr_ext.value.digest:
                    return False
        return True
class CertificateAbsent(Certificate):
    """Provider used when state=absent; only removal and dump() matter."""
    def __init__(self, module):
        # The backend is irrelevant for removal; 'cryptography' is arbitrary.
        super(CertificateAbsent, self).__init__(module, 'cryptography')
    def generate(self, module):
        """Nothing to generate for an absent certificate."""
        pass
    def dump(self, check_mode=False):
        """Return result facts for the absent state."""
        result = dict(
            changed=self.changed,
            filename=self.path,
            privatekey=self.privatekey_path,
            csr=self.csr_path,
        )
        if self.backup_file:
            result['backup_file'] = self.backup_file
        if self.return_content:
            result['certificate'] = None
        return result
class SelfSignedCertificateCryptography(Certificate):
    """Generate the self-signed certificate, using the cryptography backend"""
    def __init__(self, module):
        super(SelfSignedCertificateCryptography, self).__init__(module, 'cryptography')
        self.create_subject_key_identifier = module.params['selfsigned_create_subject_key_identifier']
        self.notBefore = get_relative_time_option(module.params['selfsigned_not_before'], 'selfsigned_not_before', backend=self.backend)
        self.notAfter = get_relative_time_option(module.params['selfsigned_not_after'], 'selfsigned_not_after', backend=self.backend)
        self.digest = select_message_digest(module.params['selfsigned_digest'])
        self.version = module.params['selfsigned_version']
        # A fresh random serial for each (re)generation.
        self.serial_number = x509.random_serial_number()
        if self.csr_content is None and not os.path.exists(self.csr_path):
            raise CertificateError(
                'The certificate signing request file {0} does not exist'.format(self.csr_path)
            )
        if self.privatekey_content is None and not os.path.exists(self.privatekey_path):
            raise CertificateError(
                'The private key file {0} does not exist'.format(self.privatekey_path)
            )
        self.csr = load_certificate_request(
            path=self.csr_path,
            content=self.csr_content,
            backend=self.backend
        )
        self._module = module
        try:
            self.privatekey = load_privatekey(
                path=self.privatekey_path,
                content=self.privatekey_content,
                passphrase=self.privatekey_passphrase,
                backend=self.backend
            )
        except OpenSSLBadPassphraseError as exc:
            module.fail_json(msg=to_native(exc))
        # Some key types (per cryptography_key_needs_digest_for_signing) must be
        # signed with a digest; the rest are signed with algorithm=None.
        if cryptography_key_needs_digest_for_signing(self.privatekey):
            if self.digest is None:
                raise CertificateError(
                    'The digest %s is not supported with the cryptography backend' % module.params['selfsigned_digest']
                )
        else:
            self.digest = None
    def generate(self, module):
        """Create the self-signed certificate if check() fails or force is set."""
        if self.privatekey_content is None and not os.path.exists(self.privatekey_path):
            raise CertificateError(
                'The private key %s does not exist' % self.privatekey_path
            )
        if self.csr_content is None and not os.path.exists(self.csr_path):
            raise CertificateError(
                'The certificate signing request file %s does not exist' % self.csr_path
            )
        if not self.check(module, perms_required=False) or self.force:
            try:
                cert_builder = x509.CertificateBuilder()
                cert_builder = cert_builder.subject_name(self.csr.subject)
                cert_builder = cert_builder.issuer_name(self.csr.subject)
                cert_builder = cert_builder.serial_number(self.serial_number)
                cert_builder = cert_builder.not_valid_before(self.notBefore)
                cert_builder = cert_builder.not_valid_after(self.notAfter)
                cert_builder = cert_builder.public_key(self.privatekey.public_key())
                has_ski = False
                for extension in self.csr.extensions:
                    if isinstance(extension.value, x509.SubjectKeyIdentifier):
                        if self.create_subject_key_identifier == 'always_create':
                            continue
                        has_ski = True
                    cert_builder = cert_builder.add_extension(extension.value, critical=extension.critical)
                if not has_ski and self.create_subject_key_identifier != 'never_create':
                    # No SKI copied from the CSR; derive one from the public key.
                    cert_builder = cert_builder.add_extension(
                        x509.SubjectKeyIdentifier.from_public_key(self.privatekey.public_key()),
                        critical=False
                    )
            except ValueError as e:
                raise CertificateError(str(e))
            try:
                certificate = cert_builder.sign(
                    private_key=self.privatekey, algorithm=self.digest,
                    backend=default_backend()
                )
            except TypeError as e:
                # Older cryptography releases reject algorithm=None for Ed keys.
                if str(e) == 'Algorithm must be a registered hash algorithm.' and self.digest is None:
                    module.fail_json(msg='Signing with Ed25519 and Ed448 keys requires cryptography 2.8 or newer.')
                raise
            self.cert = certificate
            if self.backup:
                self.backup_file = module.backup_local(self.path)
            write_file(module, certificate.public_bytes(Encoding.PEM))
            self.changed = True
        else:
            self.cert = load_certificate(self.path, backend=self.backend)
        file_args = module.load_file_common_arguments(module.params)
        if module.set_fs_attributes_if_different(file_args, False):
            self.changed = True
    def dump(self, check_mode=False):
        """Return result facts; in check mode report the planned values instead
        of reading them back from the certificate on disk."""
        result = {
            'changed': self.changed,
            'filename': self.path,
            'privatekey': self.privatekey_path,
            'csr': self.csr_path
        }
        if self.backup_file:
            result['backup_file'] = self.backup_file
        if self.return_content:
            content = load_file_if_exists(self.path, ignore_errors=True)
            result['certificate'] = content.decode('utf-8') if content else None
        if check_mode:
            result.update({
                'notBefore': self.notBefore.strftime("%Y%m%d%H%M%SZ"),
                'notAfter': self.notAfter.strftime("%Y%m%d%H%M%SZ"),
                'serial_number': self.serial_number,
            })
        else:
            result.update({
                'notBefore': self.cert.not_valid_before.strftime("%Y%m%d%H%M%SZ"),
                'notAfter': self.cert.not_valid_after.strftime("%Y%m%d%H%M%SZ"),
                'serial_number': cryptography_serial_number_of_cert(self.cert),
            })
        return result
def generate_serial_number():
    """Generate a random serial number for a certificate.

    Uses the operating system's CSPRNG via random.SystemRandom() instead of
    the default Mersenne-Twister generator, so serial numbers are not
    predictable.  The value is kept below 2**159 so the DER encoding of the
    (positive) INTEGER fits within the 20 octets permitted by RFC 5280,
    and at or above 1000 so trivially small serials are never produced.

    :return: a random int in [1000, 2**159).
    """
    return SystemRandom().randrange(1000, 1 << 159)
class SelfSignedCertificate(Certificate):
    """Generate the self-signed certificate."""
    def __init__(self, module):
        super(SelfSignedCertificate, self).__init__(module, 'pyopenssl')
        # pyOpenSSL cannot create or strip SKIs; only the pass-through mode works.
        if module.params['selfsigned_create_subject_key_identifier'] != 'create_if_not_provided':
            module.fail_json(msg='selfsigned_create_subject_key_identifier cannot be used with the pyOpenSSL backend!')
        self.notBefore = get_relative_time_option(module.params['selfsigned_not_before'], 'selfsigned_not_before', backend=self.backend)
        self.notAfter = get_relative_time_option(module.params['selfsigned_not_after'], 'selfsigned_not_after', backend=self.backend)
        self.digest = module.params['selfsigned_digest']
        self.version = module.params['selfsigned_version']
        self.serial_number = generate_serial_number()
        if self.csr_content is None and not os.path.exists(self.csr_path):
            raise CertificateError(
                'The certificate signing request file {0} does not exist'.format(self.csr_path)
            )
        if self.privatekey_content is None and not os.path.exists(self.privatekey_path):
            raise CertificateError(
                'The private key file {0} does not exist'.format(self.privatekey_path)
            )
        self.csr = load_certificate_request(
            path=self.csr_path,
            content=self.csr_content,
        )
        try:
            self.privatekey = load_privatekey(
                path=self.privatekey_path,
                content=self.privatekey_content,
                passphrase=self.privatekey_passphrase,
            )
        except OpenSSLBadPassphraseError as exc:
            module.fail_json(msg=str(exc))
    def generate(self, module):
        """Create the self-signed certificate if check() fails or force is set."""
        if self.privatekey_content is None and not os.path.exists(self.privatekey_path):
            raise CertificateError(
                'The private key %s does not exist' % self.privatekey_path
            )
        if self.csr_content is None and not os.path.exists(self.csr_path):
            raise CertificateError(
                'The certificate signing request file %s does not exist' % self.csr_path
            )
        if not self.check(module, perms_required=False) or self.force:
            cert = crypto.X509()
            cert.set_serial_number(self.serial_number)
            cert.set_notBefore(to_bytes(self.notBefore))
            cert.set_notAfter(to_bytes(self.notAfter))
            cert.set_subject(self.csr.get_subject())
            cert.set_issuer(self.csr.get_subject())
            # pyOpenSSL's version field is zero-based (0 == X.509 v1).
            cert.set_version(self.version - 1)
            cert.set_pubkey(self.csr.get_pubkey())
            cert.add_extensions(self.csr.get_extensions())
            cert.sign(self.privatekey, self.digest)
            self.cert = cert
            if self.backup:
                self.backup_file = module.backup_local(self.path)
            write_file(module, crypto.dump_certificate(crypto.FILETYPE_PEM, self.cert))
            self.changed = True
        file_args = module.load_file_common_arguments(module.params)
        if module.set_fs_attributes_if_different(file_args, False):
            self.changed = True
    def dump(self, check_mode=False):
        """Return result facts; in check mode report the planned values."""
        result = {
            'changed': self.changed,
            'filename': self.path,
            'privatekey': self.privatekey_path,
            'csr': self.csr_path
        }
        if self.backup_file:
            result['backup_file'] = self.backup_file
        if self.return_content:
            content = load_file_if_exists(self.path, ignore_errors=True)
            result['certificate'] = content.decode('utf-8') if content else None
        if check_mode:
            result.update({
                'notBefore': self.notBefore,
                'notAfter': self.notAfter,
                'serial_number': self.serial_number,
            })
        else:
            result.update({
                'notBefore': self.cert.get_notBefore(),
                'notAfter': self.cert.get_notAfter(),
                'serial_number': self.cert.get_serial_number(),
            })
        return result
class OwnCACertificateCryptography(Certificate):
    """Generate the own CA certificate. Using the cryptography backend"""
    def __init__(self, module):
        super(OwnCACertificateCryptography, self).__init__(module, 'cryptography')
        self.create_subject_key_identifier = module.params['ownca_create_subject_key_identifier']
        self.create_authority_key_identifier = module.params['ownca_create_authority_key_identifier']
        self.notBefore = get_relative_time_option(module.params['ownca_not_before'], 'ownca_not_before', backend=self.backend)
        self.notAfter = get_relative_time_option(module.params['ownca_not_after'], 'ownca_not_after', backend=self.backend)
        self.digest = select_message_digest(module.params['ownca_digest'])
        self.version = module.params['ownca_version']
        # A fresh random serial for each (re)generation.
        self.serial_number = x509.random_serial_number()
        self.ca_cert_path = module.params['ownca_path']
        self.ca_cert_content = module.params['ownca_content']
        if self.ca_cert_content is not None:
            self.ca_cert_content = self.ca_cert_content.encode('utf-8')
        self.ca_privatekey_path = module.params['ownca_privatekey_path']
        self.ca_privatekey_content = module.params['ownca_privatekey_content']
        if self.ca_privatekey_content is not None:
            self.ca_privatekey_content = self.ca_privatekey_content.encode('utf-8')
        self.ca_privatekey_passphrase = module.params['ownca_privatekey_passphrase']
        if self.csr_content is None and not os.path.exists(self.csr_path):
            raise CertificateError(
                'The certificate signing request file {0} does not exist'.format(self.csr_path)
            )
        if self.ca_cert_content is None and not os.path.exists(self.ca_cert_path):
            raise CertificateError(
                'The CA certificate file {0} does not exist'.format(self.ca_cert_path)
            )
        if self.ca_privatekey_content is None and not os.path.exists(self.ca_privatekey_path):
            raise CertificateError(
                'The CA private key file {0} does not exist'.format(self.ca_privatekey_path)
            )
        self.csr = load_certificate_request(
            path=self.csr_path,
            content=self.csr_content,
            backend=self.backend
        )
        self.ca_cert = load_certificate(
            path=self.ca_cert_path,
            content=self.ca_cert_content,
            backend=self.backend
        )
        try:
            self.ca_private_key = load_privatekey(
                path=self.ca_privatekey_path,
                content=self.ca_privatekey_content,
                passphrase=self.ca_privatekey_passphrase,
                backend=self.backend
            )
        except OpenSSLBadPassphraseError as exc:
            module.fail_json(msg=str(exc))
        # Some CA key types must be signed with a digest; the rest use None.
        if cryptography_key_needs_digest_for_signing(self.ca_private_key):
            if self.digest is None:
                raise CertificateError(
                    'The digest %s is not supported with the cryptography backend' % module.params['ownca_digest']
                )
        else:
            self.digest = None
    def generate(self, module):
        """Create the CA-signed certificate if check() fails or force is set."""
        if self.ca_cert_content is None and not os.path.exists(self.ca_cert_path):
            raise CertificateError(
                'The CA certificate %s does not exist' % self.ca_cert_path
            )
        if self.ca_privatekey_content is None and not os.path.exists(self.ca_privatekey_path):
            raise CertificateError(
                'The CA private key %s does not exist' % self.ca_privatekey_path
            )
        if self.csr_content is None and not os.path.exists(self.csr_path):
            raise CertificateError(
                'The certificate signing request file %s does not exist' % self.csr_path
            )
        if not self.check(module, perms_required=False) or self.force:
            cert_builder = x509.CertificateBuilder()
            cert_builder = cert_builder.subject_name(self.csr.subject)
            cert_builder = cert_builder.issuer_name(self.ca_cert.subject)
            cert_builder = cert_builder.serial_number(self.serial_number)
            cert_builder = cert_builder.not_valid_before(self.notBefore)
            cert_builder = cert_builder.not_valid_after(self.notAfter)
            cert_builder = cert_builder.public_key(self.csr.public_key())
            has_ski = False
            for extension in self.csr.extensions:
                if isinstance(extension.value, x509.SubjectKeyIdentifier):
                    if self.create_subject_key_identifier == 'always_create':
                        continue
                    has_ski = True
                # A CSR-provided AKI is dropped when we create our own below.
                if self.create_authority_key_identifier and isinstance(extension.value, x509.AuthorityKeyIdentifier):
                    continue
                cert_builder = cert_builder.add_extension(extension.value, critical=extension.critical)
            if not has_ski and self.create_subject_key_identifier != 'never_create':
                # No SKI copied from the CSR; derive one from the public key.
                cert_builder = cert_builder.add_extension(
                    x509.SubjectKeyIdentifier.from_public_key(self.csr.public_key()),
                    critical=False
                )
            if self.create_authority_key_identifier:
                # Prefer deriving the AKI from the CA certificate's SKI; fall
                # back to its public key if the CA cert carries no SKI.
                try:
                    ext = self.ca_cert.extensions.get_extension_for_class(x509.SubjectKeyIdentifier)
                    cert_builder = cert_builder.add_extension(
                        x509.AuthorityKeyIdentifier.from_issuer_subject_key_identifier(ext.value)
                        if CRYPTOGRAPHY_VERSION >= LooseVersion('2.7') else
                        x509.AuthorityKeyIdentifier.from_issuer_subject_key_identifier(ext),
                        critical=False
                    )
                except cryptography.x509.ExtensionNotFound:
                    cert_builder = cert_builder.add_extension(
                        x509.AuthorityKeyIdentifier.from_issuer_public_key(self.ca_cert.public_key()),
                        critical=False
                    )
            try:
                certificate = cert_builder.sign(
                    private_key=self.ca_private_key, algorithm=self.digest,
                    backend=default_backend()
                )
            except TypeError as e:
                # Older cryptography releases reject algorithm=None for Ed keys.
                if str(e) == 'Algorithm must be a registered hash algorithm.' and self.digest is None:
                    module.fail_json(msg='Signing with Ed25519 and Ed448 keys requires cryptography 2.8 or newer.')
                raise
            self.cert = certificate
            if self.backup:
                self.backup_file = module.backup_local(self.path)
            write_file(module, certificate.public_bytes(Encoding.PEM))
            self.changed = True
        else:
            self.cert = load_certificate(self.path, backend=self.backend)
        file_args = module.load_file_common_arguments(module.params)
        if module.set_fs_attributes_if_different(file_args, False):
            self.changed = True
    def check(self, module, perms_required=True):
        """Ensure the resource is in its desired state."""
        if not super(OwnCACertificateCryptography, self).check(module, perms_required):
            return False
        # Check AuthorityKeyIdentifier
        if self.create_authority_key_identifier:
            try:
                ext = self.ca_cert.extensions.get_extension_for_class(x509.SubjectKeyIdentifier)
                expected_ext = (
                    x509.AuthorityKeyIdentifier.from_issuer_subject_key_identifier(ext.value)
                    if CRYPTOGRAPHY_VERSION >= LooseVersion('2.7') else
                    x509.AuthorityKeyIdentifier.from_issuer_subject_key_identifier(ext)
                )
            except cryptography.x509.ExtensionNotFound:
                expected_ext = x509.AuthorityKeyIdentifier.from_issuer_public_key(self.ca_cert.public_key())
            try:
                ext = self.cert.extensions.get_extension_for_class(x509.AuthorityKeyIdentifier)
                if ext.value != expected_ext:
                    return False
            except cryptography.x509.ExtensionNotFound as dummy:
                return False
        return True
    def dump(self, check_mode=False):
        """Return result facts; in check mode report the planned values."""
        result = {
            'changed': self.changed,
            'filename': self.path,
            'privatekey': self.privatekey_path,
            'csr': self.csr_path,
            'ca_cert': self.ca_cert_path,
            'ca_privatekey': self.ca_privatekey_path
        }
        if self.backup_file:
            result['backup_file'] = self.backup_file
        if self.return_content:
            content = load_file_if_exists(self.path, ignore_errors=True)
            result['certificate'] = content.decode('utf-8') if content else None
        if check_mode:
            result.update({
                'notBefore': self.notBefore.strftime("%Y%m%d%H%M%SZ"),
                'notAfter': self.notAfter.strftime("%Y%m%d%H%M%SZ"),
                'serial_number': self.serial_number,
            })
        else:
            result.update({
                'notBefore': self.cert.not_valid_before.strftime("%Y%m%d%H%M%SZ"),
                'notAfter': self.cert.not_valid_after.strftime("%Y%m%d%H%M%SZ"),
                'serial_number': cryptography_serial_number_of_cert(self.cert),
            })
        return result
class OwnCACertificate(Certificate):
    """Generate the own CA certificate."""
    def __init__(self, module):
        super(OwnCACertificate, self).__init__(module, 'pyopenssl')
        self.notBefore = get_relative_time_option(module.params['ownca_not_before'], 'ownca_not_before', backend=self.backend)
        self.notAfter = get_relative_time_option(module.params['ownca_not_after'], 'ownca_not_after', backend=self.backend)
        self.digest = module.params['ownca_digest']
        self.version = module.params['ownca_version']
        self.serial_number = generate_serial_number()
        # pyOpenSSL cannot create SKIs/AKIs; reject or warn accordingly.
        if module.params['ownca_create_subject_key_identifier'] != 'create_if_not_provided':
            module.fail_json(msg='ownca_create_subject_key_identifier cannot be used with the pyOpenSSL backend!')
        if module.params['ownca_create_authority_key_identifier']:
            module.warn('ownca_create_authority_key_identifier is ignored by the pyOpenSSL backend!')
        self.ca_cert_path = module.params['ownca_path']
        self.ca_cert_content = module.params['ownca_content']
        if self.ca_cert_content is not None:
            self.ca_cert_content = self.ca_cert_content.encode('utf-8')
        self.ca_privatekey_path = module.params['ownca_privatekey_path']
        self.ca_privatekey_content = module.params['ownca_privatekey_content']
        if self.ca_privatekey_content is not None:
            self.ca_privatekey_content = self.ca_privatekey_content.encode('utf-8')
        self.ca_privatekey_passphrase = module.params['ownca_privatekey_passphrase']
        if self.csr_content is None and not os.path.exists(self.csr_path):
            raise CertificateError(
                'The certificate signing request file {0} does not exist'.format(self.csr_path)
            )
        if self.ca_cert_content is None and not os.path.exists(self.ca_cert_path):
            raise CertificateError(
                'The CA certificate file {0} does not exist'.format(self.ca_cert_path)
            )
        if self.ca_privatekey_content is None and not os.path.exists(self.ca_privatekey_path):
            raise CertificateError(
                'The CA private key file {0} does not exist'.format(self.ca_privatekey_path)
            )
        self.csr = load_certificate_request(
            path=self.csr_path,
            content=self.csr_content,
        )
        self.ca_cert = load_certificate(
            path=self.ca_cert_path,
            content=self.ca_cert_content,
        )
        try:
            self.ca_privatekey = load_privatekey(
                path=self.ca_privatekey_path,
                content=self.ca_privatekey_content,
                passphrase=self.ca_privatekey_passphrase
            )
        except OpenSSLBadPassphraseError as exc:
            module.fail_json(msg=str(exc))
    def generate(self, module):
        """Create the CA-signed certificate if check() fails or force is set."""
        if self.ca_cert_content is None and not os.path.exists(self.ca_cert_path):
            raise CertificateError(
                'The CA certificate %s does not exist' % self.ca_cert_path
            )
        if self.ca_privatekey_content is None and not os.path.exists(self.ca_privatekey_path):
            raise CertificateError(
                'The CA private key %s does not exist' % self.ca_privatekey_path
            )
        if self.csr_content is None and not os.path.exists(self.csr_path):
            raise CertificateError(
                'The certificate signing request file %s does not exist' % self.csr_path
            )
        if not self.check(module, perms_required=False) or self.force:
            cert = crypto.X509()
            cert.set_serial_number(self.serial_number)
            cert.set_notBefore(to_bytes(self.notBefore))
            cert.set_notAfter(to_bytes(self.notAfter))
            cert.set_subject(self.csr.get_subject())
            cert.set_issuer(self.ca_cert.get_subject())
            # pyOpenSSL's version field is zero-based (0 == X.509 v1).
            cert.set_version(self.version - 1)
            cert.set_pubkey(self.csr.get_pubkey())
            cert.add_extensions(self.csr.get_extensions())
            cert.sign(self.ca_privatekey, self.digest)
            self.cert = cert
            if self.backup:
                self.backup_file = module.backup_local(self.path)
            write_file(module, crypto.dump_certificate(crypto.FILETYPE_PEM, self.cert))
            self.changed = True
        file_args = module.load_file_common_arguments(module.params)
        if module.set_fs_attributes_if_different(file_args, False):
            self.changed = True
    def dump(self, check_mode=False):
        """Return result facts; in check mode report the planned values."""
        result = {
            'changed': self.changed,
            'filename': self.path,
            'privatekey': self.privatekey_path,
            'csr': self.csr_path,
            'ca_cert': self.ca_cert_path,
            'ca_privatekey': self.ca_privatekey_path
        }
        if self.backup_file:
            result['backup_file'] = self.backup_file
        if self.return_content:
            content = load_file_if_exists(self.path, ignore_errors=True)
            result['certificate'] = content.decode('utf-8') if content else None
        if check_mode:
            result.update({
                'notBefore': self.notBefore,
                'notAfter': self.notAfter,
                'serial_number': self.serial_number,
            })
        else:
            result.update({
                'notBefore': self.cert.get_notBefore(),
                'notAfter': self.cert.get_notAfter(),
                'serial_number': self.cert.get_serial_number(),
            })
        return result
def compare_sets(subset, superset, equality=False):
    """Check whether every element of subset is contained in superset.

    When equality is true, require the two collections to hold exactly
    the same elements instead of a mere subset relationship.
    """
    if not equality:
        for element in subset:
            if element not in superset:
                return False
        return True
    return set(subset) == set(superset)
def compare_dicts(subset, superset, equality=False):
    """Check whether every key/value pair of subset occurs in superset.

    When equality is true, the two dicts must compare equal instead.
    """
    if not equality:
        for key, value in subset.items():
            if superset.get(key) != value:
                return False
        return True
    return subset == superset
# Sentinel string — presumably used by the assertonly checks to express that a
# given extension must be absent; its use is not visible in this chunk.
NO_EXTENSION = 'no extension'
class AssertOnlyCertificateBase(Certificate):
    def __init__(self, module, backend):
        """Collect all assertonly parameters and eagerly load the certificate
        (plus optional private key / CSR) so assertions can be evaluated."""
        super(AssertOnlyCertificateBase, self).__init__(module, backend)
        self.signature_algorithms = module.params['signature_algorithms']
        if module.params['subject']:
            self.subject = parse_name_field(module.params['subject'])
        else:
            self.subject = []
        self.subject_strict = module.params['subject_strict']
        if module.params['issuer']:
            self.issuer = parse_name_field(module.params['issuer'])
        else:
            self.issuer = []
        self.issuer_strict = module.params['issuer_strict']
        self.has_expired = module.params['has_expired']
        self.version = module.params['version']
        self.key_usage = module.params['key_usage']
        self.key_usage_strict = module.params['key_usage_strict']
        self.extended_key_usage = module.params['extended_key_usage']
        self.extended_key_usage_strict = module.params['extended_key_usage_strict']
        self.subject_alt_name = module.params['subject_alt_name']
        self.subject_alt_name_strict = module.params['subject_alt_name_strict']
        self.not_before = module.params['not_before']
        self.not_after = module.params['not_after']
        self.valid_at = module.params['valid_at']
        self.invalid_at = module.params['invalid_at']
        self.valid_in = module.params['valid_in']
        # A bare integer for valid_in is normalized to a "+<n>s" timespec.
        if self.valid_in and not self.valid_in.startswith("+") and not self.valid_in.startswith("-"):
            try:
                int(self.valid_in)
            except ValueError:
                module.fail_json(msg='The supplied value for "valid_in" (%s) is not an integer or a valid timespec' % self.valid_in)
            self.valid_in = "+" + self.valid_in + "s"
        # Load objects
        self.cert = load_certificate(self.path, backend=self.backend)
        if self.privatekey_path is not None or self.privatekey_content is not None:
            try:
                self.privatekey = load_privatekey(
                    path=self.privatekey_path,
                    content=self.privatekey_content,
                    passphrase=self.privatekey_passphrase,
                    backend=self.backend
                )
            except OpenSSLBadPassphraseError as exc:
                raise CertificateError(exc)
        if self.csr_path is not None or self.csr_content is not None:
            self.csr = load_certificate_request(
                path=self.csr_path,
                content=self.csr_content,
                backend=self.backend
            )
    # --- Abstract validation hooks ------------------------------------------
    # Each _validate_* method checks one assertion against the loaded
    # certificate; backend-specific subclasses implement them.
    @abc.abstractmethod
    def _validate_privatekey(self):
        pass
    @abc.abstractmethod
    def _validate_csr_signature(self):
        pass
    @abc.abstractmethod
    def _validate_csr_subject(self):
        pass
    @abc.abstractmethod
    def _validate_csr_extensions(self):
        pass
    @abc.abstractmethod
    def _validate_signature_algorithms(self):
        pass
    @abc.abstractmethod
    def _validate_subject(self):
        pass
    @abc.abstractmethod
    def _validate_issuer(self):
        pass
    @abc.abstractmethod
    def _validate_has_expired(self):
        pass
    @abc.abstractmethod
    def _validate_version(self):
        pass
    @abc.abstractmethod
    def _validate_key_usage(self):
        pass
    @abc.abstractmethod
    def _validate_extended_key_usage(self):
        pass
    @abc.abstractmethod
    def _validate_subject_alt_name(self):
        pass
    @abc.abstractmethod
    def _validate_not_before(self):
        pass
    @abc.abstractmethod
    def _validate_not_after(self):
        pass
    @abc.abstractmethod
    def _validate_valid_at(self):
        pass
    @abc.abstractmethod
    def _validate_invalid_at(self):
        pass
    @abc.abstractmethod
    def _validate_valid_in(self):
        pass
    def assertonly(self, module):
        """Run every configured assertion against the loaded certificate.

        Returns a list of human-readable failure messages; an empty list
        means all requested checks passed.
        """
        messages = []
        # Key/CSR consistency checks: these hooks return truthy on match.
        if self.privatekey_path is not None or self.privatekey_content is not None:
            if not self._validate_privatekey():
                messages.append(
                    'Certificate %s and private key %s do not match' %
                    (self.path, self.privatekey_path or '(provided in module options)')
                )
        if self.csr_path is not None or self.csr_content is not None:
            if not self._validate_csr_signature():
                messages.append(
                    'Certificate %s and CSR %s do not match: private key mismatch' %
                    (self.path, self.csr_path or '(provided in module options)')
                )
            if not self._validate_csr_subject():
                messages.append(
                    'Certificate %s and CSR %s do not match: subject mismatch' %
                    (self.path, self.csr_path or '(provided in module options)')
                )
            if not self._validate_csr_extensions():
                messages.append(
                    'Certificate %s and CSR %s do not match: extensions mismatch' %
                    (self.path, self.csr_path or '(provided in module options)')
                )
        # Content checks: these hooks return None on success, or mismatch
        # details (possibly the NO_EXTENSION sentinel) on failure.
        if self.signature_algorithms is not None:
            wrong_alg = self._validate_signature_algorithms()
            if wrong_alg:
                messages.append(
                    'Invalid signature algorithm (got %s, expected one of %s)' %
                    (wrong_alg, self.signature_algorithms)
                )
        if self.subject is not None:
            failure = self._validate_subject()
            if failure:
                dummy, cert_subject = failure
                messages.append(
                    'Invalid subject component (got %s, expected all of %s to be present)' %
                    (cert_subject, self.subject)
                )
        if self.issuer is not None:
            failure = self._validate_issuer()
            if failure:
                dummy, cert_issuer = failure
                messages.append(
                    'Invalid issuer component (got %s, expected all of %s to be present)' % (cert_issuer, self.issuer)
                )
        if self.has_expired is not None:
            cert_expired = self._validate_has_expired()
            if cert_expired != self.has_expired:
                messages.append(
                    'Certificate expiration check failed (certificate expiration is %s, expected %s)' %
                    (cert_expired, self.has_expired)
                )
        if self.version is not None:
            cert_version = self._validate_version()
            if cert_version != self.version:
                messages.append(
                    'Invalid certificate version number (got %s, expected %s)' %
                    (cert_version, self.version)
                )
        if self.key_usage is not None:
            failure = self._validate_key_usage()
            if failure == NO_EXTENSION:
                messages.append('Found no keyUsage extension')
            elif failure:
                dummy, cert_key_usage = failure
                messages.append(
                    'Invalid keyUsage components (got %s, expected all of %s to be present)' %
                    (cert_key_usage, self.key_usage)
                )
        if self.extended_key_usage is not None:
            failure = self._validate_extended_key_usage()
            if failure == NO_EXTENSION:
                messages.append('Found no extendedKeyUsage extension')
            elif failure:
                dummy, ext_cert_key_usage = failure
                messages.append(
                    'Invalid extendedKeyUsage component (got %s, expected all of %s to be present)' % (ext_cert_key_usage, self.extended_key_usage)
                )
        if self.subject_alt_name is not None:
            failure = self._validate_subject_alt_name()
            if failure == NO_EXTENSION:
                messages.append('Found no subjectAltName extension')
            elif failure:
                dummy, cert_san = failure
                messages.append(
                    'Invalid subjectAltName component (got %s, expected all of %s to be present)' %
                    (cert_san, self.subject_alt_name)
                )
        # Validity-window checks: the hooks return certificate timestamps
        # that are compared against the (possibly relative) user values.
        if self.not_before is not None:
            cert_not_valid_before = self._validate_not_before()
            if cert_not_valid_before != get_relative_time_option(self.not_before, 'not_before', backend=self.backend):
                messages.append(
                    'Invalid not_before component (got %s, expected %s to be present)' %
                    (cert_not_valid_before, self.not_before)
                )
        if self.not_after is not None:
            cert_not_valid_after = self._validate_not_after()
            if cert_not_valid_after != get_relative_time_option(self.not_after, 'not_after', backend=self.backend):
                messages.append(
                    'Invalid not_after component (got %s, expected %s to be present)' %
                    (cert_not_valid_after, self.not_after)
                )
        if self.valid_at is not None:
            not_before, valid_at, not_after = self._validate_valid_at()
            if not (not_before <= valid_at <= not_after):
                messages.append(
                    'Certificate is not valid for the specified date (%s) - not_before: %s - not_after: %s' %
                    (self.valid_at, not_before, not_after)
                )
        if self.invalid_at is not None:
            not_before, invalid_at, not_after = self._validate_invalid_at()
            if not_before <= invalid_at <= not_after:
                messages.append(
                    'Certificate is not invalid for the specified date (%s) - not_before: %s - not_after: %s' %
                    (self.invalid_at, not_before, not_after)
                )
        if self.valid_in is not None:
            not_before, valid_in, not_after = self._validate_valid_in()
            if not not_before <= valid_in <= not_after:
                messages.append(
                    'Certificate is not valid in %s from now (that would be %s) - not_before: %s - not_after: %s' %
                    (self.valid_in, valid_in, not_before, not_after)
                )
        return messages
def generate(self, module):
"""Don't generate anything - only assert"""
messages = self.assertonly(module)
if messages:
module.fail_json(msg=' | '.join(messages))
def check(self, module, perms_required=False):
"""Ensure the resource is in its desired state."""
messages = self.assertonly(module)
return len(messages) == 0
def dump(self, check_mode=False):
result = {
'changed': self.changed,
'filename': self.path,
'privatekey': self.privatekey_path,
'csr': self.csr_path,
}
if self.return_content:
content = load_file_if_exists(self.path, ignore_errors=True)
result['certificate'] = content.decode('utf-8') if content else None
return result
class AssertOnlyCertificateCryptography(AssertOnlyCertificateBase):
    """Validate the supplied cert, using the cryptography backend"""
    def __init__(self, module):
        super(AssertOnlyCertificateCryptography, self).__init__(module, 'cryptography')
    def _validate_privatekey(self):
        # True when the certificate's public key matches the private key's.
        return cryptography_compare_public_keys(self.cert.public_key(), self.privatekey.public_key())
    def _validate_csr_signature(self):
        # The CSR must be internally consistent and use the cert's key pair.
        if not self.csr.is_signature_valid:
            return False
        return cryptography_compare_public_keys(self.csr.public_key(), self.cert.public_key())
    def _validate_csr_subject(self):
        return self.csr.subject == self.cert.subject
    def _validate_csr_extensions(self):
        # Extension sets must match exactly: same count and same values.
        cert_exts = self.cert.extensions
        csr_exts = self.csr.extensions
        if len(cert_exts) != len(csr_exts):
            return False
        for cert_ext in cert_exts:
            try:
                csr_ext = csr_exts.get_extension_for_oid(cert_ext.oid)
                if cert_ext != csr_ext:
                    return False
            except cryptography.x509.ExtensionNotFound as dummy:
                return False
        return True
    def _validate_signature_algorithms(self):
        # Returns the offending algorithm name, or None when acceptable.
        if self.cert.signature_algorithm_oid._name not in self.signature_algorithms:
            return self.cert.signature_algorithm_oid._name
    def _validate_subject(self):
        # Build the expected Name from user (oid, value) pairs and compare.
        expected_subject = Name([NameAttribute(oid=cryptography_name_to_oid(sub[0]), value=to_text(sub[1]))
                                 for sub in self.subject])
        cert_subject = self.cert.subject
        if not compare_sets(expected_subject, cert_subject, self.subject_strict):
            return expected_subject, cert_subject
    def _validate_issuer(self):
        expected_issuer = Name([NameAttribute(oid=cryptography_name_to_oid(iss[0]), value=to_text(iss[1]))
                                for iss in self.issuer])
        cert_issuer = self.cert.issuer
        if not compare_sets(expected_issuer, cert_issuer, self.issuer_strict):
            return self.issuer, cert_issuer
    def _validate_has_expired(self):
        # Compare the certificate's notAfter against the current UTC time.
        cert_not_after = self.cert.not_valid_after
        cert_expired = cert_not_after < datetime.datetime.utcnow()
        return cert_expired
    def _validate_version(self):
        # Map the cryptography Version enum to the human version number.
        if self.cert.version == x509.Version.v1:
            return 1
        if self.cert.version == x509.Version.v3:
            return 3
        return "unknown"
    def _validate_key_usage(self):
        try:
            current_key_usage = self.cert.extensions.get_extension_for_class(x509.KeyUsage).value
            test_key_usage = dict(
                digital_signature=current_key_usage.digital_signature,
                content_commitment=current_key_usage.content_commitment,
                key_encipherment=current_key_usage.key_encipherment,
                data_encipherment=current_key_usage.data_encipherment,
                key_agreement=current_key_usage.key_agreement,
                key_cert_sign=current_key_usage.key_cert_sign,
                crl_sign=current_key_usage.crl_sign,
                encipher_only=False,
                decipher_only=False
            )
            # encipher_only/decipher_only are only read when key_agreement
            # is set (presumably because cryptography rejects the access
            # otherwise — TODO confirm against the library docs).
            if test_key_usage['key_agreement']:
                test_key_usage.update(dict(
                    encipher_only=current_key_usage.encipher_only,
                    decipher_only=current_key_usage.decipher_only
                ))
            key_usages = cryptography_parse_key_usage_params(self.key_usage)
            if not compare_dicts(key_usages, test_key_usage, self.key_usage_strict):
                return self.key_usage, [k for k, v in test_key_usage.items() if v is True]
        except cryptography.x509.ExtensionNotFound:
            # This is only bad if the user specified a non-empty list
            if self.key_usage:
                return NO_EXTENSION
    def _validate_extended_key_usage(self):
        try:
            current_ext_keyusage = self.cert.extensions.get_extension_for_class(x509.ExtendedKeyUsage).value
            usages = [cryptography_name_to_oid(usage) for usage in self.extended_key_usage]
            expected_ext_keyusage = x509.ExtendedKeyUsage(usages)
            if not compare_sets(expected_ext_keyusage, current_ext_keyusage, self.extended_key_usage_strict):
                return [eku.value for eku in expected_ext_keyusage], [eku.value for eku in current_ext_keyusage]
        except cryptography.x509.ExtensionNotFound:
            # This is only bad if the user specified a non-empty list
            if self.extended_key_usage:
                return NO_EXTENSION
    def _validate_subject_alt_name(self):
        try:
            current_san = self.cert.extensions.get_extension_for_class(x509.SubjectAlternativeName).value
            expected_san = [cryptography_get_name(san) for san in self.subject_alt_name]
            if not compare_sets(expected_san, current_san, self.subject_alt_name_strict):
                return self.subject_alt_name, current_san
        except cryptography.x509.ExtensionNotFound:
            # This is only bad if the user specified a non-empty list
            if self.subject_alt_name:
                return NO_EXTENSION
    def _validate_not_before(self):
        return self.cert.not_valid_before
    def _validate_not_after(self):
        return self.cert.not_valid_after
    def _validate_valid_at(self):
        # Resolve the (possibly relative) timestamp and return the window.
        rt = get_relative_time_option(self.valid_at, 'valid_at', backend=self.backend)
        return self.cert.not_valid_before, rt, self.cert.not_valid_after
    def _validate_invalid_at(self):
        rt = get_relative_time_option(self.invalid_at, 'invalid_at', backend=self.backend)
        return self.cert.not_valid_before, rt, self.cert.not_valid_after
    def _validate_valid_in(self):
        valid_in_date = get_relative_time_option(self.valid_in, "valid_in", backend=self.backend)
        return self.cert.not_valid_before, valid_in_date, self.cert.not_valid_after
class AssertOnlyCertificate(AssertOnlyCertificateBase):
    """Validate the supplied certificate, using the pyopenssl backend."""

    def __init__(self, module):
        super(AssertOnlyCertificate, self).__init__(module, 'pyopenssl')
        # Ensure inputs are properly sanitized before comparison.
        # pyopenssl works on bytes, so convert user-supplied text values
        # (and containers of them) to bytes up front.
        for param in ['signature_algorithms', 'key_usage', 'extended_key_usage',
                      'subject_alt_name', 'subject', 'issuer', 'not_before',
                      'not_after', 'valid_at', 'invalid_at']:
            attr = getattr(self, param)
            if isinstance(attr, list) and attr:
                if isinstance(attr[0], str):
                    setattr(self, param, [to_bytes(item) for item in attr])
                elif isinstance(attr[0], tuple):
                    setattr(self, param, [(to_bytes(item[0]), to_bytes(item[1])) for item in attr])
            elif isinstance(attr, tuple):
                setattr(self, param, dict((to_bytes(k), to_bytes(v)) for (k, v) in attr.items()))
            elif isinstance(attr, dict):
                setattr(self, param, dict((to_bytes(k), to_bytes(v)) for (k, v) in attr.items()))
            elif isinstance(attr, str):
                setattr(self, param, to_bytes(attr))

    def _validate_privatekey(self):
        # Pair key and cert in a throwaway TLS context; OpenSSL raises if
        # they do not belong together.
        ctx = OpenSSL.SSL.Context(OpenSSL.SSL.TLSv1_2_METHOD)
        ctx.use_privatekey(self.privatekey)
        ctx.use_certificate(self.cert)
        try:
            ctx.check_privatekey()
            return True
        except OpenSSL.SSL.Error:
            return False

    def _validate_csr_signature(self):
        # BUGFIX: the success path previously fell through and returned
        # None, which assertonly() treated as a mismatch.  Return True
        # explicitly when verification succeeds.
        try:
            self.csr.verify(self.cert.get_pubkey())
        except OpenSSL.crypto.Error:
            return False
        return True

    def _validate_csr_subject(self):
        # BUGFIX: return True on a subject match.  The previous
        # fall-through returned None, so a matching subject was still
        # reported as "subject mismatch" by assertonly().
        return self.csr.get_subject() == self.cert.get_subject()

    def _validate_csr_extensions(self):
        # Extension sets must match: same count and same data per name.
        csr_extensions = self.csr.get_extensions()
        cert_extension_count = self.cert.get_extension_count()
        if len(csr_extensions) != cert_extension_count:
            return False
        for extension_number in range(0, cert_extension_count):
            cert_extension = self.cert.get_extension(extension_number)
            # Guard against a cert extension with no CSR counterpart of
            # the same short name (was an unguarded [0] -> IndexError).
            matching = [extension for extension in csr_extensions
                        if extension.get_short_name() == cert_extension.get_short_name()]
            if not matching or cert_extension.get_data() != matching[0].get_data():
                return False
        return True

    def _validate_signature_algorithms(self):
        # Returns the offending algorithm, or None when it is acceptable.
        if self.cert.get_signature_algorithm() not in self.signature_algorithms:
            return self.cert.get_signature_algorithm()

    def _validate_subject(self):
        # Compare (NID, value) pairs so aliases of the same OID match.
        expected_subject = [(OpenSSL._util.lib.OBJ_txt2nid(sub[0]), sub[1]) for sub in self.subject]
        cert_subject = self.cert.get_subject().get_components()
        current_subject = [(OpenSSL._util.lib.OBJ_txt2nid(sub[0]), sub[1]) for sub in cert_subject]
        if not compare_sets(expected_subject, current_subject, self.subject_strict):
            return expected_subject, current_subject

    def _validate_issuer(self):
        expected_issuer = [(OpenSSL._util.lib.OBJ_txt2nid(iss[0]), iss[1]) for iss in self.issuer]
        cert_issuer = self.cert.get_issuer().get_components()
        current_issuer = [(OpenSSL._util.lib.OBJ_txt2nid(iss[0]), iss[1]) for iss in cert_issuer]
        if not compare_sets(expected_issuer, current_issuer, self.issuer_strict):
            return self.issuer, cert_issuer

    def _validate_has_expired(self):
        # The following 3 lines are the same as the current PyOpenSSL code for cert.has_expired().
        # Older version of PyOpenSSL have a buggy implementation,
        # to avoid issues with those we added the code from a more recent release here.
        time_string = to_native(self.cert.get_notAfter())
        not_after = datetime.datetime.strptime(time_string, "%Y%m%d%H%M%SZ")
        cert_expired = not_after < datetime.datetime.utcnow()
        return cert_expired

    def _validate_version(self):
        # Version numbers in certs are off by one:
        # v1: 0, v2: 1, v3: 2 ...
        return self.cert.get_version() + 1

    def _validate_key_usage(self):
        found = False
        for extension_idx in range(0, self.cert.get_extension_count()):
            extension = self.cert.get_extension(extension_idx)
            if extension.get_short_name() == b'keyUsage':
                found = True
                # Round-trip the expected usages through an X509Extension so
                # both sides use OpenSSL's canonical textual form.
                expected_extension = crypto.X509Extension(b"keyUsage", False, b', '.join(self.key_usage))
                key_usage = [usage.strip() for usage in to_text(expected_extension, errors='surrogate_or_strict').split(',')]
                current_ku = [usage.strip() for usage in to_text(extension, errors='surrogate_or_strict').split(',')]
                if not compare_sets(key_usage, current_ku, self.key_usage_strict):
                    return self.key_usage, str(extension).split(', ')
        if not found:
            # This is only bad if the user specified a non-empty list
            if self.key_usage:
                return NO_EXTENSION

    def _validate_extended_key_usage(self):
        found = False
        for extension_idx in range(0, self.cert.get_extension_count()):
            extension = self.cert.get_extension(extension_idx)
            if extension.get_short_name() == b'extendedKeyUsage':
                found = True
                extKeyUsage = [OpenSSL._util.lib.OBJ_txt2nid(keyUsage) for keyUsage in self.extended_key_usage]
                current_xku = [OpenSSL._util.lib.OBJ_txt2nid(usage.strip()) for usage in
                               to_bytes(extension, errors='surrogate_or_strict').split(b',')]
                if not compare_sets(extKeyUsage, current_xku, self.extended_key_usage_strict):
                    return self.extended_key_usage, str(extension).split(', ')
        if not found:
            # This is only bad if the user specified a non-empty list
            if self.extended_key_usage:
                return NO_EXTENSION

    def _validate_subject_alt_name(self):
        found = False
        for extension_idx in range(0, self.cert.get_extension_count()):
            extension = self.cert.get_extension(extension_idx)
            if extension.get_short_name() == b'subjectAltName':
                found = True
                l_altnames = [pyopenssl_normalize_name_attribute(altname.strip()) for altname in
                              to_text(extension, errors='surrogate_or_strict').split(', ')]
                sans = [pyopenssl_normalize_name_attribute(to_text(san, errors='surrogate_or_strict')) for san in self.subject_alt_name]
                if not compare_sets(sans, l_altnames, self.subject_alt_name_strict):
                    return self.subject_alt_name, l_altnames
        if not found:
            # This is only bad if the user specified a non-empty list
            if self.subject_alt_name:
                return NO_EXTENSION

    def _validate_not_before(self):
        return self.cert.get_notBefore()

    def _validate_not_after(self):
        return self.cert.get_notAfter()

    def _validate_valid_at(self):
        # Resolve the (possibly relative) timestamp; pyopenssl compares
        # ASN.1 time strings as bytes.
        rt = get_relative_time_option(self.valid_at, "valid_at", backend=self.backend)
        rt = to_bytes(rt, errors='surrogate_or_strict')
        return self.cert.get_notBefore(), rt, self.cert.get_notAfter()

    def _validate_invalid_at(self):
        rt = get_relative_time_option(self.invalid_at, "invalid_at", backend=self.backend)
        rt = to_bytes(rt, errors='surrogate_or_strict')
        return self.cert.get_notBefore(), rt, self.cert.get_notAfter()

    def _validate_valid_in(self):
        valid_in_asn1 = get_relative_time_option(self.valid_in, "valid_in", backend=self.backend)
        valid_in_date = to_bytes(valid_in_asn1, errors='surrogate_or_strict')
        return self.cert.get_notBefore(), valid_in_date, self.cert.get_notAfter()
class EntrustCertificate(Certificate):
    """Retrieve a certificate using Entrust (ECS)."""

    def __init__(self, module, backend):
        super(EntrustCertificate, self).__init__(module, backend)
        self.trackingId = None
        self.notAfter = get_relative_time_option(module.params['entrust_not_after'], 'entrust_not_after', backend=self.backend)
        # A CSR is required: either inline content or an existing file.
        # BUGFIX: this previously used 'or', which raised whenever inline
        # content was absent -- even when the CSR file existed -- and
        # called os.path.exists(None) when only content was supplied.
        if self.csr_content is None and not os.path.exists(self.csr_path):
            raise CertificateError(
                'The certificate signing request file {0} does not exist'.format(self.csr_path)
            )
        self.csr = load_certificate_request(
            path=self.csr_path,
            content=self.csr_content,
            backend=self.backend,
        )
        # ECS API defaults to using the validated organization tied to the account.
        # We want to always force behavior of trying to use the organization provided in the CSR.
        # To that end we need to parse out the organization from the CSR.
        self.csr_org = None
        if self.backend == 'pyopenssl':
            csr_subject = self.csr.get_subject()
            csr_subject_components = csr_subject.get_components()
            for k, v in csr_subject_components:
                # get_components() yields bytes under Python 3; decode
                # before comparing so the 'O' RDN is actually detected.
                if to_text(k).upper() == 'O':
                    # Entrust does not support multiple validated organizations in a single certificate
                    if self.csr_org is not None:
                        module.fail_json(msg=("Entrust provider does not currently support multiple validated organizations. Multiple organizations found in "
                                              "Subject DN: '{0}'. ".format(csr_subject)))
                    else:
                        # Decode for consistency with the cryptography branch.
                        self.csr_org = to_text(v)
        elif self.backend == 'cryptography':
            csr_subject_orgs = self.csr.subject.get_attributes_for_oid(NameOID.ORGANIZATION_NAME)
            if len(csr_subject_orgs) == 1:
                self.csr_org = csr_subject_orgs[0].value
            elif len(csr_subject_orgs) > 1:
                module.fail_json(msg=("Entrust provider does not currently support multiple validated organizations. Multiple organizations found in "
                                      "Subject DN: '{0}'. ".format(self.csr.subject)))
        # If no organization in the CSR, explicitly tell ECS that it should be blank in issued cert, not defaulted to
        # organization tied to the account.
        if self.csr_org is None:
            self.csr_org = ''
        try:
            self.ecs_client = ECSClient(
                entrust_api_user=module.params.get('entrust_api_user'),
                entrust_api_key=module.params.get('entrust_api_key'),
                entrust_api_cert=module.params.get('entrust_api_client_cert_path'),
                entrust_api_cert_key=module.params.get('entrust_api_client_cert_key_path'),
                entrust_api_specification_path=module.params.get('entrust_api_specification_path')
            )
        except SessionConfigurationException as e:
            module.fail_json(msg='Failed to initialize Entrust Provider: {0}'.format(to_native(e.message)))

    def generate(self, module):
        """Request a new certificate from ECS when needed (or forced)."""
        if not self.check(module, perms_required=False) or self.force:
            # Read the CSR that was generated for us
            body = {}
            if self.csr_content is not None:
                body['csr'] = self.csr_content
            else:
                with open(self.csr_path, 'r') as csr_file:
                    body['csr'] = csr_file.read()
            body['certType'] = module.params['entrust_cert_type']
            # Handle expiration (365 days if not specified)
            expiry = self.notAfter
            if not expiry:
                gmt_now = datetime.datetime.fromtimestamp(time.mktime(time.gmtime()))
                expiry = gmt_now + datetime.timedelta(days=365)
            expiry_iso3339 = expiry.strftime("%Y-%m-%dT%H:%M:%S.00Z")
            body['certExpiryDate'] = expiry_iso3339
            body['org'] = self.csr_org
            body['tracking'] = {
                'requesterName': module.params['entrust_requester_name'],
                'requesterEmail': module.params['entrust_requester_email'],
                'requesterPhone': module.params['entrust_requester_phone'],
            }
            try:
                result = self.ecs_client.NewCertRequest(Body=body)
                self.trackingId = result.get('trackingId')
            except RestOperationException as e:
                module.fail_json(msg='Failed to request new certificate from Entrust Certificate Services (ECS): {0}'.format(to_native(e.message)))
            if self.backup:
                self.backup_file = module.backup_local(self.path)
            write_file(module, to_bytes(result.get('endEntityCert')))
            self.cert = load_certificate(self.path, backend=self.backend)
            self.changed = True

    def check(self, module, perms_required=True):
        """Ensure the resource is in its desired state."""
        parent_check = super(EntrustCertificate, self).check(module, perms_required)
        try:
            cert_details = self._get_cert_details()
        except RestOperationException as e:
            module.fail_json(msg='Failed to get status of existing certificate from Entrust Certificate Services (ECS): {0}.'.format(to_native(e.message)))
        # Always issue a new certificate if the certificate is expired, suspended or revoked
        status = cert_details.get('status', False)
        if status == 'EXPIRED' or status == 'SUSPENDED' or status == 'REVOKED':
            return False
        # If the requested cert type was specified and it is for a different certificate type than the initial certificate, a new one is needed
        if module.params['entrust_cert_type'] and cert_details.get('certType') and module.params['entrust_cert_type'] != cert_details.get('certType'):
            return False
        return parent_check

    def _get_cert_details(self):
        """Look up ECS-side metadata for the locally loaded certificate."""
        cert_details = {}
        if self.cert:
            serial_number = None
            expiry = None
            if self.backend == 'pyopenssl':
                serial_number = "{0:X}".format(self.cert.get_serial_number())
                time_string = to_native(self.cert.get_notAfter())
                expiry = datetime.datetime.strptime(time_string, "%Y%m%d%H%M%SZ")
            elif self.backend == 'cryptography':
                serial_number = "{0:X}".format(cryptography_serial_number_of_cert(self.cert))
                expiry = self.cert.not_valid_after
            # get some information about the expiry of this certificate
            expiry_iso3339 = expiry.strftime("%Y-%m-%dT%H:%M:%S.00Z")
            cert_details['expiresAfter'] = expiry_iso3339
            # If a trackingId is not already defined (from the result of a generate)
            # use the serial number to identify the tracking Id
            if self.trackingId is None and serial_number is not None:
                cert_results = self.ecs_client.GetCertificates(serialNumber=serial_number).get('certificates', {})
                # Finding 0 or more than 1 result is a very unlikely use case, it simply means we cannot perform additional checks
                # on the 'state' as returned by Entrust Certificate Services (ECS). The general certificate validity is
                # still checked as it is in the rest of the module.
                if len(cert_results) == 1:
                    self.trackingId = cert_results[0].get('trackingId')
        if self.trackingId is not None:
            cert_details.update(self.ecs_client.GetCertificate(trackingId=self.trackingId))
        return cert_details

    def dump(self, check_mode=False):
        """Build the result dictionary, enriched with ECS metadata."""
        result = {
            'changed': self.changed,
            'filename': self.path,
            'privatekey': self.privatekey_path,
            'csr': self.csr_path,
        }
        if self.backup_file:
            result['backup_file'] = self.backup_file
        if self.return_content:
            content = load_file_if_exists(self.path, ignore_errors=True)
            result['certificate'] = content.decode('utf-8') if content else None
        result.update(self._get_cert_details())
        return result
class AcmeCertificate(Certificate):
    """Retrieve a certificate using the ACME protocol."""
    # Since there's no real use of the backend,
    # other than the 'self.check' function, we just pass the backend to the constructor
    def __init__(self, module, backend):
        super(AcmeCertificate, self).__init__(module, backend)
        # Paths and settings for driving the external 'acme-tiny' client.
        self.accountkey_path = module.params['acme_accountkey_path']
        self.challenge_path = module.params['acme_challenge_path']
        self.use_chain = module.params['acme_chain']
        self.acme_directory = module.params['acme_directory']
    def generate(self, module):
        """Obtain a certificate by invoking the 'acme-tiny' binary.

        Validates that the CSR, account key and challenge directory are
        available, then shells out to acme-tiny and writes the resulting
        certificate to self.path.
        """
        if self.csr_content is None and not os.path.exists(self.csr_path):
            raise CertificateError(
                'The certificate signing request file %s does not exist' % self.csr_path
            )
        if not os.path.exists(self.accountkey_path):
            raise CertificateError(
                'The account key %s does not exist' % self.accountkey_path
            )
        if not os.path.exists(self.challenge_path):
            raise CertificateError(
                'The challenge path %s does not exist' % self.challenge_path
            )
        if not self.check(module, perms_required=False) or self.force:
            acme_tiny_path = self.module.get_bin_path('acme-tiny', required=True)
            command = [acme_tiny_path]
            if self.use_chain:
                command.append('--chain')
            command.extend(['--account-key', self.accountkey_path])
            if self.csr_content is not None:
                # We need to temporarily write the CSR to disk
                fd, tmpsrc = tempfile.mkstemp()
                module.add_cleanup_file(tmpsrc)  # Ansible will delete the file on exit
                f = os.fdopen(fd, 'wb')
                try:
                    f.write(self.csr_content)
                except Exception as err:
                    # Close best-effort before failing; fail_json exits.
                    try:
                        f.close()
                    except Exception as dummy:
                        pass
                    module.fail_json(
                        msg="failed to create temporary CSR file: %s" % to_native(err),
                        exception=traceback.format_exc()
                    )
                f.close()
                command.extend(['--csr', tmpsrc])
            else:
                command.extend(['--csr', self.csr_path])
            command.extend(['--acme-dir', self.challenge_path])
            command.extend(['--directory-url', self.acme_directory])
            try:
                # run_command returns (rc, stdout, stderr); stdout is the cert.
                crt = module.run_command(command, check_rc=True)[1]
                if self.backup:
                    self.backup_file = module.backup_local(self.path)
                write_file(module, to_bytes(crt))
                self.changed = True
            except OSError as exc:
                raise CertificateError(exc)
        file_args = module.load_file_common_arguments(module.params)
        if module.set_fs_attributes_if_different(file_args, False):
            self.changed = True
    def dump(self, check_mode=False):
        """Build the result dictionary returned to Ansible."""
        result = {
            'changed': self.changed,
            'filename': self.path,
            'privatekey': self.privatekey_path,
            'accountkey': self.accountkey_path,
            'csr': self.csr_path,
        }
        if self.backup_file:
            result['backup_file'] = self.backup_file
        if self.return_content:
            content = load_file_if_exists(self.path, ignore_errors=True)
            result['certificate'] = content.decode('utf-8') if content else None
        return result
def main():
    """Entry point of the x509_certificate module.

    Builds the argument spec, validates provider-specific option
    combinations, selects a crypto backend (cryptography or pyOpenSSL),
    instantiates the matching Certificate implementation, and applies the
    requested state (generate or remove), honouring check mode.
    """
    module = AnsibleModule(
        argument_spec=dict(
            state=dict(type='str', default='present', choices=['present', 'absent']),
            path=dict(type='path', required=True),
            provider=dict(type='str', choices=['acme', 'assertonly', 'entrust', 'ownca', 'selfsigned']),
            force=dict(type='bool', default=False,),
            csr_path=dict(type='path'),
            csr_content=dict(type='str'),
            backup=dict(type='bool', default=False),
            select_crypto_backend=dict(type='str', default='auto', choices=['auto', 'cryptography', 'pyopenssl']),
            return_content=dict(type='bool', default=False),

            # General properties of a certificate
            privatekey_path=dict(type='path'),
            privatekey_content=dict(type='str', no_log=True),
            privatekey_passphrase=dict(type='str', no_log=True),

            # provider: assertonly (deprecated, removed in community.crypto 2.0.0)
            signature_algorithms=dict(type='list', elements='str', removed_in_version='2.0.0', removed_from_collection='community.crypto'),
            subject=dict(type='dict', removed_in_version='2.0.0', removed_from_collection='community.crypto'),
            subject_strict=dict(type='bool', default=False, removed_in_version='2.0.0', removed_from_collection='community.crypto'),
            issuer=dict(type='dict', removed_in_version='2.0.0', removed_from_collection='community.crypto'),
            issuer_strict=dict(type='bool', default=False, removed_in_version='2.0.0', removed_from_collection='community.crypto'),
            has_expired=dict(type='bool', default=False, removed_in_version='2.0.0', removed_from_collection='community.crypto'),
            version=dict(type='int', removed_in_version='2.0.0', removed_from_collection='community.crypto'),
            key_usage=dict(type='list', elements='str', aliases=['keyUsage'],
                           removed_in_version='2.0.0', removed_from_collection='community.crypto'),
            key_usage_strict=dict(type='bool', default=False, aliases=['keyUsage_strict'],
                                  removed_in_version='2.0.0', removed_from_collection='community.crypto'),
            extended_key_usage=dict(type='list', elements='str', aliases=['extendedKeyUsage'],
                                    removed_in_version='2.0.0', removed_from_collection='community.crypto'),
            extended_key_usage_strict=dict(type='bool', default=False, aliases=['extendedKeyUsage_strict'],
                                           removed_in_version='2.0.0', removed_from_collection='community.crypto'),
            subject_alt_name=dict(type='list', elements='str', aliases=['subjectAltName'],
                                  removed_in_version='2.0.0', removed_from_collection='community.crypto'),
            subject_alt_name_strict=dict(type='bool', default=False, aliases=['subjectAltName_strict'],
                                         removed_in_version='2.0.0', removed_from_collection='community.crypto'),
            not_before=dict(type='str', aliases=['notBefore'], removed_in_version='2.0.0', removed_from_collection='community.crypto'),
            not_after=dict(type='str', aliases=['notAfter'], removed_in_version='2.0.0', removed_from_collection='community.crypto'),
            valid_at=dict(type='str', removed_in_version='2.0.0', removed_from_collection='community.crypto'),
            invalid_at=dict(type='str', removed_in_version='2.0.0', removed_from_collection='community.crypto'),
            valid_in=dict(type='str', removed_in_version='2.0.0', removed_from_collection='community.crypto'),

            # provider: selfsigned
            selfsigned_version=dict(type='int', default=3),
            selfsigned_digest=dict(type='str', default='sha256'),
            selfsigned_not_before=dict(type='str', default='+0s', aliases=['selfsigned_notBefore']),
            selfsigned_not_after=dict(type='str', default='+3650d', aliases=['selfsigned_notAfter']),
            selfsigned_create_subject_key_identifier=dict(
                type='str',
                default='create_if_not_provided',
                choices=['create_if_not_provided', 'always_create', 'never_create']
            ),

            # provider: ownca
            ownca_path=dict(type='path'),
            ownca_content=dict(type='str'),
            ownca_privatekey_path=dict(type='path'),
            ownca_privatekey_content=dict(type='str', no_log=True),
            ownca_privatekey_passphrase=dict(type='str', no_log=True),
            ownca_digest=dict(type='str', default='sha256'),
            ownca_version=dict(type='int', default=3),
            ownca_not_before=dict(type='str', default='+0s'),
            ownca_not_after=dict(type='str', default='+3650d'),
            ownca_create_subject_key_identifier=dict(
                type='str',
                default='create_if_not_provided',
                choices=['create_if_not_provided', 'always_create', 'never_create']
            ),
            ownca_create_authority_key_identifier=dict(type='bool', default=True),

            # provider: acme
            acme_accountkey_path=dict(type='path'),
            acme_challenge_path=dict(type='path'),
            acme_chain=dict(type='bool', default=False),
            acme_directory=dict(type='str', default="https://acme-v02.api.letsencrypt.org/directory"),

            # provider: entrust
            entrust_cert_type=dict(type='str', default='STANDARD_SSL',
                                   choices=['STANDARD_SSL', 'ADVANTAGE_SSL', 'UC_SSL', 'EV_SSL', 'WILDCARD_SSL',
                                            'PRIVATE_SSL', 'PD_SSL', 'CDS_ENT_LITE', 'CDS_ENT_PRO', 'SMIME_ENT']),
            entrust_requester_email=dict(type='str'),
            entrust_requester_name=dict(type='str'),
            entrust_requester_phone=dict(type='str'),
            entrust_api_user=dict(type='str'),
            entrust_api_key=dict(type='str', no_log=True),
            entrust_api_client_cert_path=dict(type='path'),
            entrust_api_client_cert_key_path=dict(type='path', no_log=True),
            entrust_api_specification_path=dict(type='path', default='https://cloud.entrust.net/EntrustCloud/documentation/cms-api-2.1.0.yaml'),
            entrust_not_after=dict(type='str', default='+365d'),
        ),
        supports_check_mode=True,
        add_file_common_args=True,
        required_if=[
            ['state', 'present', ['provider']],
            ['provider', 'entrust', ['entrust_requester_email', 'entrust_requester_name', 'entrust_requester_phone',
                                     'entrust_api_user', 'entrust_api_key', 'entrust_api_client_cert_path',
                                     'entrust_api_client_cert_key_path']],
        ],
        mutually_exclusive=[
            ['csr_path', 'csr_content'],
            ['privatekey_path', 'privatekey_content'],
            ['ownca_path', 'ownca_content'],
            ['ownca_privatekey_path', 'ownca_privatekey_content'],
        ],
    )

    # Warn when invoked under the old module name.
    if module._name == 'community.crypto.openssl_certificate':
        module.deprecate("The 'community.crypto.openssl_certificate' module has been renamed to 'community.crypto.x509_certificate'",
                         version='2.0.0', collection_name='community.crypto')

    try:
        if module.params['state'] == 'absent':
            certificate = CertificateAbsent(module)

        else:
            # Cross-option validation that required_if/mutually_exclusive cannot express.
            if module.params['provider'] != 'assertonly' and module.params['csr_path'] is None and module.params['csr_content'] is None:
                module.fail_json(msg='csr_path or csr_content is required when provider is not assertonly')

            base_dir = os.path.dirname(module.params['path']) or '.'
            if not os.path.isdir(base_dir):
                module.fail_json(
                    name=base_dir,
                    msg='The directory %s does not exist or the file is not a directory' % base_dir
                )

            # Per-provider option validation.
            provider = module.params['provider']
            if provider == 'assertonly':
                module.deprecate("The 'assertonly' provider is deprecated; please see the examples of "
                                 "the 'x509_certificate' module on how to replace it with other modules",
                                 version='2.0.0', collection_name='community.crypto')
            elif provider == 'selfsigned':
                if module.params['privatekey_path'] is None and module.params['privatekey_content'] is None:
                    module.fail_json(msg='One of privatekey_path and privatekey_content must be specified for the selfsigned provider.')
            elif provider == 'acme':
                if module.params['acme_accountkey_path'] is None:
                    module.fail_json(msg='The acme_accountkey_path option must be specified for the acme provider.')
                if module.params['acme_challenge_path'] is None:
                    module.fail_json(msg='The acme_challenge_path option must be specified for the acme provider.')
            elif provider == 'ownca':
                if module.params['ownca_path'] is None and module.params['ownca_content'] is None:
                    module.fail_json(msg='One of ownca_path and ownca_content must be specified for the ownca provider.')
                if module.params['ownca_privatekey_path'] is None and module.params['ownca_privatekey_content'] is None:
                    module.fail_json(msg='One of ownca_privatekey_path and ownca_privatekey_content must be specified for the ownca provider.')

            backend = module.params['select_crypto_backend']
            if backend == 'auto':
                # Detect what backend we can use
                can_use_cryptography = CRYPTOGRAPHY_FOUND and CRYPTOGRAPHY_VERSION >= LooseVersion(MINIMAL_CRYPTOGRAPHY_VERSION)
                can_use_pyopenssl = PYOPENSSL_FOUND and PYOPENSSL_VERSION >= LooseVersion(MINIMAL_PYOPENSSL_VERSION)

                # If cryptography is available we'll use it
                if can_use_cryptography:
                    backend = 'cryptography'
                elif can_use_pyopenssl:
                    backend = 'pyopenssl'

                # v2 certificates are only supported by pyOpenSSL; force that backend.
                if module.params['selfsigned_version'] == 2 or module.params['ownca_version'] == 2:
                    module.warn('crypto backend forced to pyopenssl. The cryptography library does not support v2 certificates')
                    backend = 'pyopenssl'

                # Fail if no backend has been found
                if backend == 'auto':
                    module.fail_json(msg=("Can't detect any of the required Python libraries "
                                          "cryptography (>= {0}) or PyOpenSSL (>= {1})").format(
                                              MINIMAL_CRYPTOGRAPHY_VERSION,
                                              MINIMAL_PYOPENSSL_VERSION))

            if backend == 'pyopenssl':
                if not PYOPENSSL_FOUND:
                    module.fail_json(msg=missing_required_lib('pyOpenSSL >= {0}'.format(MINIMAL_PYOPENSSL_VERSION)),
                                     exception=PYOPENSSL_IMP_ERR)
                if module.params['provider'] in ['selfsigned', 'ownca', 'assertonly']:
                    # get_extensions only exists in pyOpenSSL >= 0.15.
                    try:
                        getattr(crypto.X509Req, 'get_extensions')
                    except AttributeError:
                        module.fail_json(msg='You need to have PyOpenSSL>=0.15')

                module.deprecate('The module is using the PyOpenSSL backend. This backend has been deprecated',
                                 version='2.0.0', collection_name='community.crypto')
                if provider == 'selfsigned':
                    certificate = SelfSignedCertificate(module)
                elif provider == 'acme':
                    certificate = AcmeCertificate(module, 'pyopenssl')
                elif provider == 'ownca':
                    certificate = OwnCACertificate(module)
                elif provider == 'entrust':
                    certificate = EntrustCertificate(module, 'pyopenssl')
                else:
                    certificate = AssertOnlyCertificate(module)
            elif backend == 'cryptography':
                if not CRYPTOGRAPHY_FOUND:
                    module.fail_json(msg=missing_required_lib('cryptography >= {0}'.format(MINIMAL_CRYPTOGRAPHY_VERSION)),
                                     exception=CRYPTOGRAPHY_IMP_ERR)
                if module.params['selfsigned_version'] == 2 or module.params['ownca_version'] == 2:
                    module.fail_json(msg='The cryptography backend does not support v2 certificates, '
                                         'use select_crypto_backend=pyopenssl for v2 certificates')
                if provider == 'selfsigned':
                    certificate = SelfSignedCertificateCryptography(module)
                elif provider == 'acme':
                    certificate = AcmeCertificate(module, 'cryptography')
                elif provider == 'ownca':
                    certificate = OwnCACertificateCryptography(module)
                elif provider == 'entrust':
                    certificate = EntrustCertificate(module, 'cryptography')
                else:
                    certificate = AssertOnlyCertificateCryptography(module)

        if module.params['state'] == 'present':
            if module.check_mode:
                # Report what would change without touching the filesystem.
                result = certificate.dump(check_mode=True)
                result['changed'] = module.params['force'] or not certificate.check(module)
                module.exit_json(**result)

            certificate.generate(module)
        else:
            if module.check_mode:
                result = certificate.dump(check_mode=True)
                result['changed'] = os.path.exists(module.params['path'])
                module.exit_json(**result)

            certificate.remove(module)

        result = certificate.dump()
        module.exit_json(**result)
    except OpenSSLObjectError as exc:
        module.fail_json(msg=to_native(exc))


if __name__ == "__main__":
    main()
|
4295_6
|
crossvul
|
py
|
CWE-116
|
Improper Encoding or Escaping of Output - Improper encoding or escaping can allow attackers to change the commands that are sent to another component, inserting malicious commands instead.
|
python
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2019, Felix Fontein <felix@fontein.de>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
module: x509_crl
version_added: '1.0.0'
short_description: Generate Certificate Revocation Lists (CRLs)
description:
- This module allows one to (re)generate or update Certificate Revocation Lists (CRLs).
- Certificates on the revocation list can be either specified via serial number and (optionally) their issuer,
or as a path to a certificate file in PEM format.
requirements:
- cryptography >= 1.2
author:
- Felix Fontein (@felixfontein)
options:
state:
description:
- Whether the CRL file should exist or not, taking action if the state is different from what is stated.
type: str
default: present
choices: [ absent, present ]
mode:
description:
- Defines how to process entries of existing CRLs.
- If set to C(generate), makes sure that the CRL has the exact set of revoked certificates
as specified in I(revoked_certificates).
- If set to C(update), makes sure that the CRL contains the revoked certificates from
I(revoked_certificates), but can also contain other revoked certificates. If the CRL file
already exists, all entries from the existing CRL will also be included in the new CRL.
When using C(update), you might be interested in setting I(ignore_timestamps) to C(yes).
type: str
default: generate
choices: [ generate, update ]
force:
description:
- Should the CRL be forced to be regenerated.
type: bool
default: no
backup:
description:
- Create a backup file including a timestamp so you can get the original
CRL back if you overwrote it with a new one by accident.
type: bool
default: no
path:
description:
- Remote absolute path where the generated CRL file should be created or is already located.
type: path
required: yes
format:
description:
- Whether the CRL file should be in PEM or DER format.
- If an existing CRL file does match everything but I(format), it will be converted to the correct format
instead of regenerated.
type: str
choices: [pem, der]
default: pem
privatekey_path:
description:
- Path to the CA's private key to use when signing the CRL.
- Either I(privatekey_path) or I(privatekey_content) must be specified if I(state) is C(present), but not both.
type: path
privatekey_content:
description:
- The content of the CA's private key to use when signing the CRL.
- Either I(privatekey_path) or I(privatekey_content) must be specified if I(state) is C(present), but not both.
type: str
privatekey_passphrase:
description:
- The passphrase for the I(privatekey_path).
- This is required if the private key is password protected.
type: str
issuer:
description:
- Key/value pairs that will be present in the issuer name field of the CRL.
- If you need to specify more than one value with the same key, use a list as value.
- Required if I(state) is C(present).
type: dict
last_update:
description:
- The point in time from which this CRL can be trusted.
- Time can be specified either as relative time or as absolute timestamp.
- Time will always be interpreted as UTC.
- Valid format is C([+-]timespec | ASN.1 TIME) where timespec can be an integer
+ C([w | d | h | m | s]) (e.g. C(+32w1d2h)).
- Note that if using relative time this module is NOT idempotent, except when
I(ignore_timestamps) is set to C(yes).
type: str
default: "+0s"
next_update:
description:
- "The absolute latest point in time by which this I(issuer) is expected to have issued
another CRL. Many clients will treat a CRL as expired once I(next_update) occurs."
- Time can be specified either as relative time or as absolute timestamp.
- Time will always be interpreted as UTC.
- Valid format is C([+-]timespec | ASN.1 TIME) where timespec can be an integer
+ C([w | d | h | m | s]) (e.g. C(+32w1d2h)).
- Note that if using relative time this module is NOT idempotent, except when
I(ignore_timestamps) is set to C(yes).
- Required if I(state) is C(present).
type: str
digest:
description:
- Digest algorithm to be used when signing the CRL.
type: str
default: sha256
revoked_certificates:
description:
- List of certificates to be revoked.
- Required if I(state) is C(present).
type: list
elements: dict
suboptions:
path:
description:
- Path to a certificate in PEM format.
- The serial number and issuer will be extracted from the certificate.
- Mutually exclusive with I(content) and I(serial_number). One of these three options
must be specified.
type: path
content:
description:
- Content of a certificate in PEM format.
- The serial number and issuer will be extracted from the certificate.
- Mutually exclusive with I(path) and I(serial_number). One of these three options
must be specified.
type: str
serial_number:
description:
- Serial number of the certificate.
- Mutually exclusive with I(path) and I(content). One of these three options must
be specified.
type: int
revocation_date:
description:
- The point in time the certificate was revoked.
- Time can be specified either as relative time or as absolute timestamp.
- Time will always be interpreted as UTC.
- Valid format is C([+-]timespec | ASN.1 TIME) where timespec can be an integer
+ C([w | d | h | m | s]) (e.g. C(+32w1d2h)).
- Note that if using relative time this module is NOT idempotent, except when
I(ignore_timestamps) is set to C(yes).
type: str
default: "+0s"
issuer:
description:
- The certificate's issuer.
- "Example: C(DNS:ca.example.org)"
type: list
elements: str
issuer_critical:
description:
- Whether the certificate issuer extension should be critical.
type: bool
default: no
reason:
description:
- The value for the revocation reason extension.
type: str
choices:
- unspecified
- key_compromise
- ca_compromise
- affiliation_changed
- superseded
- cessation_of_operation
- certificate_hold
- privilege_withdrawn
- aa_compromise
- remove_from_crl
reason_critical:
description:
- Whether the revocation reason extension should be critical.
type: bool
default: no
invalidity_date:
description:
- The point in time it was known/suspected that the private key was compromised
or that the certificate otherwise became invalid.
- Time can be specified either as relative time or as absolute timestamp.
- Time will always be interpreted as UTC.
- Valid format is C([+-]timespec | ASN.1 TIME) where timespec can be an integer
+ C([w | d | h | m | s]) (e.g. C(+32w1d2h)).
- Note that if using relative time this module is NOT idempotent. This will NOT
change when I(ignore_timestamps) is set to C(yes).
type: str
invalidity_date_critical:
description:
- Whether the invalidity date extension should be critical.
type: bool
default: no
ignore_timestamps:
description:
- Whether the timestamps I(last_update), I(next_update) and I(revocation_date) (in
I(revoked_certificates)) should be ignored for idempotency checks. The timestamp
I(invalidity_date) in I(revoked_certificates) will never be ignored.
- Use this in combination with relative timestamps for these values to get idempotency.
type: bool
default: no
return_content:
description:
- If set to C(yes), will return the (current or generated) CRL's content as I(crl).
type: bool
default: no
extends_documentation_fragment:
- files
notes:
- All ASN.1 TIME values should be specified following the YYYYMMDDHHMMSSZ pattern.
- Date specified should be UTC. Minutes and seconds are mandatory.
'''
EXAMPLES = r'''
- name: Generate a CRL
community.crypto.x509_crl:
path: /etc/ssl/my-ca.crl
privatekey_path: /etc/ssl/private/my-ca.pem
issuer:
CN: My CA
last_update: "+0s"
next_update: "+7d"
revoked_certificates:
- serial_number: 1234
revocation_date: 20190331202428Z
issuer:
CN: My CA
- serial_number: 2345
revocation_date: 20191013152910Z
reason: affiliation_changed
invalidity_date: 20191001000000Z
- path: /etc/ssl/crt/revoked-cert.pem
revocation_date: 20191010010203Z
'''
RETURN = r'''
filename:
description: Path to the generated CRL
returned: changed or success
type: str
sample: /path/to/my-ca.crl
backup_file:
description: Name of backup file created.
returned: changed and if I(backup) is C(yes)
type: str
sample: /path/to/my-ca.crl.2019-03-09@11:22~
privatekey:
description: Path to the private CA key
returned: changed or success
type: str
sample: /path/to/my-ca.pem
format:
description:
- Whether the CRL is in PEM format (C(pem)) or in DER format (C(der)).
returned: success
type: str
sample: pem
issuer:
description:
- The CRL's issuer.
- Note that for repeated values, only the last one will be returned.
returned: success
type: dict
sample: '{"organizationName": "Ansible", "commonName": "ca.example.com"}'
issuer_ordered:
description: The CRL's issuer as an ordered list of tuples.
returned: success
type: list
elements: list
sample: '[["organizationName", "Ansible"], ["commonName", "ca.example.com"]]'
last_update:
description: The point in time from which this CRL can be trusted as ASN.1 TIME.
returned: success
type: str
sample: 20190413202428Z
next_update:
description: The point in time from which a new CRL will be issued and the client has to check for it as ASN.1 TIME.
returned: success
type: str
sample: 20190413202428Z
digest:
description: The signature algorithm used to sign the CRL.
returned: success
type: str
sample: sha256WithRSAEncryption
revoked_certificates:
description: List of certificates to be revoked.
returned: success
type: list
elements: dict
contains:
serial_number:
description: Serial number of the certificate.
type: int
sample: 1234
revocation_date:
description: The point in time the certificate was revoked as ASN.1 TIME.
type: str
sample: 20190413202428Z
issuer:
description: The certificate's issuer.
type: list
elements: str
sample: '["DNS:ca.example.org"]'
issuer_critical:
description: Whether the certificate issuer extension is critical.
type: bool
sample: no
reason:
description:
- The value for the revocation reason extension.
- One of C(unspecified), C(key_compromise), C(ca_compromise), C(affiliation_changed), C(superseded),
C(cessation_of_operation), C(certificate_hold), C(privilege_withdrawn), C(aa_compromise), and
C(remove_from_crl).
type: str
sample: key_compromise
reason_critical:
description: Whether the revocation reason extension is critical.
type: bool
sample: no
invalidity_date:
description: |
The point in time it was known/suspected that the private key was compromised
or that the certificate otherwise became invalid as ASN.1 TIME.
type: str
sample: 20190413202428Z
invalidity_date_critical:
description: Whether the invalidity date extension is critical.
type: bool
sample: no
crl:
description:
- The (current or generated) CRL's content.
- Will be the CRL itself if I(format) is C(pem), and Base64 of the
CRL if I(format) is C(der).
returned: if I(state) is C(present) and I(return_content) is C(yes)
type: str
'''
import base64
import os
import traceback
from distutils.version import LooseVersion
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils._text import to_native, to_text
from ansible_collections.community.crypto.plugins.module_utils.io import (
write_file,
)
from ansible_collections.community.crypto.plugins.module_utils.crypto.basic import (
OpenSSLObjectError,
OpenSSLBadPassphraseError,
)
from ansible_collections.community.crypto.plugins.module_utils.crypto.support import (
OpenSSLObject,
load_privatekey,
load_certificate,
parse_name_field,
get_relative_time_option,
select_message_digest,
)
from ansible_collections.community.crypto.plugins.module_utils.crypto.cryptography_support import (
cryptography_get_name,
cryptography_name_to_oid,
cryptography_oid_to_name,
cryptography_serial_number_of_cert,
)
from ansible_collections.community.crypto.plugins.module_utils.crypto.cryptography_crl import (
REVOCATION_REASON_MAP,
TIMESTAMP_FORMAT,
cryptography_decode_revoked_certificate,
cryptography_dump_revoked,
cryptography_get_signature_algorithm_oid_from_crl,
)
from ansible_collections.community.crypto.plugins.module_utils.crypto.identify import (
identify_pem_format,
)
# Minimum cryptography library version this module supports.
MINIMAL_CRYPTOGRAPHY_VERSION = '1.2'

# Guarded import: remember the traceback so main() can report it via
# missing_required_lib() if cryptography is unavailable.
CRYPTOGRAPHY_IMP_ERR = None
try:
    import cryptography
    from cryptography import x509
    from cryptography.hazmat.backends import default_backend
    from cryptography.hazmat.primitives.serialization import Encoding
    from cryptography.x509 import (
        CertificateRevocationListBuilder,
        RevokedCertificateBuilder,
        NameAttribute,
        Name,
    )
    CRYPTOGRAPHY_VERSION = LooseVersion(cryptography.__version__)
except ImportError:
    CRYPTOGRAPHY_IMP_ERR = traceback.format_exc()
    CRYPTOGRAPHY_FOUND = False
else:
    CRYPTOGRAPHY_FOUND = True
class CRLError(OpenSSLObjectError):
    """Raised for CRL-specific failures (unsupported digest, bad passphrase, ...)."""
    pass
class CRL(OpenSSLObject):
    """Represents an X.509 Certificate Revocation List on disk.

    Loads any existing CRL at ``path`` on construction, checks whether it
    matches the requested parameters (``check``), and can (re)generate,
    convert, or remove it. Uses the cryptography library exclusively.
    """

    def __init__(self, module):
        """Parse module parameters, load the CA key and any existing CRL file."""
        super(CRL, self).__init__(
            module.params['path'],
            module.params['state'],
            module.params['force'],
            module.check_mode
        )

        self.format = module.params['format']
        self.update = module.params['mode'] == 'update'
        self.ignore_timestamps = module.params['ignore_timestamps']
        self.return_content = module.params['return_content']
        self.crl_content = None

        self.privatekey_path = module.params['privatekey_path']
        self.privatekey_content = module.params['privatekey_content']
        if self.privatekey_content is not None:
            self.privatekey_content = self.privatekey_content.encode('utf-8')
        self.privatekey_passphrase = module.params['privatekey_passphrase']

        # Keep only issuer components that have a non-empty value.
        self.issuer = parse_name_field(module.params['issuer'])
        self.issuer = [(entry[0], entry[1]) for entry in self.issuer if entry[1]]

        self.last_update = get_relative_time_option(module.params['last_update'], 'last_update')
        self.next_update = get_relative_time_option(module.params['next_update'], 'next_update')

        self.digest = select_message_digest(module.params['digest'])
        if self.digest is None:
            raise CRLError('The digest "{0}" is not supported'.format(module.params['digest']))

        # Normalize each requested revocation into a plain dict; the serial
        # number either comes directly from the options or is extracted from
        # a certificate given by path/content.
        self.revoked_certificates = []
        for i, rc in enumerate(module.params['revoked_certificates']):
            result = {
                'serial_number': None,
                'revocation_date': None,
                'issuer': None,
                'issuer_critical': False,
                'reason': None,
                'reason_critical': False,
                'invalidity_date': None,
                'invalidity_date_critical': False,
            }
            path_prefix = 'revoked_certificates[{0}].'.format(i)
            if rc['path'] is not None or rc['content'] is not None:
                # Load certificate from file or content
                try:
                    if rc['content'] is not None:
                        rc['content'] = rc['content'].encode('utf-8')
                    cert = load_certificate(rc['path'], content=rc['content'], backend='cryptography')
                    result['serial_number'] = cryptography_serial_number_of_cert(cert)
                except OpenSSLObjectError as e:
                    if rc['content'] is not None:
                        module.fail_json(
                            msg='Cannot parse certificate from {0}content: {1}'.format(path_prefix, to_native(e))
                        )
                    else:
                        module.fail_json(
                            msg='Cannot read certificate "{1}" from {0}path: {2}'.format(path_prefix, rc['path'], to_native(e))
                        )
            else:
                # Specify serial_number (and potentially issuer) directly
                result['serial_number'] = rc['serial_number']
            # All other options
            if rc['issuer']:
                result['issuer'] = [cryptography_get_name(issuer) for issuer in rc['issuer']]
                result['issuer_critical'] = rc['issuer_critical']
            result['revocation_date'] = get_relative_time_option(
                rc['revocation_date'],
                path_prefix + 'revocation_date'
            )
            if rc['reason']:
                result['reason'] = REVOCATION_REASON_MAP[rc['reason']]
                result['reason_critical'] = rc['reason_critical']
            if rc['invalidity_date']:
                result['invalidity_date'] = get_relative_time_option(
                    rc['invalidity_date'],
                    path_prefix + 'invalidity_date'
                )
                result['invalidity_date_critical'] = rc['invalidity_date_critical']
            self.revoked_certificates.append(result)

        self.module = module

        self.backup = module.params['backup']
        self.backup_file = None

        try:
            self.privatekey = load_privatekey(
                path=self.privatekey_path,
                content=self.privatekey_content,
                passphrase=self.privatekey_passphrase,
                backend='cryptography'
            )
        except OpenSSLBadPassphraseError as exc:
            raise CRLError(exc)

        # Try to read and parse an existing CRL; on any failure treat the
        # file as absent/unusable and assume the requested format.
        self.crl = None
        try:
            with open(self.path, 'rb') as f:
                data = f.read()
            self.actual_format = 'pem' if identify_pem_format(data) else 'der'
            if self.actual_format == 'pem':
                self.crl = x509.load_pem_x509_crl(data, default_backend())
                if self.return_content:
                    self.crl_content = data
            else:
                self.crl = x509.load_der_x509_crl(data, default_backend())
                if self.return_content:
                    # DER is binary, so the returned content is base64.
                    self.crl_content = base64.b64encode(data)
        except Exception as dummy:
            self.crl_content = None
            self.actual_format = self.format

    def remove(self):
        """Delete the CRL file, creating a backup first if requested."""
        if self.backup:
            self.backup_file = self.module.backup_local(self.path)
        super(CRL, self).remove(self.module)

    def _compress_entry(self, entry):
        """Reduce a revocation entry to a comparable tuple.

        When ignore_timestamps is set, the revocation date is left out so
        relative timestamps do not defeat idempotency checks; the
        invalidity date is always kept.
        """
        if self.ignore_timestamps:
            # Throw out revocation_date
            return (
                entry['serial_number'],
                tuple(entry['issuer']) if entry['issuer'] is not None else None,
                entry['issuer_critical'],
                entry['reason'],
                entry['reason_critical'],
                entry['invalidity_date'],
                entry['invalidity_date_critical'],
            )
        else:
            return (
                entry['serial_number'],
                entry['revocation_date'],
                tuple(entry['issuer']) if entry['issuer'] is not None else None,
                entry['issuer_critical'],
                entry['reason'],
                entry['reason_critical'],
                entry['invalidity_date'],
                entry['invalidity_date_critical'],
            )

    def check(self, perms_required=True, ignore_conversion=True):
        """Ensure the resource is in its desired state."""
        state_and_perms = super(CRL, self).check(self.module, perms_required)

        if not state_and_perms:
            return False

        if self.crl is None:
            return False

        # Timestamps only matter when not explicitly ignored.
        if self.last_update != self.crl.last_update and not self.ignore_timestamps:
            return False
        if self.next_update != self.crl.next_update and not self.ignore_timestamps:
            return False
        if self.digest.name != self.crl.signature_hash_algorithm.name:
            return False

        want_issuer = [(cryptography_name_to_oid(entry[0]), entry[1]) for entry in self.issuer]
        if want_issuer != [(sub.oid, sub.value) for sub in self.crl.issuer]:
            return False

        old_entries = [self._compress_entry(cryptography_decode_revoked_certificate(cert)) for cert in self.crl]
        new_entries = [self._compress_entry(cert) for cert in self.revoked_certificates]
        if self.update:
            # We don't simply use a set so that duplicate entries are treated correctly
            for entry in new_entries:
                try:
                    old_entries.remove(entry)
                except ValueError:
                    return False
        else:
            if old_entries != new_entries:
                return False

        if self.format != self.actual_format and not ignore_conversion:
            return False

        return True

    def _generate_crl(self):
        """Build and sign a fresh CRL; return its serialized bytes."""
        backend = default_backend()
        crl = CertificateRevocationListBuilder()

        try:
            crl = crl.issuer_name(Name([
                NameAttribute(cryptography_name_to_oid(entry[0]), to_text(entry[1]))
                for entry in self.issuer
            ]))
        except ValueError as e:
            raise CRLError(e)

        crl = crl.last_update(self.last_update)
        crl = crl.next_update(self.next_update)

        # In update mode, carry over old entries that are not replaced by
        # an equivalent new entry.
        if self.update and self.crl:
            new_entries = set([self._compress_entry(entry) for entry in self.revoked_certificates])
            for entry in self.crl:
                decoded_entry = self._compress_entry(cryptography_decode_revoked_certificate(entry))
                if decoded_entry not in new_entries:
                    crl = crl.add_revoked_certificate(entry)

        for entry in self.revoked_certificates:
            revoked_cert = RevokedCertificateBuilder()
            revoked_cert = revoked_cert.serial_number(entry['serial_number'])
            revoked_cert = revoked_cert.revocation_date(entry['revocation_date'])
            if entry['issuer'] is not None:
                revoked_cert = revoked_cert.add_extension(
                    x509.CertificateIssuer([
                        cryptography_get_name(name) for name in entry['issuer']
                    ]),
                    entry['issuer_critical']
                )
            if entry['reason'] is not None:
                revoked_cert = revoked_cert.add_extension(
                    x509.CRLReason(entry['reason']),
                    entry['reason_critical']
                )
            if entry['invalidity_date'] is not None:
                revoked_cert = revoked_cert.add_extension(
                    x509.InvalidityDate(entry['invalidity_date']),
                    entry['invalidity_date_critical']
                )
            crl = crl.add_revoked_certificate(revoked_cert.build(backend))

        self.crl = crl.sign(self.privatekey, self.digest, backend=backend)
        if self.format == 'pem':
            return self.crl.public_bytes(Encoding.PEM)
        else:
            return self.crl.public_bytes(Encoding.DER)

    def generate(self):
        """Write the CRL to disk if regeneration or format conversion is needed."""
        result = None
        if not self.check(perms_required=False, ignore_conversion=True) or self.force:
            result = self._generate_crl()
        elif not self.check(perms_required=False, ignore_conversion=False) and self.crl:
            # Content matches but the on-disk format differs: re-serialize only.
            if self.format == 'pem':
                result = self.crl.public_bytes(Encoding.PEM)
            else:
                result = self.crl.public_bytes(Encoding.DER)

        if result is not None:
            if self.return_content:
                if self.format == 'pem':
                    self.crl_content = result
                else:
                    self.crl_content = base64.b64encode(result)
            if self.backup:
                self.backup_file = self.module.backup_local(self.path)
            write_file(self.module, result)
            self.changed = True

        file_args = self.module.load_file_common_arguments(self.module.params)
        if self.module.set_fs_attributes_if_different(file_args, False):
            self.changed = True

    def dump(self, check_mode=False):
        """Serialize the CRL state into the module's result dict."""
        result = {
            'changed': self.changed,
            'filename': self.path,
            'privatekey': self.privatekey_path,
            'format': self.format,
            'last_update': None,
            'next_update': None,
            'digest': None,
            'issuer_ordered': None,
            'issuer': None,
            'revoked_certificates': [],
        }
        if self.backup_file:
            result['backup_file'] = self.backup_file

        if check_mode:
            # Report the values that would be written.
            result['last_update'] = self.last_update.strftime(TIMESTAMP_FORMAT)
            result['next_update'] = self.next_update.strftime(TIMESTAMP_FORMAT)
            # result['digest'] = cryptography_oid_to_name(self.crl.signature_algorithm_oid)
            result['digest'] = self.module.params['digest']
            result['issuer_ordered'] = self.issuer
            result['issuer'] = {}
            for k, v in self.issuer:
                result['issuer'][k] = v
            result['revoked_certificates'] = []
            for entry in self.revoked_certificates:
                result['revoked_certificates'].append(cryptography_dump_revoked(entry))
        elif self.crl:
            # Report what the loaded CRL actually contains.
            result['last_update'] = self.crl.last_update.strftime(TIMESTAMP_FORMAT)
            result['next_update'] = self.crl.next_update.strftime(TIMESTAMP_FORMAT)
            result['digest'] = cryptography_oid_to_name(cryptography_get_signature_algorithm_oid_from_crl(self.crl))
            issuer = []
            for attribute in self.crl.issuer:
                issuer.append([cryptography_oid_to_name(attribute.oid), attribute.value])
            result['issuer_ordered'] = issuer
            result['issuer'] = {}
            for k, v in issuer:
                result['issuer'][k] = v
            result['revoked_certificates'] = []
            for cert in self.crl:
                entry = cryptography_decode_revoked_certificate(cert)
                result['revoked_certificates'].append(cryptography_dump_revoked(entry))

        if self.return_content:
            result['crl'] = self.crl_content

        return result
def main():
module = AnsibleModule(
argument_spec=dict(
state=dict(type='str', default='present', choices=['present', 'absent']),
mode=dict(type='str', default='generate', choices=['generate', 'update']),
force=dict(type='bool', default=False),
backup=dict(type='bool', default=False),
path=dict(type='path', required=True),
format=dict(type='str', default='pem', choices=['pem', 'der']),
privatekey_path=dict(type='path'),
privatekey_content=dict(type='str'),
privatekey_passphrase=dict(type='str', no_log=True),
issuer=dict(type='dict'),
last_update=dict(type='str', default='+0s'),
next_update=dict(type='str'),
digest=dict(type='str', default='sha256'),
ignore_timestamps=dict(type='bool', default=False),
return_content=dict(type='bool', default=False),
revoked_certificates=dict(
type='list',
elements='dict',
options=dict(
path=dict(type='path'),
content=dict(type='str'),
serial_number=dict(type='int'),
revocation_date=dict(type='str', default='+0s'),
issuer=dict(type='list', elements='str'),
issuer_critical=dict(type='bool', default=False),
reason=dict(
type='str',
choices=[
'unspecified', 'key_compromise', 'ca_compromise', 'affiliation_changed',
'superseded', 'cessation_of_operation', 'certificate_hold',
'privilege_withdrawn', 'aa_compromise', 'remove_from_crl'
]
),
reason_critical=dict(type='bool', default=False),
invalidity_date=dict(type='str'),
invalidity_date_critical=dict(type='bool', default=False),
),
required_one_of=[['path', 'content', 'serial_number']],
mutually_exclusive=[['path', 'content', 'serial_number']],
),
),
required_if=[
('state', 'present', ['privatekey_path', 'privatekey_content'], True),
('state', 'present', ['issuer', 'next_update', 'revoked_certificates'], False),
],
mutually_exclusive=(
['privatekey_path', 'privatekey_content'],
),
supports_check_mode=True,
add_file_common_args=True,
)
if not CRYPTOGRAPHY_FOUND:
module.fail_json(msg=missing_required_lib('cryptography >= {0}'.format(MINIMAL_CRYPTOGRAPHY_VERSION)),
exception=CRYPTOGRAPHY_IMP_ERR)
try:
crl = CRL(module)
if module.params['state'] == 'present':
if module.check_mode:
result = crl.dump(check_mode=True)
result['changed'] = module.params['force'] or not crl.check() or not crl.check(ignore_conversion=False)
module.exit_json(**result)
crl.generate()
else:
if module.check_mode:
result = crl.dump(check_mode=True)
result['changed'] = os.path.exists(module.params['path'])
module.exit_json(**result)
crl.remove()
result = crl.dump()
module.exit_json(**result)
except OpenSSLObjectError as exc:
module.fail_json(msg=to_native(exc))
if __name__ == "__main__":
main()
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2019, Felix Fontein <felix@fontein.de>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
module: x509_crl
version_added: '1.0.0'
short_description: Generate Certificate Revocation Lists (CRLs)
description:
- This module allows one to (re)generate or update Certificate Revocation Lists (CRLs).
- Certificates on the revocation list can be either specified via serial number and (optionally) their issuer,
or as a path to a certificate file in PEM format.
requirements:
- cryptography >= 1.2
author:
- Felix Fontein (@felixfontein)
options:
state:
description:
- Whether the CRL file should exist or not, taking action if the state is different from what is stated.
type: str
default: present
choices: [ absent, present ]
mode:
description:
- Defines how to process entries of existing CRLs.
- If set to C(generate), makes sure that the CRL has the exact set of revoked certificates
as specified in I(revoked_certificates).
- If set to C(update), makes sure that the CRL contains the revoked certificates from
I(revoked_certificates), but can also contain other revoked certificates. If the CRL file
already exists, all entries from the existing CRL will also be included in the new CRL.
When using C(update), you might be interested in setting I(ignore_timestamps) to C(yes).
type: str
default: generate
choices: [ generate, update ]
force:
description:
- Should the CRL be forced to be regenerated.
type: bool
default: no
backup:
description:
- Create a backup file including a timestamp so you can get the original
CRL back if you overwrote it with a new one by accident.
type: bool
default: no
path:
description:
- Remote absolute path where the generated CRL file should be created or is already located.
type: path
required: yes
format:
description:
- Whether the CRL file should be in PEM or DER format.
- If an existing CRL file does match everything but I(format), it will be converted to the correct format
instead of regenerated.
type: str
choices: [pem, der]
default: pem
privatekey_path:
description:
- Path to the CA's private key to use when signing the CRL.
- Either I(privatekey_path) or I(privatekey_content) must be specified if I(state) is C(present), but not both.
type: path
privatekey_content:
description:
- The content of the CA's private key to use when signing the CRL.
- Either I(privatekey_path) or I(privatekey_content) must be specified if I(state) is C(present), but not both.
type: str
privatekey_passphrase:
description:
- The passphrase for the I(privatekey_path).
- This is required if the private key is password protected.
type: str
issuer:
description:
- Key/value pairs that will be present in the issuer name field of the CRL.
- If you need to specify more than one value with the same key, use a list as value.
- Required if I(state) is C(present).
type: dict
last_update:
description:
- The point in time from which this CRL can be trusted.
- Time can be specified either as relative time or as absolute timestamp.
- Time will always be interpreted as UTC.
- Valid format is C([+-]timespec | ASN.1 TIME) where timespec can be an integer
+ C([w | d | h | m | s]) (e.g. C(+32w1d2h).
- Note that if using relative time this module is NOT idempotent, except when
I(ignore_timestamps) is set to C(yes).
type: str
default: "+0s"
next_update:
description:
- "The absolute latest point in time by which this I(issuer) is expected to have issued
another CRL. Many clients will treat a CRL as expired once I(next_update) occurs."
- Time can be specified either as relative time or as absolute timestamp.
- Time will always be interpreted as UTC.
- Valid format is C([+-]timespec | ASN.1 TIME) where timespec can be an integer
+ C([w | d | h | m | s]) (e.g. C(+32w1d2h).
- Note that if using relative time this module is NOT idempotent, except when
I(ignore_timestamps) is set to C(yes).
- Required if I(state) is C(present).
type: str
digest:
description:
- Digest algorithm to be used when signing the CRL.
type: str
default: sha256
revoked_certificates:
description:
- List of certificates to be revoked.
- Required if I(state) is C(present).
type: list
elements: dict
suboptions:
path:
description:
- Path to a certificate in PEM format.
- The serial number and issuer will be extracted from the certificate.
- Mutually exclusive with I(content) and I(serial_number). One of these three options
must be specified.
type: path
content:
description:
- Content of a certificate in PEM format.
- The serial number and issuer will be extracted from the certificate.
- Mutually exclusive with I(path) and I(serial_number). One of these three options
must be specified.
type: str
serial_number:
description:
- Serial number of the certificate.
- Mutually exclusive with I(path) and I(content). One of these three options must
be specified.
type: int
revocation_date:
description:
- The point in time the certificate was revoked.
- Time can be specified either as relative time or as absolute timestamp.
- Time will always be interpreted as UTC.
- Valid format is C([+-]timespec | ASN.1 TIME) where timespec can be an integer
+ C([w | d | h | m | s]) (e.g. C(+32w1d2h).
- Note that if using relative time this module is NOT idempotent, except when
I(ignore_timestamps) is set to C(yes).
type: str
default: "+0s"
issuer:
description:
- The certificate's issuer.
- "Example: C(DNS:ca.example.org)"
type: list
elements: str
issuer_critical:
description:
- Whether the certificate issuer extension should be critical.
type: bool
default: no
reason:
description:
- The value for the revocation reason extension.
type: str
choices:
- unspecified
- key_compromise
- ca_compromise
- affiliation_changed
- superseded
- cessation_of_operation
- certificate_hold
- privilege_withdrawn
- aa_compromise
- remove_from_crl
reason_critical:
description:
- Whether the revocation reason extension should be critical.
type: bool
default: no
invalidity_date:
description:
- The point in time it was known/suspected that the private key was compromised
or that the certificate otherwise became invalid.
- Time can be specified either as relative time or as absolute timestamp.
- Time will always be interpreted as UTC.
- Valid format is C([+-]timespec | ASN.1 TIME) where timespec can be an integer
+ C([w | d | h | m | s]) (e.g. C(+32w1d2h).
- Note that if using relative time this module is NOT idempotent. This will NOT
change when I(ignore_timestamps) is set to C(yes).
type: str
invalidity_date_critical:
description:
- Whether the invalidity date extension should be critical.
type: bool
default: no
ignore_timestamps:
description:
- Whether the timestamps I(last_update), I(next_update) and I(revocation_date) (in
I(revoked_certificates)) should be ignored for idempotency checks. The timestamp
I(invalidity_date) in I(revoked_certificates) will never be ignored.
- Use this in combination with relative timestamps for these values to get idempotency.
type: bool
default: no
return_content:
description:
- If set to C(yes), will return the (current or generated) CRL's content as I(crl).
type: bool
default: no
extends_documentation_fragment:
- files
notes:
- All ASN.1 TIME values should be specified following the YYYYMMDDHHMMSSZ pattern.
- Date specified should be UTC. Minutes and seconds are mandatory.
'''
EXAMPLES = r'''
- name: Generate a CRL
community.crypto.x509_crl:
path: /etc/ssl/my-ca.crl
privatekey_path: /etc/ssl/private/my-ca.pem
issuer:
CN: My CA
last_update: "+0s"
next_update: "+7d"
revoked_certificates:
- serial_number: 1234
revocation_date: 20190331202428Z
issuer:
CN: My CA
- serial_number: 2345
revocation_date: 20191013152910Z
reason: affiliation_changed
invalidity_date: 20191001000000Z
- path: /etc/ssl/crt/revoked-cert.pem
revocation_date: 20191010010203Z
'''
RETURN = r'''
filename:
description: Path to the generated CRL
returned: changed or success
type: str
sample: /path/to/my-ca.crl
backup_file:
description: Name of backup file created.
returned: changed and if I(backup) is C(yes)
type: str
sample: /path/to/my-ca.crl.2019-03-09@11:22~
privatekey:
description: Path to the private CA key
returned: changed or success
type: str
sample: /path/to/my-ca.pem
format:
description:
- Whether the CRL is in PEM format (C(pem)) or in DER format (C(der)).
returned: success
type: str
sample: pem
issuer:
description:
- The CRL's issuer.
- Note that for repeated values, only the last one will be returned.
returned: success
type: dict
sample: '{"organizationName": "Ansible", "commonName": "ca.example.com"}'
issuer_ordered:
description: The CRL's issuer as an ordered list of tuples.
returned: success
type: list
elements: list
sample: '[["organizationName", "Ansible"], ["commonName": "ca.example.com"]]'
last_update:
description: The point in time from which this CRL can be trusted as ASN.1 TIME.
returned: success
type: str
sample: 20190413202428Z
next_update:
description: The point in time from which a new CRL will be issued and the client has to check for it as ASN.1 TIME.
returned: success
type: str
sample: 20190413202428Z
digest:
description: The signature algorithm used to sign the CRL.
returned: success
type: str
sample: sha256WithRSAEncryption
revoked_certificates:
description: List of certificates to be revoked.
returned: success
type: list
elements: dict
contains:
serial_number:
description: Serial number of the certificate.
type: int
sample: 1234
revocation_date:
description: The point in time the certificate was revoked as ASN.1 TIME.
type: str
sample: 20190413202428Z
issuer:
description: The certificate's issuer.
type: list
elements: str
sample: '["DNS:ca.example.org"]'
issuer_critical:
description: Whether the certificate issuer extension is critical.
type: bool
sample: no
reason:
description:
- The value for the revocation reason extension.
- One of C(unspecified), C(key_compromise), C(ca_compromise), C(affiliation_changed), C(superseded),
C(cessation_of_operation), C(certificate_hold), C(privilege_withdrawn), C(aa_compromise), and
C(remove_from_crl).
type: str
sample: key_compromise
reason_critical:
description: Whether the revocation reason extension is critical.
type: bool
sample: no
invalidity_date:
description: |
The point in time it was known/suspected that the private key was compromised
or that the certificate otherwise became invalid as ASN.1 TIME.
type: str
sample: 20190413202428Z
invalidity_date_critical:
description: Whether the invalidity date extension is critical.
type: bool
sample: no
crl:
description:
- The (current or generated) CRL's content.
- Will be the CRL itself if I(format) is C(pem), and Base64 of the
CRL if I(format) is C(der).
returned: if I(state) is C(present) and I(return_content) is C(yes)
type: str
'''
import base64
import os
import traceback
from distutils.version import LooseVersion
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils._text import to_native, to_text
from ansible_collections.community.crypto.plugins.module_utils.io import (
write_file,
)
from ansible_collections.community.crypto.plugins.module_utils.crypto.basic import (
OpenSSLObjectError,
OpenSSLBadPassphraseError,
)
from ansible_collections.community.crypto.plugins.module_utils.crypto.support import (
OpenSSLObject,
load_privatekey,
load_certificate,
parse_name_field,
get_relative_time_option,
select_message_digest,
)
from ansible_collections.community.crypto.plugins.module_utils.crypto.cryptography_support import (
cryptography_get_name,
cryptography_name_to_oid,
cryptography_oid_to_name,
cryptography_serial_number_of_cert,
)
from ansible_collections.community.crypto.plugins.module_utils.crypto.cryptography_crl import (
REVOCATION_REASON_MAP,
TIMESTAMP_FORMAT,
cryptography_decode_revoked_certificate,
cryptography_dump_revoked,
cryptography_get_signature_algorithm_oid_from_crl,
)
from ansible_collections.community.crypto.plugins.module_utils.crypto.identify import (
identify_pem_format,
)
MINIMAL_CRYPTOGRAPHY_VERSION = '1.2'
CRYPTOGRAPHY_IMP_ERR = None
try:
import cryptography
from cryptography import x509
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.serialization import Encoding
from cryptography.x509 import (
CertificateRevocationListBuilder,
RevokedCertificateBuilder,
NameAttribute,
Name,
)
CRYPTOGRAPHY_VERSION = LooseVersion(cryptography.__version__)
except ImportError:
CRYPTOGRAPHY_IMP_ERR = traceback.format_exc()
CRYPTOGRAPHY_FOUND = False
else:
CRYPTOGRAPHY_FOUND = True
class CRLError(OpenSSLObjectError):
pass
class CRL(OpenSSLObject):
def __init__(self, module):
super(CRL, self).__init__(
module.params['path'],
module.params['state'],
module.params['force'],
module.check_mode
)
self.format = module.params['format']
self.update = module.params['mode'] == 'update'
self.ignore_timestamps = module.params['ignore_timestamps']
self.return_content = module.params['return_content']
self.crl_content = None
self.privatekey_path = module.params['privatekey_path']
self.privatekey_content = module.params['privatekey_content']
if self.privatekey_content is not None:
self.privatekey_content = self.privatekey_content.encode('utf-8')
self.privatekey_passphrase = module.params['privatekey_passphrase']
self.issuer = parse_name_field(module.params['issuer'])
self.issuer = [(entry[0], entry[1]) for entry in self.issuer if entry[1]]
self.last_update = get_relative_time_option(module.params['last_update'], 'last_update')
self.next_update = get_relative_time_option(module.params['next_update'], 'next_update')
self.digest = select_message_digest(module.params['digest'])
if self.digest is None:
raise CRLError('The digest "{0}" is not supported'.format(module.params['digest']))
self.revoked_certificates = []
for i, rc in enumerate(module.params['revoked_certificates']):
result = {
'serial_number': None,
'revocation_date': None,
'issuer': None,
'issuer_critical': False,
'reason': None,
'reason_critical': False,
'invalidity_date': None,
'invalidity_date_critical': False,
}
path_prefix = 'revoked_certificates[{0}].'.format(i)
if rc['path'] is not None or rc['content'] is not None:
# Load certificate from file or content
try:
if rc['content'] is not None:
rc['content'] = rc['content'].encode('utf-8')
cert = load_certificate(rc['path'], content=rc['content'], backend='cryptography')
result['serial_number'] = cryptography_serial_number_of_cert(cert)
except OpenSSLObjectError as e:
if rc['content'] is not None:
module.fail_json(
msg='Cannot parse certificate from {0}content: {1}'.format(path_prefix, to_native(e))
)
else:
module.fail_json(
msg='Cannot read certificate "{1}" from {0}path: {2}'.format(path_prefix, rc['path'], to_native(e))
)
else:
# Specify serial_number (and potentially issuer) directly
result['serial_number'] = rc['serial_number']
# All other options
if rc['issuer']:
result['issuer'] = [cryptography_get_name(issuer) for issuer in rc['issuer']]
result['issuer_critical'] = rc['issuer_critical']
result['revocation_date'] = get_relative_time_option(
rc['revocation_date'],
path_prefix + 'revocation_date'
)
if rc['reason']:
result['reason'] = REVOCATION_REASON_MAP[rc['reason']]
result['reason_critical'] = rc['reason_critical']
if rc['invalidity_date']:
result['invalidity_date'] = get_relative_time_option(
rc['invalidity_date'],
path_prefix + 'invalidity_date'
)
result['invalidity_date_critical'] = rc['invalidity_date_critical']
self.revoked_certificates.append(result)
self.module = module
self.backup = module.params['backup']
self.backup_file = None
try:
self.privatekey = load_privatekey(
path=self.privatekey_path,
content=self.privatekey_content,
passphrase=self.privatekey_passphrase,
backend='cryptography'
)
except OpenSSLBadPassphraseError as exc:
raise CRLError(exc)
self.crl = None
try:
with open(self.path, 'rb') as f:
data = f.read()
self.actual_format = 'pem' if identify_pem_format(data) else 'der'
if self.actual_format == 'pem':
self.crl = x509.load_pem_x509_crl(data, default_backend())
if self.return_content:
self.crl_content = data
else:
self.crl = x509.load_der_x509_crl(data, default_backend())
if self.return_content:
self.crl_content = base64.b64encode(data)
except Exception as dummy:
self.crl_content = None
self.actual_format = self.format
def remove(self):
if self.backup:
self.backup_file = self.module.backup_local(self.path)
super(CRL, self).remove(self.module)
def _compress_entry(self, entry):
if self.ignore_timestamps:
# Throw out revocation_date
return (
entry['serial_number'],
tuple(entry['issuer']) if entry['issuer'] is not None else None,
entry['issuer_critical'],
entry['reason'],
entry['reason_critical'],
entry['invalidity_date'],
entry['invalidity_date_critical'],
)
else:
return (
entry['serial_number'],
entry['revocation_date'],
tuple(entry['issuer']) if entry['issuer'] is not None else None,
entry['issuer_critical'],
entry['reason'],
entry['reason_critical'],
entry['invalidity_date'],
entry['invalidity_date_critical'],
)
def check(self, perms_required=True, ignore_conversion=True):
"""Ensure the resource is in its desired state."""
state_and_perms = super(CRL, self).check(self.module, perms_required)
if not state_and_perms:
return False
if self.crl is None:
return False
if self.last_update != self.crl.last_update and not self.ignore_timestamps:
return False
if self.next_update != self.crl.next_update and not self.ignore_timestamps:
return False
if self.digest.name != self.crl.signature_hash_algorithm.name:
return False
want_issuer = [(cryptography_name_to_oid(entry[0]), entry[1]) for entry in self.issuer]
if want_issuer != [(sub.oid, sub.value) for sub in self.crl.issuer]:
return False
old_entries = [self._compress_entry(cryptography_decode_revoked_certificate(cert)) for cert in self.crl]
new_entries = [self._compress_entry(cert) for cert in self.revoked_certificates]
if self.update:
# We don't simply use a set so that duplicate entries are treated correctly
for entry in new_entries:
try:
old_entries.remove(entry)
except ValueError:
return False
else:
if old_entries != new_entries:
return False
if self.format != self.actual_format and not ignore_conversion:
return False
return True
def _generate_crl(self):
backend = default_backend()
crl = CertificateRevocationListBuilder()
try:
crl = crl.issuer_name(Name([
NameAttribute(cryptography_name_to_oid(entry[0]), to_text(entry[1]))
for entry in self.issuer
]))
except ValueError as e:
raise CRLError(e)
crl = crl.last_update(self.last_update)
crl = crl.next_update(self.next_update)
if self.update and self.crl:
new_entries = set([self._compress_entry(entry) for entry in self.revoked_certificates])
for entry in self.crl:
decoded_entry = self._compress_entry(cryptography_decode_revoked_certificate(entry))
if decoded_entry not in new_entries:
crl = crl.add_revoked_certificate(entry)
for entry in self.revoked_certificates:
revoked_cert = RevokedCertificateBuilder()
revoked_cert = revoked_cert.serial_number(entry['serial_number'])
revoked_cert = revoked_cert.revocation_date(entry['revocation_date'])
if entry['issuer'] is not None:
revoked_cert = revoked_cert.add_extension(
x509.CertificateIssuer([
cryptography_get_name(name) for name in entry['issuer']
]),
entry['issuer_critical']
)
if entry['reason'] is not None:
revoked_cert = revoked_cert.add_extension(
x509.CRLReason(entry['reason']),
entry['reason_critical']
)
if entry['invalidity_date'] is not None:
revoked_cert = revoked_cert.add_extension(
x509.InvalidityDate(entry['invalidity_date']),
entry['invalidity_date_critical']
)
crl = crl.add_revoked_certificate(revoked_cert.build(backend))
self.crl = crl.sign(self.privatekey, self.digest, backend=backend)
if self.format == 'pem':
return self.crl.public_bytes(Encoding.PEM)
else:
return self.crl.public_bytes(Encoding.DER)
def generate(self):
result = None
if not self.check(perms_required=False, ignore_conversion=True) or self.force:
result = self._generate_crl()
elif not self.check(perms_required=False, ignore_conversion=False) and self.crl:
if self.format == 'pem':
result = self.crl.public_bytes(Encoding.PEM)
else:
result = self.crl.public_bytes(Encoding.DER)
if result is not None:
if self.return_content:
if self.format == 'pem':
self.crl_content = result
else:
self.crl_content = base64.b64encode(result)
if self.backup:
self.backup_file = self.module.backup_local(self.path)
write_file(self.module, result)
self.changed = True
file_args = self.module.load_file_common_arguments(self.module.params)
if self.module.set_fs_attributes_if_different(file_args, False):
self.changed = True
def dump(self, check_mode=False):
result = {
'changed': self.changed,
'filename': self.path,
'privatekey': self.privatekey_path,
'format': self.format,
'last_update': None,
'next_update': None,
'digest': None,
'issuer_ordered': None,
'issuer': None,
'revoked_certificates': [],
}
if self.backup_file:
result['backup_file'] = self.backup_file
if check_mode:
result['last_update'] = self.last_update.strftime(TIMESTAMP_FORMAT)
result['next_update'] = self.next_update.strftime(TIMESTAMP_FORMAT)
# result['digest'] = cryptography_oid_to_name(self.crl.signature_algorithm_oid)
result['digest'] = self.module.params['digest']
result['issuer_ordered'] = self.issuer
result['issuer'] = {}
for k, v in self.issuer:
result['issuer'][k] = v
result['revoked_certificates'] = []
for entry in self.revoked_certificates:
result['revoked_certificates'].append(cryptography_dump_revoked(entry))
elif self.crl:
result['last_update'] = self.crl.last_update.strftime(TIMESTAMP_FORMAT)
result['next_update'] = self.crl.next_update.strftime(TIMESTAMP_FORMAT)
result['digest'] = cryptography_oid_to_name(cryptography_get_signature_algorithm_oid_from_crl(self.crl))
issuer = []
for attribute in self.crl.issuer:
issuer.append([cryptography_oid_to_name(attribute.oid), attribute.value])
result['issuer_ordered'] = issuer
result['issuer'] = {}
for k, v in issuer:
result['issuer'][k] = v
result['revoked_certificates'] = []
for cert in self.crl:
entry = cryptography_decode_revoked_certificate(cert)
result['revoked_certificates'].append(cryptography_dump_revoked(entry))
if self.return_content:
result['crl'] = self.crl_content
return result
def main():
module = AnsibleModule(
argument_spec=dict(
state=dict(type='str', default='present', choices=['present', 'absent']),
mode=dict(type='str', default='generate', choices=['generate', 'update']),
force=dict(type='bool', default=False),
backup=dict(type='bool', default=False),
path=dict(type='path', required=True),
format=dict(type='str', default='pem', choices=['pem', 'der']),
privatekey_path=dict(type='path'),
privatekey_content=dict(type='str', no_log=True),
privatekey_passphrase=dict(type='str', no_log=True),
issuer=dict(type='dict'),
last_update=dict(type='str', default='+0s'),
next_update=dict(type='str'),
digest=dict(type='str', default='sha256'),
ignore_timestamps=dict(type='bool', default=False),
return_content=dict(type='bool', default=False),
revoked_certificates=dict(
type='list',
elements='dict',
options=dict(
path=dict(type='path'),
content=dict(type='str'),
serial_number=dict(type='int'),
revocation_date=dict(type='str', default='+0s'),
issuer=dict(type='list', elements='str'),
issuer_critical=dict(type='bool', default=False),
reason=dict(
type='str',
choices=[
'unspecified', 'key_compromise', 'ca_compromise', 'affiliation_changed',
'superseded', 'cessation_of_operation', 'certificate_hold',
'privilege_withdrawn', 'aa_compromise', 'remove_from_crl'
]
),
reason_critical=dict(type='bool', default=False),
invalidity_date=dict(type='str'),
invalidity_date_critical=dict(type='bool', default=False),
),
required_one_of=[['path', 'content', 'serial_number']],
mutually_exclusive=[['path', 'content', 'serial_number']],
),
),
required_if=[
('state', 'present', ['privatekey_path', 'privatekey_content'], True),
('state', 'present', ['issuer', 'next_update', 'revoked_certificates'], False),
],
mutually_exclusive=(
['privatekey_path', 'privatekey_content'],
),
supports_check_mode=True,
add_file_common_args=True,
)
if not CRYPTOGRAPHY_FOUND:
module.fail_json(msg=missing_required_lib('cryptography >= {0}'.format(MINIMAL_CRYPTOGRAPHY_VERSION)),
exception=CRYPTOGRAPHY_IMP_ERR)
try:
crl = CRL(module)
if module.params['state'] == 'present':
if module.check_mode:
result = crl.dump(check_mode=True)
result['changed'] = module.params['force'] or not crl.check() or not crl.check(ignore_conversion=False)
module.exit_json(**result)
crl.generate()
else:
if module.check_mode:
result = crl.dump(check_mode=True)
result['changed'] = os.path.exists(module.params['path'])
module.exit_json(**result)
crl.remove()
result = crl.dump()
module.exit_json(**result)
except OpenSSLObjectError as exc:
module.fail_json(msg=to_native(exc))
if __name__ == "__main__":
main()
|
4295_7
|
crossvul
|
py
|
CWE-116
|
Improper Encoding or Escaping of Output - Improper encoding or escaping can allow attackers to change the commands that are sent to another component, inserting malicious commands instead.
|
javascript
|
const {escapeRegExp, size, isString} = require('lodash');
const {SECRET_REPLACEMENT, SECRET_MIN_SIZE} = require('./definitions/constants');
module.exports = (env) => {
const toReplace = Object.keys(env).filter((envVar) => {
// https://github.com/semantic-release/semantic-release/issues/1558
if (envVar === 'GOPRIVATE') {
return false;
}
return /token|password|credential|secret|private/i.test(envVar) && size(env[envVar].trim()) >= SECRET_MIN_SIZE;
});
const regexp = new RegExp(toReplace.map((envVar) => escapeRegExp(env[envVar])).join('|'), 'g');
return (output) =>
output && isString(output) && toReplace.length > 0 ? output.toString().replace(regexp, SECRET_REPLACEMENT) : output;
};
|
const {escapeRegExp, size, isString} = require('lodash');
const {SECRET_REPLACEMENT, SECRET_MIN_SIZE} = require('./definitions/constants');
module.exports = (env) => {
const toReplace = Object.keys(env).filter((envVar) => {
// https://github.com/semantic-release/semantic-release/issues/1558
if (envVar === 'GOPRIVATE') {
return false;
}
return /token|password|credential|secret|private/i.test(envVar) && size(env[envVar].trim()) >= SECRET_MIN_SIZE;
});
const regexp = new RegExp(
toReplace
.map((envVar) => `${escapeRegExp(env[envVar])}|${encodeURI(escapeRegExp(env[envVar]))}`)
.join('|'),
'g'
);
return (output) =>
output && isString(output) && toReplace.length > 0 ? output.toString().replace(regexp, SECRET_REPLACEMENT) : output;
};
|
4339_0
|
crossvul
|
js
|
CWE-116
|
Improper Encoding or Escaping of Output - Improper encoding or escaping can allow attackers to change the commands that are sent to another component, inserting malicious commands instead.
|
php
|
<?php
/**
* edih_csv_inc.php
*
* Copyright 2012 Kevin McCormick
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 3 or later. You should have
* received a copy of the GNU General Public License along with this program;
* if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
* <http://opensource.org/licenses/gpl-license.php>
*
* @author Kevin McCormick
* @link: http://www.open-emr.org
* @package OpenEMR
* @subpackage ediHistory
*/
/*
* The purpose of this file is to hold functions of general utility for
* my edi_claim_history project. It began as a php "class" but I am now
* thinking that instantiating the class is too much bother and probably
* a waste of memory, since the contents of the file have to be read into
* memory anyway.
*
* <pre>
* ******* important *********
* function csv_parameters($type="ALL")
* This function must have the correct values or nothing will work
* function csv_verify_file( $file_path, $type, $val_array=FALSE )
* critical for file verification and x12 parsing
* function (in ibr_uploads.php) ibr_upload_match_file($param_ar, $fidx, &$html_str)
* contains a regular expression that must be correct
*
* Also, the constant IBR_HISTORY_DIR must be correct
* **************************
* </pre>
*
* The claim_history x12 files are claim (837) acknowledgement (997/999) claim status (277) and claim payment (835)
* Also eligibility request (270) and eligibility response (271)
*
* <pre>
* Basic workflow:
* Each file type has a row in the array from csv_paramaters()
* type directory files_csv claims_csv column regex
*
* 1. open submitted file in edih_x12_class to verify and produce properties
* 2. Read the parameters array and choose the parameters using 'type'
* 2. Search the matched type 'directory' for the filename files matching the 'regex' regular expressions and
* compare the results to the files listed in the 'files_csv' files.csv record -- unmatched files are "new"
* 3. Each "new" x12 file should be read by csv_x12_segments -- returns array('path', 'delimiters', 'segments')
* ibr, ebr, ack -- basically Availity formats have their own read functions
* 4. Pass the array to various functions which parse for claims information
* 5. Write the results to files.csv or claims.csv and create html output for display
*
* 6. Other outputs as called for in ibr_history.php -- from user input from claim_history.html
* </pre>
*
* Key usability issue is the "new" files are in the users home directory -- downloaded there
* while the OpenEMR is on the server -- so there is a basic issue of access to the files
*
* The ibr_uploads.php script handles uploads of zip archives or multiple file uploads
*
* The csv data files are just php written .csv files, so anything different may cause errors
* You can open and edit them in OpenOffice, but you must save them in "original format"
*
* TO_DO Some type of "find in files" search would be helpful for locating all references to a claim, patient, etc.
* [ grep -nHIrF 'findtext']
*
* TO_DO functions to zip old files, put them aside, and remove them from csv tables
*/
///**
// * a security measure to prevent direct web access to this file
// */
// if (!defined('SITE_IN')) die('Direct access not allowed!');
// $GLOBALS['OE_EDIH_DIR'] $GLOBALS['OE_SITE_DIR']
/* *********** GLOBALS used for testing only **********
*/
// //$GLOBALS['OE_SITE_DIR'].'/edi/history';
//$OE_SITES_BASE = $GLOBALS['OE_SITE_DIR'];
//$OE_SITE_DIR = $OE_SITES_BASE.'/testing';
//$OE_EDIH_DIR = $OE_SITE_DIR.'/edi/history';
/* ***********
*/
/**
 * Constant that is checked in included files to prevent direct access.
 * concept taken from Joomla
 */
define('_EDIH', 1);
// 'DS' is a short alias for the OS directory separator; it is used when
// building every file path in this module
//DIRECTORY_SEPARATOR;
if (!defined('DS')) define('DS', DIRECTORY_SEPARATOR);
/**
 * Append a timestamped message to today's edi history log file.
 *
 * A non-string or empty argument is itself logged as an error, naming
 * the offending caller, and 0 is returned.
 *
 * @param string $msg_str the log message
 * @return int number of characters written (0 for invalid input)
 */
function csv_edihist_log ( $msg_str ) {
    //
    // one log file per day, e.g. log/edih_log_2014-02-07.txt
    $logpath = csv_edih_basedir().DS.'log'.DS.'edih_log_'.date('Y-m-d').'.txt';
    //
    if ( !is_string($msg_str) || !strlen($msg_str) ) {
        // identify the caller so the bad call site can be located
        $caller = debug_backtrace(DEBUG_BACKTRACE_IGNORE_ARGS, 2)[1]['function'];
        csv_edihist_log('invalid message string '.$caller);
        return 0;
    }
    // prefix each entry with a compact timestamp
    $entry = date('Ymd:Hms') . ' ' . $msg_str . PHP_EOL;
    //
    return file_put_contents($logpath, $entry, FILE_APPEND);
}
/**
 * Read a log file into an html formatted ordered list.
 *
 * If the file does not exist, the opening markup alone is returned; if
 * it exists but cannot be opened, an error paragraph is returned.
 *
 * @param string $logname name of a file in the .../log/ directory
 * @return string html markup
 */
function csv_log_html($logname='') {
    $html_str = "<div class='filetext'>".PHP_EOL."<ol class='logview'>".PHP_EOL;
    $logpath = csv_edih_basedir().DS.'log'.DS.$logname;
    if ( !is_file($logpath) ) {
        return $html_str;
    }
    $fh = fopen( $logpath, 'r');
    if ($fh === FALSE) {
        return "<p>Error: unable to open log file</p>".PHP_EOL;
    }
    // one <li> per log line
    for ($line = fgets($fh); $line !== false; $line = fgets($fh)) {
        $html_str .= "<li>".$line."</li>".PHP_EOL;
    }
    $html_str .= "</ol>".PHP_EOL."</div>".PHP_EOL;
    if (!feof($fh)) {
        // fgets() stopped before end-of-file: report the truncated read
        $html_str .= "<p>Error in logfile: unexpected file ending</p>".PHP_EOL;
    }
    fclose($fh);
    return $html_str;
}
/**
 * List log files, or move week-old log files into a zip archive.
 *
 * With $list = true, a json array of the log file names is returned
 * (newest first).  With $list = false, logs more than seven days old
 * are added to log/edih-log-archive.zip and then deleted; when the
 * archive reaches 200 entries it is set aside under a date-prefixed
 * name and a fresh archive is started.  A json array of the archived
 * file names is returned.
 *
 * @param bool $list true to list, false to archive
 * @return string json encoded array of file names
 */
function csv_log_manage($list=true) {
    //
    $dir = csv_edih_basedir().DS.'log';
    $list_ar = array();
    $old_ar = array();
    $lognames = scandir($dir);
    if ($list) {
        foreach($lognames as $log) {
            // log names contain '_log_' e.g. edih_log_2014-02-07.txt
            if (!strpos($log, '_log_')) { continue; }
            $list_ar[] = $log;
        }
        if (count($list_ar)) { rsort($list_ar); }
        //
        return json_encode($list_ar);
        //
    } else {
        // list is false, must be archive
        $datetime1 = date_create(date('Y-m-d'));
        //
        foreach($lognames as $log) {
            if ($log == '.' || $log == '..') { continue; }
            //
            // the file date follows the last underscore in the name
            $pos1 = strrpos($log, '_');
            if ($pos1) {
                $ldate = substr($log, $pos1+1, 10);
                $datetime2 = date_create($ldate);
                $interval = date_diff($datetime1, $datetime2);
                if ($interval->format('%R%a') < -7) {
                    // older log files are put in zip archive
                    if ( is_file($dir.DS.$log) ) { $old_ar[] = $log; }
                }
            }
        }
    }
    //
    $ok = false;
    $archbase = 'edih-log-archive.zip';
    $archname = $dir.DS.$archbase;
    $filelimit = 200;
    //
    if (count($old_ar)) {
        $zip = new ZipArchive;
        if (is_file($archname)) {
            $ok = $zip->open($archname, ZipArchive::CHECKCONS);
        } else {
            $ok = $zip->open($archname, ZipArchive::CREATE);
        }
        // ZipArchive::open() returns TRUE on success but an int error
        // code on failure, so a strict comparison is required here
        if ($ok === true) {
            if ($zip->numFiles >= $filelimit) {
                // archive is full: set it aside under a dated name and
                // start a fresh archive
                $zip->close();
                $dte = $datetime1->format('Ymd');
                // $archname is already a full path -- do not prefix $dir again
                $ok = rename($archname, $dir.DS.$dte.'_'.$archbase);
                csv_edihist_log('csv_log_archive: rename full archive '.$dte.'_'.$archbase );
                if ($ok) {
                    $ok = $zip->open($archname, ZipArchive::CREATE);
                    if ($ok !== true) {
                        csv_edihist_log('csv_log_archive: cannot create '.$archname);
                    }
                } else {
                    csv_edihist_log('csv_log_archive: cannot rename '.$archname);
                }
            }
            //
            if ($ok === true) {
                foreach($old_ar as $lg) {
                    if (is_file($dir.DS.$lg)) {
                        $a = $zip->addFile($dir.DS.$lg, $lg);
                        if ($a) {
                            csv_edihist_log('csv_log_archive: add to archive '.$lg );
                        } else {
                            csv_edihist_log('csv_log_archive: error archiving '.$lg );
                        }
                    }
                }
                $c = $zip->close();
                if ($c) {
                    // files are safely archived -- remove the originals
                    foreach($old_ar as $lg) {
                        if ( !unlink($dir.DS.$lg) ) {
                            csv_edihist_log('csv_log_archive: error removing '.$dir.DS.$lg);
                        }
                    }
                } else {
                    csv_edihist_log('csv_log_archive: error closing log file archive');
                }
            } else {
                csv_edihist_log('csv_log_manage: error failed to open '.$archname);
            }
        } else {
            csv_edihist_log('csv_log_manage: error failed to open '.$archname);
        }
    }
    //
    return json_encode($old_ar);
}
/**
 * open or save a user notes file
 *
 * With $open = true the notes file contents are returned (a dated
 * heading is substituted when the file is empty or holds the 'empty'
 * marker).  With $open = false the supplied content is validated as
 * plain ascii text and then written; empty content writes the marker
 * text 'empty'.
 *
 * Note: the original version never wrote validated content -- the save
 * only happened for empty content -- and reported success/failure
 * inverted; both are corrected here.
 *
 * @param string $content note text to save (save mode only)
 * @param bool $open true to read, false to save
 * @return string html status or the notes text
 */
function csv_notes_file($content='', $open=true) {
    //
    $str_html = '';
    //$fp = $GLOBALS['OE_EDIH_DIR'].'/edi_notes.txt';
    $fp = csv_edih_basedir().DS.'archive'.DS.'edi_notes.txt';
    if (! is_writable($fp) ) {
        // create the notes file on first use
        $fh = fopen( $fp, 'a+b');
        fclose($fh);
    }
    // for retrieving notes
    if ($open) {
        // if contents were previously deleted by user and file is empty,
        // the text 'empty' is put in content in save operation
        $ftxt = file_get_contents($fp);
        if ($ftxt === false) {
            $str_html .= 'csv_notes_file: file error <br>'.PHP_EOL;
            csv_edihist_log('csv_notes_file: file error');
        }
        if (substr($ftxt, 0, 5) == 'empty' && strlen($ftxt) == 5) {
            $ftxt = '## '. date("F j, Y, g:i a");
        } elseif (!$ftxt) {
            $ftxt = '## '. date("F j, Y, g:i a");
        }
        $str_html .= PHP_EOL.$ftxt.PHP_EOL;
        return $str_html;
    }
    // save operation: validate non-empty content before writing
    if (strlen($content)) {
        // use finfo php class when available
        if ( class_exists('finfo') ) {
            $finfo = new finfo(FILEINFO_MIME);
            $mimeinfo = $finfo->buffer($content);
            if ( strncmp($mimeinfo, 'text/plain; charset=us-ascii', 28) !== 0 ) {
                csv_edihist_log('csv_notes_file: invalid mime-type '.$mimeinfo);
                $str_html = 'csv_notes_file: invalid mime-type <br>'.$mimeinfo;
                //
                return $str_html;
            }
        } elseif (preg_match('/[^\x20-\x7E\x0A\x0D]|(<\?)|(<%)|(<asp)|(<ASP)|(#!)|(\$\{)|(<scr)|(<SCR)/', $content, $matches, PREG_OFFSET_CAPTURE)) {
            // fallback filter: reject non-printable characters and
            // script/markup lead-ins
            csv_edihist_log('csv_notes_file: Filtered character in file content -- character: '.$matches[0][0].' position: '.$matches[0][1]);
            $str_html .= 'Filtered character in file content not accepted <br>'. PHP_EOL;
            $str_html .= ' character: ' . $matches[0][0] . ' position: ' . $matches[0][1] . '<br>' . PHP_EOL;
            //
            return $str_html;
        }
    }
    // write the validated content; 'empty' marks a deliberately cleared file
    $ftxt = ($content) ? $content : 'empty';
    $saved = file_put_contents($fp, $ftxt);
    // file_put_contents() returns the byte count, or false on failure
    $str_html .= ($saved === false) ? '<p>Save Error with notes file</p>' : '<p>Notes content saved</p>';
    //
    return $str_html;
}
/**
 * Generate the path to the edi history files.
 *
 * @return string|bool directory path, or false when the site dir is unknown
 */
function csv_edih_basedir() {
    // $GLOBALS['OE_SITE_DIR'] is like /var/www/htdocs/openemr/sites/default
    if ( !isset($GLOBALS['OE_SITE_DIR']) ) {
        csv_edihist_log('csv_edih_basedir: failed to obtain OpenEMR Site directory');
        return false;
    }
    return $GLOBALS['OE_SITE_DIR'].DS.'edi'.DS.'history';
}
/**
 * Generate the path to the edi_history tmp dir for file upload operations.
 *
 * @uses csv_edih_basedir()
 * @return string|bool directory path, or false when the base dir is unknown
 */
function csv_edih_tmpdir() {
    //
    $base = csv_edih_basedir();
    return ($base) ? $base.DS.'tmp' : false;
}
/**
 * Initial setup function
 *
 * Creates the edi/history directory tree: folders for csv tables,
 * archives, logs, and tmp work space, plus one storage folder per x12
 * file type from csv_parameters().  Existing csv tables are renamed
 * with an 'old_' prefix rather than overwritten, and in the upgrade
 * case files from the former /era folder are moved into the new /f835
 * folder.
 *
 * NOTE(review): unrecoverable folder-creation errors call die(), so
 * the string return only occurs on the non-fatal failure paths.
 *
 * @uses csv_parameters()
 * @uses csv_edih_basedir()
 *
 * @return boolean|string true on success, otherwise accumulated html
 *                        status text describing what happened
 */
function csv_setup() {
    //
    $isOK = false;
    $out_str = '';
    $chr = 0;
    // $GLOBALS['OE_SITE_DIR'] should be like /var/www/htdocs/openemr/sites/default
    $sitedir = $GLOBALS['OE_SITE_DIR'];
    //$sitedir = csv_edih_basedir();
    //
    if (is_readable($sitedir)) {
        // lay out the expected directory paths under [site]/edi
        $basedir = $sitedir.DS.'edi';
        $edihist_dir = $basedir.DS.'history';
        $csv_dir = $edihist_dir.DS.'csv';
        $archive_dir = $edihist_dir.DS.'archive';
        $log_dir = $edihist_dir.DS.'log';
        $tmp_dir = $edihist_dir.DS.'tmp';
    } else {
        //csv_edihist_log('setup: failed to obtain OpenEMR Site directory');
        echo 'setup: failed to obtain OpenEMR Site directory<br>'.PHP_EOL;
        return false;
    }
    //
    if (is_writable($basedir) ) {
        $isOK = true;
        //csv_edihist_log('setup: directory '.$basedir);
        $out_str .= 'EDI_History Setup should not overwrite existing data.<br>'.PHP_EOL;
        $out_str .= 'Setup: directory '.$basedir.'<br>'.PHP_EOL;
        //
        // create each support folder; an existing folder is accepted as-is
        if (is_dir($edihist_dir) || mkdir($edihist_dir, 0755)) {
            $out_str .= 'created folder '.$edihist_dir.'<br>'.PHP_EOL;
            $isOK = true;
            if (is_dir($csv_dir) || mkdir($csv_dir, 0755) ) {
                $out_str .= 'created folder '.$csv_dir.'<br>'.PHP_EOL;
                $isOK = true;
            } else {
                $isOK = false;
                $out_str .= 'Setup: Failed to create csv folder... '.'<br>'.PHP_EOL;
                die('Failed to create csv folder... '.$archive_dir);
            }
            if (is_dir($archive_dir) || mkdir($archive_dir, 0755) ) {
                $out_str .= 'created folder '.$archive_dir.'<br>'.PHP_EOL;
                $isOK = true;
            } else {
                $isOK = false;
                $out_str .= 'Setup: Failed to create archive folder... '.'<br>'.PHP_EOL;
                die('Failed to create archive folder... ');
            }
            if (is_dir($log_dir) || mkdir($log_dir, 0755) ) {
                $out_str .= 'created folder '.$log_dir.'<br>'.PHP_EOL;
                $isOK = true;
            } else {
                $isOK = false;
                $out_str .= 'Setup: Failed to create log folder... '.'<br>'.PHP_EOL;
                die('Failed to create log folder... ');
            }
            if (is_dir($tmp_dir) || mkdir($tmp_dir, 0755) ) {
                $out_str .= 'created folder '.$tmp_dir.PHP_EOL;
                $isOK = true;
            } else {
                $isOK = false;
                $out_str .= 'Setup: Failed to create tmp folder... '.'<br>'.PHP_EOL;
                die('Failed to create tmp folder... ');
            }
        } else {
            $isOK = false;
            $out_str .= 'Setup failed: cannot write to folder '.$basedir.'<br>'.PHP_EOL;
            die('Setup failed: cannot write to '.$basedir);
        }
    } else {
        $isOK = false;
        $out_str .= 'Setup: Failed to create history folder... '.'<br>'.PHP_EOL;
        die('Failed to create history folder... '.$edihist_dir);
    }
    if ($isOK) {
        $p_ar = csv_parameters('ALL');
        $old_csv = array('f837'=>'batch', 'f835'=>'era');
        foreach ($p_ar as $key=>$val) {
            // rename existing csv files to old_filename
            // NOTE(review): this rename loop runs once per file type, so
            // files renamed to 'old_*' on the first pass match again on
            // later passes and become 'old_old_*' -- looks unintended;
            // verify against the calling workflow
            if (is_dir($csv_dir)) {
                if ($dh = opendir($csv_dir)) {
                    while (($file = readdir($dh)) !== false) {
                        if (is_file($csv_dir.DS.$file) && strpos($file, 'csv')) {
                            $rn = rename($csv_dir.DS.$file, $csv_dir.DS.'old_'.$file);
                            if ($rn) {
                                $out_str .= 'renamed csv/'.$file.' to old_'.$file.'<br />'.PHP_EOL;
                            } else {
                                $out_str .= 'attempt to rename csv/'.$file.' failed<br />'.PHP_EOL;
                            }
                        }
                    }
                }
            }
            //;
            // make the edi files storage subdirs
            $tp = $p_ar[$key]['type'];
            $type_dir = $p_ar[$key]['directory'];
            //
            if (is_dir($type_dir)) {
                $out_str .= 'folder for '.$tp.' exists '.$type_dir.'<br>'.PHP_EOL;
            } elseif (mkdir($type_dir, 0755)) {
                if ($tp == 'f835') {
                    // in upgrade case the f835 directory should not exist
                    // move 'era' files from /era to /f835
                    if (is_dir($edihist_dir.DS.'era')) {
                        $fct = 0; $rct = 0;
                        if ($dh = opendir($edihist_dir.DS.'era')) {
                            while (($file = readdir($dh)) !== false) {
                                if (is_file($edihist_dir.DS.'era'.DS.$file)) {
                                    $rct++;
                                    $rn = rename($edihist_dir.DS.'era'.DS.$file, $type_dir.DS.$file);
                                    $fct = ($rn) ? $fct + 1 : $fct;
                                }
                            }
                        }
                        $out_str .= 'created type folder '.$type_dir.' and moved '.$fct.' of '.$rct.' files from /era<br>'.PHP_EOL;
                    }
                } else {
                    $out_str .= 'created type folder '.$type_dir.'<br>'.PHP_EOL;
                }
            } else {
                $out_str .= 'Setup failed to create directory for '.$tp.'<br>'.PHP_EOL;
            }
        }
    } else {
        $out_str .= 'Setup failed: Can not create directories <br>'.PHP_EOL;
    }
    if ($isOK) {
        csv_edihist_log($out_str);
        return true;
    } else {
        return $out_str;
    }
}
/**
 * Empty all contents of the tmp dir /edi/history/tmp.
 *
 * Removes regular files, and files in first-level subdirectories plus
 * those subdirectories themselves.
 *
 * @uses csv_edih_tmpdir()
 * @return bool true when the directory is empty afterward
 */
function csv_clear_tmpdir() {
    //
    $tmpdir = csv_edih_tmpdir();
    // refuse to delete anything unless the path really ends in 'tmp'
    if ( basename($tmpdir) != 'tmp' ) {
        csv_edihist_log ( 'tmp dir not /edi/history/tmp');
        return false;
    }
    $entries = scandir($tmpdir);
    if (count($entries) > 2) {
        foreach($entries as $entry) {
            if ($entry == "." || $entry == "..") { continue; }
            $path = $tmpdir.DS.$entry;
            if (is_file($path)) {
                unlink($path);
            } elseif (is_dir($path)) {
                // remove regular files one level down, then the subdir
                foreach(scandir($path) as $sub) {
                    if ($sub == "." || $sub == "..") { continue; }
                    if (is_file($path.DS.$sub)) { unlink($path.DS.$sub); }
                }
                rmdir($path);
            }
        }
    }
    // verify nothing remains except '.' and '..'
    if (count(scandir($tmpdir)) > 2) {
        csv_edihist_log ('tmp dir contents remain in ... /edi/history/tmp');
        return false;
    }
    return true;
}
/**
 * Open and verify a default edih_x12_file object.
 *
 * The file path is resolved first; the object must then be of class
 * edih_x12_file, report 'ovigs' from edih_valid(), and have non-empty
 * segments, envelopes, and delimiters.
 *
 * @uses csv_check_filepath()
 *
 * @param string $filepath filepath or filename
 * @param string $type file x12 type
 * @return object|bool edih_x12_file instance, or false on any failure
 */
function csv_check_x12_obj($filepath, $type='') {
    //
    $fp = csv_check_filepath($filepath, $type);
    if (!$fp) {
        csv_edihist_log("csv_check_x12_obj: invalid file path $filepath");
        return false;
    }
    //
    $x12obj = new edih_x12_file($fp);
    if ( 'edih_x12_file' != get_class($x12obj) ) {
        csv_edihist_log("csv_check_x12_obj: object not edih_x12_file $filepath");
        return false;
    }
    if ($x12obj->edih_valid() != 'ovigs') {
        csv_edihist_log("csv_check_x12_obj: invalid object $filepath");
        return false;
    }
    // all three properties must be present and non-empty
    $has = count( $x12obj->edih_segments() );
    $has = ($has) ? count( $x12obj->edih_envelopes() ) : false;
    $has = ($has) ? count( $x12obj->edih_delimiters() ) : false;
    if (!$has) {
        csv_edihist_log("csv_check_x12_obj: object missing properties [$filepath]");
        csv_edihist_log( $x12obj->edih_message() );
        return false;
    }
    //
    return $x12obj;
}
/**
 * Check that the file path we are working with is a readable file.
 *
 * When only a bare file name is supplied, the name is typed (via the
 * per-type regex, or the supplied $type) and looked for in the storage
 * directory for that type.
 *
 * @uses csv_parameters()
 * @param string $filename name of a file that is one of our types
 * @param string $type optional; one of our file types
 * @return string either an empty string or a readable filepath
 */
function csv_check_filepath($filename, $type='ALL') {
    //
    // already a readable path -- nothing more to do
    if ( is_file($filename) && is_readable($filename) ) {
        return $filename;
    }
    //
    $fp = '';
    $fn = basename($filename);
    //
    if ($type && $type != 'ALL') {
        // a known type: look in that type's storage directory
        $p = csv_parameters($type);
        if (is_array($p) && array_key_exists('type', $p) ) {
            $fp = $p['directory'].DS.$fn;
        }
    } else {
        // type unknown: match the name against each type's regex
        foreach (csv_parameters("ALL") as $tp=>$par) {
            if ( $par['regex'] && preg_match($par['regex'], $fn) ) {
                $fp = $par['directory'].DS.$fn;
                break;
            }
        }
    }
    return ( is_file($fp) && is_readable($fp) ) ? realpath($fp) : '';
}
/**
 * Verify a file type parameter and normalize it.
 *
 * @param string $type file type (fXXX name, alias, or GS02 code)
 * @param bool $gs_code true to return the GS02 code instead of fXXX
 * @return string|bool normalized type, '' when unmatched, false when empty
 */
function csv_file_type($type, $gs_code=false) {
    //
    if (!$type) {
        csv_edihist_log('csv_file_type: invalid or missing type argument '.$type);
        return false;
    }
    $needle = (string)$type;
    //
    // ordered map: alias list => array(GS02 code, internal fXXX type).
    // note: as in the original chain of tests, strpos() searches for the
    // argument INSIDE the alias list, so any substring of a list matches
    $types = array(
        '|f837|batch|HC'        => array('HC', 'f837'),
        '|f835|era|HP'          => array('HP', 'f835'),
        '|f999|f997|ack|ta1|FA' => array('FA', 'f997'),
        '|f277|HN'              => array('HN', 'f277'),
        '|f276|HR'              => array('HR', 'f276'),
        '|f271|HB'              => array('HB', 'f271'),
        '|f270|HS'              => array('HS', 'f270'),
        '|f278|HI'              => array('HI', 'f278'),
    );
    foreach($types as $aliases=>$codes) {
        if ( strpos($aliases, $needle) ) {
            return ($gs_code) ? $codes[0] : $codes[1];
        }
    }
    //
    csv_edihist_log('csv_file_type error: incorrect type '.$needle);
    return '';
}
/**
* The array that holds the various parameters used in dealing with files
*
* A key function since it holds the paths, columns, etc.
 * Unfortunately, there is an issue with matching the type in the case of the
 * values '997', '277', '999', etc, because these strings may be recast
* from strings to integers, so the 'type' originally supplied is lost.
* This introduces an inconsistency when the 'type' is used in comparison tests.
* We call the csv_file_type() function to return a usable file type identifier.
* The 'datecolumn' and 'fncolumn' entries are used in csv_to_html() to filter by date
* or place links to files.
*
* @param string $type -- default = ALL or one of batch, ibr, ebr, dpr, f997, f277, era, ack, text
* @return array
*/
function csv_parameters($type='ALL') {
    //
    // This will need the OpenEMR 'oe_site_dir' to replace global
    //
    $p_ar = array();
    // normalize the requested type; 'ALL' passes through unchanged
    $tp = ($type === 'ALL') ? $type : csv_file_type($type);
    if (!$tp) {
        csv_edihist_log('csv_parameters() error: incorrect type '.$type);
        return $p_ar;
    }
    //$edihist_dir = $GLOBALS['OE_SITE_DIR'].'/edi/history';
    $edihist_dir = csv_edih_basedir();
    //
    // the batch file directory is a special case - decide whether to use OpenEMR batch files or make our own copies
    // OpenEMR copies each batch file to sites/default/edi and this project never writes to that directory
    // batch reg ex -- '/20[01][0-9]-[01][0-9]-[0-3][0-9]-[0-9]{4}-batch*\.txt/' '/\d{4}-\d{2}-\d{2}-batch*\.txt$/'
    //
    // each entry: storage directory, csv table paths, date column names
    // used by csv_to_html() filtering, and the file-name match regex
    $p_ar['f837'] = array('type'=>'f837', 'directory'=>$GLOBALS['OE_SITE_DIR'].DS.'edi', 'claims_csv'=>$edihist_dir.DS.'csv'.DS.'claims_f837.csv',
        'files_csv'=>$edihist_dir.DS.'csv'.DS.'files_f837.csv', 'filedate'=>'Date', 'claimdate'=>'SvcDate', 'regex'=>'/\-batch(.*)\.txt$/');
    //
    //$p_ar['csv'] = array("type"=>'csv', "directory"=>$edihist_dir.'/csv', "claims_csv"=>'ibr_parameters.csv',
    //    "files_csv"=>'', "column"=>'', "regex"=>'/\.csv$/');
    $p_ar['f997'] = array('type'=>'f997', 'directory'=>$edihist_dir.DS.'f997', 'claims_csv'=>$edihist_dir.DS.'csv'.DS.'claims_f997.csv',
        'files_csv'=>$edihist_dir.DS.'csv'.DS.'files_f997.csv', 'filedate'=>'Date', 'claimdate'=>'RspDate', 'regex'=>'/\.(99[79]|ta1|ack)$/i');
    $p_ar['f276'] = array('type'=>'f276', 'directory'=>$edihist_dir.DS.'f276', 'claims_csv'=>$edihist_dir.DS.'csv'.DS.'claims_f276.csv',
        'files_csv'=>$edihist_dir.DS.'csv'.DS.'files_f276.csv', 'filedate'=>'Date', 'claimdate'=>'ReqDate', 'regex'=>'/\.276([ei]br)?$/');
    $p_ar['f277'] = array('type'=>'f277', 'directory'=>$edihist_dir.DS.'f277', 'claims_csv'=>$edihist_dir.DS.'csv'.DS.'claims_f277.csv',
        'files_csv'=>$edihist_dir.DS.'csv'.DS.'files_f277.csv', 'filedate'=>'Date', 'claimdate'=>'SvcDate', 'regex'=>'/\.277([ei]br)?$/i');
    $p_ar['f270'] = array('type'=>'f270', 'directory'=>$edihist_dir.DS.'f270', 'claims_csv'=>$edihist_dir.DS.'csv'.DS.'claims_f270.csv',
        'files_csv'=>$edihist_dir.DS.'csv'.DS.'files_f270.csv', 'filedate'=>'Date', 'claimdate'=>'ReqDate', 'regex'=>'/\.270([ei]br)?$/i');
    $p_ar['f271'] = array('type'=>'f271', 'directory'=>$edihist_dir.DS.'f271', 'claims_csv'=>$edihist_dir.DS.'csv'.DS.'claims_f271.csv',
        'files_csv'=>$edihist_dir.DS.'csv'.DS.'files_f271.csv', 'filedate'=>'Date', 'claimdate'=>'RspDate', 'regex'=>'/\.271([ei]br)?$/i');
    $p_ar['f278'] = array('type'=>'f278', 'directory'=>$edihist_dir.DS.'f278', 'claims_csv'=>$edihist_dir.DS.'csv'.DS.'claims_f278.csv',
        'files_csv'=>$edihist_dir.DS.'csv'.DS.'files_f278.csv', 'filedate'=>'Date', 'claimdate'=>'FileDate', 'regex'=>'/\.278/');
    // OpenEMR stores era files, but the naming scheme is confusing, so we will just use our own directory for them
    $p_ar['f835'] = array('type'=>'f835', 'directory'=>$edihist_dir.DS.'f835', 'claims_csv'=>$edihist_dir.DS.'csv'.DS.'claims_f835.csv',
        'files_csv'=>$edihist_dir.DS.'csv'.DS.'files_f835.csv', 'filedate'=>'Date', 'claimdate'=>'SvcDate', 'regex'=>'/835[0-9]{5}\.835*|\.(era|ERA|835)$/i');
    //
    // a single type returns just its row; 'ALL' (or an unknown key)
    // returns the whole table
    if ( array_key_exists($tp, $p_ar) ) {
        return $p_ar[$tp];
    } else {
        return $p_ar;
    }
}
/**
 * Determine which csv tables have data, for a select dropdown.
 *
 * @param string $outtp default 'json'
 * @return array|string json if argument is 'json', otherwise the array
 */
function csv_table_select_list($outtp='json') {
    $optlist = array();
    // display labels keyed by file type
    $labels = array('f835'=>'Payments', 'f837'=>'Claims', 'batch'=>'Claims', 'f277'=>'Status', 'f276'=>'Status Req',
                    'f997'=>'Ack','f271'=>'Benefit', 'f270'=>'Benefit Req', 'f278'=>'Auth');
    $csvdir = csv_edih_basedir().DS.'csv'; // $GLOBALS['OE_SITE_DIR'].'/edi/history/csv'
    foreach(scandir($csvdir) as $csvf) {
        if ($csvf == "." || $csvf == ".." ) { continue; }
        // skip renamed 'old_*' tables, editor backups, and tables that
        // hold only a heading row (under 70 bytes)
        if (strpos($csvf, 'old') === 0) { continue; }
        if (filesize($csvdir.DS.$csvf) < 70) { continue; }
        if (substr($csvf, -1) == '~') { continue; }
        $fname = pathinfo($csvdir.DS.$csvf, PATHINFO_FILENAME);
        // table names look like files_f997 or claims_f837
        $part = explode('_', $fname);
        $optlist[$part[0]][$part[1]]['fname'] = $fname;
        $optlist[$part[0]][$part[1]]['desc'] = $part[0].'-'.$labels[$part[1]];
    }
    return ($outtp == 'json') ? json_encode($optlist) : $optlist;
}
/**
 * List existing archive files.
 *
 * @param string $outtp default 'json'
 * @return array|string json if argument is 'json', otherwise the array
 */
function csv_archive_select_list($outtp='json') {
    //
    $flist = array();
    $archdir = csv_edih_basedir().DS.'archive';
    //
    // debug
    csv_edihist_log("csv_archive_select_list: using $archdir");
    //
    $scan = scandir($archdir);
    if (is_array($scan)) {
        foreach($scan as $s) {
            // skip the dot entries and the user notes file
            if ($s == '.' || $s == '..' || strpos($s, 'note')) { continue; }
            $flist[] = $s;
        }
    }
    return ($outtp == 'json') ? json_encode($flist) : $flist;
}
/**
 * List files in the storage directory for the given type.
 *
 * A log entry is written for any directory entry that is not a
 * regular file.
 *
 * @uses csv_parameters()
 * @param string $type a type from our list
 * @return array|bool file names, or false for a bad type
 */
function csv_dirfile_list($type) {
    // return false if location is not appropriate
    $tp = csv_file_type($type);
    if (!$tp) {
        csv_edihist_log("csv_dirfile_list error: incorrect type $type");
        return false;
    }
    $params = csv_parameters($tp);
    if (empty($params) || csv_singlerecord_test($params) == false ) {
        csv_edihist_log("csv_dirfile_list() error: incorrect type $type");
        return false;
    }
    $search_dir = $params['directory'];
    $dirfiles = array();
    //
    if (!is_dir($search_dir)) {
        csv_edihist_log("csv_dirfile_list $type : not a directory $search_dir");
        return $dirfiles;
    }
    $dh = opendir($search_dir);
    if (!$dh) {
        csv_edihist_log("csv_dirfile_list $type : error in scan $search_dir");
        return $dirfiles;
    }
    while (($file = readdir($dh)) !== false) {
        if ($file == '.' || $file == '..') { continue; }
        // the f837 directory also holds the history subdir and a README
        if ($tp == 'f837' && ($file == 'history' || $file == 'README.txt')) { continue; }
        if (is_file($search_dir.DS.$file) ) {
            $dirfiles[] = $file;
        } else {
            csv_edihist_log("csv_dirfile_list $type : not a file $file");
        }
    }
    //
    return $dirfiles;
} // end function
/**
 * List files that are already recorded in the files csv table.
 *
 * @uses csv_parameters()
 * @uses csv_table_header()
 *
 * @param string $type -- one of our types
 * @return array|bool file names (heading row removed), or false on error
 */
function csv_processed_files_list($type) {
    //
    $tp = csv_file_type($type);
    if (!$tp) {
        csv_edihist_log("csv_processed_files_list: incorrect type $type");
        return false;
    }
    $param = csv_parameters($tp);
    // locate the FileName column in the table heading; default column 1
    $csv_col = 1;
    $hdr_ar = csv_table_header($tp, 'file');
    if ( is_array($hdr_ar) ) {
        foreach($hdr_ar as $k=>$hd) {
            if ($hd == 'FileName') { $csv_col = $k; break; }
        }
    }
    $csv_file = $param['files_csv'];
    $processed_files = array();
    //
    if (!is_file($csv_file)) {
        // first run - no file exists
        csv_edihist_log("csv_processed_files_list: csv file does not exist ".basename($csv_file));
        return $processed_files;
    }
    $fh1 = fopen( $csv_file, "r" );
    if ($fh1 === FALSE) {
        csv_edihist_log ("csv_list_processed_files: failed to access $csv_file" );
        return false;
    }
    while (($data = fgetcsv($fh1, 1024, ",")) !== FALSE) {
        $processed_files[] = $data[$csv_col];
    }
    fclose($fh1);
    // remove the header row, but avoid array_slice on an empty array
    return (empty($processed_files)) ? $processed_files : array_slice($processed_files, 1);
} // end function
/**
 * Give an array of files in the storage directories that are not yet
 * in the csv record.
 *
 * @param string $type -- one of our types
 * @return array|bool new file names, or false for a bad type
 */
function csv_newfile_list($type) {
    //
    $tp = csv_file_type($type);
    if (!$tp) {
        csv_edihist_log('csv_newfile_list: incorrect type '.$type);
        return false;
    }
    //
    $dir_files = csv_dirfile_list($tp);
    $csv_files = csv_processed_files_list($tp);
    //
    if (empty($dir_files)) {
        // nothing in the directory, so nothing can be new
        return array();
    }
    if (empty($csv_files) || is_null($csv_files)) {
        // no processed record yet -- every directory file is new
        return $dir_files;
    }
    // files present in the directory but absent from the csv record
    // ($dir_files must come first in array_diff)
    return array_diff($dir_files, $csv_files);
}
/**
 * Parse a 997 IK3 error segment to identify the segment causing
 * rejection.  The error segment string is specially created in
 * edih_997_csv_data(); the format is
 *   trace*IK3*segID*segpos[*segID*segpos*segID*segpos]
 * Simple analysis, but the idea is just to identify the bad segment.
 * Note: multiple IK3 segments are allowed in 997/999 x12.
 *
 * @param string $err_seg error segment from edih_997_csv_data()
 * @param bool $id true if only the 1st segmentID is wanted (unused)
 * @return array keys 'trace', 'id' (segment IDs), 'err' (positions)
 */
function edih_errseg_parse($err_seg, $id=false) {
    //
    $ret_ar = array();
    if ( !$err_seg || strpos($err_seg, 'IK3') === false) {
        csv_edihist_log('edih_errseg_parse: invalid argument');
        return $ret_ar;
    }
    //
    $elems = explode('*', $err_seg);
    foreach($elems as $pos=>$elem) {
        $pos = (int)$pos;
        if ($pos == 0) {
            $ret_ar['trace'] = $elem;       // trace number
        } elseif ($pos == 1 || $pos > 7) {
            continue;                       // 'IK3' marker / beyond range
        } elseif ($pos % 2 == 0) {
            $ret_ar['id'][] = $elem;        // even positions: segment IDs
        } else {
            $ret_ar['err'][] = $elem;       // odd positions: segment positions
        }
    }
    //
    return $ret_ar;
}
/**
 * Order the csv data array according to the csv table heading row so
 * the data added to csv table rows are correctly ordered.  The
 * supplied data has the structure
 *   array[icn]['type']  [icn]['file'][i][key]  [icn]['claim'][i][key]
 *
 * @uses csv_table_header()
 *
 * @param array $csvdata data array from edih_XXX_csv_data()
 * @return array records re-keyed to the table column order
 */
function edih_csv_order($csvdata) {
    //
    $ordered = array();
    foreach($csvdata as $icn=>$data) {
        $ftype = $data['type'];
        $ordered[$icn]['type'] = $ftype;
        //
        foreach($data as $csvtype=>$records) {
            if ($csvtype == 'type') { continue; }
            // the heading row defines the column order for this table
            $cols = csv_table_header($ftype, $csvtype);
            foreach($records as $ri=>$rcrd) {
                foreach($cols as $ci=>$colname) {
                    $ordered[$icn][$csvtype][$ri][$ci] = $rcrd[$colname];
                }
            }
        }
    }
    return $ordered;
}
/**
 * Insert dashes in ten-digit telephone numbers.
 *
 * @param string $str_val the telephone number
 * @return string the number with dashes, or the input when not 10 digits
 */
function edih_format_telephone ($str_val) {
    // keep digits only
    $digits = preg_replace('/\D/', '', (string)$str_val);
    if ( strlen($digits) != 10 ) {
        csv_edihist_log('edih_format_telephone: invalid argument: '.$str_val);
        return $str_val;
    }
    return substr($digits,0,3) . "-" . substr($digits,3,3) . "-" . substr($digits,6);
}
/**
 * Order MM DD YYYY values and insert separators in eight-digit dates.
 *
 * US gives MM/DD/YYYY, anything else gives YYYY-MM-DD.  A six-digit
 * value is assumed to be mmddyy (US) or yymmdd (other) and the current
 * century is prepended.
 *
 * @param string $str_val the six- or eight-digit date
 * @param string $pref 'US' for MM/DD/YYYY, otherwise YYYY-MM-DD
 * @return string the formatted date
 */
function edih_format_date ($str_val, $pref = "Y-m-d") {
    // keep digits only
    $digits = preg_replace('/\D/', '', (string)$str_val);
    if (strlen($digits) == 6) {
        // expand to ccyymmdd using the current century
        $century = substr(date('Ymd'), 0, 2);
        if ($pref == "US") {
            // assume mmddyy: move the year to the front
            $digits = $century.substr($digits,-2).substr($digits,0,4);
        } else {
            // assume yymmdd
            $digits = $century.$digits;
        }
    }
    // $digits is now ccyymmdd
    if ($pref == "US") {
        return substr($digits,4,2) . "/" . substr($digits,6) . "/" . substr($digits,0,4);
    }
    return substr($digits,0,4) . "-" . substr($digits,4,2) . "-" . substr($digits,6);
}
/**
 * Format monetary amounts with a dollar sign and two decimal places.
 *
 * @todo add other formats
 * @param string $str_val the amount string
 * @return string the formatted amount; empty/false input passes through
 */
function edih_format_money ($str_val) {
    //
    // falsy values pass through unchanged, except the literal '0'
    if (!$str_val && $str_val !== '0') {
        return $str_val;
    }
    return sprintf("$%01.2f", $str_val);
}
/**
 * Format fractional amounts as a percentage with a % sign.
 * Typical example: ".50" from an x12 edi segment element gives "50%".
 *
 * Note: the original tested is_float() on the result of a (float)
 * cast, which is always true, so the else branch was unreachable dead
 * code; the multiplication is unconditional.
 *
 * @param string $str_val the amount string
 * @return string the value as a percentage
 */
function edih_format_percent ($str_val) {
    return ((float)$str_val)*100 . '%';
}
/**
 * HTML string for a table thead element.
 *
 * Uses the caller-supplied heading array when given, otherwise looks
 * up the standard heading with csv_table_header().
 *
 * Note: the original test used bitwise '&' instead of logical '&&',
 * so for example a two-element heading array gave (int)true & 2 == 0
 * and the supplied headings were wrongly discarded.
 *
 * @uses csv_table_header()
 * @param string $file_type one of our edi types
 * @param string $csv_type 'file' or 'claim'
 * @param array $tblhd optional explicit heading values
 * @return string|bool thead markup, or false when no headings found
 */
function csv_thead_html($file_type, $csv_type, $tblhd=null) {
    //
    if (is_array($tblhd) && count($tblhd) ) {
        $hvals = $tblhd;
    } else {
        $hvals = csv_table_header($file_type, $csv_type);
    }
    if ( !is_array($hvals) || !count($hvals) ) {
        return false;
    }
    $str_html = "<thead>".PHP_EOL."<tr>".PHP_EOL;
    foreach($hvals as $val) {
        $str_html .="<th>$val</th>";
    }
    $str_html .= PHP_EOL."</tr>".PHP_EOL."</thead>".PHP_EOL;
    //
    return $str_html;
}
/**
 * Give the column headings for the csv files
 *
 * @uses csv_file_type()
 * @param string $file_type one of our edi types
 * @param string $csv_type either 'file' or 'claim'
 * @return array|bool heading array; empty array or false on a bad type
 */
function csv_table_header($file_type, $csv_type) {
    //
    $ft = csv_file_type($file_type);
    // note: strpos() searches for $csv_type INSIDE the literal '|file',
    // so 'file' -- or any substring of it such as 'ile' -- matches;
    // the same applies to '|claim'.  An empty $csv_type stays empty and
    // is rejected below.
    $ct = strpos('|file', $csv_type) ? 'file' : $csv_type;
    $ct = strpos('|claim', $ct) ? 'claim' : $ct;
    //
    $hdr = array();
    if (!$ft || !$ct ) {
        csv_edihist_log ('csv_table_header error: incorrect file ['.$file_type.']or csv ['.$csv_type.'] type');
        return $hdr;
    }
    //
    if ($ct === 'file') {
        // per-file table columns
        switch((string)$ft) {
            //case 'ack': $hdr = array('Date', 'FileName', 'isa13', 'ta1ctrl', 'Code'); break;
            //case 'ebr': $hdr = array('Date', 'FileName', 'clrhsid', 'claim_ct', 'reject_ct', 'Batch'); break;
            //case 'ibr': $hdr = array('Date', 'FileName', 'clrhsid', 'claim_ct', 'reject_ct', 'Batch'); break;
            //
            case 'f837': $hdr = array('Date', 'FileName', 'Control', 'Claim_ct', 'x12Partner'); break;
            case 'ta1': $hdr = array('Date', 'FileName', 'Control', 'Trace', 'Code'); break;
            case 'f997': $hdr = array('Date', 'FileName', 'Control', 'Trace', 'RspType', 'RejCt'); break;
            case 'f276': $hdr = array('Date', 'FileName', 'Control', 'Claim_ct', 'x12Partner'); break;
            case 'f277': $hdr = array('Date', 'FileName', 'Control', 'Accept', 'AccAmt', 'Reject', 'RejAmt'); break;
            case 'f270': $hdr = array('Date', 'FileName', 'Control', 'Claim_ct', 'x12Partner'); break;
            case 'f271': $hdr = array('Date', 'FileName', 'Control', 'Claim_ct', 'Reject', 'Payer'); break;
            case 'f278': $hdr = array('Date', 'FileName', 'Control', 'TrnCount', 'Auth', 'Payer'); break;
            case 'f835': $hdr = array('Date', 'FileName', 'Control', 'Trace', 'Claim_ct', 'Denied', 'Payer'); break;
        }
    } elseif ($ct === 'claim') {
        // per-claim table columns
        switch((string)$ft) {
            //case 'ebr': $hdr = array('PtName','SvcDate', 'CLM01', 'Status', 'Batch', 'FileName', 'Payer'); break;
            //case 'ibr': $hdr = array('PtName','SvcDate', 'CLM01', 'Status', 'Batch', 'FileName', 'Payer'); break;
            //case 'dpr': $hdr = array('PtName','SvcDate', 'CLM01', 'Status', 'Batch', 'FileName', 'Payer'); break;
            //
            case 'f837': $hdr = array('PtName', 'SvcDate', 'CLM01', 'InsLevel', 'BHT03', 'FileName', 'Fee', 'PtPaid', 'Provider' ); break;
            case 'f997': $hdr = array('PtName', 'RspDate', 'Trace', 'Status', 'Control', 'FileName', 'RspType', 'err_seg'); break;
            case 'f276': $hdr = array('PtName', 'SvcDate', 'CLM01', 'ClaimID', 'BHT03', 'FileName', 'Payer', 'Trace'); break;
            case 'f277': $hdr = array('PtName', 'SvcDate', 'CLM01', 'Status', 'BHT03', 'FileName', 'Payer', 'Trace'); break;
            case 'f270': $hdr = array('PtName', 'ReqDate', 'Trace', 'InsBnft', 'BHT03', 'FileName', 'Payer'); break;
            case 'f271': $hdr = array('PtName', 'RspDate', 'Trace', 'Status', 'BHT03', 'FileName', 'Payer'); break;
            case 'f278': $hdr = array('PtName', 'FileDate', 'Trace', 'Status', 'BHT03', 'FileName', 'Auth', 'Payer'); break;
            case 'f835': $hdr = array('PtName', 'SvcDate', 'CLM01', 'Status', 'Trace', 'FileName', 'ClaimID', 'Pmt', 'PtResp', 'Payer'); break;
        }
    } else {
        // unexpected error
        csv_edihist_log ('edih_csv_table_header() error: failed to match file type ['.$ft.'] or csv type ['.$ct.']');
        return false;
    }
    // no heading defined for this type/csv combination gives false
    if (count($hdr) ) {
        return $hdr;
    } else {
        return false;
    }
}
/*
function csv_files_header($file_type, $csv_type) {
//
$tp = csv_file_type($type);
if (!$tp) {
csv_edihist_log('csv_files_header: incorrect type '.$file_type);
return false;
}
if (!strpos('|file|claim', $csv_type) ) {
csv_edihist_log('csv_files_header error: incorrect csv type '.$csv_type);
return false;
}
//
$ft = strpos('|277', $file_type) ? 'f277' : $file_type;
$ft = strpos('|835', $file_type) ? 'era' : $ft;
$ft = strpos('|837', $file_type) ? 'batch' : $ft;
$ft = strpos('|999|997|ack|ta1', $file_type) ? 'f997' : $ft;
//
$csv_hd_ar = array();
// dataTables: | 'date' | 'file_name (link)' | 'file_text (link fmt)' | 'claim_ct' | 'reject_ct' |
$csv_hd_ar['ack']['file'] = array('Date', 'FileName', 'isa13', 'ta1ctrl', 'Code');
$csv_hd_ar['ebr']['file'] = array('Date', 'FileName', 'clrhsid', 'claim_ct', 'reject_ct', 'Batch');
$csv_hd_ar['ibr']['file'] = array('Date', 'FileName', 'clrhsid', 'claim_ct', 'reject_ct', 'Batch');
//
// dataTables: | 'date' | 'file_name (link)' | 'file_text (link fmt)' | 'claim_ct' | 'partner' |
$csv_hd_ar['batch']['file'] = array('Date', 'FileName', 'Ctn_837', 'claim_ct', 'x12_partner');
$csv_hd_ar['ta1']['file'] = array('Date', 'FileName', 'Ctn_ta1', 'ta1ctrl', 'Code');
$csv_hd_ar['f997']['file'] = array('Date', 'FileName', 'Ctn_999', 'ta1ctrl', 'RejCt');
$csv_hd_ar['f277']['file'] = array('Date', 'FileName', 'Ctn_277', 'Accept', 'AccAmt', 'Reject', 'RejAmt');
$csv_hd_ar['f270']['file'] = array('Date', 'FileName', 'Ctn_270', 'claim_ct', 'x12_partner');
$csv_hd_ar['f271']['file'] = array('Date', 'FileName', 'Ctn_271', 'claim_ct', 'Denied', 'Payer');
$csv_hd_ar['era']['file'] = array('Date', 'FileName', 'Trace', 'claim_ct', 'Denied', 'Payer');
//
// dataTables: | 'pt_name' | 'svc_date' | 'clm01 (link clm)' | 'status (mouseover)' | b f t (links to files) | message (mouseover) |
$csv_hd_ar['ebr']['claim'] = array('PtName','SvcDate', 'clm01', 'Status', 'Batch', 'FileName', 'Payer');
$csv_hd_ar['ibr']['claim'] = array('PtName','SvcDate', 'clm01', 'Status', 'Batch', 'FileName', 'Payer');
$csv_hd_ar['dpr']['claim'] = array('PtName','SvcDate', 'clm01', 'Status', 'Batch', 'FileName', 'Payer');
//
// dataTables: | 'pt_name' | 'svc_date' | 'clm01 (link clm)' | 'status (mouseover)' | 'bht03_837 (link rsp)' | message (mouseover) |
$csv_hd_ar['batch']['claim'] = array('PtName', 'SvcDate', 'clm01', 'InsLevel', 'Ctn_837', 'File_837', 'Fee', 'PtPaid', 'Provider' );
$csv_hd_ar['f997']['claim'] = array('PtName', 'SvcDate', 'clm01', 'Status', 'ak_num', 'File_997', 'Ctn_837', 'err_seg');
$csv_hd_ar['f277']['claim'] = array('PtName', 'SvcDate', 'clm01', 'Status', 'st_277', 'File_277', 'payer_name', 'claim_id', 'bht03_837');
$csv_hd_ar['f270']['claim'] = array('PtName', 'SvcDate', 'clm01', 'InsLevel', 'st_270', 'File_270', 'payer_name', 'bht03_270');
$csv_hd_ar['f271']['claim'] = array('PtName', 'SvcDate', 'clm01', 'Status', 'st_271', 'File_271', 'payer_name', 'bht03_270');
$csv_hd_ar['era']['claim'] = array('PtName', 'SvcDate', 'clm01', 'Status', 'trace', 'File_835', 'claimID', 'Pmt', 'PtResp', 'Payer');
//
return $csv_hd_ar[$ft][$csv_type];
}
*/
/**
* adapted from http://scratch99.com/web-development/javascript/convert-bytes-to-mb-kb/
*
* @param int
*
* @return string
*/
function csv_convert_bytes($bytes) {
    // Convert a byte count to a human-readable size, e.g. 1536 -> "1.5 KB".
    //
    // @param int $bytes file size in bytes
    // @return string formatted size, or 'n/a' for zero/invalid input
    $sizes = array('Bytes', 'KB', 'MB', 'GB', 'TB');
    // negative or non-numeric input would feed log() a bad value -- treat as n/a
    if (!is_numeric($bytes) || $bytes <= 0) { return 'n/a'; }
    $i = (int)floor( log($bytes) / log(1024) );
    // clamp to the largest known unit so huge values cannot index past the table
    $i = min($i, count($sizes) - 1);
    if ($i <= 0) {
        return $bytes.' '.$sizes[0];
    } else {
        return round($bytes / pow(1024, $i), 1).' '.$sizes[$i];
    }
}
/**
* Determine whether an array is multidimensional
*
* @param array
 * @return bool false if array is multidimensional
*/
function csv_singlerecord_test ( $array ) {
    // Report whether $array is a flat (single-level) array.
    // A flat array has equal COUNT_NORMAL and COUNT_RECURSIVE counts;
    // any nested sub-array makes the recursive count larger.
    //
    // @param array $array value to inspect
    // @return bool false when multidimensional or not an array
    if (!is_array($array)) {
        return false;
    }
    return count($array, COUNT_RECURSIVE) == count($array, COUNT_NORMAL);
}
/*
* give first and last index keys for an array
*
* @param array
* @return array
*/
function csv_array_bounds($array) {
    // Give the first and last index keys of an array.
    //
    // The original used reset()/end(), but those return the element VALUE,
    // so a boundary element that is itself false was mistaken for failure
    // and its key silently dropped.  Using the key list avoids that.
    //
    // @param array $array
    // @return array array(0 => first key, 1 => last key), empty on bad input
    $ret_ar = array();
    if (is_array($array) && count($array)) {
        $keys = array_keys($array);
        $ret_ar[0] = $keys[0];
        $ret_ar[1] = $keys[count($keys) - 1];
    }
    return $ret_ar;
}
/*
* return a csv file as an associative array
* the first row is the header or array keys for the row
* array structure:
* array[i]=>array(hdr0=>csvrow[0], hdr1=>csvrow[1], hdr2=>csvrow[2], ...)
*
* @param string file type e.g. f837
* @param string csv type claim or file
* @return array
*/
function csv_assoc_array($file_type, $csv_type) {
    // Read a csv table into an associative array; the first csv row is
    // taken as the header and supplies the keys for every data row:
    //   array[i] => array(hdr0 => col0, hdr1 => col1, ...)
    //
    // @param string $file_type edi type, e.g. 'f837'
    // @param string $csv_type  'claim' or 'file'
    // @return array|bool rows array (possibly empty) or false on bad args/path
    if (!$file_type || !$csv_type) {
        csv_edihist_log('csv_assoc_array; invalid arguments ft: '.$file_type.' csvt: '.$csv_type);
        return false;
    }
    $csv_ar = array();
    $h = array();
    $fp = '';
    //
    $param = csv_parameters($file_type);
    // 'aim' substring selects the claims table; anything else means files
    $fcsv = (strpos($csv_type, 'aim')) ? 'claims_csv' : 'files_csv';
    //
    $fp = (isset($param[$fcsv])) ? $param[$fcsv] : '';
    if (!is_file($fp)) {
        csv_edihist_log('csv_assoc_array; invalid csv file '.basename($fp));
        return $csv_ar;
    }
    $ct = 0;       // column count taken from the header row
    $row = 0;      // raw row counter; row 0 is the header
    $ky = -1;      // data row key; first data row gets key 0
    if (($fh = fopen($fp, "rb")) !== false) {
        while (($data = fgetcsv($fh, 2048, ",")) !== false) {
            // fgetcsv can yield null for blank lines on some versions
            if ( is_null($data) ) { continue; }
            if ($row) {
                // data row: key each value by the corresponding header name
                for($i=0; $i<$ct; $i++) {
                    $csv_ar[$ky][$h[$i]] = $data[$i];
                }
            } else {
                // header row: remember column names and count
                $ct = count($data);
                $h = $data;
            }
            $row++;
            $ky++;
        }
        fclose($fh);
    } else {
        // invalid file path
        csv_edihist_log('csv_assoc_array; invalid file path '.$fp);
        return false;
    }
    //
    return $csv_ar;
}
/**
* A multidimensional array will be flattened to a single row.
*
* @param array $array array to be flattened
* @return array
*/
function csv_array_flatten($array) {
    // Flatten a multidimensional array to a single level.
    //
    // Bug fixed: array_merge() renumbers numeric keys, so writing a later
    // scalar back with its ORIGINAL numeric key could overwrite a value
    // produced by an earlier merge (e.g. [1, [2, 3], 4] lost the 3).
    // Numeric-keyed scalars are now appended; string keys are preserved
    // (later values still overwrite earlier ones with the same string key).
    //
    // @param array $array array to be flattened
    // @return array|bool flattened array, or false when input is not an array
    if (!is_array($array)) {return FALSE;}
    $result = array();
    foreach ($array as $key => $value) {
        if (is_array($value)) {
            $result = array_merge($result, csv_array_flatten($value));
        } elseif (is_int($key)) {
            // append: the original numeric position is meaningless after merges
            $result[] = $value;
        } else {
            $result[$key] = $value;
        }
    }
    return $result;
}
/**
* Write parsed data from edi x12 files to csv file
*
* @uses csv_parameters()
 * @uses csv_table_header()
*
* @param array data array from parse functions
* @return bool true if no error
*/
function edih_csv_write($csv_data) {
    // Append parsed x12 data rows to the per-type files/claims csv tables.
    // Expected structure: $csv_data[icn] => array('type' => ft,
    //                                             'file'  => rows,
    //                                             'claim' => rows)
    //
    // Fixes: the append file handle was never closed (resource leak), and
    // $rws was undefined at the final return when no sections were written.
    //
    // @param array $csv_data data array from the parse functions
    // @return int|bool rows written in the last section, or false on error
    if ( ! (is_array($csv_data) && count($csv_data)) ){
        csv_edihist_log('edih_csv_write(): invalid data array');
        return false;
    }
    // defined even when no 'file'/'claim' sections are present
    $rws = 0;
    //
    foreach($csv_data as $icn=>$isa) {
        // should be array[icn] => [file][j][key] [claim][j][key] [type]
        $ft = ( isset($isa['type']) ) ? $isa['type'] : '';
        if (!$ft) {
            csv_edihist_log('edih_csv_write(): invalid file type');
            continue;
        }
        //
        $param = csv_parameters($ft);
        $f_hdr = csv_table_header($ft, 'file');
        $c_hdr = csv_table_header($ft, 'claim');
        if (is_array($param)) {
            // if either csv file does not exist, create it with its header row;
            // all unlisted files in type directory will be processed on next process round
            if (is_file($param['files_csv']) && (filesize($param['files_csv']) > 20)) {
                csv_edihist_log('edih_csv_write: csv check for files csv '.$ft);
            } else {
                $nfcsv = $param['files_csv'];
                $fh = fopen($nfcsv, 'wb');
                if ($fh !== false) {
                    fputcsv($fh, $f_hdr);
                    fclose($fh);
                    // keep the table private to the web user
                    chmod($nfcsv, 0600);
                }
                csv_edihist_log('edih_csv_write: created files_csv file for '.$ft);
            }
            if (is_file($param['claims_csv']) && filesize($param['claims_csv'])) {
                csv_edihist_log('edih_csv_write: csv check for claims csv '.$ft);
            } else {
                $nfcsv = $param['claims_csv'];
                $fh = fopen($nfcsv, 'wb');
                if ($fh !== false) {
                    fputcsv($fh, $c_hdr);
                    fclose($fh);
                    chmod($nfcsv, 0600);
                }
                csv_edihist_log('edih_csv_write: created claims_csv file for '.$ft);
            }
        } else {
            csv_edihist_log('edih_csv_write: parameters error for type '.$ft);
            return false;
        }
        //
        foreach($isa as $key=>$data) {
            if ($key == 'type') { continue; }
            // get the csv file path from parameters
            $fp = ($key == 'file') ? $param['files_csv'] : $param['claims_csv'];
            // get the csv row header
            $order_ar = ($key == 'file') ? $f_hdr : $c_hdr;
            $ct = count($order_ar);
            $chrs = 0;
            $rws = 0;
            //
            $fh = fopen( $fp, 'ab');
            if (is_resource($fh)) {
                // to assure proper order of data in each row, the
                // csv row is assembled by matching keys to the header row
                foreach($data as $ky=>$row) {
                    $csvrow = array();
                    for ($i=0; $i<$ct; $i++) {
                        $csvrow[$i] = $row[$order_ar[$i]];
                    }
                    $chrs += fputcsv ( $fh , $csvrow );
                    $rws++;
                }
                // close the append handle (was leaked in the original)
                fclose($fh);
            } else {
                csv_edihist_log('edih_csv_write(): failed to open '.$fp);
                return false;
            }
            //
            csv_edihist_log('edih_csv_write() wrote '.$rws.' rows to '.basename($fp));
        }
    }
    // NOTE(review): returns the row count of the LAST section written;
    // callers treating this as a total should be verified
    return $rws;
}
/**
* Search a csv record file and return the row or values from selected columns
*
* This function requires that the $search_ar parameter be an array
* with keys ['s_val']['s_col']['r_cols'], and 'r_cols' is an array
* 's_val' is the search value, s_col is the column to check, r_cols is an array
* of column numbers from which values are returned. If r_cols is not an array,
* then the entire row will be returned. If the 'expect' parameter is 1, then
* the search will stop after the first success and return the result. Otherwise, the
* entire file will be searched.
* ex: csv_search_record('batch', 'claim', array('s_val'=>'0024', 's_col'=>9, 'r_cols'=>array(1, 2, 7)), "1" )
*
* @uses csv_parameters()
* @param string $file_type
* @param string $csv_type
* @param array $search_ar
* @param mixed $expect
* @return array
*/
function csv_search_record($file_type, $csv_type, $search_ar, $expect='1') {
    // Search a csv table for rows whose column s_col equals s_val and
    // return either the whole row or just the r_cols columns.
    // With $expect == '1' the scan stops at the first match.
    csv_edihist_log("csv_search_record: ".strval($file_type)." ".strval($csv_type)." ".strval($search_ar['s_val']));
    //
    $tp = csv_file_type($file_type);
    if (!$tp) {
        csv_edihist_log("csv_search_record: incorrect type $file_type");
        return false;
    }
    //
    $params = csv_parameters($tp);
    //
    // select the csv table path by csv type
    if ($csv_type == 'claim') {
        $fp = $params['claims_csv'];
    } elseif ($csv_type == 'file') {
        $fp = $params['files_csv'];
    } else {
        csv_edihist_log('csv_search_record: incorrect csv type '.$csv_type);
        return FALSE;
    }
    //
    // the criteria array must have exactly these keys, in this order
    if (!is_array($search_ar) || array_keys($search_ar) != array('s_val', 's_col', 'r_cols')) {
        csv_edihist_log('csv_search_record: invalid search criteria');
        return FALSE;
    }
    $sv = $search_ar['s_val'];   // value to match
    $sc = $search_ar['s_col'];   // column index to test
    // non-array / empty 'r_cols' means return the entire matching row
    $rv = (is_array($search_ar['r_cols']) && count($search_ar['r_cols'])) ? $search_ar['r_cols'] : 'all';
    $ret_ar = array();
    $idx = 0;
    if (($fh1 = fopen($fp, "r")) !== false) {
        while (($data = fgetcsv($fh1)) !== false) {
            // check for a match
            if ($data[$sc] == $sv) {
                if ($rv == 'all') {
                    $ret_ar[$idx] = $data;
                } else {
                    // now loop through the 'r_cols' array for data index
                    $dct = count($data);
                    foreach($rv as $c) {
                        // make sure we don't access a non-existing index
                        if ($c >= $dct) { continue; }
                        //
                        $ret_ar[$idx][] = $data[$c];
                    }
                }
                $idx++;
                // stop at first match when only one result is expected
                if ($expect == '1') { break; }
            }
        }
        fclose($fh1);
    } else {
        csv_edihist_log('csv_search_record: failed to open '.$fp);
        return false;
    }
    // false (not an empty array) signals "no match" to callers
    if (empty($ret_ar) ) {
        return false;
    } else {
        return $ret_ar;
    }
}
/**
* Search the 'claims' csv table for the patient control and find the associated file name
*
* Searchtype
* In 'claims' csv tables, clm01 is position 2, ISA13 number is pos 4, and filename is pos 5;
* Since we are interested usually in the filename, ISA13 is irrelevant usually.
*
* @uses csv_parameters()
* @uses csv_pid_enctr_parse()
* @param string patient control-- pid-encounter, encounter, or pid
* @param string filetype -- x12 type or f837, f277, etc
* @param string search type encounter, pid, or clm01
* @return array|bool [i] data row array or empty on error
*/
function csv_file_by_enctr($clm01, $filetype='f837') {
    //
    // return array of [i](pid_encounter, filename), there may be more than one file
    //
    // NOTE(review): error paths return an html string OR false OR an empty
    // array -- callers must handle all three; verify before changing
    if (!$clm01) {
        return 'invalid encounter data<br>' . PHP_EOL;
    }
    //
    $ret_ar = array();
    $ft = csv_file_type($filetype);
    //
    if (!$ft) {
        csv_edihist_log('csv_file_by_enctr: incorrect file type '.$filetype);
        return $ret_ar;
    } else {
        $params = csv_parameters($ft);
        //$fp = isset($params['claims_csv']) ? dirname(__FILE__).$params['claims_csv'] : false;
        $fp = isset($params['claims_csv']) ? $params['claims_csv'] : false;
        $h_ar = csv_table_header($ft, 'claim');
        $hct = count($h_ar);
        if (!$fp) {
            csv_edihist_log('csv_file_by_enctr: incorrect file type '.$filetype);
            return $ret_ar;
        }
    }
    //
    // split CLM01 into pid / encounter parts to choose the search mode
    $enct = csv_pid_enctr_parse(strval($clm01));
    $p = (isset($enct['pid'])) ? $enct['pid'] : '';
    $e = (isset($enct['enctr'])) ? $enct['enctr'] : '';
    if ($p && $e) {
        // both parts present: match the full "pid-encounter" string exactly
        $pe = $p.'-'.$e;
        $srchtype = '';
    } elseif ($e) {
        $srchtype = 'encounter';
    } elseif ($p) {
        $srchtype = 'pid';
    } else {
        csv_edihist_log('csv_file_by_enctr: unable to determine encounter value '.$clm01);
        return 'unable to determine encounter value '.$clm01.'<br />'.PHP_EOL;
    }
    // OpenEMR creates CLM01 as nnn-nnn in genX12 batch
    //$pm = preg_match('/\D/', $enctr, $match2, PREG_OFFSET_CAPTURE);
    $val = array();
    //array_combine ( array $keys , array $values )
    // in 'claims' csv tables, clm01 is position 2 and filename is position 5
    if (($fh1 = fopen($fp, "r")) !== FALSE) {
        if ($srchtype == 'encounter') {
            while (($data = fgetcsv($fh1, 1024, ",")) !== FALSE) {
                // check for a match
                if (strpos($data[2], $e)) {
                    // compare the text after the '-' against the encounter value
                    $te = substr($data[2], strpos($data[2],'-')+1);
                    if (strcmp($te, $e) === 0) {
                        // build an associative row keyed by the claim table header
                        for ($i=0; $i<$hct; $i++) { $val[$h_ar[$i]] = $data[$i]; }
                        $ret_ar[] = $val; // array_combine($h_ar, $data);
                    }
                }
            }
        } elseif ($srchtype == 'pid') {
            while (($data = fgetcsv($fh1, 1024, ',')) !== FALSE) {
                if (strpos($data[2], $p) !== false) {
                    // compare the text before the '-' against the pid value
                    $te = (strpos($data[2], '-')) ? substr($data[2], 0, strpos($data[2],'-')) : '';
                    if (strcmp($te, $p) === 0) {
                        for ($i=0; $i<$hct; $i++) { $val[$h_ar[$i]] = $data[$i]; }
                        $ret_ar[] = $val; // $ret_ar[] = array_combine($h_ar, $data);
                    }
                }
            }
        } else {
            // exact "pid-encounter" match
            while (($data = fgetcsv($fh1, 1024, ",")) !== FALSE) {
                // check for a match
                if ( strcmp($data[2], $pe) === 0 ) {
                    for ($i=0; $i<$hct; $i++) { $val[$h_ar[$i]] = $data[$i]; }
                    $ret_ar[] = $val; // $ret_ar[] = array_combine($h_ar, $data);
                }
            }
        }
        fclose($fh1);
    } else {
        csv_edihist_log('csv_file_by_enctr: failed to open csv file '.basename($fp));
        return false;
    }
    return $ret_ar;
}
/**
* get the x12 file containing the control_num ISA13
*
* @todo the csv for x12 files 999, 277, 835, 837 must have the control number
*
* @uses csv_search_record()
* @param string $control_num the interchange control number, isa13
* @return string the file name
*/
function csv_file_by_controlnum($type, $control_num) {
    // Find the x12 file whose Control column matches the interchange
    // control number (ISA13) and return its FileName.
    //
    // @param string $type        edi file type
    // @param string $control_num interchange control number, isa13
    // @return string the file name, or empty string when not found
    $tp = csv_file_type($type);
    //
    // locate the Control and FileName columns in the 'file' table header
    $hdr = csv_table_header($tp, 'file');
    $scol = array_search('Control', $hdr);
    $rcol = array_search('FileName', $hdr);
    //
    // ISA13 is nine digits -- drop anything beyond that before matching
    $ctln = (strlen($control_num) >= 9) ? substr($control_num, 0, 9) : $control_num;
    $search = array('s_val' => $ctln, 's_col' => $scol, 'r_cols' => array($rcol));
    // expect a single match; result shape is [0][0] => file name
    $result = csv_search_record($tp, 'file', $search, "1");
    //
    return (is_array($result) && count($result[0]) == 1) ? $result[0][0] : '';
}
/**
* Search the csv table to obtain the file name for a given
* trace value (835 / 997 999 type only)
*
* Note: the 997/999 trace is the ISA13 of a batch file
*
*
* @param string trace value (TRN02, TA101, or BHT03)
* @param string from type (default is f835)
* @param string to type (default is f835)
* @return string file name or empty string
*/
function csv_file_by_trace($trace, $from_type='f835', $to_type='f837') {
    // Search the csv tables for the file name referenced by a trace value
    // (TRN02, TA101, or BHT03).  The 997/999 trace is the ISA13 of a batch file.
    //
    // Fix: the final else branch logged an undefined variable $file_type;
    // the intended variable is $from_type.
    //
    // @param string $trace     trace value
    // @param string $from_type type the trace came from (default f835)
    // @param string $to_type   type to search in (default f837)
    // @return string file name or empty string
    $ft = ($from_type) ? csv_file_type($from_type) : '';
    $tt = ($to_type) ? csv_file_type($to_type) : '';
    $fn = '';
    $csv_type = '';
    $type = '';
    $search = array();
    //
    csv_edihist_log("csv_file_by_trace: $trace from $ft to $tt");
    //
    // $search_ar should have keys ['s_val']['s_col'] array(['r_cols'])
    // NOTE(review): 'All' is a non-array value, which csv_search_record()
    // treats as "return the entire row" -- confirm that is the intent
    //
    if ($ft == 'f835') {
        // trace payment to status or claim
        $search = array('s_val'=>$trace, 's_col'=>3, 'r_cols'=>'All');
        $type = $tt;
        $csv_type = 'file';
    } elseif ($ft == 'f997') {
        // trace ACK to batch file; trim the trace to the 9-digit ISA13
        $icn = (is_numeric($trace) && strlen($trace) >= 9) ? substr($trace, 0, 9) : $trace;
        $search = array('s_val'=>$icn, 's_col'=>2, 'r_cols'=>'All');
        $type = $tt;
        $csv_type = 'file';
    } elseif ($ft == 'f277') {
        // trace status to status req or claim
        if ($tt == 'f276') {
            $search = array('s_val'=>$trace, 's_col'=>7, 'r_cols'=>'All');
            $type = $tt;
            $csv_type = 'claim';
        } elseif ($tt == 'f837') {
            // expect CLM01 for trace value
            $search = array('s_val'=>$trace, 's_col'=>2, 'r_cols'=>'All');
            $type = $tt;
            $csv_type = 'claim';
        }
    } elseif ($ft == 'f271') {
        // trace benefit to benefit req
        if ($tt == 'f270') {
            $search = array('s_val'=>$trace, 's_col'=>2, 'r_cols'=>'All');
            $type = $tt;
            $csv_type = 'claim';
        }
    } elseif ($ft == 'f278') {
        // trace auth to auth req
        $search = array('s_val'=>$trace, 's_col'=>2, 'r_cols'=>'All');
        $type = 'f278';
        $csv_type = 'claim';
    } else {
        // bug fix: was $file_type (undefined); log the actual argument
        csv_edihist_log('csv_file_by_trace: incorrect file type '.$from_type);
        return $fn;
    }
    //
    if ($type && $csv_type && $search) {
        $result = csv_search_record($type, $csv_type, $search, false);
        if (is_array($result) && count($result)) {
            if ($ft == 'f278') {
                // take the first response-type row
                foreach($result as $r) {
                    if ($r[6] == 'Rsp' || $r[6] == 'Reply') {
                        $fn = $result[0][5];
                        break;
                    }
                }
            } elseif ($csv_type == 'claim') {
                // FileName is column 5 in claim tables
                $fn = $result[0][5];
            } else {
                // FileName is column 1 in file tables
                $fn = $result[0][1];
            }
        } else {
            csv_edihist_log("csv_file_by_trace: search failed $type csv $csv_type for trace $trace $from_type $to_type");
        }
    } else {
        csv_edihist_log("csv_file_by_trace: error type $type csv $csv_type for trace $trace $from_type $to_type");
    }
    return $fn;
}
/**
* list claim records with Denied or Reject status in given file
*
* @param string
* @param string
*
* @return array
*/
function csv_denied_by_file($filetype, $filename, $trace='') {
    // List claim records with Denied or Reject status in the given file.
    //
    // Fix: log messages named a nonexistent "csv_errors_by_file" function,
    // misattributing entries in the log; corrected to this function's name.
    //
    // @param string $filetype response type (f997, f271, f277, f835)
    // @param string $filename csv-recorded file name to filter on
    // @param string $trace    optional trace number (835 only)
    // @return array matching claim rows
    $ret_ar = array();
    $ft = csv_file_type($filetype);
    // only response types carry a deniable status column
    if (strpos('|f997|f271|f277|f835', $ft)) {
        $param = csv_parameters($ft);
        $csv_file = $param['claims_csv'];
    } else {
        csv_edihist_log("csv_denied_by_file: incorrect file type $filetype");
        return $ret_ar;
    }
    //
    csv_edihist_log("csv_denied_by_file: $ft searching $filename with trace $trace");
    //
    if (($fh1 = fopen($csv_file, "r")) !== false) {
        if ($ft == 'f835') {
            while (($data = fgetcsv($fh1, 1024, ",")) !== false) {
                // check filename, then status; codes 1,2,3,19,20,21 are paid/accepted
                if ($trace) {
                    if ($data[4] == $trace) {
                        if (!in_array($data[3], array('1', '2', '3', '19', '20', '21')) ) { $ret_ar[] = $data; }
                    }
                } elseif ($data[5] == $filename) {
                    if (!in_array($data[3], array('1', '2', '3', '19', '20', '21')) ) { $ret_ar[] = $data; }
                }
            }
        } elseif ($ft == 'f277') {
            while (($data = fgetcsv($fh1, 1024, ",")) !== false) {
                if ($data[5] == $filename) {
                    // A1/A2/A5 category codes are acknowledgments/accepted
                    if ( !strpos('|A1|A2|A5', substr($data[3], 0, 2))) {
                        $ret_ar[] = $data;
                    }
                }
            }
        } elseif (strpos('|f997|f999|f271', $ft)) {
            while (($data = fgetcsv($fh1, 1024, ",")) !== false) {
                if ($data[5] == $filename) {
                    // anything other than 'A' (accepted) is reported
                    if ($data[3] !== 'A') {
                        $ret_ar[] = $data;
                    }
                }
            }
        } else {
            csv_edihist_log("csv_denied_by_file: file type did not match $filetype");
        }
        fclose($fh1);
    }
    //
    return $ret_ar;
}
/**
* A function to try and assure the pid-encounter is correctly parsed
*
* assume a format of pid-encounter, since that is sent in the OpenEMR x12 837
*
* @param string $pid_enctr the value from element CLM01
* return array array('pid' => $pid, 'enctr' => $enc)
*/
function csv_pid_enctr_parse( $pid_enctr ) {
    // Split a CLM01 patient account value into pid and encounter parts.
    // OpenEMR generates CLM01 as "pid-encounter" in the x12 837 batch.
    //
    // Fixes: $pid and $enc were undefined in several branches (date-like
    // values, failed splits), producing notices and null array members;
    // both are now initialized.  $inv_split[1] may be absent when the
    // value starts with non-digits, so it is now guarded with isset().
    //
    // @param string $pid_enctr the value from element CLM01
    // @return array|bool array('pid' => $pid, 'enctr' => $enc) or false
    if (!$pid_enctr || !is_string($pid_enctr) ) {
        csv_edihist_log("csv_pid_enctr_parse: invalid argument");
        return false;
    }
    // ensure both parts are defined on every branch
    $pid = '';
    $enc = '';
    $pval = trim($pid_enctr);
    if ( strpos($pval, '-') ) {
        // the expected "pid-encounter" form
        $pid = substr($pval, 0, strpos($pval, '-'));
        $enc = substr($pval, strpos($pval, '-')+1);
    } elseif ( ctype_digit($pval) ) {
        if ( preg_match('/(19|20)\d{2}[01]\d{1}[0-3]\d{1}/', $pval) ) {
            // all digits and looks like a yyyymmdd date -- treat as encounter
            $enc = $pval;
        } else {
            $enc = ( strlen($pval) ) >= ENCOUNTER_MIN_DIGIT_LENGTH ? $pval : '';
            $pid = '';
        }
    } elseif ( preg_match('/\D/', $pval, $match2, PREG_OFFSET_CAPTURE) ) {
        // some other non-digit separator -- split on the first non-digit run
        $inv_split = (count($match2)) ? preg_split('/\D/', $pval, 2, PREG_SPLIT_NO_EMPTY) : false;
        if ($inv_split) {
            $pid = $inv_split[0];
            $enc = isset($inv_split[1]) ? $inv_split[1] : '';
        }
    } else {
        $enc = ( strlen($pval) ) >= ENCOUNTER_MIN_DIGIT_LENGTH ? $pval : '';
        $pid = '';
    }
    return array('pid' => $pid, 'enctr' => $enc);
}
|
<?php
/**
* edih_csv_inc.php
*
* Copyright 2012 Kevin McCormick
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 3 or later. You should have
* received a copy of the GNU General Public License along with this program;
* if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
* <http://opensource.org/licenses/gpl-license.php>
*
* @author Kevin McCormick
* @link: http://www.open-emr.org
* @package OpenEMR
* @subpackage ediHistory
*/
/*
* The purpose of this file is to hold functions of general utility for
* my edi_claim_history project. It began as a php "class" but I am now
* thinking that instantiating the class is too much bother and probably
* a waste of memory, since the contents of the file have to be read into
* memory anyway.
*
* <pre>
* ******* important *********
* function csv_parameters($type="ALL")
* This function must have the correct values or nothing will work
* function csv_verify_file( $file_path, $type, $val_array=FALSE )
* critical for file verification and x12 parsing
* function (in ibr_uploads.php) ibr_upload_match_file($param_ar, $fidx, &$html_str)
* contains a regular expression that must be correct
*
* Also, the constant IBR_HISTORY_DIR must be correct
* **************************
* </pre>
*
* The claim_history x12 files are claim (837) acknowledgement (997/999) claim status (277) and claim payment (835)
* Also eligibility request (270) and eligibility response (271)
*
* <pre>
* Basic workflow:
 * Each file type has a row in the array from csv_parameters()
* type directory files_csv claims_csv column regex
*
* 1. open submitted file in edih_x12_class to verify and produce properties
* 2. Read the parameters array and choose the parameters using 'type'
* 2. Search the matched type 'directory' for the filename files matching the 'regex' regular expressions and
* compare the results to the files listed in the 'files_csv' files.csv record -- unmatched files are "new"
* 3. Each "new" x12 file should be read by csv_x12_segments -- returns array('path', 'delimiters', 'segments')
* ibr, ebr, ack -- basically Availity formats have their own read functions
* 4. Pass the array to various functions which parse for claims information
* 5. Write the results to files.csv or claims.csv and create html output for display
*
* 6. Other outputs as called for in ibr_history.php -- from user input from claim_history.html
* </pre>
*
* Key usability issue is the "new" files are in the users home directory -- downloaded there
* while the OpenEMR is on the server -- so there is a basic issue of access to the files
*
* The ibr_uploads.php script handles uploads of zip archives or multiple file uploads
*
* The csv data files are just php written .csv files, so anything different may cause errors
* You can open and edit them in OpenOffice, but you must save them in "original format"
*
* TO_DO Some type of "find in files" search would be helpful for locating all references to a claim, patient, etc.
* [ grep -nHIrF 'findtext']
*
* TO_DO functions to zip old files, put them aside, and remove them from csv tables
*/
///**
// * a security measure to prevent direct web access to this file
// */
// if (!defined('SITE_IN')) die('Direct access not allowed!');
// $GLOBALS['OE_EDIH_DIR'] $GLOBALS['OE_SITE_DIR']
/* *********** GLOBALS used for testing only **********
*/
// //$GLOBALS['OE_SITE_DIR'].'/edi/history';
//$OE_SITES_BASE = $GLOBALS['OE_SITE_DIR'];
//$OE_SITE_DIR = $OE_SITES_BASE.'/testing';
//$OE_EDIH_DIR = $OE_SITE_DIR.'/edi/history';
/* ***********
*/
/**
* Constant that is checked in included files to prevent direct access.
* concept taken from Joomla
*/
// Included scripts check this constant to refuse direct web access.
define('_EDIH', 1);
//DIRECTORY_SEPARATOR;
// Short alias for the platform directory separator, used throughout this file.
if (!defined('DS')) define('DS', DIRECTORY_SEPARATOR);
/**
* Log messages to the log file
*
* @param string $msg_str the log message
* @return int number of characters written
*/
function csv_edihist_log ( $msg_str ) {
    // Append a timestamped message to today's log file under the edi
    // history log directory.
    //
    // @param string $msg_str the log message
    // @return int number of characters written (0 when nothing was logged)
    $logfile = 'edih_log_'.date('Y-m-d').'.txt';
    $dir = csv_edih_basedir().DS.'log';
    $rslt = 0;
    //
    if ( !is_string($msg_str) || !strlen($msg_str) ) {
        // record the caller of the bad invocation instead of the message
        $fnctn = debug_backtrace(DEBUG_BACKTRACE_IGNORE_ARGS, 2)[1]['function'];
        csv_edihist_log ('invalid message string '.$fnctn);
        return $rslt;
    }
    //
    $entry = date('Ymd:Hms') . ' ' . $msg_str . PHP_EOL;
    $rslt = file_put_contents($dir.DS.$logfile, $entry, FILE_APPEND);
    //
    return $rslt; // number of characters written
}
/**
* read the edi_history_log.txt file into an
* html formatted ordered list
*
* @return string
*/
function csv_log_html($logname='') {
    // Render the named log file as an html ordered list.
    //
    // @param string $logname log file name (validated by check_file_dir_name)
    // @return string html fragment; list markup is empty when the file is absent
    check_file_dir_name($logname);
    $html_str = "<div class='filetext'>".PHP_EOL."<ol class='logview'>".PHP_EOL;
    $fp = csv_edih_basedir().DS.'log'.DS.$logname;
    if ( is_file($fp) ) {
        $fh = fopen( $fp, 'r');
        if ($fh !== FALSE) {
            // one <li> per log line
            // NOTE(review): log text is not html-escaped here -- confirm
            // log content is trusted before rendering
            while (($buffer = fgets($fh)) !== false) {
                $html_str .= "<li>".$buffer."</li>".PHP_EOL;
            }
            $html_str .= "</ol>".PHP_EOL."</div>".PHP_EOL;
            if (!feof($fh)) {
                $html_str .= "<p>Error in logfile: unexpected file ending</p>".PHP_EOL;
            }
            fclose($fh);
        } else {
            $html_str = "<p>Error: unable to open log file</p>".PHP_EOL;
        }
    }
    return $html_str;
}
/**
* list log files and store old logs in an archive
*
* @param bool
* @return array (json)
*/
function csv_log_manage($list=true) {
    // List log file names (as json), or, when $list is false, move logs
    // older than seven days into a zip archive and delete the originals.
    //
    // Fixes: ZipArchive::open() returns TRUE on success but an INTEGER
    // error code on failure -- an int is truthy, so `if ($ok)` treated
    // failures as success; results are now compared strictly to true.
    // Also, $archname already holds a full path, so prefixing it again
    // with $dir in rename() produced an invalid path; only the basename
    // is prefixed with the date now.
    //
    // @param bool $list true to list, false to archive old logs
    // @return string json-encoded array of file names
    $dir = csv_edih_basedir().DS.'log';
    $list_ar = array();
    $old_ar = array();
    $lognames = scandir($dir);
    if ($list) {
        foreach($lognames as $log) {
            if (!strpos($log, '_log_')) { continue; }
            $list_ar[] = $log;
        }
        // newest first
        $s = (count($list_ar)) ? rsort($list_ar) : false;
        //
        return json_encode($list_ar);
        //
    } else {
        // list is false, must be archive
        $datetime1 = date_create(date('Y-m-d'));
        //
        foreach($lognames as $log) {
            if ($log == '.' || $log == '..') { continue; }
            //
            // log names end with _YYYY-MM-DD.txt -- read the date after the last '_'
            $pos1 = strrpos($log, '_');
            if ($pos1) {
                $ldate = substr($log, $pos1+1, 10);
                $datetime2 = date_create($ldate);
                $interval = date_diff($datetime1, $datetime2);
                if ($interval->format('%R%a') < -7) {
                    // older log files are put in zip archive
                    if ( is_file($dir.DS.$log) ) { $old_ar[] = $log; }
                }
            }
        }
    }
    //
    $ok = false;
    $archname = $dir.DS.'edih-log-archive.zip';
    $filelimit = 200;
    //
    if (count($old_ar)) {
        $zip = new ZipArchive;
        if (is_file($archname)) {
            $ok = $zip->open($archname, ZipArchive::CHECKCONS);
        } else {
            $ok = $zip->open($archname, ZipArchive::CREATE);
        }
        // strict check: open() returns an int error code on failure
        $ok = ($ok === true);
        //
        if ($ok) {
            if ($zip->numFiles >= $filelimit) {
                // archive is full: close, rename with a date prefix, start fresh
                $zip->close();
                $dte = $datetime1->format('Ymd');
                $ok = rename($archname, $dir.DS.$dte.'_'.basename($archname));
                csv_edihist_log('csv_log_archive: rename full archive '.$dte.'_'.basename($archname) );
                if ($ok) {
                    $ok = ($zip->open($archname, ZipArchive::CREATE) === true);
                    if (!$ok) {
                        csv_edihist_log('csv_log_archive: cannot create '.$archname);
                    }
                } else {
                    csv_edihist_log('csv_log_archive: cannot rename '.$archname);
                }
            }
            //
            if ($ok) {
                foreach($old_ar as $lg) {
                    if (is_file($dir.DS.$lg)) {
                        $a = $zip->addFile($dir.DS.$lg, $lg);
                        if ($a) {
                            csv_edihist_log('csv_log_archive: add to archive '.$lg );
                        } else {
                            csv_edihist_log('csv_log_archive: error archiving '.$lg );
                        }
                    }
                }
                $c = $zip->close();
                if ($c) {
                    // archived successfully -- remove the originals
                    foreach($old_ar as $lg) {
                        $u = unlink($dir.DS.$lg);
                        if ($u) {
                            continue;
                        } else {
                            csv_edihist_log('csv_log_archive: error removing '.$dir.DS.$lg);
                        }
                    }
                } else {
                    csv_edihist_log('csv_log_archive: error closing log file archive');
                }
            } else {
                csv_edihist_log('csv_log_manage: error failed to open '.$archname);
            }
        }
    }
    //
    return json_encode($old_ar);
}
/**
* open or save a user notes file
*
* @param string
* @param bool
* @return string
*/
function csv_notes_file($content='', $open=true) {
    // Read ($open = true) or save user notes kept in edi_notes.txt.
    //
    // Fixes: (1) validated content was never written -- the validation
    // branch fell through without saving, and the write branch only ran
    // when content was EMPTY; validation and save are now sequential.
    // (2) the save-result message was inverted: file_put_contents()
    // returns a byte count (truthy) on SUCCESS, which displayed
    // "Save Error"; the check now compares strictly against false.
    //
    // @param string $content note text to save (ignored when $open is true)
    // @param bool   $open    true to read, false to save
    // @return string html fragment: note text, or a status/error message
    $str_html = '';
    $fp = csv_edih_basedir().DS.'archive'.DS.'edi_notes.txt';
    if (! is_writable($fp) ) {
        // create the file if it does not exist yet
        $fh = fopen( $fp, 'a+b');
        fclose($fh);
    }
    // for retrieving notes
    if ($open) {
        // if contents were previously deleted by user and file is empty,
        // the text 'empty' is put in content in save operation
        $ftxt = file_get_contents($fp);
        if ($ftxt === false) {
            $str_html .= 'csv_notes_file: file error <br>'.PHP_EOL;
            csv_edihist_log('csv_notes_file: file error');
        }
        if (substr($ftxt, 0, 5) == 'empty' && strlen($ftxt) == 5) {
            $ftxt = '## '. date("F j, Y, g:i a");
        } elseif (!$ftxt) {
            $ftxt = '## '. date("F j, Y, g:i a");
        }
        $str_html .= PHP_EOL.$ftxt.PHP_EOL;
        //
        return $str_html;
    }
    // saving: validate non-empty content as plain ascii text first
    if (strlen($content)) {
        if ( class_exists('finfo') ) {
            $finfo = new finfo(FILEINFO_MIME);
            $mimeinfo = $finfo->buffer($content);
            if ( strncmp($mimeinfo, 'text/plain; charset=us-ascii', 28) !== 0 ) {
                csv_edihist_log('csv_notes_file: invalid mime-type '.$mimeinfo);
                $str_html = 'csv_notes_file: invalid mime-type <br>'.$mimeinfo;
                //
                return $str_html;
            }
        } elseif (preg_match('/[^\x20-\x7E\x0A\x0D]|(<\?)|(<%)|(<asp)|(<ASP)|(#!)|(\$\{)|(<scr)|(<SCR)/', $content, $matches, PREG_OFFSET_CAPTURE)) {
            // finfo unavailable: reject control chars and script-like markers
            csv_edihist_log('csv_notes_file: Filtered character in file content -- character: '.$matches[0][0].' position: '.$matches[0][1]);
            $str_html .= 'Filtered character in file content not accepted <br>'. PHP_EOL;
            $str_html .= ' character: ' . $matches[0][0] . ' position: ' . $matches[0][1] . '<br>' . PHP_EOL;
            //
            return $str_html;
        }
    }
    // write the validated content; an empty save marks the file 'empty'
    $ftxt = ($content) ? $content : 'empty';
    $saved = file_put_contents($fp, $ftxt);
    $str_html .= ($saved === false) ? '<p>Save Error with notes file</p>' : '<p>Notes content saved</p>';
    //
    return $str_html;
}
/**
* generates path to edi history files
*
* @return string|bool directory path
*/
function csv_edih_basedir() {
    // Build the edi history base path from the OpenEMR site directory,
    // e.g. /var/www/htdocs/openemr/sites/default/edi/history
    //
    // @return string|bool directory path, or false when the site dir is unknown
    if (!isset($GLOBALS['OE_SITE_DIR'])) {
        csv_edihist_log('csv_edih_basedir: failed to obtain OpenEMR Site directory');
        return false;
    }
    return $GLOBALS['OE_SITE_DIR'].DS.'edi'.DS.'history';
}
/**
* generates path to edi_history tmp dir for file upload operations
*
* @uses csv_edih_basedir()
* @return string directory path
*/
function csv_edih_tmpdir() {
    // Path to the tmp directory under the edi history base dir,
    // used for file upload operations.
    //
    // @uses csv_edih_basedir()
    // @return string|bool directory path, or false when the base dir is unknown
    $bdir = csv_edih_basedir();
    return ($bdir) ? $bdir.DS.'tmp' : false;
}
/**
* Initial setup function
*
* Create the directory tree and write the column headers into the csv files
* This function will accept a directory argument and it appends the value
* from IBR_HISTORY_DIR to the path. Then a directory for each type of file
* and the csv files are created under that.
*
* @uses csv_parameters()
* @uses csv_table_header()
* @uses csv_edih_basedir()
*
* @param string &$out_str referenced, should be created in calling function
* @return boolean
*/
function csv_setup() {
// Create the edi/history directory tree (csv, archive, log, tmp plus one
// storage folder per x12 type), renaming any pre-existing csv tables to
// old_<name> so fresh tables will be generated.
// Returns true on success, or the accumulated html status string on a
// non-fatal failure.  NOTE(review): several failure branches call die(),
// so the function may terminate the request instead of returning.
//
$isOK = false;
$out_str = '';
$chr = 0;
// $GLOBALS['OE_SITE_DIR'] should be like /var/www/htdocs/openemr/sites/default
$sitedir = $GLOBALS['OE_SITE_DIR'];
//$sitedir = csv_edih_basedir();
//
// derive every path up front; nothing is created until writability is checked
if (is_readable($sitedir)) {
$basedir = $sitedir.DS.'edi';
$edihist_dir = $basedir.DS.'history';
$csv_dir = $edihist_dir.DS.'csv';
$archive_dir = $edihist_dir.DS.'archive';
$log_dir = $edihist_dir.DS.'log';
$tmp_dir = $edihist_dir.DS.'tmp';
} else {
//csv_edihist_log('setup: failed to obtain OpenEMR Site directory');
echo 'setup: failed to obtain OpenEMR Site directory<br>'.PHP_EOL;
return false;
}
//
if (is_writable($basedir) ) {
$isOK = true;
//csv_edihist_log('setup: directory '.$basedir);
$out_str .= 'EDI_History Setup should not overwrite existing data.<br>'.PHP_EOL;
$out_str .= 'Setup: directory '.$basedir.'<br>'.PHP_EOL;
//
// create each support folder; existing folders are reused, not replaced
if (is_dir($edihist_dir) || mkdir($edihist_dir, 0755)) {
$out_str .= 'created folder '.$edihist_dir.'<br>'.PHP_EOL;
$isOK = true;
if (is_dir($csv_dir) || mkdir($csv_dir, 0755) ) {
$out_str .= 'created folder '.$csv_dir.'<br>'.PHP_EOL;
$isOK = true;
} else {
$isOK = false;
$out_str .= 'Setup: Failed to create csv folder... '.'<br>'.PHP_EOL;
// NOTE(review): message references $archive_dir although the csv folder failed
die('Failed to create csv folder... '.$archive_dir);
}
if (is_dir($archive_dir) || mkdir($archive_dir, 0755) ) {
$out_str .= 'created folder '.$archive_dir.'<br>'.PHP_EOL;
$isOK = true;
} else {
$isOK = false;
$out_str .= 'Setup: Failed to create archive folder... '.'<br>'.PHP_EOL;
die('Failed to create archive folder... ');
}
if (is_dir($log_dir) || mkdir($log_dir, 0755) ) {
$out_str .= 'created folder '.$log_dir.'<br>'.PHP_EOL;
$isOK = true;
} else {
$isOK = false;
$out_str .= 'Setup: Failed to create log folder... '.'<br>'.PHP_EOL;
die('Failed to create log folder... ');
}
if (is_dir($tmp_dir) || mkdir($tmp_dir, 0755) ) {
$out_str .= 'created folder '.$tmp_dir.PHP_EOL;
$isOK = true;
} else {
$isOK = false;
$out_str .= 'Setup: Failed to create tmp folder... '.'<br>'.PHP_EOL;
die('Failed to create tmp folder... ');
}
} else {
$isOK = false;
$out_str .= 'Setup failed: cannot write to folder '.$basedir.'<br>'.PHP_EOL;
die('Setup failed: cannot write to '.$basedir);
}
} else {
$isOK = false;
$out_str .= 'Setup: Failed to create history folder... '.'<br>'.PHP_EOL;
die('Failed to create history folder... '.$edihist_dir);
}
if ($isOK) {
// one parameter entry per x12 type; each defines a storage directory
$p_ar = csv_parameters('ALL');
$old_csv = array('f837'=>'batch', 'f835'=>'era');
foreach ($p_ar as $key=>$val) {
// rename existing csv files to old_filename
// NOTE(review): this rename scan runs once per type, so files already
// renamed to old_* on a prior iteration are renamed again (old_old_*)
if (is_dir($csv_dir)) {
if ($dh = opendir($csv_dir)) {
while (($file = readdir($dh)) !== false) {
if (is_file($csv_dir.DS.$file) && strpos($file, 'csv')) {
$rn = rename($csv_dir.DS.$file, $csv_dir.DS.'old_'.$file);
if ($rn) {
$out_str .= 'renamed csv/'.$file.' to old_'.$file.'<br />'.PHP_EOL;
} else {
$out_str .= 'attempt to rename csv/'.$file.' failed<br />'.PHP_EOL;
}
}
}
}
}
//;
// make the edi files storage subdirs
$tp = $p_ar[$key]['type'];
$type_dir = $p_ar[$key]['directory'];
//
if (is_dir($type_dir)) {
$out_str .= 'folder for '.$tp.' exists '.$type_dir.'<br>'.PHP_EOL;
} elseif (mkdir($type_dir, 0755)) {
if ($tp == 'f835') {
// in upgrade case the f835 directory should not exist
// move 'era' files from /era to /f835
if (is_dir($edihist_dir.DS.'era')) {
$fct = 0; $rct = 0;
if ($dh = opendir($edihist_dir.DS.'era')) {
while (($file = readdir($dh)) !== false) {
if (is_file($edihist_dir.DS.'era'.DS.$file)) {
$rct++;
$rn = rename($edihist_dir.DS.'era'.DS.$file, $type_dir.DS.$file);
$fct = ($rn) ? $fct + 1 : $fct;
}
}
}
$out_str .= 'created type folder '.$type_dir.' and moved '.$fct.' of '.$rct.' files from /era<br>'.PHP_EOL;
}
} else {
$out_str .= 'created type folder '.$type_dir.'<br>'.PHP_EOL;
}
} else {
$out_str .= 'Setup failed to create directory for '.$tp.'<br>'.PHP_EOL;
}
}
} else {
$out_str .= 'Setup failed: Can not create directories <br>'.PHP_EOL;
}
if ($isOK) {
// success: record the full status report in the log
csv_edihist_log($out_str);
return true;
} else {
return $out_str;
}
}
/**
* Empty all contents of tmp dir /edi/history/tmp
*
* @uses csv_edih_tmpdir()
* @param none
* @return bool
*/
function csv_clear_tmpdir() {
    // Delete every file, and one level of subdirectories, from the
    // /edi/history/tmp scratch directory.  Returns true when the
    // directory ends up empty, false otherwise.
    $tmpdir = csv_edih_tmpdir();
    // safety: refuse to operate on anything but the expected tmp dir
    if (basename($tmpdir) != 'tmp') {
        csv_edihist_log ( 'tmp dir not /edi/history/tmp');
        return false;
    }
    $entries = scandir($tmpdir);
    // a bare directory scan holds only '.' and '..'
    if (count($entries) > 2) {
        foreach ($entries as $entry) {
            if ($entry == "." || $entry == "..") {
                continue;
            }
            $path = $tmpdir.DS.$entry;
            if (is_file($path)) {
                unlink($path);
            } elseif (is_dir($path)) {
                // remove nested files, then the now-empty subdirectory
                foreach (scandir($path) as $sub) {
                    if ($sub == "." || $sub == "..") {
                        continue;
                    }
                    if (is_file($path.DS.$sub)) {
                        unlink($path.DS.$sub);
                    }
                }
                rmdir($path);
            }
        }
    }
    // re-scan to confirm everything was removed
    if (count(scandir($tmpdir)) > 2) {
        csv_edihist_log ('tmp dir contents remain in ... /edi/history/tmp');
        return false;
    }
    return true;
}
/**
* open and verify a default edih_x12_file object
*
* @uses csv_check_filepath()
*
* @param string filepath or filename
* @parm string file x12 type
* @return object edih_x12_file class
*/
function csv_check_x12_obj($filepath, $type='') {
    // Open an edih_x12_file for the given path and verify it is a usable
    // object: valid class, 'ovigs' validity code, and populated segments,
    // envelopes and delimiters.  Returns the object, or false on failure.
    $fp = csv_check_filepath($filepath, $type);
    if (!$fp) {
        csv_edihist_log("csv_check_x12_obj: invalid file path $filepath");
        return false;
    }
    $x12obj = new edih_x12_file($fp);
    if ('edih_x12_file' != get_class($x12obj)) {
        csv_edihist_log("csv_check_x12_obj: object not edih_x12_file $filepath");
        return false;
    }
    if ($x12obj->edih_valid() != 'ovigs') {
        csv_edihist_log("csv_check_x12_obj: invalid object $filepath");
        return false;
    }
    // all three core collections must be non-empty
    $ok = count($x12obj->edih_segments());
    $ok = ($ok) ? count($x12obj->edih_envelopes()) : false;
    $ok = ($ok) ? count($x12obj->edih_delimiters()) : false;
    if (!$ok) {
        csv_edihist_log("csv_check_x12_obj: object missing properties [$filepath]");
        csv_edihist_log( $x12obj->edih_message() );
        return false;
    }
    return $x12obj;
}
/**
* Check that the file path we are working with is a readable file.
*
* If it is a file we have uploaded and we have only the file name
* this function will type the file and find it in the uploaded files directories
* and return the complete path.
*
* @uses csv_parameters()
* @param string $filename name of a file that is one of our types
* @param string $type optional; one of our file types
* @return string either an empty string or a readable filepath
*/
function csv_check_filepath($filename, $type='ALL') {
    // Resolve a file name to a readable path inside the storage
    // directories.  A path that is already readable is returned as-is;
    // otherwise the file is located by type (or by regex match over all
    // types).  Returns '' when no readable path is found.
    if (is_file($filename) && is_readable($filename)) {
        return $filename;
    }
    //
    $candidate = '';
    $base = basename($filename);
    if ($type && $type != 'ALL') {
        // known type: look in that type's directory
        $par = csv_parameters($type);
        if (is_array($par) && array_key_exists('type', $par)) {
            $candidate = $par['directory'].DS.$base;
        }
    } else {
        // unknown type: first directory whose name pattern matches wins
        foreach (csv_parameters("ALL") as $tp => $par) {
            if ($par['regex'] && preg_match($par['regex'], $base)) {
                $candidate = $par['directory'].DS.$base;
                break;
            }
        }
    }
    return (is_file($candidate) && is_readable($candidate)) ? realpath($candidate) : '';
}
/**
* verify file type parameter
*
* @param string file type
* @param bool return GS02 code or fXXX
* @return string file type or empty
*/
function csv_file_type($type, $gs_code=false) {
    // Normalize a loosely-specified type argument to a canonical file
    // type (fXXX) or, with $gs_code true, the x12 GS02 functional code.
    // Returns '' (after logging) when no alias matches.
    if (!$type) {
        csv_edihist_log('csv_file_type: invalid or missing type argument '.$type);
        return false;
    }
    $needle = (string)$type;
    // each haystack lists the accepted aliases; the strpos() test keeps
    // the original loose substring matching (leading '|' guards pos 0)
    $alias = array(
        'f837' => array('|f837|batch|HC', 'HC'),
        'f835' => array('|f835|era|HP', 'HP'),
        'f997' => array('|f999|f997|ack|ta1|FA', 'FA'),
        'f277' => array('|f277|HN', 'HN'),
        'f276' => array('|f276|HR', 'HR'),
        'f271' => array('|f271|HB', 'HB'),
        'f270' => array('|f270|HS', 'HS'),
        'f278' => array('|f278|HI', 'HI'),
    );
    $tp = '';
    foreach ($alias as $ftype => $al) {
        if (strpos($al[0], $needle)) {
            $tp = ($gs_code) ? $al[1] : $ftype;
            break;
        }
    }
    if (!$tp) {
        csv_edihist_log('csv_file_type error: incorrect type '.$needle);
    }
    return $tp;
}
/**
* The array that holds the various parameters used in dealing with files
*
* A key function since it holds the paths, columns, etc.
* Unfortunately, there is an issue with matching the type in * the case of the
* values '997', '277', '999', etc, becasue these strings may be recast
* from strings to integers, so the 'type' originally supplied is lost.
* This introduces an inconsistency when the 'type' is used in comparison tests.
* We call the csv_file_type() function to return a usable file type identifier.
* The 'datecolumn' and 'fncolumn' entries are used in csv_to_html() to filter by date
* or place links to files.
*
* @param string $type -- default = ALL or one of batch, ibr, ebr, dpr, f997, f277, era, ack, text
* @return array
*/
function csv_parameters($type='ALL') {
//
// This will need the OpenEMR 'oe_site_dir' to replace global
//
// Returns the parameter record for one type (directory, csv table paths,
// date/file column names, file-name regex), or the whole map for 'ALL'.
// An unrecognized type logs an error and returns an empty array.
$p_ar = array();
$tp = ($type === 'ALL') ? $type : csv_file_type($type);
if (!$tp) {
csv_edihist_log('csv_parameters() error: incorrect type '.$type);
return $p_ar;
}
//$edihist_dir = $GLOBALS['OE_SITE_DIR'].'/edi/history';
$edihist_dir = csv_edih_basedir();
//
// the batch file directory is a special case - decide whether to use OpenEMR batch files or make our own copies
// OpenEMR copies each batch file to sites/default/edi and this project never writes to that directory
// batch reg ex -- '/20[01][0-9]-[01][0-9]-[0-3][0-9]-[0-9]{4}-batch*\.txt/' '/\d{4}-\d{2}-\d{2}-batch*\.txt$/'
//
// f837 claims read straight from OpenEMR's own edi output folder
$p_ar['f837'] = array('type'=>'f837', 'directory'=>$GLOBALS['OE_SITE_DIR'].DS.'edi', 'claims_csv'=>$edihist_dir.DS.'csv'.DS.'claims_f837.csv',
'files_csv'=>$edihist_dir.DS.'csv'.DS.'files_f837.csv', 'filedate'=>'Date', 'claimdate'=>'SvcDate', 'regex'=>'/\-batch(.*)\.txt$/');
//
//$p_ar['csv'] = array("type"=>'csv', "directory"=>$edihist_dir.'/csv', "claims_csv"=>'ibr_parameters.csv',
// "files_csv"=>'', "column"=>'', "regex"=>'/\.csv$/');
// remaining types are stored in per-type folders under the history dir
$p_ar['f997'] = array('type'=>'f997', 'directory'=>$edihist_dir.DS.'f997', 'claims_csv'=>$edihist_dir.DS.'csv'.DS.'claims_f997.csv',
'files_csv'=>$edihist_dir.DS.'csv'.DS.'files_f997.csv', 'filedate'=>'Date', 'claimdate'=>'RspDate', 'regex'=>'/\.(99[79]|ta1|ack)$/i');
$p_ar['f276'] = array('type'=>'f276', 'directory'=>$edihist_dir.DS.'f276', 'claims_csv'=>$edihist_dir.DS.'csv'.DS.'claims_f276.csv',
'files_csv'=>$edihist_dir.DS.'csv'.DS.'files_f276.csv', 'filedate'=>'Date', 'claimdate'=>'ReqDate', 'regex'=>'/\.276([ei]br)?$/');
$p_ar['f277'] = array('type'=>'f277', 'directory'=>$edihist_dir.DS.'f277', 'claims_csv'=>$edihist_dir.DS.'csv'.DS.'claims_f277.csv',
'files_csv'=>$edihist_dir.DS.'csv'.DS.'files_f277.csv', 'filedate'=>'Date', 'claimdate'=>'SvcDate', 'regex'=>'/\.277([ei]br)?$/i');
$p_ar['f270'] = array('type'=>'f270', 'directory'=>$edihist_dir.DS.'f270', 'claims_csv'=>$edihist_dir.DS.'csv'.DS.'claims_f270.csv',
'files_csv'=>$edihist_dir.DS.'csv'.DS.'files_f270.csv', 'filedate'=>'Date', 'claimdate'=>'ReqDate', 'regex'=>'/\.270([ei]br)?$/i');
$p_ar['f271'] = array('type'=>'f271', 'directory'=>$edihist_dir.DS.'f271', 'claims_csv'=>$edihist_dir.DS.'csv'.DS.'claims_f271.csv',
'files_csv'=>$edihist_dir.DS.'csv'.DS.'files_f271.csv', 'filedate'=>'Date', 'claimdate'=>'RspDate', 'regex'=>'/\.271([ei]br)?$/i');
$p_ar['f278'] = array('type'=>'f278', 'directory'=>$edihist_dir.DS.'f278', 'claims_csv'=>$edihist_dir.DS.'csv'.DS.'claims_f278.csv',
'files_csv'=>$edihist_dir.DS.'csv'.DS.'files_f278.csv', 'filedate'=>'Date', 'claimdate'=>'FileDate', 'regex'=>'/\.278/');
// OpenEMR stores era files, but the naming scheme is confusing, so we will just use our own directory for them
$p_ar['f835'] = array('type'=>'f835', 'directory'=>$edihist_dir.DS.'f835', 'claims_csv'=>$edihist_dir.DS.'csv'.DS.'claims_f835.csv',
'files_csv'=>$edihist_dir.DS.'csv'.DS.'files_f835.csv', 'filedate'=>'Date', 'claimdate'=>'SvcDate', 'regex'=>'/835[0-9]{5}\.835*|\.(era|ERA|835)$/i');
//
// single-type request returns just that record; 'ALL' returns the map
if ( array_key_exists($tp, $p_ar) ) {
return $p_ar[$tp];
} else {
return $p_ar;
}
}
/**
* determine if a csv table has data for select dropdown
*
* @param string default 'json'
* @return array json if argument is 'json'
*/
function csv_table_select_list($outtp='json') {
    // Build a nested list of populated csv tables for a select dropdown:
    // [prefix][type] => ['fname' => table name, 'desc' => display label].
    // Returns json by default, or the raw array for any other $outtp.
    $labels = array('f835'=>'Payments', 'f837'=>'Claims', 'batch'=>'Claims', 'f277'=>'Status', 'f276'=>'Status Req',
                    'f997'=>'Ack','f271'=>'Benefit', 'f270'=>'Benefit Req', 'f278'=>'Auth');
    $csvdir = csv_edih_basedir().DS.'csv';
    $optlist = array();
    foreach (scandir($csvdir) as $csvf) {
        // skip dot entries, renamed 'old_' tables, header-only tables,
        // and editor backup files
        if ($csvf == "." || $csvf == "..") { continue; }
        if (strpos($csvf, 'old') === 0) { continue; }
        if (filesize($csvdir.DS.$csvf) < 70) { continue; }
        if (substr($csvf, -1) == '~') { continue; }
        // table names look like files_f997 or claims_f835
        $fn = pathinfo($csvdir.DS.$csvf, PATHINFO_FILENAME);
        $parts = explode('_', $fn);
        $optlist[$parts[0]][$parts[1]]['fname'] = $fn;
        $optlist[$parts[0]][$parts[1]]['desc'] = $parts[0].'-'.$labels[$parts[1]];
    }
    return ($outtp == 'json') ? json_encode($optlist) : $optlist;
}
/**
* list existing archive files
*
* @param string default 'json'
* @return array json if argument is 'json'
*/
function csv_archive_select_list($outtp='json') {
    // List the archive files under /edi/history/archive, skipping dot
    // entries and any notes files.  Returns json by default, or the raw
    // array for any other $outtp.
    $archdir = csv_edih_basedir().DS.'archive';
    //
    // debug
    csv_edihist_log("csv_archive_select_list: using $archdir");
    //
    $flist = array();
    $scan = scandir($archdir);
    if (is_array($scan)) {
        foreach ($scan as $s) {
            if ($s == '.' || $s == '..' || strpos($s, 'note')) {
                continue;
            }
            $flist[] = $s;
        }
    }
    return ($outtp == 'json') ? json_encode($flist) : $flist;
}
/**
* List files in the directory for the given type
*
* Write an entry in the log if an file is in the directory
* that does not match the type
*
* @uses csv_parameters()
* @param string $type a type from our list
* @return array
*/
function csv_dirfile_list($type) {
    // List the files present in the storage directory for the given type.
    // Writes a log entry when an entry in the directory is not a regular
    // file.  Returns the file-name array, or false on a bad type.
    //
    // @param string $type a type from our list
    // @return array|bool
    $tp = csv_file_type($type);
    if (!$tp) {
        csv_edihist_log("csv_dirfile_list error: incorrect type $type");
        return false;
    }
    $params = csv_parameters($tp);
    // a valid single-type parameter record is a flat array
    if (empty($params) || csv_singlerecord_test($params) == false ) {
        csv_edihist_log("csv_dirfile_list() error: incorrect type $type");
        return false;
    }
    $search_dir = $params['directory'];
    $dirfiles = array();
    //
    if (is_dir($search_dir)) {
        if ($dh = opendir($search_dir)) {
            while (($file = readdir($dh)) !== false) {
                if ($file == '.' || $file == '..') {
                    continue;
                } elseif ($tp == 'f837' && ($file == 'history' || $file == 'README.txt')) {
                    // the f837 directory is shared with OpenEMR's own edi output
                    continue;
                } elseif (is_file($search_dir.DS.$file) ) {
                    $dirfiles[] = $file;
                } else {
                    if ($tp == 'f837' && $file == 'history') { continue; }
                    csv_edihist_log("csv_dirfile_list $type : not a file $file");
                }
            }
            // bug fix: the directory handle was never released
            closedir($dh);
        } else {
            csv_edihist_log("csv_dirfile_list $type : error in scan $search_dir");
        }
    } else {
        csv_edihist_log("csv_dirfile_list $type : not a directory $search_dir");
    }
    //
    return $dirfiles;
} // end function
/**
* List files that are in the csv record
*
* @uses csv_parameters()
* @uses csv_table_header()
*
* @param string $type -- one of our types
* @return array
*/
function csv_processed_files_list($type) {
    // Return the file names already recorded in the files_csv table for
    // the given type (header row removed), an empty array on first run,
    // or false on a bad type / unreadable table.
    $tp = csv_file_type($type);
    if (!$tp) {
        csv_edihist_log("csv_processed_files_list: incorrect type $type");
        return false;
    }
    $param = csv_parameters($tp);
    // locate the FileName column in the table heading (default column 1)
    $csv_col = 1;
    $hdr_ar = csv_table_header($tp, 'file');
    if (is_array($hdr_ar)) {
        $found = array_search('FileName', $hdr_ar);
        if ($found !== false) { $csv_col = $found; }
    }
    $csv_file = $param['files_csv'];
    $processed_files = array();
    if (!is_file($csv_file)) {
        // first run - no file exists
        csv_edihist_log("csv_processed_files_list: csv file does not exist ".basename($csv_file));
        return $processed_files;
    }
    $fh1 = fopen( $csv_file, "r" );
    if ($fh1 === FALSE) {
        csv_edihist_log ("csv_list_processed_files: failed to access $csv_file" );
        return false;
    }
    while (($data = fgetcsv($fh1, 1024, ",")) !== FALSE) {
        $processed_files[] = $data[$csv_col];
    }
    fclose($fh1);
    // drop the header row, but avoid slicing an empty array
    return (empty($processed_files)) ? $processed_files : array_slice($processed_files, 1);
} // end function
/**
* Give an array of files in the storage directories that are not in the csv record
*
* @param string $type -- one of our types
* @return array
*/
function csv_newfile_list($type) {
    // Files present in the storage directory but absent from the csv
    // record, i.e. files not yet processed.  Returns false on a bad type.
    $tp = csv_file_type($type);
    if (!$tp) {
        csv_edihist_log('csv_newfile_list: incorrect type '.$type);
        return false;
    }
    //
    $dir_files = csv_dirfile_list($tp);
    $csv_files = csv_processed_files_list($tp);
    //
    // the directory listing must be the first argument to array_diff()
    if (empty($dir_files)) {
        return array();
    }
    if (empty($csv_files) || is_null($csv_files)) {
        return $dir_files;
    }
    return array_diff($dir_files, $csv_files);
}
/**
* Parse 997 IK3 error segment to identify segment causing rejection
* The error segment string is specially created in edih_997_csv_data()
* Simple analysis, but the idea is just to identify the bad segment
*
* @param string error segment from edih_997_csv_data()
* @param bool true if only the 1st segmentID is wanted
* return array|string
*/
function edih_errseg_parse($err_seg, $id=false) {
    // Parse a 997/999 IK3 error string of the shape
    //   trace*IK3*segID*segpos[*segID*segpos*segID*segpos]
    // into ['trace' => ..., 'id' => [...], 'err' => [...]].
    // Returns an empty array (after logging) for input lacking 'IK3'.
    $ret_ar = array();
    if (!$err_seg || strpos($err_seg, 'IK3') === false) {
        csv_edihist_log('edih_errseg_parse: invalid argument');
        return $ret_ar;
    }
    //
    $fields = explode('*', $err_seg);
    foreach ($fields as $pos => $fld) {
        $pos = (int)$pos;
        if ($pos == 0) {
            $ret_ar['trace'] = $fld;        // interchange trace number
        } elseif ($pos == 1 || $pos > 7) {
            continue;                        // 'IK3' literal / past expected fields
        } elseif ($pos % 2 == 0) {
            $ret_ar['id'][] = $fld;          // segment ID (even positions 2,4,6)
        } else {
            $ret_ar['err'][] = $fld;         // segment position (odd positions 3,5,7)
        }
    }
    //
    return $ret_ar;
}
/**
* Order the csv data array according to the csv table heading row
* so the data to be added to csv table rows are correctly ordered
* the supplied data should be in an array with thie structure
* array['icn'] ['file'][i]['key'] ['claim'][i]['key'] ['type']['type']
*
* @uses csv_table_header()
*
* @param array data_ar data array from edih_XXX_csv_data()
* @return array|bool ordered array or false on error
*/
function edih_csv_order($csvdata) {
// Reorder parsed csv data so each record's values follow the column order
// given by csv_table_header() for that file type.
// Input shape: [icn]['type'] plus [icn]['file'|'claim'][i][column=>value].
// Output shape mirrors the input but with numerically indexed values in
// table-header order, ready for fputcsv().
$wrcsv = array();
$order_ar = array();
//
foreach($csvdata as $icn=>$data) {
// [icn]['type']['file']['claim']
$ft = $data['type'];
$wrcsv[$icn]['type'] = $ft;
//
foreach($data as $key=>$val) {
if ($key == 'type') { continue; }
// $key is 'file' or 'claim'; header array gives the column order
$order_ar[$icn][$key] = csv_table_header($ft, $key);
// NOTE(review): $ct is computed but never used
$ct = count($order_ar[$icn][$key]);
foreach($val as $k=>$rcrd) {
//
// re-key the record by header position; assumes every header
// column name exists as a key in $rcrd -- TODO confirm upstream
foreach($order_ar[$icn][$key] as $ky=>$vl) {
$wrcsv[$icn][$key][$k][$ky] = $rcrd[$vl];
}
}
}
}
return $wrcsv;
}
/**
* insert dashes in ten-digit telephone numbers
*
* @param string $str_val the telephone number
* @return string the telephone number with dashes
*/
function edih_format_telephone ($str_val) {
    // Format a ten digit telephone number as NNN-NNN-NNNN.
    // Anything that does not reduce to exactly ten digits is logged and
    // returned unchanged.
    $digits = preg_replace('/\D/', '', (string)$str_val);
    if (strlen($digits) != 10) {
        csv_edihist_log('edih_format_telephone: invalid argument: '.$str_val);
        return $str_val;
    }
    return substr($digits,0,3) . "-" . substr($digits,3,3) . "-" . substr($digits,6);
}
/**
* order MM DD YYYY values and insert slashes in eight-digit dates
*
* US MM/DD/YYYY or general YYYY-MM-DD
*
* @param string $str_val the eight-digit date
* @param string $pref if 'US' (default) anything else means YYYY-MM-DD
* @return string the date with slashes
*/
function edih_format_date ($str_val, $pref = "Y-m-d") {
    // Render an eight digit (or six digit short) date as MM/DD/YYYY when
    // $pref is "US", otherwise as YYYY-MM-DD.
    $d = preg_replace('/\D/', '', (string)$str_val);
    if (strlen($d) == 6) {
        // six digit date: prepend the current century to get ccyymmdd
        $century = substr(date('Ymd'), 0, 2);
        if ($pref == "US") {
            // assume mmddyy
            $d = $century.substr($d,-2).substr($d,0,4);
        } else {
            // assume yymmdd
            $d = $century.$d;
        }
    }
    if ($pref == "US") {
        return substr($d,4,2) . "/" . substr($d,6) . "/" . substr($d,0,4);
    }
    return substr($d,0,4) . "-" . substr($d,4,2) . "-" . substr($d,6);
}
/**
* format monetary amounts with two digits after the decimal place
*
* @todo add other formats
* @param string $str_val the amount string
* @return string the telephone number with dashes
*/
function edih_format_money ($str_val) {
    // Dollar-format an amount with two decimal places; empty/falsy input
    // (except the literal string '0') is returned unchanged.
    if (!$str_val && $str_val !== '0') {
        return $str_val;
    }
    return sprintf("$%01.2f", $str_val);
}
/**
* format percentage amounts with % sign
* typical example ".50" from x12 edi segment element
*
* @param string $str_val the amount string
* @return string the value as a percentage
*/
function edih_format_percent ($str_val) {
    // Convert a decimal fraction (e.g. '.50' from an x12 element) to a
    // percentage string ('50%').
    //
    // bug fix: the old code cast to float *before* testing is_float(),
    // which is always true after a cast, so the fallback branch was dead
    // and non-numeric input was silently rendered as '0%'.  is_numeric()
    // restores the intended behavior: non-numeric input just gets '%'.
    if (is_numeric($str_val)) {
        $pct = ((float)$str_val)*100 . '%';
    } else {
        $pct = $str_val.'%';
    }
    return $pct;
}
/**
* HTML string for table thead element
*
* @uses csv_table_header()
* @param string
* @param string
* @return string
*/
function csv_thead_html($file_type, $csv_type, $tblhd=null) {
    // Build the <thead> html for a csv table, using the supplied heading
    // array $tblhd when given, otherwise the standard table header for
    // the file/csv type.  Returns false when no headings are available.
    //
    // bug fix: the original used bitwise '&' instead of '&&' here;
    // 'true & count($tblhd)' evaluates to 0 for any even element count,
    // so supplied header arrays with 2, 4, ... columns were silently
    // ignored and the default header used instead.
    if (is_array($tblhd) && count($tblhd)) {
        $hvals = $tblhd;
    } else {
        $hvals = csv_table_header($file_type, $csv_type);
    }
    if (!is_array($hvals) || !count($hvals)) {
        return false;
    }
    $str_html = "<thead>".PHP_EOL."<tr>".PHP_EOL;
    foreach ($hvals as $val) {
        $str_html .= "<th>$val</th>";
    }
    $str_html .= PHP_EOL."</tr>".PHP_EOL."</thead>".PHP_EOL;
    //
    return $str_html;
}
/**
* Give the column headings for the csv files
*
* @uses csv_file_type()
* @param string $file_type one of our edi types
* @param string $csv_type either 'file' or 'claim'
* @return array
*/
function csv_table_header($file_type, $csv_type) {
    // Column headings for the csv tables, keyed by csv type ('file' or
    // 'claim') and canonical file type.  Returns an empty array on a bad
    // argument, false when no heading set matches.
    $ft = csv_file_type($file_type);
    // same loose normalization as elsewhere: substring match against the
    // accepted words (leading '|' keeps position 0 out of play)
    $ct = strpos('|file', $csv_type) ? 'file' : $csv_type;
    $ct = strpos('|claim', $ct) ? 'claim' : $ct;
    //
    if (!$ft || !$ct ) {
        csv_edihist_log ('csv_table_header error: incorrect file ['.$file_type.']or csv ['.$csv_type.'] type');
        return array();
    }
    //
    $cols = array(
        'file' => array(
            'f837' => array('Date', 'FileName', 'Control', 'Claim_ct', 'x12Partner'),
            'ta1'  => array('Date', 'FileName', 'Control', 'Trace', 'Code'),
            'f997' => array('Date', 'FileName', 'Control', 'Trace', 'RspType', 'RejCt'),
            'f276' => array('Date', 'FileName', 'Control', 'Claim_ct', 'x12Partner'),
            'f277' => array('Date', 'FileName', 'Control', 'Accept', 'AccAmt', 'Reject', 'RejAmt'),
            'f270' => array('Date', 'FileName', 'Control', 'Claim_ct', 'x12Partner'),
            'f271' => array('Date', 'FileName', 'Control', 'Claim_ct', 'Reject', 'Payer'),
            'f278' => array('Date', 'FileName', 'Control', 'TrnCount', 'Auth', 'Payer'),
            'f835' => array('Date', 'FileName', 'Control', 'Trace', 'Claim_ct', 'Denied', 'Payer'),
        ),
        'claim' => array(
            'f837' => array('PtName', 'SvcDate', 'CLM01', 'InsLevel', 'BHT03', 'FileName', 'Fee', 'PtPaid', 'Provider' ),
            'f997' => array('PtName', 'RspDate', 'Trace', 'Status', 'Control', 'FileName', 'RspType', 'err_seg'),
            'f276' => array('PtName', 'SvcDate', 'CLM01', 'ClaimID', 'BHT03', 'FileName', 'Payer', 'Trace'),
            'f277' => array('PtName', 'SvcDate', 'CLM01', 'Status', 'BHT03', 'FileName', 'Payer', 'Trace'),
            'f270' => array('PtName', 'ReqDate', 'Trace', 'InsBnft', 'BHT03', 'FileName', 'Payer'),
            'f271' => array('PtName', 'RspDate', 'Trace', 'Status', 'BHT03', 'FileName', 'Payer'),
            'f278' => array('PtName', 'FileDate', 'Trace', 'Status', 'BHT03', 'FileName', 'Auth', 'Payer'),
            'f835' => array('PtName', 'SvcDate', 'CLM01', 'Status', 'Trace', 'FileName', 'ClaimID', 'Pmt', 'PtResp', 'Payer'),
        ),
    );
    if (!isset($cols[$ct])) {
        // unexpected error
        csv_edihist_log ('edih_csv_table_header() error: failed to match file type ['.$ft.'] or csv type ['.$ct.']');
        return false;
    }
    // unmatched file type yields false, as the original empty-array check did
    return (isset($cols[$ct][$ft]) && count($cols[$ct][$ft])) ? $cols[$ct][$ft] : false;
}
/*
function csv_files_header($file_type, $csv_type) {
//
$tp = csv_file_type($type);
if (!$tp) {
csv_edihist_log('csv_files_header: incorrect type '.$file_type);
return false;
}
if (!strpos('|file|claim', $csv_type) ) {
csv_edihist_log('csv_files_header error: incorrect csv type '.$csv_type);
return false;
}
//
$ft = strpos('|277', $file_type) ? 'f277' : $file_type;
$ft = strpos('|835', $file_type) ? 'era' : $ft;
$ft = strpos('|837', $file_type) ? 'batch' : $ft;
$ft = strpos('|999|997|ack|ta1', $file_type) ? 'f997' : $ft;
//
$csv_hd_ar = array();
// dataTables: | 'date' | 'file_name (link)' | 'file_text (link fmt)' | 'claim_ct' | 'reject_ct' |
$csv_hd_ar['ack']['file'] = array('Date', 'FileName', 'isa13', 'ta1ctrl', 'Code');
$csv_hd_ar['ebr']['file'] = array('Date', 'FileName', 'clrhsid', 'claim_ct', 'reject_ct', 'Batch');
$csv_hd_ar['ibr']['file'] = array('Date', 'FileName', 'clrhsid', 'claim_ct', 'reject_ct', 'Batch');
//
// dataTables: | 'date' | 'file_name (link)' | 'file_text (link fmt)' | 'claim_ct' | 'partner' |
$csv_hd_ar['batch']['file'] = array('Date', 'FileName', 'Ctn_837', 'claim_ct', 'x12_partner');
$csv_hd_ar['ta1']['file'] = array('Date', 'FileName', 'Ctn_ta1', 'ta1ctrl', 'Code');
$csv_hd_ar['f997']['file'] = array('Date', 'FileName', 'Ctn_999', 'ta1ctrl', 'RejCt');
$csv_hd_ar['f277']['file'] = array('Date', 'FileName', 'Ctn_277', 'Accept', 'AccAmt', 'Reject', 'RejAmt');
$csv_hd_ar['f270']['file'] = array('Date', 'FileName', 'Ctn_270', 'claim_ct', 'x12_partner');
$csv_hd_ar['f271']['file'] = array('Date', 'FileName', 'Ctn_271', 'claim_ct', 'Denied', 'Payer');
$csv_hd_ar['era']['file'] = array('Date', 'FileName', 'Trace', 'claim_ct', 'Denied', 'Payer');
//
// dataTables: | 'pt_name' | 'svc_date' | 'clm01 (link clm)' | 'status (mouseover)' | b f t (links to files) | message (mouseover) |
$csv_hd_ar['ebr']['claim'] = array('PtName','SvcDate', 'clm01', 'Status', 'Batch', 'FileName', 'Payer');
$csv_hd_ar['ibr']['claim'] = array('PtName','SvcDate', 'clm01', 'Status', 'Batch', 'FileName', 'Payer');
$csv_hd_ar['dpr']['claim'] = array('PtName','SvcDate', 'clm01', 'Status', 'Batch', 'FileName', 'Payer');
//
// dataTables: | 'pt_name' | 'svc_date' | 'clm01 (link clm)' | 'status (mouseover)' | 'bht03_837 (link rsp)' | message (mouseover) |
$csv_hd_ar['batch']['claim'] = array('PtName', 'SvcDate', 'clm01', 'InsLevel', 'Ctn_837', 'File_837', 'Fee', 'PtPaid', 'Provider' );
$csv_hd_ar['f997']['claim'] = array('PtName', 'SvcDate', 'clm01', 'Status', 'ak_num', 'File_997', 'Ctn_837', 'err_seg');
$csv_hd_ar['f277']['claim'] = array('PtName', 'SvcDate', 'clm01', 'Status', 'st_277', 'File_277', 'payer_name', 'claim_id', 'bht03_837');
$csv_hd_ar['f270']['claim'] = array('PtName', 'SvcDate', 'clm01', 'InsLevel', 'st_270', 'File_270', 'payer_name', 'bht03_270');
$csv_hd_ar['f271']['claim'] = array('PtName', 'SvcDate', 'clm01', 'Status', 'st_271', 'File_271', 'payer_name', 'bht03_270');
$csv_hd_ar['era']['claim'] = array('PtName', 'SvcDate', 'clm01', 'Status', 'trace', 'File_835', 'claimID', 'Pmt', 'PtResp', 'Payer');
//
return $csv_hd_ar[$ft][$csv_type];
}
*/
/**
* adapted from http://scratch99.com/web-development/javascript/convert-bytes-to-mb-kb/
*
* @param int
*
* @return string
*/
function csv_convert_bytes($bytes) {
    // Human readable file size: whole bytes below 1 KB, one decimal
    // place above; zero yields 'n/a'.
    $units = array('Bytes', 'KB', 'MB', 'GB', 'TB');
    if ($bytes == 0) {
        return 'n/a';
    }
    $pow = floor( log($bytes) / log(1024) );
    if ($pow == 0) {
        return $bytes.' '.$units[0];
    }
    return round($bytes / pow(1024, $pow), 1).' '.$units[$pow];
}
/**
* Determine whether an array is multidimensional
*
* @param array
* @return bool false if arrayis multidimensional
*/
function csv_singlerecord_test ( $array ) {
    // True when $array is a flat (single-record) array: the recursive
    // count exceeds the normal count exactly when a sub-array exists.
    // Non-array input is false.
    if (!is_array($array)) {
        return false;
    }
    return count($array, COUNT_RECURSIVE) == count($array, COUNT_NORMAL);
}
/*
* give first and last index keys for an array
*
* @param array
* @return array
*/
function csv_array_bounds($array) {
    // First and last keys of an array: [0 => first key, 1 => last key].
    // An empty or non-array argument yields an empty result.
    //
    // bug fix: the old code tested the return values of reset()/end(),
    // but those functions return the *element value*, so a falsy first
    // or last element (false, 0, '', null) wrongly skipped its key.
    // With count() already verified non-zero, key() is always valid.
    $ret_ar = array();
    if (is_array($array) && count($array)) {
        reset($array);
        $ret_ar[0] = key($array);
        end($array);
        $ret_ar[1] = key($array);
    }
    return $ret_ar;
}
/*
* return a csv file as an associative array
* the first row is the header or array keys for the row
* array structure:
* array[i]=>array(hdr0=>csvrow[0], hdr1=>csvrow[1], hdr2=>csvrow[2], ...)
*
* @param string file type e.g. f837
* @param string csv type claim or file
* @return array
*/
function csv_assoc_array($file_type, $csv_type) {
    // Read a csv table into an associative array: the first row supplies
    // the keys, each following row becomes array[i][header => value].
    // Returns false on bad arguments or an unopenable file, an empty
    // array when the table file does not exist.
    if (!$file_type || !$csv_type) {
        csv_edihist_log('csv_assoc_array; invalid arguments ft: '.$file_type.' csvt: '.$csv_type);
        return false;
    }
    $param = csv_parameters($file_type);
    // any csv type containing 'aim' selects the claims table
    $which = (strpos($csv_type, 'aim')) ? 'claims_csv' : 'files_csv';
    $fp = (isset($param[$which])) ? $param[$which] : '';
    $csv_ar = array();
    if (!is_file($fp)) {
        csv_edihist_log('csv_assoc_array; invalid csv file '.basename($fp));
        return $csv_ar;
    }
    $fh = fopen($fp, "rb");
    if ($fh === false) {
        // invalid file path
        csv_edihist_log('csv_assoc_array; invalid file path '.$fp);
        return false;
    }
    $hdr = array();
    $cols = 0;
    $idx = -1;
    while (($data = fgetcsv($fh, 2048, ",")) !== false) {
        if ( is_null($data) ) { continue; }
        if ($idx < 0) {
            // header row: remember the column names and count
            $hdr = $data;
            $cols = count($data);
        } else {
            for ($i = 0; $i < $cols; $i++) {
                $csv_ar[$idx][$hdr[$i]] = $data[$i];
            }
        }
        $idx++;
    }
    fclose($fh);
    //
    return $csv_ar;
}
/**
* A multidimensional array will be flattened to a single row.
*
* @param array $array array to be flattened
* @return array
*/
function csv_array_flatten($array) {
    // Recursively merge a multidimensional array into a single level.
    // Non-array input returns FALSE.  Note: array_merge() renumbers
    // integer keys and lets later string keys overwrite earlier ones.
    if (!is_array($array)) {
        return FALSE;
    }
    $flat = array();
    foreach ($array as $k => $v) {
        if (is_array($v)) {
            $flat = array_merge($flat, csv_array_flatten($v));
        } else {
            $flat[$k] = $v;
        }
    }
    return $flat;
}
/**
* Write parsed data from edi x12 files to csv file
*
* @uses csv_parameters()
* @usescsv_table_header()
*
* @param array data array from parse functions
* @return bool true if no error
*/
function edih_csv_write($csv_data) {
    // Write parsed x12 data to the per-type 'file' and 'claim' csv tables.
    //
    // $csv_data shape: array[icn] => array('type' => ft, 'file' => [j][key], 'claim' => [j][key])
    // Creates the csv files with a header row (mode 0600) when missing.
    // Returns the row count written by the last section, or false on error.
    if ( ! (is_array($csv_data) && count($csv_data)) ){
        csv_edihist_log('edih_csv_write(): invalid data array');
        return false;
    }
    // initialized here so the final return is defined even when no
    // section is written (previously an undefined-variable notice)
    $rws = 0;
    //
    foreach($csv_data as $icn=>$isa) {
        // should be array[icn] => [file][j][key] [claim][j][key] [type]
        $ft = ( isset($isa['type']) ) ? $isa['type'] : '';
        if (!$ft) {
            csv_edihist_log('edih_csv_write(): invalid file type');
            continue;
        }
        //
        $param = csv_parameters($ft);
        $f_hdr = csv_table_header($ft, 'file');
        $c_hdr = csv_table_header($ft, 'claim');
        if (is_array($param)) {
            // if either csv file does not exist, create it with a header row;
            // all unlisted files in the type directory will be processed on the
            // next process round
            if (is_file($param['files_csv']) && (filesize($param['files_csv']) > 20)) {
                csv_edihist_log('edih_csv_write: csv check for files csv '.$ft);
            } else {
                $nfcsv = $param['files_csv'];
                $fh = fopen($nfcsv, 'wb');
                if ($fh !== false) {
                    fputcsv($fh, $f_hdr);
                    fclose($fh);
                    chmod($nfcsv, 0600);
                }
                csv_edihist_log('edih_csv_write: created files_csv file for '.$ft);
            }
            if (is_file($param['claims_csv']) && filesize($param['claims_csv'])) {
                csv_edihist_log('edih_csv_write: csv check for claims csv '.$ft);
            } else {
                $nfcsv = $param['claims_csv'];
                $fh = fopen($nfcsv, 'wb');
                if ($fh !== false) {
                    fputcsv($fh, $c_hdr);
                    fclose($fh);
                    chmod($nfcsv, 0600);
                }
                csv_edihist_log('edih_csv_write: created claims_csv file for '.$ft);
            }
        } else {
            csv_edihist_log('edih_csv_write: parameters error for type '.$ft);
            return false;
        }
        //
        foreach($isa as $key=>$data) {
            if ($key == 'type') { continue; }
            // get the csv file path from parameters
            $fp = ($key == 'file') ? $param['files_csv'] : $param['claims_csv'];
            // get the csv row header
            $order_ar = ($key == 'file') ? $f_hdr : $c_hdr;
            $ct = count($order_ar);
            $chrs = 0;
            $rws = 0;
            //
            $fh = fopen( $fp, 'ab');
            if (is_resource($fh)) {
                // to assure proper order of data in each row, the
                // csv row is assembled by matching keys to the header row
                foreach($data as $ky=>$row) {
                    $csvrow = array();
                    for ($i=0; $i<$ct; $i++) {
                        $csvrow[$i] = $row[$order_ar[$i]];
                    }
                    $chrs += fputcsv ( $fh , $csvrow );
                    $rws++;
                }
                // release the append handle (previously leaked)
                fclose($fh);
            } else {
                csv_edihist_log('edih_csv_write(): failed to open '.$fp);
                return false;
            }
            //
            csv_edihist_log('edih_csv_write() wrote '.$rws.' rows to '.basename($fp));
        }
    }
    //
    return $rws;
}
/**
* Search a csv record file and return the row or values from selected columns
*
* This function requires that the $search_ar parameter be an array
* with keys ['s_val']['s_col']['r_cols'], and 'r_cols' is an array
* 's_val' is the search value, s_col is the column to check, r_cols is an array
* of column numbers from which values are returned. If r_cols is not an array,
* then the entire row will be returned. If the 'expect' parameter is 1, then
* the search will stop after the first success and return the result. Otherwise, the
* entire file will be searched.
* ex: csv_search_record('batch', 'claim', array('s_val'=>'0024', 's_col'=>9, 'r_cols'=>array(1, 2, 7)), "1" )
*
* @uses csv_parameters()
* @param string $file_type
* @param string $csv_type
* @param array $search_ar
* @param mixed $expect
* @return array
*/
function csv_search_record($file_type, $csv_type, $search_ar, $expect='1') {
    // Scan a csv table and return matching rows (or selected columns of them).
    //
    // $search_ar must have exactly the keys s_val (value to match), s_col
    // (column index to compare), and r_cols (array of column indexes to
    // return, or a non-array to return the whole row).  With $expect == '1'
    // the scan stops at the first match.  Returns the match array, or false
    // when nothing matched or on any argument/file error.
    csv_edihist_log("csv_search_record: ".strval($file_type)." ".strval($csv_type)." ".strval($search_ar['s_val']));
    //
    $tp = csv_file_type($file_type);
    if (!$tp) {
        csv_edihist_log("csv_search_record: incorrect type $file_type");
        return false;
    }
    //
    $params = csv_parameters($tp);
    //
    if ($csv_type == 'claim') {
        $fp = $params['claims_csv'];
    } elseif ($csv_type == 'file') {
        $fp = $params['files_csv'];
    } else {
        csv_edihist_log('csv_search_record: incorrect csv type '.$csv_type);
        return FALSE;
    }
    // strict shape check: keys must be exactly s_val, s_col, r_cols in order
    if (!is_array($search_ar) || array_keys($search_ar) != array('s_val', 's_col', 'r_cols')) {
        csv_edihist_log('csv_search_record: invalid search criteria');
        return FALSE;
    }
    $sv = $search_ar['s_val'];
    $sc = $search_ar['s_col'];
    // a non-array (or empty) r_cols means "return the entire row"
    $rv = (is_array($search_ar['r_cols']) && count($search_ar['r_cols'])) ? $search_ar['r_cols'] : 'all';
    $ret_ar = array();
    $idx = 0;
    if (($fh1 = fopen($fp, "r")) !== false) {
        while (($data = fgetcsv($fh1)) !== false) {
            // check for a match
            if ($data[$sc] == $sv) {
                if ($rv == 'all') {
                    $ret_ar[$idx] = $data;
                } else {
                    // now loop through the 'r_cols' array for data index
                    $dct = count($data);
                    foreach($rv as $c) {
                        // make sure we don't access a non-existing index
                        if ($c >= $dct) { continue; }
                        //
                        $ret_ar[$idx][] = $data[$c];
                    }
                }
                $idx++;
                if ($expect == '1') { break; }
            }
        }
        fclose($fh1);
    } else {
        csv_edihist_log('csv_search_record: failed to open '.$fp);
        return false;
    }
    if (empty($ret_ar) ) {
        return false;
    } else {
        return $ret_ar;
    }
}
/**
* Search the 'claims' csv table for the patient control and find the associated file name
*
* Searchtype
* In 'claims' csv tables, clm01 is position 2, ISA13 number is pos 4, and filename is pos 5;
* Since we are interested usually in the filename, ISA13 is irrelevant usually.
*
* @uses csv_parameters()
* @uses csv_pid_enctr_parse()
* @param string patient control-- pid-encounter, encounter, or pid
* @param string filetype -- x12 type or f837, f277, etc
* @param string search type encounter, pid, or clm01
* @return array|bool [i] data row array or empty on error
*/
function csv_file_by_enctr($clm01, $filetype='f837') {
    // Search the 'claims' csv table for rows whose CLM01 (pid-encounter,
    // column 2) matches the given value, keyed by the table header.
    //
    // return array of [i](pid_encounter, filename), there may be more than one file
    //
    // NOTE(review): on some error paths this returns an HTML string or false
    // rather than an array -- callers must check the type; confirm intended.
    if (!$clm01) {
        return 'invalid encounter data<br>' . PHP_EOL;
    }
    //
    $ret_ar = array();
    $ft = csv_file_type($filetype);
    //
    if (!$ft) {
        csv_edihist_log('csv_file_by_enctr: incorrect file type '.$filetype);
        return $ret_ar;
    } else {
        $params = csv_parameters($ft);
        //$fp = isset($params['claims_csv']) ? dirname(__FILE__).$params['claims_csv'] : false;
        $fp = isset($params['claims_csv']) ? $params['claims_csv'] : false;
        $h_ar = csv_table_header($ft, 'claim');
        $hct = count($h_ar);
        if (!$fp) {
            csv_edihist_log('csv_file_by_enctr: incorrect file type '.$filetype);
            return $ret_ar;
        }
    }
    // split the CLM01 value into pid and encounter parts to pick a search mode:
    // both parts -> exact "pid-enctr" match, one part -> suffix/prefix match
    $enct = csv_pid_enctr_parse(strval($clm01));
    $p = (isset($enct['pid'])) ? $enct['pid'] : '';
    $e = (isset($enct['enctr'])) ? $enct['enctr'] : '';
    if ($p && $e) {
        $pe = $p.'-'.$e;
        $srchtype = '';
    } elseif ($e) {
        $srchtype = 'encounter';
    } elseif ($p) {
        $srchtype = 'pid';
    } else {
        csv_edihist_log('csv_file_by_enctr: unable to determine encounter value '.$clm01);
        return 'unable to determine encounter value '.$clm01.'<br />'.PHP_EOL;
    }
    // OpenEMR creates CLM01 as nnn-nnn in genX12 batch
    //$pm = preg_match('/\D/', $enctr, $match2, PREG_OFFSET_CAPTURE);
    $val = array();
    //array_combine ( array $keys , array $values )
    // in 'claims' csv tables, clm01 is position 2 and filename is position 5
    if (($fh1 = fopen($fp, "r")) !== FALSE) {
        if ($srchtype == 'encounter') {
            while (($data = fgetcsv($fh1, 1024, ",")) !== FALSE) {
                // check for a match: compare the part after the dash to the encounter
                if (strpos($data[2], $e)) {
                    $te = substr($data[2], strpos($data[2],'-')+1);
                    if (strcmp($te, $e) === 0) {
                        for ($i=0; $i<$hct; $i++) { $val[$h_ar[$i]] = $data[$i]; }
                        $ret_ar[] = $val; // array_combine($h_ar, $data);
                    }
                }
            }
        } elseif ($srchtype == 'pid') {
            // compare the part before the dash to the pid
            while (($data = fgetcsv($fh1, 1024, ',')) !== FALSE) {
                if (strpos($data[2], $p) !== false) {
                    $te = (strpos($data[2], '-')) ? substr($data[2], 0, strpos($data[2],'-')) : '';
                    if (strcmp($te, $p) === 0) {
                        for ($i=0; $i<$hct; $i++) { $val[$h_ar[$i]] = $data[$i]; }
                        $ret_ar[] = $val; // $ret_ar[] = array_combine($h_ar, $data);
                    }
                }
            }
        } else {
            // exact "pid-encounter" match
            while (($data = fgetcsv($fh1, 1024, ",")) !== FALSE) {
                // check for a match
                if ( strcmp($data[2], $pe) === 0 ) {
                    for ($i=0; $i<$hct; $i++) { $val[$h_ar[$i]] = $data[$i]; }
                    $ret_ar[] = $val; // $ret_ar[] = array_combine($h_ar, $data);
                }
            }
        }
        fclose($fh1);
    } else {
        csv_edihist_log('csv_file_by_enctr: failed to open csv file '.basename($fp));
        return false;
    }
    return $ret_ar;
}
/**
* get the x12 file containing the control_num ISA13
*
* @todo the csv for x12 files 999, 277, 835, 837 must have the control number
*
* @uses csv_search_record()
* @param string $control_num the interchange control number, isa13
* @return string the file name
*/
function csv_file_by_controlnum($type, $control_num) {
    // Look up the x12 file name whose 'Control' column matches the given
    // interchange control number (ISA13).  Only the first 9 characters of the
    // control number are significant.  Returns the file name, or '' if no
    // single-column match is found.
    $filetype = csv_file_type($type);
    //
    $header = csv_table_header($filetype, 'file');
    $search_col = array_search('Control', $header);
    $return_col = array_search('FileName', $header);
    //
    // search criteria: match the (truncated) control number in the Control
    // column and return only the FileName column; stop at the first hit
    $ctl_key = (strlen($control_num) >= 9) ? substr($control_num, 0, 9) : $control_num;
    $criteria = array('s_val'=>$ctl_key, 's_col'=>$search_col, 'r_cols'=>array($return_col));
    $found = csv_search_record($filetype, 'file', $criteria, "1");
    //
    $filename = '';
    if (is_array($found) && count($found[0]) == 1) {
        $filename = $found[0][0];
    }
    return $filename;
}
/**
* Search the csv table to obtain the file name for a given
* trace value (835 / 997 999 type only)
*
* Note: the 997/999 trace is the ISA13 of a batch file
*
*
* @param string trace value (TRN02, TA101, or BHT03)
* @param string from type (default is f835)
* @param string to type (default is f835)
* @return string file name or empty string
*/
function csv_file_by_trace($trace, $from_type='f835', $to_type='f837') {
    // Find the file name referenced by a trace value (TRN02, TA101, or BHT03),
    // mapping from one x12 type to another; e.g. an 835 payment trace back to
    // the originating 837 claim file.  Returns the file name or ''.
    //
    $ft = ($from_type) ? csv_file_type($from_type) : '';
    $tt = ($to_type) ? csv_file_type($to_type) : '';
    $fn = '';
    $csv_type = '';
    $type = '';
    $search = array();
    //
    csv_edihist_log("csv_file_by_trace: $trace from $ft to $tt");
    //
    // $search_ar should have keys ['s_val']['s_col'] array(['r_cols'])
    // like "f837', 'claim, array(9, '0024', array(1, 2, 7))
    // note: 'All' is not an array, so csv_search_record returns whole rows
    //
    if ($ft == 'f835') {
        // trace payment to status or claim
        $search = array('s_val'=>$trace, 's_col'=>3, 'r_cols'=>'All');
        $type = $tt;
        $csv_type = 'file';
    } elseif ($ft == 'f997') {
        // trace ACK to batch file; a 997/999 trace is the batch ISA13,
        // of which only the first 9 digits are significant
        $icn = (is_numeric($trace) && strlen($trace) >= 9) ? substr($trace, 0, 9) : $trace;
        $search = array('s_val'=>$icn, 's_col'=>2, 'r_cols'=>'All');
        $type = $tt;
        $csv_type = 'file';
    } elseif ($ft == 'f277') {
        // trace status to status req or claim
        if ($tt == 'f276') {
            $search = array('s_val'=>$trace, 's_col'=>7, 'r_cols'=>'All');
            $type = $tt;
            $csv_type = 'claim';
        } elseif ($tt == 'f837') {
            // expect CLM01 for trace value
            $search = array('s_val'=>$trace, 's_col'=>2, 'r_cols'=>'All');
            $type = $tt;
            $csv_type = 'claim';
        }
    } elseif ($ft == 'f271') {
        // trace benefit to benefit req
        if ($tt == 'f270') {
            $search = array('s_val'=>$trace, 's_col'=>2, 'r_cols'=>'All');
            $type = $tt;
            $csv_type = 'claim';
        }
    } elseif ($ft == 'f278') {
        // trace auth to auth req
        $search = array('s_val'=>$trace, 's_col'=>2, 'r_cols'=>'All');
        $type = 'f278';
        $csv_type = 'claim';
    } else {
        // fix: log the actual argument; $file_type was an undefined variable here
        csv_edihist_log('csv_file_by_trace: incorrect file type '.$from_type);
        return $fn;
    }
    //
    if ($type && $csv_type && $search) {
        $result = csv_search_record($type, $csv_type, $search, false);
        if (is_array($result) && count($result)) {
            if ($ft == 'f278') {
                // for auth traces, take the file name from the first reply row
                foreach($result as $r) {
                    if ($r[6] == 'Rsp' || $r[6] == 'Reply') {
                        $fn = $result[0][5];
                        break;
                    }
                }
            } elseif ($csv_type == 'claim') {
                $fn = $result[0][5];   // claim tables: file name is column 5
            } else {
                $fn = $result[0][1];   // file tables: file name is column 1
            }
        } else {
            csv_edihist_log("csv_file_by_trace: search failed $type csv $csv_type for trace $trace $from_type $to_type");
        }
    } else {
        csv_edihist_log("csv_file_by_trace: error type $type csv $csv_type for trace $trace $from_type $to_type");
    }
    return $fn;
}
/**
* list claim records with Denied or Reject status in given file
*
* @param string
* @param string
*
* @return array
*/
function csv_denied_by_file($filetype, $filename, $trace='') {
    // List claim rows with a denied/rejected status for a given response file
    // (f997/f999, f271, f277, or f835), optionally keyed by trace instead of
    // file name for 835s.  Returns an array of matching csv rows.
    $ret_ar = array();
    $ft = csv_file_type($filetype);
    // membership test: '|fxxx' occurs at a nonzero offset, so strpos() is truthy
    // for any listed type ('' from csv_file_type fails, returning pos 0/false)
    if (strpos('|f997|f271|f277|f835', $ft)) {
        $param = csv_parameters($ft);
        $csv_file = $param['claims_csv'];
    } else {
        csv_edihist_log("csv_errors_by_file: incorrect file type $filetype");
        return $ret_ar;
    }
    //
    csv_edihist_log("csv_errors_by_file: $ft searching $filename with trace $trace");
    //
    if (($fh1 = fopen($csv_file, "r")) !== false) {
        if ($ft == 'f835') {
            while (($data = fgetcsv($fh1, 1024, ",")) !== false) {
                // check filename, then status; claim status codes 1,2,3,19,20,21
                // are "accepted" -- anything else is collected as denied
                if ($trace) {
                    if ($data[4] == $trace) {
                        if (!in_array($data[3], array('1', '2', '3', '19', '20', '21')) ) { $ret_ar[] = $data; }
                    }
                } elseif ($data[5] == $filename) {
                    if (!in_array($data[3], array('1', '2', '3', '19', '20', '21')) ) { $ret_ar[] = $data; }
                }
            }
        } elseif ($ft == 'f277') {
            while (($data = fgetcsv($fh1, 1024, ",")) !== false) {
                if ($data[5] == $filename) {
                    // status categories A1/A2/A5 are acknowledgments -- skip those
                    if ( !strpos('|A1|A2|A5', substr($data[3], 0, 2))) {
                        $ret_ar[] = $data;
                    }
                }
            }
        } elseif (strpos('|f997|f999|f271', $ft)) {
            while (($data = fgetcsv($fh1, 1024, ",")) !== false) {
                if ($data[5] == $filename) {
                    // 'A' means accepted -- collect everything else
                    if ($data[3] !== 'A') {
                        $ret_ar[] = $data;
                    }
                }
            }
        } else {
            csv_edihist_log("csv_errors_by_file: file type did not match $filetype");
        }
        fclose($fh1);
    }
    //
    return $ret_ar;
}
/**
* A function to try and assure the pid-encounter is correctly parsed
*
* assume a format of pid-encounter, since that is sent in the OpenEMR x12 837
*
* @param string $pid_enctr the value from element CLM01
* return array array('pid' => $pid, 'enctr' => $enc)
*/
function csv_pid_enctr_parse( $pid_enctr ) {
    // Parse a CLM01-style patient account value into its pid and encounter
    // parts.  OpenEMR sends "pid-encounter" in the x12 837 batch; bare digit
    // strings and other separators are handled heuristically.
    //
    // @param string $pid_enctr  the value from element CLM01
    // @return array|bool  array('pid' => $pid, 'enctr' => $enc) or false
    if (!$pid_enctr || !is_string($pid_enctr) ) {
        csv_edihist_log("csv_pid_enctr_parse: invalid argument");
        return false;
    }
    // fix: initialize both parts so every branch returns defined values
    // (previously several branches set only one, yielding undefined-variable
    // notices and a null array member)
    $pid = '';
    $enc = '';
    $pval = trim($pid_enctr);
    if ( strpos($pval, '-') ) {
        // standard "pid-encounter" form
        $pid = substr($pval, 0, strpos($pval, '-'));
        $enc = substr($pval, strpos($pval, '-')+1);
    } elseif ( ctype_digit($pval) ) {
        if ( preg_match('/(19|20)\d{2}[01]\d{1}[0-3]\d{1}/', $pval) ) {
            // all digits embedding a yyyymmdd date: treat as an encounter
            $enc = $pval;
        } else {
            // all digits, no date: encounter only if long enough
            $enc = ( strlen($pval) ) >= ENCOUNTER_MIN_DIGIT_LENGTH ? $pval : '';
            $pid = '';
        }
    } elseif ( preg_match('/\D/', $pval, $match2, PREG_OFFSET_CAPTURE) ) {
        // some other non-digit separator: split on the first run of non-digits
        $inv_split = (count($match2)) ? preg_split('/\D/', $pval, 2, PREG_SPLIT_NO_EMPTY) : false;
        if ($inv_split) {
            $pid = $inv_split[0];
            $enc = $inv_split[1];
        }
    } else {
        $enc = ( strlen($pval) ) >= ENCOUNTER_MIN_DIGIT_LENGTH ? $pval : '';
        $pid = '';
    }
    return array('pid' => $pid, 'enctr' => $enc);
}
|
2597_0
|
crossvul
|
php
|
CWE-116
|
Improper Encoding or Escaping of Output - Improper encoding or escaping can allow attackers to change the commands that are sent to another component, inserting malicious commands instead.
|
shell
|
# bash/zsh git prompt support
#
# Copyright (C) 2006,2007 Shawn O. Pearce <spearce@spearce.org>
# Distributed under the GNU General Public License, version 2.0.
#
# This script allows you to see repository status in your prompt.
#
# To enable:
#
# 1) Copy this file to somewhere (e.g. ~/.git-prompt.sh).
# 2) Add the following line to your .bashrc/.zshrc:
# source ~/.git-prompt.sh
# 3a) Change your PS1 to call __git_ps1 as
# command-substitution:
# Bash: PS1='[\u@\h \W$(__git_ps1 " (%s)")]\$ '
# ZSH: setopt PROMPT_SUBST ; PS1='[%n@%m %c$(__git_ps1 " (%s)")]\$ '
# the optional argument will be used as format string.
# 3b) Alternatively, for a slightly faster prompt, __git_ps1 can
# be used for PROMPT_COMMAND in Bash or for precmd() in Zsh
# with two parameters, <pre> and <post>, which are strings
# you would put in $PS1 before and after the status string
# generated by the git-prompt machinery. e.g.
# Bash: PROMPT_COMMAND='__git_ps1 "\u@\h:\w" "\\\$ "'
# will show username, at-sign, host, colon, cwd, then
# various status string, followed by dollar and SP, as
# your prompt.
# ZSH: precmd () { __git_ps1 "%n" ":%~$ " "|%s" }
# will show username, pipe, then various status string,
# followed by colon, cwd, dollar and SP, as your prompt.
# Optionally, you can supply a third argument with a printf
# format string to finetune the output of the branch status
#
# The repository status will be displayed only if you are currently in a
# git repository. The %s token is the placeholder for the shown status.
#
# The prompt status always includes the current branch name.
#
# In addition, if you set GIT_PS1_SHOWDIRTYSTATE to a nonempty value,
# unstaged (*) and staged (+) changes will be shown next to the branch
# name. You can configure this per-repository with the
# bash.showDirtyState variable, which defaults to true once
# GIT_PS1_SHOWDIRTYSTATE is enabled.
#
# You can also see if currently something is stashed, by setting
# GIT_PS1_SHOWSTASHSTATE to a nonempty value. If something is stashed,
# then a '$' will be shown next to the branch name.
#
# If you would like to see if there're untracked files, then you can set
# GIT_PS1_SHOWUNTRACKEDFILES to a nonempty value. If there're untracked
# files, then a '%' will be shown next to the branch name. You can
# configure this per-repository with the bash.showUntrackedFiles
# variable, which defaults to true once GIT_PS1_SHOWUNTRACKEDFILES is
# enabled.
#
# If you would like to see the difference between HEAD and its upstream,
# set GIT_PS1_SHOWUPSTREAM="auto". A "<" indicates you are behind, ">"
# indicates you are ahead, "<>" indicates you have diverged and "="
# indicates that there is no difference. You can further control
# behaviour by setting GIT_PS1_SHOWUPSTREAM to a space-separated list
# of values:
#
# verbose show number of commits ahead/behind (+/-) upstream
# name if verbose, then also show the upstream abbrev name
# legacy don't use the '--count' option available in recent
# versions of git-rev-list
# git always compare HEAD to @{upstream}
# svn always compare HEAD to your SVN upstream
#
# By default, __git_ps1 will compare HEAD to your SVN upstream if it can
# find one, or @{upstream} otherwise. Once you have set
# GIT_PS1_SHOWUPSTREAM, you can override it on a per-repository basis by
# setting the bash.showUpstream config variable.
#
# If you would like to see more information about the identity of
# commits checked out as a detached HEAD, set GIT_PS1_DESCRIBE_STYLE
# to one of these values:
#
# contains relative to newer annotated tag (v1.6.3.2~35)
# branch relative to newer tag or branch (master~4)
# describe relative to older annotated tag (v1.6.3.1-13-gdd42c2f)
# default exactly matching tag
#
# If you would like a colored hint about the current dirty state, set
# GIT_PS1_SHOWCOLORHINTS to a nonempty value. The colors are based on
# the colored output of "git status -sb" and are available only when
# using __git_ps1 for PROMPT_COMMAND or precmd.
# check whether printf supports -v
# (bash's builtin printf does; some shells' do not -- __git_ps1 falls back
# to command substitution when this probe leaves the variable != "yes")
__git_printf_supports_v=
printf -v __git_printf_supports_v -- '%s' yes >/dev/null 2>&1
# stores the divergence from upstream in $p
# used by GIT_PS1_SHOWUPSTREAM
# Compute the HEAD-vs-upstream divergence indicator and store it in the
# caller-scope variable $p (e.g. "=", "<", ">", "<>", or verbose " u+N-M").
# Honors GIT_PS1_SHOWUPSTREAM options and the bash.showupstream git config.
__git_ps1_show_upstream ()
{
	local key value
	local svn_remote svn_url_pattern count n
	local upstream=git legacy="" verbose="" name=""
	svn_remote=()
	# get some config options from git-config
	local output="$(git config -z --get-regexp '^(svn-remote\..*\.url|bash\.showupstream)$' 2>/dev/null | tr '\0\n' '\n ')"
	while read -r key value; do
		case "$key" in
		bash.showupstream)
			# per-repo override of GIT_PS1_SHOWUPSTREAM
			GIT_PS1_SHOWUPSTREAM="$value"
			if [[ -z "${GIT_PS1_SHOWUPSTREAM}" ]]; then
				p=""
				return
			fi
			;;
		svn-remote.*.url)
			svn_remote[$((${#svn_remote[@]} + 1))]="$value"
			svn_url_pattern="$svn_url_pattern\\|$value"
			upstream=svn+git # default upstream is SVN if available, else git
			;;
		esac
	done <<< "$output"
	# parse configuration values
	for option in ${GIT_PS1_SHOWUPSTREAM}; do
		case "$option" in
		git|svn) upstream="$option" ;;
		verbose) verbose=1 ;;
		legacy) legacy=1 ;;
		name) name=1 ;;
		esac
	done
	# Find our upstream
	case "$upstream" in
	git) upstream="@{upstream}" ;;
	svn*)
		# get the upstream from the "git-svn-id: ..." in a commit message
		# (git-svn uses essentially the same procedure internally)
		local -a svn_upstream
		svn_upstream=($(git log --first-parent -1 \
			--grep="^git-svn-id: \(${svn_url_pattern#??}\)" 2>/dev/null))
		if [[ 0 -ne ${#svn_upstream[@]} ]]; then
			svn_upstream=${svn_upstream[${#svn_upstream[@]} - 2]}
			svn_upstream=${svn_upstream%@*}
			# strip the remote URL prefix to get the branch path
			local n_stop="${#svn_remote[@]}"
			for ((n=1; n <= n_stop; n++)); do
				svn_upstream=${svn_upstream#${svn_remote[$n]}}
			done
			if [[ -z "$svn_upstream" ]]; then
				# default branch name for checkouts with no layout:
				upstream=${GIT_SVN_ID:-git-svn}
			else
				upstream=${svn_upstream#/}
			fi
		elif [[ "svn+git" = "$upstream" ]]; then
			upstream="@{upstream}"
		fi
		;;
	esac
	# Find how many commits we are ahead/behind our upstream
	if [[ -z "$legacy" ]]; then
		count="$(git rev-list --count --left-right \
			"$upstream"...HEAD 2>/dev/null)"
	else
		# produce equivalent output to --count for older versions of git
		local commits
		if commits="$(git rev-list --left-right "$upstream"...HEAD 2>/dev/null)"
		then
			local commit behind=0 ahead=0
			for commit in $commits
			do
				case "$commit" in
				"<"*) ((behind++)) ;;
				*) ((ahead++)) ;;
				esac
			done
			count="$behind $ahead"
		else
			count=""
		fi
	fi
	# calculate the result; $count is "behind ahead" or "" for no upstream
	if [[ -z "$verbose" ]]; then
		case "$count" in
		"") # no upstream
			p="" ;;
		"0	0") # equal to upstream
			p="=" ;;
		"0	"*) # ahead of upstream
			p=">" ;;
		*"	0") # behind upstream
			p="<" ;;
		*)	    # diverged from upstream
			p="<>" ;;
		esac
	else
		case "$count" in
		"") # no upstream
			p="" ;;
		"0	0") # equal to upstream
			p=" u=" ;;
		"0	"*) # ahead of upstream
			p=" u+${count#0	}" ;;
		*"	0") # behind upstream
			p=" u-${count%	0}" ;;
		*)	    # diverged from upstream
			p=" u+${count#*	}-${count%	*}" ;;
		esac
		if [[ -n "$count" && -n "$name" ]]; then
			p="$p $(git rev-parse --abbrev-ref "$upstream" 2>/dev/null)"
		fi
	fi
}
# Helper function that is meant to be called from __git_ps1. It
# injects color codes into the appropriate gitstring variables used
# to build a gitstring.
# Inject terminal color codes into the caller-scope gitstring variables
# ($c, $z, $w, $i, $s, $u, $r set by __git_ps1).  Meant to be called only
# from __git_ps1 in PROMPT_COMMAND/precmd mode.
__git_ps1_colorize_gitstring ()
{
	# zsh uses %F{...} prompt escapes; bash uses raw ANSI sequences
	if [[ -n ${ZSH_VERSION-} ]]; then
		local c_red='%F{red}'
		local c_green='%F{green}'
		local c_lblue='%F{blue}'
		local c_clear='%f'
	else
		# Using \[ and \] around colors is necessary to prevent
		# issues with command line editing/browsing/completion!
		local c_red='\[\e[31m\]'
		local c_green='\[\e[32m\]'
		local c_lblue='\[\e[1;34m\]'
		local c_clear='\[\e[0m\]'
	fi
	local bad_color=$c_red
	local ok_color=$c_green
	local flags_color="$c_lblue"
	local branch_color=""
	# green branch when on a branch, red when detached
	if [ $detached = no ]; then
		branch_color="$ok_color"
	else
		branch_color="$bad_color"
	fi
	c="$branch_color$c"
	z="$c_clear$z"
	# dirty worktree marker in red
	if [ "$w" = "*" ]; then
		w="$bad_color$w"
	fi
	# staged marker in green
	if [ -n "$i" ]; then
		i="$ok_color$i"
	fi
	# stash marker in blue
	if [ -n "$s" ]; then
		s="$flags_color$s"
	fi
	# untracked marker in red
	if [ -n "$u" ]; then
		u="$bad_color$u"
	fi
	r="$c_clear$r"
}
# __git_ps1 accepts 0 or 1 arguments (i.e., format string)
# when called from PS1 using command substitution
# in this mode it prints text to add to bash PS1 prompt (includes branch name)
#
# __git_ps1 requires 2 or 3 arguments when called from PROMPT_COMMAND (pc)
# in that case it _sets_ PS1. The arguments are parts of a PS1 string.
# when two arguments are given, the first is prepended and the second appended
# to the state string when assigned to PS1.
# The optional third parameter will be used as printf format string to further
# customize the output of the git-status string.
# In this mode you can request colored hints using GIT_PS1_SHOWCOLORHINTS=true
# Build the git status string for the prompt.  With 0-1 args (command
# substitution mode) it prints the string; with 2-3 args (PROMPT_COMMAND /
# precmd mode) it sets PS1 directly from <pre> + status + <post>.
__git_ps1 ()
{
	local pcmode=no
	local detached=no
	local ps1pc_start='\u@\h:\w '
	local ps1pc_end='\$ '
	local printf_format=' (%s)'
	case "$#" in
		2|3)	pcmode=yes
			ps1pc_start="$1"
			ps1pc_end="$2"
			printf_format="${3:-$printf_format}"
		;;
		0|1)	printf_format="${1:-$printf_format}"
		;;
		*)	return
		;;
	esac
	# one git invocation gathers the repo location and state flags
	local repo_info rev_parse_exit_code
	repo_info="$(git rev-parse --git-dir --is-inside-git-dir \
		--is-bare-repository --is-inside-work-tree \
		--short HEAD 2>/dev/null)"
	rev_parse_exit_code="$?"
	if [ -z "$repo_info" ]; then
		if [ $pcmode = yes ]; then
			#In PC mode PS1 always needs to be set
			PS1="$ps1pc_start$ps1pc_end"
		fi
		return
	fi
	# peel the rev-parse output apart line by line, last line first
	local short_sha
	if [ "$rev_parse_exit_code" = "0" ]; then
		short_sha="${repo_info##*$'\n'}"
		repo_info="${repo_info%$'\n'*}"
	fi
	local inside_worktree="${repo_info##*$'\n'}"
	repo_info="${repo_info%$'\n'*}"
	local bare_repo="${repo_info##*$'\n'}"
	repo_info="${repo_info%$'\n'*}"
	local inside_gitdir="${repo_info##*$'\n'}"
	local g="${repo_info%$'\n'*}"
	# r = in-progress operation tag, b = branch name, step/total = rebase progress
	local r=""
	local b=""
	local step=""
	local total=""
	if [ -d "$g/rebase-merge" ]; then
		read b 2>/dev/null <"$g/rebase-merge/head-name"
		read step 2>/dev/null <"$g/rebase-merge/msgnum"
		read total 2>/dev/null <"$g/rebase-merge/end"
		if [ -f "$g/rebase-merge/interactive" ]; then
			r="|REBASE-i"
		else
			r="|REBASE-m"
		fi
	else
		if [ -d "$g/rebase-apply" ]; then
			read step 2>/dev/null <"$g/rebase-apply/next"
			read total 2>/dev/null <"$g/rebase-apply/last"
			if [ -f "$g/rebase-apply/rebasing" ]; then
				read b 2>/dev/null <"$g/rebase-apply/head-name"
				r="|REBASE"
			elif [ -f "$g/rebase-apply/applying" ]; then
				r="|AM"
			else
				r="|AM/REBASE"
			fi
		elif [ -f "$g/MERGE_HEAD" ]; then
			r="|MERGING"
		elif [ -f "$g/CHERRY_PICK_HEAD" ]; then
			r="|CHERRY-PICKING"
		elif [ -f "$g/REVERT_HEAD" ]; then
			r="|REVERTING"
		elif [ -f "$g/BISECT_LOG" ]; then
			r="|BISECTING"
		fi
		if [ -n "$b" ]; then
			:
		elif [ -h "$g/HEAD" ]; then
			# symlink symbolic ref
			b="$(git symbolic-ref HEAD 2>/dev/null)"
		else
			local head=""
			if ! read head 2>/dev/null <"$g/HEAD"; then
				if [ $pcmode = yes ]; then
					PS1="$ps1pc_start$ps1pc_end"
				fi
				return
			fi
			# is it a symbolic ref?
			b="${head#ref: }"
			if [ "$head" = "$b" ]; then
				detached=yes
				b="$(
				case "${GIT_PS1_DESCRIBE_STYLE-}" in
				(contains)
					git describe --contains HEAD ;;
				(branch)
					git describe --contains --all HEAD ;;
				(describe)
					git describe HEAD ;;
				(* | default)
					git describe --tags --exact-match HEAD ;;
				esac 2>/dev/null)" ||
				b="$short_sha..."
				b="($b)"
			fi
		fi
	fi
	if [ -n "$step" ] && [ -n "$total" ]; then
		r="$r $step/$total"
	fi
	# status flag characters: w=dirty, i=staged, s=stash, u=untracked
	local w=""
	local i=""
	local s=""
	local u=""
	local c=""
	local p=""
	if [ "true" = "$inside_gitdir" ]; then
		if [ "true" = "$bare_repo" ]; then
			c="BARE:"
		else
			b="GIT_DIR!"
		fi
	elif [ "true" = "$inside_worktree" ]; then
		if [ -n "${GIT_PS1_SHOWDIRTYSTATE-}" ] &&
		   [ "$(git config --bool bash.showDirtyState)" != "false" ]
		then
			git diff --no-ext-diff --quiet --exit-code || w="*"
			if [ -n "$short_sha" ]; then
				git diff-index --cached --quiet HEAD -- || i="+"
			else
				# no commits yet: staged marker is '#'
				i="#"
			fi
		fi
		if [ -n "${GIT_PS1_SHOWSTASHSTATE-}" ] &&
		   [ -r "$g/refs/stash" ]; then
			s="$"
		fi
		if [ -n "${GIT_PS1_SHOWUNTRACKEDFILES-}" ] &&
		   [ "$(git config --bool bash.showUntrackedFiles)" != "false" ] &&
		   git ls-files --others --exclude-standard --error-unmatch -- '*' >/dev/null 2>/dev/null
		then
			u="%${ZSH_VERSION+%}"
		fi
		if [ -n "${GIT_PS1_SHOWUPSTREAM-}" ]; then
			__git_ps1_show_upstream
		fi
	fi
	local z="${GIT_PS1_STATESEPARATOR-" "}"
	# NO color option unless in PROMPT_COMMAND mode
	if [ $pcmode = yes ] && [ -n "${GIT_PS1_SHOWCOLORHINTS-}" ]; then
		__git_ps1_colorize_gitstring
	fi
	# NOTE(review): $b is repository-controlled data (a branch name) and is
	# expanded verbatim into PS1/the printf format output; with promptvars /
	# PROMPT_SUBST enabled a crafted branch name could be re-expanded by the
	# shell (command injection).  Upstream git later hardened this by passing
	# the branch name through a variable reference -- confirm against the
	# fixed version of contrib/completion/git-prompt.sh.
	local f="$w$i$s$u"
	local gitstring="$c${b##refs/heads/}${f:+$z$f}$r$p"
	if [ $pcmode = yes ]; then
		if [ "${__git_printf_supports_v-}" != yes ]; then
			gitstring=$(printf -- "$printf_format" "$gitstring")
		else
			printf -v gitstring -- "$printf_format" "$gitstring"
		fi
		PS1="$ps1pc_start$gitstring$ps1pc_end"
	else
		printf -- "$printf_format" "$gitstring"
	fi
}
|
# bash/zsh git prompt support
#
# Copyright (C) 2006,2007 Shawn O. Pearce <spearce@spearce.org>
# Distributed under the GNU General Public License, version 2.0.
#
# This script allows you to see repository status in your prompt.
#
# To enable:
#
# 1) Copy this file to somewhere (e.g. ~/.git-prompt.sh).
# 2) Add the following line to your .bashrc/.zshrc:
# source ~/.git-prompt.sh
# 3a) Change your PS1 to call __git_ps1 as
# command-substitution:
# Bash: PS1='[\u@\h \W$(__git_ps1 " (%s)")]\$ '
# ZSH: setopt PROMPT_SUBST ; PS1='[%n@%m %c$(__git_ps1 " (%s)")]\$ '
# the optional argument will be used as format string.
# 3b) Alternatively, for a slightly faster prompt, __git_ps1 can
# be used for PROMPT_COMMAND in Bash or for precmd() in Zsh
# with two parameters, <pre> and <post>, which are strings
# you would put in $PS1 before and after the status string
# generated by the git-prompt machinery. e.g.
# Bash: PROMPT_COMMAND='__git_ps1 "\u@\h:\w" "\\\$ "'
# will show username, at-sign, host, colon, cwd, then
# various status string, followed by dollar and SP, as
# your prompt.
# ZSH: precmd () { __git_ps1 "%n" ":%~$ " "|%s" }
# will show username, pipe, then various status string,
# followed by colon, cwd, dollar and SP, as your prompt.
# Optionally, you can supply a third argument with a printf
# format string to finetune the output of the branch status
#
# The repository status will be displayed only if you are currently in a
# git repository. The %s token is the placeholder for the shown status.
#
# The prompt status always includes the current branch name.
#
# In addition, if you set GIT_PS1_SHOWDIRTYSTATE to a nonempty value,
# unstaged (*) and staged (+) changes will be shown next to the branch
# name. You can configure this per-repository with the
# bash.showDirtyState variable, which defaults to true once
# GIT_PS1_SHOWDIRTYSTATE is enabled.
#
# You can also see if currently something is stashed, by setting
# GIT_PS1_SHOWSTASHSTATE to a nonempty value. If something is stashed,
# then a '$' will be shown next to the branch name.
#
# If you would like to see if there're untracked files, then you can set
# GIT_PS1_SHOWUNTRACKEDFILES to a nonempty value. If there're untracked
# files, then a '%' will be shown next to the branch name. You can
# configure this per-repository with the bash.showUntrackedFiles
# variable, which defaults to true once GIT_PS1_SHOWUNTRACKEDFILES is
# enabled.
#
# If you would like to see the difference between HEAD and its upstream,
# set GIT_PS1_SHOWUPSTREAM="auto". A "<" indicates you are behind, ">"
# indicates you are ahead, "<>" indicates you have diverged and "="
# indicates that there is no difference. You can further control
# behaviour by setting GIT_PS1_SHOWUPSTREAM to a space-separated list
# of values:
#
# verbose show number of commits ahead/behind (+/-) upstream
# name if verbose, then also show the upstream abbrev name
# legacy don't use the '--count' option available in recent
# versions of git-rev-list
# git always compare HEAD to @{upstream}
# svn always compare HEAD to your SVN upstream
#
# By default, __git_ps1 will compare HEAD to your SVN upstream if it can
# find one, or @{upstream} otherwise. Once you have set
# GIT_PS1_SHOWUPSTREAM, you can override it on a per-repository basis by
# setting the bash.showUpstream config variable.
#
# If you would like to see more information about the identity of
# commits checked out as a detached HEAD, set GIT_PS1_DESCRIBE_STYLE
# to one of these values:
#
# contains relative to newer annotated tag (v1.6.3.2~35)
# branch relative to newer tag or branch (master~4)
# describe relative to older annotated tag (v1.6.3.1-13-gdd42c2f)
# default exactly matching tag
#
# If you would like a colored hint about the current dirty state, set
# GIT_PS1_SHOWCOLORHINTS to a nonempty value. The colors are based on
# the colored output of "git status -sb" and are available only when
# using __git_ps1 for PROMPT_COMMAND or precmd.
# check whether printf supports -v
# (bash's builtin printf does; the probe leaves the variable set to "yes"
# only when -v assignment works, which __git_ps1 later tests)
__git_printf_supports_v=
printf -v __git_printf_supports_v -- '%s' yes >/dev/null 2>&1
# stores the divergence from upstream in $p
# used by GIT_PS1_SHOWUPSTREAM
# __git_ps1_show_upstream: helper for __git_ps1.
#
# Computes how HEAD relates to its upstream and stores the resulting
# indicator in the caller's $p variable:
#   ""    no upstream
#   "="   equal to upstream      (verbose: " u=")
#   ">"   ahead of upstream      (verbose: " u+N")
#   "<"   behind upstream        (verbose: " u-N")
#   "<>"  diverged               (verbose: " u+N-M")
# When the "name" option of GIT_PS1_SHOWUPSTREAM is active, the
# upstream ref name is additionally published via the global
# $__git_ps1_upstream_name (pcmode) or appended directly (non-pcmode).
# Reads GIT_PS1_SHOWUPSTREAM plus the bash.showUpstream and
# svn-remote.*.url git config values.
__git_ps1_show_upstream ()
{
	local key value
	local svn_remote svn_url_pattern count n
	# "option" must be local too, otherwise the parsing loop below
	# leaks a stray variable into the user's interactive shell every
	# time the prompt is drawn.
	local option
	local upstream=git legacy="" verbose="" name=""
	svn_remote=()
	# get some config options from git-config
	local output="$(git config -z --get-regexp '^(svn-remote\..*\.url|bash\.showupstream)$' 2>/dev/null | tr '\0\n' '\n ')"
	while read -r key value; do
		case "$key" in
		bash.showupstream)
			GIT_PS1_SHOWUPSTREAM="$value"
			if [[ -z "${GIT_PS1_SHOWUPSTREAM}" ]]; then
				p=""
				return
			fi
			;;
		svn-remote.*.url)
			svn_remote[$((${#svn_remote[@]} + 1))]="$value"
			svn_url_pattern="$svn_url_pattern\\|$value"
			upstream=svn+git # default upstream is SVN if available, else git
			;;
		esac
	done <<< "$output"
	# parse configuration values
	for option in ${GIT_PS1_SHOWUPSTREAM}; do
		case "$option" in
		git|svn) upstream="$option" ;;
		verbose) verbose=1 ;;
		legacy) legacy=1 ;;
		name) name=1 ;;
		esac
	done
	# Find our upstream
	case "$upstream" in
	git) upstream="@{upstream}" ;;
	svn*)
		# get the upstream from the "git-svn-id: ..." in a commit message
		# (git-svn uses essentially the same procedure internally)
		local -a svn_upstream
		svn_upstream=($(git log --first-parent -1 \
			--grep="^git-svn-id: \(${svn_url_pattern#??}\)" 2>/dev/null))
		if [[ 0 -ne ${#svn_upstream[@]} ]]; then
			# second-to-last word of the git-svn-id line is the URL
			svn_upstream=${svn_upstream[${#svn_upstream[@]} - 2]}
			svn_upstream=${svn_upstream%@*}
			local n_stop="${#svn_remote[@]}"
			for ((n=1; n <= n_stop; n++)); do
				svn_upstream=${svn_upstream#${svn_remote[$n]}}
			done
			if [[ -z "$svn_upstream" ]]; then
				# default branch name for checkouts with no layout:
				upstream=${GIT_SVN_ID:-git-svn}
			else
				upstream=${svn_upstream#/}
			fi
		elif [[ "svn+git" = "$upstream" ]]; then
			upstream="@{upstream}"
		fi
		;;
	esac
	# Find how many commits we are ahead/behind our upstream
	if [[ -z "$legacy" ]]; then
		count="$(git rev-list --count --left-right \
			"$upstream"...HEAD 2>/dev/null)"
	else
		# produce equivalent output to --count for older versions of git
		local commits
		if commits="$(git rev-list --left-right "$upstream"...HEAD 2>/dev/null)"
		then
			local commit behind=0 ahead=0
			for commit in $commits
			do
				case "$commit" in
				"<"*) ((behind++)) ;;
				*) ((ahead++)) ;;
				esac
			done
			count="$behind $ahead"
		else
			count=""
		fi
	fi
	# calculate the result
	# NOTE(review): `git rev-list --count --left-right` separates the
	# two counts with a TAB, while the patterns below use a plain
	# space; the whitespace here may have been mangled from tabs in
	# transit — verify against the upstream file before relying on it.
	if [[ -z "$verbose" ]]; then
		case "$count" in
		"") # no upstream
			p="" ;;
		"0 0") # equal to upstream
			p="=" ;;
		"0 "*) # ahead of upstream
			p=">" ;;
		*" 0") # behind upstream
			p="<" ;;
		*) # diverged from upstream
			p="<>" ;;
		esac
	else
		case "$count" in
		"") # no upstream
			p="" ;;
		"0 0") # equal to upstream
			p=" u=" ;;
		"0 "*) # ahead of upstream
			p=" u+${count#0 }" ;;
		*" 0") # behind upstream
			p=" u-${count% 0}" ;;
		*) # diverged from upstream
			p=" u+${count#* }-${count% *}" ;;
		esac
		if [[ -n "$count" && -n "$name" ]]; then
			__git_ps1_upstream_name=$(git rev-parse \
				--abbrev-ref "$upstream" 2>/dev/null)
			if [ $pcmode = yes ]; then
				# see the comments around the
				# __git_ps1_branch_name variable below
				p="$p \${__git_ps1_upstream_name}"
			else
				p="$p ${__git_ps1_upstream_name}"
				# not needed anymore; keep user's
				# environment clean
				unset __git_ps1_upstream_name
			fi
		fi
	fi
}
# Helper function that is meant to be called from __git_ps1. It
# injects color codes into the appropriate gitstring variables used
# to build a gitstring.
# Helper for __git_ps1: wraps the pieces of the prompt string in
# terminal color codes. Mutates the globals c, z, w, i, s, u and r
# in place; the branch color is picked from the global $detached.
__git_ps1_colorize_gitstring ()
{
	local red green lblue reset
	if [[ -n ${ZSH_VERSION-} ]]; then
		# zsh prompt escapes
		red='%F{red}'
		green='%F{green}'
		lblue='%F{blue}'
		reset='%f'
	else
		# \[ and \] keep bash's line editing from miscounting
		# the visible prompt width
		red='\[\e[31m\]'
		green='\[\e[32m\]'
		lblue='\[\e[1;34m\]'
		reset='\[\e[0m\]'
	fi
	# a detached HEAD is shown in the "bad" (red) color
	local branch_color="$red"
	if [ $detached = no ]; then
		branch_color="$green"
	fi
	c="$branch_color$c"
	z="$reset$z"
	# dirty-worktree marker is always bad; staged marker is good;
	# stash gets the flags color; untracked is bad
	[ "$w" = "*" ] && w="$red$w"
	[ -n "$i" ] && i="$green$i"
	[ -n "$s" ] && s="$lblue$s"
	[ -n "$u" ] && u="$red$u"
	r="$reset$r"
}
# __git_ps1 accepts 0 or 1 arguments (i.e., format string)
# when called from PS1 using command substitution
# in this mode it prints text to add to bash PS1 prompt (includes branch name)
#
# __git_ps1 requires 2 or 3 arguments when called from PROMPT_COMMAND (pc)
# in that case it _sets_ PS1. The arguments are parts of a PS1 string.
# when two arguments are given, the first is prepended and the second appended
# to the state string when assigned to PS1.
# The optional third parameter will be used as printf format string to further
# customize the output of the git-status string.
# In this mode you can request colored hints using GIT_PS1_SHOWCOLORHINTS=true
# __git_ps1: build the git part of the prompt.
# With 0 or 1 arguments it prints the status string (for use via
# command substitution in PS1); with 2 or 3 arguments it runs in
# PROMPT_COMMAND mode and assigns PS1 itself from the given
# prefix/suffix (plus an optional printf format for the git bit).
__git_ps1 ()
{
local pcmode=no
local detached=no
local ps1pc_start='\u@\h:\w '
local ps1pc_end='\$ '
local printf_format=' (%s)'
case "$#" in
2|3) pcmode=yes
ps1pc_start="$1"
ps1pc_end="$2"
printf_format="${3:-$printf_format}"
;;
0|1) printf_format="${1:-$printf_format}"
;;
*) return
;;
esac
# One git invocation gathers everything needed: the gitdir path,
# three boolean facts, and (only when HEAD resolves) its short SHA,
# one field per output line.
local repo_info rev_parse_exit_code
repo_info="$(git rev-parse --git-dir --is-inside-git-dir \
--is-bare-repository --is-inside-work-tree \
--short HEAD 2>/dev/null)"
rev_parse_exit_code="$?"
if [ -z "$repo_info" ]; then
if [ $pcmode = yes ]; then
#In PC mode PS1 always needs to be set
PS1="$ps1pc_start$ps1pc_end"
fi
return
fi
# rev-parse fails (non-zero) on an unborn branch, in which case the
# short SHA line is absent and short_sha stays empty.
local short_sha
if [ "$rev_parse_exit_code" = "0" ]; then
short_sha="${repo_info##*$'\n'}"
repo_info="${repo_info%$'\n'*}"
fi
# Peel the remaining fields off repo_info, last line first.
local inside_worktree="${repo_info##*$'\n'}"
repo_info="${repo_info%$'\n'*}"
local bare_repo="${repo_info##*$'\n'}"
repo_info="${repo_info%$'\n'*}"
local inside_gitdir="${repo_info##*$'\n'}"
local g="${repo_info%$'\n'*}"
# r: "|REBASE-i"-style annotation of an in-progress operation,
# detected from state files under the git dir ($g); b: branch name;
# step/total: progress through a rebase.
local r=""
local b=""
local step=""
local total=""
if [ -d "$g/rebase-merge" ]; then
read b 2>/dev/null <"$g/rebase-merge/head-name"
read step 2>/dev/null <"$g/rebase-merge/msgnum"
read total 2>/dev/null <"$g/rebase-merge/end"
if [ -f "$g/rebase-merge/interactive" ]; then
r="|REBASE-i"
else
r="|REBASE-m"
fi
else
if [ -d "$g/rebase-apply" ]; then
read step 2>/dev/null <"$g/rebase-apply/next"
read total 2>/dev/null <"$g/rebase-apply/last"
if [ -f "$g/rebase-apply/rebasing" ]; then
read b 2>/dev/null <"$g/rebase-apply/head-name"
r="|REBASE"
elif [ -f "$g/rebase-apply/applying" ]; then
r="|AM"
else
r="|AM/REBASE"
fi
elif [ -f "$g/MERGE_HEAD" ]; then
r="|MERGING"
elif [ -f "$g/CHERRY_PICK_HEAD" ]; then
r="|CHERRY-PICKING"
elif [ -f "$g/REVERT_HEAD" ]; then
r="|REVERTING"
elif [ -f "$g/BISECT_LOG" ]; then
r="|BISECTING"
fi
# Resolve the branch name if the rebase state didn't supply it.
if [ -n "$b" ]; then
:
elif [ -h "$g/HEAD" ]; then
# symlink symbolic ref
b="$(git symbolic-ref HEAD 2>/dev/null)"
else
local head=""
if ! read head 2>/dev/null <"$g/HEAD"; then
if [ $pcmode = yes ]; then
PS1="$ps1pc_start$ps1pc_end"
fi
return
fi
# is it a symbolic ref?
b="${head#ref: }"
if [ "$head" = "$b" ]; then
# detached HEAD: describe it per GIT_PS1_DESCRIBE_STYLE,
# falling back to the short SHA, and parenthesize it
detached=yes
b="$(
case "${GIT_PS1_DESCRIBE_STYLE-}" in
(contains)
git describe --contains HEAD ;;
(branch)
git describe --contains --all HEAD ;;
(describe)
git describe HEAD ;;
(* | default)
git describe --tags --exact-match HEAD ;;
esac 2>/dev/null)" ||
b="$short_sha..."
b="($b)"
fi
fi
fi
if [ -n "$step" ] && [ -n "$total" ]; then
r="$r $step/$total"
fi
# Single-character state indicators, filled in below:
# w dirty worktree, i dirty index, s stash present, u untracked
# files, c bare/gitdir prefix, p upstream divergence.
local w=""
local i=""
local s=""
local u=""
local c=""
local p=""
if [ "true" = "$inside_gitdir" ]; then
if [ "true" = "$bare_repo" ]; then
c="BARE:"
else
b="GIT_DIR!"
fi
elif [ "true" = "$inside_worktree" ]; then
# each indicator is gated on its GIT_PS1_* variable and can be
# vetoed per-repository via git config
if [ -n "${GIT_PS1_SHOWDIRTYSTATE-}" ] &&
[ "$(git config --bool bash.showDirtyState)" != "false" ]
then
git diff --no-ext-diff --quiet --exit-code || w="*"
if [ -n "$short_sha" ]; then
git diff-index --cached --quiet HEAD -- || i="+"
else
i="#"
fi
fi
if [ -n "${GIT_PS1_SHOWSTASHSTATE-}" ] &&
[ -r "$g/refs/stash" ]; then
s="$"
fi
if [ -n "${GIT_PS1_SHOWUNTRACKEDFILES-}" ] &&
[ "$(git config --bool bash.showUntrackedFiles)" != "false" ] &&
git ls-files --others --exclude-standard --error-unmatch -- '*' >/dev/null 2>/dev/null
then
u="%${ZSH_VERSION+%}"
fi
if [ -n "${GIT_PS1_SHOWUPSTREAM-}" ]; then
__git_ps1_show_upstream
fi
fi
local z="${GIT_PS1_STATESEPARATOR-" "}"
# NO color option unless in PROMPT_COMMAND mode
if [ $pcmode = yes ] && [ -n "${GIT_PS1_SHOWCOLORHINTS-}" ]; then
__git_ps1_colorize_gitstring
fi
b=${b##refs/heads/}
if [ $pcmode = yes ]; then
# In pcmode (and only pcmode) the contents of
# $gitstring are subject to expansion by the shell.
# Avoid putting the raw ref name in the prompt to
# protect the user from arbitrary code execution via
# specially crafted ref names (e.g., a ref named
# '$(IFS=_;cmd=sudo_rm_-rf_/;$cmd)' would execute
# 'sudo rm -rf /' when the prompt is drawn). Instead,
# put the ref name in a new global variable (in the
# __git_ps1_* namespace to avoid colliding with the
# user's environment) and reference that variable from
# PS1.
__git_ps1_branch_name=$b
# note that the $ is escaped -- the variable will be
# expanded later (when it's time to draw the prompt)
b="\${__git_ps1_branch_name}"
fi
local f="$w$i$s$u"
local gitstring="$c$b${f:+$z$f}$r$p"
if [ $pcmode = yes ]; then
# use printf -v when available (probed at load time);
# otherwise fall back to a command substitution
if [ "${__git_printf_supports_v-}" != yes ]; then
gitstring=$(printf -- "$printf_format" "$gitstring")
else
printf -v gitstring -- "$printf_format" "$gitstring"
fi
PS1="$ps1pc_start$gitstring$ps1pc_end"
else
printf -- "$printf_format" "$gitstring"
fi
}
|
2439_0
|
crossvul
|
sh
|
CWE-116
|
Improper Encoding or Escaping of Output - Improper encoding or escaping can allow attackers to change the commands that are sent to another component, inserting malicious commands instead.
|
shell
|
#!/bin/sh
#
# Copyright (c) 2012 SZEDER Gábor
#
test_description='test git-specific bash prompt functions'
. ./lib-bash.sh
. "$GIT_BUILD_DIR/contrib/completion/git-prompt.sh"
# Scratch file each test writes the prompt output into.
actual="$TRASH_DIRECTORY/actual"
# Expected color escape sequences. Backslashes are doubled because
# these values are interpolated into printf format strings when the
# tests build their "expected" files, and printf collapses each \\
# back to a single backslash.
c_red='\\[\\e[31m\\]'
c_green='\\[\\e[32m\\]'
c_lblue='\\[\\e[1;34m\\]'
c_clear='\\[\\e[0m\\]'
test_expect_success 'setup for prompt tests' '
git init otherrepo &&
echo 1 >file &&
git add file &&
test_tick &&
git commit -m initial &&
git tag -a -m msg1 t1 &&
git checkout -b b1 &&
echo 2 >file &&
git commit -m "second b1" file &&
echo 3 >file &&
git commit -m "third b1" file &&
git tag -a -m msg2 t2 &&
git checkout -b b2 master &&
echo 0 >file &&
git commit -m "second b2" file &&
echo 00 >file &&
git commit -m "another b2" file &&
echo 000 >file &&
git commit -m "yet another b2" file &&
git checkout master
'
test_expect_success 'prompt - branch name' '
printf " (master)" >expected &&
__git_ps1 >"$actual" &&
test_cmp expected "$actual"
'
test_expect_success SYMLINKS 'prompt - branch name - symlink symref' '
printf " (master)" >expected &&
test_when_finished "git checkout master" &&
test_config core.preferSymlinkRefs true &&
git checkout master &&
__git_ps1 >"$actual" &&
test_cmp expected "$actual"
'
test_expect_success 'prompt - unborn branch' '
printf " (unborn)" >expected &&
git checkout --orphan unborn &&
test_when_finished "git checkout master" &&
__git_ps1 >"$actual" &&
test_cmp expected "$actual"
'
repo_with_newline='repo
with
newline'
if mkdir "$repo_with_newline" 2>/dev/null
then
test_set_prereq FUNNYNAMES
else
say 'Your filesystem does not allow newlines in filenames.'
fi
test_expect_success FUNNYNAMES 'prompt - with newline in path' '
printf " (master)" >expected &&
git init "$repo_with_newline" &&
test_when_finished "rm -rf \"$repo_with_newline\"" &&
mkdir "$repo_with_newline"/subdir &&
(
cd "$repo_with_newline/subdir" &&
__git_ps1 >"$actual"
) &&
test_cmp expected "$actual"
'
test_expect_success 'prompt - detached head' '
printf " ((%s...))" $(git log -1 --format="%h" --abbrev=13 b1^) >expected &&
test_config core.abbrev 13 &&
git checkout b1^ &&
test_when_finished "git checkout master" &&
__git_ps1 >"$actual" &&
test_cmp expected "$actual"
'
test_expect_success 'prompt - describe detached head - contains' '
printf " ((t2~1))" >expected &&
git checkout b1^ &&
test_when_finished "git checkout master" &&
(
GIT_PS1_DESCRIBE_STYLE=contains &&
__git_ps1 >"$actual"
) &&
test_cmp expected "$actual"
'
test_expect_success 'prompt - describe detached head - branch' '
printf " ((b1~1))" >expected &&
git checkout b1^ &&
test_when_finished "git checkout master" &&
(
GIT_PS1_DESCRIBE_STYLE=branch &&
__git_ps1 >"$actual"
) &&
test_cmp expected "$actual"
'
test_expect_success 'prompt - describe detached head - describe' '
printf " ((t1-1-g%s))" $(git log -1 --format="%h" b1^) >expected &&
git checkout b1^ &&
test_when_finished "git checkout master" &&
(
GIT_PS1_DESCRIBE_STYLE=describe &&
__git_ps1 >"$actual"
) &&
test_cmp expected "$actual"
'
test_expect_success 'prompt - describe detached head - default' '
printf " ((t2))" >expected &&
git checkout --detach b1 &&
test_when_finished "git checkout master" &&
__git_ps1 >"$actual" &&
test_cmp expected "$actual"
'
test_expect_success 'prompt - inside .git directory' '
printf " (GIT_DIR!)" >expected &&
(
cd .git &&
__git_ps1 >"$actual"
) &&
test_cmp expected "$actual"
'
test_expect_success 'prompt - deep inside .git directory' '
printf " (GIT_DIR!)" >expected &&
(
cd .git/refs/heads &&
__git_ps1 >"$actual"
) &&
test_cmp expected "$actual"
'
test_expect_success 'prompt - inside bare repository' '
printf " (BARE:master)" >expected &&
git init --bare bare.git &&
test_when_finished "rm -rf bare.git" &&
(
cd bare.git &&
__git_ps1 >"$actual"
) &&
test_cmp expected "$actual"
'
# Fix: the test body must be a single &&-chain; the original dropped
# the && after the printf writing "expected" and after the final
# test_when_finished, so a failure of either command (or of anything
# before the next &&-joined command) was silently ignored and the
# test could pass vacuously.
test_expect_success 'prompt - interactive rebase' '
printf " (b1|REBASE-i 2/3)" >expected &&
write_script fake_editor.sh <<-\EOF &&
echo "exec echo" >"$1"
echo "edit $(git log -1 --format="%h")" >>"$1"
echo "exec echo" >>"$1"
EOF
test_when_finished "rm -f fake_editor.sh" &&
test_set_editor "$TRASH_DIRECTORY/fake_editor.sh" &&
git checkout b1 &&
test_when_finished "git checkout master" &&
git rebase -i HEAD^ &&
test_when_finished "git rebase --abort" &&
__git_ps1 >"$actual" &&
test_cmp expected "$actual"
'
test_expect_success 'prompt - rebase merge' '
printf " (b2|REBASE-m 1/3)" >expected &&
git checkout b2 &&
test_when_finished "git checkout master" &&
test_must_fail git rebase --merge b1 b2 &&
test_when_finished "git rebase --abort" &&
__git_ps1 >"$actual" &&
test_cmp expected "$actual"
'
test_expect_success 'prompt - rebase' '
printf " (b2|REBASE 1/3)" >expected &&
git checkout b2 &&
test_when_finished "git checkout master" &&
test_must_fail git rebase b1 b2 &&
test_when_finished "git rebase --abort" &&
__git_ps1 >"$actual" &&
test_cmp expected "$actual"
'
test_expect_success 'prompt - merge' '
printf " (b1|MERGING)" >expected &&
git checkout b1 &&
test_when_finished "git checkout master" &&
test_must_fail git merge b2 &&
test_when_finished "git reset --hard" &&
__git_ps1 >"$actual" &&
test_cmp expected "$actual"
'
test_expect_success 'prompt - cherry-pick' '
printf " (master|CHERRY-PICKING)" >expected &&
test_must_fail git cherry-pick b1 &&
test_when_finished "git reset --hard" &&
__git_ps1 >"$actual" &&
test_cmp expected "$actual"
'
test_expect_success 'prompt - bisect' '
printf " (master|BISECTING)" >expected &&
git bisect start &&
test_when_finished "git bisect reset" &&
__git_ps1 >"$actual" &&
test_cmp expected "$actual"
'
test_expect_success 'prompt - dirty status indicator - clean' '
printf " (master)" >expected &&
(
GIT_PS1_SHOWDIRTYSTATE=y &&
__git_ps1 >"$actual"
) &&
test_cmp expected "$actual"
'
test_expect_success 'prompt - dirty status indicator - dirty worktree' '
printf " (master *)" >expected &&
echo "dirty" >file &&
test_when_finished "git reset --hard" &&
(
GIT_PS1_SHOWDIRTYSTATE=y &&
__git_ps1 >"$actual"
) &&
test_cmp expected "$actual"
'
test_expect_success 'prompt - dirty status indicator - dirty index' '
printf " (master +)" >expected &&
echo "dirty" >file &&
test_when_finished "git reset --hard" &&
git add -u &&
(
GIT_PS1_SHOWDIRTYSTATE=y &&
__git_ps1 >"$actual"
) &&
test_cmp expected "$actual"
'
test_expect_success 'prompt - dirty status indicator - dirty index and worktree' '
printf " (master *+)" >expected &&
echo "dirty index" >file &&
test_when_finished "git reset --hard" &&
git add -u &&
echo "dirty worktree" >file &&
(
GIT_PS1_SHOWDIRTYSTATE=y &&
__git_ps1 >"$actual"
) &&
test_cmp expected "$actual"
'
test_expect_success 'prompt - dirty status indicator - before root commit' '
printf " (master #)" >expected &&
(
GIT_PS1_SHOWDIRTYSTATE=y &&
cd otherrepo &&
__git_ps1 >"$actual"
) &&
test_cmp expected "$actual"
'
test_expect_success 'prompt - dirty status indicator - shell variable unset with config disabled' '
printf " (master)" >expected &&
echo "dirty" >file &&
test_when_finished "git reset --hard" &&
test_config bash.showDirtyState false &&
(
sane_unset GIT_PS1_SHOWDIRTYSTATE &&
__git_ps1 >"$actual"
) &&
test_cmp expected "$actual"
'
test_expect_success 'prompt - dirty status indicator - shell variable unset with config enabled' '
printf " (master)" >expected &&
echo "dirty" >file &&
test_when_finished "git reset --hard" &&
test_config bash.showDirtyState true &&
(
sane_unset GIT_PS1_SHOWDIRTYSTATE &&
__git_ps1 >"$actual"
) &&
test_cmp expected "$actual"
'
test_expect_success 'prompt - dirty status indicator - shell variable set with config disabled' '
printf " (master)" >expected &&
echo "dirty" >file &&
test_when_finished "git reset --hard" &&
test_config bash.showDirtyState false &&
(
GIT_PS1_SHOWDIRTYSTATE=y &&
__git_ps1 >"$actual"
) &&
test_cmp expected "$actual"
'
test_expect_success 'prompt - dirty status indicator - shell variable set with config enabled' '
printf " (master *)" >expected &&
echo "dirty" >file &&
test_when_finished "git reset --hard" &&
test_config bash.showDirtyState true &&
(
GIT_PS1_SHOWDIRTYSTATE=y &&
__git_ps1 >"$actual"
) &&
test_cmp expected "$actual"
'
test_expect_success 'prompt - dirty status indicator - not shown inside .git directory' '
printf " (GIT_DIR!)" >expected &&
echo "dirty" >file &&
test_when_finished "git reset --hard" &&
(
GIT_PS1_SHOWDIRTYSTATE=y &&
cd .git &&
__git_ps1 >"$actual"
) &&
test_cmp expected "$actual"
'
test_expect_success 'prompt - stash status indicator - no stash' '
printf " (master)" >expected &&
(
GIT_PS1_SHOWSTASHSTATE=y &&
__git_ps1 >"$actual"
) &&
test_cmp expected "$actual"
'
test_expect_success 'prompt - stash status indicator - stash' '
printf " (master $)" >expected &&
echo 2 >file &&
git stash &&
test_when_finished "git stash drop" &&
git pack-refs --all &&
(
GIT_PS1_SHOWSTASHSTATE=y &&
__git_ps1 >"$actual"
) &&
test_cmp expected "$actual"
'
test_expect_success 'prompt - stash status indicator - not shown inside .git directory' '
printf " (GIT_DIR!)" >expected &&
echo 2 >file &&
git stash &&
test_when_finished "git stash drop" &&
(
GIT_PS1_SHOWSTASHSTATE=y &&
cd .git &&
__git_ps1 >"$actual"
) &&
test_cmp expected "$actual"
'
test_expect_success 'prompt - untracked files status indicator - no untracked files' '
printf " (master)" >expected &&
(
GIT_PS1_SHOWUNTRACKEDFILES=y &&
cd otherrepo &&
__git_ps1 >"$actual"
) &&
test_cmp expected "$actual"
'
test_expect_success 'prompt - untracked files status indicator - untracked files' '
printf " (master %%)" >expected &&
(
GIT_PS1_SHOWUNTRACKEDFILES=y &&
__git_ps1 >"$actual"
) &&
test_cmp expected "$actual"
'
test_expect_success 'prompt - untracked files status indicator - shell variable unset with config disabled' '
printf " (master)" >expected &&
test_config bash.showUntrackedFiles false &&
(
sane_unset GIT_PS1_SHOWUNTRACKEDFILES &&
__git_ps1 >"$actual"
) &&
test_cmp expected "$actual"
'
test_expect_success 'prompt - untracked files status indicator - shell variable unset with config enabled' '
printf " (master)" >expected &&
test_config bash.showUntrackedFiles true &&
(
sane_unset GIT_PS1_SHOWUNTRACKEDFILES &&
__git_ps1 >"$actual"
) &&
test_cmp expected "$actual"
'
test_expect_success 'prompt - untracked files status indicator - shell variable set with config disabled' '
printf " (master)" >expected &&
test_config bash.showUntrackedFiles false &&
(
GIT_PS1_SHOWUNTRACKEDFILES=y &&
__git_ps1 >"$actual"
) &&
test_cmp expected "$actual"
'
test_expect_success 'prompt - untracked files status indicator - shell variable set with config enabled' '
printf " (master %%)" >expected &&
test_config bash.showUntrackedFiles true &&
(
GIT_PS1_SHOWUNTRACKEDFILES=y &&
__git_ps1 >"$actual"
) &&
test_cmp expected "$actual"
'
test_expect_success 'prompt - untracked files status indicator - not shown inside .git directory' '
printf " (GIT_DIR!)" >expected &&
(
GIT_PS1_SHOWUNTRACKEDFILES=y &&
cd .git &&
__git_ps1 >"$actual"
) &&
test_cmp expected "$actual"
'
test_expect_success 'prompt - format string starting with dash' '
printf -- "-master" >expected &&
__git_ps1 "-%s" >"$actual" &&
test_cmp expected "$actual"
'
test_expect_success 'prompt - pc mode' '
printf "BEFORE: (master):AFTER" >expected &&
printf "" >expected_output &&
(
__git_ps1 "BEFORE:" ":AFTER" >"$actual" &&
test_cmp expected_output "$actual" &&
printf "%s" "$PS1" >"$actual"
) &&
test_cmp expected "$actual"
'
test_expect_success 'prompt - bash color pc mode - branch name' '
printf "BEFORE: (${c_green}master${c_clear}):AFTER" >expected &&
(
GIT_PS1_SHOWCOLORHINTS=y &&
__git_ps1 "BEFORE:" ":AFTER" >"$actual"
printf "%s" "$PS1" >"$actual"
) &&
test_cmp expected "$actual"
'
test_expect_success 'prompt - bash color pc mode - detached head' '
printf "BEFORE: (${c_red}(%s...)${c_clear}):AFTER" $(git log -1 --format="%h" b1^) >expected &&
git checkout b1^ &&
test_when_finished "git checkout master" &&
(
GIT_PS1_SHOWCOLORHINTS=y &&
__git_ps1 "BEFORE:" ":AFTER" &&
printf "%s" "$PS1" >"$actual"
) &&
test_cmp expected "$actual"
'
test_expect_success 'prompt - bash color pc mode - dirty status indicator - dirty worktree' '
printf "BEFORE: (${c_green}master${c_clear} ${c_red}*${c_clear}):AFTER" >expected &&
echo "dirty" >file &&
test_when_finished "git reset --hard" &&
(
GIT_PS1_SHOWDIRTYSTATE=y &&
GIT_PS1_SHOWCOLORHINTS=y &&
__git_ps1 "BEFORE:" ":AFTER" &&
printf "%s" "$PS1" >"$actual"
) &&
test_cmp expected "$actual"
'
test_expect_success 'prompt - bash color pc mode - dirty status indicator - dirty index' '
printf "BEFORE: (${c_green}master${c_clear} ${c_green}+${c_clear}):AFTER" >expected &&
echo "dirty" >file &&
test_when_finished "git reset --hard" &&
git add -u &&
(
GIT_PS1_SHOWDIRTYSTATE=y &&
GIT_PS1_SHOWCOLORHINTS=y &&
__git_ps1 "BEFORE:" ":AFTER" &&
printf "%s" "$PS1" >"$actual"
) &&
test_cmp expected "$actual"
'
test_expect_success 'prompt - bash color pc mode - dirty status indicator - dirty index and worktree' '
printf "BEFORE: (${c_green}master${c_clear} ${c_red}*${c_green}+${c_clear}):AFTER" >expected &&
echo "dirty index" >file &&
test_when_finished "git reset --hard" &&
git add -u &&
echo "dirty worktree" >file &&
(
GIT_PS1_SHOWCOLORHINTS=y &&
GIT_PS1_SHOWDIRTYSTATE=y &&
__git_ps1 "BEFORE:" ":AFTER" &&
printf "%s" "$PS1" >"$actual"
) &&
test_cmp expected "$actual"
'
test_expect_success 'prompt - bash color pc mode - dirty status indicator - before root commit' '
printf "BEFORE: (${c_green}master${c_clear} ${c_green}#${c_clear}):AFTER" >expected &&
(
GIT_PS1_SHOWDIRTYSTATE=y &&
GIT_PS1_SHOWCOLORHINTS=y &&
cd otherrepo &&
__git_ps1 "BEFORE:" ":AFTER" &&
printf "%s" "$PS1" >"$actual"
) &&
test_cmp expected "$actual"
'
test_expect_success 'prompt - bash color pc mode - inside .git directory' '
printf "BEFORE: (${c_green}GIT_DIR!${c_clear}):AFTER" >expected &&
echo "dirty" >file &&
test_when_finished "git reset --hard" &&
(
GIT_PS1_SHOWDIRTYSTATE=y &&
GIT_PS1_SHOWCOLORHINTS=y &&
cd .git &&
__git_ps1 "BEFORE:" ":AFTER" &&
printf "%s" "$PS1" >"$actual"
) &&
test_cmp expected "$actual"
'
test_expect_success 'prompt - bash color pc mode - stash status indicator' '
printf "BEFORE: (${c_green}master${c_clear} ${c_lblue}\$${c_clear}):AFTER" >expected &&
echo 2 >file &&
git stash &&
test_when_finished "git stash drop" &&
(
GIT_PS1_SHOWSTASHSTATE=y &&
GIT_PS1_SHOWCOLORHINTS=y &&
__git_ps1 "BEFORE:" ":AFTER" &&
printf "%s" "$PS1" >"$actual"
) &&
test_cmp expected "$actual"
'
test_expect_success 'prompt - bash color pc mode - untracked files status indicator' '
printf "BEFORE: (${c_green}master${c_clear} ${c_red}%%${c_clear}):AFTER" >expected &&
(
GIT_PS1_SHOWUNTRACKEDFILES=y &&
GIT_PS1_SHOWCOLORHINTS=y &&
__git_ps1 "BEFORE:" ":AFTER" &&
printf "%s" "$PS1" >"$actual"
) &&
test_cmp expected "$actual"
'
test_expect_success 'prompt - zsh color pc mode' '
printf "BEFORE: (%%F{green}master%%f):AFTER" >expected &&
(
ZSH_VERSION=5.0.0 &&
GIT_PS1_SHOWCOLORHINTS=y &&
__git_ps1 "BEFORE:" ":AFTER" >"$actual"
printf "%s" "$PS1" >"$actual"
) &&
test_cmp expected "$actual"
'
test_done
|
#!/bin/sh
#
# Copyright (c) 2012 SZEDER Gábor
#
test_description='test git-specific bash prompt functions'
. ./lib-bash.sh
. "$GIT_BUILD_DIR/contrib/completion/git-prompt.sh"
actual="$TRASH_DIRECTORY/actual"
c_red='\\[\\e[31m\\]'
c_green='\\[\\e[32m\\]'
c_lblue='\\[\\e[1;34m\\]'
c_clear='\\[\\e[0m\\]'
test_expect_success 'setup for prompt tests' '
git init otherrepo &&
echo 1 >file &&
git add file &&
test_tick &&
git commit -m initial &&
git tag -a -m msg1 t1 &&
git checkout -b b1 &&
echo 2 >file &&
git commit -m "second b1" file &&
echo 3 >file &&
git commit -m "third b1" file &&
git tag -a -m msg2 t2 &&
git checkout -b b2 master &&
echo 0 >file &&
git commit -m "second b2" file &&
echo 00 >file &&
git commit -m "another b2" file &&
echo 000 >file &&
git commit -m "yet another b2" file &&
git checkout master
'
test_expect_success 'prompt - branch name' '
printf " (master)" >expected &&
__git_ps1 >"$actual" &&
test_cmp expected "$actual"
'
test_expect_success SYMLINKS 'prompt - branch name - symlink symref' '
printf " (master)" >expected &&
test_when_finished "git checkout master" &&
test_config core.preferSymlinkRefs true &&
git checkout master &&
__git_ps1 >"$actual" &&
test_cmp expected "$actual"
'
test_expect_success 'prompt - unborn branch' '
printf " (unborn)" >expected &&
git checkout --orphan unborn &&
test_when_finished "git checkout master" &&
__git_ps1 >"$actual" &&
test_cmp expected "$actual"
'
repo_with_newline='repo
with
newline'
if mkdir "$repo_with_newline" 2>/dev/null
then
test_set_prereq FUNNYNAMES
else
say 'Your filesystem does not allow newlines in filenames.'
fi
test_expect_success FUNNYNAMES 'prompt - with newline in path' '
printf " (master)" >expected &&
git init "$repo_with_newline" &&
test_when_finished "rm -rf \"$repo_with_newline\"" &&
mkdir "$repo_with_newline"/subdir &&
(
cd "$repo_with_newline/subdir" &&
__git_ps1 >"$actual"
) &&
test_cmp expected "$actual"
'
test_expect_success 'prompt - detached head' '
printf " ((%s...))" $(git log -1 --format="%h" --abbrev=13 b1^) >expected &&
test_config core.abbrev 13 &&
git checkout b1^ &&
test_when_finished "git checkout master" &&
__git_ps1 >"$actual" &&
test_cmp expected "$actual"
'
test_expect_success 'prompt - describe detached head - contains' '
printf " ((t2~1))" >expected &&
git checkout b1^ &&
test_when_finished "git checkout master" &&
(
GIT_PS1_DESCRIBE_STYLE=contains &&
__git_ps1 >"$actual"
) &&
test_cmp expected "$actual"
'
test_expect_success 'prompt - describe detached head - branch' '
printf " ((b1~1))" >expected &&
git checkout b1^ &&
test_when_finished "git checkout master" &&
(
GIT_PS1_DESCRIBE_STYLE=branch &&
__git_ps1 >"$actual"
) &&
test_cmp expected "$actual"
'
test_expect_success 'prompt - describe detached head - describe' '
printf " ((t1-1-g%s))" $(git log -1 --format="%h" b1^) >expected &&
git checkout b1^ &&
test_when_finished "git checkout master" &&
(
GIT_PS1_DESCRIBE_STYLE=describe &&
__git_ps1 >"$actual"
) &&
test_cmp expected "$actual"
'
test_expect_success 'prompt - describe detached head - default' '
printf " ((t2))" >expected &&
git checkout --detach b1 &&
test_when_finished "git checkout master" &&
__git_ps1 >"$actual" &&
test_cmp expected "$actual"
'
test_expect_success 'prompt - inside .git directory' '
printf " (GIT_DIR!)" >expected &&
(
cd .git &&
__git_ps1 >"$actual"
) &&
test_cmp expected "$actual"
'
test_expect_success 'prompt - deep inside .git directory' '
printf " (GIT_DIR!)" >expected &&
(
cd .git/refs/heads &&
__git_ps1 >"$actual"
) &&
test_cmp expected "$actual"
'
test_expect_success 'prompt - inside bare repository' '
printf " (BARE:master)" >expected &&
git init --bare bare.git &&
test_when_finished "rm -rf bare.git" &&
(
cd bare.git &&
__git_ps1 >"$actual"
) &&
test_cmp expected "$actual"
'
# Fix: the test body must be a single &&-chain; the original dropped
# the && after the printf writing "expected" and after the final
# test_when_finished, so a failure of either command (or of anything
# before the next &&-joined command) was silently ignored and the
# test could pass vacuously.
test_expect_success 'prompt - interactive rebase' '
printf " (b1|REBASE-i 2/3)" >expected &&
write_script fake_editor.sh <<-\EOF &&
echo "exec echo" >"$1"
echo "edit $(git log -1 --format="%h")" >>"$1"
echo "exec echo" >>"$1"
EOF
test_when_finished "rm -f fake_editor.sh" &&
test_set_editor "$TRASH_DIRECTORY/fake_editor.sh" &&
git checkout b1 &&
test_when_finished "git checkout master" &&
git rebase -i HEAD^ &&
test_when_finished "git rebase --abort" &&
__git_ps1 >"$actual" &&
test_cmp expected "$actual"
'
test_expect_success 'prompt - rebase merge' '
printf " (b2|REBASE-m 1/3)" >expected &&
git checkout b2 &&
test_when_finished "git checkout master" &&
test_must_fail git rebase --merge b1 b2 &&
test_when_finished "git rebase --abort" &&
__git_ps1 >"$actual" &&
test_cmp expected "$actual"
'
test_expect_success 'prompt - rebase' '
printf " (b2|REBASE 1/3)" >expected &&
git checkout b2 &&
test_when_finished "git checkout master" &&
test_must_fail git rebase b1 b2 &&
test_when_finished "git rebase --abort" &&
__git_ps1 >"$actual" &&
test_cmp expected "$actual"
'
test_expect_success 'prompt - merge' '
printf " (b1|MERGING)" >expected &&
git checkout b1 &&
test_when_finished "git checkout master" &&
test_must_fail git merge b2 &&
test_when_finished "git reset --hard" &&
__git_ps1 >"$actual" &&
test_cmp expected "$actual"
'
test_expect_success 'prompt - cherry-pick' '
printf " (master|CHERRY-PICKING)" >expected &&
test_must_fail git cherry-pick b1 &&
test_when_finished "git reset --hard" &&
__git_ps1 >"$actual" &&
test_cmp expected "$actual"
'
test_expect_success 'prompt - bisect' '
printf " (master|BISECTING)" >expected &&
git bisect start &&
test_when_finished "git bisect reset" &&
__git_ps1 >"$actual" &&
test_cmp expected "$actual"
'
test_expect_success 'prompt - dirty status indicator - clean' '
printf " (master)" >expected &&
(
GIT_PS1_SHOWDIRTYSTATE=y &&
__git_ps1 >"$actual"
) &&
test_cmp expected "$actual"
'
test_expect_success 'prompt - dirty status indicator - dirty worktree' '
printf " (master *)" >expected &&
echo "dirty" >file &&
test_when_finished "git reset --hard" &&
(
GIT_PS1_SHOWDIRTYSTATE=y &&
__git_ps1 >"$actual"
) &&
test_cmp expected "$actual"
'
test_expect_success 'prompt - dirty status indicator - dirty index' '
printf " (master +)" >expected &&
echo "dirty" >file &&
test_when_finished "git reset --hard" &&
git add -u &&
(
GIT_PS1_SHOWDIRTYSTATE=y &&
__git_ps1 >"$actual"
) &&
test_cmp expected "$actual"
'
test_expect_success 'prompt - dirty status indicator - dirty index and worktree' '
printf " (master *+)" >expected &&
echo "dirty index" >file &&
test_when_finished "git reset --hard" &&
git add -u &&
echo "dirty worktree" >file &&
(
GIT_PS1_SHOWDIRTYSTATE=y &&
__git_ps1 >"$actual"
) &&
test_cmp expected "$actual"
'
test_expect_success 'prompt - dirty status indicator - before root commit' '
printf " (master #)" >expected &&
(
GIT_PS1_SHOWDIRTYSTATE=y &&
cd otherrepo &&
__git_ps1 >"$actual"
) &&
test_cmp expected "$actual"
'
test_expect_success 'prompt - dirty status indicator - shell variable unset with config disabled' '
printf " (master)" >expected &&
echo "dirty" >file &&
test_when_finished "git reset --hard" &&
test_config bash.showDirtyState false &&
(
sane_unset GIT_PS1_SHOWDIRTYSTATE &&
__git_ps1 >"$actual"
) &&
test_cmp expected "$actual"
'
test_expect_success 'prompt - dirty status indicator - shell variable unset with config enabled' '
printf " (master)" >expected &&
echo "dirty" >file &&
test_when_finished "git reset --hard" &&
test_config bash.showDirtyState true &&
(
sane_unset GIT_PS1_SHOWDIRTYSTATE &&
__git_ps1 >"$actual"
) &&
test_cmp expected "$actual"
'
test_expect_success 'prompt - dirty status indicator - shell variable set with config disabled' '
printf " (master)" >expected &&
echo "dirty" >file &&
test_when_finished "git reset --hard" &&
test_config bash.showDirtyState false &&
(
GIT_PS1_SHOWDIRTYSTATE=y &&
__git_ps1 >"$actual"
) &&
test_cmp expected "$actual"
'
test_expect_success 'prompt - dirty status indicator - shell variable set with config enabled' '
printf " (master *)" >expected &&
echo "dirty" >file &&
test_when_finished "git reset --hard" &&
test_config bash.showDirtyState true &&
(
GIT_PS1_SHOWDIRTYSTATE=y &&
__git_ps1 >"$actual"
) &&
test_cmp expected "$actual"
'
test_expect_success 'prompt - dirty status indicator - not shown inside .git directory' '
printf " (GIT_DIR!)" >expected &&
echo "dirty" >file &&
test_when_finished "git reset --hard" &&
(
GIT_PS1_SHOWDIRTYSTATE=y &&
cd .git &&
__git_ps1 >"$actual"
) &&
test_cmp expected "$actual"
'
test_expect_success 'prompt - stash status indicator - no stash' '
printf " (master)" >expected &&
(
GIT_PS1_SHOWSTASHSTATE=y &&
__git_ps1 >"$actual"
) &&
test_cmp expected "$actual"
'
test_expect_success 'prompt - stash status indicator - stash' '
printf " (master $)" >expected &&
echo 2 >file &&
git stash &&
test_when_finished "git stash drop" &&
git pack-refs --all &&
(
GIT_PS1_SHOWSTASHSTATE=y &&
__git_ps1 >"$actual"
) &&
test_cmp expected "$actual"
'
test_expect_success 'prompt - stash status indicator - not shown inside .git directory' '
printf " (GIT_DIR!)" >expected &&
echo 2 >file &&
git stash &&
test_when_finished "git stash drop" &&
(
GIT_PS1_SHOWSTASHSTATE=y &&
cd .git &&
__git_ps1 >"$actual"
) &&
test_cmp expected "$actual"
'
test_expect_success 'prompt - untracked files status indicator - no untracked files' '
printf " (master)" >expected &&
(
GIT_PS1_SHOWUNTRACKEDFILES=y &&
cd otherrepo &&
__git_ps1 >"$actual"
) &&
test_cmp expected "$actual"
'
test_expect_success 'prompt - untracked files status indicator - untracked files' '
printf " (master %%)" >expected &&
(
GIT_PS1_SHOWUNTRACKEDFILES=y &&
__git_ps1 >"$actual"
) &&
test_cmp expected "$actual"
'
test_expect_success 'prompt - untracked files status indicator - shell variable unset with config disabled' '
printf " (master)" >expected &&
test_config bash.showUntrackedFiles false &&
(
sane_unset GIT_PS1_SHOWUNTRACKEDFILES &&
__git_ps1 >"$actual"
) &&
test_cmp expected "$actual"
'
test_expect_success 'prompt - untracked files status indicator - shell variable unset with config enabled' '
printf " (master)" >expected &&
test_config bash.showUntrackedFiles true &&
(
sane_unset GIT_PS1_SHOWUNTRACKEDFILES &&
__git_ps1 >"$actual"
) &&
test_cmp expected "$actual"
'
test_expect_success 'prompt - untracked files status indicator - shell variable set with config disabled' '
printf " (master)" >expected &&
test_config bash.showUntrackedFiles false &&
(
GIT_PS1_SHOWUNTRACKEDFILES=y &&
__git_ps1 >"$actual"
) &&
test_cmp expected "$actual"
'
test_expect_success 'prompt - untracked files status indicator - shell variable set with config enabled' '
printf " (master %%)" >expected &&
test_config bash.showUntrackedFiles true &&
(
GIT_PS1_SHOWUNTRACKEDFILES=y &&
__git_ps1 >"$actual"
) &&
test_cmp expected "$actual"
'
test_expect_success 'prompt - untracked files status indicator - not shown inside .git directory' '
printf " (GIT_DIR!)" >expected &&
(
GIT_PS1_SHOWUNTRACKEDFILES=y &&
cd .git &&
__git_ps1 >"$actual"
) &&
test_cmp expected "$actual"
'
test_expect_success 'prompt - format string starting with dash' '
printf -- "-master" >expected &&
__git_ps1 "-%s" >"$actual" &&
test_cmp expected "$actual"
'
test_expect_success 'prompt - pc mode' '
printf "BEFORE: (\${__git_ps1_branch_name}):AFTER\\nmaster" >expected &&
printf "" >expected_output &&
(
__git_ps1 "BEFORE:" ":AFTER" >"$actual" &&
test_cmp expected_output "$actual" &&
printf "%s\\n%s" "$PS1" "${__git_ps1_branch_name}" >"$actual"
) &&
test_cmp expected "$actual"
'
test_expect_success 'prompt - bash color pc mode - branch name' '
printf "BEFORE: (${c_green}\${__git_ps1_branch_name}${c_clear}):AFTER\\nmaster" >expected &&
(
GIT_PS1_SHOWCOLORHINTS=y &&
__git_ps1 "BEFORE:" ":AFTER" >"$actual"
printf "%s\\n%s" "$PS1" "${__git_ps1_branch_name}" >"$actual"
) &&
test_cmp expected "$actual"
'
test_expect_success 'prompt - bash color pc mode - detached head' '
printf "BEFORE: (${c_red}\${__git_ps1_branch_name}${c_clear}):AFTER\\n(%s...)" $(git log -1 --format="%h" b1^) >expected &&
git checkout b1^ &&
test_when_finished "git checkout master" &&
(
GIT_PS1_SHOWCOLORHINTS=y &&
__git_ps1 "BEFORE:" ":AFTER" &&
printf "%s\\n%s" "$PS1" "${__git_ps1_branch_name}" >"$actual"
) &&
test_cmp expected "$actual"
'
test_expect_success 'prompt - bash color pc mode - dirty status indicator - dirty worktree' '
printf "BEFORE: (${c_green}\${__git_ps1_branch_name}${c_clear} ${c_red}*${c_clear}):AFTER\\nmaster" >expected &&
echo "dirty" >file &&
test_when_finished "git reset --hard" &&
(
GIT_PS1_SHOWDIRTYSTATE=y &&
GIT_PS1_SHOWCOLORHINTS=y &&
__git_ps1 "BEFORE:" ":AFTER" &&
printf "%s\\n%s" "$PS1" "${__git_ps1_branch_name}" >"$actual"
) &&
test_cmp expected "$actual"
'
test_expect_success 'prompt - bash color pc mode - dirty status indicator - dirty index' '
printf "BEFORE: (${c_green}\${__git_ps1_branch_name}${c_clear} ${c_green}+${c_clear}):AFTER\\nmaster" >expected &&
echo "dirty" >file &&
test_when_finished "git reset --hard" &&
git add -u &&
(
GIT_PS1_SHOWDIRTYSTATE=y &&
GIT_PS1_SHOWCOLORHINTS=y &&
__git_ps1 "BEFORE:" ":AFTER" &&
printf "%s\\n%s" "$PS1" "${__git_ps1_branch_name}" >"$actual"
) &&
test_cmp expected "$actual"
'
test_expect_success 'prompt - bash color pc mode - dirty status indicator - dirty index and worktree' '
printf "BEFORE: (${c_green}\${__git_ps1_branch_name}${c_clear} ${c_red}*${c_green}+${c_clear}):AFTER\\nmaster" >expected &&
echo "dirty index" >file &&
test_when_finished "git reset --hard" &&
git add -u &&
echo "dirty worktree" >file &&
(
GIT_PS1_SHOWCOLORHINTS=y &&
GIT_PS1_SHOWDIRTYSTATE=y &&
__git_ps1 "BEFORE:" ":AFTER" &&
printf "%s\\n%s" "$PS1" "${__git_ps1_branch_name}" >"$actual"
) &&
test_cmp expected "$actual"
'
test_expect_success 'prompt - bash color pc mode - dirty status indicator - before root commit' '
printf "BEFORE: (${c_green}\${__git_ps1_branch_name}${c_clear} ${c_green}#${c_clear}):AFTER\\nmaster" >expected &&
(
GIT_PS1_SHOWDIRTYSTATE=y &&
GIT_PS1_SHOWCOLORHINTS=y &&
cd otherrepo &&
__git_ps1 "BEFORE:" ":AFTER" &&
printf "%s\\n%s" "$PS1" "${__git_ps1_branch_name}" >"$actual"
) &&
test_cmp expected "$actual"
'
test_expect_success 'prompt - bash color pc mode - inside .git directory' '
printf "BEFORE: (${c_green}\${__git_ps1_branch_name}${c_clear}):AFTER\\nGIT_DIR!" >expected &&
echo "dirty" >file &&
test_when_finished "git reset --hard" &&
(
GIT_PS1_SHOWDIRTYSTATE=y &&
GIT_PS1_SHOWCOLORHINTS=y &&
cd .git &&
__git_ps1 "BEFORE:" ":AFTER" &&
printf "%s\\n%s" "$PS1" "${__git_ps1_branch_name}" >"$actual"
) &&
test_cmp expected "$actual"
'
test_expect_success 'prompt - bash color pc mode - stash status indicator' '
printf "BEFORE: (${c_green}\${__git_ps1_branch_name}${c_clear} ${c_lblue}\$${c_clear}):AFTER\\nmaster" >expected &&
echo 2 >file &&
git stash &&
test_when_finished "git stash drop" &&
(
GIT_PS1_SHOWSTASHSTATE=y &&
GIT_PS1_SHOWCOLORHINTS=y &&
__git_ps1 "BEFORE:" ":AFTER" &&
printf "%s\\n%s" "$PS1" "${__git_ps1_branch_name}" >"$actual"
) &&
test_cmp expected "$actual"
'
test_expect_success 'prompt - bash color pc mode - untracked files status indicator' '
printf "BEFORE: (${c_green}\${__git_ps1_branch_name}${c_clear} ${c_red}%%${c_clear}):AFTER\\nmaster" >expected &&
(
GIT_PS1_SHOWUNTRACKEDFILES=y &&
GIT_PS1_SHOWCOLORHINTS=y &&
__git_ps1 "BEFORE:" ":AFTER" &&
printf "%s\\n%s" "$PS1" "${__git_ps1_branch_name}" >"$actual"
) &&
test_cmp expected "$actual"
'
test_expect_success 'prompt - zsh color pc mode' '
printf "BEFORE: (%%F{green}\${__git_ps1_branch_name}%%f):AFTER\\nmaster" >expected &&
(
ZSH_VERSION=5.0.0 &&
GIT_PS1_SHOWCOLORHINTS=y &&
__git_ps1 "BEFORE:" ":AFTER" >"$actual"
printf "%s\\n%s" "$PS1" "${__git_ps1_branch_name}" >"$actual"
) &&
test_cmp expected "$actual"
'
test_done
|
2439_1
|
crossvul
|
sh
|
CWE-1187
|
DEPRECATED: Use of Uninitialized Resource - This entry has been deprecated because it was a duplicate of CWE-908.
|
cpp
|
/* -*-mode:c++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
/**
Copyright (c) 2006...2016, Matthias Stirner and HTW Aalen University
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
**/
/*
This file contains special classes for bitwise
reading and writing of arrays
*/
#include "../../vp8/util/memory.hh"
#include <stdlib.h>
#include <string.h>
#include <fcntl.h>
#include <algorithm>
#include <assert.h>
#include "bitops.hh"
#define BUFFER_SIZE 1024 * 1024
/* -----------------------------------------------
constructor for abitreader class
----------------------------------------------- */
abitreader::abitreader( unsigned char* array, int size )
{
cbyte2 = 0;
cbit2 = 0;
data2 = array;
eof = false;
lbyte = size;
buf = 0;
}
/* -----------------------------------------------
destructor for abitreader class
----------------------------------------------- */
abitreader::~abitreader( void )
{
}
/* -----------------------------------------------
constructor for abitwriter class
----------------------------------------------- */
abitwriter::abitwriter( int size , int max_file_size)
{
size_bound = max_file_size;
if (size_bound) {
size_bound += 8; // 64 bits of padding on the end
}
fillbit = 1;
adds = 65536;
cbyte2 = 0;
cbit2 = 64;
buf = 0;
error = false;
fmem = true;
dsize = ( size > 0 ) ? size : adds;
data2 = ( unsigned char* ) custom_calloc (dsize);
if ( data2 == NULL ) {
error = true;
custom_exit(ExitCode::MALLOCED_NULL);
return;
}
// for ( int i = 0; i < dsize; i++ ) data[i] = 0;
}
/* -----------------------------------------------
destructor for abitwriter class
----------------------------------------------- */
abitwriter::~abitwriter( void )
{
// free memory if pointer was not given out
if ( fmem ) custom_free( data2 );
}
void aligned_dealloc(unsigned char *data) {
if (!data) return;
data -= data[-1];
custom_free(data);
}
unsigned char *aligned_alloc(size_t dsize) {
unsigned char*data = (unsigned char*) custom_malloc( dsize + 16);
if (data) {
size_t rem = (size_t)(data - 0) & 0xf;
if (rem) {
data += rem;
data[-1] = rem;
} else {
data += 0x10;
data[-1] = 0x10;
}
}
return data;
}
/* -----------------------------------------------
constructor for abytewriter class
----------------------------------------------- */
abytewriter::abytewriter( int size )
{
adds = 65536;
cbyte = 0;
error = false;
fmem = true;
dsize = ( size > 0 ) ? size : adds;
data = aligned_alloc(dsize);
if ( data == NULL ) {
error = true;
custom_exit(ExitCode::MALLOCED_NULL);
return;
}
}
/* -----------------------------------------------
destructor for abytewriter class
----------------------------------------------- */
abytewriter::~abytewriter( void )
{
// free data if pointer is not read
if (fmem && data) aligned_dealloc(data);
}
/* -----------------------------------------------
writes 1 byte to abytewriter
----------------------------------------------- */
void abytewriter::write( unsigned char byte )
{
// safety check for error
if ( error ) return;
// test if pointer beyond flush threshold
if ( cbyte >= ( dsize - 2 ) ) {
if (data) {
unsigned char * newData = aligned_alloc(dsize * 2);
memcpy(newData, data, dsize);
dsize *= 2;
aligned_dealloc(data);
data = newData;
}
if ( data == NULL ) {
error = true;
custom_exit(ExitCode::MALLOCED_NULL);
return;
}
}
// write data
data[ cbyte++ ] = byte;
}
/* -----------------------------------------------
writes n byte to abytewriter
----------------------------------------------- */
void abytewriter::write_n( unsigned char* byte, int n )
{
// safety check for error
if ( error ) return;
// make sure that pointer doesn't get beyond flush threshold
while ( ( cbyte + n ) >= ( dsize - 2 ) ) {
unsigned char * newData = aligned_alloc(dsize * 2);
memcpy(newData, data, dsize);
dsize *= 2;
aligned_dealloc(data);
data = newData;
if ( data == NULL ) {
error = true;
custom_exit(ExitCode::MALLOCED_NULL);
return;
}
}
// copy data from array
while ( n-- > 0 )
data[ cbyte++ ] = *(byte++);
}
/* -----------------------------------------------
gets data array from abytewriter
----------------------------------------------- */
unsigned char* abytewriter::getptr_aligned( void )
{
// forbid freeing memory
fmem = false;
return data;
}
/* -----------------------------------------------
peeks into data array from abytewriter
----------------------------------------------- */
unsigned char* abytewriter::peekptr_aligned( void )
{
return data;
}
/* -----------------------------------------------
gets size of data array from abytewriter
----------------------------------------------- */
int abytewriter::getpos( void )
{
return cbyte;
}
/* -----------------------------------------------
reset without realloc
----------------------------------------------- */
void abytewriter::reset( void )
{
// set position of current byte
cbyte = 0;
}
/* -----------------------------------------------
constructor for abytewriter class
----------------------------------------------- */
abytereader::abytereader( unsigned char* array, int size )
{
cbyte = 0;
eof = false;
data = array;
lbyte = size;
if ( ( data == NULL ) || ( lbyte == 0 ) )
eof = true;
}
/* -----------------------------------------------
destructor for abytewriter class
----------------------------------------------- */
abytereader::~abytereader( void )
{
}
/* -----------------------------------------------
reads 1 byte from abytereader
----------------------------------------------- */
int abytereader::read( unsigned char* byte )
{
if ( cbyte >= lbyte ) {
cbyte = lbyte;
eof = true;
return 0;
}
else {
*byte = data[ cbyte++ ];
return 1;
}
}
/* -----------------------------------------------
reads n bytes from abytereader
----------------------------------------------- */
int abytereader::read_n( unsigned char* byte, int n )
{
int nl = lbyte - cbyte;
int i;
if ( nl < n ) {
for ( i = 0; i < nl; i++ )
byte[ i ] = data[ cbyte + i ];
cbyte = lbyte;
eof = true;
return nl;
}
else {
for ( i = 0; i < n; i++ )
byte[ i ] = data[ cbyte + i ];
cbyte += n;
return n;
}
}
/* -----------------------------------------------
go to position in data
----------------------------------------------- */
void abytereader::seek( int pos )
{
if ( pos >= lbyte ) {
cbyte = lbyte;
eof = true;
}
else {
cbyte = pos;
eof = false;
}
}
/* -----------------------------------------------
gets size of current data
----------------------------------------------- */
int abytereader::getsize( void )
{
return lbyte;
}
/* -----------------------------------------------
gets current position from abytereader
----------------------------------------------- */
int abytereader::getpos( void )
{
return cbyte;
}
bounded_iostream::bounded_iostream(Sirikata::DecoderWriter *w,
const std::function<void(Sirikata::DecoderWriter*, size_t)> &size_callback,
const Sirikata::JpegAllocator<uint8_t> &alloc)
: parent(w), err(Sirikata::JpegError::nil()) {
this->size_callback = size_callback;
buffer_position = 0;
byte_position = 0;
num_bytes_attempted_to_write = 0;
set_bound(0);
}
void bounded_iostream::call_size_callback(size_t size) {
size_callback(parent, size);
}
bool bounded_iostream::chkerr() {
return err != Sirikata::JpegError::nil();
}
void bounded_iostream::set_bound(size_t bound) {
flush();
if (num_bytes_attempted_to_write > byte_bound) {
num_bytes_attempted_to_write = byte_bound;
}
byte_bound = bound;
}
void bounded_iostream::flush() {
if (buffer_position) {
write_no_buffer(buffer, buffer_position);
buffer_position = 0;
}
}
void bounded_iostream::close() {
flush();
parent->Close();
}
unsigned int bounded_iostream::write_no_buffer(const void *from, size_t bytes_to_write) {
//return iostream::write(from,tpsize,dtsize);
std::pair<unsigned int, Sirikata::JpegError> retval;
if (byte_bound != 0 && byte_position + bytes_to_write > byte_bound) {
size_t real_bytes_to_write = byte_bound - byte_position;
byte_position += real_bytes_to_write;
retval = parent->Write(reinterpret_cast<const unsigned char*>(from), real_bytes_to_write);
if (retval.first < real_bytes_to_write) {
err = retval.second;
return retval.first;
}
return bytes_to_write; // pretend we wrote it all
}
size_t total = bytes_to_write;
retval = parent->Write(reinterpret_cast<const unsigned char*>(from), total);
unsigned int written = retval.first;
byte_position += written;
if (written < total ) {
err = retval.second;
return written;
}
return bytes_to_write;
}
unsigned int bounded_iostream::getsize() {
return byte_position;
}
bounded_iostream::~bounded_iostream(){
}
ibytestreamcopier::ibytestreamcopier(Sirikata::DecoderReader *p, unsigned int byte_offset,
unsigned int max_file_size,
const Sirikata::JpegAllocator<uint8_t> &alloc)
: ibytestream(p, byte_offset, alloc), side_channel(alloc) {
if (max_file_size) {
side_channel.reserve(max_file_size);
}
}
bool ibytestreamcopier::read_byte(unsigned char *output) {
bool retval = ibytestream::read_byte(output);
if (retval) {
side_channel.push_back(*output);
}
return retval;
}
unsigned int ibytestreamcopier::read(unsigned char *output, unsigned int size) {
unsigned int retval = ibytestream::read(output, size);
if (retval > 0) {
side_channel.insert(side_channel.end(), output, output + retval);
}
return retval;
}
ibytestream::ibytestream(Sirikata::DecoderReader *p, unsigned int byte_offset,
const Sirikata::JpegAllocator<uint8_t> &alloc)
: parent(p) {
bytes_read = byte_offset;
}
unsigned int ibytestream::read(unsigned char*output, unsigned int size) {
dev_assert(size);
if (size == 1) {
return read_byte(output) ? 1 : 0;
}
int retval = IOUtil::ReadFull(parent, output, size);
bytes_read += retval;
static_assert(sizeof(last_read) == 2, "Last read must hold full jpeg huffman");
if (retval >= 2) {
memcpy(last_read, output + size - sizeof(last_read), sizeof(last_read));
} else if (retval) {
last_read[0] = last_read[1];
last_read[1] = *output;
}
return retval;
}
bool ibytestream::read_byte(unsigned char *output) {
unsigned int retval = parent->Read(output, 1).first;
if (retval != 0) {
last_read[0] = last_read[1];
last_read[1] = *output;
bytes_read += 1;
return true;
}
return false;
}
|
/* -*-mode:c++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
/**
Copyright (c) 2006...2016, Matthias Stirner and HTW Aalen University
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
**/
/*
This file contains special classes for bitwise
reading and writing of arrays
*/
#include "../../vp8/util/memory.hh"
#include <stdlib.h>
#include <string.h>
#include <fcntl.h>
#include <algorithm>
#include <assert.h>
#include "bitops.hh"
#define BUFFER_SIZE 1024 * 1024
/* -----------------------------------------------
constructor for abitreader class
----------------------------------------------- */
abitreader::abitreader( unsigned char* array, int size )
{
cbyte2 = 0;
cbit2 = 0;
data2 = array;
eof = false;
lbyte = size;
buf = 0;
}
/* -----------------------------------------------
destructor for abitreader class
----------------------------------------------- */
abitreader::~abitreader( void )
{
}
/* -----------------------------------------------
constructor for abitwriter class
----------------------------------------------- */
abitwriter::abitwriter( int size , int max_file_size)
{
size_bound = max_file_size;
if (size_bound) {
size_bound += 8; // 64 bits of padding on the end
}
fillbit = 1;
adds = 65536;
cbyte2 = 0;
cbit2 = 64;
buf = 0;
error = false;
fmem = true;
dsize = ( size > 0 ) ? size : adds;
data2 = ( unsigned char* ) custom_calloc (dsize);
if ( data2 == NULL ) {
error = true;
custom_exit(ExitCode::MALLOCED_NULL);
return;
}
// for ( int i = 0; i < dsize; i++ ) data[i] = 0;
}
/* -----------------------------------------------
destructor for abitwriter class
----------------------------------------------- */
abitwriter::~abitwriter( void )
{
// free memory if pointer was not given out
if ( fmem ) custom_free( data2 );
}
void aligned_dealloc(unsigned char *data) {
if (!data) return;
data -= data[-1];
custom_free(data);
}
unsigned char *aligned_alloc(size_t dsize) {
unsigned char*data = (unsigned char*) custom_malloc( dsize + 16);
if (data) {
size_t rem = (size_t)(data - 0) & 0xf;
if (rem) {
data += rem;
data[-1] = rem;
} else {
data += 0x10;
data[-1] = 0x10;
}
}
return data;
}
/* -----------------------------------------------
constructor for abytewriter class
----------------------------------------------- */
abytewriter::abytewriter( int size )
{
adds = 65536;
cbyte = 0;
error = false;
fmem = true;
dsize = ( size > 0 ) ? size : adds;
data = aligned_alloc(dsize);
if ( data == NULL ) {
error = true;
custom_exit(ExitCode::MALLOCED_NULL);
return;
}
}
/* -----------------------------------------------
destructor for abytewriter class
----------------------------------------------- */
abytewriter::~abytewriter( void )
{
// free data if pointer is not read
if (fmem && data) aligned_dealloc(data);
}
/* -----------------------------------------------
writes 1 byte to abytewriter
----------------------------------------------- */
void abytewriter::write( unsigned char byte )
{
// safety check for error
if ( error ) return;
// test if pointer beyond flush threshold
if ( cbyte >= ( dsize - 2 ) ) {
if (data) {
unsigned char * newData = aligned_alloc(dsize * 2);
memcpy(newData, data, dsize);
dsize *= 2;
aligned_dealloc(data);
data = newData;
}
if ( data == NULL ) {
error = true;
custom_exit(ExitCode::MALLOCED_NULL);
return;
}
}
// write data
data[ cbyte++ ] = byte;
}
/* -----------------------------------------------
writes n byte to abytewriter
----------------------------------------------- */
void abytewriter::write_n( unsigned char* byte, int n )
{
// safety check for error
if ( error ) return;
// make sure that pointer doesn't get beyond flush threshold
while ( ( cbyte + n ) >= ( dsize - 2 ) ) {
unsigned char * newData = aligned_alloc(dsize * 2);
memcpy(newData, data, dsize);
dsize *= 2;
aligned_dealloc(data);
data = newData;
if ( data == NULL ) {
error = true;
custom_exit(ExitCode::MALLOCED_NULL);
return;
}
}
// copy data from array
while ( n-- > 0 )
data[ cbyte++ ] = *(byte++);
}
/* -----------------------------------------------
gets data array from abytewriter
----------------------------------------------- */
unsigned char* abytewriter::getptr_aligned( void )
{
// forbid freeing memory
fmem = false;
return data;
}
/* -----------------------------------------------
peeks into data array from abytewriter
----------------------------------------------- */
unsigned char* abytewriter::peekptr_aligned( void )
{
return data;
}
/* -----------------------------------------------
gets size of data array from abytewriter
----------------------------------------------- */
int abytewriter::getpos( void )
{
return cbyte;
}
/* -----------------------------------------------
reset without realloc
----------------------------------------------- */
void abytewriter::reset( void )
{
// set position of current byte
cbyte = 0;
}
/* -----------------------------------------------
constructor for abytewriter class
----------------------------------------------- */
abytereader::abytereader( unsigned char* array, int size )
{
cbyte = 0;
eof = false;
data = array;
lbyte = size;
if ( ( data == NULL ) || ( lbyte == 0 ) )
eof = true;
}
/* -----------------------------------------------
destructor for abytewriter class
----------------------------------------------- */
abytereader::~abytereader( void )
{
}
/* -----------------------------------------------
reads 1 byte from abytereader
----------------------------------------------- */
int abytereader::read( unsigned char* byte )
{
if ( cbyte >= lbyte ) {
cbyte = lbyte;
eof = true;
return 0;
}
else {
*byte = data[ cbyte++ ];
return 1;
}
}
/* -----------------------------------------------
reads n bytes from abytereader
----------------------------------------------- */
int abytereader::read_n( unsigned char* byte, int n )
{
int nl = lbyte - cbyte;
int i;
if ( nl < n ) {
for ( i = 0; i < nl; i++ )
byte[ i ] = data[ cbyte + i ];
cbyte = lbyte;
eof = true;
return nl;
}
else {
for ( i = 0; i < n; i++ )
byte[ i ] = data[ cbyte + i ];
cbyte += n;
return n;
}
}
/* -----------------------------------------------
go to position in data
----------------------------------------------- */
void abytereader::seek( int pos )
{
if ( pos >= lbyte ) {
cbyte = lbyte;
eof = true;
}
else {
cbyte = pos;
eof = false;
}
}
/* -----------------------------------------------
gets size of current data
----------------------------------------------- */
int abytereader::getsize( void )
{
return lbyte;
}
/* -----------------------------------------------
gets current position from abytereader
----------------------------------------------- */
int abytereader::getpos( void )
{
return cbyte;
}
bounded_iostream::bounded_iostream(Sirikata::DecoderWriter *w,
const std::function<void(Sirikata::DecoderWriter*, size_t)> &size_callback,
const Sirikata::JpegAllocator<uint8_t> &alloc)
: parent(w), err(Sirikata::JpegError::nil()) {
this->size_callback = size_callback;
buffer_position = 0;
byte_position = 0;
byte_bound = 0x7FFFFFFF;
num_bytes_attempted_to_write = 0;
set_bound(0);
}
void bounded_iostream::call_size_callback(size_t size) {
size_callback(parent, size);
}
bool bounded_iostream::chkerr() {
return err != Sirikata::JpegError::nil();
}
void bounded_iostream::set_bound(size_t bound) {
flush();
if (num_bytes_attempted_to_write > byte_bound) {
num_bytes_attempted_to_write = byte_bound;
}
byte_bound = bound;
}
void bounded_iostream::flush() {
if (buffer_position) {
write_no_buffer(buffer, buffer_position);
buffer_position = 0;
}
}
void bounded_iostream::close() {
flush();
parent->Close();
}
uint32_t bounded_iostream::write_no_buffer(const void *from, size_t bytes_to_write) {
//return iostream::write(from,tpsize,dtsize);
std::pair<unsigned int, Sirikata::JpegError> retval;
if (byte_bound != 0 && byte_position + bytes_to_write > byte_bound) {
size_t real_bytes_to_write = byte_bound - byte_position;
byte_position += real_bytes_to_write;
retval = parent->Write(reinterpret_cast<const unsigned char*>(from), real_bytes_to_write);
if (retval.first < real_bytes_to_write) {
err = retval.second;
return retval.first;
}
return bytes_to_write; // pretend we wrote it all
}
size_t total = bytes_to_write;
retval = parent->Write(reinterpret_cast<const unsigned char*>(from), total);
unsigned int written = retval.first;
byte_position += written;
if (written < total ) {
err = retval.second;
return written;
}
return bytes_to_write;
}
unsigned int bounded_iostream::getsize() {
return byte_position;
}
bounded_iostream::~bounded_iostream(){
}
ibytestreamcopier::ibytestreamcopier(Sirikata::DecoderReader *p, unsigned int byte_offset,
unsigned int max_file_size,
const Sirikata::JpegAllocator<uint8_t> &alloc)
: ibytestream(p, byte_offset, alloc), side_channel(alloc) {
if (max_file_size) {
side_channel.reserve(max_file_size);
}
}
bool ibytestreamcopier::read_byte(unsigned char *output) {
bool retval = ibytestream::read_byte(output);
if (retval) {
side_channel.push_back(*output);
}
return retval;
}
unsigned int ibytestreamcopier::read(unsigned char *output, unsigned int size) {
unsigned int retval = ibytestream::read(output, size);
if (retval > 0) {
side_channel.insert(side_channel.end(), output, output + retval);
}
return retval;
}
ibytestream::ibytestream(Sirikata::DecoderReader *p, unsigned int byte_offset,
const Sirikata::JpegAllocator<uint8_t> &alloc)
: parent(p) {
bytes_read = byte_offset;
}
unsigned int ibytestream::read(unsigned char*output, unsigned int size) {
dev_assert(size);
if (size == 1) {
return read_byte(output) ? 1 : 0;
}
int retval = IOUtil::ReadFull(parent, output, size);
bytes_read += retval;
static_assert(sizeof(last_read) == 2, "Last read must hold full jpeg huffman");
if (retval >= 2) {
memcpy(last_read, output + size - sizeof(last_read), sizeof(last_read));
} else if (retval) {
last_read[0] = last_read[1];
last_read[1] = *output;
}
return retval;
}
bool ibytestream::read_byte(unsigned char *output) {
unsigned int retval = parent->Read(output, 1).first;
if (retval != 0) {
last_read[0] = last_read[1];
last_read[1] = *output;
bytes_read += 1;
return true;
}
return false;
}
|
3343_0
|
crossvul
|
cc
|
CWE-1187
|
DEPRECATED: Use of Uninitialized Resource - This entry has been deprecated because it was a duplicate of CWE-908.
|
cpp
|
#include "lepton_codec.hh"
#include "uncompressed_components.hh"
#include "../vp8/decoder/decoder.hh"
template<class Left, class Middle, class Right, bool force_memory_optimization>
void LeptonCodec::ThreadState::decode_row(Left & left_model,
Middle& middle_model,
Right& right_model,
int curr_y,
BlockBasedImagePerChannel<force_memory_optimization>& image_data,
int component_size_in_block) {
uint32_t block_width = image_data[(int)middle_model.COLOR]->block_width();
if (block_width > 0) {
BlockContext context = context_.at((int)middle_model.COLOR);
parse_tokens(context,
bool_decoder_,
left_model,
model_); //FIXME
int offset = image_data[middle_model.COLOR]->next(context_.at((int)middle_model.COLOR), true, curr_y);
if (offset >= component_size_in_block) {
return;
}
}
for (unsigned int jpeg_x = 1; jpeg_x + 1 < block_width; jpeg_x++) {
BlockContext context = context_.at((int)middle_model.COLOR);
parse_tokens(context,
bool_decoder_,
middle_model,
model_); //FIXME
int offset = image_data[middle_model.COLOR]->next(context_.at((int)middle_model.COLOR),
true,
curr_y);
if (offset >= component_size_in_block) {
return;
}
}
if (block_width > 1) {
BlockContext context = context_.at((int)middle_model.COLOR);
parse_tokens(context,
bool_decoder_,
right_model,
model_);
image_data[middle_model.COLOR]->next(context_.at((int)middle_model.COLOR), false, curr_y);
}
}
#ifdef ALLOW_FOUR_COLORS
#define ProbabilityTablesTuple(left, above, right) \
ProbabilityTables<left && above && right, TEMPLATE_ARG_COLOR0>, \
ProbabilityTables<left && above && right, TEMPLATE_ARG_COLOR1>, \
ProbabilityTables<left && above && right, TEMPLATE_ARG_COLOR2>, \
ProbabilityTables<left && above && right, TEMPLATE_ARG_COLOR3>
#define EACH_BLOCK_TYPE(left, above, right) ProbabilityTables<left&&above&&right, TEMPLATE_ARG_COLOR0>(BlockType::Y, \
left, \
above, \
right), \
ProbabilityTables<left&&above&&right, TEMPLATE_ARG_COLOR1>(BlockType::Cb, \
left, \
above, \
right), \
ProbabilityTables<left&&above&&right, TEMPLATE_ARG_COLOR2>(BlockType::Cr, \
left, \
above, \
right), \
ProbabilityTables<left&&above&&right, TEMPLATE_ARG_COLOR3>(BlockType::Ck, \
left, \
above, \
right)
#else
#define ProbabilityTablesTuple(left, above, right) \
ProbabilityTables<left && above && right, TEMPLATE_ARG_COLOR0>, \
ProbabilityTables<left && above && right, TEMPLATE_ARG_COLOR1>, \
ProbabilityTables<left && above && right, TEMPLATE_ARG_COLOR2>
#define EACH_BLOCK_TYPE(left, above, right) ProbabilityTables<left&&above&&right, TEMPLATE_ARG_COLOR0>(BlockType::Y, \
left, \
above, \
right), \
ProbabilityTables<left&&above&&right, TEMPLATE_ARG_COLOR1>(BlockType::Cb, \
left, \
above, \
right), \
ProbabilityTables<left&&above&&right, TEMPLATE_ARG_COLOR2>(BlockType::Cr, \
left, \
above, \
right)
#endif
// Thin virtual-dispatch shim for the memory-optimized (<true>) image variant;
// forwards directly to decode_rowt.
void LeptonCodec::ThreadState::decode_row_wrapper(BlockBasedImagePerChannel<true>& image_data,
                                                  Sirikata::Array1d<uint32_t,
                                                                    (uint32_t)ColorChannel::
                                                                    NumBlockTypes> component_size_in_blocks,
                                                  int component,
                                                  int curr_y) {
    return decode_rowt(image_data, component_size_in_blocks, component, curr_y);
}
// Non-memory-optimized (<false>) entry point; instantiates the shared
// decode_row_internal template for this image variant.
void LeptonCodec::ThreadState::decode_rowf(BlockBasedImagePerChannel<false>& image_data,
                                           Sirikata::Array1d<uint32_t,
                                                             (uint32_t)ColorChannel::
                                                             NumBlockTypes> component_size_in_blocks,
                                           int component,
                                           int curr_y) {
    decode_row_internal(image_data, component_size_in_blocks,component,curr_y);
}
// Memory-optimized (<true>) entry point; instantiates the shared
// decode_row_internal template for this image variant.
void LeptonCodec::ThreadState::decode_rowt(BlockBasedImagePerChannel<true>& image_data,
                                           Sirikata::Array1d<uint32_t,
                                                             (uint32_t)ColorChannel::
                                                             NumBlockTypes> component_size_in_blocks,
                                           int component,
                                           int curr_y) {
    decode_row_internal(image_data, component_size_in_blocks,component,curr_y);
}
// Decodes one row of one color component. Builds a probability-table bundle
// for each (left, above, right) neighbor-availability combination, then
// dispatches to decode_row with the bundle matching this row's position
// (top row / interior row / degenerate single-column image).
template<bool force_memory_optimization>
void LeptonCodec::ThreadState::decode_row_internal(BlockBasedImagePerChannel<force_memory_optimization>& image_data,
                                                   Sirikata::Array1d<uint32_t,
                                                                     (uint32_t)ColorChannel::
                                                                     NumBlockTypes> component_size_in_blocks,
                                                   int component,
                                                   int curr_y) {
    // One table set per neighbor configuration; each holds a table per color.
    std::tuple<ProbabilityTablesTuple(false, false, false)> nw_corner(EACH_BLOCK_TYPE(false,false,false));
    std::tuple<ProbabilityTablesTuple(true, false, false)> top_rest(EACH_BLOCK_TYPE(true,false,false));
    std::tuple<ProbabilityTablesTuple(false, true, true)> row_first(EACH_BLOCK_TYPE(false, true, true));
    std::tuple<ProbabilityTablesTuple(true, true, true)> row_mid(EACH_BLOCK_TYPE(true,true,true));
    std::tuple<ProbabilityTablesTuple(true, true, false)> row_last(EACH_BLOCK_TYPE(true, true, false));
    std::tuple<ProbabilityTablesTuple(false, true, false)> lone_col(EACH_BLOCK_TYPE(false, true, false));
    // Position this component's decode context at the start of row curr_y.
    context_.at(component)
        = image_data[component]->off_y(curr_y,
                                       num_nonzeros_.at(component).begin());
    int block_width = image_data[component]->block_width();
    if (is_top_row_.at(component)) {
        // First decoded row for this component: no "above" neighbors exist.
        is_top_row_.at(component) = false;
        switch ((BlockType)component) {
        case BlockType::Y:
            decode_row(std::get<(int)BlockType::Y>(nw_corner),
                       std::get<(int)BlockType::Y>(top_rest),
                       std::get<(int)BlockType::Y>(top_rest),
                       curr_y, image_data, component_size_in_blocks[component]);
            break;
        case BlockType::Cb:
            decode_row(std::get<(int)BlockType::Cb>(nw_corner),
                       std::get<(int)BlockType::Cb>(top_rest),
                       std::get<(int)BlockType::Cb>(top_rest),
                       curr_y, image_data, component_size_in_blocks[component]);
            break;
        case BlockType::Cr:
            decode_row(std::get<(int)BlockType::Cr>(nw_corner),
                       std::get<(int)BlockType::Cr>(top_rest),
                       std::get<(int)BlockType::Cr>(top_rest),
                       curr_y, image_data, component_size_in_blocks[component]);
            break;
#ifdef ALLOW_FOUR_COLORS
        case BlockType::Ck:
            decode_row(std::get<(int)BlockType::Ck>(nw_corner),
                       std::get<(int)BlockType::Ck>(top_rest),
                       std::get<(int)BlockType::Ck>(top_rest),
                       curr_y, image_data, component_size_in_blocks[component]);
            break;
#endif
        }
    } else if (block_width > 1) {
        // Interior row with >= 2 columns: left edge, middle run, right edge.
        dev_assert(curr_y); // row zero must have taken the branch above
        switch ((BlockType)component) {
        case BlockType::Y:
            decode_row(std::get<(int)BlockType::Y>(row_first),
                       std::get<(int)BlockType::Y>(row_mid),
                       std::get<(int)BlockType::Y>(row_last),
                       curr_y, image_data, component_size_in_blocks[component]);
            break;
        case BlockType::Cb:
            decode_row(std::get<(int)BlockType::Cb>(row_first),
                       std::get<(int)BlockType::Cb>(row_mid),
                       std::get<(int)BlockType::Cb>(row_last),
                       curr_y, image_data, component_size_in_blocks[component]);
            break;
        case BlockType::Cr:
            decode_row(std::get<(int)BlockType::Cr>(row_first),
                       std::get<(int)BlockType::Cr>(row_mid),
                       std::get<(int)BlockType::Cr>(row_last),
                       curr_y, image_data, component_size_in_blocks[component]);
            break;
#ifdef ALLOW_FOUR_COLORS
        case BlockType::Ck:
            decode_row(std::get<(int)BlockType::Ck>(row_first),
                       std::get<(int)BlockType::Ck>(row_mid),
                       std::get<(int)BlockType::Ck>(row_last),
                       curr_y, image_data, component_size_in_blocks[component]);
            break;
#endif
        }
    } else {
        // Single-column image: the lone block is both left and right edge.
        dev_assert(curr_y); // row zero must have taken the branch above
        dev_assert(block_width == 1);
        switch ((BlockType)component) {
        case BlockType::Y:
            decode_row(std::get<(int)BlockType::Y>(lone_col),
                       std::get<(int)BlockType::Y>(lone_col),
                       std::get<(int)BlockType::Y>(lone_col),
                       curr_y, image_data, component_size_in_blocks[component]);
            break;
        case BlockType::Cb:
            decode_row(std::get<(int)BlockType::Cb>(lone_col),
                       std::get<(int)BlockType::Cb>(lone_col),
                       std::get<(int)BlockType::Cb>(lone_col),
                       curr_y, image_data, component_size_in_blocks[component]);
            break;
        case BlockType::Cr:
            decode_row(std::get<(int)BlockType::Cr>(lone_col),
                       std::get<(int)BlockType::Cr>(lone_col),
                       std::get<(int)BlockType::Cr>(lone_col),
                       curr_y, image_data, component_size_in_blocks[component]);
            break;
#ifdef ALLOW_FOUR_COLORS
        case BlockType::Ck:
            decode_row(std::get<(int)BlockType::Ck>(lone_col),
                       std::get<(int)BlockType::Ck>(lone_col),
                       std::get<(int)BlockType::Ck>(lone_col),
                       curr_y, image_data, component_size_in_blocks[component]);
            break;
#endif
        }
    }
}
// Decodes this thread's share of rows (luma rows [min_y, max_y)) from the
// arithmetic-coded stream into colldata's block storage.
//
// Returns CODING_PARTIAL after each successfully decoded row so the caller
// can interleave progress/IO, and CODING_DONE once row_spec_from_index
// reports all rows consumed (or the row range passes this thread's slice).
CodingReturnValue LeptonCodec::ThreadState::vp8_decode_thread(unsigned int thread_id,
                                                              UncompressedComponents *const colldata) {
    Sirikata::Array1d<uint32_t, (uint32_t)ColorChannel::NumBlockTypes> component_size_in_blocks;
    BlockBasedImagePerChannel<false> image_data;
    for (int i = 0; i < colldata->get_num_components(); ++i) {
        component_size_in_blocks[i] = colldata->component_size_in_blocks(i);
        image_data[i] = &colldata->full_component_write((BlockType)i);
    }
    Sirikata::Array1d<uint32_t,
                      (size_t)ColorChannel::NumBlockTypes> max_coded_heights
        = colldata->get_max_coded_heights();
    /* deserialize each block in planar order */
    dev_assert(luma_splits_.size() == 2); // not ready to do multiple work items on a thread yet
    // Hard bounds check: dev_assert compiles out in release builds, so a
    // malformed/short thread handoff could otherwise index luma_splits_[0..1]
    // out of range below. (Matches the hardened version of this function.)
    always_assert(luma_splits_.size() >= 2);
    int min_y = luma_splits_[0];
    int max_y = luma_splits_[1];
    while(true) {
        RowSpec cur_row = row_spec_from_index(decode_index_++, image_data, colldata->get_mcu_count_vertical(), max_coded_heights);
        if (cur_row.done) {
            break; // every row in the image has been visited
        }
        if (cur_row.luma_y >= max_y && thread_id + 1 != NUM_THREADS) {
            break; // past this thread's slice (the last thread mops up the tail)
        }
        if (cur_row.skip) {
            continue;
        }
        if (cur_row.luma_y < min_y) {
            continue; // before this thread's slice
        }
        decode_rowf(image_data,
                    component_size_in_blocks,
                    cur_row.component,
                    cur_row.curr_y);
        if (thread_id == 0) {
            colldata->worker_update_cmp_progress((BlockType)cur_row.component,
                                                 image_data[cur_row.component]->block_width() );
        }
        return CODING_PARTIAL; // yield to the caller after each decoded row
    }
    return CODING_DONE;
}
|
#include "lepton_codec.hh"
#include "uncompressed_components.hh"
#include "../vp8/decoder/decoder.hh"
// Decodes one row of 8x8 blocks for the color channel selected by the models'
// COLOR constant. The three model parameters supply the probability tables for
// the leftmost block, interior blocks, and rightmost block of the row, which
// differ in which already-decoded neighbors they may consult.
// Returns early once `next()` reports the per-component block budget
// (component_size_in_block) has been reached.
template<class Left, class Middle, class Right, bool force_memory_optimization>
void LeptonCodec::ThreadState::decode_row(Left & left_model,
                                          Middle& middle_model,
                                          Right& right_model,
                                          int curr_y,
                                          BlockBasedImagePerChannel<force_memory_optimization>& image_data,
                                          int component_size_in_block) {
    uint32_t block_width = image_data[(int)middle_model.COLOR]->block_width();
    if (block_width > 0) {
        // First block of the row: no left neighbor.
        BlockContext context = context_.at((int)middle_model.COLOR);
        parse_tokens(context,
                     bool_decoder_,
                     left_model,
                     model_); //FIXME
        // Advance the per-component cursor; `true` means there are more
        // blocks expected on this row.
        int offset = image_data[middle_model.COLOR]->next(context_.at((int)middle_model.COLOR), true, curr_y);
        if (offset >= component_size_in_block) {
            return; // component block budget exhausted
        }
    }
    // Interior blocks: both left and (off the top row) above neighbors exist.
    for (unsigned int jpeg_x = 1; jpeg_x + 1 < block_width; jpeg_x++) {
        BlockContext context = context_.at((int)middle_model.COLOR);
        parse_tokens(context,
                     bool_decoder_,
                     middle_model,
                     model_); //FIXME
        int offset = image_data[middle_model.COLOR]->next(context_.at((int)middle_model.COLOR),
                                                          true,
                                                          curr_y);
        if (offset >= component_size_in_block) {
            return; // component block budget exhausted
        }
    }
    if (block_width > 1) {
        // Last block of the row: no right neighbor; `false` marks end-of-row.
        BlockContext context = context_.at((int)middle_model.COLOR);
        parse_tokens(context,
                     bool_decoder_,
                     right_model,
                     model_);
        image_data[middle_model.COLOR]->next(context_.at((int)middle_model.COLOR), false, curr_y);
    }
}
#ifdef ALLOW_FOUR_COLORS
#define ProbabilityTablesTuple(left, above, right) \
ProbabilityTables<left && above && right, TEMPLATE_ARG_COLOR0>, \
ProbabilityTables<left && above && right, TEMPLATE_ARG_COLOR1>, \
ProbabilityTables<left && above && right, TEMPLATE_ARG_COLOR2>, \
ProbabilityTables<left && above && right, TEMPLATE_ARG_COLOR3>
#define EACH_BLOCK_TYPE(left, above, right) ProbabilityTables<left&&above&&right, TEMPLATE_ARG_COLOR0>(BlockType::Y, \
left, \
above, \
right), \
ProbabilityTables<left&&above&&right, TEMPLATE_ARG_COLOR1>(BlockType::Cb, \
left, \
above, \
right), \
ProbabilityTables<left&&above&&right, TEMPLATE_ARG_COLOR2>(BlockType::Cr, \
left, \
above, \
right), \
ProbabilityTables<left&&above&&right, TEMPLATE_ARG_COLOR3>(BlockType::Ck, \
left, \
above, \
right)
#else
#define ProbabilityTablesTuple(left, above, right) \
ProbabilityTables<left && above && right, TEMPLATE_ARG_COLOR0>, \
ProbabilityTables<left && above && right, TEMPLATE_ARG_COLOR1>, \
ProbabilityTables<left && above && right, TEMPLATE_ARG_COLOR2>
#define EACH_BLOCK_TYPE(left, above, right) ProbabilityTables<left&&above&&right, TEMPLATE_ARG_COLOR0>(BlockType::Y, \
left, \
above, \
right), \
ProbabilityTables<left&&above&&right, TEMPLATE_ARG_COLOR1>(BlockType::Cb, \
left, \
above, \
right), \
ProbabilityTables<left&&above&&right, TEMPLATE_ARG_COLOR2>(BlockType::Cr, \
left, \
above, \
right)
#endif
// Thin virtual-dispatch shim for the memory-optimized (<true>) image variant;
// forwards directly to decode_rowt.
void LeptonCodec::ThreadState::decode_row_wrapper(BlockBasedImagePerChannel<true>& image_data,
                                                  Sirikata::Array1d<uint32_t,
                                                                    (uint32_t)ColorChannel::
                                                                    NumBlockTypes> component_size_in_blocks,
                                                  int component,
                                                  int curr_y) {
    return decode_rowt(image_data, component_size_in_blocks, component, curr_y);
}
// Non-memory-optimized (<false>) entry point; instantiates the shared
// decode_row_internal template for this image variant.
void LeptonCodec::ThreadState::decode_rowf(BlockBasedImagePerChannel<false>& image_data,
                                           Sirikata::Array1d<uint32_t,
                                                             (uint32_t)ColorChannel::
                                                             NumBlockTypes> component_size_in_blocks,
                                           int component,
                                           int curr_y) {
    decode_row_internal(image_data, component_size_in_blocks,component,curr_y);
}
// Memory-optimized (<true>) entry point; instantiates the shared
// decode_row_internal template for this image variant.
void LeptonCodec::ThreadState::decode_rowt(BlockBasedImagePerChannel<true>& image_data,
                                           Sirikata::Array1d<uint32_t,
                                                             (uint32_t)ColorChannel::
                                                             NumBlockTypes> component_size_in_blocks,
                                           int component,
                                           int curr_y) {
    decode_row_internal(image_data, component_size_in_blocks,component,curr_y);
}
// Decodes one row of one color component. Builds a probability-table bundle
// for each (left, above, right) neighbor-availability combination, then
// dispatches to decode_row with the bundle matching this row's position
// (top row / interior row / degenerate single-column image).
template<bool force_memory_optimization>
void LeptonCodec::ThreadState::decode_row_internal(BlockBasedImagePerChannel<force_memory_optimization>& image_data,
                                                   Sirikata::Array1d<uint32_t,
                                                                     (uint32_t)ColorChannel::
                                                                     NumBlockTypes> component_size_in_blocks,
                                                   int component,
                                                   int curr_y) {
    // One table set per neighbor configuration; each holds a table per color.
    std::tuple<ProbabilityTablesTuple(false, false, false)> nw_corner(EACH_BLOCK_TYPE(false,false,false));
    std::tuple<ProbabilityTablesTuple(true, false, false)> top_rest(EACH_BLOCK_TYPE(true,false,false));
    std::tuple<ProbabilityTablesTuple(false, true, true)> row_first(EACH_BLOCK_TYPE(false, true, true));
    std::tuple<ProbabilityTablesTuple(true, true, true)> row_mid(EACH_BLOCK_TYPE(true,true,true));
    std::tuple<ProbabilityTablesTuple(true, true, false)> row_last(EACH_BLOCK_TYPE(true, true, false));
    std::tuple<ProbabilityTablesTuple(false, true, false)> lone_col(EACH_BLOCK_TYPE(false, true, false));
    // Position this component's decode context at the start of row curr_y.
    context_.at(component)
        = image_data[component]->off_y(curr_y,
                                       num_nonzeros_.at(component).begin());
    int block_width = image_data[component]->block_width();
    if (is_top_row_.at(component)) {
        // First decoded row for this component: no "above" neighbors exist.
        is_top_row_.at(component) = false;
        switch ((BlockType)component) {
        case BlockType::Y:
            decode_row(std::get<(int)BlockType::Y>(nw_corner),
                       std::get<(int)BlockType::Y>(top_rest),
                       std::get<(int)BlockType::Y>(top_rest),
                       curr_y, image_data, component_size_in_blocks[component]);
            break;
        case BlockType::Cb:
            decode_row(std::get<(int)BlockType::Cb>(nw_corner),
                       std::get<(int)BlockType::Cb>(top_rest),
                       std::get<(int)BlockType::Cb>(top_rest),
                       curr_y, image_data, component_size_in_blocks[component]);
            break;
        case BlockType::Cr:
            decode_row(std::get<(int)BlockType::Cr>(nw_corner),
                       std::get<(int)BlockType::Cr>(top_rest),
                       std::get<(int)BlockType::Cr>(top_rest),
                       curr_y, image_data, component_size_in_blocks[component]);
            break;
#ifdef ALLOW_FOUR_COLORS
        case BlockType::Ck:
            decode_row(std::get<(int)BlockType::Ck>(nw_corner),
                       std::get<(int)BlockType::Ck>(top_rest),
                       std::get<(int)BlockType::Ck>(top_rest),
                       curr_y, image_data, component_size_in_blocks[component]);
            break;
#endif
        }
    } else if (block_width > 1) {
        // Interior row with >= 2 columns: left edge, middle run, right edge.
        dev_assert(curr_y); // row zero must have taken the branch above
        switch ((BlockType)component) {
        case BlockType::Y:
            decode_row(std::get<(int)BlockType::Y>(row_first),
                       std::get<(int)BlockType::Y>(row_mid),
                       std::get<(int)BlockType::Y>(row_last),
                       curr_y, image_data, component_size_in_blocks[component]);
            break;
        case BlockType::Cb:
            decode_row(std::get<(int)BlockType::Cb>(row_first),
                       std::get<(int)BlockType::Cb>(row_mid),
                       std::get<(int)BlockType::Cb>(row_last),
                       curr_y, image_data, component_size_in_blocks[component]);
            break;
        case BlockType::Cr:
            decode_row(std::get<(int)BlockType::Cr>(row_first),
                       std::get<(int)BlockType::Cr>(row_mid),
                       std::get<(int)BlockType::Cr>(row_last),
                       curr_y, image_data, component_size_in_blocks[component]);
            break;
#ifdef ALLOW_FOUR_COLORS
        case BlockType::Ck:
            decode_row(std::get<(int)BlockType::Ck>(row_first),
                       std::get<(int)BlockType::Ck>(row_mid),
                       std::get<(int)BlockType::Ck>(row_last),
                       curr_y, image_data, component_size_in_blocks[component]);
            break;
#endif
        }
    } else {
        // Single-column image: the lone block is both left and right edge.
        dev_assert(curr_y); // row zero must have taken the branch above
        dev_assert(block_width == 1);
        switch ((BlockType)component) {
        case BlockType::Y:
            decode_row(std::get<(int)BlockType::Y>(lone_col),
                       std::get<(int)BlockType::Y>(lone_col),
                       std::get<(int)BlockType::Y>(lone_col),
                       curr_y, image_data, component_size_in_blocks[component]);
            break;
        case BlockType::Cb:
            decode_row(std::get<(int)BlockType::Cb>(lone_col),
                       std::get<(int)BlockType::Cb>(lone_col),
                       std::get<(int)BlockType::Cb>(lone_col),
                       curr_y, image_data, component_size_in_blocks[component]);
            break;
        case BlockType::Cr:
            decode_row(std::get<(int)BlockType::Cr>(lone_col),
                       std::get<(int)BlockType::Cr>(lone_col),
                       std::get<(int)BlockType::Cr>(lone_col),
                       curr_y, image_data, component_size_in_blocks[component]);
            break;
#ifdef ALLOW_FOUR_COLORS
        case BlockType::Ck:
            decode_row(std::get<(int)BlockType::Ck>(lone_col),
                       std::get<(int)BlockType::Ck>(lone_col),
                       std::get<(int)BlockType::Ck>(lone_col),
                       curr_y, image_data, component_size_in_blocks[component]);
            break;
#endif
        }
    }
}
// Decodes this thread's share of rows (luma rows [min_y, max_y)) from the
// arithmetic-coded stream. Returns CODING_PARTIAL after each decoded row so
// the caller can interleave progress, CODING_DONE when the row iterator is
// exhausted or the slice boundary is passed.
CodingReturnValue LeptonCodec::ThreadState::vp8_decode_thread(unsigned int thread_id,
                                                              UncompressedComponents *const colldata) {
    Sirikata::Array1d<uint32_t, (uint32_t)ColorChannel::NumBlockTypes> component_size_in_blocks;
    BlockBasedImagePerChannel<false> image_data;
    for (int i = 0; i < colldata->get_num_components(); ++i) {
        component_size_in_blocks[i] = colldata->component_size_in_blocks(i);
        image_data[i] = &colldata->full_component_write((BlockType)i);
    }
    Sirikata::Array1d<uint32_t,
                      (size_t)ColorChannel::NumBlockTypes> max_coded_heights
        = colldata->get_max_coded_heights();
    /* deserialize each block in planar order */
    dev_assert(luma_splits_.size() == 2); // not ready to do multiple work items on a thread yet
    // Release-mode bounds check: guards the luma_splits_[0..1] reads below
    // against a malformed/short thread handoff (dev_assert compiles out).
    always_assert(luma_splits_.size() >= 2);
    int min_y = luma_splits_[0];
    int max_y = luma_splits_[1];
    while(true) {
        RowSpec cur_row = row_spec_from_index(decode_index_++, image_data, colldata->get_mcu_count_vertical(), max_coded_heights);
        if (cur_row.done) {
            break; // every row in the image has been visited
        }
        if (cur_row.luma_y >= max_y && thread_id + 1 != NUM_THREADS) {
            break; // past this thread's slice (the last thread mops up the tail)
        }
        if (cur_row.skip) {
            continue;
        }
        if (cur_row.luma_y < min_y) {
            continue; // before this thread's slice
        }
        decode_rowf(image_data,
                    component_size_in_blocks,
                    cur_row.component,
                    cur_row.curr_y);
        if (thread_id == 0) {
            colldata->worker_update_cmp_progress((BlockType)cur_row.component,
                                                 image_data[cur_row.component]->block_width() );
        }
        return CODING_PARTIAL; // yield to the caller after each decoded row
    }
    return CODING_DONE;
}
|
3343_2
|
crossvul
|
cc
|
CWE-1187
|
DEPRECATED: Use of Uninitialized Resource - This entry has been deprecated because it was a duplicate of CWE-908.
|
cpp
|
/* -*-mode:c++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
#include <algorithm>
#include <cassert>
#include <iostream>
#include <tuple>
#include "bitops.hh"
#include "component_info.hh"
#include "uncompressed_components.hh"
#include "jpgcoder.hh"
#include "vp8_decoder.hh"
#include "../io/Reader.hh"
#include "../vp8/decoder/decoder.hh"
using namespace std;
// Binds the compressed input stream and the per-thread row-handoff table
// (as produced by the encoder); must run before the first decode_chunk call.
void VP8ComponentDecoder::initialize( Sirikata::DecoderReader *input,
                                      const std::vector<ThreadHandoff>& thread_handoff)
{
    str_in = input;
    mux_reader_.init(input);
    thread_handoff_ = thread_handoff;
}
// Decodes one row on behalf of target_thread_id by delegating to that
// thread's ThreadState (memory-optimized image variant).
void VP8ComponentDecoder::decode_row(int target_thread_id,
                                     BlockBasedImagePerChannel<true>& image_data, // FIXME: set image_data to true
                                     Sirikata::Array1d<uint32_t,
                                                       (uint32_t)ColorChannel::
                                                       NumBlockTypes> component_size_in_blocks,
                                     int component,
                                     int curr_y) {
    thread_state_[target_thread_id]->decode_rowt(image_data,
                                                 component_size_in_blocks,
                                                 component,
                                                 curr_y);
}
// Constructs the decoder. The mux reader is configured with 8 streams and a
// zero initial offset. virtual_thread_id_ == -1 marks "no virtual thread has
// run yet" for the single-threaded resume logic in decode_chunk.
VP8ComponentDecoder::VP8ComponentDecoder(bool do_threading)
    : VP8ComponentEncoder(do_threading),
      mux_reader_(Sirikata::JpegAllocator<uint8_t>(),
                  8,
                  0) {
    virtual_thread_id_ = -1;
}
VP8ComponentDecoder::~VP8ComponentDecoder() {
}
#ifdef ALLOW_FOUR_COLORS
#define ProbabilityTablesTuple(left, above, right) \
ProbabilityTables<left, above, right, TEMPLATE_ARG_COLOR0>, \
ProbabilityTables<left, above, right, TEMPLATE_ARG_COLOR1>, \
ProbabilityTables<left, above, right, TEMPLATE_ARG_COLOR2>, \
ProbabilityTables<left, above, right, TEMPLATE_ARG_COLOR3>
#define EACH_BLOCK_TYPE(left, above, right) BlockType::Y, \
BlockType::Cb, \
BlockType::Cr, \
BlockType::Ck
#else
#define ProbabilityTablesTuple(left, above, right) \
ProbabilityTables<left, above, right, TEMPLATE_ARG_COLOR0>, \
ProbabilityTables<left, above, right, TEMPLATE_ARG_COLOR1>, \
ProbabilityTables<left, above, right, TEMPLATE_ARG_COLOR2>
#define EACH_BLOCK_TYPE BlockType::Y, \
BlockType::Cb, \
BlockType::Cr
#endif
// Re-initializes a thread's model state and arithmetic (bool) decoder so the
// thread slot can be reused for another decode pass.
void VP8ComponentDecoder::clear_thread_state(int thread_id, int target_thread_state, BlockBasedImagePerChannel<true>& framebuffer) {
    initialize_thread_id(thread_id, target_thread_state, framebuffer);
    initialize_bool_decoder(thread_id, target_thread_state);
}
class ActualThreadPacketReader : public PacketReader{
GenericWorker *worker;
VP8ComponentDecoder::SendToActualThread *base;
uint8_t stream_id;
ResizableByteBufferListNode* last;
public:
ActualThreadPacketReader(uint8_t stream_id, GenericWorker*worker, VP8ComponentDecoder::SendToActualThread*base) {
this->worker = worker;
this->stream_id = stream_id;
this->base = base;
}
// returns a buffer with at least sizeof(BD_VALUE) before it
virtual ROBuffer getNext() {
if (!base->vbuffers[stream_id].empty()) {
auto retval = base->vbuffers[stream_id].front();
if (!retval->empty()) {
base->vbuffers[stream_id].pop();
}
if (retval->empty()) {
isEof = true;
return {NULL, NULL};
}
return {retval->data(), retval->data() + retval->size()};
}
while(!isEof) {
auto dat = worker->batch_recv_data();
for (unsigned int i = 0; i < dat.count; ++i) {
ResizableByteBufferListNode* lnode = (ResizableByteBufferListNode*) dat.data[i];
if (dat.count == 1 && lnode->stream_id == stream_id && lnode && lnode->size()) {
assert(stream_id == lnode->stream_id);
last = lnode;
return {lnode->data(), lnode->data() + lnode->size()};
} else {
base->vbuffers[lnode->stream_id].push(lnode);
}
}
if (!base->vbuffers[stream_id].empty()) {
return getNext(); // recursive call, 1 deep
}
if (dat.return_code < 0) {
isEof = true; // hmm... should we bail here?
always_assert(false);
}
}
return {NULL, NULL};
}
bool eof()const {
return isEof;
}
virtual void setFree(ROBuffer buffer) {// don't even bother
if (last && last->data() == buffer.first) {
delete last; // hax
last = NULL;
}
}
virtual ~ActualThreadPacketReader(){}
};
// Body executed on each spin worker: wires up a packet reader for every
// stream routed to this thread, then pumps vp8_decode_thread until it stops
// returning CODING_PARTIAL. Timing marks bracket the arithmetic-decode phase.
void VP8ComponentDecoder::worker_thread(ThreadState *ts, int thread_id, UncompressedComponents * const colldata,
                                        int8_t thread_target[Sirikata::MuxReader::MAX_STREAM_ID],
                                        GenericWorker *worker,
                                        SendToActualThread *send_to_actual_thread_state) {
    TimingHarness::timing[thread_id][TimingHarness::TS_ARITH_STARTED] = TimingHarness::get_time_us();
    for (uint8_t i = 0; i < Sirikata::MuxReader::MAX_STREAM_ID; ++i) {
        if (thread_target[i] == int8_t(thread_id)) {
            ts->bool_decoder_.init(new ActualThreadPacketReader(i,worker, send_to_actual_thread_state));
        }
    }
    while (ts->vp8_decode_thread(thread_id, colldata) == CODING_PARTIAL) {
    }
    TimingHarness::timing[thread_id][TimingHarness::TS_ARITH_FINISHED] = TimingHarness::get_time_us();
}
// Packet reader for the single-threaded ("virtual thread") path: pulls
// buffers for one stream directly from the mux via SendToVirtualThread.
class VirtualThreadPacketReader : public PacketReader{
    VP8ComponentDecoder::SendToVirtualThread*base;
    uint8_t stream_id;
    Sirikata::MuxReader*mux_reader_;
    // NOTE(review): `last` is initialized to NULL but never assigned in
    // getNext(), so setFree() is effectively a no-op here — confirm intent.
    Sirikata::MuxReader::ResizableByteBuffer * last;
public:
    VirtualThreadPacketReader(uint8_t stream_id, Sirikata::MuxReader * mr, VP8ComponentDecoder::SendToVirtualThread*base) {
        this->base = base;
        this->stream_id = stream_id;
        this->mux_reader_ = mr;
        last = NULL;
    }
    // returns a buffer with at least sizeof(BD_VALUE) before it
    virtual ROBuffer getNext() {
        auto retval = base->read(*mux_reader_, stream_id);
        if (retval->size() == 0) {
            // Empty buffer is the EOF sentinel.
            isEof = true;
            return {NULL, NULL};
        }
        always_assert(!retval->empty()); // we check this earlier
        return {retval->data(), retval->data() + retval->size()};
    }
    bool eof()const {
        return isEof;
    }
    virtual void setFree(ROBuffer buffer) {// don't even bother
        if (last && last->data() == buffer.first) {
            delete last; // hax
            last = NULL;
        }
    }
    virtual ~VirtualThreadPacketReader(){}
};
// Installs the appropriate packet reader for a thread's bool decoder:
// the cross-thread reader when real threading is active, otherwise the
// in-process virtual-thread reader fed straight from the mux.
void VP8ComponentDecoder::initialize_bool_decoder(int thread_id, int target_thread_state) {
    if (NUM_THREADS > 1 && g_threaded) {
        thread_state_[target_thread_state]->bool_decoder_.init(new ActualThreadPacketReader(thread_id,
                                                                                           getWorker(target_thread_state),
                                                                                           &send_to_actual_thread_state));
    } else {
        thread_state_[target_thread_state]->bool_decoder_.init(new VirtualThreadPacketReader(thread_id, &mux_reader_, &mux_splicer));
    }
}
// Prepares the ThreadState in slot target_thread_state to decode the row
// range assigned to logical thread `thread_id`: resets the probability model,
// rewinds per-component contexts to the top of each framebuffer, and loads
// the [luma_y_start, luma_y_end) slice from the thread-handoff table.
template <bool force_memory_optimized>
void VP8ComponentDecoder::initialize_thread_id(int thread_id, int target_thread_state,
                                               BlockBasedImagePerChannel<force_memory_optimized>& framebuffer) {
    if (target_thread_state) {
        always_assert(spin_workers_);
    }
    TimingHarness::timing[thread_id%NUM_THREADS][TimingHarness::TS_STREAM_MULTIPLEX_STARTED] = TimingHarness::get_time_us();
    //if (thread_id != target_thread_state) {
    reset_thread_model_state(target_thread_state);
    //}
    thread_state_[target_thread_state]->decode_index_ = 0;
    // Rewind every present component to its top row.
    for (unsigned int i = 0; i < framebuffer.size(); ++i) {
        if (framebuffer[i] != NULL) {
            thread_state_[target_thread_state]->is_top_row_.at(i) = true;
            thread_state_[target_thread_state]->num_nonzeros_.at(i).resize(framebuffer[i]->block_width() << 1);
            thread_state_[target_thread_state]->context_.at(i)
                = framebuffer[i]->begin(thread_state_[target_thread_state]->num_nonzeros_.at(i).begin());
        }
    }
    /* initialize the bool decoder */
    int index = thread_id;
    always_assert((size_t)index < streams_.size());
    thread_state_[target_thread_state]->is_valid_range_ = false;
    thread_state_[target_thread_state]->luma_splits_.resize(2);
    if ((size_t)index < thread_handoff_.size()) {
        thread_state_[target_thread_state]->luma_splits_[0] = thread_handoff_[thread_id].luma_y_start;
        thread_state_[target_thread_state]->luma_splits_[1] = thread_handoff_[thread_id].luma_y_end;
    } else {
        // we have extra threads that are not in use during this decode.
        // set them to zero sized work (i.e. starting at end and ending at end)
        // since they don't have any rows to decode
        thread_state_[target_thread_state]->luma_splits_[0] = thread_handoff_.back().luma_y_end; // <- not a typo
        thread_state_[target_thread_state]->luma_splits_[1] = thread_handoff_.back().luma_y_end; // both start and end at end
    }
    //fprintf(stderr, "tid: %d %d -> %d\n", thread_id, thread_state_[target_thread_state]->luma_splits_[0],
    // thread_state_[target_thread_state]->luma_splits_[1]);
    TimingHarness::timing[thread_id%NUM_THREADS][TimingHarness::TS_STREAM_MULTIPLEX_FINISHED] = TimingHarness::get_time_us();
}
// Baseline-decode setup: binds the mux splicer to the spin workers and then
// performs the shared decoder-state initialization, returning the per-thread
// row handoffs.
std::vector<ThreadHandoff> VP8ComponentDecoder::initialize_baseline_decoder(
    const UncompressedComponents * const colldata,
    Sirikata::Array1d<BlockBasedImagePerChannel<true>,
                      MAX_NUM_THREADS>& framebuffer) {
    mux_splicer.init(spin_workers_);
    return initialize_decoder_state(colldata, framebuffer);
}
// Marks the splicer as exhausted. On the first call, pushes one empty
// buffer (the EOF sentinel) to every stream that is bound to a thread so
// each consumer observes end-of-stream.
void VP8ComponentDecoder::SendToVirtualThread::set_eof() {
    using namespace Sirikata;
    if (!eof) {
        for (unsigned int thread_id = 0; thread_id < Sirikata::MuxReader::MAX_STREAM_ID; ++thread_id) {
            for (int i = 0; i < Sirikata::MuxReader::MAX_STREAM_ID; ++i) {
                if (thread_target[i] == int8_t(thread_id)) {
                    auto eof = new ResizableByteBufferListNode; // NOTE(review): local shadows the member `eof`
                    eof->stream_id = i;
                    send(eof); // sends an EOF flag (empty buffer)
                }
            }
        }
    }
    eof = true;
}
// Default state: not at EOF, no stream bound to any thread (-1), and no
// worker array attached yet (set later via init()).
VP8ComponentDecoder::SendToVirtualThread::SendToVirtualThread(){
    eof = false;
    for (int i = 0; i < Sirikata::MuxReader::MAX_STREAM_ID; ++i) {
        thread_target[i] = -1;
    }
    this->all_workers = NULL;
}
// Attaches the worker array used to forward buffers to real threads.
void VP8ComponentDecoder::SendToVirtualThread::init(GenericWorker * all_workers) {
    this->all_workers = all_workers;
}
// Routes one buffer to its consumer: queued locally in single-threaded mode,
// otherwise forwarded over the worker channel bound to the buffer's stream.
void VP8ComponentDecoder::SendToVirtualThread::send(ResizableByteBufferListNode *data) {
    always_assert(data);
    always_assert(data->stream_id < sizeof(vbuffers) / sizeof(vbuffers[0]) &&
                  "INVALID SEND STREAM ID");
    if (!g_threaded || NUM_THREADS == 1) {
        /*
        fprintf(stderr, "VSending (%d) %d bytes of data : ptr %p\n",
                (int)data->stream_id, (int)data->size(),
                (void*)data);*/
        vbuffers[data->stream_id].push(data);
        return;
    }
    auto thread_target_id = thread_target[data->stream_id];
    /*
    fprintf(stderr, "Sending (%d) %d bytes of data : ptr %p to %d\n",
            (int)data->stream_id, (int)data->size(),
            (void*)data, thread_target_id);
    */
    if (thread_target_id >= 0) {
        int retval = all_workers[thread_target_id].send_more_data(data);
        always_assert(retval == 0 && "Communication with thread lost");
    }else {
        always_assert(false && "Cannot send to thread that wasn't bound");
    }
}
// Pulls every remaining packet out of the mux and dispatches each to its
// stream's consumer; flips to EOF state when the reader reports an error/end.
void VP8ComponentDecoder::SendToVirtualThread::drain(Sirikata::MuxReader&reader) {
    while (!reader.eof) {
        ResizableByteBufferListNode *data = new ResizableByteBufferListNode;
        auto ret = reader.nextDataPacket(*data);
        if (ret.second != Sirikata::JpegError::nil()) {
            set_eof();
            break;
        }
        data->stream_id = ret.first;
        always_assert(data->size()); // the protocol can't store empty runs
        send(data);
    }
}
// Returns the next buffer for stream_id. Serves queued buffers first;
// otherwise reads packets from the mux, parking packets that belong to other
// streams, until one for stream_id arrives or the mux ends. The empty EOF
// sentinel buffer is returned but deliberately left at the head of the queue.
ResizableByteBufferListNode* VP8ComponentDecoder::SendToVirtualThread::read(Sirikata::MuxReader&reader, uint8_t stream_id) {
    using namespace Sirikata;
    always_assert(stream_id < sizeof(vbuffers) / sizeof(vbuffers[0]) &&
                  "INVALID READ STREAM ID");
    if (!vbuffers[stream_id].empty()) {
        auto retval = vbuffers[stream_id].front();
        if (retval->size() == 0) {
            always_assert(eof);
        } else { // keep this placeholder there
            vbuffers[stream_id].pop();
        }
        return retval;
    }
    if (eof) {
        always_assert(false);
        return NULL;
    }
    while (!eof) {
        ResizableByteBufferListNode *data = new ResizableByteBufferListNode;
        auto ret = reader.nextDataPacket(*data);
        if (ret.second != JpegError::nil()) {
            set_eof();
            break;
        }
        data->stream_id = ret.first;
        bool buffer_it = ret.first != stream_id;
        if (buffer_it) {
            send(data); // belongs to a different stream: park it
        } else {
            return data;
        }
    }
    // EOF was reached while reading: the sentinel pushed by set_eof() may now
    // be queued for this stream.
    if (!vbuffers[stream_id].empty()) {
        auto retval = vbuffers[stream_id].front();
        if (retval->size() == 0) {
            always_assert(eof);
        } else { // keep this placeholder there
            vbuffers[stream_id].pop();
        }
        return retval;
    }
    return NULL;
}
// Consumes the entire mux, dispatching every packet to its stream's consumer
// until an error/end-of-stream flips the EOF state.
void VP8ComponentDecoder::SendToVirtualThread::read_all(Sirikata::MuxReader&reader) {
    using namespace Sirikata;
    while (!eof) {
        ResizableByteBufferListNode *data = new ResizableByteBufferListNode;
        auto ret = reader.nextDataPacket(*data);
        if (ret.second != JpegError::nil()) {
            set_eof();
            break;
        }
        data->stream_id = ret.first;
        always_assert(data->size());
        send(data);
    }
}
// Installs quantization tables for each present color channel and, if the
// caller did not supply a thread-handoff table, reads the legacy handoff
// header from the stream: a one-byte thread count ("mark") followed by
// (mark - 1) little-endian uint16 luma split rows. Returns the handoff table
// (empty on read failure or corrupt input).
template <bool force_memory_optimized>
std::vector<ThreadHandoff> VP8ComponentDecoder::initialize_decoder_state(const UncompressedComponents * const colldata,
                                                                         Sirikata::Array1d<BlockBasedImagePerChannel<force_memory_optimized>,
                                                                                           MAX_NUM_THREADS>& framebuffer) {
    if (colldata->get_num_components() > (int)BlockType::Y) {
        ProbabilityTablesBase::set_quantization_table(BlockType::Y,
                                                      colldata->get_quantization_tables(BlockType::Y));
    }
    if (colldata->get_num_components() > (int)BlockType::Cb) {
        ProbabilityTablesBase::set_quantization_table(BlockType::Cb,
                                                      colldata->get_quantization_tables(BlockType::Cb));
    }
    if (colldata->get_num_components() > (int)BlockType::Cr) {
        ProbabilityTablesBase::set_quantization_table(BlockType::Cr,
                                                      colldata->get_quantization_tables(BlockType::Cr));
    }
#ifdef ALLOW_FOUR_COLORS
    if (colldata->get_num_components() > (int)BlockType::Ck) {
        ProbabilityTablesBase::set_quantization_table(BlockType::Ck,
                                                      colldata->get_quantization_tables(BlockType::Ck));
    }
#endif
    if (thread_handoff_.empty()) {
        /* read and verify "x" mark */
        unsigned char mark {};
        const bool ok = str_in->Read( &mark, 1 ).second == Sirikata::JpegError::nil();
        if (!ok) {
            return std::vector<ThreadHandoff>();
        }
        if (mark == 0) {
            // FIX: a zero thread-count byte would make `mark - 1` negative
            // below and request a (size_t)-1 element vector — treat a zero
            // mark as corrupt input instead of attempting a huge allocation.
            return std::vector<ThreadHandoff>();
        }
        ThreadHandoff th;
        memset(&th, 0, sizeof(th));
        th.num_overhang_bits = ThreadHandoff::LEGACY_OVERHANG_BITS; // to make sure we don't use this value
        th.luma_y_end = colldata->block_height(0);
        thread_handoff_.insert(thread_handoff_.end(), mark, th);
        std::vector<uint16_t> luma_splits_tmp(mark - 1);
        IOUtil::ReadFull(str_in, luma_splits_tmp.data(), sizeof(uint16_t) * (mark - 1));
        int sfv_lcm = colldata->min_vertical_luma_multiple();
        for (int i = 0; i + 1 < mark; ++i) {
            thread_handoff_[i].luma_y_end = htole16(luma_splits_tmp[i]);
            // Splits must land on an MCU boundary for every component.
            if (thread_handoff_[i].luma_y_end % sfv_lcm) {
                custom_exit(ExitCode::THREADING_PARTIAL_MCU);
            }
        }
        for (int i = 1; i < mark; ++i) {
            thread_handoff_[i].luma_y_start = thread_handoff_[i - 1].luma_y_end;
        }
    }
    /* read entire chunk into memory */
    //initialize_thread_id(0, 0, framebuffer[0]);
    if (thread_handoff_.size()) {
        // The final slice always runs to the bottom of the image.
        thread_handoff_.back().luma_y_end = colldata->block_height(0);
    }
    return thread_handoff_;
}
// Drains all remaining mux packets into the per-stream buffers/threads.
void VP8ComponentDecoder::flush() {
    mux_splicer.drain(mux_reader_);
}
// Top-level decode driver. On the first call it builds per-component
// framebuffers and initializes per-thread state; subsequent calls resume.
// In threaded mode it fans out one worker per thread, drains the mux, and
// joins; in single-threaded mode it emulates each worker ("virtual thread")
// in sequence, returning CODING_PARTIAL between resumable steps.
CodingReturnValue VP8ComponentDecoder::decode_chunk(UncompressedComponents * const colldata)
{
    mux_splicer.init(spin_workers_);
    /* cmpc is a global variable with the component count */
    /* construct 4x4 VP8 blocks to hold 8x8 JPEG blocks */
    if ( thread_state_[0] == nullptr || thread_state_[0]->context_[0].isNil() ) {
        /* first call */
        BlockBasedImagePerChannel<false> framebuffer;
        framebuffer.memset(0);
        for (size_t i = 0; i < framebuffer.size() && int( i ) < colldata->get_num_components(); ++i) {
            framebuffer[i] = &colldata->full_component_write((BlockType)i);
        }
        Sirikata::Array1d<BlockBasedImagePerChannel<false>, MAX_NUM_THREADS> all_framebuffers;
        for (size_t i = 0; i < all_framebuffers.size(); ++i) {
            all_framebuffers[i] = framebuffer;
        }
        size_t num_threads_needed = initialize_decoder_state(colldata,
                                                             all_framebuffers).size();
        // FIX: validate the thread count BEFORE using it as a loop bound.
        // The check previously ran after the loops below, letting an
        // out-of-range count index per-thread tables first.
        if (num_threads_needed > NUM_THREADS || num_threads_needed == 0) {
            return CODING_ERROR;
        }
        for (size_t i = 0;i < num_threads_needed; ++i) {
            map_logical_thread_to_physical_thread(i, i);
        }
        for (size_t i = 0;i < num_threads_needed; ++i) {
            initialize_thread_id(i, i, framebuffer);
            if (!do_threading_) {
                break; // single-threaded mode initializes lazily, one at a time
            }
        }
    }
    if (do_threading_) {
        // Fan out one worker per thread, drain the mux into them, then join.
        for (unsigned int thread_id = 0; thread_id < NUM_THREADS; ++thread_id) {
            unsigned int cur_spin_worker = thread_id;
            spin_workers_[cur_spin_worker].work
                = std::bind(worker_thread,
                            thread_state_[thread_id],
                            thread_id,
                            colldata,
                            mux_splicer.thread_target,
                            getWorker(cur_spin_worker),
                            &send_to_actual_thread_state);
            spin_workers_[cur_spin_worker].activate_work();
        }
        flush();
        for (unsigned int thread_id = 0; thread_id < NUM_THREADS; ++thread_id) {
            unsigned int cur_spin_worker = thread_id;
            TimingHarness::timing[thread_id][TimingHarness::TS_THREAD_WAIT_STARTED] = TimingHarness::get_time_us();
            spin_workers_[cur_spin_worker].main_wait_for_done();
            TimingHarness::timing[thread_id][TimingHarness::TS_THREAD_WAIT_FINISHED] = TimingHarness::get_time_us();
        }
        // join on all threads
    } else {
        // Resume the virtual thread that was mid-decode on the previous call.
        if (virtual_thread_id_ != -1) {
            TimingHarness::timing[0][TimingHarness::TS_ARITH_STARTED] = TimingHarness::get_time_us();
            CodingReturnValue ret = thread_state_[0]->vp8_decode_thread(0, colldata);
            if (ret == CODING_PARTIAL) {
                return ret;
            }
            TimingHarness::timing[0][TimingHarness::TS_ARITH_FINISHED] = TimingHarness::get_time_us();
        }
        // wait for "threads"
        virtual_thread_id_ += 1; // first time's a charm
        for (unsigned int thread_id = virtual_thread_id_; thread_id < NUM_THREADS; ++thread_id, ++virtual_thread_id_) {
            BlockBasedImagePerChannel<false> framebuffer;
            framebuffer.memset(0);
            for (size_t i = 0; i < framebuffer.size() && int( i ) < colldata->get_num_components(); ++i) {
                framebuffer[i] = &colldata->full_component_write((BlockType)i);
            }
            initialize_thread_id(thread_id, 0, framebuffer);
            thread_state_[0]->bool_decoder_.init(new VirtualThreadPacketReader(thread_id, &mux_reader_, &mux_splicer));
            TimingHarness::timing[thread_id][TimingHarness::TS_ARITH_STARTED] = TimingHarness::get_time_us();
            CodingReturnValue ret;
            if ((ret = thread_state_[0]->vp8_decode_thread(0, colldata)) == CODING_PARTIAL) {
                return ret;
            }
            TimingHarness::timing[thread_id][TimingHarness::TS_ARITH_FINISHED] = TimingHarness::get_time_us();
        }
    }
    TimingHarness::timing[0][TimingHarness::TS_JPEG_RECODE_STARTED] = TimingHarness::get_time_us();
    // Mark all components complete and account for mux framing overhead.
    for (int component = 0; component < colldata->get_num_components(); ++component) {
        colldata->worker_mark_cmp_finished((BlockType)component);
    }
    colldata->worker_update_coefficient_position_progress( 64 );
    colldata->worker_update_bit_progress( 16 );
    write_byte_bill(Billing::DELIMITERS, true, mux_reader_.getOverhead());
    return CODING_DONE;
}
|
/* -*-mode:c++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
#include <algorithm>
#include <cassert>
#include <iostream>
#include <tuple>
#include "bitops.hh"
#include "component_info.hh"
#include "uncompressed_components.hh"
#include "jpgcoder.hh"
#include "vp8_decoder.hh"
#include "../io/Reader.hh"
#include "../vp8/decoder/decoder.hh"
using namespace std;
/* Bind the decoder to its input stream and record the per-thread handoff
 * table (row ranges etc.) supplied by the caller. */
void VP8ComponentDecoder::initialize( Sirikata::DecoderReader *input,
                                      const std::vector<ThreadHandoff>& thread_handoff)
{
    str_in = input;
    mux_reader_.init(input);
    thread_handoff_ = thread_handoff;
}
/* Decode one row of blocks for `component` at row `curr_y`, using the
 * decoder state owned by `target_thread_id`. */
void VP8ComponentDecoder::decode_row(int target_thread_id,
                                     BlockBasedImagePerChannel<true>& image_data, // FIXME: set image_data to true
                                     Sirikata::Array1d<uint32_t,
                                                       (uint32_t)ColorChannel::
                                                       NumBlockTypes> component_size_in_blocks,
                                     int component,
                                     int curr_y) {
    thread_state_[target_thread_id]->decode_rowt(image_data,
                                                 component_size_in_blocks,
                                                 component,
                                                 curr_y);
}
/* Construct the decoder.  The mux_reader_ arguments (allocator, 8, 0) are
 * forwarded to Sirikata::MuxReader — see that class for their meaning.
 * virtual_thread_id_ == -1 marks "no virtual thread started yet" for the
 * single-threaded resume path in decode_chunk. */
VP8ComponentDecoder::VP8ComponentDecoder(bool do_threading)
    : VP8ComponentEncoder(do_threading),
      mux_reader_(Sirikata::JpegAllocator<uint8_t>(),
                  8,
                  0) {
    virtual_thread_id_ = -1;
}
/* Trivial destructor; members clean themselves up. */
VP8ComponentDecoder::~VP8ComponentDecoder() {
}
#ifdef ALLOW_FOUR_COLORS
#define ProbabilityTablesTuple(left, above, right) \
ProbabilityTables<left, above, right, TEMPLATE_ARG_COLOR0>, \
ProbabilityTables<left, above, right, TEMPLATE_ARG_COLOR1>, \
ProbabilityTables<left, above, right, TEMPLATE_ARG_COLOR2>, \
ProbabilityTables<left, above, right, TEMPLATE_ARG_COLOR3>
#define EACH_BLOCK_TYPE(left, above, right) BlockType::Y, \
BlockType::Cb, \
BlockType::Cr, \
BlockType::Ck
#else
#define ProbabilityTablesTuple(left, above, right) \
ProbabilityTables<left, above, right, TEMPLATE_ARG_COLOR0>, \
ProbabilityTables<left, above, right, TEMPLATE_ARG_COLOR1>, \
ProbabilityTables<left, above, right, TEMPLATE_ARG_COLOR2>
#define EACH_BLOCK_TYPE BlockType::Y, \
BlockType::Cb, \
BlockType::Cr
#endif
/* Reset state slot `target_thread_state` so it can (re)decode the rows
 * assigned to logical thread `thread_id`: model state first, then a fresh
 * bool decoder. */
void VP8ComponentDecoder::clear_thread_state(int thread_id, int target_thread_state, BlockBasedImagePerChannel<true>& framebuffer) {
    initialize_thread_id(thread_id, target_thread_state, framebuffer);
    initialize_bool_decoder(thread_id, target_thread_state);
}
class ActualThreadPacketReader : public PacketReader{
GenericWorker *worker;
VP8ComponentDecoder::SendToActualThread *base;
uint8_t stream_id;
ResizableByteBufferListNode* last;
public:
ActualThreadPacketReader(uint8_t stream_id, GenericWorker*worker, VP8ComponentDecoder::SendToActualThread*base) {
this->worker = worker;
this->stream_id = stream_id;
this->base = base;
}
// returns a buffer with at least sizeof(BD_VALUE) before it
virtual ROBuffer getNext() {
if (!base->vbuffers[stream_id].empty()) {
auto retval = base->vbuffers[stream_id].front();
if (!retval->empty()) {
base->vbuffers[stream_id].pop();
}
if (retval->empty()) {
isEof = true;
return {NULL, NULL};
}
return {retval->data(), retval->data() + retval->size()};
}
while(!isEof) {
auto dat = worker->batch_recv_data();
for (unsigned int i = 0; i < dat.count; ++i) {
ResizableByteBufferListNode* lnode = (ResizableByteBufferListNode*) dat.data[i];
if (dat.count == 1 && lnode->stream_id == stream_id && lnode && lnode->size()) {
assert(stream_id == lnode->stream_id);
last = lnode;
return {lnode->data(), lnode->data() + lnode->size()};
} else {
base->vbuffers[lnode->stream_id].push(lnode);
}
}
if (!base->vbuffers[stream_id].empty()) {
return getNext(); // recursive call, 1 deep
}
if (dat.return_code < 0) {
isEof = true; // hmm... should we bail here?
always_assert(false);
}
}
return {NULL, NULL};
}
bool eof()const {
return isEof;
}
virtual void setFree(ROBuffer buffer) {// don't even bother
if (last && last->data() == buffer.first) {
delete last; // hax
last = NULL;
}
}
virtual ~ActualThreadPacketReader(){}
};
/* Entry point executed on a physical worker thread: attach a packet reader
 * for every stream routed to this thread in `thread_target`, then decode
 * until this thread's share of the image is complete. */
void VP8ComponentDecoder::worker_thread(ThreadState *ts, int thread_id, UncompressedComponents * const colldata,
                                        int8_t thread_target[Sirikata::MuxReader::MAX_STREAM_ID],
                                        GenericWorker *worker,
                                        SendToActualThread *send_to_actual_thread_state) {
    TimingHarness::timing[thread_id][TimingHarness::TS_ARITH_STARTED] = TimingHarness::get_time_us();
    for (uint8_t i = 0; i < Sirikata::MuxReader::MAX_STREAM_ID; ++i) {
        if (thread_target[i] == int8_t(thread_id)) {
            ts->bool_decoder_.init(new ActualThreadPacketReader(i,worker, send_to_actual_thread_state));
        }
    }
    // CODING_PARTIAL means "need more input": keep pumping until finished.
    while (ts->vp8_decode_thread(thread_id, colldata) == CODING_PARTIAL) {
    }
    TimingHarness::timing[thread_id][TimingHarness::TS_ARITH_FINISHED] = TimingHarness::get_time_us();
}
/*
 * PacketReader used in single-threaded ("virtual thread") mode: packets are
 * pulled synchronously from the MuxReader via SendToVirtualThread::read,
 * which demultiplexes and buffers packets belonging to other streams.
 * A zero-length packet signals EOF for this stream.
 */
class VirtualThreadPacketReader : public PacketReader{
    VP8ComponentDecoder::SendToVirtualThread*base;
    uint8_t stream_id;
    Sirikata::MuxReader*mux_reader_;
    Sirikata::MuxReader::ResizableByteBuffer * last; // last buffer handed out; freed in setFree
public:
    VirtualThreadPacketReader(uint8_t stream_id, Sirikata::MuxReader * mr, VP8ComponentDecoder::SendToVirtualThread*base) {
        this->base = base;
        this->stream_id = stream_id;
        this->mux_reader_ = mr;
        last = NULL;
    }
    // returns a buffer with at least sizeof(BD_VALUE) before it
    virtual ROBuffer getNext() {
        auto retval = base->read(*mux_reader_, stream_id);
        if (retval->size() == 0) {
            isEof = true; // zero-length packet == EOF
            return {NULL, NULL};
        }
        always_assert(!retval->empty()); // we check this earlier
        return {retval->data(), retval->data() + retval->size()};
    }
    bool eof()const {
        return isEof;
    }
    virtual void setFree(ROBuffer buffer) {// don't even bother
        if (last && last->data() == buffer.first) {
            delete last; // hax
            last = NULL;
        }
    }
    virtual ~VirtualThreadPacketReader(){}
};
/* Give state slot `target_thread_state` a packet reader for logical stream
 * `thread_id`: a worker-queue-fed reader when real threads are in use,
 * otherwise one that pulls straight from the mux reader. */
void VP8ComponentDecoder::initialize_bool_decoder(int thread_id, int target_thread_state) {
    if (NUM_THREADS > 1 && g_threaded) {
        thread_state_[target_thread_state]->bool_decoder_.init(new ActualThreadPacketReader(thread_id,
                                                                                            getWorker(target_thread_state),
                                                                                            &send_to_actual_thread_state));
    } else {
        thread_state_[target_thread_state]->bool_decoder_.init(new VirtualThreadPacketReader(thread_id, &mux_reader_, &mux_splicer));
    }
}
/* Prepare state slot `target_thread_state` to decode logical thread
 * `thread_id`'s share of the image: reset the probability model, rebind the
 * per-channel iterators to `framebuffer`, and set the luma row range from
 * the handoff table.  Threads beyond the handoff table get an empty range. */
template <bool force_memory_optimized>
void VP8ComponentDecoder::initialize_thread_id(int thread_id, int target_thread_state,
                                               BlockBasedImagePerChannel<force_memory_optimized>& framebuffer) {
    if (target_thread_state) {
        always_assert(spin_workers_);
    }
    TimingHarness::timing[thread_id%NUM_THREADS][TimingHarness::TS_STREAM_MULTIPLEX_STARTED] = TimingHarness::get_time_us();
    //if (thread_id != target_thread_state) {
    reset_thread_model_state(target_thread_state);
    //}
    thread_state_[target_thread_state]->decode_index_ = 0;
    for (unsigned int i = 0; i < framebuffer.size(); ++i) {
        if (framebuffer[i] != NULL) {
            thread_state_[target_thread_state]->is_top_row_.at(i) = true;
            // two rows of nonzero counts (current + previous) per channel
            thread_state_[target_thread_state]->num_nonzeros_.at(i).resize(framebuffer[i]->block_width() << 1);
            thread_state_[target_thread_state]->context_.at(i)
                = framebuffer[i]->begin(thread_state_[target_thread_state]->num_nonzeros_.at(i).begin());
        }
    }
    /* initialize the bool decoder */
    int index = thread_id;
    always_assert((size_t)index < streams_.size());
    thread_state_[target_thread_state]->is_valid_range_ = false;
    thread_state_[target_thread_state]->luma_splits_.resize(2);
    if ((size_t)index < thread_handoff_.size()) {
        thread_state_[target_thread_state]->luma_splits_[0] = thread_handoff_[thread_id].luma_y_start;
        thread_state_[target_thread_state]->luma_splits_[1] = thread_handoff_[thread_id].luma_y_end;
    } else {
        // we have extra threads that are not in use during this decode.
        // set them to zero sized work (i.e. starting at end and ending at end)
        // since they don't have any rows to decode
        thread_state_[target_thread_state]->luma_splits_[0] = thread_handoff_.back().luma_y_end; // <- not a typo
        thread_state_[target_thread_state]->luma_splits_[1] = thread_handoff_.back().luma_y_end; // both start and end at end
    }
    //fprintf(stderr, "tid: %d %d -> %d\n", thread_id, thread_state_[target_thread_state]->luma_splits_[0],
    //        thread_state_[target_thread_state]->luma_splits_[1]);
    TimingHarness::timing[thread_id%NUM_THREADS][TimingHarness::TS_STREAM_MULTIPLEX_FINISHED] = TimingHarness::get_time_us();
}
/* Public wrapper: attach the splicer to the spin workers, then build the
 * decoder state.  Returns the (possibly newly-read) thread handoff table. */
std::vector<ThreadHandoff> VP8ComponentDecoder::initialize_baseline_decoder(
    const UncompressedComponents * const colldata,
    Sirikata::Array1d<BlockBasedImagePerChannel<true>,
                      MAX_NUM_THREADS>& framebuffer) {
    mux_splicer.init(spin_workers_);
    return initialize_decoder_state(colldata, framebuffer);
}
/* Broadcast EOF: push one empty buffer (the EOF sentinel) to every stream
 * that is bound to a thread, then latch the eof flag.  NOTE(review): the
 * local `eof` node below shadows the member flag of the same name — legal
 * but worth renaming if this is ever touched again. */
void VP8ComponentDecoder::SendToVirtualThread::set_eof() {
    using namespace Sirikata;
    if (!eof) {
        for (unsigned int thread_id = 0; thread_id < Sirikata::MuxReader::MAX_STREAM_ID; ++thread_id) {
            for (int i = 0; i < Sirikata::MuxReader::MAX_STREAM_ID; ++i) {
                if (thread_target[i] == int8_t(thread_id)) {
                    auto eof = new ResizableByteBufferListNode;
                    eof->stream_id = i;
                    send(eof); // sends an EOF flag (empty buffer)
                }
            }
        }
    }
    eof = true;
}
/* Start with no EOF seen, every stream unbound (-1), and no worker array;
 * init() supplies the workers later. */
VP8ComponentDecoder::SendToVirtualThread::SendToVirtualThread(){
    eof = false;
    for (int i = 0; i < Sirikata::MuxReader::MAX_STREAM_ID; ++i) {
        thread_target[i] = -1;
    }
    this->all_workers = NULL;
}
/* Late-bind the worker array used by send() in threaded mode. */
void VP8ComponentDecoder::SendToVirtualThread::init(GenericWorker * all_workers) {
    this->all_workers = all_workers;
}
/* Route one packet: in single-threaded mode it is queued on the stream's
 * local buffer; in threaded mode it is forwarded to the worker the stream
 * was bound to.  Sending to an unbound stream is a fatal logic error.
 * Ownership of `data` passes to the receiver. */
void VP8ComponentDecoder::SendToVirtualThread::send(ResizableByteBufferListNode *data) {
    always_assert(data);
    always_assert(data->stream_id < sizeof(vbuffers) / sizeof(vbuffers[0]) &&
                  "INVALID SEND STREAM ID");
    if (!g_threaded || NUM_THREADS == 1) {
        /*
        fprintf(stderr, "VSending (%d) %d bytes of data : ptr %p\n",
                (int)data->stream_id, (int)data->size(),
                (void*)data);*/
        vbuffers[data->stream_id].push(data);
        return;
    }
    auto thread_target_id = thread_target[data->stream_id];
    /*
    fprintf(stderr, "Sending (%d) %d bytes of data : ptr %p to %d\n",
            (int)data->stream_id, (int)data->size(),
            (void*)data, thread_target_id);
    */
    if (thread_target_id >= 0) {
        int retval = all_workers[thread_target_id].send_more_data(data);
        always_assert(retval == 0 && "Communication with thread lost");
    }else {
        always_assert(false && "Cannot send to thread that wasn't bound");
    }
}
/* Pump the mux reader dry: forward every remaining packet to its stream,
 * and broadcast EOF once the reader reports an error/end of input. */
void VP8ComponentDecoder::SendToVirtualThread::drain(Sirikata::MuxReader&reader) {
    while (!reader.eof) {
        ResizableByteBufferListNode *data = new ResizableByteBufferListNode;
        auto ret = reader.nextDataPacket(*data);
        if (ret.second != Sirikata::JpegError::nil()) {
            set_eof();
            break;
        }
        data->stream_id = ret.first;
        always_assert(data->size()); // the protocol can't store empty runs
        send(data);
    }
}
/* Synchronously fetch the next packet for `stream_id`.  Buffered packets are
 * served first; otherwise packets are pulled from the reader, parking any
 * that belong to other streams.  A zero-length buffered packet is the EOF
 * sentinel and is deliberately left queued so later reads see it too.
 * Returns NULL only after EOF with nothing buffered.  NOTE(review): the
 * caller appears to own the returned node — confirm against setFree(). */
ResizableByteBufferListNode* VP8ComponentDecoder::SendToVirtualThread::read(Sirikata::MuxReader&reader, uint8_t stream_id) {
    using namespace Sirikata;
    always_assert(stream_id < sizeof(vbuffers) / sizeof(vbuffers[0]) &&
                  "INVALID READ STREAM ID");
    if (!vbuffers[stream_id].empty()) {
        auto retval = vbuffers[stream_id].front();
        if (retval->size() == 0) {
            always_assert(eof);
        } else { // keep this placeholder there
            vbuffers[stream_id].pop();
        }
        return retval;
    }
    if (eof) {
        always_assert(false);
        return NULL;
    }
    while (!eof) {
        ResizableByteBufferListNode *data = new ResizableByteBufferListNode;
        auto ret = reader.nextDataPacket(*data);
        if (ret.second != JpegError::nil()) {
            set_eof();
            break;
        }
        data->stream_id = ret.first;
        bool buffer_it = ret.first != stream_id;
        if (buffer_it) {
            send(data); // belongs to another stream: park it
        } else {
            return data;
        }
    }
    // EOF hit while pulling: set_eof() may have queued our sentinel
    if (!vbuffers[stream_id].empty()) {
        auto retval = vbuffers[stream_id].front();
        if (retval->size() == 0) {
            always_assert(eof);
        } else { // keep this placeholder there
            vbuffers[stream_id].pop();
        }
        return retval;
    }
    return NULL;
}
/* Consume the entire mux stream, routing every packet to its stream buffer
 * or bound thread; finishes by broadcasting EOF. */
void VP8ComponentDecoder::SendToVirtualThread::read_all(Sirikata::MuxReader&reader) {
    using namespace Sirikata;
    while (!eof) {
        ResizableByteBufferListNode *data = new ResizableByteBufferListNode;
        auto ret = reader.nextDataPacket(*data);
        if (ret.second != JpegError::nil()) {
            set_eof();
            break;
        }
        data->stream_id = ret.first;
        always_assert(data->size());
        send(data);
    }
}
/* Install quantization tables for each present component and, if no handoff
 * table was supplied, read the legacy header: one count byte ("x" mark)
 * giving the number of threads, then (count-1) little-endian uint16 luma row
 * splits.  Returns the (possibly empty, on read failure) handoff table.
 * NOTE(review): htole16() is applied to a value READ from the stream — on
 * big-endian hosts letoh16() looks like the intended direction; confirm. */
template <bool force_memory_optimized>
std::vector<ThreadHandoff> VP8ComponentDecoder::initialize_decoder_state(const UncompressedComponents * const colldata,
                                                                         Sirikata::Array1d<BlockBasedImagePerChannel<force_memory_optimized>,
                                                                                           MAX_NUM_THREADS>& framebuffer) {
    if (colldata->get_num_components() > (int)BlockType::Y) {
        ProbabilityTablesBase::set_quantization_table(BlockType::Y,
                                                      colldata->get_quantization_tables(BlockType::Y));
    }
    if (colldata->get_num_components() > (int)BlockType::Cb) {
        ProbabilityTablesBase::set_quantization_table(BlockType::Cb,
                                                      colldata->get_quantization_tables(BlockType::Cb));
    }
    if (colldata->get_num_components() > (int)BlockType::Cr) {
        ProbabilityTablesBase::set_quantization_table(BlockType::Cr,
                                                      colldata->get_quantization_tables(BlockType::Cr));
    }
#ifdef ALLOW_FOUR_COLORS
    if (colldata->get_num_components() > (int)BlockType::Ck) {
        ProbabilityTablesBase::set_quantization_table(BlockType::Ck,
                                                      colldata->get_quantization_tables(BlockType::Ck));
    }
#endif
    if (thread_handoff_.empty()) {
        /* read and verify "x" mark */
        unsigned char mark {};
        const bool ok = str_in->Read( &mark, 1 ).second == Sirikata::JpegError::nil();
        if (!ok) {
            return std::vector<ThreadHandoff>(); // header truncated: no threads
        }
        ThreadHandoff th;
        memset(&th, 0, sizeof(th));
        th.num_overhang_bits = ThreadHandoff::LEGACY_OVERHANG_BITS; // to make sure we don't use this value
        th.luma_y_end = colldata->block_height(0);
        thread_handoff_.insert(thread_handoff_.end(), mark, th); // `mark` identical entries
        std::vector<uint16_t> luma_splits_tmp(mark - 1);
        IOUtil::ReadFull(str_in, luma_splits_tmp.data(), sizeof(uint16_t) * (mark - 1));
        int sfv_lcm = colldata->min_vertical_luma_multiple();
        for (int i = 0; i + 1 < mark; ++i) {
            thread_handoff_[i].luma_y_end = htole16(luma_splits_tmp[i]);
            if (thread_handoff_[i].luma_y_end % sfv_lcm) {
                // splits must fall on MCU boundaries
                custom_exit(ExitCode::THREADING_PARTIAL_MCU);
            }
        }
        for (int i = 1; i < mark; ++i) {
            thread_handoff_[i].luma_y_start = thread_handoff_[i - 1].luma_y_end;
        }
    }
    /* read entire chunk into memory */
    //initialize_thread_id(0, 0, framebuffer[0]);
    if (thread_handoff_.size()) {
        thread_handoff_.back().luma_y_end = colldata->block_height(0);
    }
    return thread_handoff_;
}
/* Drain all remaining multiplexed input and route each packet to its
 * destination thread (or per-stream buffer in single-threaded mode). */
void VP8ComponentDecoder::flush() {
    mux_splicer.drain(mux_reader_);
}
/* Do-nothing task assigned to spin workers whose ThreadState slot is null. */
namespace{void nop(){}}
/*
 * Decode one chunk of the arithmetic-coded stream into colldata.
 * On the first call this sets up per-thread decoder state and the
 * logical->physical thread mapping; later calls resume decoding.
 * Returns CODING_PARTIAL when more input is needed (single-threaded resume
 * path), CODING_ERROR for an invalid thread count, CODING_DONE when all
 * components are fully decoded.
 */
CodingReturnValue VP8ComponentDecoder::decode_chunk(UncompressedComponents * const colldata)
{
    mux_splicer.init(spin_workers_);
    /* cmpc is a global variable with the component count */
    /* construct 4x4 VP8 blocks to hold 8x8 JPEG blocks */
    if ( thread_state_[0] == nullptr || thread_state_[0]->context_[0].isNil() ) {
        /* first call */
        BlockBasedImagePerChannel<false> framebuffer;
        framebuffer.memset(0);
        for (size_t i = 0; i < framebuffer.size() && int( i ) < colldata->get_num_components(); ++i) {
            framebuffer[i] = &colldata->full_component_write((BlockType)i);
        }
        Sirikata::Array1d<BlockBasedImagePerChannel<false>, MAX_NUM_THREADS> all_framebuffers;
        for (size_t i = 0; i < all_framebuffers.size(); ++i) {
            all_framebuffers[i] = framebuffer;
        }
        size_t num_threads_needed = initialize_decoder_state(colldata,
                                                             all_framebuffers).size();
        for (size_t i = 0;i < num_threads_needed; ++i) {
            map_logical_thread_to_physical_thread(i, i);
        }
        for (size_t i = 0;i < num_threads_needed; ++i) {
            initialize_thread_id(i, i, framebuffer);
            if (!do_threading_) {
                break; // single-threaded mode initializes only slot 0 here
            }
        }
        if (num_threads_needed > NUM_THREADS || num_threads_needed == 0) {
            return CODING_ERROR;
        }
    }
    if (do_threading_) {
        for (unsigned int thread_id = 0; thread_id < NUM_THREADS; ++thread_id) {
            unsigned int cur_spin_worker = thread_id;
            if (!thread_state_[thread_id]) {
                // unused logical thread: give the worker a no-op so the
                // join loop below still has something to wait on
                spin_workers_[cur_spin_worker].work
                    = &nop;
            } else {
                spin_workers_[cur_spin_worker].work
                    = std::bind(worker_thread,
                                thread_state_[thread_id],
                                thread_id,
                                colldata,
                                mux_splicer.thread_target,
                                getWorker(cur_spin_worker),
                                &send_to_actual_thread_state);
            }
            spin_workers_[cur_spin_worker].activate_work();
        }
        flush(); // feed every worker its packets
        for (unsigned int thread_id = 0; thread_id < NUM_THREADS; ++thread_id) {
            unsigned int cur_spin_worker = thread_id;
            TimingHarness::timing[thread_id][TimingHarness::TS_THREAD_WAIT_STARTED] = TimingHarness::get_time_us();
            spin_workers_[cur_spin_worker].main_wait_for_done();
            TimingHarness::timing[thread_id][TimingHarness::TS_THREAD_WAIT_FINISHED] = TimingHarness::get_time_us();
        }
        // join on all threads
    } else {
        if (virtual_thread_id_ != -1) {
            // resume the virtual thread that returned CODING_PARTIAL last time
            TimingHarness::timing[0][TimingHarness::TS_ARITH_STARTED] = TimingHarness::get_time_us();
            CodingReturnValue ret = thread_state_[0]->vp8_decode_thread(0, colldata);
            if (ret == CODING_PARTIAL) {
                return ret;
            }
            TimingHarness::timing[0][TimingHarness::TS_ARITH_FINISHED] = TimingHarness::get_time_us();
        }
        // wait for "threads"
        virtual_thread_id_ += 1; // first time's a charm
        for (unsigned int thread_id = virtual_thread_id_; thread_id < NUM_THREADS; ++thread_id, ++virtual_thread_id_) {
            BlockBasedImagePerChannel<false> framebuffer;
            framebuffer.memset(0);
            for (size_t i = 0; i < framebuffer.size() && int( i ) < colldata->get_num_components(); ++i) {
                framebuffer[i] = &colldata->full_component_write((BlockType)i);
            }
            // all virtual threads share physical state slot 0
            initialize_thread_id(thread_id, 0, framebuffer);
            thread_state_[0]->bool_decoder_.init(new VirtualThreadPacketReader(thread_id, &mux_reader_, &mux_splicer));
            TimingHarness::timing[thread_id][TimingHarness::TS_ARITH_STARTED] = TimingHarness::get_time_us();
            CodingReturnValue ret;
            if ((ret = thread_state_[0]->vp8_decode_thread(0, colldata)) == CODING_PARTIAL) {
                return ret;
            }
            TimingHarness::timing[thread_id][TimingHarness::TS_ARITH_FINISHED] = TimingHarness::get_time_us();
        }
    }
    TimingHarness::timing[0][TimingHarness::TS_JPEG_RECODE_STARTED] = TimingHarness::get_time_us();
    for (int component = 0; component < colldata->get_num_components(); ++component) {
        colldata->worker_mark_cmp_finished((BlockType)component);
    }
    colldata->worker_update_coefficient_position_progress( 64 );
    colldata->worker_update_bit_progress( 16 );
    write_byte_bill(Billing::DELIMITERS, true, mux_reader_.getOverhead());
    return CODING_DONE;
}
|
3343_3
|
crossvul
|
cc
|
CWE-1187
|
DEPRECATED: Use of Uninitialized Resource - This entry has been deprecated because it was a duplicate of CWE-908.
|
c
|
/* $OpenBSD: doas.c,v 1.57 2016/06/19 19:29:43 martijn Exp $ */
/*
* Copyright (c) 2015 Ted Unangst <tedu@openbsd.org>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/ioctl.h>
#if defined(HAVE_INTTYPES_H)
#include <inttypes.h>
#endif
#include <limits.h>
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include <err.h>
#include <unistd.h>
#include <pwd.h>
#include <grp.h>
#include <syslog.h>
#include <errno.h>
#include <fcntl.h>
#if defined(HAVE_LOGIN_CAP_H)
#include <login_cap.h>
#endif
#if defined(USE_BSD_AUTH)
#include <bsd_auth.h>
#include <readpassphrase.h>
#endif
#if defined(USE_PAM)
#include <security/pam_appl.h>
#if defined(OPENPAM) /* BSD, MacOS & certain Linux distros */
#include <security/openpam.h>
static struct pam_conv pamc = { openpam_ttyconv, NULL };
#elif defined(__LINUX_PAM__) /* Linux */
#include <security/pam_misc.h>
static struct pam_conv pamc = { misc_conv, NULL };
#elif defined(SOLARIS_PAM) /* illumos & Solaris */
#include "pm_pam_conv.h"
static struct pam_conv pamc = { pam_tty_conv, NULL };
#endif /* OPENPAM */
#endif /* USE_PAM */
#include "doas.h"
/* Print a brief usage synopsis to stderr and terminate with status 1. */
static void
usage(void)
{
	const char *synopsis =
	    "usage: doas [-ns] [-a style] [-C config] [-u user]"
	    " command [args]\n";

	fputs(synopsis, stderr);
	exit(1);
}
#ifdef linux
/*
 * Minimal stand-in for BSD errc(3) on Linux: print the optional message plus
 * the error described by `code`, then exit with status `eval`.
 *
 * Fixes two defects: the old version passed `format` straight to
 * fprintf("%s"), which is undefined behavior for the NULL that doas actually
 * passes (errc(1, EPERM, NULL)); and it exited with the errno value instead
 * of `eval`, contrary to errc(3) semantics.
 */
void
errc(int eval, int code, const char *format)
{
	if (format != NULL)
		fprintf(stderr, "%s: %s\n", format, strerror(code));
	else
		fprintf(stderr, "%s\n", strerror(code));
	exit(eval);
}
#endif
/*
 * Resolve `s` to a uid: first as a user name, then as a numeric id.
 * Returns 0 and stores the uid on success, -1 on failure.
 *
 * Fix (uninitialized resource): on the __linux__/__NetBSD__ path the old
 * code never set `errstr` yet tested it after sscanf(), reading an
 * uninitialized pointer — garbage strings like "9x" or "zz" could be
 * accepted (or valid ids rejected) depending on stack contents.  The errstr
 * test now belongs exclusively to the strtonum() branch, and the sscanf()
 * return value is checked instead.
 */
static int
parseuid(const char *s, uid_t *uid)
{
	struct passwd *pw;
#if !defined(__linux__) && !defined(__NetBSD__)
	const char *errstr = NULL;
#else
	int status;
#endif

	if ((pw = getpwnam(s)) != NULL) {
		*uid = pw->pw_uid;
		return 0;
	}
#if !defined(__linux__) && !defined(__NetBSD__)
	*uid = strtonum(s, 0, UID_MAX, &errstr);
	if (errstr)
		return -1;
#else
	status = sscanf(s, "%d", uid);
	if (status != 1)
		return -1;
#endif
	return 0;
}
/* Resolve `s` to a uid and report whether it names `desired`.
 * Returns 0 on a match, -1 on parse failure or mismatch. */
static int
uidcheck(const char *s, uid_t desired)
{
	uid_t resolved;

	if (parseuid(s, &resolved) == 0 && resolved == desired)
		return 0;
	return -1;
}
/*
 * Resolve `s` to a gid: first as a group name, then as a numeric id.
 * Returns 0 and stores the gid on success, -1 on failure.
 *
 * Fix (uninitialized resource): same defect as parseuid() — the
 * __linux__/__NetBSD__ path tested an `errstr` that was never assigned,
 * reading an uninitialized pointer and ignoring sscanf() failures.  The
 * sscanf() return value is now checked and `errstr` is confined (and
 * initialized) in the strtonum() branch.
 */
static int
parsegid(const char *s, gid_t *gid)
{
	struct group *gr;
#if !defined(__linux__) && !defined(__NetBSD__)
	const char *errstr = NULL;
#else
	int status;
#endif

	if ((gr = getgrnam(s)) != NULL) {
		*gid = gr->gr_gid;
		return 0;
	}
#if !defined(__linux__) && !defined(__NetBSD__)
	*gid = strtonum(s, 0, GID_MAX, &errstr);
	if (errstr)
		return -1;
#else
	status = sscanf(s, "%d", gid);
	if (status != 1)
		return -1;
#endif
	return 0;
}
/*
 * Decide whether rule `r` applies to this invocation.
 * The rule identity is either ":group" (matched against the supplied group
 * list) or a user (matched against the invoking uid).  If the rule names a
 * target user, a command, or an argument vector, each must match exactly.
 * Returns 1 on match, 0 otherwise.
 */
static int
match(uid_t uid, gid_t *groups, int ngroups, uid_t target, const char *cmd,
    const char **cmdargs, struct rule *r)
{
	int i;
	if (r->ident[0] == ':') {
		/* group rule: invoking user must belong to the named group */
		gid_t rgid;
		if (parsegid(r->ident + 1, &rgid) == -1)
			return 0;
		for (i = 0; i < ngroups; i++) {
			if (rgid == groups[i])
				break;
		}
		if (i == ngroups)
			return 0;
	} else {
		if (uidcheck(r->ident, uid) != 0)
			return 0;
	}
	if (r->target && uidcheck(r->target, target) != 0)
		return 0;
	if (r->cmd) {
		if (strcmp(r->cmd, cmd))
			return 0;
		if (r->cmdargs) {
			/* if arguments were given, they should match explicitly */
			for (i = 0; r->cmdargs[i]; i++) {
				if (!cmdargs[i])
					return 0;
				if (strcmp(r->cmdargs[i], cmdargs[i]))
					return 0;
			}
			/* extra actual arguments beyond the rule's list: no match */
			if (cmdargs[i])
				return 0;
		}
	}
	return 1;
}
/*
 * Scan every rule; the LAST matching rule wins, so later config lines
 * override earlier ones.  *lastr receives the winning rule (or NULL if no
 * rule matched) so the caller can inspect its options.
 * Returns nonzero iff the winning rule's action is PERMIT.
 */
static int
permit(uid_t uid, gid_t *groups, int ngroups, struct rule **lastr,
    uid_t target, const char *cmd, const char **cmdargs)
{
	int i;
	*lastr = NULL;
	for (i = 0; i < nrules; i++) {
		if (match(uid, groups, ngroups, target, cmd,
		    cmdargs, rules[i]))
			*lastr = rules[i];
	}
	if (!*lastr)
		return 0;
	return (*lastr)->action == PERMIT;
}
/*
 * Open and parse the doas config file with the yacc parser (which fills the
 * global rules/nrules).  When `checkperms` is set, refuse configs that are
 * group/other-writable or not owned by root.  Exits on any error.
 */
static void
parseconfig(const char *filename, int checkperms)
{
	extern FILE *yyfp;
	extern int yyparse(void);
	struct stat sb;
	yyfp = fopen(filename, "r");
	if (!yyfp)
		err(1, checkperms ? "doas is not enabled, %s" :
		    "could not open config file %s", filename);
	if (checkperms) {
		/* fstat the open fd, not the path, to avoid a TOCTOU race */
		if (fstat(fileno(yyfp), &sb) != 0)
			err(1, "fstat(\"%s\")", filename);
		if ((sb.st_mode & (S_IWGRP|S_IWOTH)) != 0)
			errx(1, "%s is writable by group or other", filename);
		if (sb.st_uid != 0)
			errx(1, "%s is not owned by root", filename);
	}
	yyparse();
	fclose(yyfp);
	if (parse_errors)
		exit(1);
}
/*
 * Handle the -C mode: drop privileges back to the invoking uid (so an
 * arbitrary user-supplied config is parsed without root), parse it with the
 * permission checks disabled, and report permit/deny for the given command.
 * Always exits.
 */
static void
checkconfig(const char *confpath, int argc, char **argv,
    uid_t uid, gid_t *groups, int ngroups, uid_t target)
{
	struct rule *rule;
	int status;
#if defined(__linux__) || defined(__FreeBSD__)
	status = setresuid(uid, uid, uid);
#else
	status = setreuid(uid, uid);
#endif
	if (status == -1)
	{
		printf("doas: Unable to set UID\n");
		exit(1);
	}
	parseconfig(confpath, 0);
	if (!argc)
		exit(0);
	if (permit(uid, groups, ngroups, &rule, target, argv[0],
	    (const char **)argv + 1)) {
		printf("permit%s\n", (rule->options & NOPASS) ? " nopass" : "");
		exit(0);
	} else {
		printf("deny\n");
		exit(1);
	}
}
#if defined(USE_BSD_AUTH)
/*
 * Authenticate `myname` via the BSD auth framework.  With `persist`, first
 * try the tty's cached verified-auth state (TIOCCHKVERAUTH) and, after a
 * successful password, re-arm it for five minutes (TIOCSETVERAUTH).
 * Exits on authentication failure.
 */
static void
authuser(char *myname, char *login_style, int persist)
{
	char *challenge = NULL, *response, rbuf[1024], cbuf[128];
	auth_session_t *as;
	int fd = -1;
	if (persist)
		fd = open("/dev/tty", O_RDWR);
	if (fd != -1) {
		if (ioctl(fd, TIOCCHKVERAUTH) == 0)
			goto good;
	}
	if (!(as = auth_userchallenge(myname, login_style, "auth-doas",
	    &challenge)))
		errx(1, "Authorization failed");
	if (!challenge) {
		/* no style-specific challenge: build a default prompt */
		char host[HOST_NAME_MAX + 1];
		if (gethostname(host, sizeof(host)))
			snprintf(host, sizeof(host), "?");
		snprintf(cbuf, sizeof(cbuf),
		    "\rdoas (%.32s@%.32s) password: ", myname, host);
		challenge = cbuf;
	}
	response = readpassphrase(challenge, rbuf, sizeof(rbuf),
	    RPP_REQUIRE_TTY);
	if (response == NULL && errno == ENOTTY) {
		syslog(LOG_AUTHPRIV | LOG_NOTICE,
		    "tty required for %s", myname);
		errx(1, "a tty is required");
	}
	if (!auth_userresponse(as, response, 0)) {
		syslog(LOG_AUTHPRIV | LOG_NOTICE,
		    "failed auth for %s", myname);
		errc(1, EPERM, NULL);
	}
	/* scrub the password from the stack */
	explicit_bzero(rbuf, sizeof(rbuf));
good:
	if (fd != -1) {
		int secs = 5 * 60;
		ioctl(fd, TIOCSETVERAUTH, &secs);
		close(fd);
	}
}
#endif
/*
 * doas entry point: parse options, figure out the invoking user and groups,
 * consult the rule set, authenticate (BSD auth or PAM) unless the matched
 * rule says nopass, switch identity to the target user, and exec the
 * requested command with a sanitized environment.
 */
int
main(int argc, char **argv)
{
	const char *safepath = SAFE_PATH;
	const char *confpath = NULL;
	char *shargv[] = { NULL, NULL };
	char *sh;
	const char *cmd;
	char cmdline[LINE_MAX];
	char myname[_PW_NAME_LEN + 1];
	struct passwd *original_pw, *target_pw;
	struct rule *rule;
	uid_t uid;
	uid_t target = 0;
	gid_t groups[NGROUPS_MAX + 1];
	int ngroups;
	int i, ch;
	int sflag = 0;
	int nflag = 0;
	char cwdpath[PATH_MAX];
	const char *cwd;
	char *login_style = NULL;
	char **envp;
#ifndef linux
	setprogname("doas");
#endif
#ifndef linux
	/* don't inherit stray descriptors from the caller */
	closefrom(STDERR_FILENO + 1);
#endif
	uid = getuid();
	while ((ch = getopt(argc, argv, "a:C:nsu:")) != -1) {
/*	while ((ch = getopt(argc, argv, "a:C:Lnsu:")) != -1) { */
		switch (ch) {
		case 'a':
			login_style = optarg;
			break;
		case 'C':
			confpath = optarg;
			break;
/*		case 'L':
			i = open("/dev/tty", O_RDWR);
			if (i != -1)
				ioctl(i, TIOCCLRVERAUTH);
			exit(i != -1);
*/
		case 'u':
			if (parseuid(optarg, &target) != 0)
				errx(1, "unknown user");
			break;
		case 'n':
			nflag = 1;
			break;
		case 's':
			sflag = 1;
			break;
		default:
			usage();
			break;
		}
	}
	argv += optind;
	argc -= optind;
	/* -C excludes -s; otherwise exactly one of -s / a command is required */
	if (confpath) {
		if (sflag)
			usage();
	} else if ((!sflag && !argc) || (sflag && argc))
		usage();
	original_pw = getpwuid(uid);
	if (! original_pw)
		err(1, "getpwuid failed");
	if (strlcpy(myname, original_pw->pw_name, sizeof(myname)) >= sizeof(myname))
		errx(1, "pw_name too long");
	ngroups = getgroups(NGROUPS_MAX, groups);
	if (ngroups == -1)
		err(1, "can't get groups");
	/* the real gid may not be in the supplementary list; append it */
	groups[ngroups++] = getgid();
	if (sflag) {
		sh = getenv("SHELL");
		if (sh == NULL || *sh == '\0') {
			shargv[0] = strdup(original_pw->pw_shell);
			if (shargv[0] == NULL)
				err(1, NULL);
		} else
			shargv[0] = sh;
		argv = shargv;
		argc = 1;
	}
	if (confpath) {
		/* -C mode: report permit/deny and exit (never returns) */
		checkconfig(confpath, argc, argv, uid, groups, ngroups,
		    target);
		exit(1);	/* fail safe */
	}
	if (geteuid())
		errx(1, "not installed setuid");
	parseconfig(DOAS_CONF, 1);
	/* cmdline is used only for logging, no need to abort on truncate */
	(void)strlcpy(cmdline, argv[0], sizeof(cmdline));
	for (i = 1; i < argc; i++) {
		if (strlcat(cmdline, " ", sizeof(cmdline)) >= sizeof(cmdline))
			break;
		if (strlcat(cmdline, argv[i], sizeof(cmdline)) >= sizeof(cmdline))
			break;
	}
	cmd = argv[0];
	if (!permit(uid, groups, ngroups, &rule, target, cmd,
	    (const char **)argv + 1)) {
		syslog(LOG_AUTHPRIV | LOG_NOTICE,
		    "failed command for %s: %s", myname, cmdline);
		errc(1, EPERM, NULL);
	}
	if (!(rule->options & NOPASS)) {
		if (nflag)
			errx(1, "Authorization required");
#if defined(USE_BSD_AUTH)
		authuser(myname, login_style, rule->options & PERSIST);
#elif defined(USE_PAM)
#define PAM_END(msg) do { 						\
	syslog(LOG_ERR, "%s: %s", msg, pam_strerror(pamh, pam_err)); 	\
	warnx("%s: %s", msg, pam_strerror(pamh, pam_err));		\
	pam_end(pamh, pam_err);						\
	exit(EXIT_FAILURE);						\
} while (/*CONSTCOND*/0)
		pam_handle_t *pamh = NULL;
		int pam_err;
/* #ifndef linux */
		int temp_stdin;
		/* openpam_ttyconv checks if stdin is a terminal and
		 * if it is then does not bother to open /dev/tty.
		 * The result is that PAM writes the password prompt
		 * directly to stdout.  In scenarios where stdin is a
		 * terminal, but stdout is redirected to a file
		 * e.g. by running doas ls &> ls.out interactively,
		 * the password prompt gets written to ls.out as well.
		 * By closing stdin first we forces PAM to read/write
		 * to/from the terminal directly.  We restore stdin
		 * after authenticating. */
		temp_stdin = dup(STDIN_FILENO);
		if (temp_stdin == -1)
			err(1, "dup");
		close(STDIN_FILENO);
/* #else */
		/* force password prompt to display on stderr, not stdout */
		int temp_stdout = dup(1);
		if (temp_stdout == -1)
			err(1, "dup");
		close(1);
		if (dup2(2, 1) == -1)
			err(1, "dup2");
/* #endif */
		/* NOTE(review): both temp_stdin and temp_stdout are created
		 * unconditionally here, but only one is restored below
		 * (#ifndef linux) — the other fd is leaked into the exec'd
		 * command; verify this is intended. */
		pam_err = pam_start("doas", myname, &pamc, &pamh);
		if (pam_err != PAM_SUCCESS) {
			if (pamh != NULL)
				PAM_END("pam_start");
			syslog(LOG_ERR, "pam_start failed: %s",
			    pam_strerror(pamh, pam_err));
			errx(EXIT_FAILURE, "pam_start failed");
		}
		switch (pam_err = pam_authenticate(pamh, PAM_SILENT)) {
		case PAM_SUCCESS:
			switch (pam_err = pam_acct_mgmt(pamh, PAM_SILENT)) {
			case PAM_SUCCESS:
				break;
			case PAM_NEW_AUTHTOK_REQD:
				/* expired password: force a change now */
				pam_err = pam_chauthtok(pamh,
				    PAM_SILENT|PAM_CHANGE_EXPIRED_AUTHTOK);
				if (pam_err != PAM_SUCCESS)
					PAM_END("pam_chauthtok");
				break;
			case PAM_AUTH_ERR:
			case PAM_USER_UNKNOWN:
			case PAM_MAXTRIES:
				syslog(LOG_AUTHPRIV | LOG_NOTICE,
				    "failed auth for %s", myname);
				errx(EXIT_FAILURE, "second authentication failed");
				break;
			default:
				PAM_END("pam_acct_mgmt");
				break;
			}
			break;
		case PAM_AUTH_ERR:
		case PAM_USER_UNKNOWN:
		case PAM_MAXTRIES:
			syslog(LOG_AUTHPRIV | LOG_NOTICE,
			    "failed auth for %s", myname);
			errx(EXIT_FAILURE, "authentication failed");
			break;
		default:
			PAM_END("pam_authenticate");
			break;
		}
		pam_end(pamh, pam_err);
#ifndef linux
		/* Re-establish stdin */
		if (dup2(temp_stdin, STDIN_FILENO) == -1)
			err(1, "dup2");
		close(temp_stdin);
#else
		/* Re-establish stdout */
		close(1);
		if (dup2(temp_stdout, 1) == -1)
			err(1, "dup2");
#endif
#else
#error	No auth module!
#endif
	}
/*
	if (pledge("stdio rpath getpw exec id", NULL) == -1)
		err(1, "pledge");
*/
	target_pw = getpwuid(target);
	if (! target_pw)
		errx(1, "no passwd entry for target");
	/* switch identity: groups first (while still privileged), then uid */
#if defined(HAVE_LOGIN_CAP_H)
	if (setusercontext(NULL, target_pw, target, LOGIN_SETGROUP |
	    LOGIN_SETPRIORITY | LOGIN_SETRESOURCES | LOGIN_SETUMASK |
	    LOGIN_SETUSER) != 0)
		errx(1, "failed to set user context for target");
#else
#if defined(__linux__) || defined(__FreeBSD__) || defined(__NetBSD__)
	if (setresgid(target_pw->pw_gid, target_pw->pw_gid, target_pw->pw_gid) == -1)
		err(1, "setresgid");
#else
	if (setregid(target_pw->pw_gid, target_pw->pw_gid) == -1)
		err(1, "setregid");
#endif
	if (initgroups(target_pw->pw_name, target_pw->pw_gid) == -1)
		err(1, "initgroups");
#if defined(__linux__) || defined(__FreeBSD__) || defined(__NetBSD__)
	if (setresuid(target, target, target) == -1)
		err(1, "setresuid");
#else
	if (setreuid(target, target) == -1)
		err(1, "setreuid");
#endif
#endif
/*
	if (pledge("stdio rpath exec", NULL) == -1)
		err(1, "pledge");
*/
	if (getcwd(cwdpath, sizeof(cwdpath)) == NULL)
		cwd = "(failed)";
	else
		cwd = cwdpath;
/*
	if (pledge("stdio exec", NULL) == -1)
		err(1, "pledge");
*/
	syslog(LOG_AUTHPRIV | LOG_INFO, "%s ran command %s as %s from %s",
	    myname, cmdline, target_pw->pw_name, cwd);
	/* build the sanitized environment for the target command */
	envp = prepenv(rule, original_pw, target_pw);
	if (rule->cmd) {
		/* rule pinned a command: use the safe PATH to resolve it */
		if (setenv("PATH", safepath, 1) == -1)
			err(1, "failed to set PATH '%s'", safepath);
	}
	execvpe(cmd, argv, envp);
	if (errno == ENOENT)
		errx(1, "%s: command not found", cmd);
	err(1, "%s", cmd);
}
|
/* $OpenBSD: doas.c,v 1.57 2016/06/19 19:29:43 martijn Exp $ */
/*
* Copyright (c) 2015 Ted Unangst <tedu@openbsd.org>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/ioctl.h>
#if defined(HAVE_INTTYPES_H)
#include <inttypes.h>
#endif
#include <limits.h>
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include <err.h>
#include <unistd.h>
#include <pwd.h>
#include <grp.h>
#include <syslog.h>
#include <errno.h>
#include <fcntl.h>
#if defined(HAVE_LOGIN_CAP_H)
#include <login_cap.h>
#endif
#if defined(USE_BSD_AUTH)
#include <bsd_auth.h>
#include <readpassphrase.h>
#endif
#if defined(USE_PAM)
#include <security/pam_appl.h>
#if defined(OPENPAM) /* BSD, MacOS & certain Linux distros */
#include <security/openpam.h>
static struct pam_conv pamc = { openpam_ttyconv, NULL };
#elif defined(__LINUX_PAM__) /* Linux */
#include <security/pam_misc.h>
static struct pam_conv pamc = { misc_conv, NULL };
#elif defined(SOLARIS_PAM) /* illumos & Solaris */
#include "pm_pam_conv.h"
static struct pam_conv pamc = { pam_tty_conv, NULL };
#endif /* OPENPAM */
#endif /* USE_PAM */
#include "doas.h"
/* Print a usage synopsis to stderr and terminate with failure status. */
static void
usage(void)
{
	const char *synopsis =
	    "usage: doas [-ns] [-a style] [-C config] [-u user]"
	    " command [args]\n";

	fputs(synopsis, stderr);
	exit(1);
}
#ifdef linux
/*
 * Minimal stand-in for BSD errc(3) on Linux.
 *
 * errc(3) semantics: print the message (if any) followed by the error
 * string for `code`, then exit with status `eval`.  The previous version
 * exited with `code` instead of `eval` and passed a possibly-NULL format
 * straight through fprintf("%s", ...), which is undefined behavior —
 * callers in this file invoke errc(1, EPERM, NULL).
 */
void
errc(int eval, int code, const char *format)
{
	if (format != NULL)
		fprintf(stderr, "%s: %s\n", format, strerror(code));
	else
		fprintf(stderr, "%s\n", strerror(code));
	exit(eval);
}
#endif
/*
 * Resolve a user name or numeric uid string into *uid.
 * Returns 0 on success, -1 on failure.
 *
 * The numeric fallback previously used sscanf("%d"), which wrote a signed
 * int through a uid_t pointer and silently accepted negative values and
 * trailing garbage ("123abc") — unacceptable in a setuid program.  It now
 * uses strtoul with full-string and range validation.
 */
static int
parseuid(const char *s, uid_t *uid)
{
	struct passwd *pw;
#if !defined(__linux__) && !defined(__NetBSD__)
	const char *errstr = NULL;
#else
	char *ep;
	unsigned long ul;
#endif
	/* A known user name takes precedence over a numeric id. */
	if ((pw = getpwnam(s)) != NULL) {
		*uid = pw->pw_uid;
		return 0;
	}
#if !defined(__linux__) && !defined(__NetBSD__)
	*uid = strtonum(s, 0, UID_MAX, &errstr);
	if (errstr)
		return -1;
#else
	/* Reject empty strings and negatives up front; strtoul would
	 * otherwise wrap "-1" to a huge value. */
	if (s[0] == '\0' || s[0] == '-')
		return -1;
	errno = 0;
	ul = strtoul(s, &ep, 10);
	if (errno != 0 || *ep != '\0' || ul > (uid_t)-1)
		return -1;
	*uid = (uid_t)ul;
#endif
	return 0;
}
/* Return 0 iff the string `s` resolves to exactly the uid `desired`. */
static int
uidcheck(const char *s, uid_t desired)
{
	uid_t parsed;

	if (parseuid(s, &parsed) != 0)
		return -1;
	return (parsed == desired) ? 0 : -1;
}
/*
 * Resolve a group name or numeric gid string into *gid.
 * Returns 0 on success, -1 on failure.
 *
 * As with parseuid, the numeric fallback previously used sscanf("%d"),
 * writing a signed int through a gid_t pointer and accepting negative
 * values and trailing garbage.  Replaced with validated strtoul.
 */
static int
parsegid(const char *s, gid_t *gid)
{
	struct group *gr;
#if !defined(__linux__) && !defined(__NetBSD__)
	const char *errstr = NULL;
#else
	char *ep;
	unsigned long ul;
#endif
	/* A known group name takes precedence over a numeric id. */
	if ((gr = getgrnam(s)) != NULL) {
		*gid = gr->gr_gid;
		return 0;
	}
#if !defined(__linux__) && !defined(__NetBSD__)
	*gid = strtonum(s, 0, GID_MAX, &errstr);
	if (errstr)
		return -1;
#else
	if (s[0] == '\0' || s[0] == '-')
		return -1;
	errno = 0;
	ul = strtoul(s, &ep, 10);
	if (errno != 0 || *ep != '\0' || ul > (gid_t)-1)
		return -1;
	*gid = (gid_t)ul;
#endif
	return 0;
}
/*
 * Decide whether one config rule applies to this invocation.
 * Returns nonzero when the rule's identity, target, command, and
 * (optionally) argument list all match; zero otherwise.
 */
static int
match(uid_t uid, gid_t *groups, int ngroups, uid_t target, const char *cmd,
    const char **cmdargs, struct rule *r)
{
	int i;
	/* An ident beginning with ':' names a group; otherwise a user. */
	if (r->ident[0] == ':') {
		gid_t rgid;
		if (parsegid(r->ident + 1, &rgid) == -1)
			return 0;
		/* The invoking user must belong to the rule's group. */
		for (i = 0; i < ngroups; i++) {
			if (rgid == groups[i])
				break;
		}
		if (i == ngroups)
			return 0;
	} else {
		if (uidcheck(r->ident, uid) != 0)
			return 0;
	}
	/* A rule without an explicit target matches any target user. */
	if (r->target && uidcheck(r->target, target) != 0)
		return 0;
	/* A rule without a cmd matches any command. */
	if (r->cmd) {
		if (strcmp(r->cmd, cmd))
			return 0;
		if (r->cmdargs) {
			/* if arguments were given, they should match explicitly */
			for (i = 0; r->cmdargs[i]; i++) {
				if (!cmdargs[i])
					return 0;
				if (strcmp(r->cmdargs[i], cmdargs[i]))
					return 0;
			}
			/* No extra trailing arguments allowed either. */
			if (cmdargs[i])
				return 0;
		}
	}
	return 1;
}
/*
 * Scan every parsed rule; the LAST matching rule wins (doas semantics).
 * The winning rule is stored in *lastr, and the return value is nonzero
 * iff that rule's action is PERMIT.  With no matching rule, returns 0
 * and *lastr is NULL.
 */
static int
permit(uid_t uid, gid_t *groups, int ngroups, struct rule **lastr,
    uid_t target, const char *cmd, const char **cmdargs)
{
	int i;
	*lastr = NULL;
	/* Deliberately no early exit: later rules override earlier ones. */
	for (i = 0; i < nrules; i++) {
		if (match(uid, groups, ngroups, target, cmd,
		    cmdargs, rules[i]))
			*lastr = rules[i];
	}
	if (!*lastr)
		return 0;
	return (*lastr)->action == PERMIT;
}
/*
 * Open `filename` and run the yacc parser over it (results land in the
 * global rules[]/nrules via the parser).  When checkperms is set — the
 * normal /etc/doas.conf path — the file must be owned by root and not
 * writable by group or other, so unprivileged users cannot grant
 * themselves rules.  Exits on any error.
 */
static void
parseconfig(const char *filename, int checkperms)
{
	extern FILE *yyfp;
	extern int yyparse(void);
	struct stat sb;
	yyfp = fopen(filename, "r");
	if (!yyfp)
		err(1, checkperms ? "doas is not enabled, %s" :
		    "could not open config file %s", filename);
	if (checkperms) {
		/* fstat the open handle, not the path, to avoid a
		 * check/use race. */
		if (fstat(fileno(yyfp), &sb) != 0)
			err(1, "fstat(\"%s\")", filename);
		if ((sb.st_mode & (S_IWGRP|S_IWOTH)) != 0)
			errx(1, "%s is writable by group or other", filename);
		if (sb.st_uid != 0)
			errx(1, "%s is not owned by root", filename);
	}
	yyparse();
	fclose(yyfp);
	/* parse_errors is set by the parser's error handler. */
	if (parse_errors)
		exit(1);
}
/*
 * Handle `doas -C conf [cmd [args]]`: parse an alternate config file as
 * the invoking user (privileges fully dropped first) and report whether
 * the rules would permit the given command.  Exits in all paths.
 */
static void
checkconfig(const char *confpath, int argc, char **argv,
    uid_t uid, gid_t *groups, int ngroups, uid_t target)
{
	struct rule *rule;
	int status;
	/*
	 * Drop privileges irrevocably before touching the user-supplied
	 * config path.  Use setresuid where available; __NetBSD__ added
	 * here for consistency with the identical privilege-drop sites
	 * later in this file.
	 */
#if defined(__linux__) || defined(__FreeBSD__) || defined(__NetBSD__)
	status = setresuid(uid, uid, uid);
#else
	status = setreuid(uid, uid);
#endif
	if (status == -1)
	{
		printf("doas: Unable to set UID\n");
		exit(1);
	}
	/* checkperms=0: an alternate config need not be root-owned. */
	parseconfig(confpath, 0);
	/* No command given: a clean parse is success. */
	if (!argc)
		exit(0);
	if (permit(uid, groups, ngroups, &rule, target, argv[0],
	    (const char **)argv + 1)) {
		printf("permit%s\n", (rule->options & NOPASS) ? " nopass" : "");
		exit(0);
	} else {
		printf("deny\n");
		exit(1);
	}
}
#if defined(USE_BSD_AUTH)
/*
 * Authenticate the invoking user via BSD auth(3), prompting on the tty.
 * With `persist` set, an existing verified-auth ticket on /dev/tty
 * (TIOCCHKVERAUTH) skips the prompt, and a fresh five-minute ticket is
 * recorded (TIOCSETVERAUTH) after any successful authentication.
 * Exits on authentication failure.
 */
static void
authuser(char *myname, char *login_style, int persist)
{
	char *challenge = NULL, *response, rbuf[1024], cbuf[128];
	auth_session_t *as;
	int fd = -1;
	if (persist)
		fd = open("/dev/tty", O_RDWR);
	if (fd != -1) {
		/* Still within the persisted auth window: no prompt needed. */
		if (ioctl(fd, TIOCCHKVERAUTH) == 0)
			goto good;
	}
	if (!(as = auth_userchallenge(myname, login_style, "auth-doas",
	    &challenge)))
		errx(1, "Authorization failed");
	if (!challenge) {
		/* No style-specific challenge: build the default prompt. */
		char host[HOST_NAME_MAX + 1];
		if (gethostname(host, sizeof(host)))
			snprintf(host, sizeof(host), "?");
		snprintf(cbuf, sizeof(cbuf),
		    "\rdoas (%.32s@%.32s) password: ", myname, host);
		challenge = cbuf;
	}
	response = readpassphrase(challenge, rbuf, sizeof(rbuf),
	    RPP_REQUIRE_TTY);
	if (response == NULL && errno == ENOTTY) {
		syslog(LOG_AUTHPRIV | LOG_NOTICE,
		    "tty required for %s", myname);
		errx(1, "a tty is required");
	}
	if (!auth_userresponse(as, response, 0)) {
		syslog(LOG_AUTHPRIV | LOG_NOTICE,
		    "failed auth for %s", myname);
		errc(1, EPERM, NULL);
	}
	/* Scrub the typed password from memory. */
	explicit_bzero(rbuf, sizeof(rbuf));
good:
	if (fd != -1) {
		int secs = 5 * 60;
		ioctl(fd, TIOCSETVERAUTH, &secs);
		close(fd);
	}
}
#endif
/*
 * doas entry point: parse options, load /etc/doas.conf, authenticate the
 * invoking user when the matching rule requires it, switch to the target
 * identity, and exec the requested command.  Runs setuid root; statement
 * order here is security-critical.
 */
int
main(int argc, char **argv)
{
	const char *safepath = SAFE_PATH;
	const char *confpath = NULL;
	char *shargv[] = { NULL, NULL };
	char *sh;
	const char *cmd;
	char cmdline[LINE_MAX];
	char myname[_PW_NAME_LEN + 1];
	struct passwd *original_pw, *target_pw;
	struct rule *rule;
	uid_t uid;
	uid_t target = 0;
	gid_t groups[NGROUPS_MAX + 1];
	int ngroups;
	int i, ch;
	int sflag = 0;
	int nflag = 0;
	char cwdpath[PATH_MAX];
	const char *cwd;
	char *login_style = NULL;
	char **envp;
#ifndef linux
	setprogname("doas");
#endif
	/* Drop any descriptors inherited above stderr (BSD-only helper). */
#ifndef linux
	closefrom(STDERR_FILENO + 1);
#endif
	/* Real uid: who is actually invoking us. */
	uid = getuid();
	while ((ch = getopt(argc, argv, "a:C:nsu:")) != -1) {
	/* while ((ch = getopt(argc, argv, "a:C:Lnsu:")) != -1) { */
		switch (ch) {
		case 'a':	/* BSD auth style */
			login_style = optarg;
			break;
		case 'C':	/* check an alternate config, run nothing */
			confpath = optarg;
			break;
/* case 'L':
	i = open("/dev/tty", O_RDWR);
	if (i != -1)
		ioctl(i, TIOCCLRVERAUTH);
	exit(i != -1);
*/
		case 'u':	/* run as this user instead of root */
			if (parseuid(optarg, &target) != 0)
				errx(1, "unknown user");
			break;
		case 'n':	/* non-interactive: fail rather than prompt */
			nflag = 1;
			break;
		case 's':	/* run the invoking user's shell */
			sflag = 1;
			break;
		default:
			usage();
			break;
		}
	}
	argv += optind;
	argc -= optind;
	/* -C excludes -s; otherwise exactly one of a command or -s. */
	if (confpath) {
		if (sflag)
			usage();
	} else if ((!sflag && !argc) || (sflag && argc))
		usage();
	original_pw = getpwuid(uid);
	if (! original_pw)
		err(1, "getpwuid failed");
	if (strlcpy(myname, original_pw->pw_name, sizeof(myname)) >= sizeof(myname))
		errx(1, "pw_name too long");
	/* Supplementary groups plus the real gid, for rule matching. */
	ngroups = getgroups(NGROUPS_MAX, groups);
	if (ngroups == -1)
		err(1, "can't get groups");
	groups[ngroups++] = getgid();
	if (sflag) {
		/* -s: the "command" becomes $SHELL or the passwd shell. */
		sh = getenv("SHELL");
		if (sh == NULL || *sh == '\0') {
			shargv[0] = strdup(original_pw->pw_shell);
			if (shargv[0] == NULL)
				err(1, NULL);
		} else
			shargv[0] = sh;
		argv = shargv;
		argc = 1;
	}
	if (confpath) {
		/* checkconfig drops privileges and always exits. */
		checkconfig(confpath, argc, argv, uid, groups, ngroups,
		    target);
		exit(1);	/* fail safe */
	}
	/* Everything past here needs the setuid-root effective uid. */
	if (geteuid())
		errx(1, "not installed setuid");
	parseconfig(DOAS_CONF, 1);
	/* cmdline is used only for logging, no need to abort on truncate */
	(void)strlcpy(cmdline, argv[0], sizeof(cmdline));
	for (i = 1; i < argc; i++) {
		if (strlcat(cmdline, " ", sizeof(cmdline)) >= sizeof(cmdline))
			break;
		if (strlcat(cmdline, argv[i], sizeof(cmdline)) >= sizeof(cmdline))
			break;
	}
	cmd = argv[0];
	/* Deny (and log) unless the last matching rule says PERMIT. */
	if (!permit(uid, groups, ngroups, &rule, target, cmd,
	    (const char **)argv + 1)) {
		syslog(LOG_AUTHPRIV | LOG_NOTICE,
		    "failed command for %s: %s", myname, cmdline);
		errc(1, EPERM, NULL);
	}
	if (!(rule->options & NOPASS)) {
		if (nflag)
			errx(1, "Authorization required");
#if defined(USE_BSD_AUTH)
		authuser(myname, login_style, rule->options & PERSIST);
#elif defined(USE_PAM)
/* Log and report a fatal PAM error, then exit — never returns. */
#define PAM_END(msg) do { \
	syslog(LOG_ERR, "%s: %s", msg, pam_strerror(pamh, pam_err)); \
	warnx("%s: %s", msg, pam_strerror(pamh, pam_err)); \
	pam_end(pamh, pam_err); \
	exit(EXIT_FAILURE); \
} while (/*CONSTCOND*/0)
		pam_handle_t *pamh = NULL;
		int pam_err;
/* #ifndef linux */
		int temp_stdin;
		/* openpam_ttyconv checks if stdin is a terminal and
		 * if it is then does not bother to open /dev/tty.
		 * The result is that PAM writes the password prompt
		 * directly to stdout. In scenarios where stdin is a
		 * terminal, but stdout is redirected to a file
		 * e.g. by running doas ls &> ls.out interactively,
		 * the password prompt gets written to ls.out as well.
		 * By closing stdin first we forces PAM to read/write
		 * to/from the terminal directly. We restore stdin
		 * after authenticating. */
		temp_stdin = dup(STDIN_FILENO);
		if (temp_stdin == -1)
			err(1, "dup");
		close(STDIN_FILENO);
/* #else */
		/* force password prompt to display on stderr, not stdout */
		int temp_stdout = dup(1);
		if (temp_stdout == -1)
			err(1, "dup");
		close(1);
		if (dup2(2, 1) == -1)
			err(1, "dup2");
/* #endif */
		pam_err = pam_start("doas", myname, &pamc, &pamh);
		if (pam_err != PAM_SUCCESS) {
			if (pamh != NULL)
				PAM_END("pam_start");
			syslog(LOG_ERR, "pam_start failed: %s",
			    pam_strerror(pamh, pam_err));
			errx(EXIT_FAILURE, "pam_start failed");
		}
		/* Authenticate, then verify account validity/expiry. */
		switch (pam_err = pam_authenticate(pamh, PAM_SILENT)) {
		case PAM_SUCCESS:
			switch (pam_err = pam_acct_mgmt(pamh, PAM_SILENT)) {
			case PAM_SUCCESS:
				break;
			case PAM_NEW_AUTHTOK_REQD:
				/* Expired password: force a change now. */
				pam_err = pam_chauthtok(pamh,
				    PAM_SILENT|PAM_CHANGE_EXPIRED_AUTHTOK);
				if (pam_err != PAM_SUCCESS)
					PAM_END("pam_chauthtok");
				break;
			case PAM_AUTH_ERR:
			case PAM_USER_UNKNOWN:
			case PAM_MAXTRIES:
				syslog(LOG_AUTHPRIV | LOG_NOTICE,
				    "failed auth for %s", myname);
				errx(EXIT_FAILURE, "second authentication failed");
				break;
			default:
				PAM_END("pam_acct_mgmt");
				break;
			}
			break;
		case PAM_AUTH_ERR:
		case PAM_USER_UNKNOWN:
		case PAM_MAXTRIES:
			syslog(LOG_AUTHPRIV | LOG_NOTICE,
			    "failed auth for %s", myname);
			errx(EXIT_FAILURE, "authentication failed");
			break;
		default:
			PAM_END("pam_authenticate");
			break;
		}
		pam_end(pamh, pam_err);
#ifndef linux
		/* Re-establish stdin */
		if (dup2(temp_stdin, STDIN_FILENO) == -1)
			err(1, "dup2");
		close(temp_stdin);
#else
		/* Re-establish stdout */
		close(1);
		if (dup2(temp_stdout, 1) == -1)
			err(1, "dup2");
#endif
#else
#error No auth module!
#endif
	}
/*
	if (pledge("stdio rpath getpw exec id", NULL) == -1)
		err(1, "pledge");
*/
	target_pw = getpwuid(target);
	if (! target_pw)
		errx(1, "no passwd entry for target");
	/* Switch to the target identity: gid, supplementary groups, uid. */
#if defined(HAVE_LOGIN_CAP_H)
	if (setusercontext(NULL, target_pw, target, LOGIN_SETGROUP |
	    LOGIN_SETPRIORITY | LOGIN_SETRESOURCES | LOGIN_SETUMASK |
	    LOGIN_SETUSER) != 0)
		errx(1, "failed to set user context for target");
#else
#if defined(__linux__) || defined(__FreeBSD__) || defined(__NetBSD__)
	if (setresgid(target_pw->pw_gid, target_pw->pw_gid, target_pw->pw_gid) == -1)
		err(1, "setresgid");
#else
	if (setregid(target_pw->pw_gid, target_pw->pw_gid) == -1)
		err(1, "setregid");
#endif
	if (initgroups(target_pw->pw_name, target_pw->pw_gid) == -1)
		err(1, "initgroups");
#if defined(__linux__) || defined(__FreeBSD__) || defined(__NetBSD__)
	if (setresuid(target, target, target) == -1)
		err(1, "setresuid");
#else
	if (setreuid(target, target) == -1)
		err(1, "setreuid");
#endif
#endif
/*
	if (pledge("stdio rpath exec", NULL) == -1)
		err(1, "pledge");
*/
	if (getcwd(cwdpath, sizeof(cwdpath)) == NULL)
		cwd = "(failed)";
	else
		cwd = cwdpath;
/*
	if (pledge("stdio exec", NULL) == -1)
		err(1, "pledge");
*/
	syslog(LOG_AUTHPRIV | LOG_INFO, "%s ran command %s as %s from %s",
	    myname, cmdline, target_pw->pw_name, cwd);
	/* Build the sanitized environment for the target command. */
	envp = prepenv(rule, original_pw, target_pw);
	/* Rules naming an explicit command get the hard-coded safe PATH. */
	if (rule->cmd) {
		if (setenv("PATH", safepath, 1) == -1)
			err(1, "failed to set PATH '%s'", safepath);
	}
	execvpe(cmd, argv, envp);
	/* Only reached if exec failed. */
	if (errno == ENOENT)
		errx(1, "%s: command not found", cmd);
	err(1, "%s", cmd);
}
|
1052_1
|
crossvul
|
c
|
CWE-1188
|
Initialization of a Resource with an Insecure Default - Developers often choose default values that leave the product as open and easy to use as possible out-of-the-box, under the assumption that the administrator can (or should) change the default value.
|
javascript
|
'use strict';
// Grunt's file-utilities module: glob matching, path predicates, and
// synchronous read/write/copy/delete helpers used throughout grunt.
var grunt = require('../grunt');
// Nodejs libs.
var fs = require('fs');
var path = require('path');
// The module to be exported.
var file = module.exports = {};
// External libs (exposed on the module so plugins can reuse them).
file.glob = require('glob');
file.minimatch = require('minimatch');
file.findup = require('findup-sync');
var YAML = require('js-yaml');
var rimraf = require('rimraf');
var iconv = require('iconv-lite');
var mkdirp = require('mkdirp').sync;
// Windows?
var win32 = process.platform === 'win32';
// Normalize \\ paths to / paths.
var unixifyPath = function(filepath) {
  if (win32) {
    return filepath.replace(/\\/g, '/');
  } else {
    return filepath;
  }
};
// Change the current base path (ie, CWD) to the specified path.
file.setBase = function() {
  var target = path.join.apply(path, arguments);
  process.chdir(target);
};

// Expand a (possibly nested) array of glob patterns through `fn`,
// honoring leading-"!" exclusion patterns and de-duplicating the result.
var processPatterns = function(patterns, fn) {
  var collected = [];
  grunt.util._.flattenDeep(patterns).forEach(function(pattern) {
    var isExclusion = pattern.indexOf('!') === 0;
    if (isExclusion) { pattern = pattern.slice(1); }
    var found = fn(pattern);
    collected = isExclusion ?
      grunt.util._.difference(collected, found) :
      grunt.util._.union(collected, found);
  });
  return collected;
};

// Match a filepath or filepaths against one or more wildcard patterns;
// returns all matching filepaths. Callable as (patterns, filepaths) or
// (options, patterns, filepaths).
file.match = function(options, patterns, filepaths) {
  if (grunt.util.kindOf(options) !== 'object') {
    // No options object supplied; shift the arguments left.
    filepaths = patterns;
    patterns = options;
    options = {};
  }
  // Nothing to match when either side was omitted.
  if (patterns == null || filepaths == null) { return []; }
  var patternList = Array.isArray(patterns) ? patterns : [patterns];
  var pathList = Array.isArray(filepaths) ? filepaths : [filepaths];
  if (patternList.length === 0 || pathList.length === 0) { return []; }
  return processPatterns(patternList, function(pattern) {
    return file.minimatch.match(pathList, pattern, options);
  });
};

// True when at least one pattern matches at least one filepath.
file.isMatch = function() {
  var matched = file.match.apply(file, arguments);
  return matched.length > 0;
};
// Return an array of all file paths that match the given wildcard patterns.
file.expand = function() {
  var args = grunt.util.toArray(arguments);
  // If the first argument is an options object, save those options to pass
  // into the file.glob.sync method.
  var options = grunt.util.kindOf(args[0]) === 'object' ? args.shift() : {};
  // Use the first argument if it's an Array, otherwise convert the arguments
  // object to an array and use that.
  var patterns = Array.isArray(args[0]) ? args[0] : args;
  // Return empty set if there are no patterns or filepaths.
  if (patterns.length === 0) { return []; }
  // Return all matching filepaths.
  var matches = processPatterns(patterns, function(pattern) {
    // Find all matching files for this pattern.
    return file.glob.sync(pattern, options);
  });
  // Filter result set?
  // options.filter may be a predicate function or the name of an fs.Stats
  // method such as 'isFile' / 'isDirectory'.
  if (options.filter) {
    matches = matches.filter(function(filepath) {
      // cwd is prepended only for the stat test; returned paths stay
      // relative to options.cwd.
      filepath = path.join(options.cwd || '', filepath);
      try {
        if (typeof options.filter === 'function') {
          return options.filter(filepath);
        } else {
          // If the file is of the right type and exists, this should work.
          return fs.statSync(filepath)[options.filter]();
        }
      } catch (e) {
        // Otherwise, it's probably not the right type.
        return false;
      }
    });
  }
  return matches;
};
// Matches either slash flavor so paths can be normalized to unix style.
var pathSeparatorRe = /[\/\\]/g;
// The "ext" option refers to either everything after the first dot (default)
// or everything after the last dot.
var extDotRe = {
  first: /(\.[^\/]*)?$/,
  last: /(\.[^\/\.]*)?$/,
};
// Build a multi task "files" object dynamically.
// Expands `patterns`, then maps each matched src path to a dest path under
// destBase, honoring options.flatten / ext / extDot / rename / cwd.
// Multiple srcs renaming to the same dest are merged into one entry.
file.expandMapping = function(patterns, destBase, options) {
  options = grunt.util._.defaults({}, options, {
    extDot: 'first',
    rename: function(destBase, destPath) {
      return path.join(destBase || '', destPath);
    }
  });
  var files = [];
  var fileByDest = {};
  // Find all files matching pattern, using passed-in options.
  file.expand(options, patterns).forEach(function(src) {
    var destPath = src;
    // Flatten?
    if (options.flatten) {
      destPath = path.basename(destPath);
    }
    // Change the extension?
    if ('ext' in options) {
      destPath = destPath.replace(extDotRe[options.extDot], options.ext);
    }
    // Generate destination filename.
    var dest = options.rename(destBase, destPath, options);
    // Prepend cwd to src path if necessary.
    if (options.cwd) { src = path.join(options.cwd, src); }
    // Normalize filepaths to be unix-style.
    dest = dest.replace(pathSeparatorRe, '/');
    src = src.replace(pathSeparatorRe, '/');
    // Map correct src path to dest path.
    if (fileByDest[dest]) {
      // If dest already exists, push this src onto that dest's src array.
      fileByDest[dest].src.push(src);
    } else {
      // Otherwise create a new src-dest file mapping object.
      files.push({
        src: [src],
        dest: dest,
      });
      // And store a reference for later use.
      fileByDest[dest] = files[files.length - 1];
    }
  });
  return files;
};
// Like mkdir -p: create dirpath and any missing intermediate directories.
// Honors the --no-write option by doing nothing at all.
file.mkdir = function(dirpath, mode) {
  if (grunt.option('no-write')) { return; }
  try {
    mkdirp(dirpath, { mode: mode });
  } catch (err) {
    var msg = 'Unable to create directory "' + dirpath + '" (Error code: ' + err.code + ').';
    throw grunt.util.error(msg, err);
  }
};

// Walk rootdir depth-first, invoking callback(abspath, rootdir, subdir,
// filename) for every regular file encountered.
file.recurse = function recurse(rootdir, callback, subdir) {
  var here = subdir ? path.join(rootdir, subdir) : rootdir;
  fs.readdirSync(here).forEach(function(entry) {
    var full = path.join(here, entry);
    if (fs.statSync(full).isDirectory()) {
      recurse(rootdir, callback, unixifyPath(path.join(subdir || '', entry || '')));
    } else {
      callback(unixifyPath(full), rootdir, subdir, entry);
    }
  });
};
// The default file encoding to use.
// Module-level setting; consumers may reassign (e.g. grunt.file.defaultEncoding).
file.defaultEncoding = 'utf8';
// Whether to preserve the BOM on file.read rather than strip it.
file.preserveBOM = false;
// Read a file, return its contents.
// Returns a string decoded per options.encoding (falling back to
// file.defaultEncoding); pass {encoding: null} to get the raw Buffer.
file.read = function(filepath, options) {
  if (!options) { options = {}; }
  var contents;
  grunt.verbose.write('Reading ' + filepath + '...');
  try {
    contents = fs.readFileSync(String(filepath));
    // If encoding is not explicitly null, convert from encoded buffer to a
    // string. If no encoding was specified, use the default.
    if (options.encoding !== null) {
      contents = iconv.decode(contents, options.encoding || file.defaultEncoding, {stripBOM: !file.preserveBOM});
    }
    grunt.verbose.ok();
    return contents;
  } catch (e) {
    grunt.verbose.error();
    throw grunt.util.error('Unable to read "' + filepath + '" file (Error code: ' + e.code + ').', e);
  }
};
// Read filepath, JSON.parse its contents, and return the result.
// Throws a grunt error (wrapping the original) on any parse failure.
file.readJSON = function(filepath, options) {
  var raw = file.read(filepath, options);
  grunt.verbose.write('Parsing ' + filepath + '...');
  try {
    var parsed = JSON.parse(raw);
    grunt.verbose.ok();
    return parsed;
  } catch (e) {
    grunt.verbose.error();
    throw grunt.util.error('Unable to parse "' + filepath + '" file (' + e.message + ').', e);
  }
};
// Read a YAML file, parse its contents, return an object.
//
// Security: this previously called YAML.load unconditionally, which uses
// js-yaml's full schema and can instantiate arbitrary types from untrusted
// YAML. It now defaults to the recommended YAML.safeLoad
// (https://github.com/nodeca/js-yaml#safeload-string---options-).
// Callers that genuinely need full-schema parsing can opt in by passing
// { unsafeLoad: true } as the new, optional third argument (backward
// compatible: existing two-argument callers get the safe loader).
file.readYAML = function(filepath, options, yamlOptions) {
  if (!yamlOptions) { yamlOptions = {}; }
  var src = file.read(filepath, options);
  var result;
  grunt.verbose.write('Parsing ' + filepath + '...');
  try {
    result = yamlOptions.unsafeLoad ? YAML.load(src) : YAML.safeLoad(src);
    grunt.verbose.ok();
    return result;
  } catch (e) {
    grunt.verbose.error();
    throw grunt.util.error('Unable to parse "' + filepath + '" file (' + e.message + ').', e);
  }
};
// Write a file.
// Creates intermediate directories as needed; honors --no-write (logs but
// does not touch disk). Returns true on success, throws a grunt error
// otherwise.
file.write = function(filepath, contents, options) {
  if (!options) { options = {}; }
  var nowrite = grunt.option('no-write');
  grunt.verbose.write((nowrite ? 'Not actually writing ' : 'Writing ') + filepath + '...');
  // Create path, if necessary.
  file.mkdir(path.dirname(filepath));
  try {
    // If contents is already a Buffer, don't try to encode it. If no encoding
    // was specified, use the default.
    if (!Buffer.isBuffer(contents)) {
      contents = iconv.encode(contents, options.encoding || file.defaultEncoding);
    }
    // Actually write file.
    if (!nowrite) {
      fs.writeFileSync(filepath, contents, 'mode' in options ? {mode: options.mode} : {});
    }
    grunt.verbose.ok();
    return true;
  } catch (e) {
    grunt.verbose.error();
    throw grunt.util.error('Unable to write "' + filepath + '" file (Error code: ' + e.code + ').', e);
  }
};
// Copy srcpath to destpath. Directories are copied recursively; single
// files are delegated to file._copy, which handles optional content
// processing.
file.copy = function copy(srcpath, destpath, options) {
  if (!file.isDir(srcpath)) {
    // Copy a single file.
    file._copy(srcpath, destpath, options);
    return;
  }
  // Explicitly create the destination directory, then recurse into children.
  file.mkdir(destpath);
  fs.readdirSync(srcpath).forEach(function(entry) {
    copy(path.join(srcpath, entry), path.join(destpath, entry), options);
  });
};
// Read a file, optionally processing its content, then write the output.
// Internal single-file copy used by file.copy.
file._copy = function(srcpath, destpath, options) {
  if (!options) { options = {}; }
  // If a process function was specified, and noProcess isn't true or doesn't
  // match the srcpath, process the file's source.
  var process = options.process && options.noProcess !== true &&
    !(options.noProcess && file.isMatch(options.noProcess, srcpath));
  // If the file will be processed, use the encoding as-specified. Otherwise,
  // use an encoding of null to force the file to be read/written as a Buffer.
  var readWriteOptions = process ? options : {encoding: null};
  // Actually read the file.
  var contents = file.read(srcpath, readWriteOptions);
  if (process) {
    grunt.verbose.write('Processing source...');
    try {
      contents = options.process(contents, srcpath, destpath);
      grunt.verbose.ok();
    } catch (e) {
      grunt.verbose.error();
      throw grunt.util.error('Error while processing "' + srcpath + '" file.', e);
    }
  }
  // Abort copy if the process function returns false.
  if (contents === false) {
    grunt.verbose.writeln('Write aborted.');
  } else {
    file.write(destpath, contents, readWriteOptions);
  }
};
// Delete folders and files recursively.
// Guard rails: without options.force (or --force) it refuses to delete the
// CWD or anything outside it. Honors --no-write. Returns true on success,
// false when the delete was refused or the path does not exist.
file.delete = function(filepath, options) {
  filepath = String(filepath);
  var nowrite = grunt.option('no-write');
  if (!options) {
    options = {force: grunt.option('force') || false};
  }
  grunt.verbose.write((nowrite ? 'Not actually deleting ' : 'Deleting ') + filepath + '...');
  if (!file.exists(filepath)) {
    grunt.verbose.error();
    grunt.log.warn('Cannot delete nonexistent file.');
    return false;
  }
  // Only delete cwd or outside cwd if --force enabled. Be careful, people!
  if (!options.force) {
    if (file.isPathCwd(filepath)) {
      grunt.verbose.error();
      grunt.fail.warn('Cannot delete the current working directory.');
      return false;
    } else if (!file.isPathInCwd(filepath)) {
      grunt.verbose.error();
      grunt.fail.warn('Cannot delete files outside the current working directory.');
      return false;
    }
  }
  try {
    // Actually delete. Or not.
    if (!nowrite) {
      rimraf.sync(filepath);
    }
    grunt.verbose.ok();
    return true;
  } catch (e) {
    grunt.verbose.error();
    throw grunt.util.error('Unable to delete "' + filepath + '" file (' + e.message + ').', e);
  }
};
// True if the path (joined from all arguments) exists on disk.
file.exists = function() {
  var target = path.join.apply(path, arguments);
  return fs.existsSync(target);
};
// True if the joined path exists and is a symbolic link.
file.isLink = function() {
  var target = path.join.apply(path, arguments);
  try {
    return fs.lstatSync(target).isSymbolicLink();
  } catch (e) {
    // A missing file is simply "not a link"; anything else is fatal.
    if (e.code === 'ENOENT') { return false; }
    throw grunt.util.error('Unable to read "' + target + '" file (Error code: ' + e.code + ').', e);
  }
};
// True if the joined path exists and is a directory.
file.isDir = function() {
  var target = path.join.apply(path, arguments);
  return file.exists(target) && fs.statSync(target).isDirectory();
};
// True if the joined path exists and is a regular file.
file.isFile = function() {
  var target = path.join.apply(path, arguments);
  return file.exists(target) && fs.statSync(target).isFile();
};
// Is the path joined from all arguments absolute?
file.isPathAbsolute = function() {
  return path.isAbsolute(path.join.apply(path, arguments));
};
// Do all of the supplied paths resolve to the same absolute path?
file.arePathsEquivalent = function(first) {
  var reference = path.resolve(first);
  for (var i = 1; i < arguments.length; i++) {
    if (path.resolve(arguments[i]) !== reference) { return false; }
  }
  return true;
};
// Are descendant path(s) contained within ancestor path? Note: does not
// test if paths actually exist. The relative path from a strict descendant
// back up to its ancestor consists only of ".." segments (no word chars);
// an empty string means the paths are equal, which also returns false.
file.doesPathContain = function(ancestor) {
  ancestor = path.resolve(ancestor);
  for (var i = 1; i < arguments.length; i++) {
    var rel = path.relative(path.resolve(arguments[i]), ancestor);
    if (rel === '' || /\w+/.test(rel)) { return false; }
  }
  return true;
};
// Does the joined path refer to the current working directory?
file.isPathCwd = function() {
  var target = path.join.apply(path, arguments);
  try {
    return file.arePathsEquivalent(fs.realpathSync(process.cwd()), fs.realpathSync(target));
  } catch (e) {
    return false;
  }
};
// Is the joined path contained within the current working directory?
file.isPathInCwd = function() {
  var target = path.join.apply(path, arguments);
  try {
    return file.doesPathContain(fs.realpathSync(process.cwd()), fs.realpathSync(target));
  } catch (e) {
    return false;
  }
};
|
'use strict';
// Grunt's file-utilities module: glob matching, path predicates, and
// synchronous read/write/copy/delete helpers used throughout grunt.
var grunt = require('../grunt');
// Nodejs libs.
var fs = require('fs');
var path = require('path');
// The module to be exported.
var file = module.exports = {};
// External libs (exposed on the module so plugins can reuse them).
file.glob = require('glob');
file.minimatch = require('minimatch');
file.findup = require('findup-sync');
var YAML = require('js-yaml');
var rimraf = require('rimraf');
var iconv = require('iconv-lite');
var mkdirp = require('mkdirp').sync;
// Windows?
var win32 = process.platform === 'win32';
// Normalize \\ paths to / paths.
var unixifyPath = function(filepath) {
  if (win32) {
    return filepath.replace(/\\/g, '/');
  } else {
    return filepath;
  }
};
// Change the current base path (ie, CWD) to the specified path.
file.setBase = function() {
  var target = path.join.apply(path, arguments);
  process.chdir(target);
};

// Expand a (possibly nested) array of glob patterns through `fn`,
// honoring leading-"!" exclusion patterns and de-duplicating the result.
var processPatterns = function(patterns, fn) {
  var collected = [];
  grunt.util._.flattenDeep(patterns).forEach(function(pattern) {
    var isExclusion = pattern.indexOf('!') === 0;
    if (isExclusion) { pattern = pattern.slice(1); }
    var found = fn(pattern);
    collected = isExclusion ?
      grunt.util._.difference(collected, found) :
      grunt.util._.union(collected, found);
  });
  return collected;
};

// Match a filepath or filepaths against one or more wildcard patterns;
// returns all matching filepaths. Callable as (patterns, filepaths) or
// (options, patterns, filepaths).
file.match = function(options, patterns, filepaths) {
  if (grunt.util.kindOf(options) !== 'object') {
    // No options object supplied; shift the arguments left.
    filepaths = patterns;
    patterns = options;
    options = {};
  }
  // Nothing to match when either side was omitted.
  if (patterns == null || filepaths == null) { return []; }
  var patternList = Array.isArray(patterns) ? patterns : [patterns];
  var pathList = Array.isArray(filepaths) ? filepaths : [filepaths];
  if (patternList.length === 0 || pathList.length === 0) { return []; }
  return processPatterns(patternList, function(pattern) {
    return file.minimatch.match(pathList, pattern, options);
  });
};

// True when at least one pattern matches at least one filepath.
file.isMatch = function() {
  var matched = file.match.apply(file, arguments);
  return matched.length > 0;
};
// Return an array of all file paths that match the given wildcard patterns.
file.expand = function() {
  var args = grunt.util.toArray(arguments);
  // If the first argument is an options object, save those options to pass
  // into the file.glob.sync method.
  var options = grunt.util.kindOf(args[0]) === 'object' ? args.shift() : {};
  // Use the first argument if it's an Array, otherwise convert the arguments
  // object to an array and use that.
  var patterns = Array.isArray(args[0]) ? args[0] : args;
  // Return empty set if there are no patterns or filepaths.
  if (patterns.length === 0) { return []; }
  // Return all matching filepaths.
  var matches = processPatterns(patterns, function(pattern) {
    // Find all matching files for this pattern.
    return file.glob.sync(pattern, options);
  });
  // Filter result set?
  // options.filter may be a predicate function or the name of an fs.Stats
  // method such as 'isFile' / 'isDirectory'.
  if (options.filter) {
    matches = matches.filter(function(filepath) {
      // cwd is prepended only for the stat test; returned paths stay
      // relative to options.cwd.
      filepath = path.join(options.cwd || '', filepath);
      try {
        if (typeof options.filter === 'function') {
          return options.filter(filepath);
        } else {
          // If the file is of the right type and exists, this should work.
          return fs.statSync(filepath)[options.filter]();
        }
      } catch (e) {
        // Otherwise, it's probably not the right type.
        return false;
      }
    });
  }
  return matches;
};
// Matches either slash flavor so paths can be normalized to unix style.
var pathSeparatorRe = /[\/\\]/g;
// The "ext" option refers to either everything after the first dot (default)
// or everything after the last dot.
var extDotRe = {
  first: /(\.[^\/]*)?$/,
  last: /(\.[^\/\.]*)?$/,
};
// Build a multi task "files" object dynamically.
// Expands `patterns`, then maps each matched src path to a dest path under
// destBase, honoring options.flatten / ext / extDot / rename / cwd.
// Multiple srcs renaming to the same dest are merged into one entry.
file.expandMapping = function(patterns, destBase, options) {
  options = grunt.util._.defaults({}, options, {
    extDot: 'first',
    rename: function(destBase, destPath) {
      return path.join(destBase || '', destPath);
    }
  });
  var files = [];
  var fileByDest = {};
  // Find all files matching pattern, using passed-in options.
  file.expand(options, patterns).forEach(function(src) {
    var destPath = src;
    // Flatten?
    if (options.flatten) {
      destPath = path.basename(destPath);
    }
    // Change the extension?
    if ('ext' in options) {
      destPath = destPath.replace(extDotRe[options.extDot], options.ext);
    }
    // Generate destination filename.
    var dest = options.rename(destBase, destPath, options);
    // Prepend cwd to src path if necessary.
    if (options.cwd) { src = path.join(options.cwd, src); }
    // Normalize filepaths to be unix-style.
    dest = dest.replace(pathSeparatorRe, '/');
    src = src.replace(pathSeparatorRe, '/');
    // Map correct src path to dest path.
    if (fileByDest[dest]) {
      // If dest already exists, push this src onto that dest's src array.
      fileByDest[dest].src.push(src);
    } else {
      // Otherwise create a new src-dest file mapping object.
      files.push({
        src: [src],
        dest: dest,
      });
      // And store a reference for later use.
      fileByDest[dest] = files[files.length - 1];
    }
  });
  return files;
};
// Like mkdir -p: create dirpath and any missing intermediate directories.
// Honors the --no-write option by doing nothing at all.
file.mkdir = function(dirpath, mode) {
  if (grunt.option('no-write')) { return; }
  try {
    mkdirp(dirpath, { mode: mode });
  } catch (err) {
    var msg = 'Unable to create directory "' + dirpath + '" (Error code: ' + err.code + ').';
    throw grunt.util.error(msg, err);
  }
};

// Walk rootdir depth-first, invoking callback(abspath, rootdir, subdir,
// filename) for every regular file encountered.
file.recurse = function recurse(rootdir, callback, subdir) {
  var here = subdir ? path.join(rootdir, subdir) : rootdir;
  fs.readdirSync(here).forEach(function(entry) {
    var full = path.join(here, entry);
    if (fs.statSync(full).isDirectory()) {
      recurse(rootdir, callback, unixifyPath(path.join(subdir || '', entry || '')));
    } else {
      callback(unixifyPath(full), rootdir, subdir, entry);
    }
  });
};
// The default file encoding to use.
file.defaultEncoding = 'utf8';
// Whether to preserve the BOM on file.read rather than strip it.
file.preserveBOM = false;
// Read a file, return its contents.
file.read = function(filepath, options) {
if (!options) { options = {}; }
var contents;
grunt.verbose.write('Reading ' + filepath + '...');
try {
contents = fs.readFileSync(String(filepath));
// If encoding is not explicitly null, convert from encoded buffer to a
// string. If no encoding was specified, use the default.
if (options.encoding !== null) {
contents = iconv.decode(contents, options.encoding || file.defaultEncoding, {stripBOM: !file.preserveBOM});
}
grunt.verbose.ok();
return contents;
} catch (e) {
grunt.verbose.error();
throw grunt.util.error('Unable to read "' + filepath + '" file (Error code: ' + e.code + ').', e);
}
};
// Read a file, parse its contents, return an object.
file.readJSON = function(filepath, options) {
var src = file.read(filepath, options);
var result;
grunt.verbose.write('Parsing ' + filepath + '...');
try {
result = JSON.parse(src);
grunt.verbose.ok();
return result;
} catch (e) {
grunt.verbose.error();
throw grunt.util.error('Unable to parse "' + filepath + '" file (' + e.message + ').', e);
}
};
// Read a YAML file, parse its contents, return an object.
file.readYAML = function(filepath, options, yamlOptions) {
if (!options) { options = {}; }
if (!yamlOptions) { yamlOptions = {}; }
var src = file.read(filepath, options);
var result;
grunt.verbose.write('Parsing ' + filepath + '...');
try {
// use the recommended way of reading YAML files
// https://github.com/nodeca/js-yaml#safeload-string---options-
if (yamlOptions.unsafeLoad) {
result = YAML.load(src);
} else {
result = YAML.safeLoad(src);
}
grunt.verbose.ok();
return result;
} catch (e) {
grunt.verbose.error();
throw grunt.util.error('Unable to parse "' + filepath + '" file (' + e.message + ').', e);
}
};
// Write a file.
file.write = function(filepath, contents, options) {
if (!options) { options = {}; }
var nowrite = grunt.option('no-write');
grunt.verbose.write((nowrite ? 'Not actually writing ' : 'Writing ') + filepath + '...');
// Create path, if necessary.
file.mkdir(path.dirname(filepath));
try {
// If contents is already a Buffer, don't try to encode it. If no encoding
// was specified, use the default.
if (!Buffer.isBuffer(contents)) {
contents = iconv.encode(contents, options.encoding || file.defaultEncoding);
}
// Actually write file.
if (!nowrite) {
fs.writeFileSync(filepath, contents, 'mode' in options ? {mode: options.mode} : {});
}
grunt.verbose.ok();
return true;
} catch (e) {
grunt.verbose.error();
throw grunt.util.error('Unable to write "' + filepath + '" file (Error code: ' + e.code + ').', e);
}
};
// Read a file, optionally processing its content, then write the output.
// Or read a directory, recursively creating directories, reading files,
// processing content, writing output.
file.copy = function copy(srcpath, destpath, options) {
if (file.isDir(srcpath)) {
// Copy a directory, recursively.
// Explicitly create new dest directory.
file.mkdir(destpath);
// Iterate over all sub-files/dirs, recursing.
fs.readdirSync(srcpath).forEach(function(filepath) {
copy(path.join(srcpath, filepath), path.join(destpath, filepath), options);
});
} else {
// Copy a single file.
file._copy(srcpath, destpath, options);
}
};
// Read a file, optionally processing its content, then write the output.
file._copy = function(srcpath, destpath, options) {
if (!options) { options = {}; }
// If a process function was specified, and noProcess isn't true or doesn't
// match the srcpath, process the file's source.
var process = options.process && options.noProcess !== true &&
!(options.noProcess && file.isMatch(options.noProcess, srcpath));
// If the file will be processed, use the encoding as-specified. Otherwise,
// use an encoding of null to force the file to be read/written as a Buffer.
var readWriteOptions = process ? options : {encoding: null};
// Actually read the file.
var contents = file.read(srcpath, readWriteOptions);
if (process) {
grunt.verbose.write('Processing source...');
try {
contents = options.process(contents, srcpath, destpath);
grunt.verbose.ok();
} catch (e) {
grunt.verbose.error();
throw grunt.util.error('Error while processing "' + srcpath + '" file.', e);
}
}
// Abort copy if the process function returns false.
if (contents === false) {
grunt.verbose.writeln('Write aborted.');
} else {
file.write(destpath, contents, readWriteOptions);
}
};
// Delete folders and files recursively
file.delete = function(filepath, options) {
filepath = String(filepath);
var nowrite = grunt.option('no-write');
if (!options) {
options = {force: grunt.option('force') || false};
}
grunt.verbose.write((nowrite ? 'Not actually deleting ' : 'Deleting ') + filepath + '...');
if (!file.exists(filepath)) {
grunt.verbose.error();
grunt.log.warn('Cannot delete nonexistent file.');
return false;
}
// Only delete cwd or outside cwd if --force enabled. Be careful, people!
if (!options.force) {
if (file.isPathCwd(filepath)) {
grunt.verbose.error();
grunt.fail.warn('Cannot delete the current working directory.');
return false;
} else if (!file.isPathInCwd(filepath)) {
grunt.verbose.error();
grunt.fail.warn('Cannot delete files outside the current working directory.');
return false;
}
}
try {
// Actually delete. Or not.
if (!nowrite) {
rimraf.sync(filepath);
}
grunt.verbose.ok();
return true;
} catch (e) {
grunt.verbose.error();
throw grunt.util.error('Unable to delete "' + filepath + '" file (' + e.message + ').', e);
}
};
// True if the file path exists.
file.exists = function() {
var filepath = path.join.apply(path, arguments);
return fs.existsSync(filepath);
};
// True if the file is a symbolic link.
file.isLink = function() {
var filepath = path.join.apply(path, arguments);
try {
return fs.lstatSync(filepath).isSymbolicLink();
} catch (e) {
if (e.code === 'ENOENT') {
// The file doesn't exist, so it's not a symbolic link.
return false;
}
throw grunt.util.error('Unable to read "' + filepath + '" file (Error code: ' + e.code + ').', e);
}
};
// True if the path is a directory.
file.isDir = function() {
var filepath = path.join.apply(path, arguments);
return file.exists(filepath) && fs.statSync(filepath).isDirectory();
};
// True if the path is a file.
file.isFile = function() {
var filepath = path.join.apply(path, arguments);
return file.exists(filepath) && fs.statSync(filepath).isFile();
};
// Is a given file path absolute?
file.isPathAbsolute = function() {
var filepath = path.join.apply(path, arguments);
return path.isAbsolute(filepath);
};
// Do all the specified paths refer to the same path?
file.arePathsEquivalent = function(first) {
first = path.resolve(first);
for (var i = 1; i < arguments.length; i++) {
if (first !== path.resolve(arguments[i])) { return false; }
}
return true;
};
// Are descendant path(s) contained within ancestor path? Note: does not test
// if paths actually exist.
file.doesPathContain = function(ancestor) {
ancestor = path.resolve(ancestor);
var relative;
for (var i = 1; i < arguments.length; i++) {
relative = path.relative(path.resolve(arguments[i]), ancestor);
if (relative === '' || /\w+/.test(relative)) { return false; }
}
return true;
};
// Test to see if a filepath is the CWD.
file.isPathCwd = function() {
var filepath = path.join.apply(path, arguments);
try {
return file.arePathsEquivalent(fs.realpathSync(process.cwd()), fs.realpathSync(filepath));
} catch (e) {
return false;
}
};
// Test to see if a filepath is contained within the CWD.
file.isPathInCwd = function() {
var filepath = path.join.apply(path, arguments);
try {
return file.doesPathContain(fs.realpathSync(process.cwd()), fs.realpathSync(filepath));
} catch (e) {
return false;
}
};
|
4626_0
|
crossvul
|
js
|
CWE-1188
|
Initialization of a Resource with an Insecure Default - Developers often choose default values that leave the product as open and easy to use as possible out-of-the-box, under the assumption that the administrator can (or should) change the default value.
|
php
|
<?php
/**
* The WordPress version string
*
* @global string $wp_version
*/
$wp_version = '4.8-alpha-39760';
/**
* Holds the WordPress DB revision, increments when changes are made to the WordPress DB schema.
*
* @global int $wp_db_version
*/
$wp_db_version = 38590;
/**
* Holds the TinyMCE version
*
* @global string $tinymce_version
*/
$tinymce_version = '4403-20160901';
/**
* Holds the required PHP version
*
* @global string $required_php_version
*/
$required_php_version = '5.2.4';
/**
* Holds the required MySQL version
*
* @global string $required_mysql_version
*/
$required_mysql_version = '5.0';
|
<?php
/**
* The WordPress version string
*
* @global string $wp_version
*/
$wp_version = '4.8-alpha-39772';
/**
* Holds the WordPress DB revision, increments when changes are made to the WordPress DB schema.
*
* @global int $wp_db_version
*/
$wp_db_version = 38590;
/**
* Holds the TinyMCE version
*
* @global string $tinymce_version
*/
$tinymce_version = '4403-20160901';
/**
* Holds the required PHP version
*
* @global string $required_php_version
*/
$required_php_version = '5.2.4';
/**
* Holds the required MySQL version
*
* @global string $required_mysql_version
*/
$required_mysql_version = '5.0';
|
3098_0
|
crossvul
|
php
|
CWE-1188
|
Initialization of a Resource with an Insecure Default - Developers often choose default values that leave the product as open and easy to use as possible out-of-the-box, under the assumption that the administrator can (or should) change the default value.
|
php
|
<?php
/**
* Gets the email message from the user's mailbox to add as
* a WordPress post. Mailbox connection information must be
* configured under Settings > Writing
*
* @package WordPress
*/
/** Make sure that the WordPress bootstrap has run before continuing. */
require(dirname(__FILE__) . '/wp-load.php');
/** This filter is documented in wp-admin/options.php */
if ( ! apply_filters( 'enable_post_by_email_configuration', true ) )
wp_die( __( 'This action has been disabled by the administrator.' ), 403 );
/**
* Fires to allow a plugin to do a complete takeover of Post by Email.
*
* @since 2.9.0
*/
do_action( 'wp-mail.php' );
/** Get the POP3 class with which to access the mailbox. */
require_once( ABSPATH . WPINC . '/class-pop3.php' );
/** Only check at this interval for new messages. */
if ( !defined('WP_MAIL_INTERVAL') )
define('WP_MAIL_INTERVAL', 300); // 5 minutes
$last_checked = get_transient('mailserver_last_checked');
if ( $last_checked )
wp_die(__('Slow down cowboy, no need to check for new mails so often!'));
set_transient('mailserver_last_checked', true, WP_MAIL_INTERVAL);
$time_difference = get_option('gmt_offset') * HOUR_IN_SECONDS;
$phone_delim = '::';
$pop3 = new POP3();
if ( !$pop3->connect( get_option('mailserver_url'), get_option('mailserver_port') ) || !$pop3->user( get_option('mailserver_login') ) )
wp_die( esc_html( $pop3->ERROR ) );
$count = $pop3->pass( get_option('mailserver_pass') );
if( false === $count )
wp_die( esc_html( $pop3->ERROR ) );
if( 0 === $count ) {
$pop3->quit();
wp_die( __('There doesn’t seem to be any new mail.') );
}
for ( $i = 1; $i <= $count; $i++ ) {
$message = $pop3->get($i);
$bodysignal = false;
$boundary = '';
$charset = '';
$content = '';
$content_type = '';
$content_transfer_encoding = '';
$post_author = 1;
$author_found = false;
foreach ($message as $line) {
// Body signal.
if ( strlen($line) < 3 )
$bodysignal = true;
if ( $bodysignal ) {
$content .= $line;
} else {
if ( preg_match('/Content-Type: /i', $line) ) {
$content_type = trim($line);
$content_type = substr($content_type, 14, strlen($content_type) - 14);
$content_type = explode(';', $content_type);
if ( ! empty( $content_type[1] ) ) {
$charset = explode('=', $content_type[1]);
$charset = ( ! empty( $charset[1] ) ) ? trim($charset[1]) : '';
}
$content_type = $content_type[0];
}
if ( preg_match('/Content-Transfer-Encoding: /i', $line) ) {
$content_transfer_encoding = trim($line);
$content_transfer_encoding = substr($content_transfer_encoding, 27, strlen($content_transfer_encoding) - 27);
$content_transfer_encoding = explode(';', $content_transfer_encoding);
$content_transfer_encoding = $content_transfer_encoding[0];
}
if ( ( $content_type == 'multipart/alternative' ) && ( false !== strpos($line, 'boundary="') ) && ( '' == $boundary ) ) {
$boundary = trim($line);
$boundary = explode('"', $boundary);
$boundary = $boundary[1];
}
if (preg_match('/Subject: /i', $line)) {
$subject = trim($line);
$subject = substr($subject, 9, strlen($subject) - 9);
// Captures any text in the subject before $phone_delim as the subject
if ( function_exists('iconv_mime_decode') ) {
$subject = iconv_mime_decode($subject, 2, get_option('blog_charset'));
} else {
$subject = wp_iso_descrambler($subject);
}
$subject = explode($phone_delim, $subject);
$subject = $subject[0];
}
/*
* Set the author using the email address (From or Reply-To, the last used)
* otherwise use the site admin.
*/
if ( ! $author_found && preg_match( '/^(From|Reply-To): /', $line ) ) {
if ( preg_match('|[a-z0-9_.-]+@[a-z0-9_.-]+(?!.*<)|i', $line, $matches) )
$author = $matches[0];
else
$author = trim($line);
$author = sanitize_email($author);
if ( is_email($author) ) {
/* translators: Post author email address */
echo '<p>' . sprintf(__('Author is %s'), $author) . '</p>';
$userdata = get_user_by('email', $author);
if ( ! empty( $userdata ) ) {
$post_author = $userdata->ID;
$author_found = true;
}
}
}
if ( preg_match( '/Date: /i', $line ) ) { // of the form '20 Mar 2002 20:32:37 +0100'
$ddate = str_replace( 'Date: ', '', trim( $line ) );
$ddate = preg_replace( '!\s*\(.+\)\s*$!', '', $ddate ); // remove parenthesised timezone string if it exists, as this confuses strtotime
$ddate_U = strtotime( $ddate );
$post_date = gmdate( 'Y-m-d H:i:s', $ddate_U + $time_difference );
$post_date_gmt = gmdate( 'Y-m-d H:i:s', $ddate_U );
}
}
}
// Set $post_status based on $author_found and on author's publish_posts capability
if ( $author_found ) {
$user = new WP_User($post_author);
$post_status = ( $user->has_cap('publish_posts') ) ? 'publish' : 'pending';
} else {
// Author not found in DB, set status to pending. Author already set to admin.
$post_status = 'pending';
}
$subject = trim($subject);
if ( $content_type == 'multipart/alternative' ) {
$content = explode('--'.$boundary, $content);
$content = $content[2];
// Match case-insensitive content-transfer-encoding.
if ( preg_match( '/Content-Transfer-Encoding: quoted-printable/i', $content, $delim) ) {
$content = explode($delim[0], $content);
$content = $content[1];
}
$content = strip_tags($content, '<img><p><br><i><b><u><em><strong><strike><font><span><div>');
}
$content = trim($content);
/**
* Filters the original content of the email.
*
* Give Post-By-Email extending plugins full access to the content, either
* the raw content, or the content of the last quoted-printable section.
*
* @since 2.8.0
*
* @param string $content The original email content.
*/
$content = apply_filters( 'wp_mail_original_content', $content );
if ( false !== stripos($content_transfer_encoding, "quoted-printable") ) {
$content = quoted_printable_decode($content);
}
if ( function_exists('iconv') && ! empty( $charset ) ) {
$content = iconv($charset, get_option('blog_charset'), $content);
}
// Captures any text in the body after $phone_delim as the body
$content = explode($phone_delim, $content);
$content = empty( $content[1] ) ? $content[0] : $content[1];
$content = trim($content);
/**
* Filters the content of the post submitted by email before saving.
*
* @since 1.2.0
*
* @param string $content The email content.
*/
$post_content = apply_filters( 'phone_content', $content );
$post_title = xmlrpc_getposttitle($content);
if ($post_title == '') $post_title = $subject;
$post_category = array(get_option('default_email_category'));
$post_data = compact('post_content','post_title','post_date','post_date_gmt','post_author','post_category', 'post_status');
$post_data = wp_slash($post_data);
$post_ID = wp_insert_post($post_data);
if ( is_wp_error( $post_ID ) )
echo "\n" . $post_ID->get_error_message();
// We couldn't post, for whatever reason. Better move forward to the next email.
if ( empty( $post_ID ) )
continue;
/**
* Fires after a post submitted by email is published.
*
* @since 1.2.0
*
* @param int $post_ID The post ID.
*/
do_action( 'publish_phone', $post_ID );
echo "\n<p><strong>" . __( 'Author:' ) . '</strong> ' . esc_html( $post_author ) . '</p>';
echo "\n<p><strong>" . __( 'Posted title:' ) . '</strong> ' . esc_html( $post_title ) . '</p>';
if(!$pop3->delete($i)) {
echo '<p>' . sprintf(
/* translators: %s: POP3 error */
__( 'Oops: %s' ),
esc_html( $pop3->ERROR )
) . '</p>';
$pop3->reset();
exit;
} else {
echo '<p>' . sprintf(
/* translators: %s: the message ID */
__( 'Mission complete. Message %s deleted.' ),
'<strong>' . $i . '</strong>'
) . '</p>';
}
}
$pop3->quit();
|
<?php
/**
* Gets the email message from the user's mailbox to add as
* a WordPress post. Mailbox connection information must be
* configured under Settings > Writing
*
* @package WordPress
*/
/** Make sure that the WordPress bootstrap has run before continuing. */
require(dirname(__FILE__) . '/wp-load.php');
/** This filter is documented in wp-admin/options.php */
if ( ! apply_filters( 'enable_post_by_email_configuration', true ) )
wp_die( __( 'This action has been disabled by the administrator.' ), 403 );
$mailserver_url = get_option( 'mailserver_url' );
if ( 'mail.example.com' === $mailserver_url || empty( $mailserver_url ) ) {
wp_die( __( 'This action has been disabled by the administrator.' ), 403 );
}
/**
* Fires to allow a plugin to do a complete takeover of Post by Email.
*
* @since 2.9.0
*/
do_action( 'wp-mail.php' );
/** Get the POP3 class with which to access the mailbox. */
require_once( ABSPATH . WPINC . '/class-pop3.php' );
/** Only check at this interval for new messages. */
if ( !defined('WP_MAIL_INTERVAL') )
define('WP_MAIL_INTERVAL', 300); // 5 minutes
$last_checked = get_transient('mailserver_last_checked');
if ( $last_checked )
wp_die(__('Slow down cowboy, no need to check for new mails so often!'));
set_transient('mailserver_last_checked', true, WP_MAIL_INTERVAL);
$time_difference = get_option('gmt_offset') * HOUR_IN_SECONDS;
$phone_delim = '::';
$pop3 = new POP3();
if ( !$pop3->connect( get_option('mailserver_url'), get_option('mailserver_port') ) || !$pop3->user( get_option('mailserver_login') ) )
wp_die( esc_html( $pop3->ERROR ) );
$count = $pop3->pass( get_option('mailserver_pass') );
if( false === $count )
wp_die( esc_html( $pop3->ERROR ) );
if( 0 === $count ) {
$pop3->quit();
wp_die( __('There doesn’t seem to be any new mail.') );
}
for ( $i = 1; $i <= $count; $i++ ) {
$message = $pop3->get($i);
$bodysignal = false;
$boundary = '';
$charset = '';
$content = '';
$content_type = '';
$content_transfer_encoding = '';
$post_author = 1;
$author_found = false;
foreach ($message as $line) {
// Body signal.
if ( strlen($line) < 3 )
$bodysignal = true;
if ( $bodysignal ) {
$content .= $line;
} else {
if ( preg_match('/Content-Type: /i', $line) ) {
$content_type = trim($line);
$content_type = substr($content_type, 14, strlen($content_type) - 14);
$content_type = explode(';', $content_type);
if ( ! empty( $content_type[1] ) ) {
$charset = explode('=', $content_type[1]);
$charset = ( ! empty( $charset[1] ) ) ? trim($charset[1]) : '';
}
$content_type = $content_type[0];
}
if ( preg_match('/Content-Transfer-Encoding: /i', $line) ) {
$content_transfer_encoding = trim($line);
$content_transfer_encoding = substr($content_transfer_encoding, 27, strlen($content_transfer_encoding) - 27);
$content_transfer_encoding = explode(';', $content_transfer_encoding);
$content_transfer_encoding = $content_transfer_encoding[0];
}
if ( ( $content_type == 'multipart/alternative' ) && ( false !== strpos($line, 'boundary="') ) && ( '' == $boundary ) ) {
$boundary = trim($line);
$boundary = explode('"', $boundary);
$boundary = $boundary[1];
}
if (preg_match('/Subject: /i', $line)) {
$subject = trim($line);
$subject = substr($subject, 9, strlen($subject) - 9);
// Captures any text in the subject before $phone_delim as the subject
if ( function_exists('iconv_mime_decode') ) {
$subject = iconv_mime_decode($subject, 2, get_option('blog_charset'));
} else {
$subject = wp_iso_descrambler($subject);
}
$subject = explode($phone_delim, $subject);
$subject = $subject[0];
}
/*
* Set the author using the email address (From or Reply-To, the last used)
* otherwise use the site admin.
*/
if ( ! $author_found && preg_match( '/^(From|Reply-To): /', $line ) ) {
if ( preg_match('|[a-z0-9_.-]+@[a-z0-9_.-]+(?!.*<)|i', $line, $matches) )
$author = $matches[0];
else
$author = trim($line);
$author = sanitize_email($author);
if ( is_email($author) ) {
/* translators: Post author email address */
echo '<p>' . sprintf(__('Author is %s'), $author) . '</p>';
$userdata = get_user_by('email', $author);
if ( ! empty( $userdata ) ) {
$post_author = $userdata->ID;
$author_found = true;
}
}
}
if ( preg_match( '/Date: /i', $line ) ) { // of the form '20 Mar 2002 20:32:37 +0100'
$ddate = str_replace( 'Date: ', '', trim( $line ) );
$ddate = preg_replace( '!\s*\(.+\)\s*$!', '', $ddate ); // remove parenthesised timezone string if it exists, as this confuses strtotime
$ddate_U = strtotime( $ddate );
$post_date = gmdate( 'Y-m-d H:i:s', $ddate_U + $time_difference );
$post_date_gmt = gmdate( 'Y-m-d H:i:s', $ddate_U );
}
}
}
// Set $post_status based on $author_found and on author's publish_posts capability
if ( $author_found ) {
$user = new WP_User($post_author);
$post_status = ( $user->has_cap('publish_posts') ) ? 'publish' : 'pending';
} else {
// Author not found in DB, set status to pending. Author already set to admin.
$post_status = 'pending';
}
$subject = trim($subject);
if ( $content_type == 'multipart/alternative' ) {
$content = explode('--'.$boundary, $content);
$content = $content[2];
// Match case-insensitive content-transfer-encoding.
if ( preg_match( '/Content-Transfer-Encoding: quoted-printable/i', $content, $delim) ) {
$content = explode($delim[0], $content);
$content = $content[1];
}
$content = strip_tags($content, '<img><p><br><i><b><u><em><strong><strike><font><span><div>');
}
$content = trim($content);
/**
* Filters the original content of the email.
*
* Give Post-By-Email extending plugins full access to the content, either
* the raw content, or the content of the last quoted-printable section.
*
* @since 2.8.0
*
* @param string $content The original email content.
*/
$content = apply_filters( 'wp_mail_original_content', $content );
if ( false !== stripos($content_transfer_encoding, "quoted-printable") ) {
$content = quoted_printable_decode($content);
}
if ( function_exists('iconv') && ! empty( $charset ) ) {
$content = iconv($charset, get_option('blog_charset'), $content);
}
// Captures any text in the body after $phone_delim as the body
$content = explode($phone_delim, $content);
$content = empty( $content[1] ) ? $content[0] : $content[1];
$content = trim($content);
/**
* Filters the content of the post submitted by email before saving.
*
* @since 1.2.0
*
* @param string $content The email content.
*/
$post_content = apply_filters( 'phone_content', $content );
$post_title = xmlrpc_getposttitle($content);
if ($post_title == '') $post_title = $subject;
$post_category = array(get_option('default_email_category'));
$post_data = compact('post_content','post_title','post_date','post_date_gmt','post_author','post_category', 'post_status');
$post_data = wp_slash($post_data);
$post_ID = wp_insert_post($post_data);
if ( is_wp_error( $post_ID ) )
echo "\n" . $post_ID->get_error_message();
// We couldn't post, for whatever reason. Better move forward to the next email.
if ( empty( $post_ID ) )
continue;
/**
* Fires after a post submitted by email is published.
*
* @since 1.2.0
*
* @param int $post_ID The post ID.
*/
do_action( 'publish_phone', $post_ID );
echo "\n<p><strong>" . __( 'Author:' ) . '</strong> ' . esc_html( $post_author ) . '</p>';
echo "\n<p><strong>" . __( 'Posted title:' ) . '</strong> ' . esc_html( $post_title ) . '</p>';
if(!$pop3->delete($i)) {
echo '<p>' . sprintf(
/* translators: %s: POP3 error */
__( 'Oops: %s' ),
esc_html( $pop3->ERROR )
) . '</p>';
$pop3->reset();
exit;
} else {
echo '<p>' . sprintf(
/* translators: %s: the message ID */
__( 'Mission complete. Message %s deleted.' ),
'<strong>' . $i . '</strong>'
) . '</p>';
}
}
$pop3->quit();
|
3098_1
|
crossvul
|
php
|
CWE-119
|
Improper Restriction of Operations within the Bounds of a Memory Buffer - Certain languages allow direct addressing of memory locations and do not automatically ensure that these locations are valid for the memory buffer that is being referenced.
|
php
|
#***************************************************************************
# _ _ ____ _
# Project ___| | | | _ \| |
# / __| | | | |_) | |
# | (__| |_| | _ <| |___
# \___|\___/|_| \_\_____|
#
# Copyright (C) 1998 - 2017, Daniel Stenberg, <daniel@haxx.se>, et al.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at https://curl.haxx.se/docs/copyright.html.
#
# You may opt to use, copy, modify, merge, publish, distribute and/or sell
# copies of the Software, and permit persons to whom the Software is
# furnished to do so, under the terms of the COPYING file.
#
# This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
# KIND, either express or implied.
#
###########################################################################
# this list is in numerical order
TESTCASES = test1 test2 test3 test4 test5 test6 test7 test8 test9 \
test10 test11 test12 test13 test14 test15 test16 test17 test18 test19 \
test20 test21 test22 test23 test24 test25 test26 test27 test28 test29 \
test30 test31 test32 test33 test34 test35 test36 test37 test38 test39 \
test40 test41 test42 test43 test44 test45 test46 test47 test48 test49 \
test50 test51 test52 test53 test54 test55 test56 test57 test58 test59 \
test60 test61 test62 test63 test64 test65 test66 test67 test68 test69 \
test70 test71 test72 test73 test74 test75 test76 test77 test78 test79 \
test80 test81 test82 test83 test84 test85 test86 test87 test88 test89 \
test90 test91 test92 test93 test94 test95 test96 test97 test98 test99 \
test100 test101 test102 test103 test104 test105 test106 test107 test108 \
test109 test110 test111 test112 test113 test114 test115 test116 test117 \
test118 test119 test120 test121 test122 test123 test124 test125 test126 \
test127 test128 test129 test130 test131 test132 test133 test134 test135 \
test136 test137 test138 test139 test140 test141 test142 test143 test144 \
test145 test146 test147 test148 test149 test150 test151 test152 test153 \
test154 test155 test156 test157 test158 test159 test160 test161 test162 \
test163 test164 test165 test166 test167 test168 test169 test170 test171 \
test172 test173 test174 test175 test176 test177 test178 test179 test180 \
test181 test182 test183 test184 test185 test186 test187 test188 test189 \
test190 test191 test192 test193 test194 test195 test196 test197 test198 \
test199 test200 test201 test202 test203 test204 test205 test206 test207 \
test208 test209 test210 test211 test212 test213 test214 test215 test216 \
test217 test218 test219 test220 test221 test222 test223 test224 test225 \
test226 test227 test228 test229 test231 test233 test234 \
test235 test236 test237 test238 test239 test240 test241 test242 test243 \
test245 test246 test247 test248 test249 test250 test251 test252 \
test253 test254 test255 test256 test257 test258 test259 test260 test261 \
test262 test263 test264 test265 test266 test267 test268 test269 test270 \
test271 test272 test273 test274 test275 test276 test277 test278 test279 \
test280 test281 test282 test283 test284 test285 test286 test287 test288 \
test289 test290 test291 test292 test293 test294 test295 test296 test297 \
test298 test299 test300 test301 test302 test303 test304 test305 test306 \
test307 test308 test309 test310 test311 test312 test313 \
test320 test321 test322 test323 test324 \
test325 \
test350 test351 test352 test353 test354 \
\
test400 test401 test402 test403 test404 test405 test406 test407 test408 \
test409 \
\
test500 test501 test502 test503 test504 test505 test506 test507 test508 \
test509 test510 test511 test512 test513 test514 test515 test516 test517 \
test518 test519 test520 test521 test522 test523 test524 test525 test526 \
test527 test528 test529 test530 test531 test532 test533 test534 test535 \
test536 test537 test538 test539 test540 test541 test542 test543 test544 \
test545 test546 test547 test548 test549 test550 test551 test552 test553 \
test554 test555 test556 test557 test558 test560 test561 test562 \
test563 test564 test565 test566 test567 test568 test569 test570 test571 \
test572 test573 test574 test575 test576 test578 test579 test580 \
test581 test582 test583 test584 test585 test586 test587 test588 \
test590 test591 test592 test593 test594 test595 test596 test597 test598 \
test599 test600 test601 test602 test603 test604 test605 test606 test607 \
test608 test609 test610 test611 test612 test613 test614 test615 test616 \
test617 test618 test619 test620 test621 test622 test623 test624 test625 \
test626 test627 test628 test629 test630 test631 test632 test633 test634 \
test635 test636 test637 test638 test639 test640 test641 \
\
test700 test701 test702 test703 test704 test705 test706 test707 test708 \
test709 test710 test711 test712 test713 test714 test715 \
\
test800 test801 test802 test803 test804 test805 test806 test807 test808 \
test809 test810 test811 test812 test813 test814 test815 test816 test817 \
test818 test819 test820 test821 test822 test823 test824 test825 test826 \
test827 test828 test829 test830 test831 test832 test833 test834 test835 \
test836 test837 test838 test839 test840 test841 test842 test843 test844 \
test845 \
\
test850 test851 test852 test853 test854 test855 test856 test857 test858 \
test859 test860 test861 test862 test863 test864 test865 test866 test867 \
test868 test869 test870 test871 test872 test873 test874 test875 test876 \
test877 test878 test879 test880 test881 test882 test883 test884 test885 \
test886 test887 test888 test889 test890 \
\
test900 test901 test902 test903 test904 test905 test906 test907 test908 \
test909 test910 test911 test912 test913 test914 test915 test916 test917 \
test918 test919 test920 test921 test922 test923 test924 test925 test926 \
test927 test928 test929 test930 test931 test932 test933 test934 test935 \
test936 test937 test938 test939 test940 test941 test942 test943 test944 \
test945 test946 test947 test948 test949 \
\
test1000 test1001 test1002 test1003 test1004 test1005 test1006 test1007 \
test1008 test1009 test1010 test1011 test1012 test1013 test1014 test1015 \
test1016 test1017 test1018 test1019 test1020 test1021 test1022 test1023 \
test1024 test1025 test1026 test1027 test1028 test1029 test1030 test1031 \
test1032 test1033 test1034 test1035 test1036 test1037 test1038 test1039 \
test1040 test1041 test1042 test1043 test1044 test1045 test1046 test1047 \
test1048 test1049 test1050 test1051 test1052 test1053 test1054 test1055 \
test1056 test1057 test1058 test1059 test1060 test1061 test1062 test1063 \
test1064 test1065 test1066 test1067 test1068 test1069 test1070 test1071 \
test1072 test1073 test1074 test1075 test1076 test1077 test1078 test1079 \
test1080 test1081 test1082 test1083 test1084 test1085 test1086 test1087 \
test1088 test1089 test1090 test1091 test1092 test1093 test1094 test1095 \
test1096 test1097 test1098 test1099 test1100 test1101 test1102 test1103 \
test1104 test1105 test1106 test1107 test1108 test1109 test1110 test1111 \
test1112 test1113 test1114 test1115 test1116 test1117 test1118 test1119 \
test1120 test1121 test1122 test1123 test1124 test1125 test1126 test1127 \
test1128 test1129 test1130 test1131 test1132 test1133 test1134 test1135 \
test1136 test1137 test1138 test1139 test1140 test1141 test1142 test1143 \
test1144 test1145 test1146 \
test1200 test1201 test1202 test1203 test1204 test1205 test1206 test1207 \
test1208 test1209 test1210 test1211 test1212 test1213 test1214 test1215 \
test1216 test1217 test1218 test1219 \
test1220 test1221 test1222 test1223 test1224 test1225 test1226 test1227 \
test1228 test1229 test1230 test1231 test1232 test1233 test1234 test1235 \
test1236 test1237 test1238 test1239 test1240 test1241 test1242 test1243 \
test1244 test1245 test1246 test1247 test1248 test1249 test1250 test1251 \
test1252 test1253 test1254 test1255 test1256 test1257 test1258 test1259 \
test1260 \
\
test1280 test1281 test1282 test1283 test1284 test1285 test1286 test1287 \
test1288 \
\
test1300 test1301 test1302 test1303 test1304 test1305 test1306 test1307 \
test1308 test1309 test1310 test1311 test1312 test1313 test1314 test1315 \
test1316 test1317 test1318 test1319 test1320 test1321 test1322 \
test1325 test1326 test1327 test1328 test1329 test1330 test1331 \
test1332 test1333 test1334 test1335 test1336 test1337 test1338 test1339 \
test1340 test1341 test1342 test1343 test1344 test1345 test1346 test1347 \
test1348 test1349 test1350 test1351 test1352 test1353 test1354 test1355 \
test1356 test1357 test1358 test1359 test1360 test1361 test1362 test1363 \
test1364 test1365 test1366 test1367 test1368 test1369 test1370 test1371 \
test1372 test1373 test1374 test1375 test1376 test1377 test1378 test1379 \
test1380 test1381 test1382 test1383 test1384 test1385 test1386 test1387 \
test1388 test1389 test1390 test1391 test1392 test1393 test1394 test1395 \
test1396 test1397 test1398 \
\
test1400 test1401 test1402 test1403 test1404 test1405 test1406 test1407 \
test1408 test1409 test1410 test1411 test1412 test1413 test1414 test1415 \
test1416 test1417 test1418 test1419 test1420 test1421 test1422 test1423 \
test1424 \
test1428 test1429 test1430 test1431 test1432 test1433 test1434 test1435 \
test1436 test1437 test1438 test1439 \
\
test1500 test1501 test1502 test1503 test1504 test1505 test1506 test1507 \
test1508 test1509 test1510 test1511 test1512 test1513 test1514 test1515 \
test1516 test1517 \
\
test1520 \
\
test1525 test1526 test1527 test1528 test1529 test1530 test1531 test1532 \
test1533 test1534 test1535 test1536 \
\
test1600 test1601 test1602 test1603 test1604 test1605 \
\
test1700 test1701 test1702 \
\
test1800 test1801 \
\
test1900 test1901 test1902 test1903 \
\
test2000 test2001 test2002 test2003 test2004 test2005 test2006 test2007 \
test2008 test2009 test2010 test2011 test2012 test2013 test2014 test2015 \
test2016 test2017 test2018 test2019 test2020 test2021 test2022 test2023 \
test2024 test2025 test2026 test2027 test2028 test2029 test2030 test2031 \
test2032 test2033 test2034 test2035 test2036 test2037 test2038 test2039 \
test2040 test2041 test2042 test2043 test2044 test2045 test2046 test2047 \
test2048 test2049 test2050 test2051 test2052 test2053 test2054 test2055
|
#***************************************************************************
# _ _ ____ _
# Project ___| | | | _ \| |
# / __| | | | |_) | |
# | (__| |_| | _ <| |___
# \___|\___/|_| \_\_____|
#
# Copyright (C) 1998 - 2017, Daniel Stenberg, <daniel@haxx.se>, et al.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at https://curl.haxx.se/docs/copyright.html.
#
# You may opt to use, copy, modify, merge, publish, distribute and/or sell
# copies of the Software, and permit persons to whom the Software is
# furnished to do so, under the terms of the COPYING file.
#
# This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
# KIND, either express or implied.
#
###########################################################################
# this list is in numerical order
TESTCASES = test1 test2 test3 test4 test5 test6 test7 test8 test9 \
test10 test11 test12 test13 test14 test15 test16 test17 test18 test19 \
test20 test21 test22 test23 test24 test25 test26 test27 test28 test29 \
test30 test31 test32 test33 test34 test35 test36 test37 test38 test39 \
test40 test41 test42 test43 test44 test45 test46 test47 test48 test49 \
test50 test51 test52 test53 test54 test55 test56 test57 test58 test59 \
test60 test61 test62 test63 test64 test65 test66 test67 test68 test69 \
test70 test71 test72 test73 test74 test75 test76 test77 test78 test79 \
test80 test81 test82 test83 test84 test85 test86 test87 test88 test89 \
test90 test91 test92 test93 test94 test95 test96 test97 test98 test99 \
test100 test101 test102 test103 test104 test105 test106 test107 test108 \
test109 test110 test111 test112 test113 test114 test115 test116 test117 \
test118 test119 test120 test121 test122 test123 test124 test125 test126 \
test127 test128 test129 test130 test131 test132 test133 test134 test135 \
test136 test137 test138 test139 test140 test141 test142 test143 test144 \
test145 test146 test147 test148 test149 test150 test151 test152 test153 \
test154 test155 test156 test157 test158 test159 test160 test161 test162 \
test163 test164 test165 test166 test167 test168 test169 test170 test171 \
test172 test173 test174 test175 test176 test177 test178 test179 test180 \
test181 test182 test183 test184 test185 test186 test187 test188 test189 \
test190 test191 test192 test193 test194 test195 test196 test197 test198 \
test199 test200 test201 test202 test203 test204 test205 test206 test207 \
test208 test209 test210 test211 test212 test213 test214 test215 test216 \
test217 test218 test219 test220 test221 test222 test223 test224 test225 \
test226 test227 test228 test229 test231 test233 test234 \
test235 test236 test237 test238 test239 test240 test241 test242 test243 \
test245 test246 test247 test248 test249 test250 test251 test252 \
test253 test254 test255 test256 test257 test258 test259 test260 test261 \
test262 test263 test264 test265 test266 test267 test268 test269 test270 \
test271 test272 test273 test274 test275 test276 test277 test278 test279 \
test280 test281 test282 test283 test284 test285 test286 test287 test288 \
test289 test290 test291 test292 test293 test294 test295 test296 test297 \
test298 test299 test300 test301 test302 test303 test304 test305 test306 \
test307 test308 test309 test310 test311 test312 test313 \
test320 test321 test322 test323 test324 \
test325 \
test350 test351 test352 test353 test354 \
\
test400 test401 test402 test403 test404 test405 test406 test407 test408 \
test409 \
\
test500 test501 test502 test503 test504 test505 test506 test507 test508 \
test509 test510 test511 test512 test513 test514 test515 test516 test517 \
test518 test519 test520 test521 test522 test523 test524 test525 test526 \
test527 test528 test529 test530 test531 test532 test533 test534 test535 \
test536 test537 test538 test539 test540 test541 test542 test543 test544 \
test545 test546 test547 test548 test549 test550 test551 test552 test553 \
test554 test555 test556 test557 test558 test560 test561 test562 \
test563 test564 test565 test566 test567 test568 test569 test570 test571 \
test572 test573 test574 test575 test576 test578 test579 test580 \
test581 test582 test583 test584 test585 test586 test587 test588 \
test590 test591 test592 test593 test594 test595 test596 test597 test598 \
test599 test600 test601 test602 test603 test604 test605 test606 test607 \
test608 test609 test610 test611 test612 test613 test614 test615 test616 \
test617 test618 test619 test620 test621 test622 test623 test624 test625 \
test626 test627 test628 test629 test630 test631 test632 test633 test634 \
test635 test636 test637 test638 test639 test640 test641 \
\
test700 test701 test702 test703 test704 test705 test706 test707 test708 \
test709 test710 test711 test712 test713 test714 test715 \
\
test800 test801 test802 test803 test804 test805 test806 test807 test808 \
test809 test810 test811 test812 test813 test814 test815 test816 test817 \
test818 test819 test820 test821 test822 test823 test824 test825 test826 \
test827 test828 test829 test830 test831 test832 test833 test834 test835 \
test836 test837 test838 test839 test840 test841 test842 test843 test844 \
test845 \
\
test850 test851 test852 test853 test854 test855 test856 test857 test858 \
test859 test860 test861 test862 test863 test864 test865 test866 test867 \
test868 test869 test870 test871 test872 test873 test874 test875 test876 \
test877 test878 test879 test880 test881 test882 test883 test884 test885 \
test886 test887 test888 test889 test890 \
\
test900 test901 test902 test903 test904 test905 test906 test907 test908 \
test909 test910 test911 test912 test913 test914 test915 test916 test917 \
test918 test919 test920 test921 test922 test923 test924 test925 test926 \
test927 test928 test929 test930 test931 test932 test933 test934 test935 \
test936 test937 test938 test939 test940 test941 test942 test943 test944 \
test945 test946 test947 test948 test949 \
\
test1000 test1001 test1002 test1003 test1004 test1005 test1006 test1007 \
test1008 test1009 test1010 test1011 test1012 test1013 test1014 test1015 \
test1016 test1017 test1018 test1019 test1020 test1021 test1022 test1023 \
test1024 test1025 test1026 test1027 test1028 test1029 test1030 test1031 \
test1032 test1033 test1034 test1035 test1036 test1037 test1038 test1039 \
test1040 test1041 test1042 test1043 test1044 test1045 test1046 test1047 \
test1048 test1049 test1050 test1051 test1052 test1053 test1054 test1055 \
test1056 test1057 test1058 test1059 test1060 test1061 test1062 test1063 \
test1064 test1065 test1066 test1067 test1068 test1069 test1070 test1071 \
test1072 test1073 test1074 test1075 test1076 test1077 test1078 test1079 \
test1080 test1081 test1082 test1083 test1084 test1085 test1086 test1087 \
test1088 test1089 test1090 test1091 test1092 test1093 test1094 test1095 \
test1096 test1097 test1098 test1099 test1100 test1101 test1102 test1103 \
test1104 test1105 test1106 test1107 test1108 test1109 test1110 test1111 \
test1112 test1113 test1114 test1115 test1116 test1117 test1118 test1119 \
test1120 test1121 test1122 test1123 test1124 test1125 test1126 test1127 \
test1128 test1129 test1130 test1131 test1132 test1133 test1134 test1135 \
test1136 test1137 test1138 test1139 test1140 test1141 test1142 test1143 \
test1144 test1145 test1146 \
test1200 test1201 test1202 test1203 test1204 test1205 test1206 test1207 \
test1208 test1209 test1210 test1211 test1212 test1213 test1214 test1215 \
test1216 test1217 test1218 test1219 \
test1220 test1221 test1222 test1223 test1224 test1225 test1226 test1227 \
test1228 test1229 test1230 test1231 test1232 test1233 test1234 test1235 \
test1236 test1237 test1238 test1239 test1240 test1241 test1242 test1243 \
test1244 test1245 test1246 test1247 test1248 test1249 test1250 test1251 \
test1252 test1253 test1254 test1255 test1256 test1257 test1258 test1259 \
test1260 \
\
test1280 test1281 test1282 test1283 test1284 test1285 test1286 test1287 \
test1288 \
\
test1300 test1301 test1302 test1303 test1304 test1305 test1306 test1307 \
test1308 test1309 test1310 test1311 test1312 test1313 test1314 test1315 \
test1316 test1317 test1318 test1319 test1320 test1321 test1322 \
test1325 test1326 test1327 test1328 test1329 test1330 test1331 \
test1332 test1333 test1334 test1335 test1336 test1337 test1338 test1339 \
test1340 test1341 test1342 test1343 test1344 test1345 test1346 test1347 \
test1348 test1349 test1350 test1351 test1352 test1353 test1354 test1355 \
test1356 test1357 test1358 test1359 test1360 test1361 test1362 test1363 \
test1364 test1365 test1366 test1367 test1368 test1369 test1370 test1371 \
test1372 test1373 test1374 test1375 test1376 test1377 test1378 test1379 \
test1380 test1381 test1382 test1383 test1384 test1385 test1386 test1387 \
test1388 test1389 test1390 test1391 test1392 test1393 test1394 test1395 \
test1396 test1397 test1398 \
\
test1400 test1401 test1402 test1403 test1404 test1405 test1406 test1407 \
test1408 test1409 test1410 test1411 test1412 test1413 test1414 test1415 \
test1416 test1417 test1418 test1419 test1420 test1421 test1422 test1423 \
test1424 \
test1428 test1429 test1430 test1431 test1432 test1433 test1434 test1435 \
test1436 test1437 test1438 test1439 test1440 test1441 \
\
test1500 test1501 test1502 test1503 test1504 test1505 test1506 test1507 \
test1508 test1509 test1510 test1511 test1512 test1513 test1514 test1515 \
test1516 test1517 \
\
test1520 \
\
test1525 test1526 test1527 test1528 test1529 test1530 test1531 test1532 \
test1533 test1534 test1535 test1536 \
\
test1600 test1601 test1602 test1603 test1604 test1605 \
\
test1700 test1701 test1702 \
\
test1800 test1801 \
\
test1900 test1901 test1902 test1903 \
\
test2000 test2001 test2002 test2003 test2004 test2005 test2006 test2007 \
test2008 test2009 test2010 test2011 test2012 test2013 test2014 test2015 \
test2016 test2017 test2018 test2019 test2020 test2021 test2022 test2023 \
test2024 test2025 test2026 test2027 test2028 test2029 test2030 test2031 \
test2032 test2033 test2034 test2035 test2036 test2037 test2038 test2039 \
test2040 test2041 test2042 test2043 test2044 test2045 test2046 test2047 \
test2048 test2049 test2050 test2051 test2052 test2053 test2054 test2055
|
3259_1
|
crossvul
|
inc
|
CWE-119
|
Improper Restriction of Operations within the Bounds of a Memory Buffer - Certain languages allow direct addressing of memory locations and do not automatically ensure that these locations are valid for the memory buffer that is being referenced.
|
python
|
# Authors:
# Trevor Perrin
# Google (adapted by Sam Rushing) - NPN support
# Google - minimal padding
# Martin von Loewis - python 3 port
# Yngve Pettersen (ported by Paul Sokolovsky) - TLS 1.2
#
# See the LICENSE file for legal information regarding use of this file.
"""Helper class for TLSConnection."""
from __future__ import generators
from .utils.compat import *
from .utils.cryptomath import *
from .utils.cipherfactory import createAES, createRC4, createTripleDES
from .utils.codec import *
from .errors import *
from .messages import *
from .mathtls import *
from .constants import *
from .utils.cryptomath import getRandomBytes
import socket
import errno
import traceback
class _ConnectionState(object):
def __init__(self):
self.macContext = None
self.encContext = None
self.seqnum = 0
def getSeqNumBytes(self):
w = Writer()
w.add(self.seqnum, 8)
self.seqnum += 1
return w.bytes
class TLSRecordLayer(object):
"""
This class handles data transmission for a TLS connection.
Its only subclass is L{tlslite.TLSConnection.TLSConnection}. We've
separated the code in this class from TLSConnection to make things
more readable.
@type sock: socket.socket
@ivar sock: The underlying socket object.
@type session: L{tlslite.Session.Session}
@ivar session: The session corresponding to this connection.
Due to TLS session resumption, multiple connections can correspond
to the same underlying session.
@type version: tuple
@ivar version: The TLS version being used for this connection.
(3,0) means SSL 3.0, and (3,1) means TLS 1.0.
@type closed: bool
@ivar closed: If this connection is closed.
@type resumed: bool
@ivar resumed: If this connection is based on a resumed session.
@type allegedSrpUsername: str or None
@ivar allegedSrpUsername: This is set to the SRP username
asserted by the client, whether the handshake succeeded or not.
If the handshake fails, this can be inspected to determine
if a guessing attack is in progress against a particular user
account.
@type closeSocket: bool
@ivar closeSocket: If the socket should be closed when the
connection is closed, defaults to True (writable).
If you set this to True, TLS Lite will assume the responsibility of
closing the socket when the TLS Connection is shutdown (either
through an error or through the user calling close()). The default
is False.
@type ignoreAbruptClose: bool
@ivar ignoreAbruptClose: If an abrupt close of the socket should
raise an error (writable).
If you set this to True, TLS Lite will not raise a
L{tlslite.errors.TLSAbruptCloseError} exception if the underlying
socket is unexpectedly closed. Such an unexpected closure could be
caused by an attacker. However, it also occurs with some incorrect
TLS implementations.
You should set this to True only if you're not worried about an
attacker truncating the connection, and only if necessary to avoid
spurious errors. The default is False.
@sort: __init__, read, readAsync, write, writeAsync, close, closeAsync,
getCipherImplementation, getCipherName
"""
def __init__(self, sock):
self.sock = sock
#My session object (Session instance; read-only)
self.session = None
#Am I a client or server?
self._client = None
#Buffers for processing messages
self._handshakeBuffer = []
self.clearReadBuffer()
self.clearWriteBuffer()
#Handshake digests
self._handshake_md5 = hashlib.md5()
self._handshake_sha = hashlib.sha1()
self._handshake_sha256 = hashlib.sha256()
#TLS Protocol Version
self.version = (0,0) #read-only
self._versionCheck = False #Once we choose a version, this is True
#Current and Pending connection states
self._writeState = _ConnectionState()
self._readState = _ConnectionState()
self._pendingWriteState = _ConnectionState()
self._pendingReadState = _ConnectionState()
#Is the connection open?
self.closed = True #read-only
self._refCount = 0 #Used to trigger closure
#Is this a resumed session?
self.resumed = False #read-only
#What username did the client claim in his handshake?
self.allegedSrpUsername = None
#On a call to close(), do we close the socket? (writeable)
self.closeSocket = True
#If the socket is abruptly closed, do we ignore it
#and pretend the connection was shut down properly? (writeable)
self.ignoreAbruptClose = False
#Fault we will induce, for testing purposes
self.fault = None
def clearReadBuffer(self):
self._readBuffer = b''
def clearWriteBuffer(self):
self._send_writer = None
#*********************************************************
# Public Functions START
#*********************************************************
def read(self, max=None, min=1):
"""Read some data from the TLS connection.
This function will block until at least 'min' bytes are
available (or the connection is closed).
If an exception is raised, the connection will have been
automatically closed.
@type max: int
@param max: The maximum number of bytes to return.
@type min: int
@param min: The minimum number of bytes to return
@rtype: str
@return: A string of no more than 'max' bytes, and no fewer
than 'min' (unless the connection has been closed, in which
case fewer than 'min' bytes may be returned).
@raise socket.error: If a socket error occurs.
@raise tlslite.errors.TLSAbruptCloseError: If the socket is closed
without a preceding alert.
@raise tlslite.errors.TLSAlert: If a TLS alert is signalled.
"""
for result in self.readAsync(max, min):
pass
return result
def readAsync(self, max=None, min=1):
"""Start a read operation on the TLS connection.
This function returns a generator which behaves similarly to
read(). Successive invocations of the generator will return 0
if it is waiting to read from the socket, 1 if it is waiting
to write to the socket, or a string if the read operation has
completed.
@rtype: iterable
@return: A generator; see above for details.
"""
try:
while len(self._readBuffer)<min and not self.closed:
try:
for result in self._getMsg(ContentType.application_data):
if result in (0,1):
yield result
applicationData = result
self._readBuffer += applicationData.write()
except TLSRemoteAlert as alert:
if alert.description != AlertDescription.close_notify:
raise
except TLSAbruptCloseError:
if not self.ignoreAbruptClose:
raise
else:
self._shutdown(True)
if max == None:
max = len(self._readBuffer)
returnBytes = self._readBuffer[:max]
self._readBuffer = self._readBuffer[max:]
yield bytes(returnBytes)
except GeneratorExit:
raise
except:
self._shutdown(False)
raise
def unread(self, b):
"""Add bytes to the front of the socket read buffer for future
reading. Be careful using this in the context of select(...): if you
unread the last data from a socket, that won't wake up selected waiters,
and those waiters may hang forever.
"""
self._readBuffer = b + self._readBuffer
def write(self, s):
"""Write some data to the TLS connection.
This function will block until all the data has been sent.
If an exception is raised, the connection will have been
automatically closed.
@type s: str
@param s: The data to transmit to the other party.
@raise socket.error: If a socket error occurs.
"""
for result in self.writeAsync(s):
pass
def writeAsync(self, s):
"""Start a write operation on the TLS connection.
This function returns a generator which behaves similarly to
write(). Successive invocations of the generator will return
1 if it is waiting to write to the socket, or will raise
StopIteration if the write operation has completed.
@rtype: iterable
@return: A generator; see above for details.
"""
try:
if self.closed:
raise TLSClosedConnectionError("attempt to write to closed connection")
index = 0
blockSize = 16384
randomizeFirstBlock = True
while 1:
startIndex = index * blockSize
endIndex = startIndex + blockSize
if startIndex >= len(s):
break
if endIndex > len(s):
endIndex = len(s)
block = bytearray(s[startIndex : endIndex])
applicationData = ApplicationData().create(block)
for result in self._sendMsg(applicationData, \
randomizeFirstBlock):
yield result
randomizeFirstBlock = False #only on 1st message
index += 1
except GeneratorExit:
raise
except Exception:
# Don't invalidate the session on write failure if abrupt closes are
# okay.
self._shutdown(self.ignoreAbruptClose)
raise
def close(self):
"""Close the TLS connection.
This function will block until it has exchanged close_notify
alerts with the other party. After doing so, it will shut down the
TLS connection. Further attempts to read through this connection
will return "". Further attempts to write through this connection
will raise ValueError.
If makefile() has been called on this connection, the connection
will be not be closed until the connection object and all file
objects have been closed.
Even if an exception is raised, the connection will have been
closed.
@raise socket.error: If a socket error occurs.
@raise tlslite.errors.TLSAbruptCloseError: If the socket is closed
without a preceding alert.
@raise tlslite.errors.TLSAlert: If a TLS alert is signalled.
"""
if not self.closed:
for result in self._decrefAsync():
pass
# Python 3 callback
_decref_socketios = close
def closeAsync(self):
"""Start a close operation on the TLS connection.
This function returns a generator which behaves similarly to
close(). Successive invocations of the generator will return 0
if it is waiting to read from the socket, 1 if it is waiting
to write to the socket, or will raise StopIteration if the
close operation has completed.
@rtype: iterable
@return: A generator; see above for details.
"""
if not self.closed:
for result in self._decrefAsync():
yield result
    def _decrefAsync(self):
        # Drop one reference (makefile() adds references); only when the
        # last reference is released does the actual TLS shutdown run.
        # Generator: yields 0/1 while waiting on the socket.
        self._refCount -= 1
        if self._refCount == 0 and not self.closed:
            try:
                # Tell the peer we are done by sending close_notify.
                for result in self._sendMsg(Alert().create(\
                    AlertDescription.close_notify, AlertLevel.warning)):
                    yield result
                alert = None
                # By default close the socket, since it's been observed
                # that some other libraries will not respond to the
                # close_notify alert, thus leaving us hanging if we're
                # expecting it
                if self.closeSocket:
                    self._shutdown(True)
                else:
                    # Caller keeps the socket: wait for the peer's alert
                    # (skipping trailing application data) so the close
                    # completes cleanly and the session stays resumable.
                    while not alert:
                        for result in self._getMsg((ContentType.alert, \
                                                   ContentType.application_data)):
                            if result in (0,1):
                                yield result
                        if result.contentType == ContentType.alert:
                            alert = result
                    if alert.description == AlertDescription.close_notify:
                        self._shutdown(True)
                    else:
                        raise TLSRemoteAlert(alert)
            except (socket.error, TLSAbruptCloseError):
                #If the other side closes the socket, that's okay
                self._shutdown(True)
            except GeneratorExit:
                raise
            except:
                # Unexpected failure: mark the session non-resumable.
                self._shutdown(False)
                raise
def getVersionName(self):
"""Get the name of this TLS version.
@rtype: str
@return: The name of the TLS version used with this connection.
Either None, 'SSL 3.0', 'TLS 1.0', 'TLS 1.1', or 'TLS 1.2'.
"""
if self.version == (3,0):
return "SSL 3.0"
elif self.version == (3,1):
return "TLS 1.0"
elif self.version == (3,2):
return "TLS 1.1"
elif self.version == (3,3):
return "TLS 1.2"
else:
return None
def getCipherName(self):
"""Get the name of the cipher used with this connection.
@rtype: str
@return: The name of the cipher used with this connection.
Either 'aes128', 'aes256', 'rc4', or '3des'.
"""
if not self._writeState.encContext:
return None
return self._writeState.encContext.name
def getCipherImplementation(self):
"""Get the name of the cipher implementation used with
this connection.
@rtype: str
@return: The name of the cipher implementation used with
this connection. Either 'python', 'openssl', or 'pycrypto'.
"""
if not self._writeState.encContext:
return None
return self._writeState.encContext.implementation
    #Emulate a socket, somewhat -
    def send(self, s):
        """Send data to the TLS connection (socket emulation).
        Unlike a real socket, this blocks until everything is written,
        so it always reports the full length as sent.
        @raise socket.error: If a socket error occurs.
        """
        self.write(s)
        return len(s)
    def sendall(self, s):
        """Send data to the TLS connection (socket emulation).
        @raise socket.error: If a socket error occurs.
        """
        self.write(s)
    def recv(self, bufsize):
        """Get some data from the TLS connection (socket emulation).
        @raise socket.error: If a socket error occurs.
        @raise tlslite.errors.TLSAbruptCloseError: If the socket is closed
        without a preceding alert.
        @raise tlslite.errors.TLSAlert: If a TLS alert is signalled.
        """
        return self.read(bufsize)
    def recv_into(self, b):
        # Read up to len(b) bytes into the writable buffer b (socket
        # emulation).  Returns the number of bytes copied, or None when
        # read() produced no data (e.g. the connection was closed).
        data = self.read(len(b))
        if not data:
            return None
        b[:len(data)] = data
        return len(data)
    def makefile(self, mode='r', bufsize=-1):
        """Create a file object for the TLS connection (socket emulation).
        Each file object holds a reference on this connection (see
        _decrefAsync): the TLS shutdown happens only after the
        connection and every file object created here are closed.
        @rtype: L{socket._fileobject}
        """
        self._refCount += 1
        # So, it is pretty fragile to be using Python internal objects
        # like this, but it is probably the best/easiest way to provide
        # matching behavior for socket emulation purposes.  The 'close'
        # argument is nice, its apparently a recent addition to this
        # class, so that when fileobject.close() gets called, it will
        # close() us, causing the refcount to be decremented (decrefAsync).
        #
        # If this is the last close() on the outstanding fileobjects /
        # TLSConnection, then the "actual" close alerts will be sent,
        # socket closed, etc.
        if sys.version_info < (3,):
            return socket._fileobject(self, mode, bufsize, close=True)
        else:
            # XXX need to wrap this further if buffering is requested
            return socket.SocketIO(self, mode)
    def getsockname(self):
        """Return the socket's own address (socket emulation)."""
        return self.sock.getsockname()
    def getpeername(self):
        """Return the remote address to which the socket is connected
        (socket emulation)."""
        return self.sock.getpeername()
    def settimeout(self, value):
        """Set a timeout on blocking socket operations (socket emulation)."""
        return self.sock.settimeout(value)
    def gettimeout(self):
        """Return the timeout associated with socket operations (socket
        emulation)."""
        return self.sock.gettimeout()
    def setsockopt(self, level, optname, value):
        """Set the value of the given socket option (socket emulation)."""
        return self.sock.setsockopt(level, optname, value)
    def shutdown(self, how):
        """Shutdown the underlying socket directly (no close_notify sent)."""
        return self.sock.shutdown(how)
    def fileno(self):
        """Not implemented in TLS Lite; always raises NotImplementedError."""
        raise NotImplementedError()
#*********************************************************
# Public Functions END
#*********************************************************
    def _shutdown(self, resumable):
        # Tear down TLS state: wipe both active connection states,
        # reset the version, and mark the connection closed.
        self._writeState = _ConnectionState()
        self._readState = _ConnectionState()
        self.version = (0,0)
        self._versionCheck = False
        self.closed = True
        if self.closeSocket:
            self.sock.close()
        #Even if resumable is False, we'll never toggle this on
        if not resumable and self.session:
            self.session.resumable = False
    def _sendError(self, alertDescription, errorStr=None):
        # Send a fatal alert to the peer, shut down non-resumably, and
        # raise the alert locally.  Generator: yields socket-wait states
        # (0/1) from the nested send; never returns normally.
        alert = Alert().create(alertDescription, AlertLevel.fatal)
        for result in self._sendMsg(alert):
            yield result
        self._shutdown(False)
        raise TLSLocalAlert(alert, errorStr)
    def _sendMsgs(self, msgs):
        # Send a sequence of messages back-to-back.  Note that
        # randomizeFirstBlock only has an effect on ApplicationData
        # records inside _sendMsg, so passing True for every message is
        # harmless for handshake flights.
        randomizeFirstBlock = True
        for msg in msgs:
            for result in self._sendMsg(msg, randomizeFirstBlock):
                yield result
            randomizeFirstBlock = True
    def _sendMsg(self, msg, randomizeFirstBlock = True):
        # Serialize, MAC, encrypt, and transmit one TLS record.
        # Generator: yields 1 while waiting for the socket to become
        # writable.
        #Whenever we're connected and asked to send an app data message,
        #we first send the first byte of the message. This prevents
        #an attacker from launching a chosen-plaintext attack based on
        #knowing the next IV (a la BEAST).
        if not self.closed and randomizeFirstBlock and self.version <= (3,1) \
                and self._writeState.encContext \
                and self._writeState.encContext.isBlockCipher \
                and isinstance(msg, ApplicationData):
            msgFirstByte = msg.splitFirstByte()
            for result in self._sendMsg(msgFirstByte,
                                        randomizeFirstBlock = False):
                yield result
        b = msg.write()
        # If a 1-byte message was passed in, and we "split" the
        # first(only) byte off above, we may have a 0-length msg:
        if len(b) == 0:
            return
        contentType = msg.contentType
        #Update handshake hashes
        if contentType == ContentType.handshake:
            self._handshake_md5.update(compat26Str(b))
            self._handshake_sha.update(compat26Str(b))
            self._handshake_sha256.update(compat26Str(b))
        #Calculate MAC over seqnum || type || (version) || length || data
        if self._writeState.macContext:
            seqnumBytes = self._writeState.getSeqNumBytes()
            mac = self._writeState.macContext.copy()
            mac.update(compatHMAC(seqnumBytes))
            mac.update(compatHMAC(bytearray([contentType])))
            if self.version == (3,0):
                mac.update( compatHMAC( bytearray([len(b)//256] )))
                mac.update( compatHMAC( bytearray([len(b)%256] )))
            elif self.version in ((3,1), (3,2), (3,3)):
                mac.update(compatHMAC( bytearray([self.version[0]] )))
                mac.update(compatHMAC( bytearray([self.version[1]] )))
                mac.update( compatHMAC( bytearray([len(b)//256] )))
                mac.update( compatHMAC( bytearray([len(b)%256] )))
            else:
                raise AssertionError()
            mac.update(compatHMAC(b))
            macBytes = bytearray(mac.digest())
            # Deliberate MAC corruption hook for test suites.
            if self.fault == Fault.badMAC:
                macBytes[0] = (macBytes[0]+1) % 256
        #Encrypt for Block or Stream Cipher
        if self._writeState.encContext:
            #Add padding and encrypt (for Block Cipher):
            if self._writeState.encContext.isBlockCipher:
                #Add TLS 1.1 fixed block (explicit per-record IV)
                if self.version >= (3,2):
                    b = self.fixedIVBlock + b
                #Add padding: b = b+ (macBytes + paddingBytes)
                currentLength = len(b) + len(macBytes)
                blockLength = self._writeState.encContext.block_size
                paddingLength = blockLength - 1 - (currentLength % blockLength)
                paddingBytes = bytearray([paddingLength] * (paddingLength+1))
                if self.fault == Fault.badPadding:
                    paddingBytes[0] = (paddingBytes[0]+1) % 256
                endBytes = macBytes + paddingBytes
                b += endBytes
                #Encrypt
                b = self._writeState.encContext.encrypt(b)
            #Encrypt (for Stream Cipher)
            else:
                b += macBytes
                b = self._writeState.encContext.encrypt(b)
        #Add record header and send
        r = RecordHeader3().create(self.version, contentType, len(b))
        s = r.write() + b
        while 1:
            try:
                bytesSent = self.sock.send(s) #Might raise socket.error
            except socket.error as why:
                if why.args[0] in (errno.EWOULDBLOCK, errno.EAGAIN):
                    yield 1
                    continue
                else:
                    # The socket was unexpectedly closed. The tricky part
                    # is that there may be an alert sent by the other party
                    # sitting in the read buffer. So, if we get here after
                    # handshaking, we will just raise the error and let the
                    # caller read more data if it would like, thus stumbling
                    # upon the error.
                    #
                    # However, if we get here DURING handshaking, we take
                    # it upon ourselves to see if the next message is an
                    # Alert.
                    if contentType == ContentType.handshake:
                        # See if there's an alert record
                        # Could raise socket.error or TLSAbruptCloseError
                        for result in self._getNextRecord():
                            if result in (0,1):
                                yield result
                        # Closes the socket
                        self._shutdown(False)
                        # If we got an alert, raise it
                        recordHeader, p = result
                        if recordHeader.type == ContentType.alert:
                            alert = Alert().parse(p)
                            raise TLSRemoteAlert(alert)
                    else:
                        # If we got some other message who know what
                        # the remote side is doing, just go ahead and
                        # raise the socket.error
                        raise
            if bytesSent == len(s):
                return
            # Partial send: retry with the remaining bytes.
            s = s[bytesSent:]
            yield 1
    def _getMsg(self, expectedType, secondaryType=None, constructorType=None):
        # Read and parse the next message of an expected content type.
        # Generator: yields 0/1 while waiting on the socket, then the
        # parsed message object.
        try:
            if not isinstance(expectedType, tuple):
                expectedType = (expectedType,)
            #Spin in a loop, until we've got a non-empty record of a type we
            #expect. The loop will be repeated if:
            # - we receive a renegotiation attempt; we send no_renegotiation,
            #   then try again
            # - we receive an empty application-data fragment; we try again
            while 1:
                for result in self._getNextRecord():
                    if result in (0,1):
                        yield result
                recordHeader, p = result
                #If this is an empty application-data fragment, try again
                if recordHeader.type == ContentType.application_data:
                    if p.index == len(p.bytes):
                        continue
                #If we received an unexpected record type...
                if recordHeader.type not in expectedType:
                    #If we received an alert...
                    if recordHeader.type == ContentType.alert:
                        alert = Alert().parse(p)
                        #We either received a fatal error, a warning, or a
                        #close_notify. In any case, we're going to close the
                        #connection. In the latter two cases we respond with
                        #a close_notify, but ignore any socket errors, since
                        #the other side might have already closed the socket.
                        if alert.level == AlertLevel.warning or \
                           alert.description == AlertDescription.close_notify:
                            #If the sendMsg() call fails because the socket has
                            #already been closed, we will be forgiving and not
                            #report the error nor invalidate the "resumability"
                            #of the session.
                            try:
                                alertMsg = Alert()
                                alertMsg.create(AlertDescription.close_notify,
                                                AlertLevel.warning)
                                for result in self._sendMsg(alertMsg):
                                    yield result
                            except socket.error:
                                pass
                            if alert.description == \
                                   AlertDescription.close_notify:
                                self._shutdown(True)
                            elif alert.level == AlertLevel.warning:
                                self._shutdown(False)
                        else: #Fatal alert:
                            self._shutdown(False)
                        #Raise the alert as an exception
                        raise TLSRemoteAlert(alert)
                    #If we received a renegotiation attempt...
                    if recordHeader.type == ContentType.handshake:
                        subType = p.get(1)
                        reneg = False
                        if self._client:
                            if subType == HandshakeType.hello_request:
                                reneg = True
                        else:
                            if subType == HandshakeType.client_hello:
                                reneg = True
                        #Send no_renegotiation, then try again
                        if reneg:
                            alertMsg = Alert()
                            alertMsg.create(AlertDescription.no_renegotiation,
                                            AlertLevel.warning)
                            for result in self._sendMsg(alertMsg):
                                yield result
                            continue
                    #Otherwise: this is an unexpected record, but neither an
                    #alert nor renegotiation
                    for result in self._sendError(\
                            AlertDescription.unexpected_message,
                            "received type=%d" % recordHeader.type):
                        yield result
                break
            #Parse based on content_type
            if recordHeader.type == ContentType.change_cipher_spec:
                yield ChangeCipherSpec().parse(p)
            elif recordHeader.type == ContentType.alert:
                yield Alert().parse(p)
            elif recordHeader.type == ContentType.application_data:
                yield ApplicationData().parse(p)
            elif recordHeader.type == ContentType.handshake:
                #Convert secondaryType to tuple, if it isn't already
                if not isinstance(secondaryType, tuple):
                    secondaryType = (secondaryType,)
                #If it's a handshake message, check handshake header
                if recordHeader.ssl2:
                    subType = p.get(1)
                    if subType != HandshakeType.client_hello:
                        for result in self._sendError(\
                                AlertDescription.unexpected_message,
                                "Can only handle SSLv2 ClientHello messages"):
                            yield result
                    if HandshakeType.client_hello not in secondaryType:
                        for result in self._sendError(\
                                AlertDescription.unexpected_message):
                            yield result
                    subType = HandshakeType.client_hello
                else:
                    subType = p.get(1)
                    if subType not in secondaryType:
                        for result in self._sendError(\
                                AlertDescription.unexpected_message,
                                "Expecting %s, got %s" % (str(secondaryType), subType)):
                            yield result
                #Update handshake hashes
                self._handshake_md5.update(compat26Str(p.bytes))
                self._handshake_sha.update(compat26Str(p.bytes))
                self._handshake_sha256.update(compat26Str(p.bytes))
                #Parse based on handshake type
                if subType == HandshakeType.client_hello:
                    yield ClientHello(recordHeader.ssl2).parse(p)
                elif subType == HandshakeType.server_hello:
                    yield ServerHello().parse(p)
                elif subType == HandshakeType.certificate:
                    yield Certificate(constructorType).parse(p)
                elif subType == HandshakeType.certificate_request:
                    yield CertificateRequest(self.version).parse(p)
                elif subType == HandshakeType.certificate_verify:
                    yield CertificateVerify(self.version).parse(p)
                elif subType == HandshakeType.server_key_exchange:
                    yield ServerKeyExchange(constructorType).parse(p)
                elif subType == HandshakeType.server_hello_done:
                    yield ServerHelloDone().parse(p)
                elif subType == HandshakeType.client_key_exchange:
                    yield ClientKeyExchange(constructorType, \
                                            self.version).parse(p)
                elif subType == HandshakeType.finished:
                    yield Finished(self.version).parse(p)
                elif subType == HandshakeType.next_protocol:
                    yield NextProtocol().parse(p)
                else:
                    raise AssertionError()
        #If an exception was raised by a Parser or Message instance:
        except SyntaxError as e:
            for result in self._sendError(AlertDescription.decode_error,
                                          formatExceptionTrace(e)):
                yield result
    #Returns next record or next handshake message
    def _getNextRecord(self):
        # Generator: yields 0/1 while waiting on the socket, then a
        # (recordHeader, Parser) pair for the next record or buffered
        # handshake message.
        #If there's a handshake message waiting, return it
        if self._handshakeBuffer:
            recordHeader, b = self._handshakeBuffer[0]
            self._handshakeBuffer = self._handshakeBuffer[1:]
            yield (recordHeader, Parser(b))
            return
        #Otherwise...
        #Read the next record header.  We read one byte first to sniff
        #whether this is an SSLv2 (2-byte) or SSLv3/TLS (5-byte) header.
        b = bytearray(0)
        recordHeaderLength = 1
        ssl2 = False
        while 1:
            try:
                s = self.sock.recv(recordHeaderLength-len(b))
            except socket.error as why:
                if why.args[0] in (errno.EWOULDBLOCK, errno.EAGAIN):
                    yield 0
                    continue
                else:
                    raise
            #If the connection was abruptly closed, raise an error
            if len(s)==0:
                raise TLSAbruptCloseError()
            b += bytearray(s)
            if len(b)==1:
                if b[0] in ContentType.all:
                    ssl2 = False
                    recordHeaderLength = 5
                elif b[0] == 128:
                    ssl2 = True
                    recordHeaderLength = 2
                else:
                    raise SyntaxError()
            if len(b) == recordHeaderLength:
                break
        #Parse the record header
        if ssl2:
            r = RecordHeader2().parse(Parser(b))
        else:
            r = RecordHeader3().parse(Parser(b))
        #Check the record header fields (18432 = 2^14 + 2048, the
        #maximum allowed TLSCiphertext fragment length)
        if r.length > 18432:
            for result in self._sendError(AlertDescription.record_overflow):
                yield result
        #Read the record contents
        b = bytearray(0)
        while 1:
            try:
                s = self.sock.recv(r.length - len(b))
            except socket.error as why:
                if why.args[0] in (errno.EWOULDBLOCK, errno.EAGAIN):
                    yield 0
                    continue
                else:
                    raise
            #If the connection is closed, raise a socket error
            if len(s)==0:
                raise TLSAbruptCloseError()
            b += bytearray(s)
            if len(b) == r.length:
                break
        #Check the record header fields (2)
        #We do this after reading the contents from the socket, so that
        #if there's an error, we at least don't leave extra bytes in the
        #socket..
        #
        # THIS CHECK HAS NO SECURITY RELEVANCE (?), BUT COULD HURT INTEROP.
        # SO WE LEAVE IT OUT FOR NOW.
        #
        #if self._versionCheck and r.version != self.version:
        #    for result in self._sendError(AlertDescription.protocol_version,
        #            "Version in header field: %s, should be %s" % (str(r.version),
        #                                       str(self.version))):
        #        yield result
        #Decrypt the record
        for result in self._decryptRecord(r.type, b):
            if result in (0,1): yield result
            else: break
        b = result
        p = Parser(b)
        #If it doesn't contain handshake messages, we can just return it
        if r.type != ContentType.handshake:
            yield (r, p)
        #If it's an SSLv2 ClientHello, we can return it as well
        elif r.ssl2:
            yield (r, p)
        else:
            #Otherwise, we loop through and add the handshake messages to the
            #handshake buffer (a record may carry several, or a partial one)
            while 1:
                if p.index == len(b): #If we're at the end
                    if not self._handshakeBuffer:
                        for result in self._sendError(\
                                AlertDescription.decode_error, \
                                "Received empty handshake record"):
                            yield result
                    break
                #There needs to be at least 4 bytes to get a header
                if p.index+4 > len(b):
                    for result in self._sendError(\
                            AlertDescription.decode_error,
                            "A record has a partial handshake message (1)"):
                        yield result
                p.get(1) # skip handshake type
                msgLength = p.get(3)
                if p.index+msgLength > len(b):
                    for result in self._sendError(\
                            AlertDescription.decode_error,
                            "A record has a partial handshake message (2)"):
                        yield result
                handshakePair = (r, b[p.index-4 : p.index+msgLength])
                self._handshakeBuffer.append(handshakePair)
                p.index += msgLength
            #We've moved at least one handshake message into the
            #handshakeBuffer, return the first one
            recordHeader, b = self._handshakeBuffer[0]
            self._handshakeBuffer = self._handshakeBuffer[1:]
            yield (recordHeader, Parser(b))
    def _decryptRecord(self, recordType, b):
        # Decrypt and authenticate one record payload.  Generator:
        # yields 0/1 while a nested _sendError waits on the socket,
        # then yields the plaintext bytearray.
        # NOTE(review): the CBC padding/MAC validation below is not
        # constant-time (early exits, length-dependent work) -- the
        # pattern exploited by Lucky-13-style timing attacks.  Flagged
        # rather than changed, since a constant-time rework needs
        # careful verification.
        if self._readState.encContext:
            #Decrypt if it's a block cipher
            if self._readState.encContext.isBlockCipher:
                blockLength = self._readState.encContext.block_size
                if len(b) % blockLength != 0:
                    for result in self._sendError(\
                            AlertDescription.decryption_failed,
                            "Encrypted data not a multiple of blocksize"):
                        yield result
                b = self._readState.encContext.decrypt(b)
                if self.version >= (3,2): #For TLS 1.1, remove explicit IV
                    b = b[self._readState.encContext.block_size : ]
                #Check padding
                paddingGood = True
                paddingLength = b[-1]
                if (paddingLength+1) > len(b):
                    paddingGood=False
                    totalPaddingLength = 0
                else:
                    if self.version == (3,0):
                        # SSL 3.0 does not require pad bytes to have a
                        # fixed value, so only the length is used.
                        totalPaddingLength = paddingLength+1
                    elif self.version in ((3,1), (3,2), (3,3)):
                        totalPaddingLength = paddingLength+1
                        paddingBytes = b[-totalPaddingLength:-1]
                        for byte in paddingBytes:
                            if byte != paddingLength:
                                paddingGood = False
                                totalPaddingLength = 0
                    else:
                        raise AssertionError()
            #Decrypt if it's a stream cipher
            else:
                paddingGood = True
                b = self._readState.encContext.decrypt(b)
                totalPaddingLength = 0
            #Check MAC
            macGood = True
            macLength = self._readState.macContext.digest_size
            endLength = macLength + totalPaddingLength
            if endLength > len(b):
                macGood = False
            else:
                #Read MAC
                startIndex = len(b) - endLength
                endIndex = startIndex + macLength
                checkBytes = b[startIndex : endIndex]
                #Calculate MAC
                seqnumBytes = self._readState.getSeqNumBytes()
                b = b[:-endLength]
                mac = self._readState.macContext.copy()
                mac.update(compatHMAC(seqnumBytes))
                mac.update(compatHMAC(bytearray([recordType])))
                if self.version == (3,0):
                    mac.update( compatHMAC(bytearray( [len(b)//256] ) ))
                    mac.update( compatHMAC(bytearray( [len(b)%256] ) ))
                elif self.version in ((3,1), (3,2), (3,3)):
                    mac.update(compatHMAC(bytearray( [self.version[0]] ) ))
                    mac.update(compatHMAC(bytearray( [self.version[1]] ) ))
                    mac.update(compatHMAC(bytearray( [len(b)//256] ) ))
                    mac.update(compatHMAC(bytearray( [len(b)%256] ) ))
                else:
                    raise AssertionError()
                mac.update(compatHMAC(b))
                macBytes = bytearray(mac.digest())
                #Compare MACs
                if macBytes != checkBytes:
                    macGood = False
            if not (paddingGood and macGood):
                for result in self._sendError(AlertDescription.bad_record_mac,
                                              "MAC failure (or padding failure)"):
                    yield result
        yield b
    def _handshakeStart(self, client):
        # Reset per-handshake state (transcript hashes, buffers).
        # Renegotiation on an open connection is deliberately refused.
        if not self.closed:
            raise ValueError("Renegotiation disallowed for security reasons")
        self._client = client
        self._handshake_md5 = hashlib.md5()
        self._handshake_sha = hashlib.sha1()
        self._handshake_sha256 = hashlib.sha256()
        self._handshakeBuffer = []
        self.allegedSrpUsername = None
        self._refCount = 1
    def _handshakeDone(self, resumed):
        # Mark the handshake complete; the connection is now open for
        # application data.  'resumed' records session-resumption use.
        self.resumed = resumed
        self.closed = False
def _calcPendingStates(self, cipherSuite, masterSecret,
clientRandom, serverRandom, implementations):
if cipherSuite in CipherSuite.aes128Suites:
keyLength = 16
ivLength = 16
createCipherFunc = createAES
elif cipherSuite in CipherSuite.aes256Suites:
keyLength = 32
ivLength = 16
createCipherFunc = createAES
elif cipherSuite in CipherSuite.rc4Suites:
keyLength = 16
ivLength = 0
createCipherFunc = createRC4
elif cipherSuite in CipherSuite.tripleDESSuites:
keyLength = 24
ivLength = 8
createCipherFunc = createTripleDES
else:
raise AssertionError()
if cipherSuite in CipherSuite.shaSuites:
macLength = 20
digestmod = hashlib.sha1
elif cipherSuite in CipherSuite.sha256Suites:
macLength = 32
digestmod = hashlib.sha256
elif cipherSuite in CipherSuite.md5Suites:
macLength = 16
digestmod = hashlib.md5
if self.version == (3,0):
createMACFunc = createMAC_SSL
elif self.version in ((3,1), (3,2), (3,3)):
createMACFunc = createHMAC
outputLength = (macLength*2) + (keyLength*2) + (ivLength*2)
#Calculate Keying Material from Master Secret
if self.version == (3,0):
keyBlock = PRF_SSL(masterSecret,
serverRandom + clientRandom,
outputLength)
elif self.version in ((3,1), (3,2)):
keyBlock = PRF(masterSecret,
b"key expansion",
serverRandom + clientRandom,
outputLength)
elif self.version == (3,3):
keyBlock = PRF_1_2(masterSecret,
b"key expansion",
serverRandom + clientRandom,
outputLength)
else:
raise AssertionError()
#Slice up Keying Material
clientPendingState = _ConnectionState()
serverPendingState = _ConnectionState()
p = Parser(keyBlock)
clientMACBlock = p.getFixBytes(macLength)
serverMACBlock = p.getFixBytes(macLength)
clientKeyBlock = p.getFixBytes(keyLength)
serverKeyBlock = p.getFixBytes(keyLength)
clientIVBlock = p.getFixBytes(ivLength)
serverIVBlock = p.getFixBytes(ivLength)
clientPendingState.macContext = createMACFunc(
compatHMAC(clientMACBlock), digestmod=digestmod)
serverPendingState.macContext = createMACFunc(
compatHMAC(serverMACBlock), digestmod=digestmod)
clientPendingState.encContext = createCipherFunc(clientKeyBlock,
clientIVBlock,
implementations)
serverPendingState.encContext = createCipherFunc(serverKeyBlock,
serverIVBlock,
implementations)
#Assign new connection states to pending states
if self._client:
self._pendingWriteState = clientPendingState
self._pendingReadState = serverPendingState
else:
self._pendingWriteState = serverPendingState
self._pendingReadState = clientPendingState
if self.version >= (3,2) and ivLength:
#Choose fixedIVBlock for TLS 1.1 (this is encrypted with the CBC
#residue to create the IV for each sent block)
self.fixedIVBlock = getRandomBytes(ivLength)
    def _changeWriteState(self):
        # Activate the pending write keys (after sending ChangeCipherSpec).
        self._writeState = self._pendingWriteState
        self._pendingWriteState = _ConnectionState()
    def _changeReadState(self):
        # Activate the pending read keys (after receiving ChangeCipherSpec).
        self._readState = self._pendingReadState
        self._pendingReadState = _ConnectionState()
    #Used for Finished messages and CertificateVerify messages in SSL v3
    def _calcSSLHandshakeHash(self, masterSecret, label):
        # SSLv3 nested-hash construction over the handshake transcript:
        # inner = hash(transcript + label + master + pad1),
        # outer = hash(master + pad2 + inner), for both MD5 and SHA-1,
        # concatenated (36 bytes total).
        imac_md5 = self._handshake_md5.copy()
        imac_sha = self._handshake_sha.copy()
        imac_md5.update(compatHMAC(label + masterSecret + bytearray([0x36]*48)))
        imac_sha.update(compatHMAC(label + masterSecret + bytearray([0x36]*40)))
        md5Bytes = MD5(masterSecret + bytearray([0x5c]*48) + \
                       bytearray(imac_md5.digest()))
        shaBytes = SHA1(masterSecret + bytearray([0x5c]*40) + \
                        bytearray(imac_sha.digest()))
        return md5Bytes + shaBytes
# --- concatenation boundary: a second copy of this module follows ---
# Authors:
# Trevor Perrin
# Google (adapted by Sam Rushing) - NPN support
# Google - minimal padding
# Martin von Loewis - python 3 port
# Yngve Pettersen (ported by Paul Sokolovsky) - TLS 1.2
#
# See the LICENSE file for legal information regarding use of this file.
"""Helper class for TLSConnection."""
from __future__ import generators
from .utils.compat import *
from .utils.cryptomath import *
from .utils.cipherfactory import createAES, createRC4, createTripleDES
from .utils.codec import *
from .errors import *
from .messages import *
from .mathtls import *
from .constants import *
from .utils.cryptomath import getRandomBytes
import socket
import errno
import traceback
class _ConnectionState(object):
    # Per-direction TLS connection state: MAC context, cipher context,
    # and the record sequence number.
    def __init__(self):
        # None until a ChangeCipherSpec activates keys for this side.
        self.macContext = None
        self.encContext = None
        # Record sequence number; incremented once per record.
        self.seqnum = 0
    def getSeqNumBytes(self):
        # Return the current sequence number as an 8-byte value (via
        # Writer) and advance it; used as MAC input for the next record.
        w = Writer()
        w.add(self.seqnum, 8)
        self.seqnum += 1
        return w.bytes
class TLSRecordLayer(object):
"""
This class handles data transmission for a TLS connection.
Its only subclass is L{tlslite.TLSConnection.TLSConnection}. We've
separated the code in this class from TLSConnection to make things
more readable.
@type sock: socket.socket
@ivar sock: The underlying socket object.
@type session: L{tlslite.Session.Session}
@ivar session: The session corresponding to this connection.
Due to TLS session resumption, multiple connections can correspond
to the same underlying session.
@type version: tuple
@ivar version: The TLS version being used for this connection.
(3,0) means SSL 3.0, and (3,1) means TLS 1.0.
@type closed: bool
@ivar closed: If this connection is closed.
@type resumed: bool
@ivar resumed: If this connection is based on a resumed session.
@type allegedSrpUsername: str or None
@ivar allegedSrpUsername: This is set to the SRP username
asserted by the client, whether the handshake succeeded or not.
If the handshake fails, this can be inspected to determine
if a guessing attack is in progress against a particular user
account.
@type closeSocket: bool
@ivar closeSocket: If the socket should be closed when the
connection is closed, defaults to True (writable).
If you set this to True, TLS Lite will assume the responsibility of
closing the socket when the TLS Connection is shutdown (either
through an error or through the user calling close()). The default
is False.
@type ignoreAbruptClose: bool
@ivar ignoreAbruptClose: If an abrupt close of the socket should
raise an error (writable).
If you set this to True, TLS Lite will not raise a
L{tlslite.errors.TLSAbruptCloseError} exception if the underlying
socket is unexpectedly closed. Such an unexpected closure could be
caused by an attacker. However, it also occurs with some incorrect
TLS implementations.
You should set this to True only if you're not worried about an
attacker truncating the connection, and only if necessary to avoid
spurious errors. The default is False.
@sort: __init__, read, readAsync, write, writeAsync, close, closeAsync,
getCipherImplementation, getCipherName
"""
    def __init__(self, sock):
        # Wrap an already-connected socket; no TLS traffic happens yet.
        self.sock = sock
        #My session object (Session instance; read-only)
        self.session = None
        #Am I a client or server?
        self._client = None
        #Buffers for processing messages
        self._handshakeBuffer = []
        self.clearReadBuffer()
        self.clearWriteBuffer()
        #Handshake digests (transcript hashes for Finished/CertVerify)
        self._handshake_md5 = hashlib.md5()
        self._handshake_sha = hashlib.sha1()
        self._handshake_sha256 = hashlib.sha256()
        #TLS Protocol Version
        self.version = (0,0) #read-only
        self._versionCheck = False #Once we choose a version, this is True
        #Current and Pending connection states
        self._writeState = _ConnectionState()
        self._readState = _ConnectionState()
        self._pendingWriteState = _ConnectionState()
        self._pendingReadState = _ConnectionState()
        #Is the connection open?
        self.closed = True #read-only
        self._refCount = 0 #Used to trigger closure
        #Is this a resumed session?
        self.resumed = False #read-only
        #What username did the client claim in his handshake?
        self.allegedSrpUsername = None
        #On a call to close(), do we close the socket? (writeable)
        #NOTE(review): defaults to True here, although the class
        #docstring says the default is False -- the code wins.
        self.closeSocket = True
        #If the socket is abruptly closed, do we ignore it
        #and pretend the connection was shut down properly? (writeable)
        self.ignoreAbruptClose = False
        #Fault we will induce, for testing purposes
        self.fault = None
    def clearReadBuffer(self):
        # Drop any decrypted-but-unread application data.
        self._readBuffer = b''
    def clearWriteBuffer(self):
        # Forget any in-progress write generator.
        self._send_writer = None
#*********************************************************
# Public Functions START
#*********************************************************
    def read(self, max=None, min=1):
        """Read some data from the TLS connection.
        This function will block until at least 'min' bytes are
        available (or the connection is closed).
        If an exception is raised, the connection will have been
        automatically closed.
        @type max: int
        @param max: The maximum number of bytes to return.
        @type min: int
        @param min: The minimum number of bytes to return
        @rtype: str
        @return: A string of no more than 'max' bytes, and no fewer
        than 'min' (unless the connection has been closed, in which
        case fewer than 'min' bytes may be returned).
        @raise socket.error: If a socket error occurs.
        @raise tlslite.errors.TLSAbruptCloseError: If the socket is closed
        without a preceding alert.
        @raise tlslite.errors.TLSAlert: If a TLS alert is signalled.
        """
        # Drive readAsync() to completion; 'result' retains the last
        # value the generator yielded, i.e. the returned data.
        # ('max'/'min' intentionally shadow builtins -- public API.)
        for result in self.readAsync(max, min):
            pass
        return result
    def readAsync(self, max=None, min=1):
        """Start a read operation on the TLS connection.
        This function returns a generator which behaves similarly to
        read(). Successive invocations of the generator will return 0
        if it is waiting to read from the socket, 1 if it is waiting
        to write to the socket, or a string if the read operation has
        completed.
        @rtype: iterable
        @return: A generator; see above for details.
        """
        try:
            # Accumulate decrypted application data until 'min' bytes
            # are buffered or the connection is closed.
            while len(self._readBuffer)<min and not self.closed:
                try:
                    for result in self._getMsg(ContentType.application_data):
                        if result in (0,1):
                            yield result
                    applicationData = result
                    self._readBuffer += applicationData.write()
                except TLSRemoteAlert as alert:
                    # close_notify is a normal end-of-stream; anything
                    # else propagates.
                    if alert.description != AlertDescription.close_notify:
                        raise
                except TLSAbruptCloseError:
                    if not self.ignoreAbruptClose:
                        raise
                    else:
                        self._shutdown(True)
            if max == None:
                max = len(self._readBuffer)
            returnBytes = self._readBuffer[:max]
            self._readBuffer = self._readBuffer[max:]
            yield bytes(returnBytes)
        except GeneratorExit:
            raise
        except:
            self._shutdown(False)
            raise
    def unread(self, b):
        """Add bytes to the front of the socket read buffer for future
        reading. Be careful using this in the context of select(...): if you
        unread the last data from a socket, that won't wake up selected waiters,
        and those waiters may hang forever.
        """
        # Prepend, so the unread bytes are returned first by read().
        self._readBuffer = b + self._readBuffer
    def write(self, s):
        """Write some data to the TLS connection.
        This function will block until all the data has been sent.
        If an exception is raised, the connection will have been
        automatically closed.
        @type s: str
        @param s: The data to transmit to the other party.
        @raise socket.error: If a socket error occurs.
        """
        # Drive writeAsync() to completion (blocking write).
        for result in self.writeAsync(s):
            pass
    def writeAsync(self, s):
        """Start a write operation on the TLS connection.
        This function returns a generator which behaves similarly to
        write(). Successive invocations of the generator will return
        1 if it is waiting to write to the socket, or will raise
        StopIteration if the write operation has completed.
        @rtype: iterable
        @return: A generator; see above for details.
        @raise TLSClosedConnectionError: If the connection is closed.
        """
        try:
            if self.closed:
                raise TLSClosedConnectionError("attempt to write to closed connection")
            # Split the data into records of at most 16384 bytes (the
            # maximum TLS plaintext fragment size).
            index = 0
            blockSize = 16384
            randomizeFirstBlock = True
            while 1:
                startIndex = index * blockSize
                endIndex = startIndex + blockSize
                if startIndex >= len(s):
                    break
                if endIndex > len(s):
                    endIndex = len(s)
                block = bytearray(s[startIndex : endIndex])
                applicationData = ApplicationData().create(block)
                for result in self._sendMsg(applicationData, \
                                            randomizeFirstBlock):
                    yield result
                randomizeFirstBlock = False #only on 1st message
                index += 1
        except GeneratorExit:
            raise
        except Exception:
            # Don't invalidate the session on write failure if abrupt closes are
            # okay.
            self._shutdown(self.ignoreAbruptClose)
            raise
    def close(self):
        """Close the TLS connection.
        This function will block until it has exchanged close_notify
        alerts with the other party. After doing so, it will shut down the
        TLS connection. Further attempts to read through this connection
        will return "". Further attempts to write through this connection
        will raise TLSClosedConnectionError (see writeAsync()).
        If makefile() has been called on this connection, the connection
        will not be closed until the connection object and all file
        objects have been closed.
        Even if an exception is raised, the connection will have been
        closed.
        @raise socket.error: If a socket error occurs.
        @raise tlslite.errors.TLSAbruptCloseError: If the socket is closed
        without a preceding alert.
        @raise tlslite.errors.TLSAlert: If a TLS alert is signalled.
        """
        # Drive _decrefAsync() to completion (blocking close).
        if not self.closed:
            for result in self._decrefAsync():
                pass
    # Python 3 callback (socket.SocketIO calls this on close)
    _decref_socketios = close
def closeAsync(self):
"""Start a close operation on the TLS connection.
This function returns a generator which behaves similarly to
close(). Successive invocations of the generator will return 0
if it is waiting to read from the socket, 1 if it is waiting
to write to the socket, or will raise StopIteration if the
close operation has completed.
@rtype: iterable
@return: A generator; see above for details.
"""
if not self.closed:
for result in self._decrefAsync():
yield result
def _decrefAsync(self):
    """Decrement the connection refcount; on zero, close the TLS session.

    Generator: yields 0/1 while the underlying socket would block.
    Sends close_notify, then either closes the socket immediately
    (closeSocket) or waits for the peer's close_notify.  Socket errors
    and abrupt closes during this exchange are tolerated; any other
    failure marks the session non-resumable.
    """
    self._refCount -= 1
    if self._refCount == 0 and not self.closed:
        try:
            for result in self._sendMsg(Alert().create(\
                    AlertDescription.close_notify, AlertLevel.warning)):
                yield result
            alert = None
            # By default close the socket, since it's been observed
            # that some other libraries will not respond to the
            # close_notify alert, thus leaving us hanging if we're
            # expecting it
            if self.closeSocket:
                self._shutdown(True)
            else:
                while not alert:
                    # Accept only alert/app-data; app data is read and
                    # discarded until the peer's alert arrives.
                    for result in self._getMsg((ContentType.alert, \
                                               ContentType.application_data)):
                        if result in (0,1):
                            yield result
                    if result.contentType == ContentType.alert:
                        alert = result
                if alert.description == AlertDescription.close_notify:
                    self._shutdown(True)
                else:
                    raise TLSRemoteAlert(alert)
        except (socket.error, TLSAbruptCloseError):
            #If the other side closes the socket, that's okay
            self._shutdown(True)
        except GeneratorExit:
            raise
        except:
            self._shutdown(False)
            raise
def getVersionName(self):
    """Get the name of this TLS version.

    @rtype: str
    @return: The name of the TLS version used with this connection.
    Either None, 'SSL 3.0', 'TLS 1.0', 'TLS 1.1', or 'TLS 1.2'.
    """
    names = {
        (3, 0): "SSL 3.0",
        (3, 1): "TLS 1.0",
        (3, 2): "TLS 1.1",
        (3, 3): "TLS 1.2",
    }
    # Unknown/unset versions map to None, as in the explicit chain.
    return names.get(self.version)
def getCipherName(self):
    """Get the name of the cipher used with this connection.

    @rtype: str
    @return: The name of the cipher used with this connection.
    Either 'aes128', 'aes256', 'rc4', or '3des'; None if no cipher
    is active yet.
    """
    cipher = self._writeState.encContext
    return cipher.name if cipher else None
def getCipherImplementation(self):
    """Get the name of the cipher implementation used with
    this connection.

    @rtype: str
    @return: The name of the cipher implementation used with
    this connection.  Either 'python', 'openssl', or 'pycrypto';
    None if no cipher is active yet.
    """
    cipher = self._writeState.encContext
    return cipher.implementation if cipher else None
#Emulate a socket, somewhat -
def send(self, s):
    """Send data to the TLS connection (socket emulation).

    @raise socket.error: If a socket error occurs.
    """
    nbytes = len(s)
    self.write(s)
    # write() blocks until everything is sent, so report the full length.
    return nbytes
def sendall(self, s):
    """Send data to the TLS connection (socket emulation).

    @raise socket.error: If a socket error occurs.
    """
    # write() already blocks until all data is sent; no loop required.
    self.write(s)
def recv(self, bufsize):
    """Get some data from the TLS connection (socket emulation).

    @raise socket.error: If a socket error occurs.
    @raise tlslite.errors.TLSAbruptCloseError: If the socket is closed
    without a preceding alert.
    @raise tlslite.errors.TLSAlert: If a TLS alert is signalled.
    """
    data = self.read(bufsize)
    return data
def recv_into(self, b):
    """Read into a pre-allocated buffer (socket emulation).

    Fills b with up to len(b) bytes and returns the count read, or
    None on a clean end-of-stream (empty read).
    """
    data = self.read(len(b))
    if not data:
        return None
    count = len(data)
    b[:count] = data
    return count
def makefile(self, mode='r', bufsize=-1):
    """Create a file object for the TLS connection (socket emulation).

    Increments the connection refcount so that the real TLS close is
    deferred until this connection and every file object created from
    it have all been closed (see _decrefAsync).

    @rtype: L{socket._fileobject}
    """
    self._refCount += 1
    # So, it is pretty fragile to be using Python internal objects
    # like this, but it is probably the best/easiest way to provide
    # matching behavior for socket emulation purposes.  The 'close'
    # argument is nice, its apparently a recent addition to this
    # class, so that when fileobject.close() gets called, it will
    # close() us, causing the refcount to be decremented (decrefAsync).
    #
    # If this is the last close() on the outstanding fileobjects /
    # TLSConnection, then the "actual" close alerts will be sent,
    # socket closed, etc.
    if sys.version_info < (3,):
        return socket._fileobject(self, mode, bufsize, close=True)
    else:
        # XXX need to wrap this further if buffering is requested
        return socket.SocketIO(self, mode)
def getsockname(self):
    """Return the socket's own address (socket emulation).

    Pure delegation to the wrapped socket.
    """
    return self.sock.getsockname()
def getpeername(self):
    """Return the remote address to which the socket is connected
    (socket emulation).

    Pure delegation to the wrapped socket.
    """
    return self.sock.getpeername()
def settimeout(self, value):
    """Set a timeout on blocking socket operations (socket emulation).

    Pure delegation to the wrapped socket.
    """
    return self.sock.settimeout(value)
def gettimeout(self):
    """Return the timeout associated with socket operations (socket
    emulation).

    Pure delegation to the wrapped socket.
    """
    return self.sock.gettimeout()
def setsockopt(self, level, optname, value):
    """Set the value of the given socket option (socket emulation).

    Pure delegation to the wrapped socket.
    """
    return self.sock.setsockopt(level, optname, value)
def shutdown(self, how):
    """Shutdown the underlying socket.

    Note: this bypasses the TLS close_notify exchange; use close()
    for a clean TLS shutdown.
    """
    return self.sock.shutdown(how)
def fileno(self):
    """Not implemented in TLS Lite.

    A TLS connection has no single file descriptor whose readiness
    maps to application-data readiness, so this always raises.
    """
    raise NotImplementedError()
#*********************************************************
# Public Functions END
#*********************************************************
def _shutdown(self, resumable):
    """Reset crypto state and mark the connection closed.

    When resumable is False, the current session (if any) is flagged
    non-resumable; a False flag is never flipped back to True here.
    """
    self.closed = True
    self.version = (0,0)
    self._versionCheck = False
    self._writeState = _ConnectionState()
    self._readState = _ConnectionState()
    if self.closeSocket:
        self.sock.close()
    #Even if resumable is False, we'll never toggle this on
    if not resumable and self.session:
        self.session.resumable = False
def _sendError(self, alertDescription, errorStr=None):
    """Send a fatal alert, shut the connection down non-resumably,
    and raise TLSLocalAlert (generator; yields while socket blocks).
    """
    fatalAlert = Alert().create(alertDescription, AlertLevel.fatal)
    for result in self._sendMsg(fatalAlert):
        yield result
    self._shutdown(False)
    raise TLSLocalAlert(fatalAlert, errorStr)
def _sendMsgs(self, msgs):
    """Send a sequence of messages via _sendMsg (generator).

    NOTE(review): randomizeFirstBlock is re-set to True after every
    message rather than False.  Since _sendMsg only acts on that flag
    for ApplicationData messages and this helper is used for handshake
    flights, this appears inert -- confirm against _sendMsg before
    changing it.
    """
    randomizeFirstBlock = True
    for msg in msgs:
        for result in self._sendMsg(msg, randomizeFirstBlock):
            yield result
        randomizeFirstBlock = True
def _sendMsg(self, msg, randomizeFirstBlock = True):
    """MAC, pad, encrypt, frame and transmit one TLS record (generator).

    Yields 1 whenever the socket would block on write.  For CBC suites
    at TLS 1.0 and below, application data is 1/(n-1)-split to defeat
    BEAST-style chosen-plaintext attacks on the predictable IV.
    """
    #Whenever we're connected and asked to send an app data message,
    #we first send the first byte of the message.  This prevents
    #an attacker from launching a chosen-plaintext attack based on
    #knowing the next IV (a la BEAST).
    if not self.closed and randomizeFirstBlock and self.version <= (3,1) \
            and self._writeState.encContext \
            and self._writeState.encContext.isBlockCipher \
            and isinstance(msg, ApplicationData):
        msgFirstByte = msg.splitFirstByte()
        for result in self._sendMsg(msgFirstByte,
                                    randomizeFirstBlock = False):
            yield result
    b = msg.write()
    # If a 1-byte message was passed in, and we "split" the
    # first(only) byte off above, we may have a 0-length msg:
    if len(b) == 0:
        return
    contentType = msg.contentType
    #Update handshake hashes
    if contentType == ContentType.handshake:
        self._handshake_md5.update(compat26Str(b))
        self._handshake_sha.update(compat26Str(b))
        self._handshake_sha256.update(compat26Str(b))
    #Calculate MAC over seqnum, type, (version,) length and payload.
    if self._writeState.macContext:
        seqnumBytes = self._writeState.getSeqNumBytes()
        mac = self._writeState.macContext.copy()
        mac.update(compatHMAC(seqnumBytes))
        mac.update(compatHMAC(bytearray([contentType])))
        if self.version == (3,0):
            mac.update( compatHMAC( bytearray([len(b)//256] )))
            mac.update( compatHMAC( bytearray([len(b)%256] )))
        elif self.version in ((3,1), (3,2), (3,3)):
            mac.update(compatHMAC( bytearray([self.version[0]] )))
            mac.update(compatHMAC( bytearray([self.version[1]] )))
            mac.update( compatHMAC( bytearray([len(b)//256] )))
            mac.update( compatHMAC( bytearray([len(b)%256] )))
        else:
            raise AssertionError()
        mac.update(compatHMAC(b))
        macBytes = bytearray(mac.digest())
        # Fault injection hook for testing peers' MAC checking.
        if self.fault == Fault.badMAC:
            macBytes[0] = (macBytes[0]+1) % 256
    #Encrypt for Block or Stream Cipher
    if self._writeState.encContext:
        #Add padding and encrypt (for Block Cipher):
        if self._writeState.encContext.isBlockCipher:
            #Add TLS 1.1 fixed block (explicit per-record IV).
            if self.version >= (3,2):
                b = self.fixedIVBlock + b
            #Add padding: b = b+ (macBytes + paddingBytes)
            currentLength = len(b) + len(macBytes)
            blockLength = self._writeState.encContext.block_size
            paddingLength = blockLength - 1 - (currentLength % blockLength)
            paddingBytes = bytearray([paddingLength] * (paddingLength+1))
            if self.fault == Fault.badPadding:
                paddingBytes[0] = (paddingBytes[0]+1) % 256
            endBytes = macBytes + paddingBytes
            b += endBytes
            #Encrypt
            b = self._writeState.encContext.encrypt(b)
        #Encrypt (for Stream Cipher)
        else:
            b += macBytes
            b = self._writeState.encContext.encrypt(b)
    #Add record header and send
    r = RecordHeader3().create(self.version, contentType, len(b))
    s = r.write() + b
    while 1:
        try:
            bytesSent = self.sock.send(s) #Might raise socket.error
        except socket.error as why:
            if why.args[0] in (errno.EWOULDBLOCK, errno.EAGAIN):
                yield 1
                continue
            else:
                # The socket was unexpectedly closed.  The tricky part
                # is that there may be an alert sent by the other party
                # sitting in the read buffer.  So, if we get here after
                # handshaking, we will just raise the error and let the
                # caller read more data if it would like, thus stumbling
                # upon the error.
                #
                # However, if we get here DURING handshaking, we take
                # it upon ourselves to see if the next message is an
                # Alert.
                if contentType == ContentType.handshake:
                    # See if there's an alert record
                    # Could raise socket.error or TLSAbruptCloseError
                    for result in self._getNextRecord():
                        if result in (0,1):
                            yield result
                    # Closes the socket
                    self._shutdown(False)
                    # If we got an alert, raise it
                    recordHeader, p = result
                    if recordHeader.type == ContentType.alert:
                        alert = Alert().parse(p)
                        raise TLSRemoteAlert(alert)
                else:
                    # If we got some other message who know what
                    # the remote side is doing, just go ahead and
                    # raise the socket.error
                    raise
        if bytesSent == len(s):
            return
        # Partial send: retry with the remaining suffix.
        s = s[bytesSent:]
        yield 1
def _getMsg(self, expectedType, secondaryType=None, constructorType=None):
    """Read and parse the next message of an expected type (generator).

    Yields 0/1 while the socket would block, then yields the parsed
    message object.  Empty application-data fragments are skipped,
    incoming alerts close the connection and raise TLSRemoteAlert,
    and renegotiation attempts are refused with no_renegotiation.
    """
    try:
        if not isinstance(expectedType, tuple):
            expectedType = (expectedType,)
        #Spin in a loop, until we've got a non-empty record of a type we
        #expect.  The loop will be repeated if:
        #  - we receive a renegotiation attempt; we send no_renegotiation,
        #    then try again
        #  - we receive an empty application-data fragment; we try again
        while 1:
            for result in self._getNextRecord():
                if result in (0,1):
                    yield result
            recordHeader, p = result
            #If this is an empty application-data fragment, try again
            if recordHeader.type == ContentType.application_data:
                if p.index == len(p.bytes):
                    continue
            #If we received an unexpected record type...
            if recordHeader.type not in expectedType:
                #If we received an alert...
                if recordHeader.type == ContentType.alert:
                    alert = Alert().parse(p)
                    #We either received a fatal error, a warning, or a
                    #close_notify.  In any case, we're going to close the
                    #connection.  In the latter two cases we respond with
                    #a close_notify, but ignore any socket errors, since
                    #the other side might have already closed the socket.
                    if alert.level == AlertLevel.warning or \
                       alert.description == AlertDescription.close_notify:
                        #If the sendMsg() call fails because the socket has
                        #already been closed, we will be forgiving and not
                        #report the error nor invalidate the "resumability"
                        #of the session.
                        try:
                            alertMsg = Alert()
                            alertMsg.create(AlertDescription.close_notify,
                                            AlertLevel.warning)
                            for result in self._sendMsg(alertMsg):
                                yield result
                        except socket.error:
                            pass
                        if alert.description == \
                               AlertDescription.close_notify:
                            self._shutdown(True)
                        elif alert.level == AlertLevel.warning:
                            self._shutdown(False)
                    else: #Fatal alert:
                        self._shutdown(False)
                    #Raise the alert as an exception
                    raise TLSRemoteAlert(alert)
                #If we received a renegotiation attempt...
                if recordHeader.type == ContentType.handshake:
                    subType = p.get(1)
                    reneg = False
                    # hello_request from a server / client_hello from a
                    # client signals renegotiation, depending on our role.
                    if self._client:
                        if subType == HandshakeType.hello_request:
                            reneg = True
                    else:
                        if subType == HandshakeType.client_hello:
                            reneg = True
                    #Send no_renegotiation, then try again
                    if reneg:
                        alertMsg = Alert()
                        alertMsg.create(AlertDescription.no_renegotiation,
                                        AlertLevel.warning)
                        for result in self._sendMsg(alertMsg):
                            yield result
                        continue
                #Otherwise: this is an unexpected record, but neither an
                #alert nor renegotiation
                for result in self._sendError(\
                        AlertDescription.unexpected_message,
                        "received type=%d" % recordHeader.type):
                    yield result
            break
        #Parse based on content_type
        if recordHeader.type == ContentType.change_cipher_spec:
            yield ChangeCipherSpec().parse(p)
        elif recordHeader.type == ContentType.alert:
            yield Alert().parse(p)
        elif recordHeader.type == ContentType.application_data:
            yield ApplicationData().parse(p)
        elif recordHeader.type == ContentType.handshake:
            #Convert secondaryType to tuple, if it isn't already
            if not isinstance(secondaryType, tuple):
                secondaryType = (secondaryType,)
            #If it's a handshake message, check handshake header
            if recordHeader.ssl2:
                # SSLv2-framed records may only carry a ClientHello.
                subType = p.get(1)
                if subType != HandshakeType.client_hello:
                    for result in self._sendError(\
                            AlertDescription.unexpected_message,
                            "Can only handle SSLv2 ClientHello messages"):
                        yield result
                if HandshakeType.client_hello not in secondaryType:
                    for result in self._sendError(\
                            AlertDescription.unexpected_message):
                        yield result
                subType = HandshakeType.client_hello
            else:
                subType = p.get(1)
                if subType not in secondaryType:
                    for result in self._sendError(\
                            AlertDescription.unexpected_message,
                            "Expecting %s, got %s" % (str(secondaryType), subType)):
                        yield result
            #Update handshake hashes
            self._handshake_md5.update(compat26Str(p.bytes))
            self._handshake_sha.update(compat26Str(p.bytes))
            self._handshake_sha256.update(compat26Str(p.bytes))
            #Parse based on handshake type
            if subType == HandshakeType.client_hello:
                yield ClientHello(recordHeader.ssl2).parse(p)
            elif subType == HandshakeType.server_hello:
                yield ServerHello().parse(p)
            elif subType == HandshakeType.certificate:
                yield Certificate(constructorType).parse(p)
            elif subType == HandshakeType.certificate_request:
                yield CertificateRequest(self.version).parse(p)
            elif subType == HandshakeType.certificate_verify:
                yield CertificateVerify(self.version).parse(p)
            elif subType == HandshakeType.server_key_exchange:
                yield ServerKeyExchange(constructorType).parse(p)
            elif subType == HandshakeType.server_hello_done:
                yield ServerHelloDone().parse(p)
            elif subType == HandshakeType.client_key_exchange:
                yield ClientKeyExchange(constructorType, \
                                        self.version).parse(p)
            elif subType == HandshakeType.finished:
                yield Finished(self.version).parse(p)
            elif subType == HandshakeType.next_protocol:
                yield NextProtocol().parse(p)
            else:
                raise AssertionError()
    #If an exception was raised by a Parser or Message instance:
    except SyntaxError as e:
        for result in self._sendError(AlertDescription.decode_error,
                                      formatExceptionTrace(e)):
            yield result
#Returns next record or next handshake message
def _getNextRecord(self):
    """Yield the next (RecordHeader, Parser) pair from the wire.

    Generator: yields 0 while reads would block.  Multiple handshake
    messages packed into one record are split into
    self._handshakeBuffer and returned one at a time.  The first byte
    is sniffed to distinguish SSLv2 (2-byte) from SSLv3/TLS (5-byte)
    record headers.
    """
    #If there's a handshake message waiting, return it
    if self._handshakeBuffer:
        recordHeader, b = self._handshakeBuffer[0]
        self._handshakeBuffer = self._handshakeBuffer[1:]
        yield (recordHeader, Parser(b))
        return
    #Otherwise...
    #Read the next record header
    b = bytearray(0)
    recordHeaderLength = 1
    ssl2 = False
    while 1:
        try:
            s = self.sock.recv(recordHeaderLength-len(b))
        except socket.error as why:
            if why.args[0] in (errno.EWOULDBLOCK, errno.EAGAIN):
                yield 0
                continue
            else:
                raise
        #If the connection was abruptly closed, raise an error
        if len(s)==0:
            raise TLSAbruptCloseError()
        b += bytearray(s)
        if len(b)==1:
            # First byte decides the framing: a known content type means
            # SSLv3/TLS (5-byte header); 128 means SSLv2 (2-byte header).
            if b[0] in ContentType.all:
                ssl2 = False
                recordHeaderLength = 5
            elif b[0] == 128:
                ssl2 = True
                recordHeaderLength = 2
            else:
                raise SyntaxError()
        if len(b) == recordHeaderLength:
            break
    #Parse the record header
    if ssl2:
        r = RecordHeader2().parse(Parser(b))
    else:
        r = RecordHeader3().parse(Parser(b))
    #Check the record header fields
    # 18432 = 2^14 (max plaintext) + 2048 (max expansion) per the RFCs.
    if r.length > 18432:
        for result in self._sendError(AlertDescription.record_overflow):
            yield result
    #Read the record contents
    b = bytearray(0)
    while 1:
        try:
            s = self.sock.recv(r.length - len(b))
        except socket.error as why:
            if why.args[0] in (errno.EWOULDBLOCK, errno.EAGAIN):
                yield 0
                continue
            else:
                raise
        #If the connection is closed, raise a socket error
        if len(s)==0:
            raise TLSAbruptCloseError()
        b += bytearray(s)
        if len(b) == r.length:
            break
    #Check the record header fields (2)
    #We do this after reading the contents from the socket, so that
    #if there's an error, we at least don't leave extra bytes in the
    #socket..
    #
    # THIS CHECK HAS NO SECURITY RELEVANCE (?), BUT COULD HURT INTEROP.
    # SO WE LEAVE IT OUT FOR NOW.
    #
    #if self._versionCheck and r.version != self.version:
    #    for result in self._sendError(AlertDescription.protocol_version,
    #            "Version in header field: %s, should be %s" % (str(r.version),
    #                                                       str(self.version))):
    #        yield result
    #Decrypt the record
    for result in self._decryptRecord(r.type, b):
        if result in (0,1): yield result
        else: break
    b = result
    p = Parser(b)
    #If it doesn't contain handshake messages, we can just return it
    if r.type != ContentType.handshake:
        yield (r, p)
    #If it's an SSLv2 ClientHello, we can return it as well
    elif r.ssl2:
        yield (r, p)
    else:
        #Otherwise, we loop through and add the handshake messages to the
        #handshake buffer
        while 1:
            if p.index == len(b): #If we're at the end
                if not self._handshakeBuffer:
                    for result in self._sendError(\
                            AlertDescription.decode_error, \
                            "Received empty handshake record"):
                        yield result
                break
            #There needs to be at least 4 bytes to get a header
            if p.index+4 > len(b):
                for result in self._sendError(\
                        AlertDescription.decode_error,
                        "A record has a partial handshake message (1)"):
                    yield result
            p.get(1) # skip handshake type
            msgLength = p.get(3)
            if p.index+msgLength > len(b):
                for result in self._sendError(\
                        AlertDescription.decode_error,
                        "A record has a partial handshake message (2)"):
                    yield result
            # Store header (4 bytes) + body for later delivery.
            handshakePair = (r, b[p.index-4 : p.index+msgLength])
            self._handshakeBuffer.append(handshakePair)
            p.index += msgLength
        #We've moved at least one handshake message into the
        #handshakeBuffer, return the first one
        recordHeader, b = self._handshakeBuffer[0]
        self._handshakeBuffer = self._handshakeBuffer[1:]
        yield (recordHeader, Parser(b))
def _decryptRecord(self, recordType, b):
    """Decrypt and MAC-check one record; finally yield the plaintext.

    Generator: may yield 0/1 via _sendError while the socket blocks.
    NOTE(review): padding and MAC failures are deliberately collapsed
    into one bad_record_mac alert, but the amount of MAC computation
    still varies with the padding outcome -- this is the CBC
    timing-leak shape (Lucky 13).  Behavior preserved; flagged only.
    """
    if self._readState.encContext:
        #Decrypt if it's a block cipher
        if self._readState.encContext.isBlockCipher:
            blockLength = self._readState.encContext.block_size
            if len(b) % blockLength != 0:
                for result in self._sendError(\
                        AlertDescription.decryption_failed,
                        "Encrypted data not a multiple of blocksize"):
                    yield result
            b = self._readState.encContext.decrypt(b)
            if self.version >= (3,2): #For TLS 1.1, remove explicit IV
                b = b[self._readState.encContext.block_size : ]
            if len(b) == 0:
                for result in self._sendError(\
                        AlertDescription.decryption_failed,
                        "No data left after decryption and IV removal"):
                    yield result
            #Check padding
            paddingGood = True
            paddingLength = b[-1]
            if (paddingLength+1) > len(b):
                paddingGood=False
                totalPaddingLength = 0
            else:
                if self.version == (3,0):
                    # SSLv3 padding bytes are arbitrary; only the
                    # length byte matters.
                    totalPaddingLength = paddingLength+1
                elif self.version in ((3,1), (3,2), (3,3)):
                    # TLS requires every padding byte to equal the
                    # padding length.
                    totalPaddingLength = paddingLength+1
                    paddingBytes = b[-totalPaddingLength:-1]
                    for byte in paddingBytes:
                        if byte != paddingLength:
                            paddingGood = False
                            totalPaddingLength = 0
                else:
                    raise AssertionError()
        #Decrypt if it's a stream cipher
        else:
            paddingGood = True
            b = self._readState.encContext.decrypt(b)
            totalPaddingLength = 0
        #Check MAC
        macGood = True
        macLength = self._readState.macContext.digest_size
        endLength = macLength + totalPaddingLength
        if endLength > len(b):
            macGood = False
        else:
            #Read MAC
            startIndex = len(b) - endLength
            endIndex = startIndex + macLength
            checkBytes = b[startIndex : endIndex]
            #Calculate MAC over seqnum, type, (version,) length, payload.
            seqnumBytes = self._readState.getSeqNumBytes()
            b = b[:-endLength]
            mac = self._readState.macContext.copy()
            mac.update(compatHMAC(seqnumBytes))
            mac.update(compatHMAC(bytearray([recordType])))
            if self.version == (3,0):
                mac.update( compatHMAC(bytearray( [len(b)//256] ) ))
                mac.update( compatHMAC(bytearray( [len(b)%256] ) ))
            elif self.version in ((3,1), (3,2), (3,3)):
                mac.update(compatHMAC(bytearray( [self.version[0]] ) ))
                mac.update(compatHMAC(bytearray( [self.version[1]] ) ))
                mac.update(compatHMAC(bytearray( [len(b)//256] ) ))
                mac.update(compatHMAC(bytearray( [len(b)%256] ) ))
            else:
                raise AssertionError()
            mac.update(compatHMAC(b))
            macBytes = bytearray(mac.digest())
            #Compare MACs
            if macBytes != checkBytes:
                macGood = False
        if not (paddingGood and macGood):
            for result in self._sendError(AlertDescription.bad_record_mac,
                                          "MAC failure (or padding failure)"):
                yield result
    yield b
def _handshakeStart(self, client):
if not self.closed:
raise ValueError("Renegotiation disallowed for security reasons")
self._client = client
self._handshake_md5 = hashlib.md5()
self._handshake_sha = hashlib.sha1()
self._handshake_sha256 = hashlib.sha256()
self._handshakeBuffer = []
self.allegedSrpUsername = None
self._refCount = 1
def _handshakeDone(self, resumed):
self.resumed = resumed
self.closed = False
def _calcPendingStates(self, cipherSuite, masterSecret,
                       clientRandom, serverRandom, implementations):
    """Derive pending read/write cipher and MAC states from the master secret.

    Selects key/IV/MAC sizes from the cipher suite, expands a key block
    with the version-appropriate PRF, slices it in the standard
    clientMAC/serverMAC/clientKey/serverKey/clientIV/serverIV order,
    and assigns client/server states according to our role.  States
    become active only via _changeWriteState/_changeReadState.
    """
    # Cipher parameters by suite family.
    if cipherSuite in CipherSuite.aes128Suites:
        keyLength = 16
        ivLength = 16
        createCipherFunc = createAES
    elif cipherSuite in CipherSuite.aes256Suites:
        keyLength = 32
        ivLength = 16
        createCipherFunc = createAES
    elif cipherSuite in CipherSuite.rc4Suites:
        keyLength = 16
        ivLength = 0
        createCipherFunc = createRC4
    elif cipherSuite in CipherSuite.tripleDESSuites:
        keyLength = 24
        ivLength = 8
        createCipherFunc = createTripleDES
    else:
        raise AssertionError()
    # MAC parameters by suite family.
    if cipherSuite in CipherSuite.shaSuites:
        macLength = 20
        digestmod = hashlib.sha1
    elif cipherSuite in CipherSuite.sha256Suites:
        macLength = 32
        digestmod = hashlib.sha256
    elif cipherSuite in CipherSuite.md5Suites:
        macLength = 16
        digestmod = hashlib.md5
    # SSLv3 uses its own MAC construction; TLS uses HMAC.
    if self.version == (3,0):
        createMACFunc = createMAC_SSL
    elif self.version in ((3,1), (3,2), (3,3)):
        createMACFunc = createHMAC
    outputLength = (macLength*2) + (keyLength*2) + (ivLength*2)
    #Calculate Keying Material from Master Secret
    if self.version == (3,0):
        keyBlock = PRF_SSL(masterSecret,
                           serverRandom + clientRandom,
                           outputLength)
    elif self.version in ((3,1), (3,2)):
        keyBlock = PRF(masterSecret,
                       b"key expansion",
                       serverRandom + clientRandom,
                       outputLength)
    elif self.version == (3,3):
        keyBlock = PRF_1_2(masterSecret,
                           b"key expansion",
                           serverRandom + clientRandom,
                           outputLength)
    else:
        raise AssertionError()
    #Slice up Keying Material
    clientPendingState = _ConnectionState()
    serverPendingState = _ConnectionState()
    p = Parser(keyBlock)
    clientMACBlock = p.getFixBytes(macLength)
    serverMACBlock = p.getFixBytes(macLength)
    clientKeyBlock = p.getFixBytes(keyLength)
    serverKeyBlock = p.getFixBytes(keyLength)
    clientIVBlock = p.getFixBytes(ivLength)
    serverIVBlock = p.getFixBytes(ivLength)
    clientPendingState.macContext = createMACFunc(
        compatHMAC(clientMACBlock), digestmod=digestmod)
    serverPendingState.macContext = createMACFunc(
        compatHMAC(serverMACBlock), digestmod=digestmod)
    clientPendingState.encContext = createCipherFunc(clientKeyBlock,
                                                     clientIVBlock,
                                                     implementations)
    serverPendingState.encContext = createCipherFunc(serverKeyBlock,
                                                     serverIVBlock,
                                                     implementations)
    #Assign new connection states to pending states
    if self._client:
        self._pendingWriteState = clientPendingState
        self._pendingReadState = serverPendingState
    else:
        self._pendingWriteState = serverPendingState
        self._pendingReadState = clientPendingState
    if self.version >= (3,2) and ivLength:
        #Choose fixedIVBlock for TLS 1.1 (this is encrypted with the CBC
        #residue to create the IV for each sent block)
        self.fixedIVBlock = getRandomBytes(ivLength)
def _changeWriteState(self):
    """Promote the pending write state to active; reset the pending slot."""
    self._writeState, self._pendingWriteState = (
        self._pendingWriteState, _ConnectionState())
def _changeReadState(self):
    """Promote the pending read state to active; reset the pending slot."""
    self._readState, self._pendingReadState = (
        self._pendingReadState, _ConnectionState())
#Used for Finished messages and CertificateVerify messages in SSL v3
def _calcSSLHandshakeHash(self, masterSecret, label):
    """Return the 36-byte SSLv3 handshake hash (16-byte MD5 + 20-byte SHA1).

    SSLv3's MAC-like construction: an inner hash of the running
    handshake transcript plus label/secret/ipad(0x36), then an outer
    hash over secret/opad(0x5c)/inner-digest.  Pad lengths differ per
    hash (48 bytes for MD5, 40 for SHA1) as the SSLv3 spec requires.
    """
    imac_md5 = self._handshake_md5.copy()
    imac_sha = self._handshake_sha.copy()
    # Inner hashes: transcript + label + master secret + ipad.
    imac_md5.update(compatHMAC(label + masterSecret + bytearray([0x36]*48)))
    imac_sha.update(compatHMAC(label + masterSecret + bytearray([0x36]*40)))
    # Outer hashes: master secret + opad + inner digest.
    md5Bytes = MD5(masterSecret + bytearray([0x5c]*48) + \
                   bytearray(imac_md5.digest()))
    shaBytes = SHA1(masterSecret + bytearray([0x5c]*40) + \
                    bytearray(imac_sha.digest()))
    return md5Bytes + shaBytes
|
1582_0
|
crossvul
|
py
|
CWE-119
|
Improper Restriction of Operations within the Bounds of a Memory Buffer - Certain languages allow direct addressing of memory locations and do not automatically ensure that these locations are valid for the memory buffer that is being referenced.
|
python
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack LLC
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 - 2012 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import base64
import hashlib
import hmac
import json
import os
import subprocess
import time
import urllib
import passlib.hash
from keystone.common import logging
from keystone import config
from keystone import exception
CONF = config.CONF
config.register_int('crypt_strength', default=40000)
LOG = logging.getLogger(__name__)
MAX_PASSWORD_LENGTH = 4096
def read_cached_file(filename, cache_info, reload_func=None):
    """Return a file's contents, re-reading only when its mtime changes.

    :param cache_info: dictionary to hold opaque cache.
    :param reload_func: optional function to be called with data when
                        file is reloaded due to a modification.
    :returns: data from file
    """
    mtime = os.path.getmtime(filename)
    cache_stale = not cache_info or cache_info.get('mtime') != mtime
    if cache_stale:
        with open(filename) as fap:
            cache_info['data'] = fap.read()
        cache_info['mtime'] = mtime
        if reload_func:
            reload_func(cache_info['data'])
    return cache_info['data']
class SmarterEncoder(json.JSONEncoder):
    """JSON encoder that also handles dict-like objects exposing iteritems()."""
    def default(self, obj):
        # Materialize anything that quacks like a mapping but is not a dict.
        if hasattr(obj, 'iteritems') and not isinstance(obj, dict):
            return dict(obj.iteritems())
        return super(SmarterEncoder, self).default(obj)
class Ec2Signer(object):
    """Computes AWS-style request signatures (from boto/connection.py).

    Supports EC2 SignatureVersion 0, 1 and 2.  NOTE(review): the
    SigV0/SigV1 paths update self.hmac in place, so a signer instance
    appears single-use for those versions -- confirm with callers.
    This class is Python-2-only as written (unicode, cmp-sort,
    urllib.quote).
    """
    def __init__(self, secret_key):
        # Seed HMAC contexts with the shared secret; the SHA-256 context
        # is used for SignatureVersion 2 when available.
        secret_key = secret_key.encode()
        self.hmac = hmac.new(secret_key, digestmod=hashlib.sha1)
        if hashlib.sha256:
            self.hmac_256 = hmac.new(secret_key, digestmod=hashlib.sha256)
    def generate(self, credentials):
        """Generate auth string according to what SignatureVersion is given."""
        if credentials['params']['SignatureVersion'] == '0':
            return self._calc_signature_0(credentials['params'])
        if credentials['params']['SignatureVersion'] == '1':
            return self._calc_signature_1(credentials['params'])
        if credentials['params']['SignatureVersion'] == '2':
            return self._calc_signature_2(credentials['params'],
                                          credentials['verb'],
                                          credentials['host'],
                                          credentials['path'])
        raise Exception(_('Unknown Signature Version: %s') %
                        credentials['params']['SignatureVersion'])
    @staticmethod
    def _get_utf8_value(value):
        """Get the UTF8-encoded version of a value."""
        # Python 2 only: 'unicode' does not exist on Python 3.
        if not isinstance(value, str) and not isinstance(value, unicode):
            value = str(value)
        if isinstance(value, unicode):
            return value.encode('utf-8')
        else:
            return value
    def _calc_signature_0(self, params):
        """Generate AWS signature version 0 string."""
        # SigV0 signs only Action + Timestamp.
        s = params['Action'] + params['Timestamp']
        self.hmac.update(s)
        return base64.b64encode(self.hmac.digest())
    def _calc_signature_1(self, params):
        """Generate AWS signature version 1 string."""
        # SigV1 orders keys case-insensitively; cmp-sort is Python 2 only.
        keys = params.keys()
        keys.sort(cmp=lambda x, y: cmp(x.lower(), y.lower()))
        for key in keys:
            self.hmac.update(key)
            val = self._get_utf8_value(params[key])
            self.hmac.update(val)
        return base64.b64encode(self.hmac.digest())
    def _calc_signature_2(self, params, verb, server_string, path):
        """Generate AWS signature version 2 string."""
        LOG.debug(_('using _calc_signature_2'))
        string_to_sign = '%s\n%s\n%s\n' % (verb, server_string, path)
        # Prefer HMAC-SHA256 when available; record the method used
        # in the signed params (side effect on the caller's dict).
        if self.hmac_256:
            current_hmac = self.hmac_256
            params['SignatureMethod'] = 'HmacSHA256'
        else:
            current_hmac = self.hmac
            params['SignatureMethod'] = 'HmacSHA1'
        # Canonical query string: sorted keys, AWS-safe URL encoding.
        keys = params.keys()
        keys.sort()
        pairs = []
        for key in keys:
            val = self._get_utf8_value(params[key])
            val = urllib.quote(val, safe='-_~')
            pairs.append(urllib.quote(key, safe='') + '=' + val)
        qs = '&'.join(pairs)
        LOG.debug(_('query string: %s'), qs)
        string_to_sign += qs
        LOG.debug(_('string_to_sign: %s'), string_to_sign)
        current_hmac.update(string_to_sign)
        b64 = base64.b64encode(current_hmac.digest())
        LOG.debug(_('len(b64)=%d'), len(b64))
        LOG.debug(_('base64 encoded digest: %s'), b64)
        return b64
def trunc_password(password):
    """Truncate passwords to MAX_PASSWORD_LENGTH.

    :raises exception.ValidationError: if the value has no usable length
        (e.g. None or a non-sized type).
    """
    try:
        if len(password) <= MAX_PASSWORD_LENGTH:
            return password
        return password[:MAX_PASSWORD_LENGTH]
    except TypeError:
        raise exception.ValidationError(attribute='string', target='password')
def hash_user_password(user):
    """Return a copy of the user dict with its password hashed.

    The input dict is not modified; if no password key is present the
    original dict is returned unchanged.
    """
    if 'password' not in user:
        return user
    return dict(user, password=hash_password(user['password']))
def hash_ldap_user_password(user):
    """Return a copy of the user dict with its password LDAP-hashed.

    The input dict is not modified; if no password key is present the
    original dict is returned unchanged.
    """
    if 'password' not in user:
        return user
    return dict(user, password=ldap_hash_password(user['password']))
def hash_password(password):
    """Hash a password with sha512_crypt.

    Values that already look like a sha512_crypt hash are passed
    through unchanged; otherwise the configured round count is used.
    """
    password_utf8 = trunc_password(password).encode('utf-8')
    if passlib.hash.sha512_crypt.identify(password_utf8):
        return password_utf8
    return passlib.hash.sha512_crypt.encrypt(password_utf8,
                                             rounds=CONF.crypt_strength)
def ldap_hash_password(password):
    """Hash a password for LDAP storage (salted SHA1)."""
    password_utf8 = trunc_password(password).encode('utf-8')
    return passlib.hash.ldap_salted_sha1.encrypt(password_utf8)
def ldap_check_password(password, hashed):
    """Verify a plaintext password against an LDAP salted-SHA1 hash.

    None is treated as a non-match rather than an error.
    """
    if password is None:
        return False
    candidate = trunc_password(password).encode('utf-8')
    return passlib.hash.ldap_salted_sha1.verify(candidate, hashed)
def check_password(password, hashed):
    """Check that a plaintext password matches hashed.

    hashpw returns the salt value concatenated with the actual hash value.
    It extracts the actual salt if this value is then passed as the salt.
    None is treated as a non-match rather than an error.
    """
    if password is None:
        return False
    candidate = trunc_password(password).encode('utf-8')
    return passlib.hash.sha512_crypt.verify(candidate, hashed)
# From python 2.7
def check_output(*popenargs, **kwargs):
    r"""Run command with arguments and return its output as a byte string.

    If the exit code was non-zero it raises a CalledProcessError.  The
    CalledProcessError object will have the return code in the returncode
    attribute and output in the output attribute.

    The arguments are the same as for the Popen constructor.  Example:

    >>> check_output(['ls', '-l', '/dev/null'])
    'crw-rw-rw- 1 root root 1, 3 Oct 18  2007 /dev/null\n'

    The stdout argument is not allowed as it is used internally.
    To capture standard error in the result, use stderr=STDOUT.

    >>> check_output(['/bin/sh', '-c',
    ...               'ls -l non_existent_file ; exit 0'],
    ...              stderr=STDOUT)
    'ls: non_existent_file: No such file or directory\n'
    """
    if 'stdout' in kwargs:
        raise ValueError('stdout argument not allowed, it will be overridden.')
    LOG.debug(' '.join(popenargs[0]))
    process = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs)
    output, unused_err = process.communicate()
    retcode = process.poll()
    if retcode:
        cmd = kwargs.get('args')
        if cmd is None:
            cmd = popenargs[0]
        # NOTE(review): unlike the stdlib 2.7 version, captured output is
        # not attached to the exception here (2.6-era CalledProcessError
        # lacks the output argument).
        raise subprocess.CalledProcessError(retcode, cmd)
    return output
def git(*args):
    """Run a git subcommand and return its stdout as a byte string."""
    cmd = ['git']
    cmd.extend(args)
    return check_output(cmd)
def unixtime(dt_obj):
    """Format datetime object as unix timestamp.

    Uses the local-time mktime of the UTC time tuple, matching the
    original behavior (so the result is offset by the local timezone).

    :param dt_obj: datetime.datetime object
    :returns: float
    """
    utc_tuple = dt_obj.utctimetuple()
    return time.mktime(utc_tuple)
def auth_str_equal(provided, known):
    """Constant-time string comparison.

    :params provided: the first string
    :params known: the second string
    :return: True if the strings are equal.

    This function takes two strings and compares them.  It is intended to be
    used when doing a comparison for authentication purposes to help guard
    against timing attacks.  When using the function for this purpose, always
    provide the user-provided password as the first argument.  The time this
    function will take is always a factor of the length of this string.
    """
    result = 0
    p_len = len(provided)
    k_len = len(known)
    # range() (not the Python-2-only xrange) keeps this portable across
    # Python versions; iteration behavior is identical.
    for i in range(p_len):
        # Out-of-range positions contribute 0 so the loop body does the
        # same work on every iteration regardless of where strings differ.
        a = ord(provided[i]) if i < p_len else 0
        b = ord(known[i]) if i < k_len else 0
        result |= a ^ b
    # Bitwise & (not short-circuiting 'and') so both conditions are
    # always evaluated.
    return (p_len == k_len) & (result == 0)
def hash_signed_token(signed_text):
    """Return the hex MD5 digest used as a short ID for a signed token.

    NOTE(review): MD5 here derives a lookup key for the signed blob rather
    than protecting a secret; changing the digest algorithm would orphan
    already-persisted token IDs.
    """
    return hashlib.md5(signed_text).hexdigest()
def setup_remote_pydev_debug():
    """Attach this process to a remote pydev debugger when configured.

    Reads CONF.pydev_debug_host / CONF.pydev_debug_port; does nothing (and
    implicitly returns None) unless both are set. Returns True once the
    tracer is attached; re-raises any attach failure after logging it.
    """
    if CONF.pydev_debug_host and CONF.pydev_debug_port:
        error_msg = ('Error setting up the debug environment. Verify that the'
                     ' option --debug-url has the format <host>:<port> and '
                     'that a debugger processes is listening on that port.')
        try:
            # Imported lazily so the pydev package is only required when
            # remote debugging is actually enabled.
            from pydev import pydevd
            pydevd.settrace(CONF.pydev_debug_host,
                            port=CONF.pydev_debug_port,
                            stdoutToServer=True,
                            stderrToServer=True)
            return True
        # NOTE(review): bare except deliberately catches everything so the
        # failure is logged before being re-raised to abort startup.
        except:
            LOG.exception(_(error_msg))
            raise
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack LLC
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 - 2012 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import base64
import hashlib
import hmac
import json
import os
import subprocess
import time
import urllib
import passlib.hash
from keystone.common import logging
from keystone import config
from keystone import exception
CONF = config.CONF
config.register_int('crypt_strength', default=40000)
LOG = logging.getLogger(__name__)
MAX_PASSWORD_LENGTH = 4096
def read_cached_file(filename, cache_info, reload_func=None):
    """Return a file's contents, re-reading only when the file changed.

    :param filename: path of the file to read
    :param cache_info: mutable dict used as an opaque cache; this function
        stores 'data' and 'mtime' keys in it
    :param reload_func: optional callable invoked with the fresh contents
        whenever the file is (re)loaded
    :returns: data from file (cached when the mtime is unchanged)
    """
    current_mtime = os.path.getmtime(filename)
    stale = not cache_info or current_mtime != cache_info.get('mtime')
    if stale:
        with open(filename) as handle:
            cache_info['data'] = handle.read()
        cache_info['mtime'] = current_mtime
        if reload_func:
            reload_func(cache_info['data'])
    return cache_info['data']
class SmarterEncoder(json.JSONEncoder):
    """JSON encoder that also serializes dict-like objects."""
    def default(self, obj):
        # Objects exposing iteritems() (mapping-like, but not real dicts,
        # which json handles natively) are emitted as plain dicts; anything
        # else falls through to the base class, which raises TypeError.
        if hasattr(obj, 'iteritems') and not isinstance(obj, dict):
            return dict(obj.iteritems())
        return super(SmarterEncoder, self).default(obj)
class Ec2Signer(object):
    """Hacked up code from boto/connection.py"""
    # NOTE(review): the HMAC contexts created in __init__ are mutated in
    # place by the _calc_signature_* helpers (boto copies them first) --
    # presumably each instance is only used to sign once; confirm callers.
    def __init__(self, secret_key):
        # Keyed HMAC contexts are prepared once per signer instance.
        secret_key = secret_key.encode()
        self.hmac = hmac.new(secret_key, digestmod=hashlib.sha1)
        if hashlib.sha256:
            self.hmac_256 = hmac.new(secret_key, digestmod=hashlib.sha256)
    def generate(self, credentials):
        """Generate auth string according to what SignatureVersion is given."""
        # Dispatch on the AWS SignatureVersion ('0', '1' or '2') supplied in
        # the request parameters; anything else is rejected.
        if credentials['params']['SignatureVersion'] == '0':
            return self._calc_signature_0(credentials['params'])
        if credentials['params']['SignatureVersion'] == '1':
            return self._calc_signature_1(credentials['params'])
        if credentials['params']['SignatureVersion'] == '2':
            return self._calc_signature_2(credentials['params'],
                                          credentials['verb'],
                                          credentials['host'],
                                          credentials['path'])
        raise Exception(_('Unknown Signature Version: %s') %
                        credentials['params']['SignatureVersion'])
    @staticmethod
    def _get_utf8_value(value):
        """Get the UTF8-encoded version of a value."""
        # Python 2 semantics: coerce non-strings via str(), then encode
        # unicode to UTF-8 bytes; byte strings pass through unchanged.
        if not isinstance(value, str) and not isinstance(value, unicode):
            value = str(value)
        if isinstance(value, unicode):
            return value.encode('utf-8')
        else:
            return value
    def _calc_signature_0(self, params):
        """Generate AWS signature version 0 string."""
        # v0 signs only Action + Timestamp, concatenated.
        s = params['Action'] + params['Timestamp']
        self.hmac.update(s)
        return base64.b64encode(self.hmac.digest())
    def _calc_signature_1(self, params):
        """Generate AWS signature version 1 string."""
        # v1 signs every key/value pair, keys ordered case-insensitively
        # (Python 2 list.sort(cmp=...)).
        keys = params.keys()
        keys.sort(cmp=lambda x, y: cmp(x.lower(), y.lower()))
        for key in keys:
            self.hmac.update(key)
            val = self._get_utf8_value(params[key])
            self.hmac.update(val)
        return base64.b64encode(self.hmac.digest())
    def _calc_signature_2(self, params, verb, server_string, path):
        """Generate AWS signature version 2 string."""
        LOG.debug(_('using _calc_signature_2'))
        # v2 signs "VERB\nhost\npath\n" plus the canonical query string.
        string_to_sign = '%s\n%s\n%s\n' % (verb, server_string, path)
        # NOTE(review): this mutates the caller's params dict by inserting
        # SignatureMethod -- required so the method is part of the signed
        # query string, but a visible side effect.
        if self.hmac_256:
            current_hmac = self.hmac_256
            params['SignatureMethod'] = 'HmacSHA256'
        else:
            current_hmac = self.hmac
            params['SignatureMethod'] = 'HmacSHA1'
        keys = params.keys()
        keys.sort()
        pairs = []
        for key in keys:
            val = self._get_utf8_value(params[key])
            # AWS canonical percent-encoding: only -_~ stay unescaped in
            # values; keys are fully escaped.
            val = urllib.quote(val, safe='-_~')
            pairs.append(urllib.quote(key, safe='') + '=' + val)
        qs = '&'.join(pairs)
        LOG.debug(_('query string: %s'), qs)
        string_to_sign += qs
        LOG.debug(_('string_to_sign: %s'), string_to_sign)
        current_hmac.update(string_to_sign)
        b64 = base64.b64encode(current_hmac.digest())
        LOG.debug(_('len(b64)=%d'), len(b64))
        LOG.debug(_('base64 encoded digest: %s'), b64)
        return b64
def trunc_password(password):
    """Truncate passwords to the MAX_PASSWORD_LENGTH.

    Values without a length (e.g. None) raise a ValidationError, mirroring
    the TypeError that len() produces for them.
    """
    try:
        over_limit = len(password) > MAX_PASSWORD_LENGTH
    except TypeError:
        raise exception.ValidationError(attribute='string', target='password')
    return password[:MAX_PASSWORD_LENGTH] if over_limit else password
def hash_user_password(user):
    """Hash a user dict's password without modifying the passed-in dict."""
    try:
        plaintext = user['password']
    except KeyError:
        # No password present: return the original dict untouched.
        return user
    return dict(user, password=hash_password(plaintext))
def hash_ldap_user_password(user):
    """Hash a user dict's password without modifying the passed-in dict."""
    try:
        plaintext = user['password']
    except KeyError:
        # No password present: return the original dict untouched.
        return user
    return dict(user, password=ldap_hash_password(plaintext))
def hash_password(password):
    """Hash a password with sha512_crypt at CONF.crypt_strength rounds.

    NOTE(review): input that already looks like a sha512_crypt hash is
    returned unchanged -- presumably to avoid double-hashing, but it also
    means a caller could store a pre-supplied hash; confirm this is intended.
    """
    encoded = trunc_password(password).encode('utf-8')
    if passlib.hash.sha512_crypt.identify(encoded):
        return encoded
    return passlib.hash.sha512_crypt.encrypt(encoded,
                                             rounds=CONF.crypt_strength)
def ldap_hash_password(password):
    """Hash a password into LDAP salted-SHA1 ({SSHA}) form."""
    encoded = trunc_password(password).encode('utf-8')
    return passlib.hash.ldap_salted_sha1.encrypt(encoded)
def ldap_check_password(password, hashed):
    """Verify a plaintext password against an LDAP {SSHA} hash."""
    # A missing password never verifies.
    if password is None:
        return False
    candidate = trunc_password(password).encode('utf-8')
    return passlib.hash.ldap_salted_sha1.verify(candidate, hashed)
def check_password(password, hashed):
    """Check that a plaintext password matches hashed.

    The stored sha512_crypt hash embeds its own salt; passlib extracts it
    during verification. A None password never verifies.
    """
    if password is None:
        return False
    candidate = trunc_password(password).encode('utf-8')
    return passlib.hash.sha512_crypt.verify(candidate, hashed)
# From python 2.7
def check_output(*popenargs, **kwargs):
    r"""Run command with arguments and return its output as a byte string.
    If the exit code was non-zero it raises a CalledProcessError. The
    CalledProcessError object will have the return code in the returncode
    attribute and output in the output attribute.
    The arguments are the same as for the Popen constructor. Example:
    >>> check_output(['ls', '-l', '/dev/null'])
    'crw-rw-rw- 1 root root 1, 3 Oct 18 2007 /dev/null\n'
    The stdout argument is not allowed as it is used internally.
    To capture standard error in the result, use stderr=STDOUT.
    >>> check_output(['/bin/sh', '-c',
    ...               'ls -l non_existent_file ; exit 0'],
    ...              stderr=STDOUT)
    'ls: non_existent_file: No such file or directory\n'
    """
    # stdout is claimed by this helper (it is how output is captured).
    if 'stdout' in kwargs:
        raise ValueError('stdout argument not allowed, it will be overridden.')
    # Assumes popenargs[0] is a sequence of argument strings (a string here
    # would be join()ed character by character) -- TODO confirm all callers.
    LOG.debug(' '.join(popenargs[0]))
    process = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs)
    output, unused_err = process.communicate()
    retcode = process.poll()
    if retcode:
        cmd = kwargs.get('args')
        if cmd is None:
            cmd = popenargs[0]
        # NOTE(review): unlike the 2.7 stdlib version this raise does not
        # attach `output` to the exception, despite the docstring's claim --
        # presumably for Python 2.6 compatibility; verify before relying on
        # CalledProcessError.output.
        raise subprocess.CalledProcessError(retcode, cmd)
    return output
def git(*args):
    # Convenience wrapper: run "git <args...>" and return its stdout.
    return check_output(list(('git',) + args))
def unixtime(dt_obj):
    """Format datetime object as a unix timestamp.

    :param dt_obj: datetime.datetime object
    :returns: float seconds since the epoch
    """
    time_tuple = dt_obj.utctimetuple()
    return time.mktime(time_tuple)
def auth_str_equal(provided, known):
    """Constant-time string comparison.
    :params provided: the first string
    :params known: the second string
    :return: True if the strings are equal.
    This function takes two strings and compares them. It is intended to be
    used when doing a comparison for authentication purposes to help guard
    against timing attacks. When using the function for this purpose, always
    provide the user-provided password as the first argument. The time this
    function will take is always a factor of the length of this string.
    """
    result = 0
    p_len = len(provided)
    k_len = len(known)
    # Loop over the full length of the caller-supplied string so the
    # iteration count depends only on data the caller already knows.
    for i in xrange(p_len):
        # The i < p_len guard is always true; it is kept symmetric with the
        # known-string guard below so both lookups cost the same per step.
        a = ord(provided[i]) if i < p_len else 0
        b = ord(known[i]) if i < k_len else 0
        # Accumulate differences with |= -- no data-dependent short-circuit.
        result |= a ^ b
    # Bitwise & (not 'and') avoids introducing a branch on the outcome.
    return (p_len == k_len) & (result == 0)
def hash_signed_token(signed_text):
    """Return the hex MD5 digest used as a short ID for a signed token."""
    digest = hashlib.md5()
    digest.update(signed_text)
    # NOTE(review): MD5 here is a key-derivation shortcut, not password
    # hashing; swapping algorithms would orphan already-issued token IDs.
    return digest.hexdigest()
def setup_remote_pydev_debug():
    """Attach this process to a remote pydev debugger when configured.

    Reads CONF.pydev_debug_host / CONF.pydev_debug_port; does nothing (and
    implicitly returns None) unless both are set. Returns True once the
    tracer is attached; re-raises any attach failure after logging it.
    """
    if CONF.pydev_debug_host and CONF.pydev_debug_port:
        error_msg = ('Error setting up the debug environment. Verify that the'
                     ' option --debug-url has the format <host>:<port> and '
                     'that a debugger processes is listening on that port.')
        try:
            # Imported lazily so the pydev package is only required when
            # remote debugging is actually enabled.
            from pydev import pydevd
            pydevd.settrace(CONF.pydev_debug_host,
                            port=CONF.pydev_debug_port,
                            stdoutToServer=True,
                            stderrToServer=True)
            return True
        # NOTE(review): bare except deliberately catches everything so the
        # failure is logged before being re-raised to abort startup.
        except:
            LOG.exception(_(error_msg))
            raise
class LimitingReader(object):
    """Reader to limit the size of an incoming request.

    Wraps a file-like/iterable body and raises RequestTooLarge as soon as
    the cumulative number of bytes handed out exceeds `limit`.
    """
    def __init__(self, data, limit):
        """
        :param data: Underlying data object
        :param limit: maximum number of bytes the reader should allow
        """
        self.data = data
        self.limit = limit
        # Running total of bytes already yielded to the consumer.
        self.bytes_read = 0

    def __iter__(self):
        for chunk in self.data:
            self.bytes_read += len(chunk)
            if self.bytes_read > self.limit:
                raise exception.RequestTooLarge()
            else:
                yield chunk

    def read(self, i=None):
        """Read up to ``i`` bytes (all remaining data when ``i`` is None).

        BUG FIX: the original defined ``read`` twice -- ``read(self, i)``
        and ``read(self)`` -- so the second definition shadowed the first
        and any caller passing a size argument got a TypeError. The two
        are merged into a single method with an optional size.

        :raises exception.RequestTooLarge: once the cumulative bytes read
            exceed the configured limit
        """
        if i is None:
            result = self.data.read()
        else:
            result = self.data.read(i)
        self.bytes_read += len(result)
        if self.bytes_read > self.limit:
            raise exception.RequestTooLarge()
        return result
|
5558_1
|
crossvul
|
py
|
CWE-119
|
Improper Restriction of Operations within the Bounds of a Memory Buffer - Certain languages allow direct addressing of memory locations and do not automatically ensure that these locations are valid for the memory buffer that is being referenced.
|
python
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import gettext
import os
import sys
from keystone.common import logging
from keystone.openstack.common import cfg
gettext.install('keystone', unicode=1)
CONF = cfg.CONF
def setup_logging(conf):
    """
    Sets up the logging options for a log with supplied name

    :param conf: a cfg.ConfOpts object
    """
    if conf.log_config:
        # Use a logging configuration file for all settings...
        if os.path.exists(conf.log_config):
            logging.config.fileConfig(conf.log_config)
            return
        else:
            raise RuntimeError(_('Unable to locate specified logging '
                                 'config file: %s') % conf.log_config)
    # No config file: configure the root logger directly.
    root_logger = logging.root
    # debug wins over verbose; default is WARNING.
    if conf.debug:
        root_logger.setLevel(logging.DEBUG)
    elif conf.verbose:
        root_logger.setLevel(logging.INFO)
    else:
        root_logger.setLevel(logging.WARNING)
    formatter = logging.Formatter(conf.log_format, conf.log_date_format)
    # Handler priority: syslog, then a (possibly log_dir-relative) file,
    # then stdout. `logging` here is keystone.common.logging, which is
    # assumed to re-export the stdlib handlers -- TODO confirm.
    if conf.use_syslog:
        try:
            facility = getattr(logging.SysLogHandler,
                               conf.syslog_log_facility)
        except AttributeError:
            raise ValueError(_('Invalid syslog facility'))
        handler = logging.SysLogHandler(address='/dev/log',
                                        facility=facility)
    elif conf.log_file:
        logfile = conf.log_file
        if conf.log_dir:
            logfile = os.path.join(conf.log_dir, logfile)
        handler = logging.WatchedFileHandler(logfile)
    else:
        handler = logging.StreamHandler(sys.stdout)
    handler.setFormatter(formatter)
    root_logger.addHandler(handler)
def register_str(*args, **kw):
    """Register a StrOpt, honoring optional conf=/group= keyword args."""
    target = kw.pop('conf', CONF)
    opt_group = kw.pop('group', None)
    return target.register_opt(cfg.StrOpt(*args, **kw), group=opt_group)
def register_cli_str(*args, **kw):
    """Register a CLI StrOpt, honoring optional conf=/group= keyword args."""
    target = kw.pop('conf', CONF)
    opt_group = kw.pop('group', None)
    return target.register_cli_opt(cfg.StrOpt(*args, **kw), group=opt_group)
def register_list(*args, **kw):
    """Register a ListOpt, honoring optional conf=/group= keyword args."""
    target = kw.pop('conf', CONF)
    opt_group = kw.pop('group', None)
    return target.register_opt(cfg.ListOpt(*args, **kw), group=opt_group)
def register_cli_list(*args, **kw):
    """Register a CLI ListOpt, honoring optional conf=/group= keyword args."""
    target = kw.pop('conf', CONF)
    opt_group = kw.pop('group', None)
    return target.register_cli_opt(cfg.ListOpt(*args, **kw), group=opt_group)
def register_bool(*args, **kw):
    """Register a BoolOpt, honoring optional conf=/group= keyword args."""
    target = kw.pop('conf', CONF)
    opt_group = kw.pop('group', None)
    return target.register_opt(cfg.BoolOpt(*args, **kw), group=opt_group)
def register_cli_bool(*args, **kw):
    """Register a CLI BoolOpt, honoring optional conf=/group= keyword args."""
    target = kw.pop('conf', CONF)
    opt_group = kw.pop('group', None)
    return target.register_cli_opt(cfg.BoolOpt(*args, **kw), group=opt_group)
def register_int(*args, **kw):
    """Register an IntOpt, honoring optional conf=/group= keyword args."""
    target = kw.pop('conf', CONF)
    opt_group = kw.pop('group', None)
    return target.register_opt(cfg.IntOpt(*args, **kw), group=opt_group)
def register_cli_int(*args, **kw):
    """Register a CLI IntOpt, honoring optional conf=/group= keyword args."""
    target = kw.pop('conf', CONF)
    opt_group = kw.pop('group', None)
    return target.register_cli_opt(cfg.IntOpt(*args, **kw), group=opt_group)
# --- CLI / top-level options ---------------------------------------------
register_cli_bool('standard-threads', default=False)
register_cli_str('pydev-debug-host', default=None)
register_cli_int('pydev-debug-port', default=None)
# NOTE(review): a static default admin token is a weak bootstrap credential;
# deployments are expected to override it.
register_str('admin_token', default='ADMIN')
register_str('bind_host', default='0.0.0.0')
# NOTE(review): port options are registered as string options with integer
# defaults -- presumably historical; confirm consumers coerce as needed.
register_str('compute_port', default=8774)
register_str('admin_port', default=35357)
register_str('public_port', default=5000)
register_str('onready')
register_str('auth_admin_prefix', default='')
register_str('policy_file', default='policy.json')
register_str('policy_default_rule', default=None)
#ssl options
register_bool('enable', group='ssl', default=False)
register_str('certfile', group='ssl', default=None)
register_str('keyfile', group='ssl', default=None)
register_str('ca_certs', group='ssl', default=None)
register_bool('cert_required', group='ssl', default=False)
#signing options
register_str('token_format', group='signing',
             default="PKI")
register_str('certfile', group='signing',
             default="/etc/keystone/ssl/certs/signing_cert.pem")
register_str('keyfile', group='signing',
             default="/etc/keystone/ssl/private/signing_key.pem")
register_str('ca_certs', group='signing',
             default="/etc/keystone/ssl/certs/ca.pem")
register_int('key_size', group='signing', default=1024)
register_int('valid_days', group='signing', default=3650)
register_str('ca_password', group='signing', default=None)
# sql options
register_str('connection', group='sql', default='sqlite:///keystone.db')
register_int('idle_timeout', group='sql', default=200)
# backend driver selection, one 'driver' option per subsystem group
register_str('driver', group='catalog',
             default='keystone.catalog.backends.sql.Catalog')
register_str('driver', group='identity',
             default='keystone.identity.backends.sql.Identity')
register_str('driver', group='policy',
             default='keystone.policy.backends.sql.Policy')
register_str('driver', group='token',
             default='keystone.token.backends.kvs.Token')
register_str('driver', group='ec2',
             default='keystone.contrib.ec2.backends.kvs.Ec2')
register_str('driver', group='stats',
             default='keystone.contrib.stats.backends.kvs.Stats')
#ldap
register_str('url', group='ldap', default='ldap://localhost')
register_str('user', group='ldap', default='dc=Manager,dc=example,dc=com')
register_str('password', group='ldap', default='freeipa4all')
register_str('suffix', group='ldap', default='cn=example,cn=com')
register_bool('use_dumb_member', group='ldap', default=False)
register_str('dumb_member', group='ldap', default='cn=dumb,dc=nonexistent')
register_bool('allow_subtree_delete', group='ldap', default=False)
register_str('user_tree_dn', group='ldap', default=None)
register_str('user_filter', group='ldap', default=None)
register_str('user_objectclass', group='ldap', default='inetOrgPerson')
register_str('user_id_attribute', group='ldap', default='cn')
register_str('user_name_attribute', group='ldap', default='sn')
register_str('user_mail_attribute', group='ldap', default='email')
register_str('user_pass_attribute', group='ldap', default='userPassword')
register_str('user_enabled_attribute', group='ldap', default='enabled')
register_int('user_enabled_mask', group='ldap', default=0)
register_str('user_enabled_default', group='ldap', default='True')
register_list('user_attribute_ignore', group='ldap',
              default='tenant_id,tenants')
register_bool('user_allow_create', group='ldap', default=True)
register_bool('user_allow_update', group='ldap', default=True)
register_bool('user_allow_delete', group='ldap', default=True)
register_str('tenant_tree_dn', group='ldap', default=None)
register_str('tenant_filter', group='ldap', default=None)
register_str('tenant_objectclass', group='ldap', default='groupOfNames')
register_str('tenant_id_attribute', group='ldap', default='cn')
register_str('tenant_member_attribute', group='ldap', default='member')
register_str('tenant_name_attribute', group='ldap', default='ou')
register_str('tenant_desc_attribute', group='ldap', default='desc')
register_str('tenant_enabled_attribute', group='ldap', default='enabled')
register_list('tenant_attribute_ignore', group='ldap', default='')
register_bool('tenant_allow_create', group='ldap', default=True)
register_bool('tenant_allow_update', group='ldap', default=True)
register_bool('tenant_allow_delete', group='ldap', default=True)
register_str('role_tree_dn', group='ldap', default=None)
register_str('role_filter', group='ldap', default=None)
register_str('role_objectclass', group='ldap', default='organizationalRole')
register_str('role_id_attribute', group='ldap', default='cn')
register_str('role_name_attribute', group='ldap', default='ou')
register_str('role_member_attribute', group='ldap', default='roleOccupant')
register_list('role_attribute_ignore', group='ldap', default='')
register_bool('role_allow_create', group='ldap', default=True)
register_bool('role_allow_update', group='ldap', default=True)
register_bool('role_allow_delete', group='ldap', default=True)
register_str('group_tree_dn', group='ldap', default=None)
register_str('group_filter', group='ldap', default=None)
register_str('group_objectclass', group='ldap', default='groupOfNames')
register_str('group_id_attribute', group='ldap', default='cn')
register_str('group_name_attribute', group='ldap', default='ou')
register_str('group_member_attribute', group='ldap', default='member')
register_str('group_desc_attribute', group='ldap', default='desc')
register_list('group_attribute_ignore', group='ldap', default='')
register_bool('group_allow_create', group='ldap', default=True)
register_bool('group_allow_update', group='ldap', default=True)
register_bool('group_allow_delete', group='ldap', default=True)
#pam
register_str('url', group='pam', default=None)
register_str('userid', group='pam', default=None)
register_str('password', group='pam', default=None)
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import gettext
import os
import sys
from keystone.common import logging
from keystone.openstack.common import cfg
gettext.install('keystone', unicode=1)
CONF = cfg.CONF
def setup_logging(conf):
    """
    Sets up the logging options for a log with supplied name

    :param conf: a cfg.ConfOpts object
    """
    if conf.log_config:
        # Use a logging configuration file for all settings...
        if os.path.exists(conf.log_config):
            logging.config.fileConfig(conf.log_config)
            return
        else:
            raise RuntimeError(_('Unable to locate specified logging '
                                 'config file: %s') % conf.log_config)
    # No config file: configure the root logger directly.
    root_logger = logging.root
    # debug wins over verbose; default is WARNING.
    if conf.debug:
        root_logger.setLevel(logging.DEBUG)
    elif conf.verbose:
        root_logger.setLevel(logging.INFO)
    else:
        root_logger.setLevel(logging.WARNING)
    formatter = logging.Formatter(conf.log_format, conf.log_date_format)
    # Handler priority: syslog, then a (possibly log_dir-relative) file,
    # then stdout. `logging` here is keystone.common.logging, which is
    # assumed to re-export the stdlib handlers -- TODO confirm.
    if conf.use_syslog:
        try:
            facility = getattr(logging.SysLogHandler,
                               conf.syslog_log_facility)
        except AttributeError:
            raise ValueError(_('Invalid syslog facility'))
        handler = logging.SysLogHandler(address='/dev/log',
                                        facility=facility)
    elif conf.log_file:
        logfile = conf.log_file
        if conf.log_dir:
            logfile = os.path.join(conf.log_dir, logfile)
        handler = logging.WatchedFileHandler(logfile)
    else:
        handler = logging.StreamHandler(sys.stdout)
    handler.setFormatter(formatter)
    root_logger.addHandler(handler)
def register_str(*args, **kw):
    """Register a string option; conf=/group= keywords select the target."""
    conf_obj = kw.pop('conf', CONF)
    grp = kw.pop('group', None)
    return conf_obj.register_opt(cfg.StrOpt(*args, **kw), group=grp)
def register_cli_str(*args, **kw):
    """Register a CLI string option; conf=/group= select the target."""
    conf_obj = kw.pop('conf', CONF)
    grp = kw.pop('group', None)
    return conf_obj.register_cli_opt(cfg.StrOpt(*args, **kw), group=grp)
def register_list(*args, **kw):
    """Register a list option; conf=/group= keywords select the target."""
    conf_obj = kw.pop('conf', CONF)
    grp = kw.pop('group', None)
    return conf_obj.register_opt(cfg.ListOpt(*args, **kw), group=grp)
def register_cli_list(*args, **kw):
    """Register a CLI list option; conf=/group= select the target."""
    conf_obj = kw.pop('conf', CONF)
    grp = kw.pop('group', None)
    return conf_obj.register_cli_opt(cfg.ListOpt(*args, **kw), group=grp)
def register_bool(*args, **kw):
    """Register a boolean option; conf=/group= keywords select the target."""
    conf_obj = kw.pop('conf', CONF)
    grp = kw.pop('group', None)
    return conf_obj.register_opt(cfg.BoolOpt(*args, **kw), group=grp)
def register_cli_bool(*args, **kw):
    """Register a CLI boolean option; conf=/group= select the target."""
    conf_obj = kw.pop('conf', CONF)
    grp = kw.pop('group', None)
    return conf_obj.register_cli_opt(cfg.BoolOpt(*args, **kw), group=grp)
def register_int(*args, **kw):
    """Register an integer option; conf=/group= keywords select the target."""
    conf_obj = kw.pop('conf', CONF)
    grp = kw.pop('group', None)
    return conf_obj.register_opt(cfg.IntOpt(*args, **kw), group=grp)
def register_cli_int(*args, **kw):
    """Register a CLI integer option; conf=/group= select the target."""
    conf_obj = kw.pop('conf', CONF)
    grp = kw.pop('group', None)
    return conf_obj.register_cli_opt(cfg.IntOpt(*args, **kw), group=grp)
# --- CLI / top-level options ---------------------------------------------
register_cli_bool('standard-threads', default=False)
register_cli_str('pydev-debug-host', default=None)
register_cli_int('pydev-debug-port', default=None)
# NOTE(review): a static default admin token is a weak bootstrap credential;
# deployments are expected to override it.
register_str('admin_token', default='ADMIN')
register_str('bind_host', default='0.0.0.0')
# NOTE(review): port options are registered as string options with integer
# defaults -- presumably historical; confirm consumers coerce as needed.
register_str('compute_port', default=8774)
register_str('admin_port', default=35357)
register_str('public_port', default=5000)
register_str('onready')
register_str('auth_admin_prefix', default='')
register_str('policy_file', default='policy.json')
register_str('policy_default_rule', default=None)
#default max request size is 112k
register_int('max_request_body_size', default=114688)
#ssl options
register_bool('enable', group='ssl', default=False)
register_str('certfile', group='ssl', default=None)
register_str('keyfile', group='ssl', default=None)
register_str('ca_certs', group='ssl', default=None)
register_bool('cert_required', group='ssl', default=False)
#signing options
register_str('token_format', group='signing',
             default="PKI")
register_str('certfile', group='signing',
             default="/etc/keystone/ssl/certs/signing_cert.pem")
register_str('keyfile', group='signing',
             default="/etc/keystone/ssl/private/signing_key.pem")
register_str('ca_certs', group='signing',
             default="/etc/keystone/ssl/certs/ca.pem")
register_int('key_size', group='signing', default=1024)
register_int('valid_days', group='signing', default=3650)
register_str('ca_password', group='signing', default=None)
# sql options
register_str('connection', group='sql', default='sqlite:///keystone.db')
register_int('idle_timeout', group='sql', default=200)
# backend driver selection, one 'driver' option per subsystem group
register_str('driver', group='catalog',
             default='keystone.catalog.backends.sql.Catalog')
register_str('driver', group='identity',
             default='keystone.identity.backends.sql.Identity')
register_str('driver', group='policy',
             default='keystone.policy.backends.sql.Policy')
register_str('driver', group='token',
             default='keystone.token.backends.kvs.Token')
register_str('driver', group='ec2',
             default='keystone.contrib.ec2.backends.kvs.Ec2')
register_str('driver', group='stats',
             default='keystone.contrib.stats.backends.kvs.Stats')
#ldap
register_str('url', group='ldap', default='ldap://localhost')
register_str('user', group='ldap', default='dc=Manager,dc=example,dc=com')
register_str('password', group='ldap', default='freeipa4all')
register_str('suffix', group='ldap', default='cn=example,cn=com')
register_bool('use_dumb_member', group='ldap', default=False)
register_str('dumb_member', group='ldap', default='cn=dumb,dc=nonexistent')
register_bool('allow_subtree_delete', group='ldap', default=False)
register_str('user_tree_dn', group='ldap', default=None)
register_str('user_filter', group='ldap', default=None)
register_str('user_objectclass', group='ldap', default='inetOrgPerson')
register_str('user_id_attribute', group='ldap', default='cn')
register_str('user_name_attribute', group='ldap', default='sn')
register_str('user_mail_attribute', group='ldap', default='email')
register_str('user_pass_attribute', group='ldap', default='userPassword')
register_str('user_enabled_attribute', group='ldap', default='enabled')
register_int('user_enabled_mask', group='ldap', default=0)
register_str('user_enabled_default', group='ldap', default='True')
register_list('user_attribute_ignore', group='ldap',
              default='tenant_id,tenants')
register_bool('user_allow_create', group='ldap', default=True)
register_bool('user_allow_update', group='ldap', default=True)
register_bool('user_allow_delete', group='ldap', default=True)
register_str('tenant_tree_dn', group='ldap', default=None)
register_str('tenant_filter', group='ldap', default=None)
register_str('tenant_objectclass', group='ldap', default='groupOfNames')
register_str('tenant_id_attribute', group='ldap', default='cn')
register_str('tenant_member_attribute', group='ldap', default='member')
register_str('tenant_name_attribute', group='ldap', default='ou')
register_str('tenant_desc_attribute', group='ldap', default='desc')
register_str('tenant_enabled_attribute', group='ldap', default='enabled')
register_list('tenant_attribute_ignore', group='ldap', default='')
register_bool('tenant_allow_create', group='ldap', default=True)
register_bool('tenant_allow_update', group='ldap', default=True)
register_bool('tenant_allow_delete', group='ldap', default=True)
register_str('role_tree_dn', group='ldap', default=None)
register_str('role_filter', group='ldap', default=None)
register_str('role_objectclass', group='ldap', default='organizationalRole')
register_str('role_id_attribute', group='ldap', default='cn')
register_str('role_name_attribute', group='ldap', default='ou')
register_str('role_member_attribute', group='ldap', default='roleOccupant')
register_list('role_attribute_ignore', group='ldap', default='')
register_bool('role_allow_create', group='ldap', default=True)
register_bool('role_allow_update', group='ldap', default=True)
register_bool('role_allow_delete', group='ldap', default=True)
register_str('group_tree_dn', group='ldap', default=None)
register_str('group_filter', group='ldap', default=None)
register_str('group_objectclass', group='ldap', default='groupOfNames')
register_str('group_id_attribute', group='ldap', default='cn')
register_str('group_name_attribute', group='ldap', default='ou')
register_str('group_member_attribute', group='ldap', default='member')
register_str('group_desc_attribute', group='ldap', default='desc')
register_list('group_attribute_ignore', group='ldap', default='')
register_bool('group_allow_create', group='ldap', default=True)
register_bool('group_allow_update', group='ldap', default=True)
register_bool('group_allow_delete', group='ldap', default=True)
#pam
register_str('url', group='pam', default=None)
register_str('userid', group='pam', default=None)
register_str('password', group='pam', default=None)
|
5558_2
|
crossvul
|
py
|
CWE-119
|
Improper Restriction of Operations within the Bounds of a Memory Buffer - Certain languages allow direct addressing of memory locations and do not automatically ensure that these locations are valid for the memory buffer that is being referenced.
|
python
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
from keystone.common import logging
from keystone import config
CONF = config.CONF
LOG = logging.getLogger(__name__)
# Tests use this to make exception message format errors fatal
_FATAL_EXCEPTION_FORMAT_ERRORS = False
class Error(StandardError):
    """Base error class.
    Child classes should define an HTTP status code, title, and a doc string.
    """
    # NOTE: subclass docstrings double as %-style message templates (see
    # _build_message), so they are runtime data -- do not reword casually.
    code = None
    title = None
    def __init__(self, message=None, **kwargs):
        """Use the doc string as the error message by default."""
        try:
            message = self._build_message(message, **kwargs)
        except KeyError as e:
            # if you see this warning in your logs, please raise a bug report
            if _FATAL_EXCEPTION_FORMAT_ERRORS:
                raise e
            else:
                # Fall back to the raw (unformatted) docstring so some
                # message is always produced.
                LOG.warning('missing exception kwargs (programmer error)')
                message = self.__doc__
        super(Error, self).__init__(message)
    def _build_message(self, message, **kwargs):
        """Builds and returns an exception message.
        :raises: KeyError given insufficient kwargs
        """
        # An explicit message wins; otherwise interpolate the class
        # docstring with the keyword arguments.
        return message or self.__doc__ % kwargs
    def __str__(self):
        """Cleans up line breaks and indentation from doc strings."""
        # Collapse runs of spaces/newlines left over from the docstring's
        # source indentation into single spaces.
        string = super(Error, self).__str__()
        string = re.sub('[ \n]+', ' ', string)
        string = string.strip()
        return string
# Each class's docstring below is a runtime %-format message template
# (consumed by Error._build_message) -- keep the docstrings byte-stable.
class ValidationError(Error):
    """Expecting to find %(attribute)s in %(target)s.
    The server could not comply with the request since it is either malformed
    or otherwise incorrect.
    The client is assumed to be in error.
    """
    code = 400
    title = 'Bad Request'
class StringLengthExceeded(ValidationError):
    """The length of string "%(string)s" exceeded the limit of column
    %(type)s(CHAR(%(length)d))."""
class SecurityError(Error):
    """Avoids exposing details of security failures, unless in debug mode."""
    def _build_message(self, message, **kwargs):
        """Only returns detailed messages in debug mode."""
        # Outside debug mode the caller-supplied message is discarded so
        # sensitive detail never reaches the client.
        if CONF.debug:
            return message or self.__doc__ % kwargs
        else:
            return self.__doc__ % kwargs
class Unauthorized(SecurityError):
    """The request you have made requires authentication."""
    code = 401
    title = 'Not Authorized'
class Forbidden(SecurityError):
    """You are not authorized to perform the requested action."""
    code = 403
    title = 'Not Authorized'
class ForbiddenAction(Forbidden):
    """You are not authorized to perform the requested action: %(action)s"""
# 404 family: one subclass per resource type, inheriting code/title.
class NotFound(Error):
    """Could not find: %(target)s"""
    code = 404
    title = 'Not Found'
class EndpointNotFound(NotFound):
    """Could not find endpoint: %(endpoint_id)s"""
class MetadataNotFound(NotFound):
    """An unhandled exception has occurred: Could not find metadata."""
    # (dolph): metadata is not a user-facing concept,
    # so this exception should not be exposed
class PolicyNotFound(NotFound):
    """Could not find policy: %(policy_id)s"""
class RoleNotFound(NotFound):
    """Could not find role: %(role_id)s"""
class ServiceNotFound(NotFound):
    """Could not find service: %(service_id)s"""
class DomainNotFound(NotFound):
    """Could not find domain: %(domain_id)s"""
class TenantNotFound(NotFound):
    """Could not find tenant: %(tenant_id)s"""
class ProjectNotFound(TenantNotFound):
    """Could not find project: %(project_id)s"""
class TokenNotFound(NotFound):
    """Could not find token: %(token_id)s"""
class UserNotFound(NotFound):
    """Could not find user: %(user_id)s"""
class GroupNotFound(NotFound):
    """Could not find group: %(group_id)s"""
class Conflict(Error):
    """Conflict occurred attempting to store %(type)s.
    %(details)s
    """
    code = 409
    title = 'Conflict'
class UnexpectedError(Error):
    """An unexpected error prevented the server from fulfilling your request.
    %(exception)s
    """
    code = 500
    title = 'Internal Server Error'
class MalformedEndpoint(UnexpectedError):
    """Malformed endpoint URL (see ERROR log for details): %(endpoint)s"""
class NotImplemented(Error):
    """The action you have requested has not been implemented."""
    code = 501
    title = 'Not Implemented'
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
from keystone.common import logging
from keystone import config
CONF = config.CONF
LOG = logging.getLogger(__name__)
# Tests use this to make exception message format errors fatal
_FATAL_EXCEPTION_FORMAT_ERRORS = False
class Error(StandardError):
    """Base error class.
    Child classes should define an HTTP status code, title, and a doc string.
    """
    # NOTE(review): StandardError is Python 2 only (removed in Python 3).
    # HTTP status code and human-readable title, overridden by subclasses.
    code = None
    title = None

    def __init__(self, message=None, **kwargs):
        """Use the doc string as the error message by default."""
        try:
            message = self._build_message(message, **kwargs)
        except KeyError as e:
            # if you see this warning in your logs, please raise a bug report
            if _FATAL_EXCEPTION_FORMAT_ERRORS:
                # Tests flip this flag so template/kwargs mismatches fail loudly.
                raise e
            else:
                LOG.warning('missing exception kwargs (programmer error)')
                # Fall back to the raw (unformatted) docstring template.
                message = self.__doc__

        super(Error, self).__init__(message)

    def _build_message(self, message, **kwargs):
        """Builds and returns an exception message.
        :raises: KeyError given insufficient kwargs
        """
        # The class docstring doubles as a %-format message template.
        return message or self.__doc__ % kwargs

    def __str__(self):
        """Cleans up line breaks and indentation from doc strings."""
        string = super(Error, self).__str__()
        string = re.sub('[ \n]+', ' ', string)
        string = string.strip()
        return string
# NOTE(review): every docstring below is a runtime %-format message template
# (consumed by Error._build_message); do not reword a docstring without
# checking the kwargs used at each raise site.
class ValidationError(Error):
    """Expecting to find %(attribute)s in %(target)s.
    The server could not comply with the request since it is either malformed
    or otherwise incorrect.
    The client is assumed to be in error.
    """
    code = 400
    title = 'Bad Request'


class StringLengthExceeded(ValidationError):
    """The length of string "%(string)s" exceeded the limit of column
    %(type)s(CHAR(%(length)d))."""


class SecurityError(Error):
    """Avoids exposing details of security failures, unless in debug mode."""

    def _build_message(self, message, **kwargs):
        """Only returns detailed messages in debug mode."""
        if CONF.debug:
            return message or self.__doc__ % kwargs
        else:
            # Outside debug mode only the generic docstring template is
            # returned, so failure details are never exposed to clients.
            return self.__doc__ % kwargs


class Unauthorized(SecurityError):
    """The request you have made requires authentication."""
    code = 401
    title = 'Not Authorized'


class Forbidden(SecurityError):
    """You are not authorized to perform the requested action."""
    code = 403
    title = 'Not Authorized'


class ForbiddenAction(Forbidden):
    """You are not authorized to perform the requested action: %(action)s"""


class NotFound(Error):
    """Could not find: %(target)s"""
    code = 404
    title = 'Not Found'


class EndpointNotFound(NotFound):
    """Could not find endpoint: %(endpoint_id)s"""


class MetadataNotFound(NotFound):
    """An unhandled exception has occurred: Could not find metadata."""
    # (dolph): metadata is not a user-facing concept,
    # so this exception should not be exposed


class PolicyNotFound(NotFound):
    """Could not find policy: %(policy_id)s"""


class RoleNotFound(NotFound):
    """Could not find role: %(role_id)s"""


class ServiceNotFound(NotFound):
    """Could not find service: %(service_id)s"""


class DomainNotFound(NotFound):
    """Could not find domain: %(domain_id)s"""


class TenantNotFound(NotFound):
    """Could not find tenant: %(tenant_id)s"""


class ProjectNotFound(TenantNotFound):
    """Could not find project: %(project_id)s"""


class TokenNotFound(NotFound):
    """Could not find token: %(token_id)s"""


class UserNotFound(NotFound):
    """Could not find user: %(user_id)s"""


class GroupNotFound(NotFound):
    """Could not find group: %(group_id)s"""


class Conflict(Error):
    """Conflict occurred attempting to store %(type)s.
    %(details)s
    """
    code = 409
    title = 'Conflict'


class RequestTooLarge(Error):
    """Request is too large."""
    # Raised by the request-body size-limiting middleware.
    code = 413
    title = 'Request is too large.'


class UnexpectedError(Error):
    """An unexpected error prevented the server from fulfilling your request.
    %(exception)s
    """
    code = 500
    title = 'Internal Server Error'


class MalformedEndpoint(UnexpectedError):
    """Malformed endpoint URL (see ERROR log for details): %(endpoint)s"""


class NotImplemented(Error):
    """The action you have requested has not been implemented."""
    # NOTE(review): shadows the NotImplemented builtin within this module;
    # callers must always reference it as exception.NotImplemented.
    code = 501
    title = 'Not Implemented'
|
5558_3
|
crossvul
|
py
|
CWE-119
|
Improper Restriction of Operations within the Bounds of a Memory Buffer - Certain languages allow direct addressing of memory locations and do not automatically ensure that these locations are valid for the memory buffer that is being referenced.
|
python
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from keystone.common import serializer
from keystone.common import wsgi
from keystone import config
from keystone import exception
from keystone.openstack.common import jsonutils
CONF = config.CONF
# Header used to transmit the auth token
AUTH_TOKEN_HEADER = 'X-Auth-Token'
# Environment variable used to pass the request context
CONTEXT_ENV = wsgi.CONTEXT_ENV
# Environment variable used to pass the request params
PARAMS_ENV = wsgi.PARAMS_ENV
class TokenAuthMiddleware(wsgi.Middleware):
    """Copies the X-Auth-Token header value into the request context."""

    def process_request(self, request):
        # Propagate the caller's token into the per-request context dict,
        # creating the dict if no earlier middleware has done so.
        env = request.environ
        ctx = env.get(CONTEXT_ENV, {})
        ctx['token_id'] = request.headers.get(AUTH_TOKEN_HEADER)
        env[CONTEXT_ENV] = ctx
class AdminTokenAuthMiddleware(wsgi.Middleware):
    """A trivial filter that checks for a pre-defined admin token.
    Sets 'is_admin' to true in the context, expected to be checked by
    methods that are admin-only.
    """

    def process_request(self, request):
        token = request.headers.get(AUTH_TOKEN_HEADER)
        context = request.environ.get(CONTEXT_ENV, {})
        # Plain string comparison against the configured bootstrap token.
        context['is_admin'] = (token == CONF.admin_token)
        request.environ[CONTEXT_ENV] = context
class PostParamsMiddleware(wsgi.Middleware):
    """Middleware to allow method arguments to be passed as POST parameters.
    Filters out the parameters `self`, `context` and anything beginning with
    an underscore.
    """

    def process_request(self, request):
        # Drop reserved names and private (underscore-prefixed) keys before
        # exposing the remaining POST parameters to the dispatcher.
        filtered = dict((key, value)
                        for key, value in request.params.iteritems()
                        if key not in ('self', 'context')
                        and not key.startswith('_'))
        request.environ[PARAMS_ENV] = filtered
class JsonBodyMiddleware(wsgi.Middleware):
    """Middleware to allow method arguments to be passed as serialized JSON.
    Accepting arguments as JSON is useful for accepting data that may be more
    complex than simple primitives.
    In this case we accept it as urlencoded data under the key 'json' as in
    json=<urlencoded_json> but this could be extended to accept raw JSON
    in the POST body.
    Filters out the parameters `self`, `context` and anything beginning with
    an underscore.
    """

    def process_request(self, request):
        # Abort early if we don't have any work to do
        params_json = request.body
        if not params_json:
            return
        # Reject unrecognized content types. Empty string indicates
        # the client did not explicitly set the header
        if not request.content_type in ('application/json', ''):
            e = exception.ValidationError(attribute='application/json',
                                          target='Content-Type header')
            return wsgi.render_exception(e)

        params_parsed = {}
        try:
            params_parsed = jsonutils.loads(params_json)
        except ValueError:
            # Malformed JSON is a client error, rendered as a 400 response.
            e = exception.ValidationError(attribute='valid JSON',
                                          target='request body')
            return wsgi.render_exception(e)
        finally:
            # Normalize any falsy parse result (e.g. null, {}) to an empty
            # dict; runs before either return statement completes.
            if not params_parsed:
                params_parsed = {}

        params = {}
        for k, v in params_parsed.iteritems():
            # Strip reserved names and private (underscore-prefixed) keys.
            if k in ('self', 'context'):
                continue
            if k.startswith('_'):
                continue
            params[k] = v

        request.environ[PARAMS_ENV] = params
class XmlBodyMiddleware(wsgi.Middleware):
    """De/serializes XML to/from JSON."""

    def process_request(self, request):
        """Transform the request from XML to JSON."""
        incoming_xml = 'application/xml' in str(request.content_type)
        if incoming_xml and request.body:
            # Rewrite the request in place so downstream handlers only ever
            # see JSON.
            request.content_type = 'application/json'
            request.body = jsonutils.dumps(serializer.from_xml(request.body))

    def process_response(self, request, response):
        """Transform the response from JSON to XML."""
        outgoing_xml = 'application/xml' in str(request.accept)
        if outgoing_xml and response.body:
            response.content_type = 'application/xml'
            try:
                body_obj = jsonutils.loads(response.body)
                response.body = serializer.to_xml(body_obj)
            except Exception:
                # Deliberately broad: any serialization failure is re-raised
                # carrying the raw body so the problem is visible upstream.
                raise exception.Error(message=response.body)
        return response
class NormalizingFilter(wsgi.Middleware):
    """Middleware filter to handle URL normalization."""

    def process_request(self, request):
        """Normalizes URLs."""
        path = request.environ['PATH_INFO']
        if len(path) > 1 and path.endswith('/'):
            # Drop a single trailing slash so /v2.0/ routes like /v2.0.
            request.environ['PATH_INFO'] = path[:-1]
        elif not path:
            # An empty path means the root resource was requested.
            request.environ['PATH_INFO'] = '/'
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import webob.dec
from keystone.common import serializer
from keystone.common import utils
from keystone.common import wsgi
from keystone import config
from keystone import exception
from keystone.openstack.common import jsonutils
CONF = config.CONF
# Header used to transmit the auth token
AUTH_TOKEN_HEADER = 'X-Auth-Token'
# Environment variable used to pass the request context
CONTEXT_ENV = wsgi.CONTEXT_ENV
# Environment variable used to pass the request params
PARAMS_ENV = wsgi.PARAMS_ENV
class TokenAuthMiddleware(wsgi.Middleware):
    def process_request(self, request):
        # Propagate the caller's X-Auth-Token into the per-request context.
        token = request.headers.get(AUTH_TOKEN_HEADER)
        context = request.environ.get(CONTEXT_ENV, {})
        context['token_id'] = token
        request.environ[CONTEXT_ENV] = context


class AdminTokenAuthMiddleware(wsgi.Middleware):
    """A trivial filter that checks for a pre-defined admin token.
    Sets 'is_admin' to true in the context, expected to be checked by
    methods that are admin-only.
    """

    def process_request(self, request):
        token = request.headers.get(AUTH_TOKEN_HEADER)
        context = request.environ.get(CONTEXT_ENV, {})
        # Plain string comparison against the configured bootstrap token.
        context['is_admin'] = (token == CONF.admin_token)
        request.environ[CONTEXT_ENV] = context


class PostParamsMiddleware(wsgi.Middleware):
    """Middleware to allow method arguments to be passed as POST parameters.
    Filters out the parameters `self`, `context` and anything beginning with
    an underscore.
    """

    def process_request(self, request):
        params_parsed = request.params
        params = {}
        for k, v in params_parsed.iteritems():
            # Strip reserved names and private (underscore-prefixed) keys.
            if k in ('self', 'context'):
                continue
            if k.startswith('_'):
                continue
            params[k] = v
        request.environ[PARAMS_ENV] = params


class JsonBodyMiddleware(wsgi.Middleware):
    """Middleware to allow method arguments to be passed as serialized JSON.
    Accepting arguments as JSON is useful for accepting data that may be more
    complex than simple primitives.
    In this case we accept it as urlencoded data under the key 'json' as in
    json=<urlencoded_json> but this could be extended to accept raw JSON
    in the POST body.
    Filters out the parameters `self`, `context` and anything beginning with
    an underscore.
    """

    def process_request(self, request):
        # Abort early if we don't have any work to do
        params_json = request.body
        if not params_json:
            return
        # Reject unrecognized content types. Empty string indicates
        # the client did not explicitly set the header
        if not request.content_type in ('application/json', ''):
            e = exception.ValidationError(attribute='application/json',
                                          target='Content-Type header')
            return wsgi.render_exception(e)

        params_parsed = {}
        try:
            params_parsed = jsonutils.loads(params_json)
        except ValueError:
            # Malformed JSON is a client error, rendered as a 400 response.
            e = exception.ValidationError(attribute='valid JSON',
                                          target='request body')
            return wsgi.render_exception(e)
        finally:
            # Normalize any falsy parse result (e.g. null, {}) to a dict.
            if not params_parsed:
                params_parsed = {}

        params = {}
        for k, v in params_parsed.iteritems():
            # Strip reserved names and private (underscore-prefixed) keys.
            if k in ('self', 'context'):
                continue
            if k.startswith('_'):
                continue
            params[k] = v

        request.environ[PARAMS_ENV] = params
class XmlBodyMiddleware(wsgi.Middleware):
    """De/serializes XML to/from JSON."""

    def process_request(self, request):
        """Transform the request from XML to JSON."""
        incoming_xml = 'application/xml' in str(request.content_type)
        if incoming_xml and request.body:
            # Rewrite the request in place so downstream handlers only ever
            # see JSON.
            request.content_type = 'application/json'
            request.body = jsonutils.dumps(serializer.from_xml(request.body))

    def process_response(self, request, response):
        """Transform the response from JSON to XML."""
        outgoing_xml = 'application/xml' in str(request.accept)
        if outgoing_xml and response.body:
            response.content_type = 'application/xml'
            try:
                body_obj = jsonutils.loads(response.body)
                response.body = serializer.to_xml(body_obj)
            except Exception:
                # Deliberately broad: any serialization failure is re-raised
                # carrying the raw body so the problem is visible upstream.
                raise exception.Error(message=response.body)
        return response


class NormalizingFilter(wsgi.Middleware):
    """Middleware filter to handle URL normalization."""

    def process_request(self, request):
        """Normalizes URLs."""
        # Removes a trailing slash from the given path, if any.
        if (len(request.environ['PATH_INFO']) > 1 and
                request.environ['PATH_INFO'][-1] == '/'):
            request.environ['PATH_INFO'] = request.environ['PATH_INFO'][:-1]
        # Rewrites path to root if no path is given.
        elif not request.environ['PATH_INFO']:
            request.environ['PATH_INFO'] = '/'
class RequestBodySizeLimiter(wsgi.Middleware):
    """Limit the size of an incoming request."""

    def __init__(self, *args, **kwargs):
        # No extra state; kept only to make the constructor explicit.
        super(RequestBodySizeLimiter, self).__init__(*args, **kwargs)

    @webob.dec.wsgify(RequestClass=wsgi.Request)
    def __call__(self, req):
        # Reject requests whose declared Content-Length exceeds the cap.
        # NOTE(review): relies on Python 2 ordering where None > int is
        # False when Content-Length is absent -- would TypeError on Py3.
        if req.content_length > CONF.max_request_body_size:
            raise exception.RequestTooLarge()
        if req.content_length is None and req.is_body_readable:
            # Unsized (e.g. chunked) bodies: wrap the stream so reads past
            # the cap fail instead of buffering unbounded data.
            limiter = utils.LimitingReader(req.body_file,
                                           CONF.max_request_body_size)
            req.body_file = limiter
        # Returning the wrapped application makes webob's wsgify decorator
        # continue down the WSGI pipeline.
        return self.application
|
5558_4
|
crossvul
|
py
|
CWE-119
|
Improper Restriction of Operations within the Bounds of a Memory Buffer - Certain languages allow direct addressing of memory locations and do not automatically ensure that these locations are valid for the memory buffer that is being referenced.
|
python
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import gettext
import os
import sys
from keystone.common import logging
from keystone.openstack.common import cfg
gettext.install('keystone', unicode=1)
CONF = cfg.CONF
def setup_logging(conf):
    """
    Sets up the logging options for a log with supplied name
    :param conf: a cfg.ConfOpts object
    """
    if conf.log_config:
        # Use a logging configuration file for all settings...
        if os.path.exists(conf.log_config):
            logging.config.fileConfig(conf.log_config)
            return
        else:
            raise RuntimeError('Unable to locate specified logging '
                               'config file: %s' % conf.log_config)

    # No config file: assemble a handler/formatter from individual options.
    root_logger = logging.root
    if conf.debug:
        root_logger.setLevel(logging.DEBUG)
    elif conf.verbose:
        root_logger.setLevel(logging.INFO)
    else:
        root_logger.setLevel(logging.WARNING)

    formatter = logging.Formatter(conf.log_format, conf.log_date_format)

    if conf.use_syslog:
        try:
            # Map the configured facility name onto the SysLogHandler constant.
            facility = getattr(logging.SysLogHandler,
                               conf.syslog_log_facility)
        except AttributeError:
            raise ValueError(_('Invalid syslog facility'))

        handler = logging.SysLogHandler(address='/dev/log',
                                        facility=facility)
    elif conf.log_file:
        logfile = conf.log_file
        if conf.log_dir:
            logfile = os.path.join(conf.log_dir, logfile)
        # WatchedFileHandler reopens the file after external log rotation.
        handler = logging.WatchedFileHandler(logfile)
    else:
        handler = logging.StreamHandler(sys.stdout)

    handler.setFormatter(formatter)
    root_logger.addHandler(handler)
# Thin wrappers around oslo cfg registration; each pops the optional
# 'conf' (target ConfigOpts, default global CONF) and 'group' kwargs and
# forwards the rest to the matching cfg.*Opt constructor.
def register_str(*args, **kw):
    conf = kw.pop('conf', CONF)
    group = kw.pop('group', None)
    return conf.register_opt(cfg.StrOpt(*args, **kw), group=group)


def register_cli_str(*args, **kw):
    conf = kw.pop('conf', CONF)
    group = kw.pop('group', None)
    return conf.register_cli_opt(cfg.StrOpt(*args, **kw), group=group)


def register_bool(*args, **kw):
    conf = kw.pop('conf', CONF)
    group = kw.pop('group', None)
    return conf.register_opt(cfg.BoolOpt(*args, **kw), group=group)


def register_cli_bool(*args, **kw):
    conf = kw.pop('conf', CONF)
    group = kw.pop('group', None)
    return conf.register_cli_opt(cfg.BoolOpt(*args, **kw), group=group)


def register_int(*args, **kw):
    conf = kw.pop('conf', CONF)
    group = kw.pop('group', None)
    return conf.register_opt(cfg.IntOpt(*args, **kw), group=group)


def register_cli_int(*args, **kw):
    conf = kw.pop('conf', CONF)
    group = kw.pop('group', None)
    return conf.register_cli_opt(cfg.IntOpt(*args, **kw), group=group)
# Core service options.
register_str('admin_token', default='ADMIN')
register_str('bind_host', default='0.0.0.0')
# NOTE(review): the port options are registered as StrOpt with int defaults;
# callers apparently coerce as needed -- confirm before changing to IntOpt.
register_str('compute_port', default=8774)
register_str('admin_port', default=35357)
register_str('public_port', default=5000)
register_str('onready')
register_str('auth_admin_prefix', default='')
#ssl options
register_bool('enable', group='ssl', default=False)
register_str('certfile', group='ssl', default=None)
register_str('keyfile', group='ssl', default=None)
register_str('ca_certs', group='ssl', default=None)
register_bool('cert_required', group='ssl', default=False)
#signing options
register_str('token_format', group='signing',
             default="UUID")
register_str('certfile', group='signing',
             default="/etc/keystone/ssl/certs/signing_cert.pem")
register_str('keyfile', group='signing',
             default="/etc/keystone/ssl/private/signing_key.pem")
register_str('ca_certs', group='signing',
             default="/etc/keystone/ssl/certs/ca.pem")
register_int('key_size', group='signing', default=1024)
register_int('valid_days', group='signing', default=3650)
register_str('ca_password', group='signing', default=None)
# sql options
register_str('connection', group='sql', default='sqlite:///keystone.db')
register_int('idle_timeout', group='sql', default=200)
# Backend driver selection per subsystem.
register_str('driver', group='catalog',
             default='keystone.catalog.backends.sql.Catalog')
register_str('driver', group='identity',
             default='keystone.identity.backends.sql.Identity')
register_str('driver', group='policy',
             default='keystone.policy.backends.rules.Policy')
register_str('driver', group='token',
             default='keystone.token.backends.kvs.Token')
register_str('driver', group='ec2',
             default='keystone.contrib.ec2.backends.kvs.Ec2')
register_str('driver', group='stats',
             default='keystone.contrib.stats.backends.kvs.Stats')
#ldap
register_str('url', group='ldap', default='ldap://localhost')
register_str('user', group='ldap', default='dc=Manager,dc=example,dc=com')
register_str('password', group='ldap', default='freeipa4all')
register_str('suffix', group='ldap', default='cn=example,cn=com')
register_bool('use_dumb_member', group='ldap', default=False)
register_str('user_name_attribute', group='ldap', default='sn')
register_str('user_tree_dn', group='ldap', default=None)
register_str('user_objectclass', group='ldap', default='inetOrgPerson')
register_str('user_id_attribute', group='ldap', default='cn')
register_str('tenant_tree_dn', group='ldap', default=None)
register_str('tenant_objectclass', group='ldap', default='groupOfNames')
register_str('tenant_id_attribute', group='ldap', default='cn')
register_str('tenant_member_attribute', group='ldap', default='member')
register_str('tenant_name_attribute', group='ldap', default='ou')
register_str('role_tree_dn', group='ldap', default=None)
register_str('role_objectclass', group='ldap', default='organizationalRole')
register_str('role_id_attribute', group='ldap', default='cn')
register_str('role_member_attribute', group='ldap', default='roleOccupant')
#pam
register_str('url', group='pam', default=None)
register_str('userid', group='pam', default=None)
register_str('password', group='pam', default=None)
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import gettext
import os
import sys
from keystone.common import logging
from keystone.openstack.common import cfg
gettext.install('keystone', unicode=1)
CONF = cfg.CONF
def setup_logging(conf):
    """
    Sets up the logging options for a log with supplied name
    :param conf: a cfg.ConfOpts object
    """
    if conf.log_config:
        # Use a logging configuration file for all settings...
        if os.path.exists(conf.log_config):
            logging.config.fileConfig(conf.log_config)
            return
        else:
            raise RuntimeError('Unable to locate specified logging '
                               'config file: %s' % conf.log_config)

    # No config file: assemble a handler/formatter from individual options.
    root_logger = logging.root
    if conf.debug:
        root_logger.setLevel(logging.DEBUG)
    elif conf.verbose:
        root_logger.setLevel(logging.INFO)
    else:
        root_logger.setLevel(logging.WARNING)

    formatter = logging.Formatter(conf.log_format, conf.log_date_format)

    if conf.use_syslog:
        try:
            # Map the configured facility name onto the SysLogHandler constant.
            facility = getattr(logging.SysLogHandler,
                               conf.syslog_log_facility)
        except AttributeError:
            raise ValueError(_('Invalid syslog facility'))

        handler = logging.SysLogHandler(address='/dev/log',
                                        facility=facility)
    elif conf.log_file:
        logfile = conf.log_file
        if conf.log_dir:
            logfile = os.path.join(conf.log_dir, logfile)
        # WatchedFileHandler reopens the file after external log rotation.
        handler = logging.WatchedFileHandler(logfile)
    else:
        handler = logging.StreamHandler(sys.stdout)

    handler.setFormatter(formatter)
    root_logger.addHandler(handler)


# Thin wrappers around oslo cfg registration; each pops the optional
# 'conf' (target ConfigOpts, default global CONF) and 'group' kwargs and
# forwards the rest to the matching cfg.*Opt constructor.
def register_str(*args, **kw):
    conf = kw.pop('conf', CONF)
    group = kw.pop('group', None)
    return conf.register_opt(cfg.StrOpt(*args, **kw), group=group)


def register_cli_str(*args, **kw):
    conf = kw.pop('conf', CONF)
    group = kw.pop('group', None)
    return conf.register_cli_opt(cfg.StrOpt(*args, **kw), group=group)


def register_bool(*args, **kw):
    conf = kw.pop('conf', CONF)
    group = kw.pop('group', None)
    return conf.register_opt(cfg.BoolOpt(*args, **kw), group=group)


def register_cli_bool(*args, **kw):
    conf = kw.pop('conf', CONF)
    group = kw.pop('group', None)
    return conf.register_cli_opt(cfg.BoolOpt(*args, **kw), group=group)


def register_int(*args, **kw):
    conf = kw.pop('conf', CONF)
    group = kw.pop('group', None)
    return conf.register_opt(cfg.IntOpt(*args, **kw), group=group)


def register_cli_int(*args, **kw):
    conf = kw.pop('conf', CONF)
    group = kw.pop('group', None)
    return conf.register_cli_opt(cfg.IntOpt(*args, **kw), group=group)
# Core service options.
register_str('admin_token', default='ADMIN')
register_str('bind_host', default='0.0.0.0')
# NOTE(review): the port options are registered as StrOpt with int defaults;
# callers apparently coerce as needed -- confirm before changing to IntOpt.
register_str('compute_port', default=8774)
register_str('admin_port', default=35357)
register_str('public_port', default=5000)
register_str('onready')
register_str('auth_admin_prefix', default='')
# Upper bounds used to reject oversized request attributes.
register_int('max_param_size', default=64)
# we allow tokens to be a bit larger to accommodate PKI
register_int('max_token_size', default=8192)
#ssl options
register_bool('enable', group='ssl', default=False)
register_str('certfile', group='ssl', default=None)
register_str('keyfile', group='ssl', default=None)
register_str('ca_certs', group='ssl', default=None)
register_bool('cert_required', group='ssl', default=False)
#signing options
register_str('token_format', group='signing',
             default="UUID")
register_str('certfile', group='signing',
             default="/etc/keystone/ssl/certs/signing_cert.pem")
register_str('keyfile', group='signing',
             default="/etc/keystone/ssl/private/signing_key.pem")
register_str('ca_certs', group='signing',
             default="/etc/keystone/ssl/certs/ca.pem")
register_int('key_size', group='signing', default=1024)
register_int('valid_days', group='signing', default=3650)
register_str('ca_password', group='signing', default=None)
# sql options
register_str('connection', group='sql', default='sqlite:///keystone.db')
register_int('idle_timeout', group='sql', default=200)
# Backend driver selection per subsystem.
register_str('driver', group='catalog',
             default='keystone.catalog.backends.sql.Catalog')
register_str('driver', group='identity',
             default='keystone.identity.backends.sql.Identity')
register_str('driver', group='policy',
             default='keystone.policy.backends.rules.Policy')
register_str('driver', group='token',
             default='keystone.token.backends.kvs.Token')
register_str('driver', group='ec2',
             default='keystone.contrib.ec2.backends.kvs.Ec2')
register_str('driver', group='stats',
             default='keystone.contrib.stats.backends.kvs.Stats')
#ldap
register_str('url', group='ldap', default='ldap://localhost')
register_str('user', group='ldap', default='dc=Manager,dc=example,dc=com')
register_str('password', group='ldap', default='freeipa4all')
register_str('suffix', group='ldap', default='cn=example,cn=com')
register_bool('use_dumb_member', group='ldap', default=False)
register_str('user_name_attribute', group='ldap', default='sn')
register_str('user_tree_dn', group='ldap', default=None)
register_str('user_objectclass', group='ldap', default='inetOrgPerson')
register_str('user_id_attribute', group='ldap', default='cn')
register_str('tenant_tree_dn', group='ldap', default=None)
register_str('tenant_objectclass', group='ldap', default='groupOfNames')
register_str('tenant_id_attribute', group='ldap', default='cn')
register_str('tenant_member_attribute', group='ldap', default='member')
register_str('tenant_name_attribute', group='ldap', default='ou')
register_str('role_tree_dn', group='ldap', default=None)
register_str('role_objectclass', group='ldap', default='organizationalRole')
register_str('role_id_attribute', group='ldap', default='cn')
register_str('role_member_attribute', group='ldap', default='roleOccupant')
#pam
register_str('url', group='pam', default=None)
register_str('userid', group='pam', default=None)
register_str('password', group='pam', default=None)
|
5559_0
|
crossvul
|
py
|
CWE-119
|
Improper Restriction of Operations within the Bounds of a Memory Buffer - Certain languages allow direct addressing of memory locations and do not automatically ensure that these locations are valid for the memory buffer that is being referenced.
|
python
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
class Error(StandardError):
    """Base error class.
    Child classes should define an HTTP status code, title, and a doc string.
    """
    # NOTE(review): StandardError is Python 2 only (removed in Python 3).
    code = None
    title = None

    def __init__(self, message=None, **kwargs):
        """Use the doc string as the error message by default."""
        # The class docstring is a %-format message template; a missing
        # kwarg propagates as an unguarded KeyError from this constructor.
        message = message or self.__doc__ % kwargs
        super(Error, self).__init__(message)

    def __str__(self):
        """Cleans up line breaks and indentation from doc strings."""
        string = super(Error, self).__str__()
        string = re.sub('[ \n]+', ' ', string)
        string = string.strip()
        return string
# NOTE(review): every docstring below is a runtime %-format message template;
# do not reword a docstring without checking raise-site kwargs.
class ValidationError(Error):
    """Expecting to find %(attribute)s in %(target)s.
    The server could not comply with the request since it is either malformed
    or otherwise incorrect.
    The client is assumed to be in error.
    """
    code = 400
    title = 'Bad Request'


class Unauthorized(Error):
    """The request you have made requires authentication."""
    code = 401
    title = 'Not Authorized'


class Forbidden(Error):
    """You are not authorized to perform the requested action."""
    code = 403
    title = 'Not Authorized'


class ForbiddenAction(Forbidden):
    """You are not authorized to perform the requested action: %(action)s"""


class NotFound(Error):
    """Could not find: %(target)s"""
    code = 404
    title = 'Not Found'


class EndpointNotFound(NotFound):
    """Could not find endpoint: %(endpoint_id)s"""


class MetadataNotFound(NotFound):
    """An unhandled exception has occurred: Could not find metadata."""
    # (dolph): metadata is not a user-facing concept,
    # so this exception should not be exposed


class RoleNotFound(NotFound):
    """Could not find role: %(role_id)s"""


class ServiceNotFound(NotFound):
    """Could not find service: %(service_id)s"""


class TenantNotFound(NotFound):
    """Could not find tenant: %(tenant_id)s"""


class TokenNotFound(NotFound):
    """Could not find token: %(token_id)s"""


class UserNotFound(NotFound):
    """Could not find user: %(user_id)s"""


class Conflict(Error):
    """Conflict occurred attempting to store %(type)s.
    %(details)s
    """
    code = 409
    title = 'Conflict'


class UnexpectedError(Error):
    """An unexpected error prevented the server from fulfilling your request.
    %(exception)s
    """
    code = 500
    title = 'Internal Server Error'


class MalformedEndpoint(UnexpectedError):
    """Malformed endpoint URL (see ERROR log for details): %(endpoint)s"""


class NotImplemented(Error):
    """The action you have requested has not been implemented."""
    # NOTE(review): shadows the NotImplemented builtin within this module.
    code = 501
    title = 'Not Implemented'
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
# NOTE(review): StandardError is Python 2-only; under Python 3 this would
# need to derive from Exception.
class Error(StandardError):
    """Base error class.
    Child classes should define an HTTP status code, title, and a doc string.
    """
    # HTTP status code and reason phrase, supplied by subclasses.
    code = None
    title = None

    def __init__(self, message=None, **kwargs):
        """Use the doc string as the error message by default."""
        # The class docstring is a %-style template filled from kwargs, so a
        # subclass defines its message simply by writing a docstring.
        message = message or self.__doc__ % kwargs
        super(Error, self).__init__(message)

    def __str__(self):
        """Cleans up line breaks and indentation from doc strings."""
        string = super(Error, self).__str__()
        # Collapse runs of spaces/newlines left over from docstring layout.
        string = re.sub('[ \n]+', ' ', string)
        string = string.strip()
        return string
# 400 errors: the request is malformed (missing attribute) or an attribute
# exceeds its permitted size.
class ValidationError(Error):
    """Expecting to find %(attribute)s in %(target)s.
    The server could not comply with the request since it is either malformed
    or otherwise incorrect.
    The client is assumed to be in error.
    """
    code = 400
    title = 'Bad Request'


class ValidationSizeError(Error):
    """Request attribute %(attribute)s must be less than or equal to %(size)i.
    The server could not comply with the request because the attribute
    size is invalid (too large).
    The client is assumed to be in error.
    """
    code = 400
    title = 'Bad Request'
# 401: authentication is required or the supplied credentials failed.
class Unauthorized(Error):
    """The request you have made requires authentication."""
    code = 401
    title = 'Not Authorized'


# 403: authenticated, but the action is not permitted.
class Forbidden(Error):
    """You are not authorized to perform the requested action."""
    code = 403
    title = 'Not Authorized'
# 403 error for a named action; %(action)s in the docstring is filled in by
# Error.__init__, which uses the class docstring as the message template.
class ForbiddenAction(Forbidden):
    """You are not authorized to perform the requested action: %(action)s"""
# 404 base error; subclasses narrow the message to a specific resource type.
# Docstrings double as user-facing message templates (see Error.__init__).
class NotFound(Error):
    """Could not find: %(target)s"""
    code = 404
    title = 'Not Found'


class EndpointNotFound(NotFound):
    """Could not find endpoint: %(endpoint_id)s"""


class MetadataNotFound(NotFound):
    """An unhandled exception has occurred: Could not find metadata."""
    # (dolph): metadata is not a user-facing concept,
    # so this exception should not be exposed


class RoleNotFound(NotFound):
    """Could not find role: %(role_id)s"""


class ServiceNotFound(NotFound):
    """Could not find service: %(service_id)s"""


class TenantNotFound(NotFound):
    """Could not find tenant: %(tenant_id)s"""


class TokenNotFound(NotFound):
    """Could not find token: %(token_id)s"""


class UserNotFound(NotFound):
    """Could not find user: %(user_id)s"""
# 409 error raised when storing a resource conflicts with existing state.
class Conflict(Error):
    """Conflict occurred attempting to store %(type)s.
    %(details)s
    """
    code = 409
    title = 'Conflict'
# 500 error wrapping an unexpected internal failure; %(exception)s carries
# the original error text.
class UnexpectedError(Error):
    """An unexpected error prevented the server from fulfilling your request.
    %(exception)s
    """
    code = 500
    title = 'Internal Server Error'


class MalformedEndpoint(UnexpectedError):
    """Malformed endpoint URL (see ERROR log for details): %(endpoint)s"""
# 501 error; note this name intentionally shadows the builtin NotImplemented
# within this module.
class NotImplemented(Error):
    """The action you have requested has not been implemented."""
    code = 501
    title = 'Not Implemented'
|
5559_1
|
crossvul
|
py
|
CWE-119
|
Improper Restriction of Operations within the Bounds of a Memory Buffer - Certain languages allow direct addressing of memory locations and do not automatically ensure that these locations are valid for the memory buffer that is being referenced.
|
python
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
import routes
import json
from keystone import config
from keystone import catalog
from keystone.common import cms
from keystone.common import logging
from keystone.common import wsgi
from keystone import exception
from keystone import identity
from keystone.openstack.common import timeutils
from keystone import policy
from keystone import token
# Module-level logger shared by all controllers in this file.
LOG = logging.getLogger(__name__)
class AdminRouter(wsgi.ComposingRouter):
    """Routes the v2.0 admin API: version discovery, token operations,
    certificates, extensions, and identity administration."""

    def __init__(self):
        route_map = routes.Mapper()

        versions = VersionController('admin')
        route_map.connect('/', controller=versions, action='get_version')

        # Token and certificate operations all share one controller.
        tokens = TokenController()
        token_routes = [
            ('/tokens', 'authenticate', 'POST'),
            ('/tokens/revoked', 'revocation_list', 'GET'),
            ('/tokens/{token_id}', 'validate_token', 'GET'),
            ('/tokens/{token_id}', 'validate_token_head', 'HEAD'),
            ('/tokens/{token_id}', 'delete_token', 'DELETE'),
            ('/tokens/{token_id}/endpoints', 'endpoints', 'GET'),
            # Certificates used to verify auth tokens.
            ('/certificates/ca', 'ca_cert', 'GET'),
            ('/certificates/signing', 'signing_cert', 'GET'),
        ]
        for path, action, method in token_routes:
            route_map.connect(path,
                              controller=tokens,
                              action=action,
                              conditions={'method': [method]})

        # Extension discovery.
        extensions = AdminExtensionsController()
        route_map.connect('/extensions',
                          controller=extensions,
                          action='get_extensions_info',
                          conditions={'method': ['GET']})
        route_map.connect('/extensions/{extension_alias}',
                          controller=extensions,
                          action='get_extension_info',
                          conditions={'method': ['GET']})

        sub_routers = [identity.AdminRouter()]
        super(AdminRouter, self).__init__(route_map, sub_routers)
class PublicRouter(wsgi.ComposingRouter):
    """Routes the v2.0 public API: version info, authentication,
    certificates, extensions, and public identity operations."""

    def __init__(self):
        route_map = routes.Mapper()

        versions = VersionController('public')
        route_map.connect('/', controller=versions, action='get_version')

        # Token and certificate operations share one controller.
        tokens = TokenController()
        for path, action, method in [
                ('/tokens', 'authenticate', 'POST'),
                ('/certificates/ca', 'ca_cert', 'GET'),
                ('/certificates/signing', 'signing_cert', 'GET')]:
            route_map.connect(path,
                              controller=tokens,
                              action=action,
                              conditions={'method': [method]})

        # Extension discovery.
        extensions = PublicExtensionsController()
        route_map.connect('/extensions',
                          controller=extensions,
                          action='get_extensions_info',
                          conditions={'method': ['GET']})
        route_map.connect('/extensions/{extension_alias}',
                          controller=extensions,
                          action='get_extension_info',
                          conditions={'method': ['GET']})

        sub_routers = [identity.PublicRouter()]
        super(PublicRouter, self).__init__(route_map, sub_routers)
class PublicVersionRouter(wsgi.ComposingRouter):
    """Serves only the version listing on the public endpoint root."""

    def __init__(self):
        route_map = routes.Mapper()
        route_map.connect('/',
                          controller=VersionController('public'),
                          action='get_versions')
        super(PublicVersionRouter, self).__init__(route_map, [])
class AdminVersionRouter(wsgi.ComposingRouter):
    """Serves only the version listing on the admin endpoint root."""

    def __init__(self):
        route_map = routes.Mapper()
        route_map.connect('/',
                          controller=VersionController('admin'),
                          action='get_versions')
        super(AdminVersionRouter, self).__init__(route_map, [])
class VersionController(wsgi.Application):
    """Reports the API versions available on this endpoint.

    ``version_type`` ('admin' or 'public') selects which catalog URL is
    advertised in the version links.
    """

    def __init__(self, version_type):
        self.catalog_api = catalog.Manager()
        # e.g. 'adminURL' or 'publicURL' -- key into a catalog service entry.
        self.url_key = '%sURL' % version_type
        super(VersionController, self).__init__()

    def _get_identity_url(self, context):
        # Scan the catalog for the first 'identity' service and return its
        # URL for the configured interface; 501 if none is registered.
        catalog_ref = self.catalog_api.get_catalog(context=context,
                                                   user_id=None,
                                                   tenant_id=None)
        for region, region_ref in catalog_ref.iteritems():
            for service, service_ref in region_ref.iteritems():
                if service == 'identity':
                    return service_ref[self.url_key]
        raise exception.NotImplemented()

    def _get_versions_list(self, context):
        """The list of versions is dependent on the context."""
        identity_url = self._get_identity_url(context)
        # Normalize so 'self' links always end with a trailing slash.
        if not identity_url.endswith('/'):
            identity_url = identity_url + '/'
        versions = {}
        versions['v2.0'] = {
            'id': 'v2.0',
            'status': 'beta',
            'updated': '2011-11-19T00:00:00Z',
            'links': [
                {
                    'rel': 'self',
                    'href': identity_url,
                }, {
                    'rel': 'describedby',
                    'type': 'text/html',
                    'href': 'http://docs.openstack.org/api/openstack-'
                            'identity-service/2.0/content/'
                }, {
                    'rel': 'describedby',
                    'type': 'application/pdf',
                    'href': 'http://docs.openstack.org/api/openstack-'
                            'identity-service/2.0/identity-dev-guide-'
                            '2.0.pdf'
                }
            ],
            'media-types': [
                {
                    'base': 'application/json',
                    'type': 'application/vnd.openstack.identity-v2.0'
                            '+json'
                }, {
                    'base': 'application/xml',
                    'type': 'application/vnd.openstack.identity-v2.0'
                            '+xml'
                }
            ]
        }
        return versions

    def get_versions(self, context):
        # 300 Multiple Choices: the client should pick one of the versions.
        versions = self._get_versions_list(context)
        return wsgi.render_response(status=(300, 'Multiple Choices'), body={
            'versions': {
                'values': versions.values()
            }
        })

    def get_version(self, context):
        # Single-version form used at the endpoint root.
        versions = self._get_versions_list(context)
        return wsgi.render_response(body={
            'version': versions['v2.0']
        })
class NoopController(wsgi.Application):
    """Controller whose single action intentionally does nothing and
    returns an empty body."""

    def __init__(self):
        super(NoopController, self).__init__()

    def noop(self, context):
        # Deliberate no-op.
        return {}
class TokenController(wsgi.Application):
    """Implements the v2.0 token API: authentication, validation,
    revocation, endpoint listing, and certificate retrieval."""

    def __init__(self):
        # Managers for the backends this controller coordinates.
        self.catalog_api = catalog.Manager()
        self.identity_api = identity.Manager()
        self.token_api = token.Manager()
        self.policy_api = policy.Manager()
        super(TokenController, self).__init__()

    def ca_cert(self, context, auth=None):
        """Return the CA certificate used to verify signed tokens.

        Uses a context manager so the handle is closed even if read() raises.
        """
        with open(config.CONF.signing.ca_certs, 'r') as ca_file:
            data = ca_file.read()
        return data

    def signing_cert(self, context, auth=None):
        """Return the signing certificate used to validate PKI tokens."""
        with open(config.CONF.signing.certfile, 'r') as cert_file:
            data = cert_file.read()
        return data

    def authenticate(self, context, auth=None):
        """Authenticate credentials and return a token.

        Accept auth as a dict that looks like::

            {
                "auth":{
                    "passwordCredentials":{
                        "username":"test_user",
                        "password":"mypass"
                    },
                    "tenantName":"customer-x"
                }
            }

        In this case, tenant is optional, if not provided the token will be
        considered "unscoped" and can later be used to get a scoped token.

        Alternatively, this call accepts auth with only a token and tenant
        that will return a token that is scoped to that tenant.
        """
        # NOTE(review): user-supplied fields (userId, username, password,
        # tenantName, tenantId) are not length-limited here before backend
        # lookups -- consider ValidationSizeError bounds as in newer code.
        if 'passwordCredentials' in auth:
            user_id = auth['passwordCredentials'].get('userId', None)
            username = auth['passwordCredentials'].get('username', '')
            password = auth['passwordCredentials'].get('password', '')
            tenant_name = auth.get('tenantName', None)

            # A username, when given, takes precedence over an explicit id.
            if username:
                try:
                    user_ref = self.identity_api.get_user_by_name(
                        context=context, user_name=username)
                    user_id = user_ref['id']
                except exception.UserNotFound:
                    raise exception.Unauthorized()

            # more compat
            tenant_id = auth.get('tenantId', None)
            if tenant_name:
                try:
                    tenant_ref = self.identity_api.get_tenant_by_name(
                        context=context, tenant_name=tenant_name)
                    tenant_id = tenant_ref['id']
                except exception.TenantNotFound:
                    raise exception.Unauthorized()

            try:
                auth_info = self.identity_api.authenticate(
                    context=context,
                    user_id=user_id,
                    password=password,
                    tenant_id=tenant_id)
                (user_ref, tenant_ref, metadata_ref) = auth_info

                # If the user is disabled don't allow them to authenticate
                if not user_ref.get('enabled', True):
                    LOG.warning('User %s is disabled' % user_id)
                    raise exception.Unauthorized()

                # If the tenant is disabled don't allow them to authenticate
                if tenant_ref and not tenant_ref.get('enabled', True):
                    LOG.warning('Tenant %s is disabled' % tenant_id)
                    raise exception.Unauthorized()
            except AssertionError as e:
                # Backends signal bad credentials via AssertionError.
                raise exception.Unauthorized(e.message)

            auth_token_data = dict(zip(['user', 'tenant', 'metadata'],
                                       auth_info))
            expiry = self.token_api._get_default_expire_time(context=context)
            if tenant_ref:
                catalog_ref = self.catalog_api.get_catalog(
                    context=context,
                    user_id=user_ref['id'],
                    tenant_id=tenant_ref['id'],
                    metadata=metadata_ref)
            else:
                catalog_ref = {}

        elif 'token' in auth:
            # Token exchange: trade an existing (possibly unscoped) token
            # for one scoped to a tenant.
            old_token = auth['token'].get('id', None)
            tenant_name = auth.get('tenantName')

            try:
                old_token_ref = self.token_api.get_token(context=context,
                                                         token_id=old_token)
            except exception.NotFound:
                LOG.warning("Token not found: " + str(old_token))
                raise exception.Unauthorized()

            user_ref = old_token_ref['user']
            user_id = user_ref['id']

            current_user_ref = self.identity_api.get_user(context=context,
                                                          user_id=user_id)

            # If the user is disabled don't allow them to authenticate
            if not current_user_ref.get('enabled', True):
                LOG.warning('User %s is disabled' % user_id)
                raise exception.Unauthorized()

            if tenant_name:
                tenant_ref = self.identity_api.get_tenant_by_name(
                    context=context,
                    tenant_name=tenant_name)
                tenant_id = tenant_ref['id']
            else:
                tenant_id = auth.get('tenantId', None)

            tenants = self.identity_api.get_tenants_for_user(context, user_id)
            if tenant_id:
                # FIX: message previously read 'is authorized', but this
                # branch logs the *unauthorized* case.
                if tenant_id not in tenants:
                    LOG.warning('User %s is not authorized for tenant %s'
                                % (user_id, tenant_id))
                    raise exception.Unauthorized()

            # The exchanged token keeps the original expiry.
            expiry = old_token_ref['expires']
            try:
                tenant_ref = self.identity_api.get_tenant(
                    context=context,
                    tenant_id=tenant_id)
            except exception.TenantNotFound:
                tenant_ref = None
                metadata_ref = {}
                catalog_ref = {}
            except exception.MetadataNotFound:
                metadata_ref = {}
                catalog_ref = {}

            # If the tenant is disabled don't allow them to authenticate
            if tenant_ref and not tenant_ref.get('enabled', True):
                LOG.warning('Tenant %s is disabled' % tenant_id)
                raise exception.Unauthorized()

            if tenant_ref:
                metadata_ref = self.identity_api.get_metadata(
                    context=context,
                    user_id=user_ref['id'],
                    tenant_id=tenant_ref['id'])
                catalog_ref = self.catalog_api.get_catalog(
                    context=context,
                    user_id=user_ref['id'],
                    tenant_id=tenant_ref['id'],
                    metadata=metadata_ref)

            auth_token_data = dict(dict(user=current_user_ref,
                                        tenant=tenant_ref,
                                        metadata=metadata_ref))

        auth_token_data['expires'] = expiry
        auth_token_data['id'] = 'placeholder'

        # Resolve role ids from metadata into role name dicts.
        roles_ref = []
        for role_id in metadata_ref.get('roles', []):
            role_ref = self.identity_api.get_role(context, role_id)
            roles_ref.append(dict(name=role_ref['name']))

        token_data = self._format_token(auth_token_data, roles_ref)

        service_catalog = self._format_catalog(catalog_ref)
        token_data['access']['serviceCatalog'] = service_catalog

        # Mint the token id: random UUID, or CMS-signed token body for PKI.
        if config.CONF.signing.token_format == 'UUID':
            token_id = uuid.uuid4().hex
        elif config.CONF.signing.token_format == 'PKI':
            token_id = cms.cms_sign_token(json.dumps(token_data),
                                          config.CONF.signing.certfile,
                                          config.CONF.signing.keyfile)
        else:
            raise exception.UnexpectedError(
                'Invalid value for token_format: %s.'
                ' Allowed values are PKI or UUID.' %
                config.CONF.signing.token_format)
        try:
            self.token_api.create_token(
                context, token_id, dict(key=token_id,
                                        id=token_id,
                                        expires=auth_token_data['expires'],
                                        user=user_ref,
                                        tenant=tenant_ref,
                                        metadata=metadata_ref))
        except Exception as e:
            # an identical token may have been created already.
            # if so, return the token_data as it is also identical
            try:
                self.token_api.get_token(context=context,
                                         token_id=token_id)
            except exception.TokenNotFound:
                raise e

        token_data['access']['token']['id'] = token_id

        return token_data

    def _get_token_ref(self, context, token_id, belongs_to=None):
        """Returns a token if a valid one exists.

        Optionally, limited to a token owned by a specific tenant.
        """
        # TODO(termie): this stuff should probably be moved to middleware
        self.assert_admin(context)
        if cms.is_ans1_token(token_id):
            # PKI token: verify the CMS signature and rebuild the token dict
            # from the signed payload instead of hitting the token backend.
            data = json.loads(cms.cms_verify(cms.token_to_cms(token_id),
                                             config.CONF.signing.certfile,
                                             config.CONF.signing.ca_certs))
            data['access']['token']['user'] = data['access']['user']
            data['access']['token']['metadata'] = data['access']['metadata']
            if belongs_to:
                assert data['access']['token']['tenant']['id'] == belongs_to
            token_ref = data['access']['token']
        else:
            token_ref = self.token_api.get_token(context=context,
                                                 token_id=token_id)
        return token_ref

    # admin only
    def validate_token_head(self, context, token_id):
        """Check that a token is valid.

        Optionally, also ensure that it is owned by a specific tenant.

        Identical to ``validate_token``, except does not return a response.
        """
        belongs_to = context['query_string'].get('belongsTo')
        assert self._get_token_ref(context, token_id, belongs_to)

    # admin only
    def validate_token(self, context, token_id):
        """Check that a token is valid.

        Optionally, also ensure that it is owned by a specific tenant.

        Returns metadata about the token along any associated roles.
        """
        belongs_to = context['query_string'].get('belongsTo')
        token_ref = self._get_token_ref(context, token_id, belongs_to)

        # TODO(termie): optimize this call at some point and put it into the
        #               the return for metadata
        # fill out the roles in the metadata
        metadata_ref = token_ref['metadata']
        roles_ref = []
        for role_id in metadata_ref.get('roles', []):
            roles_ref.append(self.identity_api.get_role(context, role_id))

        # Get a service catalog if possible
        # This is needed for on-behalf-of requests
        catalog_ref = None
        if token_ref.get('tenant'):
            catalog_ref = self.catalog_api.get_catalog(
                context=context,
                user_id=token_ref['user']['id'],
                tenant_id=token_ref['tenant']['id'],
                metadata=metadata_ref)
        return self._format_token(token_ref, roles_ref, catalog_ref)

    def delete_token(self, context, token_id):
        """Delete a token, effectively invalidating it for authz."""
        # TODO(termie): this stuff should probably be moved to middleware
        self.assert_admin(context)
        self.token_api.delete_token(context=context, token_id=token_id)

    def revocation_list(self, context, auth=None):
        """Return the CMS-signed list of revoked tokens (admin only)."""
        self.assert_admin(context)
        tokens = self.token_api.list_revoked_tokens(context)

        # Normalize expiry timestamps to ISO strings before serializing.
        for t in tokens:
            expires = t['expires']
            if not (expires and isinstance(expires, unicode)):
                t['expires'] = timeutils.isotime(expires)
        data = {'revoked': tokens}
        json_data = json.dumps(data)
        signed_text = cms.cms_sign_text(json_data,
                                        config.CONF.signing.certfile,
                                        config.CONF.signing.keyfile)

        return {'signed': signed_text}

    def endpoints(self, context, token_id):
        """Return a list of endpoints available to the token."""
        self.assert_admin(context)

        token_ref = self._get_token_ref(context, token_id)

        catalog_ref = None
        if token_ref.get('tenant'):
            catalog_ref = self.catalog_api.get_catalog(
                context=context,
                user_id=token_ref['user']['id'],
                tenant_id=token_ref['tenant']['id'],
                metadata=token_ref['metadata'])

        return self._format_endpoint_list(catalog_ref)

    def _format_authenticate(self, token_ref, roles_ref, catalog_ref):
        # Token body plus the service catalog, as returned by authenticate.
        o = self._format_token(token_ref, roles_ref)
        o['access']['serviceCatalog'] = self._format_catalog(catalog_ref)
        return o

    def _format_token(self, token_ref, roles_ref, catalog_ref=None):
        # Build the v2.0 'access' document from an internal token reference.
        user_ref = token_ref['user']
        metadata_ref = token_ref['metadata']
        expires = token_ref['expires']
        if expires is not None:
            if not isinstance(expires, unicode):
                expires = timeutils.isotime(expires)
        o = {'access': {'token': {'id': token_ref['id'],
                                  'expires': expires,
                                  },
                        'user': {'id': user_ref['id'],
                                 'name': user_ref['name'],
                                 'username': user_ref['name'],
                                 'roles': roles_ref,
                                 'roles_links': metadata_ref.get('roles_links',
                                                                 [])
                                 }
                        }
             }
        if 'tenant' in token_ref and token_ref['tenant']:
            token_ref['tenant']['enabled'] = True
            o['access']['token']['tenant'] = token_ref['tenant']
        if catalog_ref is not None:
            o['access']['serviceCatalog'] = self._format_catalog(catalog_ref)
        if metadata_ref:
            if 'is_admin' in metadata_ref:
                o['access']['metadata'] = {'is_admin':
                                           metadata_ref['is_admin']}
            else:
                o['access']['metadata'] = {'is_admin': 0}
            if 'roles' in metadata_ref:
                o['access']['metadata']['roles'] = metadata_ref['roles']
        return o

    def _format_catalog(self, catalog_ref):
        """Munge catalogs from internal to output format
        Internal catalogs look like:

        {$REGION: {
            {$SERVICE: {
                $key1: $value1,
                ...
                }
            }
        }

        The legacy api wants them to look like

        [{'name': $SERVICE[name],
          'type': $SERVICE,
          'endpoints': [{
              'tenantId': $tenant_id,
              ...
              'region': $REGION,
              }],
          'endpoints_links': [],
         }]

        """
        if not catalog_ref:
            return []

        services = {}
        for region, region_ref in catalog_ref.iteritems():
            for service, service_ref in region_ref.iteritems():
                new_service_ref = services.get(service, {})
                new_service_ref['name'] = service_ref.pop('name')
                new_service_ref['type'] = service
                new_service_ref['endpoints_links'] = []
                service_ref['region'] = region

                endpoints_ref = new_service_ref.get('endpoints', [])
                endpoints_ref.append(service_ref)

                new_service_ref['endpoints'] = endpoints_ref
                services[service] = new_service_ref

        return services.values()

    def _format_endpoint_list(self, catalog_ref):
        """Formats a list of endpoints according to Identity API v2.

        The v2.0 API wants an endpoint list to look like::

            {
                'endpoints': [
                    {
                        'id': $endpoint_id,
                        'name': $SERVICE[name],
                        'type': $SERVICE,
                        'tenantId': $tenant_id,
                        'region': $REGION,
                    }
                ],
                'endpoints_links': [],
            }

        """
        if not catalog_ref:
            return {}

        endpoints = []
        for region_name, region_ref in catalog_ref.iteritems():
            for service_type, service_ref in region_ref.iteritems():
                endpoints.append({
                    'id': service_ref.get('id'),
                    'name': service_ref.get('name'),
                    'type': service_type,
                    'region': region_name,
                    'publicURL': service_ref.get('publicURL'),
                    'internalURL': service_ref.get('internalURL'),
                    'adminURL': service_ref.get('adminURL'),
                })

        return {'endpoints': endpoints, 'endpoints_links': []}
class ExtensionsController(wsgi.Application):
    """Base extensions controller to be extended by public and admin API's."""

    def __init__(self, extensions=None):
        super(ExtensionsController, self).__init__()
        self.extensions = extensions or {}

    def get_extensions_info(self, context):
        # List every registered extension.
        return {'extensions': {'values': self.extensions.values()}}

    def get_extension_info(self, context, extension_alias):
        # Look up one extension by alias; unknown aliases are a 404.
        if extension_alias not in self.extensions:
            raise exception.NotFound(target=extension_alias)
        return {'extension': self.extensions[extension_alias]}
# No public-API extensions are registered by default.
class PublicExtensionsController(ExtensionsController):
    pass
# Registers the hardcoded OS-KSADM admin extension on construction.
class AdminExtensionsController(ExtensionsController):
    def __init__(self, *args, **kwargs):
        super(AdminExtensionsController, self).__init__(*args, **kwargs)
        # TODO(dolph): Extensions should obviously provide this information
        #              themselves, but hardcoding it here allows us to match
        #              the API spec in the short term with minimal complexity.
        self.extensions['OS-KSADM'] = {
            'name': 'Openstack Keystone Admin',
            'namespace': 'http://docs.openstack.org/identity/api/ext/'
                         'OS-KSADM/v1.0',
            'alias': 'OS-KSADM',
            'updated': '2011-08-19T13:25:27-06:00',
            'description': 'Openstack extensions to Keystone v2.0 API '
                           'enabling Admin Operations.',
            'links': [
                {
                    'rel': 'describedby',
                    # TODO(dolph): link needs to be revised after
                    #              bug 928059 merges
                    'type': 'text/html',
                    'href': 'https://github.com/openstack/identity-api',
                }
            ]
        }
@logging.fail_gracefully
def public_app_factory(global_conf, **local_conf):
    """Paste app factory for the public (service) API router."""
    # Merged configuration is currently unused by the router itself.
    merged = global_conf.copy()
    merged.update(local_conf)
    return PublicRouter()
@logging.fail_gracefully
def admin_app_factory(global_conf, **local_conf):
    """Paste app factory for the admin API router."""
    # Merged configuration is currently unused by the router itself.
    merged = global_conf.copy()
    merged.update(local_conf)
    return AdminRouter()
@logging.fail_gracefully
def public_version_app_factory(global_conf, **local_conf):
    """Paste app factory for the public version-discovery router."""
    # Merged configuration is currently unused by the router itself.
    merged = global_conf.copy()
    merged.update(local_conf)
    return PublicVersionRouter()
@logging.fail_gracefully
def admin_version_app_factory(global_conf, **local_conf):
    """Paste app factory for the admin version-discovery router."""
    # Merged configuration is currently unused by the router itself.
    merged = global_conf.copy()
    merged.update(local_conf)
    return AdminVersionRouter()
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
import routes
import json
from keystone import config
from keystone import catalog
from keystone.common import cms
from keystone.common import logging
from keystone.common import utils
from keystone.common import wsgi
from keystone import exception
from keystone import identity
from keystone.openstack.common import timeutils
from keystone import policy
from keystone import token
# Module-level logger shared by all controllers in this file.
LOG = logging.getLogger(__name__)

# Upper bounds applied to user-supplied fields before any backend lookup
# (see the ValidationSizeError checks in authenticate()). MAX_TOKEN_SIZE
# presumably bounds token ids the same way -- usage not visible here.
MAX_PARAM_SIZE = config.CONF.max_param_size
MAX_TOKEN_SIZE = config.CONF.max_token_size
class AdminRouter(wsgi.ComposingRouter):
    """Routes the v2.0 admin API: version discovery, token operations,
    certificates, extensions, and identity administration."""

    def __init__(self):
        route_map = routes.Mapper()

        versions = VersionController('admin')
        route_map.connect('/', controller=versions, action='get_version')

        # Token and certificate operations all share one controller.
        tokens = TokenController()
        token_routes = [
            ('/tokens', 'authenticate', 'POST'),
            ('/tokens/revoked', 'revocation_list', 'GET'),
            ('/tokens/{token_id}', 'validate_token', 'GET'),
            ('/tokens/{token_id}', 'validate_token_head', 'HEAD'),
            ('/tokens/{token_id}', 'delete_token', 'DELETE'),
            ('/tokens/{token_id}/endpoints', 'endpoints', 'GET'),
            # Certificates used to verify auth tokens.
            ('/certificates/ca', 'ca_cert', 'GET'),
            ('/certificates/signing', 'signing_cert', 'GET'),
        ]
        for path, action, method in token_routes:
            route_map.connect(path,
                              controller=tokens,
                              action=action,
                              conditions={'method': [method]})

        # Extension discovery.
        extensions = AdminExtensionsController()
        route_map.connect('/extensions',
                          controller=extensions,
                          action='get_extensions_info',
                          conditions={'method': ['GET']})
        route_map.connect('/extensions/{extension_alias}',
                          controller=extensions,
                          action='get_extension_info',
                          conditions={'method': ['GET']})

        sub_routers = [identity.AdminRouter()]
        super(AdminRouter, self).__init__(route_map, sub_routers)
class PublicRouter(wsgi.ComposingRouter):
    """Routes the v2.0 public API: version info, authentication,
    certificates, extensions, and public identity operations."""

    def __init__(self):
        route_map = routes.Mapper()

        versions = VersionController('public')
        route_map.connect('/', controller=versions, action='get_version')

        # Token and certificate operations share one controller.
        tokens = TokenController()
        for path, action, method in [
                ('/tokens', 'authenticate', 'POST'),
                ('/certificates/ca', 'ca_cert', 'GET'),
                ('/certificates/signing', 'signing_cert', 'GET')]:
            route_map.connect(path,
                              controller=tokens,
                              action=action,
                              conditions={'method': [method]})

        # Extension discovery.
        extensions = PublicExtensionsController()
        route_map.connect('/extensions',
                          controller=extensions,
                          action='get_extensions_info',
                          conditions={'method': ['GET']})
        route_map.connect('/extensions/{extension_alias}',
                          controller=extensions,
                          action='get_extension_info',
                          conditions={'method': ['GET']})

        sub_routers = [identity.PublicRouter()]
        super(PublicRouter, self).__init__(route_map, sub_routers)
class PublicVersionRouter(wsgi.ComposingRouter):
    """Serves only the version listing on the public endpoint root."""

    def __init__(self):
        route_map = routes.Mapper()
        route_map.connect('/',
                          controller=VersionController('public'),
                          action='get_versions')
        super(PublicVersionRouter, self).__init__(route_map, [])
class AdminVersionRouter(wsgi.ComposingRouter):
    """Serves only the version listing on the admin endpoint root."""

    def __init__(self):
        route_map = routes.Mapper()
        route_map.connect('/',
                          controller=VersionController('admin'),
                          action='get_versions')
        super(AdminVersionRouter, self).__init__(route_map, [])
class VersionController(wsgi.Application):
    """Reports the API versions available on this endpoint.

    ``version_type`` ('admin' or 'public') selects which catalog URL is
    advertised in the version links.
    """

    def __init__(self, version_type):
        self.catalog_api = catalog.Manager()
        # e.g. 'adminURL' or 'publicURL' -- key into a catalog service entry.
        self.url_key = '%sURL' % version_type
        super(VersionController, self).__init__()

    def _get_identity_url(self, context):
        # Scan the catalog for the first 'identity' service and return its
        # URL for the configured interface; 501 if none is registered.
        catalog_ref = self.catalog_api.get_catalog(context=context,
                                                   user_id=None,
                                                   tenant_id=None)
        for region, region_ref in catalog_ref.iteritems():
            for service, service_ref in region_ref.iteritems():
                if service == 'identity':
                    return service_ref[self.url_key]
        raise exception.NotImplemented()

    def _get_versions_list(self, context):
        """The list of versions is dependent on the context."""
        identity_url = self._get_identity_url(context)
        # Normalize so 'self' links always end with a trailing slash.
        if not identity_url.endswith('/'):
            identity_url = identity_url + '/'
        versions = {}
        versions['v2.0'] = {
            'id': 'v2.0',
            'status': 'beta',
            'updated': '2011-11-19T00:00:00Z',
            'links': [
                {
                    'rel': 'self',
                    'href': identity_url,
                }, {
                    'rel': 'describedby',
                    'type': 'text/html',
                    'href': 'http://docs.openstack.org/api/openstack-'
                            'identity-service/2.0/content/'
                }, {
                    'rel': 'describedby',
                    'type': 'application/pdf',
                    'href': 'http://docs.openstack.org/api/openstack-'
                            'identity-service/2.0/identity-dev-guide-'
                            '2.0.pdf'
                }
            ],
            'media-types': [
                {
                    'base': 'application/json',
                    'type': 'application/vnd.openstack.identity-v2.0'
                            '+json'
                }, {
                    'base': 'application/xml',
                    'type': 'application/vnd.openstack.identity-v2.0'
                            '+xml'
                }
            ]
        }
        return versions

    def get_versions(self, context):
        # 300 Multiple Choices: the client should pick one of the versions.
        versions = self._get_versions_list(context)
        return wsgi.render_response(status=(300, 'Multiple Choices'), body={
            'versions': {
                'values': versions.values()
            }
        })

    def get_version(self, context):
        # Single-version form used at the endpoint root.
        versions = self._get_versions_list(context)
        return wsgi.render_response(body={
            'version': versions['v2.0']
        })
class NoopController(wsgi.Application):
    """Controller whose single action intentionally does nothing and
    returns an empty body."""

    def __init__(self):
        super(NoopController, self).__init__()

    def noop(self, context):
        # Deliberate no-op.
        return {}
class TokenController(wsgi.Application):
    def __init__(self):
        # Managers for the backends this controller coordinates.
        self.catalog_api = catalog.Manager()
        self.identity_api = identity.Manager()
        self.token_api = token.Manager()
        self.policy_api = policy.Manager()
        super(TokenController, self).__init__()
def ca_cert(self, context, auth=None):
ca_file = open(config.CONF.signing.ca_certs, 'r')
data = ca_file.read()
ca_file.close()
return data
def signing_cert(self, context, auth=None):
cert_file = open(config.CONF.signing.certfile, 'r')
data = cert_file.read()
cert_file.close()
return data
def authenticate(self, context, auth=None):
"""Authenticate credentials and return a token.
Accept auth as a dict that looks like::
{
"auth":{
"passwordCredentials":{
"username":"test_user",
"password":"mypass"
},
"tenantName":"customer-x"
}
}
In this case, tenant is optional, if not provided the token will be
considered "unscoped" and can later be used to get a scoped token.
Alternatively, this call accepts auth with only a token and tenant
that will return a token that is scoped to that tenant.
"""
if 'passwordCredentials' in auth:
user_id = auth['passwordCredentials'].get('userId', None)
if user_id and len(user_id) > MAX_PARAM_SIZE:
raise exception.ValidationSizeError(attribute='userId',
size=MAX_PARAM_SIZE)
username = auth['passwordCredentials'].get('username', '')
if len(username) > MAX_PARAM_SIZE:
raise exception.ValidationSizeError(attribute='username',
size=MAX_PARAM_SIZE)
password = auth['passwordCredentials'].get('password', '')
max_pw_size = utils.MAX_PASSWORD_LENGTH
if len(password) > max_pw_size:
raise exception.ValidationSizeError(attribute='password',
size=max_pw_size)
tenant_name = auth.get('tenantName', None)
if tenant_name and len(tenant_name) > MAX_PARAM_SIZE:
raise exception.ValidationSizeError(attribute='tenantName',
size=MAX_PARAM_SIZE)
if username:
try:
user_ref = self.identity_api.get_user_by_name(
context=context, user_name=username)
user_id = user_ref['id']
except exception.UserNotFound:
raise exception.Unauthorized()
# more compat
tenant_id = auth.get('tenantId', None)
if tenant_id and len(tenant_id) > MAX_PARAM_SIZE:
raise exception.ValidationSizeError(attribute='tenantId',
size=MAX_PARAM_SIZE)
if tenant_name:
try:
tenant_ref = self.identity_api.get_tenant_by_name(
context=context, tenant_name=tenant_name)
tenant_id = tenant_ref['id']
except exception.TenantNotFound:
raise exception.Unauthorized()
try:
auth_info = self.identity_api.authenticate(context=context,
user_id=user_id,
password=password,
tenant_id=tenant_id)
(user_ref, tenant_ref, metadata_ref) = auth_info
# If the user is disabled don't allow them to authenticate
if not user_ref.get('enabled', True):
LOG.warning('User %s is disabled' % user_id)
raise exception.Unauthorized()
# If the tenant is disabled don't allow them to authenticate
if tenant_ref and not tenant_ref.get('enabled', True):
LOG.warning('Tenant %s is disabled' % tenant_id)
raise exception.Unauthorized()
except AssertionError as e:
raise exception.Unauthorized(e.message)
auth_token_data = dict(zip(['user', 'tenant', 'metadata'],
auth_info))
expiry = self.token_api._get_default_expire_time(context=context)
if tenant_ref:
catalog_ref = self.catalog_api.get_catalog(
context=context,
user_id=user_ref['id'],
tenant_id=tenant_ref['id'],
metadata=metadata_ref)
else:
catalog_ref = {}
elif 'token' in auth:
old_token = auth['token'].get('id', None)
if len(old_token) > MAX_TOKEN_SIZE:
raise exception.ValidationSizeError(attribute='token',
size=MAX_TOKEN_SIZE)
tenant_name = auth.get('tenantName')
if tenant_name and len(tenant_name) > MAX_PARAM_SIZE:
raise exception.ValidationSizeError(attribute='tenantName',
size=MAX_PARAM_SIZE)
try:
old_token_ref = self.token_api.get_token(context=context,
token_id=old_token)
except exception.NotFound:
LOG.warning("Token not found: " + str(old_token))
raise exception.Unauthorized()
user_ref = old_token_ref['user']
user_id = user_ref['id']
current_user_ref = self.identity_api.get_user(context=context,
user_id=user_id)
# If the user is disabled don't allow them to authenticate
if not current_user_ref.get('enabled', True):
LOG.warning('User %s is disabled' % user_id)
raise exception.Unauthorized()
if tenant_name:
tenant_ref = self.identity_api.get_tenant_by_name(
context=context,
tenant_name=tenant_name)
tenant_id = tenant_ref['id']
else:
tenant_id = auth.get('tenantId', None)
tenants = self.identity_api.get_tenants_for_user(context, user_id)
if tenant_id:
if not tenant_id in tenants:
LOG.warning('User %s is authorized for tenant %s'
% (user_id, tenant_id))
raise exception.Unauthorized()
expiry = old_token_ref['expires']
try:
tenant_ref = self.identity_api.get_tenant(context=context,
tenant_id=tenant_id)
except exception.TenantNotFound:
tenant_ref = None
metadata_ref = {}
catalog_ref = {}
except exception.MetadataNotFound:
metadata_ref = {}
catalog_ref = {}
# If the tenant is disabled don't allow them to authenticate
if tenant_ref and not tenant_ref.get('enabled', True):
LOG.warning('Tenant %s is disabled' % tenant_id)
raise exception.Unauthorized()
if tenant_ref:
metadata_ref = self.identity_api.get_metadata(
context=context,
user_id=user_ref['id'],
tenant_id=tenant_ref['id'])
catalog_ref = self.catalog_api.get_catalog(
context=context,
user_id=user_ref['id'],
tenant_id=tenant_ref['id'],
metadata=metadata_ref)
auth_token_data = dict(dict(user=current_user_ref,
tenant=tenant_ref,
metadata=metadata_ref))
auth_token_data['expires'] = expiry
auth_token_data['id'] = 'placeholder'
roles_ref = []
for role_id in metadata_ref.get('roles', []):
role_ref = self.identity_api.get_role(context, role_id)
roles_ref.append(dict(name=role_ref['name']))
token_data = self._format_token(auth_token_data, roles_ref)
service_catalog = self._format_catalog(catalog_ref)
token_data['access']['serviceCatalog'] = service_catalog
if config.CONF.signing.token_format == 'UUID':
token_id = uuid.uuid4().hex
elif config.CONF.signing.token_format == 'PKI':
token_id = cms.cms_sign_token(json.dumps(token_data),
config.CONF.signing.certfile,
config.CONF.signing.keyfile)
else:
raise exception.UnexpectedError(
'Invalid value for token_format: %s.'
' Allowed values are PKI or UUID.' %
config.CONF.signing.token_format)
try:
self.token_api.create_token(
context, token_id, dict(key=token_id,
id=token_id,
expires=auth_token_data['expires'],
user=user_ref,
tenant=tenant_ref,
metadata=metadata_ref))
except Exception as e:
# an identical token may have been created already.
# if so, return the token_data as it is also identical
try:
self.token_api.get_token(context=context,
token_id=token_id)
except exception.TokenNotFound:
raise e
token_data['access']['token']['id'] = token_id
return token_data
def _get_token_ref(self, context, token_id, belongs_to=None):
"""Returns a token if a valid one exists.
Optionally, limited to a token owned by a specific tenant.
"""
# TODO(termie): this stuff should probably be moved to middleware
self.assert_admin(context)
if cms.is_ans1_token(token_id):
data = json.loads(cms.cms_verify(cms.token_to_cms(token_id),
config.CONF.signing.certfile,
config.CONF.signing.ca_certs))
data['access']['token']['user'] = data['access']['user']
data['access']['token']['metadata'] = data['access']['metadata']
if belongs_to:
assert data['access']['token']['tenant']['id'] == belongs_to
token_ref = data['access']['token']
else:
token_ref = self.token_api.get_token(context=context,
token_id=token_id)
return token_ref
# admin only
def validate_token_head(self, context, token_id):
"""Check that a token is valid.
Optionally, also ensure that it is owned by a specific tenant.
Identical to ``validate_token``, except does not return a response.
"""
belongs_to = context['query_string'].get('belongsTo')
assert self._get_token_ref(context, token_id, belongs_to)
# admin only
def validate_token(self, context, token_id):
"""Check that a token is valid.
Optionally, also ensure that it is owned by a specific tenant.
Returns metadata about the token along any associated roles.
"""
belongs_to = context['query_string'].get('belongsTo')
token_ref = self._get_token_ref(context, token_id, belongs_to)
# TODO(termie): optimize this call at some point and put it into the
# the return for metadata
# fill out the roles in the metadata
metadata_ref = token_ref['metadata']
roles_ref = []
for role_id in metadata_ref.get('roles', []):
roles_ref.append(self.identity_api.get_role(context, role_id))
# Get a service catalog if possible
# This is needed for on-behalf-of requests
catalog_ref = None
if token_ref.get('tenant'):
catalog_ref = self.catalog_api.get_catalog(
context=context,
user_id=token_ref['user']['id'],
tenant_id=token_ref['tenant']['id'],
metadata=metadata_ref)
return self._format_token(token_ref, roles_ref, catalog_ref)
def delete_token(self, context, token_id):
"""Delete a token, effectively invalidating it for authz."""
# TODO(termie): this stuff should probably be moved to middleware
self.assert_admin(context)
self.token_api.delete_token(context=context, token_id=token_id)
def revocation_list(self, context, auth=None):
self.assert_admin(context)
tokens = self.token_api.list_revoked_tokens(context)
for t in tokens:
expires = t['expires']
if not (expires and isinstance(expires, unicode)):
t['expires'] = timeutils.isotime(expires)
data = {'revoked': tokens}
json_data = json.dumps(data)
signed_text = cms.cms_sign_text(json_data,
config.CONF.signing.certfile,
config.CONF.signing.keyfile)
return {'signed': signed_text}
def endpoints(self, context, token_id):
"""Return a list of endpoints available to the token."""
self.assert_admin(context)
token_ref = self._get_token_ref(context, token_id)
catalog_ref = None
if token_ref.get('tenant'):
catalog_ref = self.catalog_api.get_catalog(
context=context,
user_id=token_ref['user']['id'],
tenant_id=token_ref['tenant']['id'],
metadata=token_ref['metadata'])
return self._format_endpoint_list(catalog_ref)
def _format_authenticate(self, token_ref, roles_ref, catalog_ref):
o = self._format_token(token_ref, roles_ref)
o['access']['serviceCatalog'] = self._format_catalog(catalog_ref)
return o
def _format_token(self, token_ref, roles_ref, catalog_ref=None):
user_ref = token_ref['user']
metadata_ref = token_ref['metadata']
expires = token_ref['expires']
if expires is not None:
if not isinstance(expires, unicode):
expires = timeutils.isotime(expires)
o = {'access': {'token': {'id': token_ref['id'],
'expires': expires,
},
'user': {'id': user_ref['id'],
'name': user_ref['name'],
'username': user_ref['name'],
'roles': roles_ref,
'roles_links': metadata_ref.get('roles_links',
[])
}
}
}
if 'tenant' in token_ref and token_ref['tenant']:
token_ref['tenant']['enabled'] = True
o['access']['token']['tenant'] = token_ref['tenant']
if catalog_ref is not None:
o['access']['serviceCatalog'] = self._format_catalog(catalog_ref)
if metadata_ref:
if 'is_admin' in metadata_ref:
o['access']['metadata'] = {'is_admin':
metadata_ref['is_admin']}
else:
o['access']['metadata'] = {'is_admin': 0}
if 'roles' in metadata_ref:
o['access']['metadata']['roles'] = metadata_ref['roles']
return o
def _format_catalog(self, catalog_ref):
"""Munge catalogs from internal to output format
Internal catalogs look like:
{$REGION: {
{$SERVICE: {
$key1: $value1,
...
}
}
}
The legacy api wants them to look like
[{'name': $SERVICE[name],
'type': $SERVICE,
'endpoints': [{
'tenantId': $tenant_id,
...
'region': $REGION,
}],
'endpoints_links': [],
}]
"""
if not catalog_ref:
return []
services = {}
for region, region_ref in catalog_ref.iteritems():
for service, service_ref in region_ref.iteritems():
new_service_ref = services.get(service, {})
new_service_ref['name'] = service_ref.pop('name')
new_service_ref['type'] = service
new_service_ref['endpoints_links'] = []
service_ref['region'] = region
endpoints_ref = new_service_ref.get('endpoints', [])
endpoints_ref.append(service_ref)
new_service_ref['endpoints'] = endpoints_ref
services[service] = new_service_ref
return services.values()
def _format_endpoint_list(self, catalog_ref):
"""Formats a list of endpoints according to Identity API v2.
The v2.0 API wants an endpoint list to look like::
{
'endpoints': [
{
'id': $endpoint_id,
'name': $SERVICE[name],
'type': $SERVICE,
'tenantId': $tenant_id,
'region': $REGION,
}
],
'endpoints_links': [],
}
"""
if not catalog_ref:
return {}
endpoints = []
for region_name, region_ref in catalog_ref.iteritems():
for service_type, service_ref in region_ref.iteritems():
endpoints.append({
'id': service_ref.get('id'),
'name': service_ref.get('name'),
'type': service_type,
'region': region_name,
'publicURL': service_ref.get('publicURL'),
'internalURL': service_ref.get('internalURL'),
'adminURL': service_ref.get('adminURL'),
})
return {'endpoints': endpoints, 'endpoints_links': []}
class ExtensionsController(wsgi.Application):
    """Base extensions controller to be extended by public and admin API's."""

    def __init__(self, extensions=None):
        super(ExtensionsController, self).__init__()
        self.extensions = extensions or {}

    def get_extensions_info(self, context):
        """List every registered extension descriptor."""
        return {'extensions': {'values': self.extensions.values()}}

    def get_extension_info(self, context, extension_alias):
        """Look up a single extension by alias; 404 if unregistered."""
        if extension_alias not in self.extensions:
            raise exception.NotFound(target=extension_alias)
        return {'extension': self.extensions[extension_alias]}
class PublicExtensionsController(ExtensionsController):
    """Extensions controller for the public API; adds nothing to the base."""
class AdminExtensionsController(ExtensionsController):
    """Extensions controller for the admin API.

    Registers the OS-KSADM extension descriptor on top of whatever
    extensions the caller supplies.
    """

    def __init__(self, *args, **kwargs):
        super(AdminExtensionsController, self).__init__(*args, **kwargs)
        # TODO(dolph): Extensions should obviously provide this information
        #              themselves, but hardcoding it here allows us to match
        #              the API spec in the short term with minimal complexity.
        described_by = {
            'rel': 'describedby',
            # TODO(dolph): link needs to be revised after
            # bug 928059 merges
            'type': 'text/html',
            'href': 'https://github.com/openstack/identity-api',
        }
        self.extensions['OS-KSADM'] = {
            'name': 'Openstack Keystone Admin',
            'namespace': ('http://docs.openstack.org/identity/api/ext/'
                          'OS-KSADM/v1.0'),
            'alias': 'OS-KSADM',
            'updated': '2011-08-19T13:25:27-06:00',
            'description': ('Openstack extensions to Keystone v2.0 API '
                            'enabling Admin Operations.'),
            'links': [described_by],
        }
@logging.fail_gracefully
def public_app_factory(global_conf, **local_conf):
    """Paste app factory for the public service API."""
    merged = global_conf.copy()
    merged.update(local_conf)
    return PublicRouter()
@logging.fail_gracefully
def admin_app_factory(global_conf, **local_conf):
    """Paste app factory for the admin service API."""
    merged = global_conf.copy()
    merged.update(local_conf)
    return AdminRouter()
@logging.fail_gracefully
def public_version_app_factory(global_conf, **local_conf):
    """Paste app factory for the public version-discovery endpoint."""
    merged = global_conf.copy()
    merged.update(local_conf)
    return PublicVersionRouter()
@logging.fail_gracefully
def admin_version_app_factory(global_conf, **local_conf):
    """Paste app factory for the admin version-discovery endpoint."""
    merged = global_conf.copy()
    merged.update(local_conf)
    return AdminVersionRouter()
|
5559_2
|
crossvul
|
py
|
CWE-119
|
Improper Restriction of Operations within the Bounds of a Memory Buffer - Certain languages allow direct addressing of memory locations and do not automatically ensure that these locations are valid for the memory buffer that is being referenced.
|
python
|
# -*- coding: utf-8 -*-
#
# SelfTest/Hash/common.py: Common code for Crypto.SelfTest.Hash
#
# Written in 2008 by Dwayne C. Litzenberger <dlitz@dlitz.net>
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
"""Self-testing for PyCrypto hash modules"""
from __future__ import nested_scopes
__revision__ = "$Id$"
import sys
if sys.version_info[0] == 2 and sys.version_info[1] == 1:
from Crypto.Util.py21compat import *
import unittest
from binascii import a2b_hex, b2a_hex, hexlify
from Crypto.Util.py3compat import *
from Crypto.Util.strxor import strxor_c
# For compatibility with Python 2.1 and Python 2.2
# Module-level shim: on interpreters older than 2.3 a keyword-only dict()
# replacement is defined; on anything newer the builtin is simply rebound,
# so behavior is unchanged there.
if sys.hexversion < 0x02030000:
# Python 2.1 doesn't have a dict() function
# Python 2.2 dict() function raises TypeError if you do dict(MD5='blah')
def dict(**kwargs):
return kwargs.copy()
else:
dict = dict
class _NoDefault: pass # sentinel object
def _extract(d, k, default=_NoDefault):
    """Get an item from a dictionary, and remove it from the dictionary."""
    if k in d:
        value = d[k]
        del d[k]
        return value
    # Key absent: hand back the default, or propagate KeyError when the
    # caller supplied none (the _NoDefault sentinel is still in place).
    if default is _NoDefault:
        raise KeyError(k)
    return default
# Generic cipher test case
class CipherSelfTest(unittest.TestCase):
# Data-driven known-answer test: `params` supplies hex-encoded key,
# plaintext and ciphertext plus optional mode/IV/AEAD material, and the
# test checks encrypt/decrypt round both ways against them.
def __init__(self, module, params):
unittest.TestCase.__init__(self)
self.module = module
# Extract the parameters
# _extract() pops each recognized key; whatever remains in `params`
# is forwarded verbatim to module.new() via self.extra_params.
params = params.copy()
self.description = _extract(params, 'description')
self.key = b(_extract(params, 'key'))
self.plaintext = b(_extract(params, 'plaintext'))
self.ciphertext = b(_extract(params, 'ciphertext'))
self.module_name = _extract(params, 'module_name', None)
self.assoc_data = _extract(params, 'assoc_data', None)
self.mac = _extract(params, 'mac', None)
if self.assoc_data:
self.mac = b(self.mac)
mode = _extract(params, 'mode', None)
self.mode_name = str(mode)
if mode is not None:
# Block cipher
self.mode = getattr(self.module, "MODE_" + mode)
self.iv = _extract(params, 'iv', None)
# 'nonce' is accepted as an alias for 'iv'.
if self.iv is None:
self.iv = _extract(params, 'nonce', None)
if self.iv is not None:
self.iv = b(self.iv)
# Only relevant for OPENPGP mode
self.encrypted_iv = _extract(params, 'encrypted_iv', None)
if self.encrypted_iv is not None:
self.encrypted_iv = b(self.encrypted_iv)
else:
# Stream cipher
self.mode = None
self.iv = None
self.extra_params = params
def shortDescription(self):
return self.description
# Instantiate a fresh cipher object for this vector.  do_decryption only
# matters for OPENPGP mode, where the decryptor needs the encrypted IV.
def _new(self, do_decryption=0):
params = self.extra_params.copy()
# Handle CTR mode parameters. By default, we use Counter.new(self.module.block_size)
if hasattr(self.module, "MODE_CTR") and self.mode == self.module.MODE_CTR:
from Crypto.Util import Counter
ctr_class = _extract(params, 'ctr_class', Counter.new)
ctr_params = _extract(params, 'ctr_params', {}).copy()
if ctr_params.has_key('prefix'): ctr_params['prefix'] = a2b_hex(b(ctr_params['prefix']))
if ctr_params.has_key('suffix'): ctr_params['suffix'] = a2b_hex(b(ctr_params['suffix']))
# The counter width defaults to whatever the prefix/suffix leave
# free within one cipher block.
if not ctr_params.has_key('nbits'):
ctr_params['nbits'] = 8*(self.module.block_size - len(ctr_params.get('prefix', '')) - len(ctr_params.get('suffix', '')))
params['counter'] = ctr_class(**ctr_params)
if self.mode is None:
# Stream cipher
return self.module.new(a2b_hex(self.key), **params)
elif self.iv is None:
# Block cipher without iv
return self.module.new(a2b_hex(self.key), self.mode, **params)
else:
# Block cipher with iv
if do_decryption and self.mode == self.module.MODE_OPENPGP:
# In PGP mode, the IV to feed for decryption is the *encrypted* one
return self.module.new(a2b_hex(self.key), self.mode, a2b_hex(self.encrypted_iv), **params)
else:
return self.module.new(a2b_hex(self.key), self.mode, a2b_hex(self.iv), **params)
# True when this vector's mode equals module.MODE_<name> (if defined).
def isMode(self, name):
if not hasattr(self.module, "MODE_"+name):
return False
return self.mode == getattr(self.module, "MODE_"+name)
def runTest(self):
plaintext = a2b_hex(self.plaintext)
ciphertext = a2b_hex(self.ciphertext)
assoc_data = []
if self.assoc_data:
assoc_data = [ a2b_hex(b(x)) for x in self.assoc_data]
ct = None
pt = None
#
# Repeat the same encryption or decryption twice and verify
# that the result is always the same
#
for i in xrange(2):
cipher = self._new()
decipher = self._new(1)
# Only AEAD modes
for comp in assoc_data:
cipher.update(comp)
decipher.update(comp)
ctX = b2a_hex(cipher.encrypt(plaintext))
# SIV has no plain decrypt(); decryption and MAC check are fused.
if self.isMode("SIV"):
ptX = b2a_hex(decipher.decrypt_and_verify(ciphertext, a2b_hex(self.mac)))
else:
ptX = b2a_hex(decipher.decrypt(ciphertext))
if ct:
self.assertEqual(ct, ctX)
self.assertEqual(pt, ptX)
ct, pt = ctX, ptX
if self.isMode("OPENPGP"):
# In PGP mode, data returned by the first encrypt()
# is prefixed with the encrypted IV.
# Here we check it and then remove it from the ciphertexts.
eilen = len(self.encrypted_iv)
self.assertEqual(self.encrypted_iv, ct[:eilen])
ct = ct[eilen:]
self.assertEqual(self.ciphertext, ct) # encrypt
self.assertEqual(self.plaintext, pt) # decrypt
if self.mac:
mac = b2a_hex(cipher.digest())
self.assertEqual(self.mac, mac)
decipher.verify(a2b_hex(self.mac))
class CipherStreamingSelfTest(CipherSelfTest):
# Variant of CipherSelfTest that feeds data 3 bytes at a time to confirm
# the cipher behaves like a stream (chunking must not change the output).
def shortDescription(self):
desc = self.module_name
if self.mode is not None:
desc += " in %s mode" % (self.mode_name,)
return "%s should behave like a stream cipher" % (desc,)
def runTest(self):
plaintext = a2b_hex(self.plaintext)
ciphertext = a2b_hex(self.ciphertext)
# The cipher should work like a stream cipher
# Test counter mode encryption, 3 bytes at a time
ct3 = []
cipher = self._new()
for i in range(0, len(plaintext), 3):
ct3.append(cipher.encrypt(plaintext[i:i+3]))
ct3 = b2a_hex(b("").join(ct3))
self.assertEqual(self.ciphertext, ct3) # encryption (3 bytes at a time)
# Test counter mode decryption, 3 bytes at a time
pt3 = []
cipher = self._new()
# NOTE: encrypt() is used on the ciphertext on purpose -- for a
# XOR-based stream/CTR cipher, encryption and decryption are the
# same keystream operation.
for i in range(0, len(ciphertext), 3):
pt3.append(cipher.encrypt(ciphertext[i:i+3]))
# PY3K: This is meant to be text, do not change to bytes (data)
pt3 = b2a_hex(b("").join(pt3))
self.assertEqual(self.plaintext, pt3) # decryption (3 bytes at a time)
class CTRSegfaultTest(unittest.TestCase):
    """Constructing CTR mode without a counter must fail cleanly, not crash."""

    def __init__(self, module, params):
        unittest.TestCase.__init__(self)
        self.module = module
        self.module_name = params.get('module_name')
        self.key = b(params['key'])

    def shortDescription(self):
        return """Regression test: %s.new(key, %s.MODE_CTR) should raise TypeError, not segfault""" % (self.module_name, self.module_name)

    def runTest(self):
        # Omitting the mandatory 'counter' argument must surface as a
        # TypeError in Python rather than a crash in the C extension.
        self.assertRaises(TypeError, self.module.new,
                          a2b_hex(self.key), self.module.MODE_CTR)
class CTRWraparoundTest(unittest.TestCase):
# Checks that a CTR counter which wraps past its maximum raises
# OverflowError instead of silently reusing keystream.  Python 2 only
# (uses the 2L long literal below).
def __init__(self, module, params):
unittest.TestCase.__init__(self)
self.module = module
self.key = b(params['key'])
self.module_name = params.get('module_name', None)
def shortDescription(self):
return """Regression test: %s with MODE_CTR raising OverflowError on wraparound""" % (self.module_name,)
def runTest(self):
from Crypto.Util import Counter
def pythonCounter():
# Closure-based stand-in for a Counter object: one good block,
# then OverflowError forever (state kept in a mutable list).
state = [0]
def ctr():
# First block succeeds; Second and subsequent blocks raise OverflowError
if state[0] == 0:
state[0] = 1
return b("\xff") * self.module.block_size
else:
raise OverflowError
return ctr
for little_endian in (0, 1): # (False, True) Test both endiannesses
block = b("\x00") * self.module.block_size
# Test PyObject_CallObject code path: if the counter raises OverflowError
cipher = self.module.new(a2b_hex(self.key), self.module.MODE_CTR, counter=pythonCounter())
cipher.encrypt(block)
self.assertRaises(OverflowError, cipher.encrypt, block)
self.assertRaises(OverflowError, cipher.encrypt, block)
# Test PyObject_CallObject code path: counter object should raise OverflowError
ctr = Counter.new(8*self.module.block_size, initial_value=2L**(8*self.module.block_size)-1, little_endian=little_endian)
ctr()
self.assertRaises(OverflowError, ctr)
self.assertRaises(OverflowError, ctr)
# Test the CTR-mode shortcut
ctr = Counter.new(8*self.module.block_size, initial_value=2L**(8*self.module.block_size)-1, little_endian=little_endian)
cipher = self.module.new(a2b_hex(self.key), self.module.MODE_CTR, counter=ctr)
cipher.encrypt(block)
self.assertRaises(OverflowError, cipher.encrypt, block)
self.assertRaises(OverflowError, cipher.encrypt, block)
class CFBSegmentSizeTest(unittest.TestCase):
    """CFB mode must reject segment sizes that are not a multiple of 8 bits."""

    def __init__(self, module, params):
        unittest.TestCase.__init__(self)
        self.module = module
        self.description = params['description']
        self.key = b(params['key'])

    def shortDescription(self):
        return self.description

    def runTest(self):
        """Regression test: m.new(key, m.MODE_CFB, segment_size=N) should require segment_size to be a multiple of 8 bits"""
        raw_key = a2b_hex(self.key)
        # 1..7 bit segments are invalid: CFB here operates on whole bytes.
        for bits in range(1, 8):
            self.assertRaises(ValueError, self.module.new, raw_key,
                              self.module.MODE_CFB, segment_size=bits)
        # Exactly 8 bits is the smallest legal segment size.
        self.module.new(raw_key, self.module.MODE_CFB,
                        "\0"*self.module.block_size, segment_size=8) # should succeed
class CCMMACLengthTest(unittest.TestCase):
    """CCM specific tests about MAC"""

    def __init__(self, module):
        unittest.TestCase.__init__(self)
        self.module = module
        self.key = b('\xFF')*16
        self.iv = b('\x00')*10

    def shortDescription(self):
        # self.description is assigned as runTest progresses.
        return self.description

    def runTest(self):
        # A CCM MAC must be an even length in 4..16 bytes; every odd
        # length in 3..15 is rejected.
        for mac_bytes in range(3, 16, 2):
            self.description = "CCM MAC length check (%d bytes)" % mac_bytes
            self.assertRaises(ValueError, self.module.new, self.key,
                              self.module.MODE_CCM, self.iv,
                              msg_len=10, mac_len=mac_bytes)
        # With no explicit mac_len, the digest defaults to 16 bytes.
        self.description = "CCM default MAC length check"
        cipher = self.module.new(self.key, self.module.MODE_CCM,
                                 self.iv, msg_len=4)
        cipher.encrypt(b('z')*4)
        self.assertEqual(len(cipher.digest()), 16)
class CCMSplitEncryptionTest(unittest.TestCase):
"""CCM specific tests to validate how encrypt()
decrypt() can be called multiple times on the
same object."""
def __init__(self, module):
unittest.TestCase.__init__(self)
self.module = module
self.key = b('\xFF')*16
self.iv = b('\x00')*10
self.description = "CCM Split Encryption Test"
def shortDescription(self):
return self.description
def runTest(self):
"""Verify that CCM update()/encrypt() can be called multiple times,
provided that lengths are declared beforehand"""
data = b("AUTH DATA")
pt1 = b("PLAINTEXT1") # Short
pt2 = b("PLAINTEXT2") # Long
pt_ref = pt1+pt2
# REFERENCE: Run with 1 update() and 1 encrypt()
cipher = self.module.new(self.key, self.module.MODE_CCM,
self.iv)
cipher.update(data)
ct_ref = cipher.encrypt(pt_ref)
mac_ref = cipher.digest()
# Verify that calling CCM encrypt()/decrypt() twice is not
# possible without the 'msg_len' parameter and regardless
# of the 'assoc_len' parameter
for ad_len in None, len(data):
cipher = self.module.new(self.key, self.module.MODE_CCM,
self.iv, assoc_len=ad_len)
cipher.update(data)
cipher.encrypt(pt1)
self.assertRaises(TypeError, cipher.encrypt, pt2)
cipher = self.module.new(self.key, self.module.MODE_CCM,
self.iv, assoc_len=ad_len)
cipher.update(data)
cipher.decrypt(ct_ref[:len(pt1)])
self.assertRaises(TypeError, cipher.decrypt, ct_ref[len(pt1):])
# Run with 2 encrypt()/decrypt(). Results must be the same
# regardless of the 'assoc_len' parameter
for ad_len in None, len(data):
cipher = self.module.new(self.key, self.module.MODE_CCM,
self.iv, assoc_len=ad_len, msg_len=len(pt_ref))
cipher.update(data)
ct = cipher.encrypt(pt1)
ct += cipher.encrypt(pt2)
mac = cipher.digest()
self.assertEqual(ct_ref, ct)
self.assertEqual(mac_ref, mac)
cipher = self.module.new(self.key, self.module.MODE_CCM,
self.iv, msg_len=len(pt1+pt2))
cipher.update(data)
pt = cipher.decrypt(ct[:len(pt1)])
pt += cipher.decrypt(ct[len(pt1):])
# verify() raises on mismatch; its return value (None) is unused.
mac = cipher.verify(mac_ref)
self.assertEqual(pt_ref, pt)
class AEADTests(unittest.TestCase):
"""Tests generic to all AEAD modes"""
def __init__(self, module, mode_name, key_size):
unittest.TestCase.__init__(self)
self.module = module
self.mode_name = mode_name
self.mode = getattr(module, mode_name)
# SIV requires a double-length key (two independent subkeys).
if not self.isMode("SIV"):
self.key = b('\xFF')*key_size
else:
self.key = b('\xFF')*key_size*2
self.iv = b('\x00')*10
self.description = "AEAD Test"
# True when this instance's mode equals module.MODE_<name> (if defined).
def isMode(self, name):
if not hasattr(self.module, "MODE_"+name):
return False
return self.mode == getattr(self.module, "MODE_"+name)
def right_mac_test(self):
"""Positive tests for MAC"""
self.description = "Test for right MAC in %s of %s" % \
(self.mode_name, self.module.__name__)
ad_ref = b("Reference AD")
pt_ref = b("Reference plaintext")
# Encrypt and create the reference MAC
cipher = self.module.new(self.key, self.mode, self.iv)
cipher.update(ad_ref)
ct_ref = cipher.encrypt(pt_ref)
mac_ref = cipher.digest()
# Decrypt and verify that MAC is accepted
decipher = self.module.new(self.key, self.mode, self.iv)
decipher.update(ad_ref)
pt = decipher.decrypt_and_verify(ct_ref, mac_ref)
self.assertEqual(pt, pt_ref)
# Verify that hexverify work
decipher.hexverify(hexlify(mac_ref))
def wrong_mac_test(self):
"""Negative tests for MAC"""
self.description = "Test for wrong MAC in %s of %s" % \
(self.mode_name, self.module.__name__)
ad_ref = b("Reference AD")
pt_ref = b("Reference plaintext")
# Encrypt and create the reference MAC
cipher = self.module.new(self.key, self.mode, self.iv)
cipher.update(ad_ref)
ct_ref = cipher.encrypt(pt_ref)
mac_ref = cipher.digest()
# Modify the MAC and verify it is NOT ACCEPTED
# (bitwise complement of every byte guarantees a mismatch)
wrong_mac = strxor_c(mac_ref, 255)
decipher = self.module.new(self.key, self.mode, self.iv)
decipher.update(ad_ref)
self.assertRaises(ValueError, decipher.decrypt_and_verify,
ct_ref, wrong_mac)
def zero_data(self):
"""Verify transition from INITIALIZED to FINISHED"""
# digest() with no data at all must be legal.
self.description = "Test for zero data in %s of %s" % \
(self.mode_name, self.module.__name__)
cipher = self.module.new(self.key, self.mode, self.iv)
cipher.digest()
def multiple_updates(self):
"""Verify that update() can be called multiple times"""
self.description = "Test for multiple updates in %s of %s" % \
(self.mode_name, self.module.__name__)
# In all modes other than SIV, the associated data is a single
# component that can be arbitrarilly split and submitted to update().
#
# In SIV, associated data is instead organized in a vector or multiple
# components. Each component is passed to update() as a whole.
# This test is therefore not meaningful to SIV.
if self.isMode("SIV"):
return
ad = b("").join([bchr(x) for x in xrange(0,128)])
mac1, mac2, mac3 = (None,)*3
for chunk_length in 1,10,40,80,128:
chunks = [ad[i:i+chunk_length] for i in range(0, len(ad), chunk_length)]
# No encryption/decryption
cipher = self.module.new(self.key, self.mode, self.iv)
for c in chunks:
cipher.update(c)
# First chunk length establishes mac1; later ones must verify
# against it (chunking must not change the MAC).
if mac1:
cipher.verify(mac1)
else:
mac1 = cipher.digest()
# Encryption
cipher = self.module.new(self.key, self.mode, self.iv)
for c in chunks:
cipher.update(c)
ct = cipher.encrypt(b("PT"))
mac2 = cipher.digest()
# Decryption
cipher = self.module.new(self.key, self.mode, self.iv)
for c in chunks:
cipher.update(c)
cipher.decrypt(ct)
cipher.verify(mac2)
def no_mix_encrypt_decrypt(self):
"""Verify that encrypt and decrypt cannot be mixed up"""
self.description = "Test for mix of encrypt and decrypt in %s of %s" % \
(self.mode_name, self.module.__name__)
# Calling decrypt after encrypt raises an exception
cipher = self.module.new(self.key, self.mode, self.iv)
cipher.encrypt(b("PT")*40)
self.assertRaises(TypeError, cipher.decrypt, b("XYZ")*40)
# Calling encrypt() after decrypt() raises an exception
# (excluded for SIV, since decrypt() is not valid)
if not self.isMode("SIV"):
cipher = self.module.new(self.key, self.mode, self.iv)
cipher.decrypt(b("CT")*40)
self.assertRaises(TypeError, cipher.encrypt, b("XYZ")*40)
# Calling verify after encrypt raises an exception
cipher = self.module.new(self.key, self.mode, self.iv)
cipher.encrypt(b("PT")*40)
self.assertRaises(TypeError, cipher.verify, b("XYZ"))
self.assertRaises(TypeError, cipher.hexverify, "12")
# Calling digest() after decrypt() raises an exception
# (excluded for SIV, since decrypt() is not valid)
if not self.isMode("SIV"):
cipher = self.module.new(self.key, self.mode, self.iv)
cipher.decrypt(b("CT")*40)
self.assertRaises(TypeError, cipher.digest)
self.assertRaises(TypeError, cipher.hexdigest)
def no_late_update(self):
"""Verify that update cannot be called after encrypt or decrypt"""
self.description = "Test for late update in %s of %s" % \
(self.mode_name, self.module.__name__)
# Calling update after encrypt raises an exception
cipher = self.module.new(self.key, self.mode, self.iv)
cipher.update(b("XX"))
cipher.encrypt(b("PT")*40)
self.assertRaises(TypeError, cipher.update, b("XYZ"))
# Calling update() after decrypt() raises an exception
# (excluded for SIV, since decrypt() is not valid)
if not self.isMode("SIV"):
cipher = self.module.new(self.key, self.mode, self.iv)
cipher.update(b("XX"))
cipher.decrypt(b("CT")*40)
self.assertRaises(TypeError, cipher.update, b("XYZ"))
def loopback(self):
"""Verify composition of encrypt_and_digest() and decrypt_and_verify()
is the identity function."""
self.description = "Lookback test decrypt_and_verify(encrypt_and_digest)"\
"for %s in %s" % (self.mode_name,
self.module.__name__)
enc_cipher = self.module.new(self.key, self.mode, self.iv)
dec_cipher = self.module.new(self.key, self.mode, self.iv)
enc_cipher.update(b("XXX"))
dec_cipher.update(b("XXX"))
plaintext = b("Reference") * 10
ct, mac = enc_cipher.encrypt_and_digest(plaintext)
pt = dec_cipher.decrypt_and_verify(ct, mac)
self.assertEqual(plaintext, pt)
# Run the whole suite of sub-tests as one unittest case.
def runTest(self):
self.right_mac_test()
self.wrong_mac_test()
self.zero_data()
self.multiple_updates()
self.no_mix_encrypt_decrypt()
self.no_late_update()
self.loopback()
def shortDescription(self):
return self.description
class RoundtripTest(unittest.TestCase):
def __init__(self, module, params):
from Crypto import Random
unittest.TestCase.__init__(self)
self.module = module
self.iv = Random.get_random_bytes(module.block_size)
self.key = b(params['key'])
self.plaintext = 100 * b(params['plaintext'])
self.module_name = params.get('module_name', None)
def shortDescription(self):
return """%s .decrypt() output of .encrypt() should not be garbled""" % (self.module_name,)
def runTest(self):
for mode in (self.module.MODE_ECB, self.module.MODE_CBC, self.module.MODE_CFB, self.module.MODE_OFB, self.module.MODE_OPENPGP):
encryption_cipher = self.module.new(a2b_hex(self.key), mode, self.iv)
ciphertext = encryption_cipher.encrypt(self.plaintext)
if mode != self.module.MODE_OPENPGP:
decryption_cipher = self.module.new(a2b_hex(self.key), mode, self.iv)
else:
eiv = ciphertext[:self.module.block_size+2]
ciphertext = ciphertext[self.module.block_size+2:]
decryption_cipher = self.module.new(a2b_hex(self.key), mode, eiv)
decrypted_plaintext = decryption_cipher.decrypt(ciphertext)
self.assertEqual(self.plaintext, decrypted_plaintext)
class PGPTest(unittest.TestCase):
def __init__(self, module, params):
unittest.TestCase.__init__(self)
self.module = module
self.key = b(params['key'])
def shortDescription(self):
return "MODE_PGP was implemented incorrectly and insecurely. It's completely banished now."
def runTest(self):
self.assertRaises(ValueError, self.module.new, a2b_hex(self.key),
self.module.MODE_PGP)
class IVLengthTest(unittest.TestCase):
def __init__(self, module, params):
unittest.TestCase.__init__(self)
self.module = module
self.key = b(params['key'])
def shortDescription(self):
return "Check that all modes except MODE_ECB and MODE_CTR require an IV of the proper length"
def runTest(self):
self.assertRaises(ValueError, self.module.new, a2b_hex(self.key),
self.module.MODE_CBC, "")
self.assertRaises(ValueError, self.module.new, a2b_hex(self.key),
self.module.MODE_CFB, "")
self.assertRaises(ValueError, self.module.new, a2b_hex(self.key),
self.module.MODE_OFB, "")
self.assertRaises(ValueError, self.module.new, a2b_hex(self.key),
self.module.MODE_OPENPGP, "")
if hasattr(self.module, "MODE_CCM"):
for ivlen in (0,6,14):
self.assertRaises(ValueError, self.module.new, a2b_hex(self.key),
self.module.MODE_CCM, bchr(0)*ivlen, msg_len=10)
self.module.new(a2b_hex(self.key), self.module.MODE_ECB, "")
self.module.new(a2b_hex(self.key), self.module.MODE_CTR, "", counter=self._dummy_counter)
def _dummy_counter(self):
return "\0" * self.module.block_size
def make_block_tests(module, module_name, test_data, additional_params=dict()):
tests = []
extra_tests_added = 0
for i in range(len(test_data)):
row = test_data[i]
# Build the "params" dictionary
params = {'mode': 'ECB'}
if len(row) == 3:
(params['plaintext'], params['ciphertext'], params['key']) = row
elif len(row) == 4:
(params['plaintext'], params['ciphertext'], params['key'], params['description']) = row
elif len(row) == 5:
(params['plaintext'], params['ciphertext'], params['key'], params['description'], extra_params) = row
params.update(extra_params)
else:
raise AssertionError("Unsupported tuple size %d" % (len(row),))
# Build the display-name for the test
p2 = params.copy()
p_key = _extract(p2, 'key')
p_plaintext = _extract(p2, 'plaintext')
p_ciphertext = _extract(p2, 'ciphertext')
p_description = _extract(p2, 'description', None)
p_mode = p2.get('mode', 'ECB')
if p_mode == 'ECB':
_extract(p2, 'mode', 'ECB')
if p_description is not None:
description = p_description
elif p_mode == 'ECB' and not p2:
description = "p=%s, k=%s" % (p_plaintext, p_key)
else:
description = "p=%s, k=%s, %r" % (p_plaintext, p_key, p2)
name = "%s #%d: %s" % (module_name, i+1, description)
params['description'] = name
params['module_name'] = module_name
params.update(additional_params)
# Add extra test(s) to the test suite before the current test
if not extra_tests_added:
tests += [
CTRSegfaultTest(module, params),
CTRWraparoundTest(module, params),
CFBSegmentSizeTest(module, params),
RoundtripTest(module, params),
PGPTest(module, params),
IVLengthTest(module, params),
]
extra_tests_added = 1
# Extract associated data and MAC for AEAD modes
if p_mode in ('CCM', 'EAX', 'SIV', 'GCM'):
assoc_data, params['plaintext'] = params['plaintext'].split('|')
assoc_data2, params['ciphertext'], params['mac'] = params['ciphertext'].split('|')
params['assoc_data'] = assoc_data.split("-")
params['mac_len'] = len(params['mac'])>>1
# Add the current test to the test suite
tests.append(CipherSelfTest(module, params))
# When using CTR mode, test that the interface behaves like a stream cipher
if p_mode in ('OFB', 'CTR'):
tests.append(CipherStreamingSelfTest(module, params))
# When using CTR mode, test the non-shortcut code path.
if p_mode == 'CTR' and not params.has_key('ctr_class'):
params2 = params.copy()
params2['description'] += " (shortcut disabled)"
ctr_params2 = params.get('ctr_params', {}).copy()
params2['ctr_params'] = ctr_params2
if not params2['ctr_params'].has_key('disable_shortcut'):
params2['ctr_params']['disable_shortcut'] = 1
tests.append(CipherSelfTest(module, params2))
# Add tests that don't use test vectors
if hasattr(module, "MODE_CCM"):
tests += [
CCMMACLengthTest(module),
CCMSplitEncryptionTest(module),
]
for aead_mode in ("MODE_CCM","MODE_EAX", "MODE_SIV", "MODE_GCM"):
if hasattr(module, aead_mode):
key_sizes = []
try:
key_sizes += module.key_size
except TypeError:
key_sizes = [ module.key_size ]
for ks in key_sizes:
tests += [
AEADTests(module, aead_mode, ks),
]
return tests
def make_stream_tests(module, module_name, test_data):
tests = []
for i in range(len(test_data)):
row = test_data[i]
# Build the "params" dictionary
params = {}
if len(row) == 3:
(params['plaintext'], params['ciphertext'], params['key']) = row
elif len(row) == 4:
(params['plaintext'], params['ciphertext'], params['key'], params['description']) = row
elif len(row) == 5:
(params['plaintext'], params['ciphertext'], params['key'], params['description'], extra_params) = row
params.update(extra_params)
else:
raise AssertionError("Unsupported tuple size %d" % (len(row),))
# Build the display-name for the test
p2 = params.copy()
p_key = _extract(p2, 'key')
p_plaintext = _extract(p2, 'plaintext')
p_ciphertext = _extract(p2, 'ciphertext')
p_description = _extract(p2, 'description', None)
if p_description is not None:
description = p_description
elif not p2:
description = "p=%s, k=%s" % (p_plaintext, p_key)
else:
description = "p=%s, k=%s, %r" % (p_plaintext, p_key, p2)
name = "%s #%d: %s" % (module_name, i+1, description)
params['description'] = name
params['module_name'] = module_name
# Add the test to the test suite
tests.append(CipherSelfTest(module, params))
tests.append(CipherStreamingSelfTest(module, params))
return tests
# vim:set ts=4 sw=4 sts=4 expandtab:
|
# -*- coding: utf-8 -*-
#
# SelfTest/Hash/common.py: Common code for Crypto.SelfTest.Hash
#
# Written in 2008 by Dwayne C. Litzenberger <dlitz@dlitz.net>
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
"""Self-testing for PyCrypto hash modules"""
from __future__ import nested_scopes
__revision__ = "$Id$"
import sys
if sys.version_info[0] == 2 and sys.version_info[1] == 1:
from Crypto.Util.py21compat import *
import unittest
from binascii import a2b_hex, b2a_hex, hexlify
from Crypto.Util.py3compat import *
from Crypto.Util.strxor import strxor_c
# For compatibility with Python 2.1 and Python 2.2
if sys.hexversion < 0x02030000:
# Python 2.1 doesn't have a dict() function
# Python 2.2 dict() function raises TypeError if you do dict(MD5='blah')
def dict(**kwargs):
return kwargs.copy()
else:
dict = dict
class _NoDefault: pass # sentinel object
def _extract(d, k, default=_NoDefault):
"""Get an item from a dictionary, and remove it from the dictionary."""
try:
retval = d[k]
except KeyError:
if default is _NoDefault:
raise
return default
del d[k]
return retval
# Generic cipher test case
class CipherSelfTest(unittest.TestCase):
def __init__(self, module, params):
unittest.TestCase.__init__(self)
self.module = module
# Extract the parameters
params = params.copy()
self.description = _extract(params, 'description')
self.key = b(_extract(params, 'key'))
self.plaintext = b(_extract(params, 'plaintext'))
self.ciphertext = b(_extract(params, 'ciphertext'))
self.module_name = _extract(params, 'module_name', None)
self.assoc_data = _extract(params, 'assoc_data', None)
self.mac = _extract(params, 'mac', None)
if self.assoc_data:
self.mac = b(self.mac)
mode = _extract(params, 'mode', None)
self.mode_name = str(mode)
if mode is not None:
# Block cipher
self.mode = getattr(self.module, "MODE_" + mode)
self.iv = _extract(params, 'iv', None)
if self.iv is None:
self.iv = _extract(params, 'nonce', None)
if self.iv is not None:
self.iv = b(self.iv)
# Only relevant for OPENPGP mode
self.encrypted_iv = _extract(params, 'encrypted_iv', None)
if self.encrypted_iv is not None:
self.encrypted_iv = b(self.encrypted_iv)
else:
# Stream cipher
self.mode = None
self.iv = None
self.extra_params = params
def shortDescription(self):
return self.description
def _new(self, do_decryption=0):
params = self.extra_params.copy()
# Handle CTR mode parameters. By default, we use Counter.new(self.module.block_size)
if hasattr(self.module, "MODE_CTR") and self.mode == self.module.MODE_CTR:
from Crypto.Util import Counter
ctr_class = _extract(params, 'ctr_class', Counter.new)
ctr_params = _extract(params, 'ctr_params', {}).copy()
if ctr_params.has_key('prefix'): ctr_params['prefix'] = a2b_hex(b(ctr_params['prefix']))
if ctr_params.has_key('suffix'): ctr_params['suffix'] = a2b_hex(b(ctr_params['suffix']))
if not ctr_params.has_key('nbits'):
ctr_params['nbits'] = 8*(self.module.block_size - len(ctr_params.get('prefix', '')) - len(ctr_params.get('suffix', '')))
params['counter'] = ctr_class(**ctr_params)
if self.mode is None:
# Stream cipher
return self.module.new(a2b_hex(self.key), **params)
elif self.iv is None:
# Block cipher without iv
return self.module.new(a2b_hex(self.key), self.mode, **params)
else:
# Block cipher with iv
if do_decryption and self.mode == self.module.MODE_OPENPGP:
# In PGP mode, the IV to feed for decryption is the *encrypted* one
return self.module.new(a2b_hex(self.key), self.mode, a2b_hex(self.encrypted_iv), **params)
else:
return self.module.new(a2b_hex(self.key), self.mode, a2b_hex(self.iv), **params)
def isMode(self, name):
if not hasattr(self.module, "MODE_"+name):
return False
return self.mode == getattr(self.module, "MODE_"+name)
def runTest(self):
plaintext = a2b_hex(self.plaintext)
ciphertext = a2b_hex(self.ciphertext)
assoc_data = []
if self.assoc_data:
assoc_data = [ a2b_hex(b(x)) for x in self.assoc_data]
ct = None
pt = None
#
# Repeat the same encryption or decryption twice and verify
# that the result is always the same
#
for i in xrange(2):
cipher = self._new()
decipher = self._new(1)
# Only AEAD modes
for comp in assoc_data:
cipher.update(comp)
decipher.update(comp)
ctX = b2a_hex(cipher.encrypt(plaintext))
if self.isMode("SIV"):
ptX = b2a_hex(decipher.decrypt_and_verify(ciphertext, a2b_hex(self.mac)))
else:
ptX = b2a_hex(decipher.decrypt(ciphertext))
if ct:
self.assertEqual(ct, ctX)
self.assertEqual(pt, ptX)
ct, pt = ctX, ptX
if self.isMode("OPENPGP"):
# In PGP mode, data returned by the first encrypt()
# is prefixed with the encrypted IV.
# Here we check it and then remove it from the ciphertexts.
eilen = len(self.encrypted_iv)
self.assertEqual(self.encrypted_iv, ct[:eilen])
ct = ct[eilen:]
self.assertEqual(self.ciphertext, ct) # encrypt
self.assertEqual(self.plaintext, pt) # decrypt
if self.mac:
mac = b2a_hex(cipher.digest())
self.assertEqual(self.mac, mac)
decipher.verify(a2b_hex(self.mac))
class CipherStreamingSelfTest(CipherSelfTest):
def shortDescription(self):
desc = self.module_name
if self.mode is not None:
desc += " in %s mode" % (self.mode_name,)
return "%s should behave like a stream cipher" % (desc,)
def runTest(self):
plaintext = a2b_hex(self.plaintext)
ciphertext = a2b_hex(self.ciphertext)
# The cipher should work like a stream cipher
# Test counter mode encryption, 3 bytes at a time
ct3 = []
cipher = self._new()
for i in range(0, len(plaintext), 3):
ct3.append(cipher.encrypt(plaintext[i:i+3]))
ct3 = b2a_hex(b("").join(ct3))
self.assertEqual(self.ciphertext, ct3) # encryption (3 bytes at a time)
# Test counter mode decryption, 3 bytes at a time
pt3 = []
cipher = self._new()
for i in range(0, len(ciphertext), 3):
pt3.append(cipher.encrypt(ciphertext[i:i+3]))
# PY3K: This is meant to be text, do not change to bytes (data)
pt3 = b2a_hex(b("").join(pt3))
self.assertEqual(self.plaintext, pt3) # decryption (3 bytes at a time)
class CTRSegfaultTest(unittest.TestCase):
def __init__(self, module, params):
unittest.TestCase.__init__(self)
self.module = module
self.key = b(params['key'])
self.module_name = params.get('module_name', None)
def shortDescription(self):
return """Regression test: %s.new(key, %s.MODE_CTR) should raise TypeError, not segfault""" % (self.module_name, self.module_name)
def runTest(self):
self.assertRaises(TypeError, self.module.new, a2b_hex(self.key), self.module.MODE_CTR)
class CTRWraparoundTest(unittest.TestCase):
def __init__(self, module, params):
unittest.TestCase.__init__(self)
self.module = module
self.key = b(params['key'])
self.module_name = params.get('module_name', None)
def shortDescription(self):
return """Regression test: %s with MODE_CTR raising OverflowError on wraparound""" % (self.module_name,)
def runTest(self):
from Crypto.Util import Counter
def pythonCounter():
state = [0]
def ctr():
# First block succeeds; Second and subsequent blocks raise OverflowError
if state[0] == 0:
state[0] = 1
return b("\xff") * self.module.block_size
else:
raise OverflowError
return ctr
for little_endian in (0, 1): # (False, True) Test both endiannesses
block = b("\x00") * self.module.block_size
# Test PyObject_CallObject code path: if the counter raises OverflowError
cipher = self.module.new(a2b_hex(self.key), self.module.MODE_CTR, counter=pythonCounter())
cipher.encrypt(block)
self.assertRaises(OverflowError, cipher.encrypt, block)
self.assertRaises(OverflowError, cipher.encrypt, block)
# Test PyObject_CallObject code path: counter object should raise OverflowError
ctr = Counter.new(8*self.module.block_size, initial_value=2L**(8*self.module.block_size)-1, little_endian=little_endian)
ctr()
self.assertRaises(OverflowError, ctr)
self.assertRaises(OverflowError, ctr)
# Test the CTR-mode shortcut
ctr = Counter.new(8*self.module.block_size, initial_value=2L**(8*self.module.block_size)-1, little_endian=little_endian)
cipher = self.module.new(a2b_hex(self.key), self.module.MODE_CTR, counter=ctr)
cipher.encrypt(block)
self.assertRaises(OverflowError, cipher.encrypt, block)
self.assertRaises(OverflowError, cipher.encrypt, block)
class CFBSegmentSizeTest(unittest.TestCase):
def __init__(self, module, params):
unittest.TestCase.__init__(self)
self.module = module
self.key = b(params['key'])
self.description = params['description']
def shortDescription(self):
return self.description
def runTest(self):
"""Regression test: m.new(key, m.MODE_CFB, segment_size=N) should require segment_size to be a multiple of 8 bits"""
for i in range(1, 8):
self.assertRaises(ValueError, self.module.new, a2b_hex(self.key), self.module.MODE_CFB, segment_size=i)
self.module.new(a2b_hex(self.key), self.module.MODE_CFB, "\0"*self.module.block_size, segment_size=8) # should succeed
class CCMMACLengthTest(unittest.TestCase):
"""CCM specific tests about MAC"""
def __init__(self, module):
unittest.TestCase.__init__(self)
self.module = module
self.key = b('\xFF')*16
self.iv = b('\x00')*10
def shortDescription(self):
return self.description
def runTest(self):
"""Verify that MAC can only be 4,6,8,..,16 bytes long."""
for i in range(3,16,2):
self.description = "CCM MAC length check (%d bytes)" % i
self.assertRaises(ValueError, self.module.new, self.key,
self.module.MODE_CCM, self.iv, msg_len=10, mac_len=i)
"""Verify that default MAC length is 16."""
self.description = "CCM default MAC length check"
cipher = self.module.new(self.key, self.module.MODE_CCM,
self.iv, msg_len=4)
cipher.encrypt(b('z')*4)
self.assertEqual(len(cipher.digest()), 16)
class CCMSplitEncryptionTest(unittest.TestCase):
"""CCM specific tests to validate how encrypt()
decrypt() can be called multiple times on the
same object."""
def __init__(self, module):
unittest.TestCase.__init__(self)
self.module = module
self.key = b('\xFF')*16
self.iv = b('\x00')*10
self.description = "CCM Split Encryption Test"
def shortDescription(self):
return self.description
def runTest(self):
"""Verify that CCM update()/encrypt() can be called multiple times,
provided that lengths are declared beforehand"""
data = b("AUTH DATA")
pt1 = b("PLAINTEXT1") # Short
pt2 = b("PLAINTEXT2") # Long
pt_ref = pt1+pt2
# REFERENCE: Run with 1 update() and 1 encrypt()
cipher = self.module.new(self.key, self.module.MODE_CCM,
self.iv)
cipher.update(data)
ct_ref = cipher.encrypt(pt_ref)
mac_ref = cipher.digest()
# Verify that calling CCM encrypt()/decrypt() twice is not
# possible without the 'msg_len' parameter and regardless
# of the 'assoc_len' parameter
for ad_len in None, len(data):
cipher = self.module.new(self.key, self.module.MODE_CCM,
self.iv, assoc_len=ad_len)
cipher.update(data)
cipher.encrypt(pt1)
self.assertRaises(TypeError, cipher.encrypt, pt2)
cipher = self.module.new(self.key, self.module.MODE_CCM,
self.iv, assoc_len=ad_len)
cipher.update(data)
cipher.decrypt(ct_ref[:len(pt1)])
self.assertRaises(TypeError, cipher.decrypt, ct_ref[len(pt1):])
# Run with 2 encrypt()/decrypt(). Results must be the same
# regardless of the 'assoc_len' parameter
for ad_len in None, len(data):
cipher = self.module.new(self.key, self.module.MODE_CCM,
self.iv, assoc_len=ad_len, msg_len=len(pt_ref))
cipher.update(data)
ct = cipher.encrypt(pt1)
ct += cipher.encrypt(pt2)
mac = cipher.digest()
self.assertEqual(ct_ref, ct)
self.assertEqual(mac_ref, mac)
cipher = self.module.new(self.key, self.module.MODE_CCM,
self.iv, msg_len=len(pt1+pt2))
cipher.update(data)
pt = cipher.decrypt(ct[:len(pt1)])
pt += cipher.decrypt(ct[len(pt1):])
mac = cipher.verify(mac_ref)
self.assertEqual(pt_ref, pt)
class AEADTests(unittest.TestCase):
"""Tests generic to all AEAD modes"""
def __init__(self, module, mode_name, key_size):
unittest.TestCase.__init__(self)
self.module = module
self.mode_name = mode_name
self.mode = getattr(module, mode_name)
if not self.isMode("SIV"):
self.key = b('\xFF')*key_size
else:
self.key = b('\xFF')*key_size*2
self.iv = b('\x00')*10
self.description = "AEAD Test"
def isMode(self, name):
if not hasattr(self.module, "MODE_"+name):
return False
return self.mode == getattr(self.module, "MODE_"+name)
def right_mac_test(self):
"""Positive tests for MAC"""
self.description = "Test for right MAC in %s of %s" % \
(self.mode_name, self.module.__name__)
ad_ref = b("Reference AD")
pt_ref = b("Reference plaintext")
# Encrypt and create the reference MAC
cipher = self.module.new(self.key, self.mode, self.iv)
cipher.update(ad_ref)
ct_ref = cipher.encrypt(pt_ref)
mac_ref = cipher.digest()
# Decrypt and verify that MAC is accepted
decipher = self.module.new(self.key, self.mode, self.iv)
decipher.update(ad_ref)
pt = decipher.decrypt_and_verify(ct_ref, mac_ref)
self.assertEqual(pt, pt_ref)
# Verify that hexverify work
decipher.hexverify(hexlify(mac_ref))
def wrong_mac_test(self):
"""Negative tests for MAC"""
self.description = "Test for wrong MAC in %s of %s" % \
(self.mode_name, self.module.__name__)
ad_ref = b("Reference AD")
pt_ref = b("Reference plaintext")
# Encrypt and create the reference MAC
cipher = self.module.new(self.key, self.mode, self.iv)
cipher.update(ad_ref)
ct_ref = cipher.encrypt(pt_ref)
mac_ref = cipher.digest()
# Modify the MAC and verify it is NOT ACCEPTED
wrong_mac = strxor_c(mac_ref, 255)
decipher = self.module.new(self.key, self.mode, self.iv)
decipher.update(ad_ref)
self.assertRaises(ValueError, decipher.decrypt_and_verify,
ct_ref, wrong_mac)
def zero_data(self):
"""Verify transition from INITIALIZED to FINISHED"""
self.description = "Test for zero data in %s of %s" % \
(self.mode_name, self.module.__name__)
cipher = self.module.new(self.key, self.mode, self.iv)
cipher.digest()
def multiple_updates(self):
"""Verify that update() can be called multiple times"""
self.description = "Test for multiple updates in %s of %s" % \
(self.mode_name, self.module.__name__)
# In all modes other than SIV, the associated data is a single
# component that can be arbitrarilly split and submitted to update().
#
# In SIV, associated data is instead organized in a vector or multiple
# components. Each component is passed to update() as a whole.
# This test is therefore not meaningful to SIV.
if self.isMode("SIV"):
return
ad = b("").join([bchr(x) for x in xrange(0,128)])
mac1, mac2, mac3 = (None,)*3
for chunk_length in 1,10,40,80,128:
chunks = [ad[i:i+chunk_length] for i in range(0, len(ad), chunk_length)]
# No encryption/decryption
cipher = self.module.new(self.key, self.mode, self.iv)
for c in chunks:
cipher.update(c)
if mac1:
cipher.verify(mac1)
else:
mac1 = cipher.digest()
# Encryption
cipher = self.module.new(self.key, self.mode, self.iv)
for c in chunks:
cipher.update(c)
ct = cipher.encrypt(b("PT"))
mac2 = cipher.digest()
# Decryption
cipher = self.module.new(self.key, self.mode, self.iv)
for c in chunks:
cipher.update(c)
cipher.decrypt(ct)
cipher.verify(mac2)
def no_mix_encrypt_decrypt(self):
"""Verify that encrypt and decrypt cannot be mixed up"""
self.description = "Test for mix of encrypt and decrypt in %s of %s" % \
(self.mode_name, self.module.__name__)
# Calling decrypt after encrypt raises an exception
cipher = self.module.new(self.key, self.mode, self.iv)
cipher.encrypt(b("PT")*40)
self.assertRaises(TypeError, cipher.decrypt, b("XYZ")*40)
# Calling encrypt() after decrypt() raises an exception
# (excluded for SIV, since decrypt() is not valid)
if not self.isMode("SIV"):
cipher = self.module.new(self.key, self.mode, self.iv)
cipher.decrypt(b("CT")*40)
self.assertRaises(TypeError, cipher.encrypt, b("XYZ")*40)
# Calling verify after encrypt raises an exception
cipher = self.module.new(self.key, self.mode, self.iv)
cipher.encrypt(b("PT")*40)
self.assertRaises(TypeError, cipher.verify, b("XYZ"))
self.assertRaises(TypeError, cipher.hexverify, "12")
# Calling digest() after decrypt() raises an exception
# (excluded for SIV, since decrypt() is not valid)
if not self.isMode("SIV"):
cipher = self.module.new(self.key, self.mode, self.iv)
cipher.decrypt(b("CT")*40)
self.assertRaises(TypeError, cipher.digest)
self.assertRaises(TypeError, cipher.hexdigest)
def no_late_update(self):
"""Verify that update cannot be called after encrypt or decrypt"""
self.description = "Test for late update in %s of %s" % \
(self.mode_name, self.module.__name__)
# Calling update after encrypt raises an exception
cipher = self.module.new(self.key, self.mode, self.iv)
cipher.update(b("XX"))
cipher.encrypt(b("PT")*40)
self.assertRaises(TypeError, cipher.update, b("XYZ"))
# Calling update() after decrypt() raises an exception
# (excluded for SIV, since decrypt() is not valid)
if not self.isMode("SIV"):
cipher = self.module.new(self.key, self.mode, self.iv)
cipher.update(b("XX"))
cipher.decrypt(b("CT")*40)
self.assertRaises(TypeError, cipher.update, b("XYZ"))
def loopback(self):
"""Verify composition of encrypt_and_digest() and decrypt_and_verify()
is the identity function."""
self.description = "Lookback test decrypt_and_verify(encrypt_and_digest)"\
"for %s in %s" % (self.mode_name,
self.module.__name__)
enc_cipher = self.module.new(self.key, self.mode, self.iv)
dec_cipher = self.module.new(self.key, self.mode, self.iv)
enc_cipher.update(b("XXX"))
dec_cipher.update(b("XXX"))
plaintext = b("Reference") * 10
ct, mac = enc_cipher.encrypt_and_digest(plaintext)
pt = dec_cipher.decrypt_and_verify(ct, mac)
self.assertEqual(plaintext, pt)
def runTest(self):
self.right_mac_test()
self.wrong_mac_test()
self.zero_data()
self.multiple_updates()
self.no_mix_encrypt_decrypt()
self.no_late_update()
self.loopback()
def shortDescription(self):
return self.description
class RoundtripTest(unittest.TestCase):
def __init__(self, module, params):
from Crypto import Random
unittest.TestCase.__init__(self)
self.module = module
self.iv = Random.get_random_bytes(module.block_size)
self.key = b(params['key'])
self.plaintext = 100 * b(params['plaintext'])
self.module_name = params.get('module_name', None)
def shortDescription(self):
return """%s .decrypt() output of .encrypt() should not be garbled""" % (self.module_name,)
def runTest(self):
## ECB mode
mode = self.module.MODE_ECB
encryption_cipher = self.module.new(a2b_hex(self.key), mode)
ciphertext = encryption_cipher.encrypt(self.plaintext)
decryption_cipher = self.module.new(a2b_hex(self.key), mode)
decrypted_plaintext = decryption_cipher.decrypt(ciphertext)
self.assertEqual(self.plaintext, decrypted_plaintext)
## OPENPGP mode
mode = self.module.MODE_OPENPGP
encryption_cipher = self.module.new(a2b_hex(self.key), mode, self.iv)
eiv_ciphertext = encryption_cipher.encrypt(self.plaintext)
eiv = eiv_ciphertext[:self.module.block_size+2]
ciphertext = eiv_ciphertext[self.module.block_size+2:]
decryption_cipher = self.module.new(a2b_hex(self.key), mode, eiv)
decrypted_plaintext = decryption_cipher.decrypt(ciphertext)
self.assertEqual(self.plaintext, decrypted_plaintext)
## All other non-AEAD modes (but CTR)
for mode in (self.module.MODE_CBC, self.module.MODE_CFB, self.module.MODE_OFB):
encryption_cipher = self.module.new(a2b_hex(self.key), mode, self.iv)
ciphertext = encryption_cipher.encrypt(self.plaintext)
decryption_cipher = self.module.new(a2b_hex(self.key), mode, self.iv)
decrypted_plaintext = decryption_cipher.decrypt(ciphertext)
self.assertEqual(self.plaintext, decrypted_plaintext)
class PGPTest(unittest.TestCase):
def __init__(self, module, params):
unittest.TestCase.__init__(self)
self.module = module
self.key = b(params['key'])
def shortDescription(self):
return "MODE_PGP was implemented incorrectly and insecurely. It's completely banished now."
def runTest(self):
self.assertRaises(ValueError, self.module.new, a2b_hex(self.key),
self.module.MODE_PGP)
class IVLengthTest(unittest.TestCase):
def __init__(self, module, params):
unittest.TestCase.__init__(self)
self.module = module
self.key = b(params['key'])
def shortDescription(self):
return "Check that all modes except MODE_ECB and MODE_CTR require an IV of the proper length"
def runTest(self):
self.assertRaises(ValueError, self.module.new, a2b_hex(self.key),
self.module.MODE_CBC, "")
self.assertRaises(ValueError, self.module.new, a2b_hex(self.key),
self.module.MODE_CFB, "")
self.assertRaises(ValueError, self.module.new, a2b_hex(self.key),
self.module.MODE_OFB, "")
self.assertRaises(ValueError, self.module.new, a2b_hex(self.key),
self.module.MODE_OPENPGP, "")
if hasattr(self.module, "MODE_CCM"):
for ivlen in (0,6,14):
self.assertRaises(ValueError, self.module.new, a2b_hex(self.key),
self.module.MODE_CCM, bchr(0)*ivlen, msg_len=10)
self.module.new(a2b_hex(self.key), self.module.MODE_ECB, "")
self.module.new(a2b_hex(self.key), self.module.MODE_CTR, "", counter=self._dummy_counter)
def _dummy_counter(self):
return "\0" * self.module.block_size
def make_block_tests(module, module_name, test_data, additional_params=dict()):
tests = []
extra_tests_added = 0
for i in range(len(test_data)):
row = test_data[i]
# Build the "params" dictionary
params = {'mode': 'ECB'}
if len(row) == 3:
(params['plaintext'], params['ciphertext'], params['key']) = row
elif len(row) == 4:
(params['plaintext'], params['ciphertext'], params['key'], params['description']) = row
elif len(row) == 5:
(params['plaintext'], params['ciphertext'], params['key'], params['description'], extra_params) = row
params.update(extra_params)
else:
raise AssertionError("Unsupported tuple size %d" % (len(row),))
# Build the display-name for the test
p2 = params.copy()
p_key = _extract(p2, 'key')
p_plaintext = _extract(p2, 'plaintext')
p_ciphertext = _extract(p2, 'ciphertext')
p_description = _extract(p2, 'description', None)
p_mode = p2.get('mode', 'ECB')
if p_mode == 'ECB':
_extract(p2, 'mode', 'ECB')
if p_description is not None:
description = p_description
elif p_mode == 'ECB' and not p2:
description = "p=%s, k=%s" % (p_plaintext, p_key)
else:
description = "p=%s, k=%s, %r" % (p_plaintext, p_key, p2)
name = "%s #%d: %s" % (module_name, i+1, description)
params['description'] = name
params['module_name'] = module_name
params.update(additional_params)
# Add extra test(s) to the test suite before the current test
if not extra_tests_added:
tests += [
CTRSegfaultTest(module, params),
CTRWraparoundTest(module, params),
CFBSegmentSizeTest(module, params),
RoundtripTest(module, params),
PGPTest(module, params),
IVLengthTest(module, params),
]
extra_tests_added = 1
# Extract associated data and MAC for AEAD modes
if p_mode in ('CCM', 'EAX', 'SIV', 'GCM'):
assoc_data, params['plaintext'] = params['plaintext'].split('|')
assoc_data2, params['ciphertext'], params['mac'] = params['ciphertext'].split('|')
params['assoc_data'] = assoc_data.split("-")
params['mac_len'] = len(params['mac'])>>1
# Add the current test to the test suite
tests.append(CipherSelfTest(module, params))
# When using CTR mode, test that the interface behaves like a stream cipher
if p_mode in ('OFB', 'CTR'):
tests.append(CipherStreamingSelfTest(module, params))
# When using CTR mode, test the non-shortcut code path.
if p_mode == 'CTR' and not params.has_key('ctr_class'):
params2 = params.copy()
params2['description'] += " (shortcut disabled)"
ctr_params2 = params.get('ctr_params', {}).copy()
params2['ctr_params'] = ctr_params2
if not params2['ctr_params'].has_key('disable_shortcut'):
params2['ctr_params']['disable_shortcut'] = 1
tests.append(CipherSelfTest(module, params2))
# Add tests that don't use test vectors
if hasattr(module, "MODE_CCM"):
tests += [
CCMMACLengthTest(module),
CCMSplitEncryptionTest(module),
]
for aead_mode in ("MODE_CCM","MODE_EAX", "MODE_SIV", "MODE_GCM"):
if hasattr(module, aead_mode):
key_sizes = []
try:
key_sizes += module.key_size
except TypeError:
key_sizes = [ module.key_size ]
for ks in key_sizes:
tests += [
AEADTests(module, aead_mode, ks),
]
return tests
def make_stream_tests(module, module_name, test_data):
    """Build the self-test cases for a stream cipher module.

    module      -- the cipher module under test
    module_name -- human-readable module name used in test descriptions
    test_data   -- sequence of tuples of hex-encoded test vectors; each
                   tuple is (plaintext, ciphertext, key[, description
                   [, extra_params]])

    Returns a list containing a CipherSelfTest and a
    CipherStreamingSelfTest for every vector.

    Raises AssertionError for tuples of an unsupported length.
    """
    tests = []
    for idx, row in enumerate(test_data):
        # Unpack the vector tuple into the "params" dictionary.
        params = {}
        if len(row) == 3:
            params['plaintext'], params['ciphertext'], params['key'] = row
        elif len(row) == 4:
            (params['plaintext'], params['ciphertext'],
             params['key'], params['description']) = row
        elif len(row) == 5:
            (params['plaintext'], params['ciphertext'],
             params['key'], params['description'], extra_params) = row
            params.update(extra_params)
        else:
            raise AssertionError("Unsupported tuple size %d" % (len(row),))

        # Compose the display name for this test.  Any params not consumed
        # by _extract are shown verbatim so unusual vectors stand out.
        leftovers = params.copy()
        p_key = _extract(leftovers, 'key')
        p_plaintext = _extract(leftovers, 'plaintext')
        p_ciphertext = _extract(leftovers, 'ciphertext')
        p_description = _extract(leftovers, 'description', None)

        if p_description is not None:
            description = p_description
        elif not leftovers:
            description = "p=%s, k=%s" % (p_plaintext, p_key)
        else:
            description = "p=%s, k=%s, %r" % (p_plaintext, p_key, leftovers)
        params['description'] = "%s #%d: %s" % (module_name, idx + 1, description)
        params['module_name'] = module_name

        # Every vector gets both a basic round-trip test and a test that
        # the cipher behaves like a stream (byte-at-a-time) interface.
        tests.append(CipherSelfTest(module, params))
        tests.append(CipherStreamingSelfTest(module, params))
    return tests
# vim:set ts=4 sw=4 sts=4 expandtab:
|
5871_0
|
crossvul
|
py
|
CWE-119
|
Improper Restriction of Operations within the Bounds of a Memory Buffer - Certain languages allow direct addressing of memory locations and do not automatically ensure that these locations are valid for the memory buffer that is being referenced.
|
javascript
|
//-------------------------------------------------------------------------------------------------------
// Copyright (C) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE.txt file in the project root for full license information.
//-------------------------------------------------------------------------------------------------------

// Regression test: `super` references inside arrow functions declared in a
// derived-class constructor, where the arrow-function parameter names (`B`,
// `A`) shadow the enclosing class names, and additional async arrow
// functions use those same shadowing parameter names.  The exact syntactic
// shape matters here — it exercises specific parser/bytecode paths.
var count = 0;
class A {
    constructor() { count++; }
    increment() { count++; }
}
class B extends A {
    constructor() {
        super();
        // Arrow functions capture the constructor's [[HomeObject]], so
        // super.increment() must still resolve even though the parameters
        // shadow the class bindings.
        ((B) => { super.increment() })();
        (A=> { super.increment() })();
        // Async arrows with shadowing parameter names; never awaited —
        // only their parse/compile behavior is under test.
        let C = async (B) => { B };
        let D = async A => { A };
    }
}
let b = new B();
// super() once + two super.increment() calls => count should be 3.
if (count !== 3) {
    WScript.Echo('fail');
}
WScript.Echo('pass');
|
//-------------------------------------------------------------------------------------------------------
// Copyright (C) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE.txt file in the project root for full license information.
//-------------------------------------------------------------------------------------------------------

// Regression test: same as the base variant (super inside arrows whose
// parameter names shadow the class names), extended with a derived class
// literally named `async` to exercise the parser's handling of `async` as
// an identifier.  The exact syntactic shape matters — do not reformat.
var count = 0;
class A {
    constructor() { count++; }
    increment() { count++; }
}
class B extends A {
    constructor() {
        super();
        // Arrows capture the constructor's [[HomeObject]]; super.increment()
        // must resolve despite the shadowing parameter names.
        ((B) => { super.increment() })();
        (A=> { super.increment() })();
        // Async arrows with shadowing parameter names; never awaited.
        let C = async (B) => { B };
        let D = async A => { A };
    }
}
let b = new B();
// `async` used as a class name — a contextual-keyword edge case.
class async extends A {
    constructor() {
        super();
        // `async A => ...` inside a class named `async`: ensures the parser
        // does not confuse the binding with the enclosing class name.
        let Q = async A => { A };
    }
}
let a = new async();
// B: super() + two increments = 3; new async(): super() = 1 more => 4.
if (count !== 4) {
    WScript.Echo('fail');
}
WScript.Echo('pass');
|
2485_1
|
crossvul
|
js
|
CWE-119
|
Improper Restriction of Operations within the Bounds of a Memory Buffer - Certain languages allow direct addressing of memory locations and do not automatically ensure that these locations are valid for the memory buffer that is being referenced.
|
javascript
|
//-------------------------------------------------------------------------------------------------------
// Copyright (C) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE.txt file in the project root for full license information.
//-------------------------------------------------------------------------------------------------------
WScript.LoadScriptFile("..\\UnitTestFramework\\UnitTestFramework.js");
var tests = [
{
name: "Split parameter scope in function definition",
body: function () {
function f1(a = 10, b = function () { return a; }) {
assert.areEqual(10, a, "Initial value of parameter in the body scope should be the same as the one in param scope");
var a = 20;
assert.areEqual(20, a, "New assignment in the body scope updates the variable's value in body scope");
return b;
}
assert.areEqual(10, f1()(), "Function defined in the param scope captures the formals from the param scope not body scope");
function f2(a = 10, b = function () { return a; }, c = b() + a) {
assert.areEqual(10, a, "Initial value of parameter in the body scope should be the same as the one in param scope");
assert.areEqual(20, c, "Initial value of the third parameter in the body scope should be twice the value of the first parameter");
var a = 20;
assert.areEqual(20, a, "New assignment in the body scope updates the variable's value in body scope");
return b;
}
assert.areEqual(10, f2()(), "Function defined in the param scope captures the formals from the param scope not body scope");
function f3(a = 10, b = function () { return a; }) {
assert.areEqual(1, a, "Initial value of parameter in the body scope should be the same as the one passed in");
var a = 20;
assert.areEqual(20, a, "Assignment in the body scope updates the variable's value in body scope");
return b;
}
assert.areEqual(f3(1)(), 1, "Function defined in the param scope captures the formals from the param scope even when the default expression is not applied for that param");
(function (a = 10, b = a += 10, c = function () { return a + b; }) {
assert.areEqual(20, a, "Initial value of parameter in the body scope should be same as the corresponding symbol's final value in the param scope");
var a2 = 40;
(function () { assert.areEqual(40, a2, "Symbols defined in the body scope should be unaffected by the duplicate formal symbols"); })();
assert.areEqual(40, c(), "Function defined in param scope uses the formals from param scope even when executed inside the body");
})();
(function (a = 10, b = function () { assert.areEqual(10, a, "Function defined in the param scope captures the formals from the param scope when executed from the param scope"); }, c = b()) {
})();
function f4(a = 10, b = function () { return a; }) {
a = 20;
return b;
}
assert.areEqual(10, f4()(), "Even if the formals are not redeclared in the function body the symbol in the param scope and body scope are different");
function f5(a = 10, b = function () { return function () { return a; }; }) {
var a = 20;
return b;
}
assert.areEqual(10, f5()()(), "Parameter scope works fine with nested functions");
var a1 = 10;
function f6(a, b = function () { a; return a1; }) {
assert.areEqual(undefined, a1, "Inside the function body the assignment hasn't happened yet");
var a1 = 20;
assert.areEqual(20, a1, "Assignment to the symbol inside the function changes the value");
return b;
}
assert.areEqual(10, f6()(), "Function in the param scope correctly binds to the outer variable");
function f7(a = 10, b = { iFnc () { return a; } }) {
a = 20;
return b;
}
assert.areEqual(10, f7().iFnc(), "Function definition inside the object literal should capture the formal from the param scope");
var f8 = function (a, b = ((function() { assert.areEqual('string1', a, "First arguemnt receives the right value"); })(), 1), c) {
var d = 'string3';
(function () { assert.areEqual('string3', d, "Var declaration in the body is initialized properly"); })();
return c;
};
assert.areEqual('string2', f8('string1', undefined, 'string2'), "Function returns the third argument properly");
function f9() {
var f10 = function (a = function () { c; }, b, c) {
assert.areEqual(1, c, "Third argument is properly populated");
arguments;
function f11() {};
};
f10(undefined, undefined, 1);
}
f9();
f9();
function f12() {
var result = ((a = (w = a => a * a) => w) => a)()()(10);
assert.areEqual(100, result, "The inner lambda function properly maps to the right symbol for a");
};
f12();
}
},
{
name: "Split parameter scope and function expressions with name",
body: function () {
function f1(a = 10, b = function c() { return a; }) {
assert.areEqual(10, a, "Initial value of parameter in the body scope of the method should be the same as the one in param scope");
var a = 20;
assert.areEqual(20, a, "New assignment in the body scope of the method updates the variable's value in body scope");
return b;
}
assert.areEqual(10, f1()(), "Function expression defined in the param scope captures the formals from the param scope not body scope");
function f2(a = 10, b = function c(recurse = true) { return recurse ? c(false) : a; }) {
return b;
}
assert.areEqual(10, f2()(), "Recursive function expression defined in the param scope captures the formals from the param scope not body scope");
assert.areEqual(10, f2()(), "Recursive function expression defined in the param scope captures the formals from the param scope not body scope");
var f3 = function f4 (a = function ( ) { b; return f4(20); }, b) {
if (a == 20) {
return 10;
}
return a;
}
assert.areEqual(10, f3()(), "Recursive call to the function from the param scope returns the right value");
var f5 = function f6 (a = function ( ) { b; return f6; }, b) {
if (a == 20) {
return 10;
}
return a;
}
assert.areEqual(10, f5()()(20), "Recursive call to the function from the param scope returns the right value");
var f7 = function f8 (a = function ( ) { b; }, b) {
if (a == 20) {
return 10;
}
var a = function () { return f8(20); };
return a;
}
assert.areEqual(10, f7()(), "Recursive call to the function from the body scope returns the right value");
var f9 = function f10 (a = function ( ) { b; return f10(20); }, b) {
eval("");
if (a == 20) {
return 10;
}
return a;
}
assert.areEqual(10, f9()(), "Recursive call to the function from the param scope returns the right value when eval is there in the body");
var f11 = function f12 (a = function ( ) { b; }, b) {
eval("");
if (a == 20) {
return 10;
}
var a = function () { return f12(20); };
return a;
}
assert.areEqual(10, f11()(), "Recursive call to the function from the body scope returns the right value when eval is there in the body");
}
},
{
name: "Split parameter scope in member functions",
body: function () {
var o1 = {
f(a = 10, b = function () { return a; }) {
assert.areEqual(10, a, "Initial value of parameter in the body scope of the method should be the same as the one in param scope");
var a = 20;
assert.areEqual(20, a, "New assignment in the body scope of the method updates the variable's value in body scope");
return b;
}
}
assert.areEqual(o1.f()(), 10, "Function defined in the param scope of the object method captures the formals from the param scope not body scope");
var o2 = {
f1(a = 10, b = function () { return { f2 () { return a; } } }) {
var a = 20;
c = function () { return { f2 () { return a; } } };
return [b, c];
}
}
var result = o2.f1();
assert.areEqual(10, result[0]().f2(), "Short hand method defined in the param scope of the object method captures the formals from the param scope not body scope");
assert.areEqual(20, result[1]().f2(), "Short hand method defined in the param scope of the object method captures the formals from the param scope not body scope");
}
},
{
name: "Arrow functions in split param scope",
body: function () {
function f1(a = 10, b = () => { return a; }) {
assert.areEqual(10, a, "Initial value of parameter in the body scope should be the same as the one in param scope");
var a = 20;
assert.areEqual(20, a, "New assignment in the body scope updates the variable's value in body scope");
return b;
}
assert.areEqual(10, f1()(), "Arrow functions defined in the param scope captures the formals from the param scope not body scope");
function f2(a = 10, b = () => { return a; }) {
assert.areEqual(1, a, "Initial value of parameter in the body scope should be the same as the one in param scope");
var a = 20;
assert.areEqual(20, a, "New assignment in the body scope updates the variable's value in body scope");
return b;
}
assert.areEqual(1, f2(1)(), "Arrow functions defined in the param scope captures the formals from the param scope not body scope even when value is passed");
function f3(a = 10, b = () => a) {
assert.areEqual(10, a, "Initial value of parameter in the body scope should be the same as the one in param scope");
var a = 20;
assert.areEqual(20, a, "New assignment in the body scope updates the variable's value in body scope");
return b;
}
assert.areEqual(10, f3()(), "Arrow functions with concise body defined in the param scope captures the formals from the param scope not body scope");
((a = 10, b = a += 10, c = () => { assert.areEqual(20, a, "Value of the first formal inside the lambda should be same as the default value"); return a + b; }, d = c() * 10) => {
assert.areEqual(d, 400, "Initial value of the formal parameter inside the body should be the same as final value from the param scope");
})();
function f4(a = 10, b = () => { return () => a; }) {
a = 20;
return b;
}
assert.areEqual(10, f4()()(), "Nested lambda should capture the formal param value from the param scope");
assert.throws(function f4(a = () => x) { var x = 1; a(); }, ReferenceError, "Lambdas in param scope shouldn't be able to access the variables from body", "'x' is undefined");
assert.throws(function f5() { (function (a = () => x) { var x = 1; return a; })()(); }, ReferenceError, "Lambdas in param scope shouldn't be able to access the variables from body", "'x' is undefined");
assert.throws((a = () => 10, b = a() + c, c = 10) => {}, ReferenceError, "Formals defined to the right shouldn't be usable in lambdas", "Use before declaration");
}
},
{
name: "Split parameter scope with Rest",
body: function () {
var arr = [2, 3, 4];
function f1(a = 10, b = function () { return a; }, ...c) {
assert.areEqual(arr.length, c.length, "Rest parameter should contain the same number of elements as the spread arg");
for (i = 0; i < arr.length; i++) {
assert.areEqual(arr[i], c[i], "Elements in the rest and the spread should be in the same order");
}
return b;
}
assert.areEqual(f1(undefined, undefined, ...arr)(), 10, "Presence of rest parameter shouldn't affect the binding");
((a = 10, b = () => a, ...c) => {
assert.areEqual(arr.length, c.length, "Rest parameter should contain the same number of elements as the spread arg");
for (i = 0; i < arr.length; i++) {
assert.areEqual(arr[i], c[i], "Elements in the rest and the spread should be in the same order");
}
return b;
})(undefined, undefined, ...arr);
}
},
{
name: "Split parameter scope with this",
body: function () {
function f1(a = this.x, b = function() { assert.areEqual(100, this.x, "this object for the function in param scope is passed from the final call site"); return a; }) {
assert.areEqual(10, this.x, "this objects property retains the value from param scope");
a = 20;
return b;
}
assert.areEqual(10, f1.call({x : 10}).call({x : 100}), "Arrow functions defined in the param scope captures the formals from the param scope not body scope");
(function (a = this.x, b = function() {this.x = 20; return a;}) {
assert.areEqual(10, this.x, "this objects property retains the value in param scope before the inner function call");
b.call(this);
assert.areEqual(20, this.x, "Update to a this's property from the param scope is reflected in the body scope");
}).call({x : 10});
this.x = 10;
((a = this.x, b = function() { a; this.x = 20; }) => {
assert.areEqual(10, this.x, "this objects property retains the value in param scope before the inner function call in lambda");
b.call(this);
assert.areEqual(20, this.x, "Update to a this's property from the param scope of lambda function is reflected in the body scope");
})();
function f2(a = function() { return this.x; }, b = this.y, c = a.call({x : 20}) + b) {
assert.areEqual(undefined, this.x, "this object remains unaffected");
return c;
}
assert.areEqual(30, f2.call({y : 10}), "Properties are accessed from the right this object");
var thisObj = {x : 1, y : 20 };
function f3(a, b = () => { a; this.x = 10; return this.y; }) {
assert.areEqual(1, this.x, "Assignment from the param scope has not happened yet");
assert.areEqual(20, this.y, "y property of the this object is not affected");
return b;
}
assert.areEqual(20, f3.call(thisObj)(), "Lambda defined in the param scope returns the right property value from thisObj");
assert.areEqual(10, thisObj.x, "Assignment from the param scope method updates thisObj's property");
function f4(a, b = () => { a; return this; }) {
return b;
}
assert.areEqual(thisObj, f4.call(thisObj)(), "Lambda defined in the param scope returns the right this object");
var thisObj = { x : 1 };
function f5() {
return (a = this, b = function() { return a; }) => b;
}
assert.areEqual(thisObj, f5.call(thisObj)()(), "This object is returned properly from the inner lambda method's child function");
function f6(a, b = function () { return a; }) {
return (a = this, b = function() { return a; }) => b;
}
assert.areEqual(thisObj, f6.call(thisObj)()(), "This object is returned properly from the inner lambda defnied inside a split scoped function");
function f7(a, b = function () { return a; }) {
function f8() {
return (a = this, b = function() { return a; }) => b;
}
return f8.call(this);
}
assert.areEqual(thisObj, f7.call(thisObj)()(), "This object is returned properly from the inner lambda defnied inside a nested split scoped function");
function f9(a, b = function () { return a; }) {
function f10(c, d = function () { c; }) {
return (a = this, b = function() { return a; }) => b;
}
return f10.call(this);
}
assert.areEqual(thisObj, f9.call(thisObj)()(), "This object is returned properly from the inner lambda defnied inside a double nested split scoped function");
function f11(a = this.x * 10, b = () => { a; return this; }) {
assert.areEqual(10, a, "this should be accessible in the parameter scope");
assert.areEqual(thisObj, this, "Body scope should get the right value for this object");
assert.isTrue(eval("thisObj == this"), "Eval should be able to access the this object properly");
return b;
}
assert.areEqual(thisObj, f11.call(thisObj)(), "Lambda defined in the param scope returns the right this object");
function f12(a = this.x * 10, b = () => { a; return this; }) {
var c = 100;
assert.areEqual(10, a, "this should be accessible in the parameter scope");
assert.areEqual(thisObj, this, "Body scope should get the right value for this object");
assert.isTrue(eval("thisObj == this"), "Eval should be able to access the this object properly");
assert.areEqual(thisObj, (() => this)(), "Lambda should capture the this object from body properly");
assert.areEqual(100, c, "Body variable should be unaffected by the slot allocation of this object");
return b;
}
assert.areEqual(thisObj, f12.call(thisObj)(), "Lambda defined in the param scope returns the right this object");
function f13(a = 10, b = () => { a; return this; }) {
var c = 100;
assert.areEqual(thisObj, this, "Body scope should get the right value for this object");
var d = () => this;
this.x = 5;
assert.isTrue(eval("this.x == 5"), "Eval should be able to access the this object properly after the field is updated");
assert.isTrue(eval("d().x == 5"), "Lambda should capture the this symbol from the body properly");
assert.isTrue(eval("a == 10"), "Eval should be able to access the first parameter properly");
assert.isTrue(eval("b().x == 5"), "Lambda from the param scope should capture the this symbol properly");
assert.isTrue(eval("d().x == 5"), "Lambda should capture the this symbol from the body properly");
return b;
}
assert.areEqual(5, f13.call(thisObj)().x, "Lambda defined in the param scope returns the same this object as the one in body");
}
},
{
name: "Split parameter scope and class",
body: function () {
class c {
f(a = 10, d, b = function () { return a; }, c) {
assert.areEqual(10, a, "Initial value of parameter in the body scope in class method should be the same as the one in param scope");
var a = 20;
assert.areEqual(20, a, "Assignment in the class method body updates the value of the variable");
return b;
}
}
assert.areEqual(10, (new c()).f()(), "Method defined in the param scope of the class should capture the formal from the param scope itself");
function f1(a = 10, d, b = class { method1() { return a; } }, c) {
var a = 20;
assert.areEqual(10, (new b()).method1(), "Class method defined within the param scope should capture the formal from the param scope");
return b;
}
var result = f1();
assert.areEqual(10, (new result()).method1(), "Methods defined in a class defined in param scope should capture the formals form that param scope itself");
class c2 {
f1(a = 10, d, b = function () { a = this.f2(); return a; }, c) {
assert.areEqual(30, this.f2(), "this object in the body points to the right this object");
return b;
};
f2() {
return 30;
}
}
var f2Obj = new c2();
assert.areEqual(100, f2Obj.f1().call({f2() { return 100; }}), "Method defined in the param uses its own this object while updating the formal");
function f2(a = 10, d, b = class { method1() { return class { method2() { return a; }} } }, c) {
a = 20;
return b;
}
var obj1 = f2();
var obj2 = (new obj1()).method1();
assert.areEqual(10, (new obj2()).method2(), "Nested class definition in the param scope should capture the formals from the param scope");
var actualArray = [2, 3, 4];
class c3 {
f(a = 10, b = () => { return c; }, ...c) {
assert.areEqual(actualArray.length, c.length, "Rest param and the actual array should have the same length");
for (var i = 0; i < c.length; i++) {
assert.areEqual(actualArray[i], c[i], "Rest parameter should have the same value as the actual array");
}
c = [];
return b;
}
}
result = (new c3()).f(undefined, undefined, ...[2, 3, 4])();
assert.areEqual(actualArray.length, result.length, "The result and the actual array should have the same length");
for (var i = 0; i < result.length; i++) {
assert.areEqual(actualArray[i], result[i], "The result array should have the same value as the actual array");
}
class c4 {
f({x:x = 10, y:y = () => { return x; }}) {
assert.areEqual(10, x, "Initial value of destructure parameter in the body scope in class method should be the same as the one in param scope");
x = 20;
assert.areEqual(20, x, "Assignment in the class method body updates the value of the variable");
return y;
}
}
assert.areEqual(10, (new c4()).f({})(), "The method defined as the default destructured value of the parameter should capture the formal from the param scope");
function f3(a = 10, d, b = (function () { return a; }, class { method1() { return a; } }), c) {
var a = 20;
assert.areEqual(10, (new b()).method1(), "Class method defined within the param scope should capture the formal from the param scope");
return b;
}
result = f3();
assert.areEqual(10, (new result()).method1(), "Methods defined in a class defined, after another function definition, in the param scope should capture the formals form that param scope itself");
function f4(a = 10, d, b = (function () { return a; }, class {}, class { method1() { return a; } }), c) {
var a = 20;
return b;
}
result = f4();
assert.areEqual(10, (new result()).method1(), "Methods defined in a class defined, after another class definition, in the param scope should capture the formals form that param scope itself");
function f5(a = 10, d, b = (function () { return a; }, class {}, function () {}, class { method1() { return a; } }), c) {
var a = 20;
return b;
}
result = f5();
assert.areEqual(10, (new result()).method1(), "Methods defined in a class defined, after a function and class, in the param scope should capture the formals form that param scope itself");
function f6(a = 10, d, b = (function () { return a; }, class {}, function (a, b = () => a) {}, class { method1() { return a; } }), c) {
var a = 20;
return b;
}
result = f6();
assert.areEqual(10, (new result()).method1(), "Methods defined in a class defined, after a split scope function, in the param scope should capture the formals form that param scope itself");
function f7(a = 10, d, b = (function () { return a; }, class c1 { method1() { return a; } }), c) {
var a = 20;
assert.areEqual(10, (new b()).method1(), "Class method defined within the param scope should capture the formal from the param scope");
return b;
}
result = f7();
assert.areEqual(10, (new result()).method1(), "Methods defined in a class with name defined, after another function definition, in the param scope should capture the formals form that param scope itself");
function f8(a = 10, d, b = class c1 { method1() { return a; } }, c = (function () { return a; }, class c2 extends b { method2() { return a * a; } })) {
var a = 20;
assert.areEqual(10, (new b()).method1(), "Class method defined within the param scope should capture the formal from the param scope");
return c;
}
result = f8();
assert.areEqual(10, (new result()).method1(), "Methods defined in a class extending another class defined, after another function definition, in the param scope should capture the formals form that param scope itself");
assert.areEqual(100, (new result()).method2(), "Method in the derived class returns the right value");
}
},
{
name: "Split parameter scope in generator methods",
body: function () {
function *f1(a = 10, d, b = function () { return a; }, c) {
yield a;
var a = 20;
yield a;
yield b;
}
var f1Obj = f1();
assert.areEqual(10, f1Obj.next().value, "Initial value of the parameter in the body scope should be the same as the final value of the parameter in param scope");
assert.areEqual(20, f1Obj.next().value, "Assignment in the body scope updates the variable's value");
assert.areEqual(10, f1Obj.next().value(), "Function defined in the param scope captures the formal from the param scope itself");
function *f2(a = 10, d, b = function () { return a; }, c) {
yield a;
a = 20;
yield a;
yield b;
}
var f2Obj = f2();
assert.areEqual(10, f2Obj.next().value, "Initial value of the parameter in the body scope should be the same as the final value of the parameter in param scope");
assert.areEqual(20, f2Obj.next().value, "Assignment in the body scope updates the variable's value");
assert.areEqual(10, f2Obj.next().value(), "Function defined in the param scope captures the formal from the param scope itself even if it is not redeclared in the body");
function *f3(a = 10, d, b = function *() { yield a + c; }, c = 100) {
a = 20;
yield a;
yield b;
}
var f3Obj = f3();
assert.areEqual(20, f3Obj.next().value, "Assignment in the body scope updates the variable's value");
assert.areEqual(110, f3Obj.next().value().next().value, "Function defined in the param scope captures the formals from the param scope");
function *f4(a = 10, d, b = function *() { yield a; }, c) {
var a = 20;
yield function *() { yield a; };
yield b;
}
var f4Obj = f4();
assert.areEqual(20, f4Obj.next().value().next().value, "Generator defined inside the body captures the symbol from the body scope");
assert.areEqual(10, f4Obj.next().value().next().value, "Function defined in the param scope captures the formal from param scope even if it is captured in the body scope");
}
},
{
name: "Split parameter scope with destructuring",
body: function () {
function f1( {a:a1, b:b1}, c = function() { return a1 + b1; } ) {
assert.areEqual(10, a1, "Initial value of the first destructuring parameter in the body scope should be the same as the one in param scope");
assert.areEqual(20, b1, "Initial value of the second destructuring parameter in the body scope should be the same as the one in param scope");
a1 = 1;
b1 = 2;
assert.areEqual(1, a1, "New assignment in the body scope updates the first formal's value in body scope");
assert.areEqual(2, b1, "New assignment in the body scope updates the second formal's value in body scope");
assert.areEqual(30, c(), "The param scope method should return the sum of the destructured formals from the param scope");
return c;
}
assert.areEqual(30, f1({ a : 10, b : 20 })(), "Returned method should return the sum of the destructured formals from the param scope");
function f2({x:x = 10, y:y = function () { return x; }}) {
assert.areEqual(10, x, "Initial value of the first destructuring parameter in the body scope should be the same as the one in param scope");
x = 20;
assert.areEqual(20, x, "Assignment in the body updates the formal's value");
return y;
}
assert.areEqual(10, f2({ })(), "Returned method should return the value of the destructured formal from the param scope");
function f3({y:y = function () { return x; }, x:x = 10}) {
assert.areEqual(10, x, "Initial value of the first destructuring parameter in the body scope should be the same as the one in param scope");
x = 20;
assert.areEqual(20, x, "Assignment in the body updates the formal's value");
return y;
}
assert.areEqual(10, f3({ })(), "Returned method should return the value of the destructured formal from the param scope even if declared after");
(({x:x = 10, y:y = function () { return x; }}) => {
assert.areEqual(10, x, "Initial value of the first destructuring parameter in the body scope should be the same as the one in param scope");
x = 20;
assert.areEqual(10, y(), "Assignment in the body does not affect the formal captured from the param scope");
})({});
}
},
{
name: "Nested split scopes",
body: function () {
function f1(a = 10, b = function () { return a; }, c) {
function iFnc(d = 100, e = 200, pf1 = function () { return d + e; }) {
d = 1000;
e = 2000;
pf2 = function () { return d + e; };
return [pf1, pf2];
}
return [b].concat(iFnc());
}
var result = f1();
assert.areEqual(10, result[0](), "Function defined in the param scope of the outer function should capture the symbols from its own param scope");
assert.areEqual(300, result[1](), "Function defined in the param scope of the inner function should capture the symbols from its own param scope");
assert.areEqual(3000, result[2](), "Function defined in the body scope of the inner function should capture the symbols from its body scope");
function f2(a = 10, b = function () { return a; }, c) {
a = 1000;
c = 2000;
function iFnc(a = 100, b = function () { return a + c; }, c = 200) {
a = 1000;
c = 2000;
return b;
}
return [b, iFnc()];
}
result = f2();
assert.areEqual(10, result[0](), "Function defined in the param scope of the outer function should capture the symbols from its own param scope even if formals are with the same name in inner function");
assert.areEqual(300, result[1](), "Function defined in the param scope of the inner function should capture the symbols from its own param scope if formals are with the same name in the outer function");
function f3(a = 10, b = function () { return a; }, c) {
a = 1000;
c = 2000;
function iFnc(a = 100, b = function () { return a + c; }, c = 200) {
a = 1000;
c = 2000;
return b;
}
return [b, iFnc];
}
assert.areEqual(300, f3()[1]()(), "Function defined in the param scope of the inner function should capture the right formals even if the inner function is executed outside");
function f4(a = 10, b = function () { return a; }, c) {
a = 1000;
function iFnc(a = 100, b = function () { return a + c; }, c = 200) {
a = 1000;
c = 2000;
return b;
}
return [b, iFnc(undefined, b, c)];
}
result = f4(1, undefined, 3);
assert.areEqual(1, result[0](), "Function defined in the param scope of the outer function correctly captures the passed in value for the formal");
assert.areEqual(1, result[1](), "Function defined in the param scope of the inner function is replaced by the function definition from the param scope of the outer function");
function f5(a = 10, b = function () { return a; }, c) {
function iFnc(a = 100, b = function () { return a + c; }, c = 200) {
a = 1000;
c = 2000;
return b;
}
return [b, iFnc(a, undefined, c)];
}
result = f5(1, undefined, 3);
assert.areEqual(1, result[0](), "Function defined in the param scope of the outer function correctly captures the passed in value for the formal");
assert.areEqual(4, result[1](), "Function defined in the param scope of the inner function captures the passed values for the formals");
function f6(a , b, c) {
function iFnc(a = 1, b = function () { return a + c; }, c = 2) {
a = 10;
c = 20;
return b;
}
return iFnc;
}
assert.areEqual(3, f6()()(), "Function defined in the param scope captures the formals when defined inside another method without split scope");
function f7(a = 10 , b = 20, c = function () { return a + b; }) {
return (function () {
function iFnc(a = 100, b = function () { return a + c; }, c = 200) {
a = 1000;
c = 2000;
return b;
}
return [c, iFnc];
})();
}
result = f7();
assert.areEqual(30, result[0](), "Function defined in the param scope of the outer function should capture the symbols from its own param scope even in nested case");
assert.areEqual(300, result[1]()(), "Function defined in the param scope of the inner function should capture the symbols from its own param scope even when nested inside a normal method and a split scope");
function f8(a = 1, b = function (d = 10, e = function () { return a + d; }) { assert.areEqual(d, 10, "Split scope function defined in param scope should capture the right formal value"); d = 20; return e; }, c) {
a = 2;
return b;
}
assert.areEqual(11, f8()()(), "Split scope function defined within the param scope should capture the formals from the corresponding param scopes");
function f9(a = 1, b = function () { return function (d = 10, e = function () { return a + d; }) { d = 20; return e; } }, c) {
a = 2;
return b;
}
assert.areEqual(11, f9()()()(), "Split scope function defined within the param scope should capture the formals from the corresponding param scope in nested scope");
}
},
// Verifies symbol shadowing across the split parameter scope: functions
// defined among the defaults must keep capturing param-scope bindings even
// when the body hoists a function or declares a var with the same name.
{
name: "Split scope with symbol shadowing",
body: function () {
// f1: body-hoisted `function a` shadows the formal inside the body, but
// `b` (created in the param scope) still sees the formal's value (10).
function f1(a = 10, b = function () { return a; }) {
assert.areEqual(100, a(), "Function definition inside the body is hoisted");
function a () {
return 100;
}
return b;
}
assert.areEqual(10, f1()(), "Function definition in the param scope captures the symbol from the param scope");
// f2: `c` captured the param-scope `b` during default evaluation; the
// body then hoists its own `function b`, so the two references diverge.
function f2(a = 10, b = function () { return a; }, c = b) {
a = 20;
assert.areEqual(20, b(), "Function definition in the body scope captures the body symbol");
function b() {
return a;
}
return [c, b];
}
var result = f2();
assert.areEqual(10, result[0](), "Function definition in the param scope captures the param scope symbol");
assert.areEqual(20, result[1](), "Function definition in the body captures the body symbol");
var g = 1;
// f3: `g` referenced from the param-scope closure resolves to the outer
// var (1), not the body-hoisted `function g`.
function f3(a = 10, b = function () { a; return g;}) {
assert.areEqual(10, g(), "Function definition inside the body is unaffected by the outer variable");
function g() {
return 10;
}
return b;
}
assert.areEqual(1, f3()(), "Function definition in the param scope captures the outer scoped var");
// f4: named function expressions in the defaults capture the outer `x1`
// (10), not the body-scoped `var x1 = 100`.
function f4(a = x1, b = function g() {
a;
return function h() {
assert.areEqual(10, x1, "x1 is captured from the outer scope");
};
}) {
var x1 = 100;
b()();
};
var x1 = 10;
f4();
var x2 = 1;
// f5: a block-scoped function declaration and a body var with the same
// name do not disturb the param scope's capture of the outer `x2`.
function f5(a = x2, b = function() { a; return x2; }) {
{
function x2() {
}
}
var x2 = 2;
return b;
}
assert.areEqual(1, f5()(), "Symbol capture at the param scope is unaffected by the inner definitions");
var x3 = 1;
// f6: same as f5 but with only a plain body var redeclaration.
function f6(a = x3, b = function(_x) { a; return x3; }) {
var x3 = 2;
return b;
}
assert.areEqual(1, f6()(), "Symbol capture at the param scope is unaffected by other references in the body and param");
}
},
{
name : "Split scope and arguments symbol",
body : function () {
assert.throws(function () { eval("function f(a = arguments, b = () => a) { }"); }, SyntaxError, "Use of arguments symbol is not allowed in non-simple parameter list with split scope", "Use of 'arguments' in non-simple parameter list is not supported when one of the formals is captured");
assert.throws(function () { eval("function f1() { function f2(a = arguments, b = () => a) { } }"); }, SyntaxError, "Use of arguments symbol is not allowed in non-simple parameter list with split scope inside another function", "Use of 'arguments' in non-simple parameter list is not supported when one of the formals is captured");
assert.throws(function () { eval("function f(a = arguments, b = () => a, c = eval('')) { }"); }, SyntaxError, "Use of arguments symbol is not allowed in non-simple parameter list with eval", "Use of 'arguments' in non-simple parameter list is not supported when one of the formals is captured");
assert.throws(function () { eval("function f(a = arguments = [1, 2], b = () => a) { }"); }, SyntaxError, "Use of arguments symbol is not allowed in non-simple parameter list with split scope", "Use of 'arguments' in non-simple parameter list is not supported when one of the formals is captured");
assert.throws(function () { eval("function f(a = 10, b = () => a, c = arguments) { }"); }, SyntaxError, "Use of arguments symbol is not allowed in non-simple parameter list with split scope", "Use of 'arguments' in non-simple parameter list is not supported when one of the formals is captured");
assert.throws(function () { eval("function f(a = 10, b = () => a, c = a = arguments) { }"); }, SyntaxError, "Use of arguments symbol is not allowed in non-simple parameter list with split scope", "Use of 'arguments' in non-simple parameter list is not supported when one of the formals is captured");
assert.throws(function () { eval("function f(a, b = () => { a; arguments}) { }"); }, SyntaxError, "Use of arguments symbol is not allowed in non-simple parameter list when captured in lambda method", "Use of 'arguments' in non-simple parameter list is not supported when one of the formals is captured");
assert.throws(function () { eval("function f(a = 10, b = (c = arguments) => a) { }"); }, SyntaxError, "Use of arguments symbol is not allowed in non-simple parameter list when captured in a lambda in split scope", "Use of 'arguments' in non-simple parameter list is not supported when one of the formals is captured");
assert.throws(function () { eval("function f(a, b = () => a, c = () => { return arguments; }) { }"); }, SyntaxError, "Use of arguments symbol is not allowed in non-simple parameter list in split scope when captured by a lambda method", "Use of 'arguments' in non-simple parameter list is not supported when one of the formals is captured");
assert.throws(function () { eval("function f(a = 10, b = () => a, c = () => () => arguments) { }"); }, SyntaxError, "Use of arguments symbol is not allowed in non-simple parameter list in split scope when captured by nested lambda", "Use of 'arguments' in non-simple parameter list is not supported when one of the formals is captured");
assert.throws(function () { eval("function f(a, arguments = function () { return a; } ) { }"); }, SyntaxError, "Use of arguments as a parameter name is not allowed in non-simple parameter list in split scope when captured by nested lambda", "Use of 'arguments' in non-simple parameter list is not supported when one of the formals is captured");
assert.throws(function () { eval("function f({a, arguments = function () { return a; }}) { }"); }, SyntaxError, "Use of arguments as a parameter name is not allowed in destructuring parameter list in split scope when captured by nested lambda", "Use of 'arguments' in non-simple parameter list is not supported when one of the formals is captured");
assert.throws(function () { eval("function f({a = arguments}, b = function () { return a; } ) { }"); }, SyntaxError, "Use of arguments is not allowed in destructuring parameter list in split scope when captured by nested lambda", "Use of 'arguments' in non-simple parameter list is not supported when one of the formals is captured");
assert.throws(function () { eval("function f(a = () => arguments) { }"); }, SyntaxError, "Arguments cannot be captured in the param scope", "Use of 'arguments' in non-simple parameter list is not supported when one of the formals is captured");
assert.throws(function () { eval("function f(a = () => arguments[0]) { }"); }, SyntaxError, "Arguments cannot be captured in the param scope", "Use of 'arguments' in non-simple parameter list is not supported when one of the formals is captured");
assert.throws(function () { eval("function f(a = 1, b = () => arguments[0]) { }"); }, SyntaxError, "Arguments cannot be captured in the param scope at any position", "Use of 'arguments' in non-simple parameter list is not supported when one of the formals is captured");
assert.throws(function () { eval("function f(a = () => arguments[0] + b, b = 10) { }"); }, SyntaxError, "Arguments cannot be captured in the param scope at any position", "Use of 'arguments' in non-simple parameter list is not supported when one of the formals is captured");
assert.throws(function () { eval("function f(a = () => arguments) { var arguments }"); }, SyntaxError, "Arguments cannot be captured in the param scope even when duplicate definition occurs in the body", "Use of 'arguments' in non-simple parameter list is not supported when one of the formals is captured");
assert.throws(function () { eval("function f(a = () => arguments) { function arguments() { } }"); }, SyntaxError, "Arguments cannot be captured in the param scope even when duplicate definition occurs in the body", "Use of 'arguments' in non-simple parameter list is not supported when one of the formals is captured");
assert.throws(function () { eval("function f(arguments, b = () => arguments) { }"); }, SyntaxError, "Arguments cannot be captured in the param scope even if it is a formal shadowing the actual arguments", "Use of 'arguments' in non-simple parameter list is not supported when one of the formals is captured");
assert.throws(function () { eval("function f({a, arguments}, b = () => a) { }"); }, SyntaxError, "Arguments cannot be used as a formal name when one of the formal is captured", "Use of 'arguments' in non-simple parameter list is not supported when one of the formals is captured");
assert.throws(function () { eval("function f(a, {arguments, b = () => arguments}) { }"); }, SyntaxError, "Arguments cannot be used as a formal name when one of the formal is captured", "Use of 'arguments' in non-simple parameter list is not supported when one of the formals is captured");
function f1(a, b = () => a) {
eval("");
b = () => { return arguments; };
assert.areEqual(1, arguments[0], "Arguments object receives the first parameter properly");
assert.areEqual(1, b()[0], "First argument receives the right value passed in");
assert.areEqual(undefined, b()[1], "Second argument receives the right value passed in");
assert.areEqual(2, arguments.length, "Arguments should have only two elements in it");
}
f1(1, undefined);
function f2(a, b = () => { return a; }) {
a = 10;
assert.areEqual(1, arguments[0], "First argument is properly received");
assert.areEqual(2, arguments[2], "Third argument is properly received");
assert.areEqual(3, arguments.length, "Only three arguments are passed in");
(() => { arguments = [3, 4]; a; })();
assert.areEqual(3, arguments[0], "Arguments symbol is updated with the new value when the lambda is executed");
assert.areEqual(4, arguments[1], "New array is properly assigned to arguments symbol");
assert.areEqual(2, arguments.length, "New array has only elements");
return b;
}
assert.areEqual(1, f2(1, undefined, 2)(), "Param scope method properly captures the first parameter");
function f3(a, b = () => { return a; }) {
eval("");
a = 10;
assert.areEqual(1, arguments[0], "First argument is properly received");
assert.areEqual(2, arguments[2], "Third argument is properly received");
assert.areEqual(3, arguments.length, "Only three arguments are passed in");
(() => { arguments = [3, 4]; a; })();
assert.areEqual(3, arguments[0], "Arguments symbol is updated with the new value when the lambda is executed");
assert.areEqual(4, arguments[1], "New array is properly assigned to arguments symbol");
assert.areEqual(2, arguments.length, "New array has only elements");
return b;
}
assert.areEqual(1, f3(1, undefined, 2)(), "Param scope method properly captures the first parameter, with eval in the body");
function f4(a, b = function () { a; } ) {
var c = 10;
assert.areEqual(1, arguments[0], "Arguments symbol properly receives the passed in values");
eval("");
}
f4(1);
function f5(a, b = function () { a; } ) {
var c = 10;
assert.areEqual(1, arguments[0], "Arguments symbol properly receives the passed in values");
arguments = 100;
assert.areEqual(100, arguments, "Arguments is updated after the assignment");
eval("");
}
f5(1);
function f6(a, b = function () { a; } ) {
assert.areEqual(1, arguments[0], "Arguments symbol properly receives the passed in values");
arguments = 100;
assert.areEqual(100, arguments, "Arguments is updated after the assignment");
}
f6(1);
function f7(a, b = function () { a; } ) {
assert.areEqual(5, arguments(), "Function definition is hoisted");
function arguments() { return 5; }
}
f7(1);
function f8(a, b = function () { a; } ) {
assert.areEqual(5, arguments(), "Function definition is hoisted");
function arguments() { return 5; }
eval("");
}
f8(1);
function f9(a, b = function () { a; } ) {
assert.areEqual(1, eval("a"), "Eval should be able to access the first argument properly");
assert.areEqual(1, eval("arguments[0]"), "Eval should be able to access the first argument properly from arguments object");
assert.areEqual(1, arguments[0], "Arguments symbol properly receives the passed in values");
arguments = 100;
assert.areEqual(100, arguments, "Arguments is updated after the assignment");
assert.areEqual(100, eval("arguments"), "Updated value of arguments is visible in eval");
assert.areEqual(1, eval("a"), "First argument remains unchanged after the arguments are updated");
}
f9(1);
function f10(a, b = function () { a; } ) {
assert.areEqual(1, arguments[0], "Arguments symbol properly receives the passed in values");
var arguments = 100;
assert.areEqual(100, arguments, "Arguments is updated after the assignment");
}
f10(1);
function f11(a, b = function () { a; } ) {
assert.areEqual(1, arguments[0], "Arguments symbol properly receives the passed in values");
var arguments = 100;
assert.areEqual(100, arguments, "Arguments is updated after the assignment");
eval("");
}
f11(1);
function f12(a, b = function () { a; } ) {
assert.areEqual(1, arguments[0], "Arguments symbol properly receives the passed in values");
b = () => arguments;
assert.areEqual(1, b()[0], "Lambda captures the right arguments symbol");
var arguments = 100;
assert.areEqual(100, arguments, "Arguments is updated after the assignment");
assert.areEqual(100, b(), "Lambda now gives the updated value");
eval("");
}
f12(1);
function f13(a, b = () => { return a; }) {
a = 10;
assert.areEqual(1, arguments[0], "First argument is properly received");
assert.areEqual(2, arguments[2], "Third argument is properly received");
assert.areEqual(3, arguments.length, "Only three arguments are passed in");
((c = arguments = [3, 4]) => { a; })();
assert.areEqual(3, arguments[0], "Arguments symbol is updated with the new value when the lambda is executed");
assert.areEqual(4, arguments[1], "New array is properly assigned to arguments symbol");
assert.areEqual(2, arguments.length, "New array has only elements");
return b;
}
assert.areEqual(1, f13(1, undefined, 2)(), "Param scope method properly captures the first parameter");
function f14(a, b = () => { return a; }) {
eval("");
a = 10;
assert.areEqual(1, arguments[0], "First argument is properly received");
assert.areEqual(2, arguments[2], "Third argument is properly received");
assert.areEqual(3, arguments.length, "Only three arguments are passed in");
((c = arguments = [3, 4]) => { a; })();
assert.areEqual(3, arguments[0], "Arguments symbol is updated with the new value when the lambda is executed");
assert.areEqual(4, arguments[1], "New array is properly assigned to arguments symbol");
assert.areEqual(2, arguments.length, "New array has only elements");
return b;
}
assert.areEqual(1, f14(1, undefined, 2)(), "Param scope method properly captures the first parameter, with eval in the body");
function f15(a, b = function () { a; }, ...c) {
assert.areEqual(1, arguments[0], "Checking first argument");
assert.areEqual(undefined, arguments[1], "Checking second argument");
assert.areEqual(2, arguments[2], "Checking third argument");
assert.areEqual(3, arguments[3], "Checking fourth argument");
assert.areEqual([2, 3], c, "Rest argument should get the trailing parameters properly");
var arguments = 100;
assert.areEqual(100, arguments, "Arguments is updated after the assignment");
assert.areEqual([2, 3], c, "Rest should remain unaffected when arguments is updated");
eval("");
}
f15(1, undefined, 2, 3);
var f16 = function f17(a, b = function () { a; }, ...c) {
if (a === 1) {
assert.areEqual(1, arguments[0], "Checking first argument");
assert.areEqual(undefined, arguments[1], "Checking second argument");
assert.areEqual(2, arguments[2], "Checking third argument");
assert.areEqual(3, arguments[3], "Checking fourth argument");
assert.areEqual([2, 3], c, "Rest argument should get the trailing parameters properly");
return f17(undefined, undefined, ...c);
} else {
assert.areEqual(undefined, arguments[0], "Checking first argument on the recursive call");
assert.areEqual(undefined, arguments[1], "Checking second argument on the recursive call");
assert.areEqual(2, arguments[2], "Checking third argument on the recursive call");
assert.areEqual(3, arguments[3], "Checking fourth argument on the recursive call");
assert.areEqual([2, 3], c, "Rest argument should get the trailing parameters properly");
var arguments = 100;
assert.areEqual(100, arguments, "Arguments is updated after the assignment");
assert.areEqual([2, 3], c, "Rest should remain unaffected when arguments is updated");
return eval("c");
}
}
assert.areEqual([2, 3], f16(1, undefined, 2, 3), "Rest should remain unaffected when arguments is updated");
function f18(a, b = function arguments(c) {
if (!c) {
return arguments.callee(a, 10, 20);
}
return arguments;
}) {
assert.areEqual(10, b()[1], "Function defined in the param scope can be called recursively");
assert.areEqual(1, arguments[0], "Arguments symbol is unaffected by the function expression");
}
f18(1);
function f19(a, b = arguments) {
var c = function arguments(c) {
if (!arguments.length) {
return arguments.callee(a, 10, 20, 30);
}
return arguments;
}
assert.areEqual(30, c()[3], "In the function body the arguments function expression with name is not visible");
assert.areEqual(1, b[0], "In the param scope arguments symbol referes to the passed in values");
}
f19(1, undefined, 2, 3, 4);
function f20(a, b = function arguments(c) {
if (!c) {
return arguments.callee(a, 10, 20);
}
return eval("arguments");
}) {
assert.areEqual(1, b()[0], "Function defined in the param scope can be called recursively when eval occurs in its body");
assert.areEqual(1, arguments[0], "Arguments symbol is unaffected by the function expression");
}
f20(1);
function f21(a, b = arguments) {
var c = function arguments(c) {
if (!arguments.length) {
return arguments.callee(a, 10, 20, 30);
}
return arguments;
}
assert.areEqual(30, c()[3], "In the function body the arguments function expression with name is not visible when eval is there in the body");
assert.areEqual(3, eval("b[3]"), "In the param scope arguments symbol referes to the passed in values");
}
f21(1, undefined, 2, 3, 4);
function f22(a, b = () => a) {
assert.areEqual(1, arguments[0], "Function in block causes a var declaration to be hoisted and the initial value should be same as the arguments symbol");
{
{
function arguments() {
return 10;
}
}
}
assert.areEqual(1, b(), "Function defined in the param scope should be able to capture the formal even when arguments in overwritten the body");
assert.areEqual(10, arguments(), "Hoisted var binding is updated after the block is exected");
}
f22(1);
function f23(a, b = () => a) {
function f16() {
eval("");
this.arguments = 1;
}
a = 10;
var obj = new f16();
function arguments() {
return 10;
}
assert.areEqual(1, obj.arguments, "Inner function with eval should add the property named arguments when duplicate arguments definition occurs in the parent body");
assert.areEqual(1, b(), "Formal captured from the param scope should be constrained to the param scope");
};
f23(1);
}
},
// Verifies that `super()` is usable from the parameter scope, the body, a
// lambda in the body, and eval in a derived-class constructor when the
// parameter scope is split.
{
name: "Split scope and super call",
body: function () {
// Base class returns an explicit object so super().x is observable.
class c1 {
constructor() {
return { x : 1 };
}
};
// c2: super() invoked from a lambda in the parameter scope.
class c2 extends c1 {
constructor(a = 1, b = () => { assert.areEqual(1, super().x, "Super is accessible in the param scope"); return a; }) {
var c = 10;
a = 20;
(() => assert.areEqual(10, c, "Allocation of scope slot for super property shouldn't affect the body variables"))();
assert.areEqual(1, b(), "Function defined in the param scope should capture the formal");
return {};
}
}
new c2();
// c3: super() invoked from a lambda in the body.
class c3 extends c1 {
constructor(a = 1, b = () => { return a; }) {
(() => assert.areEqual(1, super().x, "Lambda should be able to access the super method properly in the body"))();
a = 10;
assert.areEqual(1, b(), "Function defined in the param scope should capture the formal");
}
}
new c3();
// c4: super() invoked through eval in the body.
class c4 extends c1 {
constructor(a = 1, b = () => { return a; }) {
var c = 10;
(() => assert.areEqual(10, c, "Allocation of scope slot for super property shouldn't affect the body variables"))();
assert.areEqual(1, b(), "Function defined in the param scope should capture the formal");
assert.areEqual(1, eval("super().x"), "Eval should be able to access the super property properly");
}
}
new c4();
// c5: super() invoked directly in a default-parameter expression.
class c5 extends c1 {
constructor(a = super().x, b = () => { return a; }) {
assert.areEqual(1, a, "First formal calls the super from the param scope");
var c = 10;
(() => assert.areEqual(10, c, "Allocation of scope slot for super property shouldn't affect the body variables"))();
assert.areEqual(1, b(), "Function defined in the param scope should capture the formal");
}
}
new c5();
}
},
// Verifies that `super.foo()` property access works from the parameter
// scope, body lambdas, and eval in a derived-class method with split scope.
{
name: "Split scope and super property",
body: function () {
// Base class supplies the super method under test.
class c1 {
foo () {
return 1;
}
};
// c2: super.foo() called from a lambda in the parameter scope.
class c2 extends c1 {
foo(a = 1, b = () => { assert.areEqual(1, super.foo(), "Super property access works fine from a lambda defined in the param scope"); return a; }) {
a = 20;
var c = 10;
(() => assert.areEqual(10, c, "Allocation of scope slot for super property shouldn't affect the body variables"))();
assert.areEqual(1, b(), "Function defined in the param scope should capture the formal");
}
}
(new c2()).foo();
// c3: super.foo() called from a lambda in the body.
class c3 extends c1 {
foo(a = 1, b = () => { return a; }) {
var c = 10;
a = 20;
(() => assert.areEqual(1, super.foo(), "Super property access works fine from a lambda defined in the body scope"))();
assert.areEqual(1, b(), "Function defined in the param scope should capture the formal");
}
}
(new c3()).foo();
// c4: super.foo() reached through eval in the body.
class c4 extends c1 {
foo(a = 1, b = () => { return a; }) {
var c = 10;
a = 20;
(() => assert.areEqual(10, c, "Allocation of scope slot for super property shouldn't affect the body variables"))();
assert.areEqual(1, b(), "Function defined in the param scope should capture the formal");
assert.areEqual(1, eval("super.foo()"), "Eval should be able to access the super property properly from the body scope");
}
}
(new c4()).foo();
// c5: super.foo() used directly in a default-parameter expression.
class c5 extends c1 {
foo(a = super.foo(), b = () => { return a; }) {
assert.areEqual(1, a, "First formal uses the super property from the param scope");
var c = 10;
(() => assert.areEqual(10, c, "Allocation of scope slot for super property shouldn't affect the body variables"))();
a = 20;
assert.areEqual(1, b(), "Function defined in the param scope should capture the formal");
}
}
(new c5()).foo();
}
},
// Verifies that `new.target` has the derived-class value in the parameter
// scope, the body, body lambdas, and eval when the parameter scope is split.
{
name: "Split scope and new.target",
body: function () {
// Base class checks the new.target value forwarded by derived classes.
class c1 {
constructor(newTarget) {
assert.isTrue(newTarget == new.target, "Base class should receive the right value for new.target");
}
};
// c2: new.target read from a lambda in the parameter scope.
class c2 extends c1 {
constructor(a = 1, b = () => { assert.isTrue(new.target == c2, "new.target should have the derived class value in the param scope"); return a; }) {
super(c2);
var c = 10;
a = 20;
(() => assert.areEqual(10, c, "Allocation of scope slot for super property shouldn't affect the body variables"))();
assert.areEqual(1, b(), "Function defined in the param scope should capture the formal");
}
}
new c2();
// c3: new.target read from the body and from a body lambda.
class c3 extends c1 {
constructor(a = 1, b = () => { return a; }) {
super(c3);
var c = 10;
(() => assert.isTrue(new.target == c3, "new.target should be the derived class in the body scope when captured by lambda"))();
assert.isTrue(new.target == c3, "new.target should be the derived class in the body scope");
}
}
new c3();
// c4: new.target read through eval in the body.
class c4 extends c1 {
constructor(a = 1, b = () => { return a; }) {
super(c4);
assert.isTrue(eval("new.target == c4"), "new.target should be the derived class inside eval");
assert.isTrue(new.target == c4, "new.target should be the derived class in the body scope");
}
}
new c4();
// c5: new.target used directly in a default-parameter expression.
class c5 extends c1 {
constructor(a = new.target, b = () => { return a; }) {
super(c5);
assert.isTrue(a == c5, "new.target accessed from the param scope should work fine");
}
}
new c5();
}
},
{
name: "Split parameter scope and eval",
body: function () {
function g() {
return 3 * 3;
}
function f1(h = () => eval("g()")) {
assert.areEqual(6, g(), "Right method is called in the body scope");
function g() {
return 2 * 3;
}
return h();
}
assert.areEqual(9, f1(), "Paramater scope remains split");
function f2(h = () => eval("g()")) {
assert.areEqual(6, eval("g()"), "Right method is called in the body scope");
function g() {
return 2 * 3;
}
return h();
}
assert.areEqual(9, f2(), "Paramater scope remains split");
}
},
// Verifies that eval in the body observes the body copies of the formals,
// while functions created in the parameter scope keep seeing the
// param-scope values.
{
name: "Split parameter scope with eval in body",
body: function () {
// f1: body `var a = 20` updates only the body binding; the param-scope
// closure still returns 10.
function f1(a = 10, b = function () { return a; }) {
assert.areEqual(10, a, "Initial value of parameter in the body scope should be the same as the one in param scope");
assert.areEqual(10, eval('a'), "Initial value of parameter in the body scope in eval should be the same as the one in param scope");
var a = 20;
assert.areEqual(20, a, "New assignment in the body scope updates the variable's value in body scope");
assert.areEqual(20, eval('a'), "New assignment in the body scope updates the variable's value when evaluated through eval in body scope");
return b;
}
assert.areEqual(10, f1()(), "Function defined in the param scope captures the formals from the param scope not body scope with eval");
// f2: eval('b()') calls the param-scope b, which is immune to the body's
// reassignment of a.
function f2(a = 10, b = function () { return a; }) {
assert.areEqual(10, eval('b()'), "Eval of the function from param scope should return the right value for the formal");
var a = 20;
assert.areEqual(10, eval('b()'), "Eval of the function from param scope should return the right value for the formal even after assignment to the corresponding body symbol");
return b;
}
assert.areEqual(10, f2()(), "Function defined in the param scope captures the formals from the param scope not body scope with eval");
// f3: a body-hoisted `function b` (a * a) shadows the param-scope b, so
// eval('b()') sees 100 before and 400 after `var a = 20`.
function f3(a = 10, b = function () { return a; }) {
assert.areEqual(100, eval('b()'), "Eval of the function from body scope should return the right value for the formal");
var a = 20;
function b () { return a * a; }
assert.areEqual(400, eval('b()'), "Eval of the function from body scope should return the right value after assignment to the corresponding body symbol");
return b;
}
assert.areEqual(400, f3()(), "Function defined in the body scope captures the symbol from the body scope with eval");
// f4: non-simple parameter list makes `arguments` unmapped; a nested eval
// must not disturb that.
function f4 (a, b, c = function () { b; }, d = 1) {
var e = 10;
assert.areEqual(2, arguments[0], "Unmapped arguments value has the expected value in the body");
(function () {
eval('');
}());
};
f4.call(1, 2);
}
},
// Verifies `with` statements inside functions whose parameter scope is
// split: name resolution must go through the with-object first, then the
// proper (param/body/outer) scope.
{
name: "Split scope and with",
body: function () {
// f1: with over an empty object; var declared inside the with block.
function f1(a, b, c = function () { a; }) {
with ({}) {
var d = function () {
return 10;
};
assert.areEqual(10, d(), "With inside a split scope function should work fine");
}
}
f1();
// f2: with block resolves a function declared in the body.
function f2(a, b, c = function () { a; }) {
var d = function () {
return 10;
};
with ({}) {
assert.areEqual(10, d(), "With inside a split scope function should be able to access the function definition from the body");
}
}
f2();
// f3: with block resolves a function created in the param scope.
function f3(a, b = function () { return 10; }, c = function () { a; }) {
with ({}) {
assert.areEqual(10, b(), "With inside a split scope function should be able to access the function definition from the param scope");
}
}
f3();
// f4: the with-object's own property shadows the (implicit global) `e`.
function f4(a, b = function () { return 10; }, c = function () { a; }) {
var d = {
e : function () { return 10; }
};
e = function () { return 100; };
with (d) {
assert.areEqual(10, e(), "With should use the function definition inside the object not the one from body");
}
}
f4();
// f5: the with-object comes from a formal; its property shadows body `d`.
function f5(a, b = { d : function () { return 10; } }, c = function () { a; }) {
var d = { };
with (b) {
assert.areEqual(10, d(), "With should use the function definition inside the object from the param scope not the one from body");
}
}
f5();
var v6 = 100
// f6: a with inside a param-scope function sees the outer var v6 (100),
// not the body's shadowing `var v6` (defaults run before the body).
function f6(a, b, c = function () { a; }, e = function () { with({}) { assert.areEqual(100, v6, "With inside param scope should be able to access var from outside"); } }, f = e()) {
var v6 = { };
}
f6();
// f7: a with in the body sees the outer var v6.
function f7(a, b, c = function () { a; }) {
with ({}) {
assert.areEqual(100, v6, "With inside body scope should be able to access var from outside");
}
}
f7();
// f8: with inside a param-scope function reaching sibling function f9 and
// outer var v1; run twice to exercise the cached/JIT path.
function f8() {
function f9() {
return 1;
}
var v1 = 10;
function f10(a = 10, b = function f11() {
a;
assert.areEqual(10, v1, "Function in the param scope should be able to access the outside variable");
with ({}) {
assert.areEqual(1, f9(), "With construct inside a param scoped function should be able to execute functions from outside");
}
}) {
b();
};
f10();
}
f8();
f8();
// f12: as f8 but the with sits in a function nested inside the
// param-scope function; run twice as well.
function f12() {
function f13() {
return 1;
}
var v2 = 100;
function f14(a = 10, b = function () {
assert.areEqual(10, a, "Function in the param scope should be able to access the formal from parent");
return function () {
assert.areEqual(10, a, "Function nested in the param scope should be able to access the formal from the split scoped function");
assert.areEqual(100, v2, "Function in the param scope should be able to access the outside variable");
with ({}) {
assert.areEqual(1, f13(), "With construct inside a param scoped function should be able to execute functions from outside");
}
};
}) {
b()();
};
f14();
}
f12();
f12();
}
},
// Verifies direct eval appearing in default-parameter expressions: constant
// evals, references to enclosing scopes, recursion into the function itself,
// and use-before-declaration of sibling formals.
{
name: "Basic eval in parameter scope",
body: function () {
assert.areEqual(1,
function (a = eval("1")) { return a; }(),
"Eval with static constant works in parameter scope");
{
let b = 2;
assert.areEqual(2,
function (a = eval("b")) { return a; }(),
"Eval with parent var reference works in parameter scope");
}
assert.areEqual(1,
function (a, b = eval("arguments[0]")) { return b; }(1),
"Eval with arguments reference works in parameter scope");
// Recursive call through eval: the inner call passes a = 1, so no
// further default evaluation happens.
function testSelf(a = eval("testSelf(1)")) {
return a;
}
assert.areEqual(1, testSelf(1), "Eval with reference to the current function works in parameter scope");
var testSelfExpr = function (a = eval("testSelfExpr(1)")) {
return a;
}
assert.areEqual(1, testSelfExpr(), "Eval with reference to the current function expression works in parameter scope");
{
let a = 1, b = 2, c = 3;
// testEvalRef: eval("a") in a's own default references the formal
// while it is still uninitialized (TDZ), so it must throw.
function testEvalRef(a = eval("a"), b = eval("b"), c = eval("c")) {
return [a, b, c];
}
assert.throws(function () { testEvalRef(); },
ReferenceError,
"Eval with reference to the current formal throws",
"Use before declaration");
// testEvalRef2: distinct formal names, so the evals resolve to the
// enclosing block's lets.
function testEvalRef2(x = eval("a"), y = eval("b"), z = eval("c")) {
return [x, y, z];
}
assert.areEqual([1, 2, 3], testEvalRef2(), "Eval with references works in parameter scope");
}
// f1-f3: the param-scope lambda's eval keeps seeing the param-scope
// value even after the body reassigns the formal.
function f1(a = 10, b = () => eval("a")) {
assert.areEqual(10, eval("a"), "In the body initial value of the symbol should be same as the final value from param scope");
a = 20;
assert.areEqual(20, eval("a"), "In the body after assignment the symbol value is updated");
assert.areEqual(10, b(), "Eval in the param scope captures the symbol from the param scope");
}
f1();
function f2(a = 10, b = () => eval("a")) {
a = 20;
assert.areEqual(10, b(), "Eval in the param scope captures the symbol from the param scope even when there is no eval in the body");
}
f2();
function f3(a = 10, b = function () { return eval("a"); }) {
a = 20;
assert.areEqual(10, b(), "Eval in the param scope captures the symbol from the param scope even when there is no eval in the body");
}
f3();
// f4: a later default (c) mutates a, so the param-scope final value is 30.
function f4(a = 10, b = () => eval("a"), c = a = 30) {
assert.areEqual(30, eval("a"), "In the body initial value of the symbol should be same as the final value from param scope");
a = 20;
assert.areEqual(20, eval("a"), "In the body after assignment the symbol value is updated");
assert.areEqual(30, b(), "Eval in the param scope captures the symbol from the param scope");
}
f4();
// f5: called with 30, so the default never runs; body var re-declares a.
function f5(a = 10, b = () => eval("a")) {
assert.areEqual(30, eval("a"), "In the body initial value of the symbol should be same as the final value from param scope");
var a = 20;
assert.areEqual(20, eval("a"), "In the body after assignment the symbol value is updated");
assert.areEqual(30, b(), "Eval in the param scope captures the symbol from the param scope");
}
f5(30);
}
},
{
// Verifies how declarations made inside an eval() that runs in a default-parameter
// expression interact with the parameter scope: sloppy-mode `var` redeclarations of
// formals throw, strict-mode ones do not, and `let`/`const` never leak out of the eval.
name: "Eval declarations in parameter scope",
body: function() {
// Redeclarations of formals - var
assert.throws(function () { return function (a = eval("var a = 2"), b = a) { return [a, b]; }() },
ReferenceError,
"Redeclaring the current formal using var inside an eval throws",
"Let/Const redeclaration");
assert.doesNotThrow(function () { "use strict"; return function (a = eval("var a = 2"), b = a) { return [a, b]; }() },
"Redeclaring the current formal using var inside a strict mode eval does not throw");
// NOTE(review): this case is byte-identical to the previous one; presumably it was
// meant to put "use strict" inside the inner function instead — TODO confirm intent.
assert.doesNotThrow(function () { "use strict"; return function (a = eval("var a = 2"), b = a) { return [a, b]; }() },
"Redeclaring the current formal using var inside a strict mode function eval does not throw");
assert.throws(function () { function foo(a = eval("var b"), b, c = b) { return [a, b, c]; } foo(); },
ReferenceError,
"Redeclaring a future formal using var inside an eval throws",
"Let/Const redeclaration");
assert.throws(function () { function foo(a, b = eval("var a"), c = a) { return [a, b, c]; } foo(); },
ReferenceError,
"Redeclaring a previous formal using var inside an eval throws",
"Let/Const redeclaration");
// Let and const do not leak outside of an eval, so the test cases below should never throw.
// Redeclarations of formals - let
assert.doesNotThrow(function (a = eval("let a")) { return a; },
"Attempting to redeclare the current formal using let inside an eval does not leak");
assert.doesNotThrow(function (a = eval("let b"), b) { return [a, b]; },
"Attempting to redeclare a future formal using let inside an eval does not leak");
assert.doesNotThrow(function (a, b = eval("let a")) { return [a, b]; },
"Attempting to redeclare a previous formal using let inside an eval does not leak");
// Redeclarations of formals - const
assert.doesNotThrow(function (a = eval("const a = 1")) { return a; },
"Attempting to redeclare the current formal using const inside an eval does not leak");
assert.doesNotThrow(function (a = eval("const b = 1"), b) { return [a, b]; },
"Attempting to redeclare a future formal using const inside an eval does not leak");
assert.doesNotThrow(function (a, b = eval("const a = 1")) { return [a, b]; },
"Attempting to redeclare a previous formal using const inside an eval does not leak");
// Conditional declarations
function test(x = eval("var a1 = 1; let b1 = 2; const c1 = 3;")) {
// none should be visible
assert.throws(function () { a1 }, ReferenceError, "Ignoring the default value does not result in an eval declaration leaking", "'a1' is undefined");
assert.throws(function () { b1 }, ReferenceError, "Let declarations do not leak out of eval to parameter scope", "'b1' is undefined");
assert.throws(function () { c1 }, ReferenceError, "Const declarations do not leak out of eval to parameter scope when x is ", "'c1' is undefined");
}
test();
// Redefining locals
function foo(a = eval("var x = 1; assert.areEqual(1, x, 'Variable declared inside eval is accessible within eval');")) {
// The eval's `var x` stays in the param scope; the body sees its own (hoisted) x.
assert.areEqual(undefined, x, "Var declaration from eval is not visible in the body");
var x = 10;
assert.areEqual(10, x, "Var declaration from eval uses its new value in the body declaration");
}
assert.doesNotThrow(function() { foo(); }, "Redefining a local var with an eval var does not throw");
// Function bodies defined in eval
function funcArrow(a = eval("() => 1"), b = a) { function a() { return 10; }; return [a(), b()]; }
assert.areEqual([10,1], funcArrow(), "Defining an arrow function body inside an eval works at default parameter scope");
// NOTE(review): funcDecl is declared twice below; both declarations hoist, so both
// asserts actually exercise the second definition — consider renaming. TODO confirm intent.
function funcDecl(a = eval("(function foo() { return 1; })"), b = a()) { return [a(), b]; }
assert.areEqual([1, 1], funcDecl(), "Defining a function inside an eval works at default parameter scope");
function funcDecl(a = eval("function foo() { return 1; }; foo"), b = a()) { return [a(), b]; }
assert.areEqual([1, 1], funcDecl(), "Defining a function inside an eval works at default parameter scope");
function genFuncDecl(a = eval("(function *foo() { yield 1; return 2; })"), b = a(), c = b.next()) { return [c, b.next()]; }
assert.areEqual([{value : 1, done : false}, {value : 2, done : true}], genFuncDecl(), "Declaring a generator function inside an eval works at default parameter scope");
function funcExpr(a = eval("f = function foo() { return 1; }"), b = f()) { return [a(), b, f()]; }
assert.areEqual([1, 1, 1], funcExpr(), "Declaring a function inside an eval works at default parameter scope");
assert.throws(function () { eval("function foo(a = eval('b'), b) {}; foo();"); }, ReferenceError, "Future default references using eval are not allowed", "Use before declaration");
}
},
];
// Run all test cases; pass "summary" as the first script argument to suppress verbose output.
testRunner.runTests(tests, { verbose: WScript.Arguments[0] != "summary" });
|
//-------------------------------------------------------------------------------------------------------
// Copyright (C) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE.txt file in the project root for full license information.
//-------------------------------------------------------------------------------------------------------
// Pull in the shared test harness (assert.*, testRunner) used by the cases below.
WScript.LoadScriptFile("..\\UnitTestFramework\\UnitTestFramework.js");
var tests = [
{
// Verifies that when a default-parameter expression captures a formal, the closure
// binds the formal in the PARAMETER scope, which is distinct from the (re-declared)
// variable of the same name in the function body scope.
name: "Split parameter scope in function definition",
body: function () {
function f1(a = 10, b = function () { return a; }) {
assert.areEqual(10, a, "Initial value of parameter in the body scope should be the same as the one in param scope");
var a = 20;
assert.areEqual(20, a, "New assignment in the body scope updates the variable's value in body scope");
return b;
}
assert.areEqual(10, f1()(), "Function defined in the param scope captures the formals from the param scope not body scope");
function f2(a = 10, b = function () { return a; }, c = b() + a) {
assert.areEqual(10, a, "Initial value of parameter in the body scope should be the same as the one in param scope");
assert.areEqual(20, c, "Initial value of the third parameter in the body scope should be twice the value of the first parameter");
var a = 20;
assert.areEqual(20, a, "New assignment in the body scope updates the variable's value in body scope");
return b;
}
assert.areEqual(10, f2()(), "Function defined in the param scope captures the formals from the param scope not body scope");
function f3(a = 10, b = function () { return a; }) {
assert.areEqual(1, a, "Initial value of parameter in the body scope should be the same as the one passed in");
var a = 20;
assert.areEqual(20, a, "Assignment in the body scope updates the variable's value in body scope");
return b;
}
assert.areEqual(f3(1)(), 1, "Function defined in the param scope captures the formals from the param scope even when the default expression is not applied for that param");
// `b = a += 10` mutates the formal inside the param scope before the body runs.
(function (a = 10, b = a += 10, c = function () { return a + b; }) {
assert.areEqual(20, a, "Initial value of parameter in the body scope should be same as the corresponding symbol's final value in the param scope");
var a2 = 40;
(function () { assert.areEqual(40, a2, "Symbols defined in the body scope should be unaffected by the duplicate formal symbols"); })();
assert.areEqual(40, c(), "Function defined in param scope uses the formals from param scope even when executed inside the body");
})();
(function (a = 10, b = function () { assert.areEqual(10, a, "Function defined in the param scope captures the formals from the param scope when executed from the param scope"); }, c = b()) {
})();
function f4(a = 10, b = function () { return a; }) {
a = 20;
return b;
}
assert.areEqual(10, f4()(), "Even if the formals are not redeclared in the function body the symbol in the param scope and body scope are different");
function f5(a = 10, b = function () { return function () { return a; }; }) {
var a = 20;
return b;
}
assert.areEqual(10, f5()()(), "Parameter scope works fine with nested functions");
var a1 = 10;
function f6(a, b = function () { a; return a1; }) {
// The body's `var a1` shadows the outer a1 inside f6; it is undefined until assigned.
assert.areEqual(undefined, a1, "Inside the function body the assignment hasn't happened yet");
var a1 = 20;
assert.areEqual(20, a1, "Assignment to the symbol inside the function changes the value");
return b;
}
assert.areEqual(10, f6()(), "Function in the param scope correctly binds to the outer variable");
function f7(a = 10, b = { iFnc () { return a; } }) {
a = 20;
return b;
}
assert.areEqual(10, f7().iFnc(), "Function definition inside the object literal should capture the formal from the param scope");
var f8 = function (a, b = ((function() { assert.areEqual('string1', a, "First arguemnt receives the right value"); })(), 1), c) {
var d = 'string3';
(function () { assert.areEqual('string3', d, "Var declaration in the body is initialized properly"); })();
return c;
};
assert.areEqual('string2', f8('string1', undefined, 'string2'), "Function returns the third argument properly");
function f9() {
var f10 = function (a = function () { c; }, b, c) {
assert.areEqual(1, c, "Third argument is properly populated");
arguments;
function f11() {};
};
f10(undefined, undefined, 1);
}
f9();
// Second call likely exercises the re-execution (e.g. JIT) path — TODO confirm.
f9();
function f12() {
var result = ((a = (w = a => a * a) => w) => a)()()(10);
assert.areEqual(100, result, "The inner lambda function properly maps to the right symbol for a");
};
f12();
}
},
{
// Verifies split parameter scope when the captured closure is a NAMED function
// expression: the expression's own name must resolve recursively from the param
// scope, and must not be clobbered by same-named body declarations.
name: "Split parameter scope and function expressions with name",
body: function () {
function f1(a = 10, b = function c() { return a; }) {
assert.areEqual(10, a, "Initial value of parameter in the body scope of the method should be the same as the one in param scope");
var a = 20;
assert.areEqual(20, a, "New assignment in the body scope of the method updates the variable's value in body scope");
return b;
}
assert.areEqual(10, f1()(), "Function expression defined in the param scope captures the formals from the param scope not body scope");
function f2(a = 10, b = function c(recurse = true) { return recurse ? c(false) : a; }) {
return b;
}
assert.areEqual(10, f2()(), "Recursive function expression defined in the param scope captures the formals from the param scope not body scope");
// Deliberate second invocation of the same case (re-execution path) — TODO confirm.
assert.areEqual(10, f2()(), "Recursive function expression defined in the param scope captures the formals from the param scope not body scope");
var f3 = function f4 (a = function ( ) { b; return f4(20); }, b) {
if (a == 20) {
return 10;
}
return a;
}
assert.areEqual(10, f3()(), "Recursive call to the function from the param scope returns the right value");
var f5 = function f6 (a = function ( ) { b; return f6; }, b) {
if (a == 20) {
return 10;
}
return a;
}
assert.areEqual(10, f5()()(20), "Recursive call to the function from the param scope returns the right value");
var f7 = function f8 (a = function ( ) { b; }, b) {
if (a == 20) {
return 10;
}
var a = function () { return f8(20); };
return a;
}
assert.areEqual(10, f7()(), "Recursive call to the function from the body scope returns the right value");
// The eval("") forces the slower scoped-eval code path in the body.
var f9 = function f10 (a = function ( ) { b; return f10(20); }, b) {
eval("");
if (a == 20) {
return 10;
}
return a;
}
assert.areEqual(10, f9()(), "Recursive call to the function from the param scope returns the right value when eval is there in the body");
var f11 = function f12 (a = function ( ) { b; }, b) {
eval("");
if (a == 20) {
return 10;
}
var a = function () { return f12(20); };
return a;
}
assert.areEqual(10, f11()(), "Recursive call to the function from the body scope returns the right value when eval is there in the body");
function f13() {
var a = function jnvgfg(sfgnmj = function ccunlk() { jnvgfg(undefined, 1); }, b) {
if (b) {
assert.areEqual(undefined, jnvgfg, "This refers to the instance in the body and the value of the function expression is not copied over");
}
var jnvgfg = 10;
if (!b) {
sfgnmj();
return 100;
}
};
assert.areEqual(100, a(), "After the recursion the right value is returned by the split scoped function");
};
f13();
}
},
{
// Verifies split parameter scope inside object-literal (shorthand) methods:
// param-scope closures see the formal, body-scope closures see the body variable.
name: "Split parameter scope in member functions",
body: function () {
var o1 = {
f(a = 10, b = function () { return a; }) {
assert.areEqual(10, a, "Initial value of parameter in the body scope of the method should be the same as the one in param scope");
var a = 20;
assert.areEqual(20, a, "New assignment in the body scope of the method updates the variable's value in body scope");
return b;
}
}
assert.areEqual(o1.f()(), 10, "Function defined in the param scope of the object method captures the formals from the param scope not body scope");
var o2 = {
f1(a = 10, b = function () { return { f2 () { return a; } } }) {
var a = 20;
// c is (intentionally, it appears) an implicit global assigned in the body scope.
c = function () { return { f2 () { return a; } } };
return [b, c];
}
}
var result = o2.f1();
assert.areEqual(10, result[0]().f2(), "Short hand method defined in the param scope of the object method captures the formals from the param scope not body scope");
assert.areEqual(20, result[1]().f2(), "Short hand method defined in the param scope of the object method captures the formals from the param scope not body scope");
}
},
{
// Verifies that arrow functions (block-bodied, concise, and nested) defined in a
// default-parameter expression capture formals from the parameter scope, and that
// param-scope lambdas cannot see body-scope variables or later formals.
name: "Arrow functions in split param scope",
body: function () {
function f1(a = 10, b = () => { return a; }) {
assert.areEqual(10, a, "Initial value of parameter in the body scope should be the same as the one in param scope");
var a = 20;
assert.areEqual(20, a, "New assignment in the body scope updates the variable's value in body scope");
return b;
}
assert.areEqual(10, f1()(), "Arrow functions defined in the param scope captures the formals from the param scope not body scope");
function f2(a = 10, b = () => { return a; }) {
assert.areEqual(1, a, "Initial value of parameter in the body scope should be the same as the one in param scope");
var a = 20;
assert.areEqual(20, a, "New assignment in the body scope updates the variable's value in body scope");
return b;
}
assert.areEqual(1, f2(1)(), "Arrow functions defined in the param scope captures the formals from the param scope not body scope even when value is passed");
function f3(a = 10, b = () => a) {
assert.areEqual(10, a, "Initial value of parameter in the body scope should be the same as the one in param scope");
var a = 20;
assert.areEqual(20, a, "New assignment in the body scope updates the variable's value in body scope");
return b;
}
assert.areEqual(10, f3()(), "Arrow functions with concise body defined in the param scope captures the formals from the param scope not body scope");
// Defaults evaluate left-to-right: a becomes 20 via `a += 10` before c and d run.
((a = 10, b = a += 10, c = () => { assert.areEqual(20, a, "Value of the first formal inside the lambda should be same as the default value"); return a + b; }, d = c() * 10) => {
assert.areEqual(d, 400, "Initial value of the formal parameter inside the body should be the same as final value from the param scope");
})();
function f4(a = 10, b = () => { return () => a; }) {
a = 20;
return b;
}
assert.areEqual(10, f4()()(), "Nested lambda should capture the formal param value from the param scope");
assert.throws(function f4(a = () => x) { var x = 1; a(); }, ReferenceError, "Lambdas in param scope shouldn't be able to access the variables from body", "'x' is undefined");
assert.throws(function f5() { (function (a = () => x) { var x = 1; return a; })()(); }, ReferenceError, "Lambdas in param scope shouldn't be able to access the variables from body", "'x' is undefined");
assert.throws((a = () => 10, b = a() + c, c = 10) => {}, ReferenceError, "Formals defined to the right shouldn't be usable in lambdas", "Use before declaration");
}
},
{
// Verifies that a rest parameter coexists with split parameter scope: the rest
// array is populated from the spread args while param-scope closures still bind
// the defaulted formals correctly.
name: "Split parameter scope with Rest",
body: function () {
var arr = [2, 3, 4];
function f1(a = 10, b = function () { return a; }, ...c) {
assert.areEqual(arr.length, c.length, "Rest parameter should contain the same number of elements as the spread arg");
for (i = 0; i < arr.length; i++) {
assert.areEqual(arr[i], c[i], "Elements in the rest and the spread should be in the same order");
}
return b;
}
assert.areEqual(f1(undefined, undefined, ...arr)(), 10, "Presence of rest parameter shouldn't affect the binding");
((a = 10, b = () => a, ...c) => {
assert.areEqual(arr.length, c.length, "Rest parameter should contain the same number of elements as the spread arg");
for (i = 0; i < arr.length; i++) {
assert.areEqual(arr[i], c[i], "Elements in the rest and the spread should be in the same order");
}
return b;
})(undefined, undefined, ...arr);
}
},
{
// Verifies `this` binding across the param/body scope split: ordinary functions in
// the param scope take `this` from their call site, while lambdas and eval capture
// the enclosing function's `this`, which must be the same object in both scopes.
name: "Split parameter scope with this",
body: function () {
function f1(a = this.x, b = function() { assert.areEqual(100, this.x, "this object for the function in param scope is passed from the final call site"); return a; }) {
assert.areEqual(10, this.x, "this objects property retains the value from param scope");
a = 20;
return b;
}
assert.areEqual(10, f1.call({x : 10}).call({x : 100}), "Arrow functions defined in the param scope captures the formals from the param scope not body scope");
(function (a = this.x, b = function() {this.x = 20; return a;}) {
assert.areEqual(10, this.x, "this objects property retains the value in param scope before the inner function call");
b.call(this);
assert.areEqual(20, this.x, "Update to a this's property from the param scope is reflected in the body scope");
}).call({x : 10});
this.x = 10;
((a = this.x, b = function() { a; this.x = 20; }) => {
assert.areEqual(10, this.x, "this objects property retains the value in param scope before the inner function call in lambda");
b.call(this);
assert.areEqual(20, this.x, "Update to a this's property from the param scope of lambda function is reflected in the body scope");
})();
function f2(a = function() { return this.x; }, b = this.y, c = a.call({x : 20}) + b) {
assert.areEqual(undefined, this.x, "this object remains unaffected");
return c;
}
assert.areEqual(30, f2.call({y : 10}), "Properties are accessed from the right this object");
var thisObj = {x : 1, y : 20 };
function f3(a, b = () => { a; this.x = 10; return this.y; }) {
// The param-scope lambda has not run yet, so thisObj.x is still 1 here.
assert.areEqual(1, this.x, "Assignment from the param scope has not happened yet");
assert.areEqual(20, this.y, "y property of the this object is not affected");
return b;
}
assert.areEqual(20, f3.call(thisObj)(), "Lambda defined in the param scope returns the right property value from thisObj");
assert.areEqual(10, thisObj.x, "Assignment from the param scope method updates thisObj's property");
function f4(a, b = () => { a; return this; }) {
return b;
}
assert.areEqual(thisObj, f4.call(thisObj)(), "Lambda defined in the param scope returns the right this object");
var thisObj = { x : 1 };
function f5() {
return (a = this, b = function() { return a; }) => b;
}
assert.areEqual(thisObj, f5.call(thisObj)()(), "This object is returned properly from the inner lambda method's child function");
function f6(a, b = function () { return a; }) {
return (a = this, b = function() { return a; }) => b;
}
assert.areEqual(thisObj, f6.call(thisObj)()(), "This object is returned properly from the inner lambda defnied inside a split scoped function");
function f7(a, b = function () { return a; }) {
function f8() {
return (a = this, b = function() { return a; }) => b;
}
return f8.call(this);
}
assert.areEqual(thisObj, f7.call(thisObj)()(), "This object is returned properly from the inner lambda defnied inside a nested split scoped function");
function f9(a, b = function () { return a; }) {
function f10(c, d = function () { c; }) {
return (a = this, b = function() { return a; }) => b;
}
return f10.call(this);
}
assert.areEqual(thisObj, f9.call(thisObj)()(), "This object is returned properly from the inner lambda defnied inside a double nested split scoped function");
function f11(a = this.x * 10, b = () => { a; return this; }) {
assert.areEqual(10, a, "this should be accessible in the parameter scope");
assert.areEqual(thisObj, this, "Body scope should get the right value for this object");
assert.isTrue(eval("thisObj == this"), "Eval should be able to access the this object properly");
return b;
}
assert.areEqual(thisObj, f11.call(thisObj)(), "Lambda defined in the param scope returns the right this object");
function f12(a = this.x * 10, b = () => { a; return this; }) {
var c = 100;
assert.areEqual(10, a, "this should be accessible in the parameter scope");
assert.areEqual(thisObj, this, "Body scope should get the right value for this object");
assert.isTrue(eval("thisObj == this"), "Eval should be able to access the this object properly");
assert.areEqual(thisObj, (() => this)(), "Lambda should capture the this object from body properly");
assert.areEqual(100, c, "Body variable should be unaffected by the slot allocation of this object");
return b;
}
assert.areEqual(thisObj, f12.call(thisObj)(), "Lambda defined in the param scope returns the right this object");
function f13(a = 10, b = () => { a; return this; }) {
var c = 100;
assert.areEqual(thisObj, this, "Body scope should get the right value for this object");
var d = () => this;
this.x = 5;
assert.isTrue(eval("this.x == 5"), "Eval should be able to access the this object properly after the field is updated");
assert.isTrue(eval("d().x == 5"), "Lambda should capture the this symbol from the body properly");
assert.isTrue(eval("a == 10"), "Eval should be able to access the first parameter properly");
assert.isTrue(eval("b().x == 5"), "Lambda from the param scope should capture the this symbol properly");
assert.isTrue(eval("d().x == 5"), "Lambda should capture the this symbol from the body properly");
return b;
}
assert.areEqual(5, f13.call(thisObj)().x, "Lambda defined in the param scope returns the same this object as the one in body");
}
},
{
// Verifies split parameter scope with classes: class methods with split-scope
// defaults, class expressions defined inside default-parameter expressions (also
// nested, named, derived, and after comma-expressions), and rest/destructured
// parameters in class methods.
name: "Split parameter scope and class",
body: function () {
class c {
f(a = 10, d, b = function () { return a; }, c) {
assert.areEqual(10, a, "Initial value of parameter in the body scope in class method should be the same as the one in param scope");
var a = 20;
assert.areEqual(20, a, "Assignment in the class method body updates the value of the variable");
return b;
}
}
assert.areEqual(10, (new c()).f()(), "Method defined in the param scope of the class should capture the formal from the param scope itself");
function f1(a = 10, d, b = class { method1() { return a; } }, c) {
var a = 20;
assert.areEqual(10, (new b()).method1(), "Class method defined within the param scope should capture the formal from the param scope");
return b;
}
var result = f1();
assert.areEqual(10, (new result()).method1(), "Methods defined in a class defined in param scope should capture the formals form that param scope itself");
class c2 {
f1(a = 10, d, b = function () { a = this.f2(); return a; }, c) {
assert.areEqual(30, this.f2(), "this object in the body points to the right this object");
return b;
};
f2() {
return 30;
}
}
var f2Obj = new c2();
// b is called with a substitute `this`, so its this.f2() returns 100, not 30.
assert.areEqual(100, f2Obj.f1().call({f2() { return 100; }}), "Method defined in the param uses its own this object while updating the formal");
function f2(a = 10, d, b = class { method1() { return class { method2() { return a; }} } }, c) {
a = 20;
return b;
}
var obj1 = f2();
var obj2 = (new obj1()).method1();
assert.areEqual(10, (new obj2()).method2(), "Nested class definition in the param scope should capture the formals from the param scope");
var actualArray = [2, 3, 4];
class c3 {
f(a = 10, b = () => { return c; }, ...c) {
assert.areEqual(actualArray.length, c.length, "Rest param and the actual array should have the same length");
for (var i = 0; i < c.length; i++) {
assert.areEqual(actualArray[i], c[i], "Rest parameter should have the same value as the actual array");
}
// Reassigning the body-scope c must not affect what the param-scope lambda returns.
c = [];
return b;
}
}
result = (new c3()).f(undefined, undefined, ...[2, 3, 4])();
assert.areEqual(actualArray.length, result.length, "The result and the actual array should have the same length");
for (var i = 0; i < result.length; i++) {
assert.areEqual(actualArray[i], result[i], "The result array should have the same value as the actual array");
}
class c4 {
f({x:x = 10, y:y = () => { return x; }}) {
assert.areEqual(10, x, "Initial value of destructure parameter in the body scope in class method should be the same as the one in param scope");
x = 20;
assert.areEqual(20, x, "Assignment in the class method body updates the value of the variable");
return y;
}
}
assert.areEqual(10, (new c4()).f({})(), "The method defined as the default destructured value of the parameter should capture the formal from the param scope");
// The comma expressions below make the default evaluate to the LAST operand (the class).
function f3(a = 10, d, b = (function () { return a; }, class { method1() { return a; } }), c) {
var a = 20;
assert.areEqual(10, (new b()).method1(), "Class method defined within the param scope should capture the formal from the param scope");
return b;
}
result = f3();
assert.areEqual(10, (new result()).method1(), "Methods defined in a class defined, after another function definition, in the param scope should capture the formals form that param scope itself");
function f4(a = 10, d, b = (function () { return a; }, class {}, class { method1() { return a; } }), c) {
var a = 20;
return b;
}
result = f4();
assert.areEqual(10, (new result()).method1(), "Methods defined in a class defined, after another class definition, in the param scope should capture the formals form that param scope itself");
function f5(a = 10, d, b = (function () { return a; }, class {}, function () {}, class { method1() { return a; } }), c) {
var a = 20;
return b;
}
result = f5();
assert.areEqual(10, (new result()).method1(), "Methods defined in a class defined, after a function and class, in the param scope should capture the formals form that param scope itself");
function f6(a = 10, d, b = (function () { return a; }, class {}, function (a, b = () => a) {}, class { method1() { return a; } }), c) {
var a = 20;
return b;
}
result = f6();
assert.areEqual(10, (new result()).method1(), "Methods defined in a class defined, after a split scope function, in the param scope should capture the formals form that param scope itself");
function f7(a = 10, d, b = (function () { return a; }, class c1 { method1() { return a; } }), c) {
var a = 20;
assert.areEqual(10, (new b()).method1(), "Class method defined within the param scope should capture the formal from the param scope");
return b;
}
result = f7();
assert.areEqual(10, (new result()).method1(), "Methods defined in a class with name defined, after another function definition, in the param scope should capture the formals form that param scope itself");
function f8(a = 10, d, b = class c1 { method1() { return a; } }, c = (function () { return a; }, class c2 extends b { method2() { return a * a; } })) {
var a = 20;
assert.areEqual(10, (new b()).method1(), "Class method defined within the param scope should capture the formal from the param scope");
return c;
}
result = f8();
assert.areEqual(10, (new result()).method1(), "Methods defined in a class extending another class defined, after another function definition, in the param scope should capture the formals form that param scope itself");
assert.areEqual(100, (new result()).method2(), "Method in the derived class returns the right value");
}
},
{
// Verifies split parameter scope in generator functions: the param/body scope
// split must survive yields, and generators defined in the param scope capture
// the param-scope formals while generators in the body capture the body symbols.
name: "Split parameter scope in generator methods",
body: function () {
function *f1(a = 10, d, b = function () { return a; }, c) {
yield a;
var a = 20;
yield a;
yield b;
}
var f1Obj = f1();
assert.areEqual(10, f1Obj.next().value, "Initial value of the parameter in the body scope should be the same as the final value of the parameter in param scope");
assert.areEqual(20, f1Obj.next().value, "Assignment in the body scope updates the variable's value");
assert.areEqual(10, f1Obj.next().value(), "Function defined in the param scope captures the formal from the param scope itself");
function *f2(a = 10, d, b = function () { return a; }, c) {
yield a;
a = 20;
yield a;
yield b;
}
var f2Obj = f2();
assert.areEqual(10, f2Obj.next().value, "Initial value of the parameter in the body scope should be the same as the final value of the parameter in param scope");
assert.areEqual(20, f2Obj.next().value, "Assignment in the body scope updates the variable's value");
assert.areEqual(10, f2Obj.next().value(), "Function defined in the param scope captures the formal from the param scope itself even if it is not redeclared in the body");
function *f3(a = 10, d, b = function *() { yield a + c; }, c = 100) {
a = 20;
yield a;
yield b;
}
var f3Obj = f3();
assert.areEqual(20, f3Obj.next().value, "Assignment in the body scope updates the variable's value");
// The inner generator sees param-scope a (10, not the body's 20) plus c (100).
assert.areEqual(110, f3Obj.next().value().next().value, "Function defined in the param scope captures the formals from the param scope");
function *f4(a = 10, d, b = function *() { yield a; }, c) {
var a = 20;
yield function *() { yield a; };
yield b;
}
var f4Obj = f4();
assert.areEqual(20, f4Obj.next().value().next().value, "Generator defined inside the body captures the symbol from the body scope");
assert.areEqual(10, f4Obj.next().value().next().value, "Function defined in the param scope captures the formal from param scope even if it is captured in the body scope");
}
},
{
// Verifies split parameter scope with destructured parameters: closures in the
// destructuring defaults capture the destructured bindings from the param scope,
// including when the capturing default appears before the binding it references.
name: "Split parameter scope with destructuring",
body: function () {
function f1( {a:a1, b:b1}, c = function() { return a1 + b1; } ) {
assert.areEqual(10, a1, "Initial value of the first destructuring parameter in the body scope should be the same as the one in param scope");
assert.areEqual(20, b1, "Initial value of the second destructuring parameter in the body scope should be the same as the one in param scope");
a1 = 1;
b1 = 2;
assert.areEqual(1, a1, "New assignment in the body scope updates the first formal's value in body scope");
assert.areEqual(2, b1, "New assignment in the body scope updates the second formal's value in body scope");
assert.areEqual(30, c(), "The param scope method should return the sum of the destructured formals from the param scope");
return c;
}
assert.areEqual(30, f1({ a : 10, b : 20 })(), "Returned method should return the sum of the destructured formals from the param scope");
function f2({x:x = 10, y:y = function () { return x; }}) {
assert.areEqual(10, x, "Initial value of the first destructuring parameter in the body scope should be the same as the one in param scope");
x = 20;
assert.areEqual(20, x, "Assignment in the body updates the formal's value");
return y;
}
assert.areEqual(10, f2({ })(), "Returned method should return the value of the destructured formal from the param scope");
// Here y's default references x BEFORE x is destructured; it must still resolve.
function f3({y:y = function () { return x; }, x:x = 10}) {
assert.areEqual(10, x, "Initial value of the first destructuring parameter in the body scope should be the same as the one in param scope");
x = 20;
assert.areEqual(20, x, "Assignment in the body updates the formal's value");
return y;
}
assert.areEqual(10, f3({ })(), "Returned method should return the value of the destructured formal from the param scope even if declared after");
(({x:x = 10, y:y = function () { return x; }}) => {
assert.areEqual(10, x, "Initial value of the first destructuring parameter in the body scope should be the same as the one in param scope");
x = 20;
assert.areEqual(10, y(), "Assignment in the body does not affect the formal captured from the param scope");
})({});
}
},
{
name: "Nested split scopes",
body: function () {
// Each function level gets its own split parameter scope: a closure created
// in a default-value expression captures formals from the scope where it was
// defined, not from any enclosing or enclosed function, and not from the
// body-scope copies of same-named formals.
function f1(a = 10, b = function () { return a; }, c) {
function iFnc(d = 100, e = 200, pf1 = function () { return d + e; }) {
d = 1000;
e = 2000;
// NOTE(review): pf2 has no declaration, so this assignment writes a
// global binding — presumably intentional so result[2] below resolves.
pf2 = function () { return d + e; };
return [pf1, pf2];
}
return [b].concat(iFnc());
}
var result = f1();
assert.areEqual(10, result[0](), "Function defined in the param scope of the outer function should capture the symbols from its own param scope");
assert.areEqual(300, result[1](), "Function defined in the param scope of the inner function should capture the symbols from its own param scope");
assert.areEqual(3000, result[2](), "Function defined in the body scope of the inner function should capture the symbols from its body scope");
// f2/f3: inner formals shadow outer formals of the same name; each b must
// close over the formals of its own function's param scope.
function f2(a = 10, b = function () { return a; }, c) {
a = 1000;
c = 2000;
function iFnc(a = 100, b = function () { return a + c; }, c = 200) {
a = 1000;
c = 2000;
return b;
}
return [b, iFnc()];
}
result = f2();
assert.areEqual(10, result[0](), "Function defined in the param scope of the outer function should capture the symbols from its own param scope even if formals are with the same name in inner function");
assert.areEqual(300, result[1](), "Function defined in the param scope of the inner function should capture the symbols from its own param scope if formals are with the same name in the outer function");
function f3(a = 10, b = function () { return a; }, c) {
a = 1000;
c = 2000;
function iFnc(a = 100, b = function () { return a + c; }, c = 200) {
a = 1000;
c = 2000;
return b;
}
return [b, iFnc];
}
assert.areEqual(300, f3()[1]()(), "Function defined in the param scope of the inner function should capture the right formals even if the inner function is executed outside");
// f4/f5: explicitly passed arguments replace the defaults, so the inner
// param-scope closures observe the passed-in values.
function f4(a = 10, b = function () { return a; }, c) {
a = 1000;
function iFnc(a = 100, b = function () { return a + c; }, c = 200) {
a = 1000;
c = 2000;
return b;
}
return [b, iFnc(undefined, b, c)];
}
result = f4(1, undefined, 3);
assert.areEqual(1, result[0](), "Function defined in the param scope of the outer function correctly captures the passed in value for the formal");
assert.areEqual(1, result[1](), "Function defined in the param scope of the inner function is replaced by the function definition from the param scope of the outer function");
function f5(a = 10, b = function () { return a; }, c) {
function iFnc(a = 100, b = function () { return a + c; }, c = 200) {
a = 1000;
c = 2000;
return b;
}
return [b, iFnc(a, undefined, c)];
}
result = f5(1, undefined, 3);
assert.areEqual(1, result[0](), "Function defined in the param scope of the outer function correctly captures the passed in value for the formal");
assert.areEqual(4, result[1](), "Function defined in the param scope of the inner function captures the passed values for the formals");
// f6: outer function has a simple parameter list (no split scope); only the
// inner function's param scope is split.
function f6(a , b, c) {
function iFnc(a = 1, b = function () { return a + c; }, c = 2) {
a = 10;
c = 20;
return b;
}
return iFnc;
}
assert.areEqual(3, f6()()(), "Function defined in the param scope captures the formals when defined inside another method without split scope");
// f7: a split-scope function nested inside an IIFE inside another
// split-scope function still resolves to its own param scope.
function f7(a = 10 , b = 20, c = function () { return a + b; }) {
return (function () {
function iFnc(a = 100, b = function () { return a + c; }, c = 200) {
a = 1000;
c = 2000;
return b;
}
return [c, iFnc];
})();
}
result = f7();
assert.areEqual(30, result[0](), "Function defined in the param scope of the outer function should capture the symbols from its own param scope even in nested case");
assert.areEqual(300, result[1]()(), "Function defined in the param scope of the inner function should capture the symbols from its own param scope even when nested inside a normal method and a split scope");
// f8/f9: split-scope functions defined inside another default-value
// expression; each closure binds to the nearest enclosing param scope.
function f8(a = 1, b = function (d = 10, e = function () { return a + d; }) { assert.areEqual(d, 10, "Split scope function defined in param scope should capture the right formal value"); d = 20; return e; }, c) {
a = 2;
return b;
}
assert.areEqual(11, f8()()(), "Split scope function defined within the param scope should capture the formals from the corresponding param scopes");
function f9(a = 1, b = function () { return function (d = 10, e = function () { return a + d; }) { d = 20; return e; } }, c) {
a = 2;
return b;
}
assert.areEqual(11, f9()()()(), "Split scope function defined within the param scope should capture the formals from the corresponding param scope in nested scope");
}
},
{
name: "Split scope with symbol shadowing",
body: function () {
// A hoisted function declaration (or var) in the body shadows a same-named
// formal in the body scope only; closures created in the param scope keep
// seeing the param-scope binding.
function f1(a = 10, b = function () { return a; }) {
assert.areEqual(100, a(), "Function definition inside the body is hoisted");
function a () {
return 100;
}
return b;
}
assert.areEqual(10, f1()(), "Function definition in the param scope captures the symbol from the param scope");
// f2: c captures the param-scope b (the default closure) before the body's
// hoisted function b replaces the body-scope binding.
function f2(a = 10, b = function () { return a; }, c = b) {
a = 20;
assert.areEqual(20, b(), "Function definition in the body scope captures the body symbol");
function b() {
return a;
}
return [c, b];
}
var result = f2();
assert.areEqual(10, result[0](), "Function definition in the param scope captures the param scope symbol");
assert.areEqual(20, result[1](), "Function definition in the body captures the body symbol");
// f3: a free variable referenced from a param-scope closure resolves to the
// outer scope, not to a body-scope declaration of the same name.
var g = 1;
function f3(a = 10, b = function () { a; return g;}) {
assert.areEqual(10, g(), "Function definition inside the body is unaffected by the outer variable");
function g() {
return 10;
}
return b;
}
assert.areEqual(1, f3()(), "Function definition in the param scope captures the outer scoped var");
// f4: the body-scope var x1 must not leak into the param-scope closure,
// which sees the outer x1 instead.
function f4(a = x1, b = function g() {
a;
return function h() {
assert.areEqual(10, x1, "x1 is captured from the outer scope");
};
}) {
var x1 = 100;
b()();
};
var x1 = 10;
f4();
// f5/f6: block-scoped function statements and body vars do not disturb the
// outer-scope binding captured from the param scope.
var x2 = 1;
function f5(a = x2, b = function() { a; return x2; }) {
{
function x2() {
}
}
var x2 = 2;
return b;
}
assert.areEqual(1, f5()(), "Symbol capture at the param scope is unaffected by the inner definitions");
var x3 = 1;
function f6(a = x3, b = function(_x) { a; return x3; }) {
var x3 = 2;
return b;
}
assert.areEqual(1, f6()(), "Symbol capture at the param scope is unaffected by other references in the body and param");
}
},
{
name : "Split scope and arguments symbol",
body : function () {
assert.throws(function () { eval("function f(a = arguments, b = () => a) { }"); }, SyntaxError, "Use of arguments symbol is not allowed in non-simple parameter list with split scope", "Use of 'arguments' in non-simple parameter list is not supported when one of the formals is captured");
assert.throws(function () { eval("function f1() { function f2(a = arguments, b = () => a) { } }"); }, SyntaxError, "Use of arguments symbol is not allowed in non-simple parameter list with split scope inside another function", "Use of 'arguments' in non-simple parameter list is not supported when one of the formals is captured");
assert.throws(function () { eval("function f(a = arguments, b = () => a, c = eval('')) { }"); }, SyntaxError, "Use of arguments symbol is not allowed in non-simple parameter list with eval", "Use of 'arguments' in non-simple parameter list is not supported when one of the formals is captured");
assert.throws(function () { eval("function f(a = arguments = [1, 2], b = () => a) { }"); }, SyntaxError, "Use of arguments symbol is not allowed in non-simple parameter list with split scope", "Use of 'arguments' in non-simple parameter list is not supported when one of the formals is captured");
assert.throws(function () { eval("function f(a = 10, b = () => a, c = arguments) { }"); }, SyntaxError, "Use of arguments symbol is not allowed in non-simple parameter list with split scope", "Use of 'arguments' in non-simple parameter list is not supported when one of the formals is captured");
assert.throws(function () { eval("function f(a = 10, b = () => a, c = a = arguments) { }"); }, SyntaxError, "Use of arguments symbol is not allowed in non-simple parameter list with split scope", "Use of 'arguments' in non-simple parameter list is not supported when one of the formals is captured");
assert.throws(function () { eval("function f(a, b = () => { a; arguments}) { }"); }, SyntaxError, "Use of arguments symbol is not allowed in non-simple parameter list when captured in lambda method", "Use of 'arguments' in non-simple parameter list is not supported when one of the formals is captured");
assert.throws(function () { eval("function f(a = 10, b = (c = arguments) => a) { }"); }, SyntaxError, "Use of arguments symbol is not allowed in non-simple parameter list when captured in a lambda in split scope", "Use of 'arguments' in non-simple parameter list is not supported when one of the formals is captured");
assert.throws(function () { eval("function f(a, b = () => a, c = () => { return arguments; }) { }"); }, SyntaxError, "Use of arguments symbol is not allowed in non-simple parameter list in split scope when captured by a lambda method", "Use of 'arguments' in non-simple parameter list is not supported when one of the formals is captured");
assert.throws(function () { eval("function f(a = 10, b = () => a, c = () => () => arguments) { }"); }, SyntaxError, "Use of arguments symbol is not allowed in non-simple parameter list in split scope when captured by nested lambda", "Use of 'arguments' in non-simple parameter list is not supported when one of the formals is captured");
assert.throws(function () { eval("function f(a, arguments = function () { return a; } ) { }"); }, SyntaxError, "Use of arguments as a parameter name is not allowed in non-simple parameter list in split scope when captured by nested lambda", "Use of 'arguments' in non-simple parameter list is not supported when one of the formals is captured");
assert.throws(function () { eval("function f({a, arguments = function () { return a; }}) { }"); }, SyntaxError, "Use of arguments as a parameter name is not allowed in destructuring parameter list in split scope when captured by nested lambda", "Use of 'arguments' in non-simple parameter list is not supported when one of the formals is captured");
assert.throws(function () { eval("function f({a = arguments}, b = function () { return a; } ) { }"); }, SyntaxError, "Use of arguments is not allowed in destructuring parameter list in split scope when captured by nested lambda", "Use of 'arguments' in non-simple parameter list is not supported when one of the formals is captured");
assert.throws(function () { eval("function f(a = () => arguments) { }"); }, SyntaxError, "Arguments cannot be captured in the param scope", "Use of 'arguments' in non-simple parameter list is not supported when one of the formals is captured");
assert.throws(function () { eval("function f(a = () => arguments[0]) { }"); }, SyntaxError, "Arguments cannot be captured in the param scope", "Use of 'arguments' in non-simple parameter list is not supported when one of the formals is captured");
assert.throws(function () { eval("function f(a = 1, b = () => arguments[0]) { }"); }, SyntaxError, "Arguments cannot be captured in the param scope at any position", "Use of 'arguments' in non-simple parameter list is not supported when one of the formals is captured");
assert.throws(function () { eval("function f(a = () => arguments[0] + b, b = 10) { }"); }, SyntaxError, "Arguments cannot be captured in the param scope at any position", "Use of 'arguments' in non-simple parameter list is not supported when one of the formals is captured");
assert.throws(function () { eval("function f(a = () => arguments) { var arguments }"); }, SyntaxError, "Arguments cannot be captured in the param scope even when duplicate definition occurs in the body", "Use of 'arguments' in non-simple parameter list is not supported when one of the formals is captured");
assert.throws(function () { eval("function f(a = () => arguments) { function arguments() { } }"); }, SyntaxError, "Arguments cannot be captured in the param scope even when duplicate definition occurs in the body", "Use of 'arguments' in non-simple parameter list is not supported when one of the formals is captured");
assert.throws(function () { eval("function f(arguments, b = () => arguments) { }"); }, SyntaxError, "Arguments cannot be captured in the param scope even if it is a formal shadowing the actual arguments", "Use of 'arguments' in non-simple parameter list is not supported when one of the formals is captured");
assert.throws(function () { eval("function f({a, arguments}, b = () => a) { }"); }, SyntaxError, "Arguments cannot be used as a formal name when one of the formal is captured", "Use of 'arguments' in non-simple parameter list is not supported when one of the formals is captured");
assert.throws(function () { eval("function f(a, {arguments, b = () => arguments}) { }"); }, SyntaxError, "Arguments cannot be used as a formal name when one of the formal is captured", "Use of 'arguments' in non-simple parameter list is not supported when one of the formals is captured");
function f1(a, b = () => a) {
eval("");
b = () => { return arguments; };
assert.areEqual(1, arguments[0], "Arguments object receives the first parameter properly");
assert.areEqual(1, b()[0], "First argument receives the right value passed in");
assert.areEqual(undefined, b()[1], "Second argument receives the right value passed in");
assert.areEqual(2, arguments.length, "Arguments should have only two elements in it");
}
f1(1, undefined);
function f2(a, b = () => { return a; }) {
a = 10;
assert.areEqual(1, arguments[0], "First argument is properly received");
assert.areEqual(2, arguments[2], "Third argument is properly received");
assert.areEqual(3, arguments.length, "Only three arguments are passed in");
(() => { arguments = [3, 4]; a; })();
assert.areEqual(3, arguments[0], "Arguments symbol is updated with the new value when the lambda is executed");
assert.areEqual(4, arguments[1], "New array is properly assigned to arguments symbol");
assert.areEqual(2, arguments.length, "New array has only elements");
return b;
}
assert.areEqual(1, f2(1, undefined, 2)(), "Param scope method properly captures the first parameter");
function f3(a, b = () => { return a; }) {
eval("");
a = 10;
assert.areEqual(1, arguments[0], "First argument is properly received");
assert.areEqual(2, arguments[2], "Third argument is properly received");
assert.areEqual(3, arguments.length, "Only three arguments are passed in");
(() => { arguments = [3, 4]; a; })();
assert.areEqual(3, arguments[0], "Arguments symbol is updated with the new value when the lambda is executed");
assert.areEqual(4, arguments[1], "New array is properly assigned to arguments symbol");
assert.areEqual(2, arguments.length, "New array has only elements");
return b;
}
assert.areEqual(1, f3(1, undefined, 2)(), "Param scope method properly captures the first parameter, with eval in the body");
function f4(a, b = function () { a; } ) {
var c = 10;
assert.areEqual(1, arguments[0], "Arguments symbol properly receives the passed in values");
eval("");
}
f4(1);
function f5(a, b = function () { a; } ) {
var c = 10;
assert.areEqual(1, arguments[0], "Arguments symbol properly receives the passed in values");
arguments = 100;
assert.areEqual(100, arguments, "Arguments is updated after the assignment");
eval("");
}
f5(1);
function f6(a, b = function () { a; } ) {
assert.areEqual(1, arguments[0], "Arguments symbol properly receives the passed in values");
arguments = 100;
assert.areEqual(100, arguments, "Arguments is updated after the assignment");
}
f6(1);
function f7(a, b = function () { a; } ) {
assert.areEqual(5, arguments(), "Function definition is hoisted");
function arguments() { return 5; }
}
f7(1);
function f8(a, b = function () { a; } ) {
assert.areEqual(5, arguments(), "Function definition is hoisted");
function arguments() { return 5; }
eval("");
}
f8(1);
function f9(a, b = function () { a; } ) {
assert.areEqual(1, eval("a"), "Eval should be able to access the first argument properly");
assert.areEqual(1, eval("arguments[0]"), "Eval should be able to access the first argument properly from arguments object");
assert.areEqual(1, arguments[0], "Arguments symbol properly receives the passed in values");
arguments = 100;
assert.areEqual(100, arguments, "Arguments is updated after the assignment");
assert.areEqual(100, eval("arguments"), "Updated value of arguments is visible in eval");
assert.areEqual(1, eval("a"), "First argument remains unchanged after the arguments are updated");
}
f9(1);
function f10(a, b = function () { a; } ) {
assert.areEqual(1, arguments[0], "Arguments symbol properly receives the passed in values");
var arguments = 100;
assert.areEqual(100, arguments, "Arguments is updated after the assignment");
}
f10(1);
function f11(a, b = function () { a; } ) {
assert.areEqual(1, arguments[0], "Arguments symbol properly receives the passed in values");
var arguments = 100;
assert.areEqual(100, arguments, "Arguments is updated after the assignment");
eval("");
}
f11(1);
function f12(a, b = function () { a; } ) {
assert.areEqual(1, arguments[0], "Arguments symbol properly receives the passed in values");
b = () => arguments;
assert.areEqual(1, b()[0], "Lambda captures the right arguments symbol");
var arguments = 100;
assert.areEqual(100, arguments, "Arguments is updated after the assignment");
assert.areEqual(100, b(), "Lambda now gives the updated value");
eval("");
}
f12(1);
function f13(a, b = () => { return a; }) {
a = 10;
assert.areEqual(1, arguments[0], "First argument is properly received");
assert.areEqual(2, arguments[2], "Third argument is properly received");
assert.areEqual(3, arguments.length, "Only three arguments are passed in");
((c = arguments = [3, 4]) => { a; })();
assert.areEqual(3, arguments[0], "Arguments symbol is updated with the new value when the lambda is executed");
assert.areEqual(4, arguments[1], "New array is properly assigned to arguments symbol");
assert.areEqual(2, arguments.length, "New array has only elements");
return b;
}
assert.areEqual(1, f13(1, undefined, 2)(), "Param scope method properly captures the first parameter");
function f14(a, b = () => { return a; }) {
eval("");
a = 10;
assert.areEqual(1, arguments[0], "First argument is properly received");
assert.areEqual(2, arguments[2], "Third argument is properly received");
assert.areEqual(3, arguments.length, "Only three arguments are passed in");
((c = arguments = [3, 4]) => { a; })();
assert.areEqual(3, arguments[0], "Arguments symbol is updated with the new value when the lambda is executed");
assert.areEqual(4, arguments[1], "New array is properly assigned to arguments symbol");
assert.areEqual(2, arguments.length, "New array has only elements");
return b;
}
assert.areEqual(1, f14(1, undefined, 2)(), "Param scope method properly captures the first parameter, with eval in the body");
function f15(a, b = function () { a; }, ...c) {
assert.areEqual(1, arguments[0], "Checking first argument");
assert.areEqual(undefined, arguments[1], "Checking second argument");
assert.areEqual(2, arguments[2], "Checking third argument");
assert.areEqual(3, arguments[3], "Checking fourth argument");
assert.areEqual([2, 3], c, "Rest argument should get the trailing parameters properly");
var arguments = 100;
assert.areEqual(100, arguments, "Arguments is updated after the assignment");
assert.areEqual([2, 3], c, "Rest should remain unaffected when arguments is updated");
eval("");
}
f15(1, undefined, 2, 3);
var f16 = function f17(a, b = function () { a; }, ...c) {
if (a === 1) {
assert.areEqual(1, arguments[0], "Checking first argument");
assert.areEqual(undefined, arguments[1], "Checking second argument");
assert.areEqual(2, arguments[2], "Checking third argument");
assert.areEqual(3, arguments[3], "Checking fourth argument");
assert.areEqual([2, 3], c, "Rest argument should get the trailing parameters properly");
return f17(undefined, undefined, ...c);
} else {
assert.areEqual(undefined, arguments[0], "Checking first argument on the recursive call");
assert.areEqual(undefined, arguments[1], "Checking second argument on the recursive call");
assert.areEqual(2, arguments[2], "Checking third argument on the recursive call");
assert.areEqual(3, arguments[3], "Checking fourth argument on the recursive call");
assert.areEqual([2, 3], c, "Rest argument should get the trailing parameters properly");
var arguments = 100;
assert.areEqual(100, arguments, "Arguments is updated after the assignment");
assert.areEqual([2, 3], c, "Rest should remain unaffected when arguments is updated");
return eval("c");
}
}
assert.areEqual([2, 3], f16(1, undefined, 2, 3), "Rest should remain unaffected when arguments is updated");
function f18(a, b = function arguments(c) {
if (!c) {
return arguments.callee(a, 10, 20);
}
return arguments;
}) {
assert.areEqual(10, b()[1], "Function defined in the param scope can be called recursively");
assert.areEqual(1, arguments[0], "Arguments symbol is unaffected by the function expression");
}
f18(1);
function f19(a, b = arguments) {
var c = function arguments(c) {
if (!arguments.length) {
return arguments.callee(a, 10, 20, 30);
}
return arguments;
}
assert.areEqual(30, c()[3], "In the function body the arguments function expression with name is not visible");
assert.areEqual(1, b[0], "In the param scope arguments symbol referes to the passed in values");
}
f19(1, undefined, 2, 3, 4);
function f20(a, b = function arguments(c) {
if (!c) {
return arguments.callee(a, 10, 20);
}
return eval("arguments");
}) {
assert.areEqual(1, b()[0], "Function defined in the param scope can be called recursively when eval occurs in its body");
assert.areEqual(1, arguments[0], "Arguments symbol is unaffected by the function expression");
}
f20(1);
function f21(a, b = arguments) {
var c = function arguments(c) {
if (!arguments.length) {
return arguments.callee(a, 10, 20, 30);
}
return arguments;
}
assert.areEqual(30, c()[3], "In the function body the arguments function expression with name is not visible when eval is there in the body");
assert.areEqual(3, eval("b[3]"), "In the param scope arguments symbol referes to the passed in values");
}
f21(1, undefined, 2, 3, 4);
function f22(a, b = () => a) {
assert.areEqual(1, arguments[0], "Function in block causes a var declaration to be hoisted and the initial value should be same as the arguments symbol");
{
{
function arguments() {
return 10;
}
}
}
assert.areEqual(1, b(), "Function defined in the param scope should be able to capture the formal even when arguments in overwritten the body");
assert.areEqual(10, arguments(), "Hoisted var binding is updated after the block is exected");
}
f22(1);
function f23(a, b = () => a) {
function f16() {
eval("");
this.arguments = 1;
}
a = 10;
var obj = new f16();
function arguments() {
return 10;
}
assert.areEqual(1, obj.arguments, "Inner function with eval should add the property named arguments when duplicate arguments definition occurs in the parent body");
assert.areEqual(1, b(), "Formal captured from the param scope should be constrained to the param scope");
};
f23(1);
}
},
{
name: "Split scope and super call",
body: function () {
// super() must be reachable from default-value lambdas (param scope), from
// body lambdas, from eval in the body, and directly in a default value,
// without corrupting body-scope variables or param-scope formal capture.
class c1 {
constructor() {
// Returning an object makes super() yield { x : 1 } to the derived class.
return { x : 1 };
}
};
class c2 extends c1 {
constructor(a = 1, b = () => { assert.areEqual(1, super().x, "Super is accessible in the param scope"); return a; }) {
var c = 10;
a = 20;
(() => assert.areEqual(10, c, "Allocation of scope slot for super property shouldn't affect the body variables"))();
assert.areEqual(1, b(), "Function defined in the param scope should capture the formal");
return {};
}
}
new c2();
class c3 extends c1 {
constructor(a = 1, b = () => { return a; }) {
(() => assert.areEqual(1, super().x, "Lambda should be able to access the super method properly in the body"))();
a = 10;
assert.areEqual(1, b(), "Function defined in the param scope should capture the formal");
}
}
new c3();
class c4 extends c1 {
constructor(a = 1, b = () => { return a; }) {
var c = 10;
(() => assert.areEqual(10, c, "Allocation of scope slot for super property shouldn't affect the body variables"))();
assert.areEqual(1, b(), "Function defined in the param scope should capture the formal");
assert.areEqual(1, eval("super().x"), "Eval should be able to access the super property properly");
}
}
new c4();
class c5 extends c1 {
constructor(a = super().x, b = () => { return a; }) {
assert.areEqual(1, a, "First formal calls the super from the param scope");
var c = 10;
(() => assert.areEqual(10, c, "Allocation of scope slot for super property shouldn't affect the body variables"))();
assert.areEqual(1, b(), "Function defined in the param scope should capture the formal");
}
}
new c5();
}
},
{
name: "Split scope and super property",
body: function () {
// super.foo() must resolve correctly from param-scope lambdas, body
// lambdas, eval, and default-value expressions, while split-scope formal
// capture and body variables stay intact.
class c1 {
foo () {
return 1;
}
};
class c2 extends c1 {
foo(a = 1, b = () => { assert.areEqual(1, super.foo(), "Super property access works fine from a lambda defined in the param scope"); return a; }) {
a = 20;
var c = 10;
(() => assert.areEqual(10, c, "Allocation of scope slot for super property shouldn't affect the body variables"))();
assert.areEqual(1, b(), "Function defined in the param scope should capture the formal");
}
}
(new c2()).foo();
class c3 extends c1 {
foo(a = 1, b = () => { return a; }) {
var c = 10;
a = 20;
(() => assert.areEqual(1, super.foo(), "Super property access works fine from a lambda defined in the body scope"))();
assert.areEqual(1, b(), "Function defined in the param scope should capture the formal");
}
}
(new c3()).foo();
class c4 extends c1 {
foo(a = 1, b = () => { return a; }) {
var c = 10;
a = 20;
(() => assert.areEqual(10, c, "Allocation of scope slot for super property shouldn't affect the body variables"))();
assert.areEqual(1, b(), "Function defined in the param scope should capture the formal");
assert.areEqual(1, eval("super.foo()"), "Eval should be able to access the super property properly from the body scope");
}
}
(new c4()).foo();
class c5 extends c1 {
foo(a = super.foo(), b = () => { return a; }) {
assert.areEqual(1, a, "First formal uses the super property from the param scope");
var c = 10;
(() => assert.areEqual(10, c, "Allocation of scope slot for super property shouldn't affect the body variables"))();
a = 20;
assert.areEqual(1, b(), "Function defined in the param scope should capture the formal");
}
}
(new c5()).foo();
}
},
{
name: "Split scope and new.target",
body: function () {
// new.target must report the derived class whether read from a param-scope
// lambda, a body lambda, directly in the body, inside eval, or in a
// default-value expression.
class c1 {
constructor(newTarget) {
assert.isTrue(newTarget == new.target, "Base class should receive the right value for new.target");
}
};
class c2 extends c1 {
constructor(a = 1, b = () => { assert.isTrue(new.target == c2, "new.target should have the derived class value in the param scope"); return a; }) {
super(c2);
var c = 10;
a = 20;
(() => assert.areEqual(10, c, "Allocation of scope slot for super property shouldn't affect the body variables"))();
assert.areEqual(1, b(), "Function defined in the param scope should capture the formal");
}
}
new c2();
class c3 extends c1 {
constructor(a = 1, b = () => { return a; }) {
super(c3);
var c = 10;
(() => assert.isTrue(new.target == c3, "new.target should be the derived class in the body scope when captured by lambda"))();
assert.isTrue(new.target == c3, "new.target should be the derived class in the body scope");
}
}
new c3();
class c4 extends c1 {
constructor(a = 1, b = () => { return a; }) {
super(c4);
assert.isTrue(eval("new.target == c4"), "new.target should be the derived class inside eval");
assert.isTrue(new.target == c4, "new.target should be the derived class in the body scope");
}
}
new c4();
class c5 extends c1 {
constructor(a = new.target, b = () => { return a; }) {
super(c5);
assert.isTrue(a == c5, "new.target accessed from the param scope should work fine");
}
}
new c5();
}
},
{
name: "Split parameter scope and eval",
body: function () {
function g() {
return 3 * 3;
}
function f1(h = () => eval("g()")) {
assert.areEqual(6, g(), "Right method is called in the body scope");
function g() {
return 2 * 3;
}
return h();
}
assert.areEqual(9, f1(), "Paramater scope remains split");
function f2(h = () => eval("g()")) {
assert.areEqual(6, eval("g()"), "Right method is called in the body scope");
function g() {
return 2 * 3;
}
return h();
}
assert.areEqual(9, f2(), "Paramater scope remains split");
}
},
{
name: "Split parameter scope with eval in body",
body: function () {
// An eval in the body must see the body-scope copies of the formals (and
// their reassignments), while param-scope closures keep the param-scope
// values — including when eval resolves a function named like a formal.
function f1(a = 10, b = function () { return a; }) {
assert.areEqual(10, a, "Initial value of parameter in the body scope should be the same as the one in param scope");
assert.areEqual(10, eval('a'), "Initial value of parameter in the body scope in eval should be the same as the one in param scope");
var a = 20;
assert.areEqual(20, a, "New assignment in the body scope updates the variable's value in body scope");
assert.areEqual(20, eval('a'), "New assignment in the body scope updates the variable's value when evaluated through eval in body scope");
return b;
}
assert.areEqual(10, f1()(), "Function defined in the param scope captures the formals from the param scope not body scope with eval");
function f2(a = 10, b = function () { return a; }) {
assert.areEqual(10, eval('b()'), "Eval of the function from param scope should return the right value for the formal");
var a = 20;
assert.areEqual(10, eval('b()'), "Eval of the function from param scope should return the right value for the formal even after assignment to the corresponding body symbol");
return b;
}
assert.areEqual(10, f2()(), "Function defined in the param scope captures the formals from the param scope not body scope with eval");
function f3(a = 10, b = function () { return a; }) {
assert.areEqual(100, eval('b()'), "Eval of the function from body scope should return the right value for the formal");
var a = 20;
// Hoisted body declaration of b shadows the param-scope default closure.
function b () { return a * a; }
assert.areEqual(400, eval('b()'), "Eval of the function from body scope should return the right value after assignment to the corresponding body symbol");
return b;
}
assert.areEqual(400, f3()(), "Function defined in the body scope captures the symbol from the body scope with eval");
// f4 is invoked via .call, so arguments[0] is the first explicit arg (2);
// the IIFE containing eval forces full scope materialization.
function f4 (a, b, c = function () { b; }, d = 1) {
var e = 10;
assert.areEqual(2, arguments[0], "Unmapped arguments value has the expected value in the body");
(function () {
eval('');
}());
};
f4.call(1, 2);
}
},
{
name: "Split scope and with",
body: function () {
// `with` blocks inside (or inside closures of) split-scope functions must
// resolve identifiers through the with-object first, then the usual scope
// chain — body vars, param-scope defaults, and outer vars.
function f1(a, b, c = function () { a; }) {
with ({}) {
var d = function () {
return 10;
};
assert.areEqual(10, d(), "With inside a split scope function should work fine");
}
}
f1();
function f2(a, b, c = function () { a; }) {
var d = function () {
return 10;
};
with ({}) {
assert.areEqual(10, d(), "With inside a split scope function should be able to access the function definition from the body");
}
}
f2();
function f3(a, b = function () { return 10; }, c = function () { a; }) {
with ({}) {
assert.areEqual(10, b(), "With inside a split scope function should be able to access the function definition from the param scope");
}
}
f3();
function f4(a, b = function () { return 10; }, c = function () { a; }) {
var d = {
e : function () { return 10; }
};
// NOTE(review): e is undeclared here, so this creates a global named e;
// the with-object's own e must still win inside the with block.
e = function () { return 100; };
with (d) {
assert.areEqual(10, e(), "With should use the function definition inside the object not the one from body");
}
}
f4();
function f5(a, b = { d : function () { return 10; } }, c = function () { a; }) {
var d = { };
with (b) {
assert.areEqual(10, d(), "With should use the function definition inside the object from the param scope not the one from body");
}
}
f5();
var v6 = 100
function f6(a, b, c = function () { a; }, e = function () { with({}) { assert.areEqual(100, v6, "With inside param scope should be able to access var from outside"); } }, f = e()) {
var v6 = { };
}
f6();
function f7(a, b, c = function () { a; }) {
with ({}) {
assert.areEqual(100, v6, "With inside body scope should be able to access var from outside");
}
}
f7();
// f8/f12 are each run twice so the second call exercises any cached scope
// information from the first.
function f8() {
function f9() {
return 1;
}
var v1 = 10;
function f10(a = 10, b = function f11() {
a;
assert.areEqual(10, v1, "Function in the param scope should be able to access the outside variable");
with ({}) {
assert.areEqual(1, f9(), "With construct inside a param scoped function should be able to execute functions from outside");
}
}) {
b();
};
f10();
}
f8();
f8();
function f12() {
function f13() {
return 1;
}
var v2 = 100;
function f14(a = 10, b = function () {
assert.areEqual(10, a, "Function in the param scope should be able to access the formal from parent");
return function () {
assert.areEqual(10, a, "Function nested in the param scope should be able to access the formal from the split scoped function");
assert.areEqual(100, v2, "Function in the param scope should be able to access the outside variable");
with ({}) {
assert.areEqual(1, f13(), "With construct inside a param scoped function should be able to execute functions from outside");
}
};
}) {
b()();
};
f14();
}
f12();
f12();
}
},
{
    // Verifies that eval() invoked inside default-parameter initializers sees the
    // parameter ("split") scope, not the body scope: symbols captured by eval in a
    // default value must keep the param-scope binding even after the body shadows
    // or reassigns them.
    name: "Basic eval in parameter scope",
    body: function () {
        assert.areEqual(1,
            function (a = eval("1")) { return a; }(),
            "Eval with static constant works in parameter scope");
        {
            let b = 2;
            assert.areEqual(2,
                function (a = eval("b")) { return a; }(),
                "Eval with parent var reference works in parameter scope");
        }
        assert.areEqual(1,
            function (a, b = eval("arguments[0]")) { return b; }(1),
            "Eval with arguments reference works in parameter scope");
        // The function's own name is visible to eval inside its parameter list.
        function testSelf(a = eval("testSelf(1)")) {
            return a;
        }
        assert.areEqual(1, testSelf(1), "Eval with reference to the current function works in parameter scope");
        var testSelfExpr = function (a = eval("testSelfExpr(1)")) {
            return a;
        }
        assert.areEqual(1, testSelfExpr(), "Eval with reference to the current function expression works in parameter scope");
        {
            let a = 1, b = 2, c = 3;
            // Formals shadow the outer lets, so eval("a") in a's own initializer is a TDZ use.
            function testEvalRef(a = eval("a"), b = eval("b"), c = eval("c")) {
                return [a, b, c];
            }
            assert.throws(function () { testEvalRef(); },
                ReferenceError,
                "Eval with reference to the current formal throws",
                "Use before declaration");
            // Distinct formal names do not shadow, so eval resolves to the outer lets.
            function testEvalRef2(x = eval("a"), y = eval("b"), z = eval("c")) {
                return [x, y, z];
            }
            assert.areEqual([1, 2, 3], testEvalRef2(), "Eval with references works in parameter scope");
        }
        // b's eval captured the param-scope `a`; a later body assignment to the
        // body copy must not be observed through b.
        function f1(a = 10, b = () => eval("a")) {
            assert.areEqual(10, eval("a"), "In the body initial value of the symbol should be same as the final value from param scope");
            a = 20;
            assert.areEqual(20, eval("a"), "In the body after assignment the symbol value is updated");
            assert.areEqual(10, b(), "Eval in the param scope captures the symbol from the param scope");
        }
        f1();
        function f2(a = 10, b = () => eval("a")) {
            a = 20;
            assert.areEqual(10, b(), "Eval in the param scope captures the symbol from the param scope even when there is no eval in the body");
        }
        f2();
        function f3(a = 10, b = function () { return eval("a"); }) {
            a = 20;
            assert.areEqual(10, b(), "Eval in the param scope captures the symbol from the param scope even when there is no eval in the body");
        }
        f3();
        // A later default (c) may mutate an earlier formal (a); b's captured view
        // must reflect the final param-scope value (30).
        function f4(a = 10, b = () => eval("a"), c = a = 30) {
            assert.areEqual(30, eval("a"), "In the body initial value of the symbol should be same as the final value from param scope");
            a = 20;
            assert.areEqual(20, eval("a"), "In the body after assignment the symbol value is updated");
            assert.areEqual(30, b(), "Eval in the param scope captures the symbol from the param scope");
        }
        f4();
        // `var a` in the body redeclares; the body copy starts at the param-scope
        // value (30, the passed argument) before the var initializer runs.
        function f5(a = 10, b = () => eval("a")) {
            assert.areEqual(30, eval("a"), "In the body initial value of the symbol should be same as the final value from param scope");
            var a = 20;
            assert.areEqual(20, eval("a"), "In the body after assignment the symbol value is updated");
            assert.areEqual(30, b(), "Eval in the param scope captures the symbol from the param scope");
        }
        f5(30);
    }
},
{
    // Verifies how declarations made *inside* an eval that runs in a default-
    // parameter initializer interact with the formals: var redeclarations throw
    // (non-strict), let/const never leak out of the eval, and function bodies
    // defined in such evals behave like ordinary param-scope values.
    name: "Eval declarations in parameter scope",
    body: function() {
        // Redeclarations of formals - var
        assert.throws(function () { return function (a = eval("var a = 2"), b = a) { return [a, b]; }() },
            ReferenceError,
            "Redeclaring the current formal using var inside an eval throws",
            "Let/Const redeclaration");
        // In strict mode an eval gets its own var environment, so no clash occurs.
        assert.doesNotThrow(function () { "use strict"; return function (a = eval("var a = 2"), b = a) { return [a, b]; }() },
            "Redeclaring the current formal using var inside a strict mode eval does not throw");
        assert.doesNotThrow(function () { "use strict"; return function (a = eval("var a = 2"), b = a) { return [a, b]; }() },
            "Redeclaring the current formal using var inside a strict mode function eval does not throw");
        assert.throws(function () { function foo(a = eval("var b"), b, c = b) { return [a, b, c]; } foo(); },
            ReferenceError,
            "Redeclaring a future formal using var inside an eval throws",
            "Let/Const redeclaration");
        assert.throws(function () { function foo(a, b = eval("var a"), c = a) { return [a, b, c]; } foo(); },
            ReferenceError,
            "Redeclaring a previous formal using var inside an eval throws",
            "Let/Const redeclaration");
        // Let and const do not leak outside of an eval, so the test cases below should never throw.
        // Redeclarations of formals - let
        assert.doesNotThrow(function (a = eval("let a")) { return a; },
            "Attempting to redeclare the current formal using let inside an eval does not leak");
        assert.doesNotThrow(function (a = eval("let b"), b) { return [a, b]; },
            "Attempting to redeclare a future formal using let inside an eval does not leak");
        assert.doesNotThrow(function (a, b = eval("let a")) { return [a, b]; },
            "Attempting to redeclare a previous formal using let inside an eval does not leak");
        // Redeclarations of formals - const
        assert.doesNotThrow(function (a = eval("const a = 1")) { return a; },
            "Attempting to redeclare the current formal using const inside an eval does not leak");
        assert.doesNotThrow(function (a = eval("const b = 1"), b) { return [a, b]; },
            "Attempting to redeclare a future formal using const inside an eval does not leak");
        assert.doesNotThrow(function (a, b = eval("const a = 1")) { return [a, b]; },
            "Attempting to redeclare a previous formal using const inside an eval does not leak");
        // Conditional declarations
        function test(x = eval("var a1 = 1; let b1 = 2; const c1 = 3;")) {
            // none should be visible
            assert.throws(function () { a1 }, ReferenceError, "Ignoring the default value does not result in an eval declaration leaking", "'a1' is undefined");
            assert.throws(function () { b1 }, ReferenceError, "Let declarations do not leak out of eval to parameter scope", "'b1' is undefined");
            assert.throws(function () { c1 }, ReferenceError, "Const declarations do not leak out of eval to parameter scope when x is ", "'c1' is undefined");
        }
        test();
        // Redefining locals
        function foo(a = eval("var x = 1; assert.areEqual(1, x, 'Variable declared inside eval is accessible within eval');")) {
            assert.areEqual(undefined, x, "Var declaration from eval is not visible in the body");
            var x = 10;
            assert.areEqual(10, x, "Var declaration from eval uses its new value in the body declaration");
        }
        assert.doesNotThrow(function() { foo(); }, "Redefining a local var with an eval var does not throw");
        // Function bodies defined in eval
        // The body-level function declaration `a` shadows the arrow from the
        // eval inside the body, but b captured the param-scope value first.
        function funcArrow(a = eval("() => 1"), b = a) { function a() { return 10; }; return [a(), b()]; }
        assert.areEqual([10,1], funcArrow(), "Defining an arrow function body inside an eval works at default parameter scope");
        function funcDecl(a = eval("(function foo() { return 1; })"), b = a()) { return [a(), b]; }
        assert.areEqual([1, 1], funcDecl(), "Defining a function inside an eval works at default parameter scope");
        function funcDecl(a = eval("function foo() { return 1; }; foo"), b = a()) { return [a(), b]; }
        assert.areEqual([1, 1], funcDecl(), "Defining a function inside an eval works at default parameter scope");
        function genFuncDecl(a = eval("(function *foo() { yield 1; return 2; })"), b = a(), c = b.next()) { return [c, b.next()]; }
        assert.areEqual([{value : 1, done : false}, {value : 2, done : true}], genFuncDecl(), "Declaring a generator function inside an eval works at default parameter scope");
        function funcExpr(a = eval("f = function foo() { return 1; }"), b = f()) { return [a(), b, f()]; }
        assert.areEqual([1, 1, 1], funcExpr(), "Declaring a function inside an eval works at default parameter scope");
        assert.throws(function () { eval("function foo(a = eval('b'), b) {}; foo();"); }, ReferenceError, "Future default references using eval are not allowed", "Use before declaration");
    }
},
];
testRunner.runTests(tests, { verbose: WScript.Arguments[0] != "summary" });
|
2486_2
|
crossvul
|
js
|
CWE-119
|
Improper Restriction of Operations within the Bounds of a Memory Buffer - Certain languages allow direct addressing of memory locations and do not automatically ensure that these locations are valid for the memory buffer that is being referenced.
|
cpp
|
#if defined(HAVE_CONFIG_H)
#include "config.h"
#endif
#include <memory>
#include "rutil/Logger.hxx"
#include "resip/stack/ConnectionBase.hxx"
#include "resip/stack/WsConnectionBase.hxx"
#include "resip/stack/SipMessage.hxx"
#include "resip/stack/WsDecorator.hxx"
#include "resip/stack/Cookie.hxx"
#include "resip/stack/WsBaseTransport.hxx"
#include "resip/stack/WsCookieContext.hxx"
#include "resip/stack/WsCookieContextFactory.hxx"
#include "resip/stack/Symbols.hxx"
#include "rutil/WinLeakCheck.hxx"
#include "rutil/SharedPtr.hxx"
#include "rutil/Sha1.hxx"
#ifdef USE_SSL
#include "resip/stack/ssl/Security.hxx"
#include "resip/stack/ssl/TlsConnection.hxx"
#include "rutil/ssl/SHA1Stream.hxx"
#endif
#include "rutil/MD5Stream.hxx"
#ifdef USE_SIGCOMP
#include <osc/Stack.h>
#include <osc/TcpStream.h>
#include <osc/SigcompMessage.h>
#include <osc/StateChanges.h>
#endif
using namespace resip;
#define RESIPROCATE_SUBSYSTEM Subsystem::TRANSPORT
// Human-readable names for the parser states logged by preparseNewBytes();
// indexed by the ConnectionBase state enum (NewMessage/ReadingHeaders/PartialBody).
char
ConnectionBase::connectionStates[ConnectionBase::MAX][32] = { "NewMessage", "ReadingHeaders", "PartialBody" };

// Upper bound on a single SIP message received over a stream transport.
// Defaults to 10 MiB; overridable at build time via RESIP_SIP_MSG_MAX_BYTES.
#ifndef RESIP_SIP_MSG_MAX_BYTES
#define RESIP_SIP_MSG_MAX_BYTES 10485760
#endif

size_t
ConnectionBase::messageSizeMax = RESIP_SIP_MSG_MAX_BYTES;
// Constructs a connection bound to the given transport and remote tuple.
// Sets up the optional SigComp compression stack when enabled, seeds the
// last-used timestamp for idle garbage collection, and records the
// transport key on the remote tuple so flows can be routed back here.
// NOTE(review): transport may be null here (the body guards it), so callers
// apparently can construct a ConnectionBase before a transport is attached.
ConnectionBase::ConnectionBase(Transport* transport, const Tuple& who, Compression &compression)
   : mSendPos(0),
     mTransport(transport),
     mWho(who),
     mFailureReason(TransportFailure::None),
     mFailureSubCode(0),
     mCompression(compression),
     // NO: #ifdef USE_SIGCOMP // class def doesn't decl members conditionally
     mSigcompStack(0),
     mSigcompFramer(0),
     // NO: #endif
     mSendingTransmissionFormat(Unknown),
     mReceivingTransmissionFormat(Unknown),
     mMessage(0),
     mBuffer(0),
     mBufferPos(0),
     mBufferSize(0),
     mWsFrameExtractor(messageSizeMax),   // WS frame reassembly shares the global size cap
     mLastUsed(Timer::getTimeMs()),
     mConnState(NewMessage)
{
   DebugLog (<< "ConnectionBase::ConnectionBase, who: " << mWho << " " << this);
#ifdef USE_SIGCOMP
   if (mCompression.isEnabled())
   {
      DebugLog (<< "Compression enabled for connection: " << this);
      mSigcompStack = new osc::Stack(mCompression.getStateHandler());
      mCompression.addCompressorsToStack(mSigcompStack);
   }
   else
   {
      DebugLog (<< "Compression disabled for connection: " << this);
   }
#else
   DebugLog (<< "No compression library available: " << this);
#endif
   if(mTransport)
   {
      mWho.mTransportKey = mTransport->getKey();
   }
}
// Tears down the connection: notifies the transport that the flow died,
// fails any queued-but-unsent messages back to the transaction layer so
// their transactions do not hang, and releases owned buffers/objects.
//
// Fix: mTransport was null-checked before flowTerminated() but then
// dereferenced unconditionally inside the pending-sends loop. The
// constructor explicitly tolerates a null transport, so the loop now
// carries the same guard; queued SendData is still freed either way.
ConnectionBase::~ConnectionBase()
{
   if(mTransport)
   {
      mTransport->flowTerminated(mWho);
   }
   while (!mOutstandingSends.empty())
   {
      SendData* sendData = mOutstandingSends.front();
      if(mTransport)
      {
         mTransport->fail(sendData->transactionId,
                          mFailureReason ? mFailureReason : TransportFailure::ConnectionUnknown,
                          mFailureSubCode);
      }
      delete sendData;
      mOutstandingSends.pop_front();
   }
   delete [] mBuffer;
   delete mMessage;
#ifdef USE_SIGCOMP
   delete mSigcompStack;
#endif
   DebugLog (<< "ConnectionBase::~ConnectionBase " << this);
}
// Records why this connection failed, but only if the new reason ranks
// higher than the one already stored (the enum is ordered by specificity).
// The sub-code is only updated together with its reason.
void
ConnectionBase::setFailureReason(TransportFailure::FailureReason failReason, int subCode)
{
   if (failReason <= mFailureReason)
   {
      return;   // keep the existing, more significant reason
   }
   mFailureReason = failReason;
   mFailureSubCode = subCode;
}
// Returns the flow key identifying this connection's flow within the remote tuple.
FlowKey
ConnectionBase::getFlowKey() const
{
   return mWho.mFlowKey;
}
/**
   Incremental parser for stream-oriented (TCP/TLS) SIP traffic. Called after
   `bytesRead` new bytes have been appended to mBuffer at mBufferPos.

   Drives a three-state machine (NewMessage -> ReadingHeaders -> PartialBody):
   - NewMessage: absorbs CRLF/CRLFCRLF keepalives, then allocates a fresh
     SipMessage and falls through into header scanning.
   - ReadingHeaders: feeds the buffer to MsgHeaderScanner; grows/shifts the
     buffer until the header section is complete, then validates
     Content-Length against messageSizeMax.
   - PartialBody: accumulates body bytes until Content-Length is satisfied.

   Completed messages are handed to the transport (or 503'd / dropped under
   congestion). Bytes belonging to a following pipelined message ("overhang")
   are copied into a fresh buffer and reprocessed via `goto start`.

   Returns false when the connection should be torn down (parse error,
   oversized message, allocation failure); true otherwise.

   Buffer-ownership is subtle here: mMessage->addBuffer() transfers ownership
   of mBuffer to the SipMessage, after which mBuffer must be zeroed, not freed.
*/
bool
ConnectionBase::preparseNewBytes(int bytesRead)
{
   DebugLog(<< "In State: " << connectionStates[mConnState]);

  start:   // If there is an overhang come back here, effectively recursing

   switch(mConnState)
   {
      case NewMessage:
      {
         // NOTE(review): these strncmp calls assume at least 4 readable bytes
         // at mBuffer+mBufferPos; presumably guaranteed by the read path that
         // sizes mBuffer -- verify against the caller.
         if (strncmp(mBuffer + mBufferPos, Symbols::CRLFCRLF, 4) == 0)
         {
            // Incoming double-CRLF keepalive "ping": consume it and answer.
            DebugLog(<< "Got incoming double-CRLF keepalive (aka ping).");
            mBufferPos += 4;
            bytesRead -= 4;
            onDoubleCRLF();
            if (bytesRead)
            {
               goto start;
            }
            else
            {
               delete [] mBuffer;
               mBuffer = 0;
               return true;
            }
         }
         else if (strncmp(mBuffer + mBufferPos, Symbols::CRLF, 2) == 0)
         {
            // Single CRLF keepalive response "pong": consume silently.
            //DebugLog(<< "Got incoming CRLF keepalive response (aka pong).");
            mBufferPos += 2;
            bytesRead -= 2;
            onSingleCRLF();
            if (bytesRead)
            {
               goto start;
            }
            else
            {
               delete [] mBuffer;
               mBuffer = 0;
               return true;
            }
         }
         // Real data: start a new SipMessage stamped with our source info.
         resip_assert(mTransport);
         mMessage = new SipMessage(&mTransport->getTuple());

         DebugLog(<< "ConnectionBase::process setting source " << mWho);
         mMessage->setSource(mWho);
         mMessage->setTlsDomain(mTransport->tlsDomain());

#ifdef USE_SSL
         // Set TlsPeerName if message is from TlsConnection
         TlsConnection *tlsConnection = dynamic_cast<TlsConnection *>(this);
         if(tlsConnection)
         {
            std::list<Data> peerNameList;
            tlsConnection->getPeerNames(peerNameList);
            mMessage->setTlsPeerNames(peerNameList);
         }
#endif
         mMsgHeaderScanner.prepareForMessage(mMessage);
         // Fall through to the next case.
      }
      case ReadingHeaders:
      {
         unsigned int chunkLength = (unsigned int)mBufferPos + bytesRead;
         char *unprocessedCharPtr;
         MsgHeaderScanner::ScanChunkResult scanChunkResult =
            mMsgHeaderScanner.scanChunk(mBuffer,
                                        chunkLength,
                                        &unprocessedCharPtr);
         if (scanChunkResult == MsgHeaderScanner::scrError)
         {
            //.jacob. Not a terribly informative warning.
            WarningLog(<< "Discarding preparse!");
            delete [] mBuffer;
            mBuffer = 0;
            delete mMessage;
            mMessage = 0;
            mConnState = NewMessage;
            return false;
         }
         // DoS guard: refuse absurd header counts.
         if (mMsgHeaderScanner.getHeaderCount() > 1024)
         {
            WarningLog(<< "Discarding preparse; too many headers");
            delete [] mBuffer;
            mBuffer = 0;
            delete mMessage;
            mMessage = 0;
            mConnState = NewMessage;
            return false;
         }
         unsigned int numUnprocessedChars =
            (unsigned int)((mBuffer + chunkLength) - unprocessedCharPtr);
         // DoS guard: a single header name/value longer than a chunk is rejected.
         if(numUnprocessedChars > ConnectionBase::ChunkSize &&
            scanChunkResult == MsgHeaderScanner::scrNextChunk)
         {
            WarningLog(<< "Discarding preparse; header-field-value (or "
                        "header name) too long");
            delete [] mBuffer;
            mBuffer = 0;
            delete mMessage;
            mMessage = 0;
            mConnState = NewMessage;
            return false;
         }

         if(numUnprocessedChars==chunkLength)
         {
            // .bwc. MsgHeaderScanner wasn't able to parse anything useful;
            // don't bother mMessage yet, but make more room in mBuffer.
            size_t size = numUnprocessedChars*3/2;
            if (size < ConnectionBase::ChunkSize)
            {
               size = ConnectionBase::ChunkSize;
            }
            char* newBuffer = 0;
            try
            {
               newBuffer=MsgHeaderScanner::allocateBuffer((int)size);
            }
            catch(std::bad_alloc&)
            {
               ErrLog(<<"Failed to alloc a buffer during preparse!");
               return false;
            }
            memcpy(newBuffer, unprocessedCharPtr, numUnprocessedChars);
            delete [] mBuffer;
            mBuffer = newBuffer;
            mBufferPos = numUnprocessedChars;
            mBufferSize = size;
            mConnState = ReadingHeaders;
            return true;
         }

         // The scanner consumed some bytes; the SipMessage now owns mBuffer.
         mMessage->addBuffer(mBuffer);
         mBuffer=0;

         if (scanChunkResult == MsgHeaderScanner::scrNextChunk)
         {
            // Message header is incomplete...
            if (numUnprocessedChars == 0)
            {
               // ...but the chunk is completely processed.
               //.jacob. I've discarded the "assigned" concept.
               //DebugLog(<< "Data assigned, not fragmented, not complete");
               try
               {
                  mBuffer = MsgHeaderScanner::allocateBuffer(ChunkSize);
               }
               catch(std::bad_alloc&)
               {
                  ErrLog(<<"Failed to alloc a buffer during preparse!");
                  return false;
               }
               mBufferPos = 0;
               mBufferSize = ChunkSize;
            }
            else
            {
               // ...but some of the chunk must be shifted into the next one.
               size_t size = numUnprocessedChars*3/2;
               if (size < ConnectionBase::ChunkSize)
               {
                  size = ConnectionBase::ChunkSize;
               }
               char* newBuffer = 0;
               try
               {
                  newBuffer = MsgHeaderScanner::allocateBuffer((int)size);
               }
               catch(std::bad_alloc&)
               {
                  ErrLog(<<"Failed to alloc a buffer during preparse!");
                  return false;
               }
               memcpy(newBuffer, unprocessedCharPtr, numUnprocessedChars);
               mBuffer = newBuffer;
               mBufferPos = numUnprocessedChars;
               mBufferSize = size;
            }
            mConnState = ReadingHeaders;
         }
         else
         {
            // Header section complete; validate Content-Length before
            // committing to buffer the body.
            size_t contentLength = 0;

            try
            {
               // The message header is complete.
               contentLength=mMessage->const_header(h_ContentLength).value();
            }
            catch(resip::BaseException& e)  // Could be SipMessage::Exception or ParseException
            {
               WarningLog(<<"Malformed Content-Length in connection-based transport"
                           ". Not much we can do to fix this. " << e);
               // .bwc. Bad Content-Length. We are hosed.
               delete mMessage;
               mMessage = 0;
               mBuffer = 0;
               // .bwc. mMessage just took ownership of mBuffer, so we don't
               // delete it here. We do zero it though, for completeness.
               //.jacob. Shouldn't the state also be set here?
               return false;
            }

            // Size cap keeps a peer from making us buffer an unbounded body.
            // NOTE(review): contentLength is size_t, so `< 0` can never be
            // true here; presumably a leftover from a signed version.
            if(contentLength > messageSizeMax || contentLength < 0)
            {
               WarningLog(<<"Content-Length in connection-based "
                           "transport exceeds maximum " << messageSizeMax);
               delete mMessage;
               mMessage = 0;
               mBuffer = 0;
               // .bwc. mMessage just took ownership of mBuffer, so we don't
               // delete it here. We do zero it though, for completeness.
               //.jacob. Shouldn't the state also be set here?
               return false;
            }

            if (numUnprocessedChars < contentLength)
            {
               // The message body is incomplete.
               DebugLog(<< "partial body received");
               size_t newSize=resipMin(resipMax((size_t)numUnprocessedChars*3/2,
                                                (size_t)ConnectionBase::ChunkSize),
                                       contentLength);
               char* newBuffer = MsgHeaderScanner::allocateBuffer((int)newSize);
               memcpy(newBuffer, unprocessedCharPtr, numUnprocessedChars);
               mBufferPos = numUnprocessedChars;
               mBufferSize = newSize;
               mBuffer = newBuffer;

               mConnState = PartialBody;
            }
            else
            {
               // Do this stuff BEFORE we kick the message out the door.
               // Remember, deleting or passing mMessage on invalidates our
               // buffer!
               int overHang = numUnprocessedChars - (int)contentLength;

               mConnState = NewMessage;
               mBuffer = 0;
               if (overHang > 0)
               {
                  // The next message has been partially read.
                  size_t size = overHang*3/2;
                  if (size < ConnectionBase::ChunkSize)
                  {
                     size = ConnectionBase::ChunkSize;
                  }
                  char* newBuffer = MsgHeaderScanner::allocateBuffer((int)size);
                  memcpy(newBuffer,
                         unprocessedCharPtr + contentLength,
                         overHang);
                  mBuffer = newBuffer;
                  mBufferPos = 0;
                  mBufferSize = size;

                  DebugLog (<< "Extra bytes after message: " << overHang);
                  DebugLog (<< Data(mBuffer, overHang));

                  bytesRead = overHang;
               }

               // The message body is complete.
               mMessage->setBody(unprocessedCharPtr, (UInt32)contentLength);

               // Congestion handling: drop (and maybe 503) rather than queue
               // when the stack is rejecting incoming work.
               CongestionManager::RejectionBehavior b=mTransport->getRejectionBehaviorForIncoming();
               if (b==CongestionManager::REJECTING_NON_ESSENTIAL
                  || (b==CongestionManager::REJECTING_NEW_WORK
                     && mMessage->isRequest()))
               {
                  UInt32 expectedWait(mTransport->getExpectedWaitForIncoming());

                  // .bwc. If this fifo is REJECTING_NEW_WORK, we will drop
                  // requests but not responses ( ?bwc? is this right for ACK?).
                  // If we are REJECTING_NON_ESSENTIAL,
                  // we reject all incoming work, since losing something from the
                  // wire will not cause instability or leaks (see
                  // CongestionManager.hxx)

                  // .bwc. This handles all appropriate checking for whether
                  // this is a response or an ACK.
                  std::auto_ptr<SendData> tryLater(transport()->make503(*mMessage, expectedWait/1000));
                  if(tryLater.get())
                  {
                     transport()->send(tryLater);
                  }
                  delete mMessage; // dropping message due to congestion
                  mMessage = 0;
               }
               else if (!transport()->basicCheck(*mMessage))
               {
                  delete mMessage;
                  mMessage = 0;
               }
               else
               {
                  Transport::stampReceived(mMessage);
                  DebugLog(<< "##Connection: " << *this << " received: " << *mMessage);
                  resip_assert( mTransport );
                  mTransport->pushRxMsgUp(mMessage);
                  mMessage = 0;
               }

               if (overHang > 0)
               {
                  goto start;
               }
            }
         }
         break;
      }
      case PartialBody:
      {
         size_t contentLength = 0;

         try
         {
            contentLength = mMessage->const_header(h_ContentLength).value();
         }
         catch(resip::BaseException& e)  // Could be SipMessage::Exception or ParseException
         {
            WarningLog(<<"Malformed Content-Length in connection-based transport"
                        ". Not much we can do to fix this. " << e);
            // .bwc. Bad Content-Length. We are hosed.
            delete [] mBuffer;
            mBuffer = 0;
            delete mMessage;
            mMessage = 0;
            //.jacob. Shouldn't the state also be set here?
            return false;
         }

         mBufferPos += bytesRead;
         if (mBufferPos == contentLength)
         {
            // Body complete: hand ownership of mBuffer to the SipMessage.
            mMessage->addBuffer(mBuffer);
            mMessage->setBody(mBuffer, (UInt32)contentLength);
            mBuffer=0;

            // .bwc. basicCheck takes up substantial CPU. Don't bother doing it
            // if we're overloaded.
            CongestionManager::RejectionBehavior b=mTransport->getRejectionBehaviorForIncoming();
            if (b==CongestionManager::REJECTING_NON_ESSENTIAL
               || (b==CongestionManager::REJECTING_NEW_WORK
                  && mMessage->isRequest()))
            {
               UInt32 expectedWait(mTransport->getExpectedWaitForIncoming());

               // .bwc. If this fifo is REJECTING_NEW_WORK, we will drop
               // requests but not responses ( ?bwc? is this right for ACK?).
               // If we are REJECTING_NON_ESSENTIAL,
               // we reject all incoming work, since losing something from the
               // wire will not cause instability or leaks (see
               // CongestionManager.hxx)

               // .bwc. This handles all appropriate checking for whether
               // this is a response or an ACK.
               std::auto_ptr<SendData> tryLater = transport()->make503(*mMessage, expectedWait/1000);
               if(tryLater.get())
               {
                  transport()->send(tryLater);
               }
               delete mMessage; // dropping message due to congestion
               mMessage = 0;
            }
            else if (!transport()->basicCheck(*mMessage))
            {
               delete mMessage;
               mMessage = 0;
            }
            else
            {
               DebugLog(<< "##ConnectionBase: " << *this << " received: " << *mMessage);
               Transport::stampReceived(mMessage);
               resip_assert( mTransport );
               mTransport->pushRxMsgUp(mMessage);
               mMessage = 0;
            }
            mConnState = NewMessage;
         }
         else if (mBufferPos == mBufferSize)
         {
            // .bwc. We've filled our buffer; go ahead and make more room.
            // (mBufferSize never exceeds contentLength, which was already
            // capped at messageSizeMax above.)
            size_t newSize = resipMin(mBufferSize*3/2, contentLength);
            char* newBuffer = 0;
            try
            {
               newBuffer=new char[newSize];
            }
            catch(std::bad_alloc&)
            {
               ErrLog(<<"Failed to alloc a buffer while receiving body!");
               return false;
            }
            memcpy(newBuffer, mBuffer, mBufferSize);
            mBufferSize=newSize;
            delete [] mBuffer;
            mBuffer = newBuffer;
         }
         break;
      }
      default:
         resip_assert(0);
   }
   return true;
}
// Parses the Cookie header(s) of a WebSocket upgrade request into cookieList.
// Each header value may contain multiple "name=value" pairs separated by
// semicolons; values may be double-quoted. Pairs are appended in order of
// appearance.
// NOTE(review): the dereferences of pb.position() after skipChar() assume the
// buffer is not at eof there; presumably ParseBuffer throws/asserts on
// malformed input -- verify before hardening.
void
ConnectionBase::wsParseCookies(CookieList& cookieList, const SipMessage* message)
{
   Data name;
   Data value;

   StringCategories::const_iterator it = message->header(h_Cookies).begin();
   for (; it != message->header(h_Cookies).end(); ++it)
   {
      ParseBuffer pb((*it).value());
      while(!pb.eof())
      {
         // name is everything up to '='
         const char* anchor = pb.skipWhitespace();
         pb.skipToChar(Symbols::EQUALS[0]);
         pb.data(name, anchor);
         anchor = pb.skipChar(Symbols::EQUALS[0]);
         if(*(pb.position()) == Symbols::DOUBLE_QUOTE[0])
         {
            // quoted value: take everything between the quotes
            anchor = pb.skipChar(Symbols::DOUBLE_QUOTE[0]);
            pb.skipToChar(Symbols::DOUBLE_QUOTE[0]);
            pb.data(value, anchor);
            pb.skipChar(Symbols::DOUBLE_QUOTE[0]);
         }
         else
         {
            // bare value: runs until ';' or whitespace
            pb.skipToOneOf(Symbols::SEMI_COLON, ParseBuffer::Whitespace);
            pb.data(value, anchor);
         }
         Cookie cookie(name, value);
         cookieList.push_back(cookie);
         DebugLog(<< "Cookie: " << cookie);
         // consume the pair separator, if any, before the next iteration
         if(!pb.eof() && *(pb.position()) == Symbols::SEMI_COLON[0])
         {
            pb.skipChar(Symbols::SEMI_COLON[0]);
         }
         pb.skipWhitespace();
      }
   }
}
/*
 * Processes the HTTP GET that initiates a WebSocket upgrade.
 *
 * Returns true if handshake complete, false if more bytes needed
 * Sets dropConnection = true if an error occurs (oversized request, cookie
 * validation failure, or an unparsable upgrade request).
 *
 * On success, the 101 Switching Protocols response is queued on
 * mOutstandingSends and any cookies (plus an optional WsCookieContext) are
 * stashed on the WsConnectionBase for later validation/use.
 */
bool
ConnectionBase::wsProcessHandshake(int bytesRead, bool &dropConnection)
{
   mConnState = WebSocket;
   dropConnection = false;

   // Bound how much we will buffer for a handshake; prevents a peer from
   // forcing unbounded memory growth before the upgrade completes.
   if(mBufferPos + bytesRead > messageSizeMax)
   {
      WarningLog(<<"Too many bytes received during WS handshake, dropping connection.  Max message size = " << messageSizeMax);
      dropConnection = true;
      return false;
   }

   // The upgrade request is parsed with the SIP header scanner into a
   // throwaway SipMessage (deleted before returning in every path).
   resip_assert(mTransport);
   mMessage = new SipMessage(&mTransport->getTuple());
   resip_assert(mMessage);
   mMessage->setSource(mWho);
   mMessage->setTlsDomain(mTransport->tlsDomain());

   if (!scanMsgHeader(bytesRead))
   {
      // headers incomplete (or unparsable); scanMsgHeader cleaned up mMessage
      return false;
   }

   try
   {
      WsConnectionBase* wsConnectionBase = dynamic_cast<WsConnectionBase*>(this);
      CookieList cookieList;
      if(wsConnectionBase)
      {
         SharedPtr<WsCookieContext> wsCookieContext((WsCookieContext*)0);
         if (mMessage->exists(h_Cookies))
         {
            WsBaseTransport* wst = dynamic_cast<WsBaseTransport*>(mTransport);
            resip_assert(wst);

            try
            {
               wsParseCookies(cookieList, mMessage);
               wsConnectionBase->setCookies(cookieList);
               // Use of resip WsCookieContext capabilities is not mandatory,
               // only try to use it if cookieContextFactory is available
               if(wst->cookieContextFactory().get())
               {
                  Uri& requestUri = mMessage->header(h_RequestLine).uri();
                  wsCookieContext = wst->cookieContextFactory()->makeCookieContext(cookieList, requestUri);
                  wsConnectionBase->setWsCookieContext(wsCookieContext);
               }
            }
            catch(ParseException& ex)
            {
               WarningLog(<<"Failed to parse cookies into WsCookieContext: " << ex);
            }
         }
         // If a validator is configured, a missing or invalid cookie context
         // rejects the connection outright.
         SharedPtr<WsConnectionValidator> wsConnectionValidator = wsConnectionBase->connectionValidator();
         if(wsConnectionValidator &&
             (!wsCookieContext.get() || !wsConnectionValidator->validateConnection(*wsCookieContext)))
         {
            ErrLog(<<"WebSocket cookie validation failed, dropping connection");
            // FIXME: should send back a HTTP error code:
            // 400 if the cookie was not in the right syntax
            // 403 if the cookie was well formed but rejected
            //     due to expiry or a bad HMAC
            delete mMessage;
            mMessage = 0;
            mBufferPos = 0;
            dropConnection = true;
            return false;
         }
      }
      std::auto_ptr<Data> wsResponsePtr = makeWsHandshakeResponse();
      if (wsResponsePtr.get())
      {
         DebugLog (<< "WebSocket upgrade accepted, cookie count = " << cookieList.size());
         mOutstandingSends.push_back(new SendData(
               who(),
               *wsResponsePtr.get(),
               Data::Empty,
               Data::Empty,
               true));
      }
      else
      {
         ErrLog(<<"Failed to parse WebSocket initialization request");
         delete mMessage;
         mMessage = 0;
         mBufferPos = 0;
         dropConnection = true;
         return false;
      }
   }
   catch(resip::ParseException& e)
   {
      ErrLog(<<"Cannot auth request is missing " << e);
      delete mMessage;
      mMessage = 0;
      mBufferPos = 0;
      dropConnection = true;
      return false;
   }

   delete mMessage;
   mMessage=0;
   mBufferPos = 0;
   return true;
}
bool
ConnectionBase::scanMsgHeader(int bytesRead)
{
mMsgHeaderScanner.prepareForMessage(mMessage);
char *unprocessedCharPtr;
MsgHeaderScanner::ScanChunkResult scanResult = mMsgHeaderScanner.scanChunk(mBuffer, mBufferPos + bytesRead, &unprocessedCharPtr);
if (scanResult != MsgHeaderScanner::scrEnd)
{
if(scanResult != MsgHeaderScanner::scrNextChunk)
{
StackLog(<<"Failed to parse message, more bytes needed");
StackLog(<< Data(mBuffer, bytesRead));
}
delete mMessage;
mMessage=0;
mBufferPos += bytesRead;
return false;
}
return true;
}
// Builds the HTTP 101 response for a WebSocket upgrade request.
//
// Returns a populated Data on success, or a null auto_ptr when the request
// is unsupported: either it uses the deprecated hixie-76 two-key handshake
// or it lacks a Sec-WebSocket-Key header entirely. The accept key is the
// RFC 6455 derivation: base64(SHA1(key + magic GUID)).
std::auto_ptr<Data>
ConnectionBase::makeWsHandshakeResponse()
{
   std::auto_ptr<Data> responsePtr(0);
   if(isUsingSecWebSocketKey())
   {
      responsePtr.reset(new Data("HTTP/1.1 101 WebSocket Protocol Handshake\r\n"
                                 "Upgrade: WebSocket\r\n"
                                 "Connection: Upgrade\r\n"
                                 "Sec-WebSocket-Protocol: sip\r\n"));

      // Assuming that OpenSSL implementation of SHA1 is more effient than our internal one
#ifdef USE_SSL
      SHA1Stream wsSha1Stream;
      wsSha1Stream << (mMessage->const_header(h_SecWebSocketKey).value() + Symbols::WebsocketMagicGUID);
      Data wsAcceptKey = wsSha1Stream.getBin(160).base64encode();
#else
      SHA1 sha1;
      sha1.update(mMessage->const_header(h_SecWebSocketKey).value().c_str());
      sha1.update(Symbols::WebsocketMagicGUID);
      Data wsAcceptKey = sha1.finalBin().base64encode();
#endif
      *responsePtr += "Sec-WebSocket-Accept: " + wsAcceptKey + "\r\n\r\n";
   }
   else if(isUsingDeprecatedSecWebSocketKeys())
   {
      ErrLog(<<"WS client wants to use depracated protocol version, unsupported");
   }
   else
   {
      ErrLog(<<"No SecWebSocketKey header");
   }

   return responsePtr;
}
// True when the upgrade request carries the obsolete hixie-76 handshake,
// which uses the pair Sec-WebSocket-Key1/Sec-WebSocket-Key2 instead of the
// single RFC 6455 Sec-WebSocket-Key header.
bool ConnectionBase::isUsingDeprecatedSecWebSocketKeys()
{
   resip_assert(mMessage);
   if (!mMessage->exists(h_SecWebSocketKey1))
   {
      return false;
   }
   return mMessage->exists(h_SecWebSocketKey2);
}
// True when the upgrade request carries the RFC 6455 Sec-WebSocket-Key header.
bool ConnectionBase::isUsingSecWebSocketKey()
{
   resip_assert(mMessage);
   const bool hasRfc6455Key = mMessage->exists(h_SecWebSocketKey);
   return hasRfc6455Key;
}
// Consumes bytesRead bytes of post-handshake WebSocket traffic from mBuffer.
//
// mWsFrameExtractor reassembles frames (enforcing messageSizeMax) and yields
// zero or more complete payloads per call; each payload is either a
// double-CRLF keepalive (answered via onDoubleCRLF) or a SIP message, which
// is parsed and pushed up to the transport. Subsequent payloads are drained
// by calling processBytes(0, 0, ...) until it returns null.
//
// Returns false when the extractor flags the connection for teardown
// (e.g. protocol violation or oversized frame), true otherwise.
bool
ConnectionBase::wsProcessData(int bytesRead)
{
   bool dropConnection = false;

   // Always consumes the whole buffer:
   std::auto_ptr<Data> msg = mWsFrameExtractor.processBytes((UInt8*)mBuffer, bytesRead, dropConnection);

   while(msg.get())
   {
      // mWsBuffer should now contain a discrete SIP message, let the
      // stack go to work on it
      if(msg->size() == 4 && memcmp(msg->data(), "\r\n\r\n", 4) == 0)
      {
         // sending a keep alive reply now
         StackLog(<<"got a SIP ping embedded in WebSocket frame, replying");
         onDoubleCRLF();
         msg = mWsFrameExtractor.processBytes(0, 0, dropConnection);
         continue;
      }

      resip_assert(mTransport);
      mMessage = new SipMessage(&mTransport->getTuple());

      mMessage->setSource(mWho);
      mMessage->setTlsDomain(mTransport->tlsDomain());

#ifdef USE_SSL
      // Set TlsPeerName if message is from TlsConnection
      TlsConnection *tlsConnection = dynamic_cast<TlsConnection *>(this);
      if(tlsConnection)
      {
         std::list<Data> peerNameList;
         tlsConnection->getPeerNames(peerNameList);
         mMessage->setTlsPeerNames(peerNameList);
      }
#endif
      // Attach the handshake cookies so upper layers can see them per-message.
      WsConnectionBase *wsConnectionBase = dynamic_cast<WsConnectionBase *>(this);
      if (wsConnectionBase)
      {
         mMessage->setWsCookies(wsConnectionBase->getCookies());
         mMessage->setWsCookieContext(wsConnectionBase->getWsCookieContext());
      }
      Data::size_type msg_len = msg->size();

      // cast permitted, as it is borrowed:
      // NOTE(review): addBuffer presumably transfers ownership to SipMessage,
      // while msg (auto_ptr) still owns the Data wrapping the same bytes --
      // relies on Data's borrowed-buffer mode; verify against Data semantics.
      char *sipBuffer = (char *)msg->data();
      mMessage->addBuffer(sipBuffer);
      mMsgHeaderScanner.prepareForMessage(mMessage);

      char *unprocessedCharPtr;
      if (mMsgHeaderScanner.scanChunk(sipBuffer,
                                      msg_len,
                                      &unprocessedCharPtr) !=
          MsgHeaderScanner::scrEnd)
      {
         StackLog(<<"Scanner rejecting WebSocket SIP message as unparsable, length = " << msg_len);
         StackLog(<< Data(sipBuffer, msg_len));
         delete mMessage;
         mMessage=0;
      }

      // Everything after the header section is the body.
      unsigned int used = unprocessedCharPtr - sipBuffer;
      if (mMessage && (used < msg_len))
      {
         mMessage->setBody(sipBuffer+used, msg_len-used);
      }

      if (mMessage && !transport()->basicCheck(*mMessage))
      {
         delete mMessage;
         mMessage = 0;
      }

      if (mMessage)
      {
         Transport::stampReceived(mMessage);
         resip_assert( mTransport );
         mTransport->pushRxMsgUp(mMessage);
         mMessage = 0;
      }
      else
      {
         // Something wrong...
         ErrLog(<< "We don't have a valid SIP message, maybe drop the connection?");
      }

      // Drain any further complete frames already buffered by the extractor.
      msg = mWsFrameExtractor.processBytes(0, 0, dropConnection);
   }

   if(dropConnection)
   {
      return false;
   }

   return true;
}
#ifdef USE_SIGCOMP
// Feeds bytesRead new bytes through the SigComp TCP framer/decompressor and
// parses each fully decompressed payload as a SIP message, pushing valid
// messages up to the transport. Also associates the SigComp state changes
// (sc) with a compartment ID derived from the message's top Via, and queues
// a NACK back to the peer on decompression failure.
void
ConnectionBase::decompressNewBytes(int bytesRead)
{
   mConnState = SigComp;

   if (!mSigcompFramer)
   {
      mSigcompFramer = new osc::TcpStream();
   }
   mSigcompFramer->addData(mBuffer, bytesRead);
   size_t bytesUncompressed;
   osc::StateChanges *sc = 0;
   // Scratch buffer for one decompressed message; 64 KiB per call.
   char *uncompressed = new char[65536];
   while ((bytesUncompressed = mSigcompStack->uncompressMessage(
            *mSigcompFramer, uncompressed, 65536, sc)) > 0)
   {
      DebugLog (<< "Uncompressed Connection-oriented message");
      mMessage = new SipMessage(mWho.transport);

      mMessage->setSource(mWho);

      mMessage->setTlsDomain(mWho.transport->tlsDomain());

#ifdef USE_SSL
      // Set TlsPeerName if message is from TlsConnection
      TlsConnection *tlsConnection = dynamic_cast<TlsConnection *>(this);
      if(tlsConnection)
      {
         std::list<Data> peerNameList;
         tlsConnection->getPeerNames(peerNameList);
         mMessage->setTlsPeerNames(peerNameList);
      }
#endif
      // Copy into a heap buffer whose ownership passes to the SipMessage.
      char *sipBuffer = new char[bytesUncompressed];
      memmove(sipBuffer, uncompressed, bytesUncompressed);
      mMessage->addBuffer(sipBuffer);
      mMsgHeaderScanner.prepareForMessage(mMessage);
      char *unprocessedCharPtr;
      if (mMsgHeaderScanner.scanChunk(sipBuffer,
                                      bytesUncompressed,
                                      &unprocessedCharPtr) !=
          MsgHeaderScanner::scrEnd)
      {
         StackLog(<<"Scanner rejecting compressed message as unparsable");
         StackLog(<< Data(sipBuffer, bytesUncompressed));
         delete mMessage;
         mMessage=0;
      }

      // Remaining bytes after the headers form the body.
      unsigned int used = unprocessedCharPtr - sipBuffer;
      if (mMessage && (used < bytesUncompressed))
      {
         mMessage->setBody(sipBuffer+used, bytesUncompressed-used);
      }

      if (mMessage && !transport()->basicCheck(*mMessage))
      {
         delete mMessage;
         mMessage = 0;
      }

      if (mMessage)
      {
         Transport::stampReceived(mMessage);

         // If the message made it this far, we should let it store
         // SigComp state: extract the compartment ID.
         const Via &via = mMessage->const_header(h_Vias).front();
         if (mMessage->isRequest())
         {
            // For requests, the compartment ID is read out of the
            // top via header field; if not present, we use the
            // TCP connection for identification purposes.
            if (via.exists(p_sigcompId))
            {
               Data compId = via.param(p_sigcompId);
               if(!compId.empty())
               {
                  mSigcompStack->provideCompartmentId(sc, compId.data(), compId.size());
               }
            }
            else
            {
               mSigcompStack->provideCompartmentId(sc, this, sizeof(this));
            }
         }
         else
         {
            // For responses, the compartment ID is supposed to be
            // the same as the compartment ID of the request. We
            // *could* dig down into the transaction layer to try to
            // figure this out, but that's a royal pain, and a rather
            // severe layer violation. In practice, we're going to ferret
            // the ID out of the the Via header field, which is where we
            // squirreled it away when we sent this request in the first place.
            Data compId = via.param(p_branch).getSigcompCompartment();
            if(!compId.empty())
            {
               mSigcompStack->provideCompartmentId(sc, compId.data(), compId.size());
            }
         }

         resip_assert( mTransport );
         mTransport->pushRxMsgUp(mMessage);
         mMessage = 0;
         sc = 0;
      }
      else
      {
         // Message rejected: the state changes are discarded, not stored.
         delete sc;
         sc = 0;
      }
   }
   delete [] uncompressed;

   // If there was a decompression failure, let the other side know.
   osc::SigcompMessage *nack = mSigcompStack->getNack();

   if (nack)
   {
      if (mSendingTransmissionFormat == Compressed)
      {
         // !bwc! We are not telling anyone that we're interested in having our
         // FD put in the writable set...
         mOutstandingSends.push_back(new SendData(
                                        who(),
                                        Data(nack->getStreamMessage(), nack->getStreamLength()),
                                        Data::Empty,
                                        Data::Empty,
                                        true));
      }
      else
      {
         delete nack;
      }
   }
}
#endif
// Returns a writable region of the receive buffer. When the connection is
// waiting for a fresh message, the buffer is lazily allocated and the write
// position rewound to the start; otherwise the current position is kept so
// a partially received message can continue to accumulate.
std::pair<char*, size_t>
ConnectionBase::getWriteBuffer()
{
   const bool startingNewMessage = (mConnState == NewMessage);
   if (startingNewMessage)
   {
      if (mBuffer == 0)
      {
         DebugLog (<< "Creating buffer for " << *this);
         mBuffer = MsgHeaderScanner::allocateBuffer(ConnectionBase::ChunkSize);
         mBufferSize = ConnectionBase::ChunkSize;
      }
      mBufferPos = 0;
   }
   return getCurrentWriteBuffer();
}
std::pair<char*, size_t>
ConnectionBase::getCurrentWriteBuffer()
{
return std::make_pair(mBuffer + mBufferPos, mBufferSize - mBufferPos);
}
// Guarantees the receive buffer can hold extraBytes additional bytes beyond
// currentPos, reallocating (and preserving the first currentPos bytes) when
// necessary. Returns a pointer to the first byte of the extra region. Both
// arguments must be positive; anything else is a caller bug.
char*
ConnectionBase::getWriteBufferForExtraBytes(int currentPos, int extraBytes)
{
   if (currentPos <= 0 || extraBytes <= 0)
   {
      // Contract violation: keep the historical behavior of asserting and
      // handing back the raw buffer.
      resip_assert(0);
      return mBuffer;
   }
   if ((currentPos + extraBytes) > mBufferSize)
   {
      // Grow: move the bytes already received into a fresh, larger buffer.
      mBufferSize = currentPos + extraBytes;
      char* grown = MsgHeaderScanner::allocateBuffer((int)mBufferSize);
      memcpy(grown, mBuffer, currentPos);
      delete[] mBuffer;
      mBuffer = grown;
   }
   return &mBuffer[currentPos];
}
// Adopts an externally allocated buffer as this connection's receive buffer
// and resets the write position. Ownership of |bytes| transfers to this
// object (it is released with delete[] in the destructor).
// NOTE(review): any previously held mBuffer is overwritten here without
// being freed -- presumably callers only invoke this when no buffer is
// held; verify at call sites.
void
ConnectionBase::setBuffer(char* bytes, int count)
{
mBuffer = bytes;
mBufferPos = 0;
mBufferSize = count;
}
// Accessor for the Transport this connection belongs to. May return 0;
// callers that dereference the result must know the connection is still
// attached to a transport.
Transport*
ConnectionBase::transport() const
{
   // The old body asserted on `this` being non-null. Reaching a member
   // function through a null pointer is already undefined behavior, so the
   // check was meaningless (compilers are entitled to delete it) and has
   // been removed.
   return mTransport;
}
// Streams a short diagnostic tag for a ConnectionBase: its address and the
// remote endpoint (mWho) it is serving.
EncodeStream&
resip::operator<<(EncodeStream& os,
                  const resip::ConnectionBase& conn)
{
   os << "CONN_BASE: " << &conn << " " << conn.mWho;
   return os;
}
/* ====================================================================
* The Vovida Software License, Version 1.0
*
* Copyright (c) 2000
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* 3. The names "VOCAL", "Vovida Open Communication Application Library",
* and "Vovida Open Communication Application Library (VOCAL)" must
* not be used to endorse or promote products derived from this
* software without prior written permission. For written
* permission, please contact vocal@vovida.org.
*
* 4. Products derived from this software may not be called "VOCAL", nor
* may "VOCAL" appear in their name, without prior written
* permission of Vovida Networks, Inc.
*
* THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESSED OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE AND
* NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL VOVIDA
* NETWORKS, INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT DAMAGES
* IN EXCESS OF $1,000, NOR FOR ANY INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
* USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*
* ====================================================================
*
* This software consists of voluntary contributions made by Vovida
* Networks, Inc. and many individuals on behalf of Vovida Networks,
* Inc. For more information on Vovida Networks, Inc., please see
* <http://www.vovida.org/>.
*
* vi: set shiftwidth=3 expandtab:
*/
|
#if defined(HAVE_CONFIG_H)
#include "config.h"
#endif
#include <memory>
#include "rutil/Logger.hxx"
#include "resip/stack/ConnectionBase.hxx"
#include "resip/stack/WsConnectionBase.hxx"
#include "resip/stack/SipMessage.hxx"
#include "resip/stack/WsDecorator.hxx"
#include "resip/stack/Cookie.hxx"
#include "resip/stack/WsBaseTransport.hxx"
#include "resip/stack/WsCookieContext.hxx"
#include "resip/stack/WsCookieContextFactory.hxx"
#include "resip/stack/Symbols.hxx"
#include "rutil/WinLeakCheck.hxx"
#include "rutil/SharedPtr.hxx"
#include "rutil/Sha1.hxx"
#ifdef USE_SSL
#include "resip/stack/ssl/Security.hxx"
#include "resip/stack/ssl/TlsConnection.hxx"
#include "rutil/ssl/SHA1Stream.hxx"
#endif
#include "rutil/MD5Stream.hxx"
#ifdef USE_SIGCOMP
#include <osc/Stack.h>
#include <osc/TcpStream.h>
#include <osc/SigcompMessage.h>
#include <osc/StateChanges.h>
#endif
using namespace resip;
#define RESIPROCATE_SUBSYSTEM Subsystem::TRANSPORT
// Printable names for the ConnState enum, used by the debug logging in
// preparseNewBytes(). Order must match the enum declared in the header.
char
ConnectionBase::connectionStates[ConnectionBase::MAX][32] = { "NewMessage", "ReadingHeaders", "PartialBody" };
#ifndef RESIP_SIP_MSG_MAX_BYTES
// Default cap on a single SIP message received over a stream transport:
// 10 MiB. Override at build time by defining RESIP_SIP_MSG_MAX_BYTES.
#define RESIP_SIP_MSG_MAX_BYTES 10485760
#endif
// Enforced against Content-Length and accumulated WebSocket handshake data.
size_t
ConnectionBase::messageSizeMax = RESIP_SIP_MSG_MAX_BYTES;
// Constructs a connection bound to |transport| talking to the remote
// endpoint |who|. When SigComp support is compiled in and enabled via
// |compression|, a per-connection SigComp stack is created; otherwise the
// SigComp members stay null. The transport key of |who| is stamped from the
// transport so the tuple can be routed back to this flow.
ConnectionBase::ConnectionBase(Transport* transport, const Tuple& who, Compression &compression)
: mSendPos(0),
mTransport(transport),
mWho(who),
mFailureReason(TransportFailure::None),
mFailureSubCode(0),
mCompression(compression),
// NO: #ifdef USE_SIGCOMP // class def doesn't decl members conditionally
mSigcompStack(0),
mSigcompFramer(0),
// NO: #endif
mSendingTransmissionFormat(Unknown),
mReceivingTransmissionFormat(Unknown),
mMessage(0),
mBuffer(0),
mBufferPos(0),
mBufferSize(0),
mWsFrameExtractor(messageSizeMax),
mLastUsed(Timer::getTimeMs()),
mConnState(NewMessage)
{
DebugLog (<< "ConnectionBase::ConnectionBase, who: " << mWho << " " << this);
#ifdef USE_SIGCOMP
if (mCompression.isEnabled())
{
DebugLog (<< "Compression enabled for connection: " << this);
// The stack shares the compression state handler owned by mCompression.
mSigcompStack = new osc::Stack(mCompression.getStateHandler());
mCompression.addCompressorsToStack(mSigcompStack);
}
else
{
DebugLog (<< "Compression disabled for connection: " << this);
}
#else
DebugLog (<< "No compression library available: " << this);
#endif
if(mTransport)
{
mWho.mTransportKey = mTransport->getKey();
}
}
// Tears down the connection: tells the transport the flow has terminated,
// fails any sends still queued on this connection, and frees the receive
// buffer, any half-parsed message, and (when compiled in) the SigComp stack.
ConnectionBase::~ConnectionBase()
{
   if(mTransport)
   {
      mTransport->flowTerminated(mWho);
   }
   while (!mOutstandingSends.empty())
   {
      SendData* sendData = mOutstandingSends.front();
      // Report the failure upstream. mTransport is checked here for the
      // same reason it is checked above: the old code dereferenced it
      // unconditionally and would crash when draining queued sends on a
      // connection whose transport is already gone.
      if (mTransport)
      {
         mTransport->fail(sendData->transactionId,
                          mFailureReason ? mFailureReason : TransportFailure::ConnectionUnknown,
                          mFailureSubCode);
      }
      delete sendData;
      mOutstandingSends.pop_front();
   }
   delete [] mBuffer;
   delete mMessage;
#ifdef USE_SIGCOMP
   delete mSigcompStack;
#endif
   DebugLog (<< "ConnectionBase::~ConnectionBase " << this);
}
// Records why this connection failed, keeping only the most severe reason
// seen so far (larger FailureReason values are more specific/severe).
void
ConnectionBase::setFailureReason(TransportFailure::FailureReason failReason, int subCode)
{
   const bool moreSevere = (failReason > mFailureReason);
   if (moreSevere)
   {
      mFailureReason = failReason;
      mFailureSubCode = subCode;
   }
}
// The flow key identifying this connection's flow (see RFC 5626); it is
// carried inside the remote-endpoint tuple.
FlowKey
ConnectionBase::getFlowKey() const
{
   return mWho.mFlowKey;
}
// Incrementally parses |bytesRead| freshly received bytes in mBuffer
// through a three-state machine (NewMessage -> ReadingHeaders ->
// PartialBody). Complete messages are pushed up to the transport; partial
// data is retained (with buffer growth as needed) for the next read.
// Returns false on an unrecoverable parse/allocation error (caller should
// drop the connection), true otherwise. Note: on return true, ownership of
// mBuffer may have moved into mMessage or been replaced.
bool
ConnectionBase::preparseNewBytes(int bytesRead)
{
DebugLog(<< "In State: " << connectionStates[mConnState]);
start: // If there is an overhang come back here, effectively recursing
switch(mConnState)
{
case NewMessage:
{
// Stream keepalives: CRLFCRLF is a ping, lone CRLF is a pong (RFC 5626).
if (strncmp(mBuffer + mBufferPos, Symbols::CRLFCRLF, 4) == 0)
{
DebugLog(<< "Got incoming double-CRLF keepalive (aka ping).");
mBufferPos += 4;
bytesRead -= 4;
onDoubleCRLF();
if (bytesRead)
{
goto start;
}
else
{
delete [] mBuffer;
mBuffer = 0;
return true;
}
}
else if (strncmp(mBuffer + mBufferPos, Symbols::CRLF, 2) == 0)
{
//DebugLog(<< "Got incoming CRLF keepalive response (aka pong).");
mBufferPos += 2;
bytesRead -= 2;
onSingleCRLF();
if (bytesRead)
{
goto start;
}
else
{
delete [] mBuffer;
mBuffer = 0;
return true;
}
}
// Real message data: start a new SipMessage and fall into header parsing.
resip_assert(mTransport);
mMessage = new SipMessage(&mTransport->getTuple());
DebugLog(<< "ConnectionBase::process setting source " << mWho);
mMessage->setSource(mWho);
mMessage->setTlsDomain(mTransport->tlsDomain());
#ifdef USE_SSL
// Set TlsPeerName if message is from TlsConnection
TlsConnection *tlsConnection = dynamic_cast<TlsConnection *>(this);
if(tlsConnection)
{
std::list<Data> peerNameList;
tlsConnection->getPeerNames(peerNameList);
mMessage->setTlsPeerNames(peerNameList);
}
#endif
mMsgHeaderScanner.prepareForMessage(mMessage);
// Fall through to the next case.
}
case ReadingHeaders:
{
unsigned int chunkLength = (unsigned int)mBufferPos + bytesRead;
char *unprocessedCharPtr;
MsgHeaderScanner::ScanChunkResult scanChunkResult =
mMsgHeaderScanner.scanChunk(mBuffer,
chunkLength,
&unprocessedCharPtr);
if (scanChunkResult == MsgHeaderScanner::scrError)
{
//.jacob. Not a terribly informative warning.
WarningLog(<< "Discarding preparse!");
delete [] mBuffer;
mBuffer = 0;
delete mMessage;
mMessage = 0;
mConnState = NewMessage;
return false;
}
// DoS guards: cap header count and the length of any single header line.
if (mMsgHeaderScanner.getHeaderCount() > 1024)
{
WarningLog(<< "Discarding preparse; too many headers");
delete [] mBuffer;
mBuffer = 0;
delete mMessage;
mMessage = 0;
mConnState = NewMessage;
return false;
}
unsigned int numUnprocessedChars =
(unsigned int)((mBuffer + chunkLength) - unprocessedCharPtr);
if(numUnprocessedChars > ConnectionBase::ChunkSize &&
scanChunkResult == MsgHeaderScanner::scrNextChunk)
{
WarningLog(<< "Discarding preparse; header-field-value (or "
"header name) too long");
delete [] mBuffer;
mBuffer = 0;
delete mMessage;
mMessage = 0;
mConnState = NewMessage;
return false;
}
if(numUnprocessedChars==chunkLength)
{
// .bwc. MsgHeaderScanner wasn't able to parse anything useful;
// don't bother mMessage yet, but make more room in mBuffer.
size_t size = numUnprocessedChars*3/2;
if (size < ConnectionBase::ChunkSize)
{
size = ConnectionBase::ChunkSize;
}
char* newBuffer = 0;
try
{
newBuffer=MsgHeaderScanner::allocateBuffer((int)size);
}
catch(std::bad_alloc&)
{
ErrLog(<<"Failed to alloc a buffer during preparse!");
return false;
}
memcpy(newBuffer, unprocessedCharPtr, numUnprocessedChars);
delete [] mBuffer;
mBuffer = newBuffer;
mBufferPos = numUnprocessedChars;
mBufferSize = size;
mConnState = ReadingHeaders;
return true;
}
// The scanner consumed at least part of the buffer; the SipMessage now
// owns mBuffer's storage.
mMessage->addBuffer(mBuffer);
mBuffer=0;
if (scanChunkResult == MsgHeaderScanner::scrNextChunk)
{
// Message header is incomplete...
if (numUnprocessedChars == 0)
{
// ...but the chunk is completely processed.
//.jacob. I've discarded the "assigned" concept.
//DebugLog(<< "Data assigned, not fragmented, not complete");
try
{
mBuffer = MsgHeaderScanner::allocateBuffer(ChunkSize);
}
catch(std::bad_alloc&)
{
ErrLog(<<"Failed to alloc a buffer during preparse!");
return false;
}
mBufferPos = 0;
mBufferSize = ChunkSize;
}
else
{
// ...but some of the chunk must be shifted into the next one.
size_t size = numUnprocessedChars*3/2;
if (size < ConnectionBase::ChunkSize)
{
size = ConnectionBase::ChunkSize;
}
char* newBuffer = 0;
try
{
newBuffer = MsgHeaderScanner::allocateBuffer((int)size);
}
catch(std::bad_alloc&)
{
ErrLog(<<"Failed to alloc a buffer during preparse!");
return false;
}
memcpy(newBuffer, unprocessedCharPtr, numUnprocessedChars);
mBuffer = newBuffer;
mBufferPos = numUnprocessedChars;
mBufferSize = size;
}
mConnState = ReadingHeaders;
}
else
{
size_t contentLength = 0;
try
{
// The message header is complete.
contentLength=mMessage->const_header(h_ContentLength).value();
}
catch(resip::BaseException& e)  // Could be SipMessage::Exception or ParseException
{
WarningLog(<<"Malformed Content-Length in connection-based transport"
". Not much we can do to fix this. " << e);
// .bwc. Bad Content-Length. We are hosed.
delete mMessage;
mMessage = 0;
mBuffer = 0;
// .bwc. mMessage just took ownership of mBuffer, so we don't
// delete it here. We do zero it though, for completeness.
//.jacob. Shouldn't the state also be set here?
return false;
}
// NOTE(review): contentLength is size_t, so "< 0" below can never be
// true; only the messageSizeMax bound is effective here.
if(contentLength > messageSizeMax || contentLength < 0)
{
WarningLog(<<"Content-Length in connection-based "
"transport exceeds maximum " << messageSizeMax);
delete mMessage;
mMessage = 0;
mBuffer = 0;
// .bwc. mMessage just took ownership of mBuffer, so we don't
// delete it here. We do zero it though, for completeness.
//.jacob. Shouldn't the state also be set here?
return false;
}
if (numUnprocessedChars < contentLength)
{
// The message body is incomplete.
DebugLog(<< "partial body received");
size_t newSize=resipMin(resipMax((size_t)numUnprocessedChars*3/2,
(size_t)ConnectionBase::ChunkSize),
contentLength);
char* newBuffer = MsgHeaderScanner::allocateBuffer((int)newSize);
memcpy(newBuffer, unprocessedCharPtr, numUnprocessedChars);
mBufferPos = numUnprocessedChars;
mBufferSize = newSize;
mBuffer = newBuffer;
mConnState = PartialBody;
}
else
{
// Do this stuff BEFORE we kick the message out the door.
// Remember, deleting or passing mMessage on invalidates our
// buffer!
int overHang = numUnprocessedChars - (int)contentLength;
mConnState = NewMessage;
mBuffer = 0;
if (overHang > 0)
{
// The next message has been partially read.
size_t size = overHang*3/2;
if (size < ConnectionBase::ChunkSize)
{
size = ConnectionBase::ChunkSize;
}
char* newBuffer = MsgHeaderScanner::allocateBuffer((int)size);
memcpy(newBuffer,
unprocessedCharPtr + contentLength,
overHang);
mBuffer = newBuffer;
mBufferPos = 0;
mBufferSize = size;
DebugLog (<< "Extra bytes after message: " << overHang);
//DebugLog (<< Data(mBuffer, overHang));
bytesRead = overHang;
}
// The message body is complete.
mMessage->setBody(unprocessedCharPtr, (UInt32)contentLength);
// Congestion control: possibly shed this message instead of parsing.
CongestionManager::RejectionBehavior b=mTransport->getRejectionBehaviorForIncoming();
if (b==CongestionManager::REJECTING_NON_ESSENTIAL
|| (b==CongestionManager::REJECTING_NEW_WORK
&& mMessage->isRequest()))
{
UInt32 expectedWait(mTransport->getExpectedWaitForIncoming());
// .bwc. If this fifo is REJECTING_NEW_WORK, we will drop
// requests but not responses ( ?bwc? is this right for ACK?).
// If we are REJECTING_NON_ESSENTIAL,
// we reject all incoming work, since losing something from the
// wire will not cause instability or leaks (see
// CongestionManager.hxx)
// .bwc. This handles all appropriate checking for whether
// this is a response or an ACK.
// NOTE(review): std::auto_ptr is deprecated/removed in modern C++.
std::auto_ptr<SendData> tryLater(transport()->make503(*mMessage, expectedWait/1000));
if(tryLater.get())
{
transport()->send(tryLater);
}
delete mMessage; // dropping message due to congestion
mMessage = 0;
}
else if (!transport()->basicCheck(*mMessage))
{
delete mMessage;
mMessage = 0;
}
else
{
Transport::stampReceived(mMessage);
DebugLog(<< "##Connection: " << *this << " received: " << *mMessage);
resip_assert( mTransport );
mTransport->pushRxMsgUp(mMessage);
mMessage = 0;
}
if (overHang > 0)
{
goto start;
}
}
}
break;
}
case PartialBody:
{
size_t contentLength = 0;
try
{
contentLength = mMessage->const_header(h_ContentLength).value();
}
catch(resip::BaseException& e)  // Could be SipMessage::Exception or ParseException
{
WarningLog(<<"Malformed Content-Length in connection-based transport"
". Not much we can do to fix this. " << e);
// .bwc. Bad Content-Length. We are hosed.
delete [] mBuffer;
mBuffer = 0;
delete mMessage;
mMessage = 0;
//.jacob. Shouldn't the state also be set here?
return false;
}
mBufferPos += bytesRead;
if (mBufferPos >= contentLength)
{
// Body complete; anything beyond contentLength belongs to the next
// message and is copied into a fresh buffer below.
int overHang = mBufferPos - (int)contentLength;
char *overHangStart = mBuffer + contentLength;
mMessage->addBuffer(mBuffer);
mMessage->setBody(mBuffer, (UInt32)contentLength);
mConnState = NewMessage;
mBuffer = 0;
if (overHang > 0)
{
// The next message has been partially read.
size_t size = overHang * 3 / 2;
if (size < ConnectionBase::ChunkSize)
{
size = ConnectionBase::ChunkSize;
}
char* newBuffer = MsgHeaderScanner::allocateBuffer((int)size);
memcpy(newBuffer, overHangStart, overHang);
mBuffer = newBuffer;
mBufferPos = 0;
mBufferSize = size;
DebugLog(<< "Extra bytes after message: " << overHang);
//DebugLog(<< Data(mBuffer, overHang));
bytesRead = overHang;
}
// .bwc. basicCheck takes up substantial CPU. Don't bother doing it
// if we're overloaded.
CongestionManager::RejectionBehavior b=mTransport->getRejectionBehaviorForIncoming();
if (b==CongestionManager::REJECTING_NON_ESSENTIAL
|| (b==CongestionManager::REJECTING_NEW_WORK
&& mMessage->isRequest()))
{
UInt32 expectedWait(mTransport->getExpectedWaitForIncoming());
// .bwc. If this fifo is REJECTING_NEW_WORK, we will drop
// requests but not responses ( ?bwc? is this right for ACK?).
// If we are REJECTING_NON_ESSENTIAL,
// we reject all incoming work, since losing something from the
// wire will not cause instability or leaks (see
// CongestionManager.hxx)
// .bwc. This handles all appropriate checking for whether
// this is a response or an ACK.
std::auto_ptr<SendData> tryLater = transport()->make503(*mMessage, expectedWait/1000);
if(tryLater.get())
{
transport()->send(tryLater);
}
delete mMessage; // dropping message due to congestion
mMessage = 0;
}
else if (!transport()->basicCheck(*mMessage))
{
delete mMessage;
mMessage = 0;
}
else
{
DebugLog(<< "##ConnectionBase: " << *this << " received: " << *mMessage);
Transport::stampReceived(mMessage);
resip_assert( mTransport );
mTransport->pushRxMsgUp(mMessage);
mMessage = 0;
}
if (overHang > 0)
{
goto start;
}
}
else if (mBufferPos == mBufferSize)
{
// .bwc. We've filled our buffer and haven't read contentLength bytes yet; go ahead and make more room.
// NOTE(review): bare assert here, resip_assert everywhere else.
assert(contentLength >= mBufferSize);
size_t newSize = resipMin(mBufferSize*3/2, contentLength);
char* newBuffer = 0;
try
{
newBuffer=new char[newSize];
}
catch(std::bad_alloc&)
{
ErrLog(<<"Failed to alloc a buffer while receiving body!");
return false;
}
memcpy(newBuffer, mBuffer, mBufferSize);
mBufferSize=newSize;
delete [] mBuffer;
mBuffer = newBuffer;
}
break;
}
default:
resip_assert(0);
}
return true;
}
// Parses every Cookie header of |message| into name=value pairs appended to
// |cookieList|. Values may be bare tokens or double-quoted strings; pairs
// are separated by ';' and optional whitespace. Malformed input may throw a
// ParseException from the ParseBuffer helpers (callers catch it).
void
ConnectionBase::wsParseCookies(CookieList& cookieList, const SipMessage* message)
{
Data name;
Data value;
StringCategories::const_iterator it = message->header(h_Cookies).begin();
for (; it != message->header(h_Cookies).end(); ++it)
{
ParseBuffer pb((*it).value());
while(!pb.eof())
{
// name is everything up to '='.
const char* anchor = pb.skipWhitespace();
pb.skipToChar(Symbols::EQUALS[0]);
pb.data(name, anchor);
anchor = pb.skipChar(Symbols::EQUALS[0]);
if(*(pb.position()) == Symbols::DOUBLE_QUOTE[0])
{
// Quoted value: take the content between the quotes.
anchor = pb.skipChar(Symbols::DOUBLE_QUOTE[0]);
pb.skipToChar(Symbols::DOUBLE_QUOTE[0]);
pb.data(value, anchor);
pb.skipChar(Symbols::DOUBLE_QUOTE[0]);
}
else
{
// Bare value: runs until ';' or whitespace.
pb.skipToOneOf(Symbols::SEMI_COLON, ParseBuffer::Whitespace);
pb.data(value, anchor);
}
Cookie cookie(name, value);
cookieList.push_back(cookie);
DebugLog(<< "Cookie: " << cookie);
if(!pb.eof() && *(pb.position()) == Symbols::SEMI_COLON[0])
{
pb.skipChar(Symbols::SEMI_COLON[0]);
}
pb.skipWhitespace();
}
}
}
/*
* Returns true if handshake complete, false if more bytes needed
* Sets dropConnection = true if an error occurs
*/
// Processes |bytesRead| new bytes of a WebSocket HTTP upgrade request.
// Returns true once the handshake request is fully parsed and a 101
// response has been queued; false if more bytes are needed OR an error
// occurred (distinguish via |dropConnection|, which is set true on errors:
// oversize handshake, cookie-validation failure, or an unparsable request).
bool
ConnectionBase::wsProcessHandshake(int bytesRead, bool &dropConnection)
{
mConnState = WebSocket;
dropConnection = false;
// Bound total handshake size to avoid unbounded buffering.
if(mBufferPos + bytesRead > messageSizeMax)
{
WarningLog(<<"Too many bytes received during WS handshake, dropping connection. Max message size = " << messageSizeMax);
dropConnection = true;
return false;
}
resip_assert(mTransport);
// The HTTP upgrade request is parsed with the SIP header scanner into a
// throwaway SipMessage; it is deleted before returning in every path.
mMessage = new SipMessage(&mTransport->getTuple());
resip_assert(mMessage);
mMessage->setSource(mWho);
mMessage->setTlsDomain(mTransport->tlsDomain());
if (!scanMsgHeader(bytesRead))
{
// Incomplete or unscannable so far; scanMsgHeader cleaned up mMessage.
return false;
}
try
{
WsConnectionBase* wsConnectionBase = dynamic_cast<WsConnectionBase*>(this);
CookieList cookieList;
if(wsConnectionBase)
{
SharedPtr<WsCookieContext> wsCookieContext((WsCookieContext*)0);
if (mMessage->exists(h_Cookies))
{
WsBaseTransport* wst = dynamic_cast<WsBaseTransport*>(mTransport);
resip_assert(wst);
try
{
wsParseCookies(cookieList, mMessage);
wsConnectionBase->setCookies(cookieList);
// Use of resip WsCookieContext capabilities is not mandatory,
// only try to use it if cookieContextFactory is available
if(wst->cookieContextFactory().get())
{
Uri& requestUri = mMessage->header(h_RequestLine).uri();
wsCookieContext = wst->cookieContextFactory()->makeCookieContext(cookieList, requestUri);
wsConnectionBase->setWsCookieContext(wsCookieContext);
}
}
catch(ParseException& ex)
{
WarningLog(<<"Failed to parse cookies into WsCookieContext: " << ex);
}
}
// Optional application hook: reject the connection if cookie-based
// validation fails (or no usable cookie context was built).
SharedPtr<WsConnectionValidator> wsConnectionValidator = wsConnectionBase->connectionValidator();
if(wsConnectionValidator &&
(!wsCookieContext.get() || !wsConnectionValidator->validateConnection(*wsCookieContext)))
{
ErrLog(<<"WebSocket cookie validation failed, dropping connection");
// FIXME: should send back a HTTP error code:
//   400 if the cookie was not in the right syntax
//   403 if the cookie was well formed but rejected
//       due to expiry or a bad HMAC
delete mMessage;
mMessage = 0;
mBufferPos = 0;
dropConnection = true;
return false;
}
}
std::auto_ptr<Data> wsResponsePtr = makeWsHandshakeResponse();
if (wsResponsePtr.get())
{
DebugLog (<< "WebSocket upgrade accepted, cookie count = " << cookieList.size());
// Queue the 101 Switching Protocols response for transmission.
mOutstandingSends.push_back(new SendData(
who(),
*wsResponsePtr.get(),
Data::Empty,
Data::Empty,
true));
}
else
{
ErrLog(<<"Failed to parse WebSocket initialization request");
delete mMessage;
mMessage = 0;
mBufferPos = 0;
dropConnection = true;
return false;
}
}
catch(resip::ParseException& e)
{
ErrLog(<<"Cannot auth request is missing " << e);
delete mMessage;
mMessage = 0;
mBufferPos = 0;
dropConnection = true;
return false;
}
delete mMessage;
mMessage=0;
mBufferPos = 0;
return true;
}
// Runs the header scanner over the accumulated handshake bytes. Returns
// true when a complete header section was scanned; otherwise deletes
// mMessage, advances mBufferPos past the new bytes, and returns false so
// the caller waits for more data.
// NOTE(review): the log text fires when scanResult is an error (i.e. NOT
// scrNextChunk), so "more bytes needed" looks inverted -- confirm the
// intended condition against MsgHeaderScanner's result semantics.
bool
ConnectionBase::scanMsgHeader(int bytesRead)
{
mMsgHeaderScanner.prepareForMessage(mMessage);
char *unprocessedCharPtr;
MsgHeaderScanner::ScanChunkResult scanResult = mMsgHeaderScanner.scanChunk(mBuffer, mBufferPos + bytesRead, &unprocessedCharPtr);
if (scanResult != MsgHeaderScanner::scrEnd)
{
if(scanResult != MsgHeaderScanner::scrNextChunk)
{
StackLog(<<"Failed to parse message, more bytes needed");
StackLog(<< Data(mBuffer, bytesRead));
}
delete mMessage;
mMessage=0;
mBufferPos += bytesRead;
return false;
}
return true;
}
// Builds the HTTP 101 response for an RFC 6455 WebSocket upgrade: the
// Sec-WebSocket-Accept value is SHA-1(client key + magic GUID), base64
// encoded. Returns a null pointer when the client offered no usable key
// (missing header, or only the obsolete hixie-76 Key1/Key2 pair).
std::auto_ptr<Data>
ConnectionBase::makeWsHandshakeResponse()
{
std::auto_ptr<Data> responsePtr(0);
if(isUsingSecWebSocketKey())
{
responsePtr.reset(new Data("HTTP/1.1 101 WebSocket Protocol Handshake\r\n"
"Upgrade: WebSocket\r\n"
"Connection: Upgrade\r\n"
"Sec-WebSocket-Protocol: sip\r\n"));
// Assuming that the OpenSSL implementation of SHA1 is more efficient
// than our internal one.
#ifdef USE_SSL
SHA1Stream wsSha1Stream;
wsSha1Stream << (mMessage->const_header(h_SecWebSocketKey).value() + Symbols::WebsocketMagicGUID);
Data wsAcceptKey = wsSha1Stream.getBin(160).base64encode();
#else
SHA1 sha1;
sha1.update(mMessage->const_header(h_SecWebSocketKey).value().c_str());
sha1.update(Symbols::WebsocketMagicGUID);
Data wsAcceptKey = sha1.finalBin().base64encode();
#endif
*responsePtr += "Sec-WebSocket-Accept: " + wsAcceptKey + "\r\n\r\n";
}
else if(isUsingDeprecatedSecWebSocketKeys())
{
ErrLog(<<"WS client wants to use depracated protocol version, unsupported");
}
else
{
ErrLog(<<"No SecWebSocketKey header");
}
return responsePtr;
}
// True when the client offered the obsolete hixie-76 style handshake,
// which sends Sec-WebSocket-Key1/Key2. That protocol is not supported.
bool ConnectionBase::isUsingDeprecatedSecWebSocketKeys()
{
   resip_assert(mMessage);
   const bool hasKey1 = mMessage->exists(h_SecWebSocketKey1);
   const bool hasKey2 = mMessage->exists(h_SecWebSocketKey2);
   return hasKey1 && hasKey2;
}
// True when the client sent the RFC 6455 Sec-WebSocket-Key header; this is
// the handshake variant makeWsHandshakeResponse() can answer.
bool ConnectionBase::isUsingSecWebSocketKey()
{
   resip_assert(mMessage);
   return mMessage->exists(h_SecWebSocketKey);
}
// Feeds |bytesRead| bytes from mBuffer through the WebSocket frame
// extractor; each extracted payload is treated as one complete SIP message
// (or a CRLFCRLF keepalive) and pushed up the stack. Returns false when
// the extractor asks for the connection to be dropped, true otherwise.
bool
ConnectionBase::wsProcessData(int bytesRead)
{
bool dropConnection = false;
// Always consumes the whole buffer:
std::auto_ptr<Data> msg = mWsFrameExtractor.processBytes((UInt8*)mBuffer, bytesRead, dropConnection);
while(msg.get())
{
// mWsBuffer should now contain a discrete SIP message, let the
// stack go to work on it
if(msg->size() == 4 && memcmp(msg->data(), "\r\n\r\n", 4) == 0)
{
// sending a keep alive reply now
StackLog(<<"got a SIP ping embedded in WebSocket frame, replying");
onDoubleCRLF();
msg = mWsFrameExtractor.processBytes(0, 0, dropConnection);
continue;
}
resip_assert(mTransport);
mMessage = new SipMessage(&mTransport->getTuple());
mMessage->setSource(mWho);
mMessage->setTlsDomain(mTransport->tlsDomain());
#ifdef USE_SSL
// Set TlsPeerName if message is from TlsConnection
TlsConnection *tlsConnection = dynamic_cast<TlsConnection *>(this);
if(tlsConnection)
{
std::list<Data> peerNameList;
tlsConnection->getPeerNames(peerNameList);
mMessage->setTlsPeerNames(peerNameList);
}
#endif
// Propagate handshake-time cookie data onto each message.
WsConnectionBase *wsConnectionBase = dynamic_cast<WsConnectionBase *>(this);
if (wsConnectionBase)
{
mMessage->setWsCookies(wsConnectionBase->getCookies());
mMessage->setWsCookieContext(wsConnectionBase->getWsCookieContext());
}
Data::size_type msg_len = msg->size();
// cast permitted, as it is borrowed:
// NOTE(review): addBuffer() elsewhere takes ownership of the pointer it
// is given, while this buffer still belongs to |msg| -- verify the
// Data/addBuffer ownership semantics here to rule out a double free.
char *sipBuffer = (char *)msg->data();
mMessage->addBuffer(sipBuffer);
mMsgHeaderScanner.prepareForMessage(mMessage);
char *unprocessedCharPtr;
if (mMsgHeaderScanner.scanChunk(sipBuffer,
msg_len,
&unprocessedCharPtr) !=
MsgHeaderScanner::scrEnd)
{
StackLog(<<"Scanner rejecting WebSocket SIP message as unparsable, length = " << msg_len);
StackLog(<< Data(sipBuffer, msg_len));
delete mMessage;
mMessage=0;
}
// Anything the scanner did not consume is the message body.
unsigned int used = unprocessedCharPtr - sipBuffer;
if (mMessage && (used < msg_len))
{
mMessage->setBody(sipBuffer+used, msg_len-used);
}
if (mMessage && !transport()->basicCheck(*mMessage))
{
delete mMessage;
mMessage = 0;
}
if (mMessage)
{
Transport::stampReceived(mMessage);
resip_assert( mTransport );
mTransport->pushRxMsgUp(mMessage);
mMessage = 0;
}
else
{
// Something wrong...
ErrLog(<< "We don't have a valid SIP message, maybe drop the connection?");
}
// Drain any further frames already buffered by the extractor.
msg = mWsFrameExtractor.processBytes(0, 0, dropConnection);
}
if(dropConnection)
{
return false;
}
return true;
}
#ifdef USE_SIGCOMP
// SigComp path (compiled only under USE_SIGCOMP): feeds |bytesRead| bytes
// from mBuffer into the SigComp framer, decompresses every complete
// message, parses it as SIP, assigns the compartment ID so decompression
// state can be stored, and pushes the message up. On decompression failure
// a NACK is queued back to the peer (when we are sending compressed).
void
ConnectionBase::decompressNewBytes(int bytesRead)
{
mConnState = SigComp;
if (!mSigcompFramer)
{
mSigcompFramer = new osc::TcpStream();
}
mSigcompFramer->addData(mBuffer, bytesRead);
size_t bytesUncompressed;
osc::StateChanges *sc = 0;
// 64 KiB scratch buffer: the maximum size of one decompressed message.
char *uncompressed = new char[65536];
while ((bytesUncompressed = mSigcompStack->uncompressMessage(
*mSigcompFramer, uncompressed, 65536, sc)) > 0)
{
DebugLog (<< "Uncompressed Connection-oriented message");
mMessage = new SipMessage(mWho.transport);
mMessage->setSource(mWho);
mMessage->setTlsDomain(mWho.transport->tlsDomain());
#ifdef USE_SSL
// Set TlsPeerName if message is from TlsConnection
TlsConnection *tlsConnection = dynamic_cast<TlsConnection *>(this);
if(tlsConnection)
{
std::list<Data> peerNameList;
tlsConnection->getPeerNames(peerNameList);
mMessage->setTlsPeerNames(peerNameList);
}
#endif
// Copy out of the scratch buffer; the SipMessage takes ownership.
char *sipBuffer = new char[bytesUncompressed];
memmove(sipBuffer, uncompressed, bytesUncompressed);
mMessage->addBuffer(sipBuffer);
mMsgHeaderScanner.prepareForMessage(mMessage);
char *unprocessedCharPtr;
if (mMsgHeaderScanner.scanChunk(sipBuffer,
bytesUncompressed,
&unprocessedCharPtr) !=
MsgHeaderScanner::scrEnd)
{
StackLog(<<"Scanner rejecting compressed message as unparsable");
StackLog(<< Data(sipBuffer, bytesUncompressed));
delete mMessage;
mMessage=0;
}
unsigned int used = unprocessedCharPtr - sipBuffer;
if (mMessage && (used < bytesUncompressed))
{
mMessage->setBody(sipBuffer+used, bytesUncompressed-used);
}
if (mMessage && !transport()->basicCheck(*mMessage))
{
delete mMessage;
mMessage = 0;
}
if (mMessage)
{
Transport::stampReceived(mMessage);
// If the message made it this far, we should let it store
// SigComp state: extract the compartment ID.
const Via &via = mMessage->const_header(h_Vias).front();
if (mMessage->isRequest())
{
// For requests, the compartment ID is read out of the
// top via header field; if not present, we use the
// TCP connection for identification purposes.
if (via.exists(p_sigcompId))
{
Data compId = via.param(p_sigcompId);
if(!compId.empty())
{
mSigcompStack->provideCompartmentId(sc, compId.data(), compId.size());
}
}
else
{
// Key the compartment off this connection object's address
// (sizeof(this) is the pointer size, i.e. the key length).
mSigcompStack->provideCompartmentId(sc, this, sizeof(this));
}
}
else
{
// For responses, the compartment ID is supposed to be
// the same as the compartment ID of the request. We
// *could* dig down into the transaction layer to try to
// figure this out, but that's a royal pain, and a rather
// severe layer violation. In practice, we're going to ferret
// the ID out of the the Via header field, which is where we
// squirreled it away when we sent this request in the first place.
Data compId = via.param(p_branch).getSigcompCompartment();
if(!compId.empty())
{
mSigcompStack->provideCompartmentId(sc, compId.data(), compId.size());
}
}
resip_assert( mTransport );
mTransport->pushRxMsgUp(mMessage);
mMessage = 0;
sc = 0;
}
else
{
delete sc;
sc = 0;
}
}
delete [] uncompressed;
// If there was a decompression failure, let the other side know.
osc::SigcompMessage *nack = mSigcompStack->getNack();
if (nack)
{
if (mSendingTransmissionFormat == Compressed)
{
// !bwc! We are not telling anyone that we're interested in having our
// FD put in the writable set...
// NOTE(review): the Data below copies the stream bytes, but |nack|
// itself is only deleted on the else branch -- possible leak here;
// check osc::SigcompMessage ownership rules.
mOutstandingSends.push_back(new SendData(
who(),
Data(nack->getStreamMessage(), nack->getStreamLength()),
Data::Empty,
Data::Empty,
true));
}
else
{
delete nack;
}
}
}
#endif
// Returns a writable region of the receive buffer. When the connection is
// waiting for a fresh message, the buffer is lazily allocated and the write
// position rewound to the start; otherwise the current position is kept so
// a partially received message can continue to accumulate.
std::pair<char*, size_t>
ConnectionBase::getWriteBuffer()
{
   const bool startingNewMessage = (mConnState == NewMessage);
   if (startingNewMessage)
   {
      if (mBuffer == 0)
      {
         DebugLog (<< "Creating buffer for " << *this);
         mBuffer = MsgHeaderScanner::allocateBuffer(ConnectionBase::ChunkSize);
         mBufferSize = ConnectionBase::ChunkSize;
      }
      mBufferPos = 0;
   }
   return getCurrentWriteBuffer();
}
std::pair<char*, size_t>
ConnectionBase::getCurrentWriteBuffer()
{
return std::make_pair(mBuffer + mBufferPos, mBufferSize - mBufferPos);
}
// Guarantees the receive buffer can hold extraBytes additional bytes beyond
// currentPos, reallocating (and preserving the first currentPos bytes) when
// necessary. Returns a pointer to the first byte of the extra region. Both
// arguments must be positive; anything else is a caller bug.
char*
ConnectionBase::getWriteBufferForExtraBytes(int currentPos, int extraBytes)
{
   if (currentPos <= 0 || extraBytes <= 0)
   {
      // Contract violation: keep the historical behavior of asserting and
      // handing back the raw buffer.
      resip_assert(0);
      return mBuffer;
   }
   if ((currentPos + extraBytes) > mBufferSize)
   {
      // Grow: move the bytes already received into a fresh, larger buffer.
      mBufferSize = currentPos + extraBytes;
      char* grown = MsgHeaderScanner::allocateBuffer((int)mBufferSize);
      memcpy(grown, mBuffer, currentPos);
      delete[] mBuffer;
      mBuffer = grown;
   }
   return &mBuffer[currentPos];
}
// Adopts an externally allocated buffer as this connection's receive buffer
// and resets the write position. Ownership of |bytes| transfers to this
// object (it is released with delete[] in the destructor).
// NOTE(review): any previously held mBuffer is overwritten here without
// being freed -- presumably callers only invoke this when no buffer is
// held; verify at call sites.
void
ConnectionBase::setBuffer(char* bytes, int count)
{
mBuffer = bytes;
mBufferPos = 0;
mBufferSize = count;
}
// Accessor for the Transport this connection belongs to. May return 0;
// callers that dereference the result must know the connection is still
// attached to a transport.
Transport*
ConnectionBase::transport() const
{
   // The old body asserted on `this` being non-null. Reaching a member
   // function through a null pointer is already undefined behavior, so the
   // check was meaningless (compilers are entitled to delete it) and has
   // been removed.
   return mTransport;
}
// Streams a short diagnostic tag for a ConnectionBase: its address and the
// remote endpoint (mWho) it is serving.
EncodeStream&
resip::operator<<(EncodeStream& os,
                  const resip::ConnectionBase& conn)
{
   os << "CONN_BASE: " << &conn << " " << conn.mWho;
   return os;
}
/* ====================================================================
* The Vovida Software License, Version 1.0
*
* Copyright (c) 2000
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* 3. The names "VOCAL", "Vovida Open Communication Application Library",
* and "Vovida Open Communication Application Library (VOCAL)" must
* not be used to endorse or promote products derived from this
* software without prior written permission. For written
* permission, please contact vocal@vovida.org.
*
* 4. Products derived from this software may not be called "VOCAL", nor
* may "VOCAL" appear in their name, without prior written
* permission of Vovida Networks, Inc.
*
* THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESSED OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE AND
* NON-INFRINGEMENT ARE DISCLAIMED. IN NO EVENT SHALL VOVIDA
* NETWORKS, INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT DAMAGES
* IN EXCESS OF $1,000, NOR FOR ANY INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
* USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*
* ====================================================================
*
* This software consists of voluntary contributions made by Vovida
* Networks, Inc. and many individuals on behalf of Vovida Networks,
* Inc. For more information on Vovida Networks, Inc., please see
* <http://www.vovida.org/>.
*
* vi: set shiftwidth=3 expandtab:
*/
|
197_0
|
crossvul
|
cxx
|
CWE-119
|
Improper Restriction of Operations within the Bounds of a Memory Buffer - Certain languages allow direct addressing of memory locations and do not automatically ensure that these locations are valid for the memory buffer that is being referenced.
|
cpp
|
/* Copyright (C) 2002-2005 RealVNC Ltd. All Rights Reserved.
* Copyright 2014 Pierre Ossman for Cendio AB
*
* This is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This software is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this software; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
* USA.
*/
// -=- PixelBuffer.cxx
//
// The PixelBuffer class encapsulates the PixelFormat and dimensions
// of a block of pixel data.
#include <rfb/Exception.h>
#include <rfb/LogWriter.h>
#include <rfb/PixelBuffer.h>
using namespace rfb;
using namespace rdr;
static LogWriter vlog("PixelBuffer");
// -=- Generic pixel buffer class
PixelBuffer::PixelBuffer(const PixelFormat& pf, int w, int h)
: format(pf), width_(w), height_(h) {}
PixelBuffer::PixelBuffer() : width_(0), height_(0) {}
PixelBuffer::~PixelBuffer() {}
void
PixelBuffer::getImage(void* imageBuf, const Rect& r, int outStride) const {
int inStride;
const U8* data = getBuffer(r, &inStride);
// We assume that the specified rectangle is pre-clipped to the buffer
int bytesPerPixel = format.bpp/8;
int inBytesPerRow = inStride * bytesPerPixel;
if (!outStride) outStride = r.width();
int outBytesPerRow = outStride * bytesPerPixel;
int bytesPerMemCpy = r.width() * bytesPerPixel;
U8* imageBufPos = (U8*)imageBuf;
const U8* end = data + (inBytesPerRow * r.height());
while (data < end) {
memcpy(imageBufPos, data, bytesPerMemCpy);
imageBufPos += outBytesPerRow;
data += inBytesPerRow;
}
}
void PixelBuffer::getImage(const PixelFormat& pf, void* imageBuf,
const Rect& r, int stride) const
{
const rdr::U8* srcBuffer;
int srcStride;
if (format.equal(pf)) {
getImage(imageBuf, r, stride);
return;
}
if (stride == 0)
stride = r.width();
srcBuffer = getBuffer(r, &srcStride);
pf.bufferFromBuffer((U8*)imageBuf, format, srcBuffer, r.width(), r.height(),
stride, srcStride);
}
// -=- Modifiable generic pixel buffer class
ModifiablePixelBuffer::ModifiablePixelBuffer(const PixelFormat& pf,
int w, int h)
: PixelBuffer(pf, w, h)
{
}
ModifiablePixelBuffer::ModifiablePixelBuffer()
{
}
ModifiablePixelBuffer::~ModifiablePixelBuffer()
{
}
void ModifiablePixelBuffer::fillRect(const Rect& r, const void* pix)
{
int stride;
U8 *buf;
int w, h, b;
w = r.width();
h = r.height();
b = format.bpp/8;
if (h == 0)
return;
buf = getBufferRW(r, &stride);
if (b == 1) {
while (h--) {
memset(buf, *(const U8*)pix, w);
buf += stride * b;
}
} else {
U8 *start;
int w1;
start = buf;
w1 = w;
while (w1--) {
memcpy(buf, pix, b);
buf += b;
}
buf += (stride - w) * b;
h--;
while (h--) {
memcpy(buf, start, w * b);
buf += stride * b;
}
}
commitBufferRW(r);
}
void ModifiablePixelBuffer::imageRect(const Rect& r,
const void* pixels, int srcStride)
{
int bytesPerPixel = getPF().bpp/8;
int destStride;
U8* dest = getBufferRW(r, &destStride);
int bytesPerDestRow = bytesPerPixel * destStride;
if (!srcStride) srcStride = r.width();
int bytesPerSrcRow = bytesPerPixel * srcStride;
int bytesPerFill = bytesPerPixel * r.width();
const U8* src = (const U8*)pixels;
U8* end = dest + (bytesPerDestRow * r.height());
while (dest < end) {
memcpy(dest, src, bytesPerFill);
dest += bytesPerDestRow;
src += bytesPerSrcRow;
}
commitBufferRW(r);
}
void ModifiablePixelBuffer::maskRect(const Rect& r,
const void* pixels, const void* mask_)
{
Rect cr = getRect().intersect(r);
if (cr.is_empty()) return;
int stride;
U8* data = getBufferRW(cr, &stride);
U8* mask = (U8*) mask_;
int w = cr.width();
int h = cr.height();
int bpp = getPF().bpp;
int pixelStride = r.width();
int maskStride = (r.width() + 7) / 8;
Point offset = Point(cr.tl.x-r.tl.x, cr.tl.y-r.tl.y);
mask += offset.y * maskStride;
for (int y = 0; y < h; y++) {
int cy = offset.y + y;
for (int x = 0; x < w; x++) {
int cx = offset.x + x;
U8* byte = mask + (cx / 8);
int bit = 7 - cx % 8;
if ((*byte) & (1 << bit)) {
switch (bpp) {
case 8:
((U8*)data)[y * stride + x] = ((U8*)pixels)[cy * pixelStride + cx];
break;
case 16:
((U16*)data)[y * stride + x] = ((U16*)pixels)[cy * pixelStride + cx];
break;
case 32:
((U32*)data)[y * stride + x] = ((U32*)pixels)[cy * pixelStride + cx];
break;
}
}
}
mask += maskStride;
}
commitBufferRW(cr);
}
void ModifiablePixelBuffer::maskRect(const Rect& r,
Pixel pixel, const void* mask_)
{
Rect cr = getRect().intersect(r);
if (cr.is_empty()) return;
int stride;
U8* data = getBufferRW(cr, &stride);
U8* mask = (U8*) mask_;
int w = cr.width();
int h = cr.height();
int bpp = getPF().bpp;
int maskStride = (r.width() + 7) / 8;
Point offset = Point(cr.tl.x-r.tl.x, cr.tl.y-r.tl.y);
mask += offset.y * maskStride;
for (int y = 0; y < h; y++) {
for (int x = 0; x < w; x++) {
int cx = offset.x + x;
U8* byte = mask + (cx / 8);
int bit = 7 - cx % 8;
if ((*byte) & (1 << bit)) {
switch (bpp) {
case 8:
((U8*)data)[y * stride + x] = pixel;
break;
case 16:
((U16*)data)[y * stride + x] = pixel;
break;
case 32:
((U32*)data)[y * stride + x] = pixel;
break;
}
}
}
mask += maskStride;
}
commitBufferRW(cr);
}
void ModifiablePixelBuffer::copyRect(const Rect &rect,
const Point &move_by_delta)
{
int srcStride, dstStride;
const U8* srcData;
U8* dstData;
Rect drect, srect;
drect = rect;
if (!drect.enclosed_by(getRect())) {
vlog.error("Destination rect %dx%d at %d,%d exceeds framebuffer %dx%d",
drect.width(), drect.height(), drect.tl.x, drect.tl.y, width_, height_);
drect = drect.intersect(getRect());
}
if (drect.is_empty())
return;
srect = drect.translate(move_by_delta.negate());
if (!srect.enclosed_by(getRect())) {
vlog.error("Source rect %dx%d at %d,%d exceeds framebuffer %dx%d",
srect.width(), srect.height(), srect.tl.x, srect.tl.y, width_, height_);
srect = srect.intersect(getRect());
// Need to readjust the destination now that the area has changed
drect = srect.translate(move_by_delta);
}
if (srect.is_empty())
return;
srcData = getBuffer(srect, &srcStride);
dstData = getBufferRW(drect, &dstStride);
if (move_by_delta.y == 0) {
// Possible overlap. Be careful and use memmove().
int h = drect.height();
while (h--) {
memmove(dstData, srcData, drect.width() * format.bpp/8);
dstData += dstStride * format.bpp/8;
srcData += srcStride * format.bpp/8;
}
} else if (move_by_delta.y < 0) {
// The data shifted upwards. Copy from top to bottom.
int h = drect.height();
while (h--) {
memcpy(dstData, srcData, drect.width() * format.bpp/8);
dstData += dstStride * format.bpp/8;
srcData += srcStride * format.bpp/8;
}
} else {
// The data shifted downwards. Copy from bottom to top.
int h = drect.height();
dstData += (h-1) * dstStride * format.bpp/8;
srcData += (h-1) * srcStride * format.bpp/8;
while (h--) {
memcpy(dstData, srcData, drect.width() * format.bpp/8);
dstData -= dstStride * format.bpp/8;
srcData -= srcStride * format.bpp/8;
}
}
commitBufferRW(drect);
}
void ModifiablePixelBuffer::fillRect(const PixelFormat& pf, const Rect &dest,
const void* pix)
{
rdr::U8 buf[4];
format.bufferFromBuffer(buf, pf, (const rdr::U8*)pix, 1);
fillRect(dest, buf);
}
void ModifiablePixelBuffer::imageRect(const PixelFormat& pf, const Rect &dest,
const void* pixels, int stride)
{
rdr::U8* dstBuffer;
int dstStride;
if (stride == 0)
stride = dest.width();
dstBuffer = getBufferRW(dest, &dstStride);
format.bufferFromBuffer(dstBuffer, pf, (const rdr::U8*)pixels,
dest.width(), dest.height(),
dstStride, stride);
commitBufferRW(dest);
}
// -=- Simple pixel buffer with a continuous block of memory
FullFramePixelBuffer::FullFramePixelBuffer(const PixelFormat& pf, int w, int h,
rdr::U8* data_, int stride_)
: ModifiablePixelBuffer(pf, w, h), data(data_), stride(stride_)
{
}
FullFramePixelBuffer::FullFramePixelBuffer() : data(0) {}
FullFramePixelBuffer::~FullFramePixelBuffer() {}
rdr::U8* FullFramePixelBuffer::getBufferRW(const Rect& r, int* stride_)
{
*stride_ = stride;
return &data[(r.tl.x + (r.tl.y * stride)) * format.bpp/8];
}
void FullFramePixelBuffer::commitBufferRW(const Rect& r)
{
}
const rdr::U8* FullFramePixelBuffer::getBuffer(const Rect& r, int* stride_) const
{
*stride_ = stride;
return &data[(r.tl.x + (r.tl.y * stride)) * format.bpp/8];
}
// -=- Managed pixel buffer class
// Automatically allocates enough space for the specified format & area
ManagedPixelBuffer::ManagedPixelBuffer()
: datasize(0)
{
checkDataSize();
};
ManagedPixelBuffer::ManagedPixelBuffer(const PixelFormat& pf, int w, int h)
: FullFramePixelBuffer(pf, w, h, NULL, w), datasize(0)
{
checkDataSize();
};
ManagedPixelBuffer::~ManagedPixelBuffer() {
if (data) delete [] data;
};
void
ManagedPixelBuffer::setPF(const PixelFormat &pf) {
format = pf; checkDataSize();
};
void
ManagedPixelBuffer::setSize(int w, int h) {
width_ = w; height_ = h; stride = w; checkDataSize();
};
inline void
ManagedPixelBuffer::checkDataSize() {
unsigned long new_datasize = width_ * height_ * (format.bpp/8);
if (datasize < new_datasize) {
vlog.debug("reallocating managed buffer (%dx%d)", width_, height_);
if (data) {
delete [] data;
datasize = 0; data = 0;
}
if (new_datasize) {
data = new U8[new_datasize];
if (!data)
throw Exception("rfb::ManagedPixelBuffer unable to allocate buffer");
datasize = new_datasize;
}
}
};
|
/* Copyright (C) 2002-2005 RealVNC Ltd. All Rights Reserved.
* Copyright 2014 Pierre Ossman for Cendio AB
*
* This is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This software is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this software; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
* USA.
*/
// -=- PixelBuffer.cxx
//
// The PixelBuffer class encapsulates the PixelFormat and dimensions
// of a block of pixel data.
#include <rfb/Exception.h>
#include <rfb/LogWriter.h>
#include <rfb/PixelBuffer.h>
using namespace rfb;
using namespace rdr;
static LogWriter vlog("PixelBuffer");
// -=- Generic pixel buffer class
PixelBuffer::PixelBuffer(const PixelFormat& pf, int w, int h)
: format(pf), width_(w), height_(h) {}
PixelBuffer::PixelBuffer() : width_(0), height_(0) {}
PixelBuffer::~PixelBuffer() {}
void
PixelBuffer::getImage(void* imageBuf, const Rect& r, int outStride) const {
int inStride;
const U8* data = getBuffer(r, &inStride);
// We assume that the specified rectangle is pre-clipped to the buffer
int bytesPerPixel = format.bpp/8;
int inBytesPerRow = inStride * bytesPerPixel;
if (!outStride) outStride = r.width();
int outBytesPerRow = outStride * bytesPerPixel;
int bytesPerMemCpy = r.width() * bytesPerPixel;
U8* imageBufPos = (U8*)imageBuf;
const U8* end = data + (inBytesPerRow * r.height());
while (data < end) {
memcpy(imageBufPos, data, bytesPerMemCpy);
imageBufPos += outBytesPerRow;
data += inBytesPerRow;
}
}
void PixelBuffer::getImage(const PixelFormat& pf, void* imageBuf,
const Rect& r, int stride) const
{
const rdr::U8* srcBuffer;
int srcStride;
if (format.equal(pf)) {
getImage(imageBuf, r, stride);
return;
}
if (stride == 0)
stride = r.width();
srcBuffer = getBuffer(r, &srcStride);
pf.bufferFromBuffer((U8*)imageBuf, format, srcBuffer, r.width(), r.height(),
stride, srcStride);
}
// -=- Modifiable generic pixel buffer class
ModifiablePixelBuffer::ModifiablePixelBuffer(const PixelFormat& pf,
int w, int h)
: PixelBuffer(pf, w, h)
{
}
ModifiablePixelBuffer::ModifiablePixelBuffer()
{
}
ModifiablePixelBuffer::~ModifiablePixelBuffer()
{
}
void ModifiablePixelBuffer::fillRect(const Rect& r, const void* pix)
{
int stride;
U8 *buf;
int w, h, b;
Rect drect;
drect = r;
if (!drect.enclosed_by(getRect())) {
vlog.error("Destination rect %dx%d at %d,%d exceeds framebuffer %dx%d",
drect.width(), drect.height(), drect.tl.x, drect.tl.y, width_, height_);
drect = drect.intersect(getRect());
}
if (drect.is_empty())
return;
w = drect.width();
h = drect.height();
b = format.bpp/8;
if (h == 0)
return;
buf = getBufferRW(drect, &stride);
if (b == 1) {
while (h--) {
memset(buf, *(const U8*)pix, w);
buf += stride * b;
}
} else {
U8 *start;
int w1;
start = buf;
w1 = w;
while (w1--) {
memcpy(buf, pix, b);
buf += b;
}
buf += (stride - w) * b;
h--;
while (h--) {
memcpy(buf, start, w * b);
buf += stride * b;
}
}
commitBufferRW(drect);
}
void ModifiablePixelBuffer::imageRect(const Rect& r,
const void* pixels, int srcStride)
{
int bytesPerPixel = getPF().bpp/8;
int destStride;
U8* dest = getBufferRW(r, &destStride);
int bytesPerDestRow = bytesPerPixel * destStride;
if (!srcStride) srcStride = r.width();
int bytesPerSrcRow = bytesPerPixel * srcStride;
int bytesPerFill = bytesPerPixel * r.width();
const U8* src = (const U8*)pixels;
U8* end = dest + (bytesPerDestRow * r.height());
while (dest < end) {
memcpy(dest, src, bytesPerFill);
dest += bytesPerDestRow;
src += bytesPerSrcRow;
}
commitBufferRW(r);
}
void ModifiablePixelBuffer::maskRect(const Rect& r,
const void* pixels, const void* mask_)
{
Rect cr = getRect().intersect(r);
if (cr.is_empty()) return;
int stride;
U8* data = getBufferRW(cr, &stride);
U8* mask = (U8*) mask_;
int w = cr.width();
int h = cr.height();
int bpp = getPF().bpp;
int pixelStride = r.width();
int maskStride = (r.width() + 7) / 8;
Point offset = Point(cr.tl.x-r.tl.x, cr.tl.y-r.tl.y);
mask += offset.y * maskStride;
for (int y = 0; y < h; y++) {
int cy = offset.y + y;
for (int x = 0; x < w; x++) {
int cx = offset.x + x;
U8* byte = mask + (cx / 8);
int bit = 7 - cx % 8;
if ((*byte) & (1 << bit)) {
switch (bpp) {
case 8:
((U8*)data)[y * stride + x] = ((U8*)pixels)[cy * pixelStride + cx];
break;
case 16:
((U16*)data)[y * stride + x] = ((U16*)pixels)[cy * pixelStride + cx];
break;
case 32:
((U32*)data)[y * stride + x] = ((U32*)pixels)[cy * pixelStride + cx];
break;
}
}
}
mask += maskStride;
}
commitBufferRW(cr);
}
void ModifiablePixelBuffer::maskRect(const Rect& r,
Pixel pixel, const void* mask_)
{
Rect cr = getRect().intersect(r);
if (cr.is_empty()) return;
int stride;
U8* data = getBufferRW(cr, &stride);
U8* mask = (U8*) mask_;
int w = cr.width();
int h = cr.height();
int bpp = getPF().bpp;
int maskStride = (r.width() + 7) / 8;
Point offset = Point(cr.tl.x-r.tl.x, cr.tl.y-r.tl.y);
mask += offset.y * maskStride;
for (int y = 0; y < h; y++) {
for (int x = 0; x < w; x++) {
int cx = offset.x + x;
U8* byte = mask + (cx / 8);
int bit = 7 - cx % 8;
if ((*byte) & (1 << bit)) {
switch (bpp) {
case 8:
((U8*)data)[y * stride + x] = pixel;
break;
case 16:
((U16*)data)[y * stride + x] = pixel;
break;
case 32:
((U32*)data)[y * stride + x] = pixel;
break;
}
}
}
mask += maskStride;
}
commitBufferRW(cr);
}
void ModifiablePixelBuffer::copyRect(const Rect &rect,
const Point &move_by_delta)
{
int srcStride, dstStride;
const U8* srcData;
U8* dstData;
Rect drect, srect;
drect = rect;
if (!drect.enclosed_by(getRect())) {
vlog.error("Destination rect %dx%d at %d,%d exceeds framebuffer %dx%d",
drect.width(), drect.height(), drect.tl.x, drect.tl.y, width_, height_);
drect = drect.intersect(getRect());
}
if (drect.is_empty())
return;
srect = drect.translate(move_by_delta.negate());
if (!srect.enclosed_by(getRect())) {
vlog.error("Source rect %dx%d at %d,%d exceeds framebuffer %dx%d",
srect.width(), srect.height(), srect.tl.x, srect.tl.y, width_, height_);
srect = srect.intersect(getRect());
// Need to readjust the destination now that the area has changed
drect = srect.translate(move_by_delta);
}
if (srect.is_empty())
return;
srcData = getBuffer(srect, &srcStride);
dstData = getBufferRW(drect, &dstStride);
if (move_by_delta.y == 0) {
// Possible overlap. Be careful and use memmove().
int h = drect.height();
while (h--) {
memmove(dstData, srcData, drect.width() * format.bpp/8);
dstData += dstStride * format.bpp/8;
srcData += srcStride * format.bpp/8;
}
} else if (move_by_delta.y < 0) {
// The data shifted upwards. Copy from top to bottom.
int h = drect.height();
while (h--) {
memcpy(dstData, srcData, drect.width() * format.bpp/8);
dstData += dstStride * format.bpp/8;
srcData += srcStride * format.bpp/8;
}
} else {
// The data shifted downwards. Copy from bottom to top.
int h = drect.height();
dstData += (h-1) * dstStride * format.bpp/8;
srcData += (h-1) * srcStride * format.bpp/8;
while (h--) {
memcpy(dstData, srcData, drect.width() * format.bpp/8);
dstData -= dstStride * format.bpp/8;
srcData -= srcStride * format.bpp/8;
}
}
commitBufferRW(drect);
}
void ModifiablePixelBuffer::fillRect(const PixelFormat& pf, const Rect &dest,
const void* pix)
{
rdr::U8 buf[4];
format.bufferFromBuffer(buf, pf, (const rdr::U8*)pix, 1);
fillRect(dest, buf);
}
void ModifiablePixelBuffer::imageRect(const PixelFormat& pf, const Rect &dest,
const void* pixels, int stride)
{
rdr::U8* dstBuffer;
int dstStride;
if (stride == 0)
stride = dest.width();
dstBuffer = getBufferRW(dest, &dstStride);
format.bufferFromBuffer(dstBuffer, pf, (const rdr::U8*)pixels,
dest.width(), dest.height(),
dstStride, stride);
commitBufferRW(dest);
}
// -=- Simple pixel buffer with a continuous block of memory
FullFramePixelBuffer::FullFramePixelBuffer(const PixelFormat& pf, int w, int h,
rdr::U8* data_, int stride_)
: ModifiablePixelBuffer(pf, w, h), data(data_), stride(stride_)
{
}
FullFramePixelBuffer::FullFramePixelBuffer() : data(0) {}
FullFramePixelBuffer::~FullFramePixelBuffer() {}
rdr::U8* FullFramePixelBuffer::getBufferRW(const Rect& r, int* stride_)
{
*stride_ = stride;
return &data[(r.tl.x + (r.tl.y * stride)) * format.bpp/8];
}
void FullFramePixelBuffer::commitBufferRW(const Rect& r)
{
}
const rdr::U8* FullFramePixelBuffer::getBuffer(const Rect& r, int* stride_) const
{
*stride_ = stride;
return &data[(r.tl.x + (r.tl.y * stride)) * format.bpp/8];
}
// -=- Managed pixel buffer class
// Automatically allocates enough space for the specified format & area
ManagedPixelBuffer::ManagedPixelBuffer()
: datasize(0)
{
checkDataSize();
};
ManagedPixelBuffer::ManagedPixelBuffer(const PixelFormat& pf, int w, int h)
: FullFramePixelBuffer(pf, w, h, NULL, w), datasize(0)
{
checkDataSize();
};
ManagedPixelBuffer::~ManagedPixelBuffer() {
if (data) delete [] data;
};
void
ManagedPixelBuffer::setPF(const PixelFormat &pf) {
format = pf; checkDataSize();
};
void
ManagedPixelBuffer::setSize(int w, int h) {
width_ = w; height_ = h; stride = w; checkDataSize();
};
inline void
ManagedPixelBuffer::checkDataSize() {
unsigned long new_datasize = width_ * height_ * (format.bpp/8);
if (datasize < new_datasize) {
vlog.debug("reallocating managed buffer (%dx%d)", width_, height_);
if (data) {
delete [] data;
datasize = 0; data = 0;
}
if (new_datasize) {
data = new U8[new_datasize];
if (!data)
throw Exception("rfb::ManagedPixelBuffer unable to allocate buffer");
datasize = new_datasize;
}
}
};
|
3123_0
|
crossvul
|
cxx
|
CWE-119
|
Improper Restriction of Operations within the Bounds of a Memory Buffer - Certain languages allow direct addressing of memory locations and do not automatically ensure that these locations are valid for the memory buffer that is being referenced.
|
cpp
|
/*
* Copyright (C) 2004 Red Hat Inc.
* Copyright (C) 2005 Martin Koegler
* Copyright (C) 2010 TigerVNC Team
* Copyright (C) 2010 m-privacy GmbH
*
* This is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This software is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this software; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
* USA.
*/
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif
#ifndef HAVE_GNUTLS
#error "This header should not be compiled without HAVE_GNUTLS defined"
#endif
#include <stdlib.h>
#ifndef WIN32
#include <unistd.h>
#endif
#include <rfb/CSecurityTLS.h>
#include <rfb/SSecurityVeNCrypt.h>
#include <rfb/CConnection.h>
#include <rfb/LogWriter.h>
#include <rfb/Exception.h>
#include <rfb/UserMsgBox.h>
#include <rdr/TLSInStream.h>
#include <rdr/TLSOutStream.h>
#include <os/os.h>
#include <gnutls/x509.h>
/*
* GNUTLS 2.6.5 and older didn't have some variables defined so don't use them.
* GNUTLS 1.X.X defined LIBGNUTLS_VERSION_NUMBER so treat it as "old" gnutls as
* well
*/
#if (defined(GNUTLS_VERSION_NUMBER) && GNUTLS_VERSION_NUMBER < 0x020606) || \
defined(LIBGNUTLS_VERSION_NUMBER)
#define WITHOUT_X509_TIMES
#endif
/* Ancient GNUTLS... */
#if !defined(GNUTLS_VERSION_NUMBER) && !defined(LIBGNUTLS_VERSION_NUMBER)
#define WITHOUT_X509_TIMES
#endif
using namespace rfb;
StringParameter CSecurityTLS::X509CA("X509CA", "X509 CA certificate", "", ConfViewer);
StringParameter CSecurityTLS::X509CRL("X509CRL", "X509 CRL file", "", ConfViewer);
static LogWriter vlog("TLS");
void CSecurityTLS::initGlobal()
{
static bool globalInitDone = false;
if (!globalInitDone) {
gnutls_global_init();
globalInitDone = true;
}
}
CSecurityTLS::CSecurityTLS(bool _anon) : session(0), anon_cred(0),
anon(_anon), fis(0), fos(0)
{
cafile = X509CA.getData();
crlfile = X509CRL.getData();
}
void CSecurityTLS::setDefaults()
{
char* homeDir = NULL;
if (getvnchomedir(&homeDir) == -1) {
vlog.error("Could not obtain VNC home directory path");
return;
}
int len = strlen(homeDir) + 1;
CharArray caDefault(len + 11);
CharArray crlDefault(len + 12);
sprintf(caDefault.buf, "%sx509_ca.pem", homeDir);
sprintf(crlDefault.buf, "%s509_crl.pem", homeDir);
delete [] homeDir;
if (!fileexists(caDefault.buf))
X509CA.setDefaultStr(strdup(caDefault.buf));
if (!fileexists(crlDefault.buf))
X509CRL.setDefaultStr(strdup(crlDefault.buf));
}
void CSecurityTLS::shutdown(bool needbye)
{
if (session && needbye)
if (gnutls_bye(session, GNUTLS_SHUT_RDWR) != GNUTLS_E_SUCCESS)
vlog.error("gnutls_bye failed");
if (anon_cred) {
gnutls_anon_free_client_credentials(anon_cred);
anon_cred = 0;
}
if (cert_cred) {
gnutls_certificate_free_credentials(cert_cred);
cert_cred = 0;
}
if (session) {
gnutls_deinit(session);
session = 0;
gnutls_global_deinit();
}
}
CSecurityTLS::~CSecurityTLS()
{
shutdown(true);
if (fis)
delete fis;
if (fos)
delete fos;
delete[] cafile;
delete[] crlfile;
}
bool CSecurityTLS::processMsg(CConnection* cc)
{
rdr::InStream* is = cc->getInStream();
rdr::OutStream* os = cc->getOutStream();
client = cc;
initGlobal();
if (!session) {
if (!is->checkNoWait(1))
return false;
if (is->readU8() == 0) {
rdr::U32 result = is->readU32();
CharArray reason;
if (result == secResultFailed || result == secResultTooMany)
reason.buf = is->readString();
else
reason.buf = strDup("Authentication failure (protocol error)");
throw AuthFailureException(reason.buf);
}
if (gnutls_init(&session, GNUTLS_CLIENT) != GNUTLS_E_SUCCESS)
throw AuthFailureException("gnutls_init failed");
if (gnutls_set_default_priority(session) != GNUTLS_E_SUCCESS)
throw AuthFailureException("gnutls_set_default_priority failed");
setParam();
}
rdr::TLSInStream *tlsis = new rdr::TLSInStream(is, session);
rdr::TLSOutStream *tlsos = new rdr::TLSOutStream(os, session);
int err;
err = gnutls_handshake(session);
if (err != GNUTLS_E_SUCCESS) {
delete tlsis;
delete tlsos;
if (!gnutls_error_is_fatal(err))
return false;
vlog.error("TLS Handshake failed: %s\n", gnutls_strerror (err));
shutdown(false);
throw AuthFailureException("TLS Handshake failed");
}
checkSession();
cc->setStreams(fis = tlsis, fos = tlsos);
return true;
}
void CSecurityTLS::setParam()
{
static const char kx_anon_priority[] = ":+ANON-ECDH:+ANON-DH";
int ret;
char *prio;
const char *err;
prio = (char*)malloc(strlen(Security::GnuTLSPriority) +
strlen(kx_anon_priority) + 1);
if (prio == NULL)
throw AuthFailureException("Not enough memory for GnuTLS priority string");
strcpy(prio, Security::GnuTLSPriority);
if (anon)
strcat(prio, kx_anon_priority);
ret = gnutls_priority_set_direct(session, prio, &err);
free(prio);
if (ret != GNUTLS_E_SUCCESS) {
if (ret == GNUTLS_E_INVALID_REQUEST)
vlog.error("GnuTLS priority syntax error at: %s", err);
throw AuthFailureException("gnutls_set_priority_direct failed");
}
if (anon) {
if (gnutls_anon_allocate_client_credentials(&anon_cred) != GNUTLS_E_SUCCESS)
throw AuthFailureException("gnutls_anon_allocate_client_credentials failed");
if (gnutls_credentials_set(session, GNUTLS_CRD_ANON, anon_cred) != GNUTLS_E_SUCCESS)
throw AuthFailureException("gnutls_credentials_set failed");
vlog.debug("Anonymous session has been set");
} else {
if (gnutls_certificate_allocate_credentials(&cert_cred) != GNUTLS_E_SUCCESS)
throw AuthFailureException("gnutls_certificate_allocate_credentials failed");
if (*cafile && gnutls_certificate_set_x509_trust_file(cert_cred,cafile,GNUTLS_X509_FMT_PEM) < 0)
throw AuthFailureException("load of CA cert failed");
/* Load previously saved certs */
char *homeDir = NULL;
int err;
if (getvnchomedir(&homeDir) == -1)
vlog.error("Could not obtain VNC home directory path");
else {
CharArray caSave(strlen(homeDir) + 19 + 1);
sprintf(caSave.buf, "%sx509_savedcerts.pem", homeDir);
delete [] homeDir;
err = gnutls_certificate_set_x509_trust_file(cert_cred, caSave.buf,
GNUTLS_X509_FMT_PEM);
if (err < 0)
vlog.debug("Failed to load saved server certificates from %s", caSave.buf);
}
if (*crlfile && gnutls_certificate_set_x509_crl_file(cert_cred,crlfile,GNUTLS_X509_FMT_PEM) < 0)
throw AuthFailureException("load of CRL failed");
if (gnutls_credentials_set(session, GNUTLS_CRD_CERTIFICATE, cert_cred) != GNUTLS_E_SUCCESS)
throw AuthFailureException("gnutls_credentials_set failed");
vlog.debug("X509 session has been set");
}
}
void CSecurityTLS::checkSession()
{
const unsigned allowed_errors = GNUTLS_CERT_INVALID |
GNUTLS_CERT_SIGNER_NOT_FOUND |
GNUTLS_CERT_SIGNER_NOT_CA;
unsigned int status;
const gnutls_datum_t *cert_list;
unsigned int cert_list_size = 0;
int err;
gnutls_datum_t info;
if (anon)
return;
if (gnutls_certificate_type_get(session) != GNUTLS_CRT_X509)
throw AuthFailureException("unsupported certificate type");
err = gnutls_certificate_verify_peers2(session, &status);
if (err != 0) {
vlog.error("server certificate verification failed: %s", gnutls_strerror(err));
throw AuthFailureException("server certificate verification failed");
}
if (status & GNUTLS_CERT_REVOKED)
throw AuthFailureException("server certificate has been revoked");
#ifndef WITHOUT_X509_TIMES
if (status & GNUTLS_CERT_NOT_ACTIVATED)
throw AuthFailureException("server certificate has not been activated");
if (status & GNUTLS_CERT_EXPIRED) {
vlog.debug("server certificate has expired");
if (!msg->showMsgBox(UserMsgBox::M_YESNO, "certificate has expired",
"The certificate of the server has expired, "
"do you want to continue?"))
throw AuthFailureException("server certificate has expired");
}
#endif
/* Process other errors later */
cert_list = gnutls_certificate_get_peers(session, &cert_list_size);
if (!cert_list_size)
throw AuthFailureException("empty certificate chain");
/* Process only server's certificate, not issuer's certificate */
gnutls_x509_crt_t crt;
gnutls_x509_crt_init(&crt);
if (gnutls_x509_crt_import(crt, &cert_list[0], GNUTLS_X509_FMT_DER) < 0)
throw AuthFailureException("decoding of certificate failed");
if (gnutls_x509_crt_check_hostname(crt, client->getServerName()) == 0) {
char buf[255];
vlog.debug("hostname mismatch");
snprintf(buf, sizeof(buf), "Hostname (%s) does not match any certificate, "
"do you want to continue?", client->getServerName());
buf[sizeof(buf) - 1] = '\0';
if (!msg->showMsgBox(UserMsgBox::M_YESNO, "hostname mismatch", buf))
throw AuthFailureException("hostname mismatch");
}
if (status == 0) {
/* Everything is fine (hostname + verification) */
gnutls_x509_crt_deinit(crt);
return;
}
if (status & GNUTLS_CERT_INVALID)
vlog.debug("server certificate invalid");
if (status & GNUTLS_CERT_SIGNER_NOT_FOUND)
vlog.debug("server cert signer not found");
if (status & GNUTLS_CERT_SIGNER_NOT_CA)
vlog.debug("server cert signer not CA");
if ((status & (~allowed_errors)) != 0) {
/* No other errors are allowed */
vlog.debug("GNUTLS status of certificate verification: %u", status);
throw AuthFailureException("Invalid status of server certificate verification");
}
vlog.debug("Saved server certificates don't match");
if (gnutls_x509_crt_print(crt, GNUTLS_CRT_PRINT_ONELINE, &info)) {
/*
* GNUTLS doesn't correctly export gnutls_free symbol which is
* a function pointer. Linking with Visual Studio 2008 Express will
* fail when you call gnutls_free().
*/
#if WIN32
free(info.data);
#else
gnutls_free(info.data);
#endif
throw AuthFailureException("Could not find certificate to display");
}
size_t out_size = 0;
char *out_buf = NULL;
char *certinfo = NULL;
int len = 0;
vlog.debug("certificate issuer unknown");
len = snprintf(NULL, 0, "This certificate has been signed by an unknown "
"authority:\n\n%s\n\nDo you want to save it and "
"continue?\n ", info.data);
if (len < 0)
AuthFailureException("certificate decoding error");
vlog.debug("%s", info.data);
certinfo = new char[len];
if (certinfo == NULL)
throw AuthFailureException("Out of memory");
snprintf(certinfo, len, "This certificate has been signed by an unknown "
"authority:\n\n%s\n\nDo you want to save it and "
"continue? ", info.data);
for (int i = 0; i < len - 1; i++)
if (certinfo[i] == ',' && certinfo[i + 1] == ' ')
certinfo[i] = '\n';
if (!msg->showMsgBox(UserMsgBox::M_YESNO, "certificate issuer unknown",
certinfo)) {
delete [] certinfo;
throw AuthFailureException("certificate issuer unknown");
}
delete [] certinfo;
if (gnutls_x509_crt_export(crt, GNUTLS_X509_FMT_PEM, NULL, &out_size)
== GNUTLS_E_SHORT_MEMORY_BUFFER)
AuthFailureException("Out of memory");
// Save cert
out_buf = new char[out_size];
if (out_buf == NULL)
AuthFailureException("Out of memory");
if (gnutls_x509_crt_export(crt, GNUTLS_X509_FMT_PEM, out_buf, &out_size) < 0)
AuthFailureException("certificate issuer unknown, and certificate "
"export failed");
char *homeDir = NULL;
if (getvnchomedir(&homeDir) == -1)
vlog.error("Could not obtain VNC home directory path");
else {
FILE *f;
CharArray caSave(strlen(homeDir) + 1 + 19);
sprintf(caSave.buf, "%sx509_savedcerts.pem", homeDir);
delete [] homeDir;
f = fopen(caSave.buf, "a+");
if (!f)
msg->showMsgBox(UserMsgBox::M_OK, "certificate save failed",
"Could not save the certificate");
else {
fprintf(f, "%s\n", out_buf);
fclose(f);
}
}
delete [] out_buf;
gnutls_x509_crt_deinit(crt);
/*
* GNUTLS doesn't correctly export gnutls_free symbol which is
* a function pointer. Linking with Visual Studio 2008 Express will
* fail when you call gnutls_free().
*/
#if WIN32
free(info.data);
#else
gnutls_free(info.data);
#endif
}
|
/*
* Copyright (C) 2004 Red Hat Inc.
* Copyright (C) 2005 Martin Koegler
* Copyright (C) 2010 TigerVNC Team
* Copyright (C) 2010 m-privacy GmbH
*
* This is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This software is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this software; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
* USA.
*/
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif
#ifndef HAVE_GNUTLS
#error "This header should not be compiled without HAVE_GNUTLS defined"
#endif
#include <stdlib.h>
#ifndef WIN32
#include <unistd.h>
#endif
#include <rfb/CSecurityTLS.h>
#include <rfb/SSecurityVeNCrypt.h>
#include <rfb/CConnection.h>
#include <rfb/LogWriter.h>
#include <rfb/Exception.h>
#include <rfb/UserMsgBox.h>
#include <rdr/TLSInStream.h>
#include <rdr/TLSOutStream.h>
#include <os/os.h>
#include <gnutls/x509.h>
/*
* GNUTLS 2.6.5 and older didn't have some variables defined so don't use them.
* GNUTLS 1.X.X defined LIBGNUTLS_VERSION_NUMBER so treat it as "old" gnutls as
* well
*/
#if (defined(GNUTLS_VERSION_NUMBER) && GNUTLS_VERSION_NUMBER < 0x020606) || \
defined(LIBGNUTLS_VERSION_NUMBER)
#define WITHOUT_X509_TIMES
#endif
/* Ancient GNUTLS... */
#if !defined(GNUTLS_VERSION_NUMBER) && !defined(LIBGNUTLS_VERSION_NUMBER)
#define WITHOUT_X509_TIMES
#endif
using namespace rfb;
StringParameter CSecurityTLS::X509CA("X509CA", "X509 CA certificate", "", ConfViewer);
StringParameter CSecurityTLS::X509CRL("X509CRL", "X509 CRL file", "", ConfViewer);
static LogWriter vlog("TLS");
// Client-side TLS security type (VeNCrypt TLS*/X509* variants).
// _anon selects anonymous key exchange instead of X509 certificate
// authentication.  Throws AuthFailureException if GnuTLS cannot be
// initialised.
CSecurityTLS::CSecurityTLS(bool _anon) : session(0), anon_cred(0),
                                         anon(_anon), fis(0), fos(0)
{
  // Snapshot the configured CA / CRL paths; released in the destructor.
  cafile = X509CA.getData();
  crlfile = X509CRL.getData();
  // Balanced by gnutls_global_deinit() in the destructor.
  if (gnutls_global_init() != GNUTLS_E_SUCCESS)
    throw AuthFailureException("gnutls_global_init failed");
}
// Derive the default CA / CRL file locations from the per-user VNC
// configuration directory (e.g. "~/.vnc/x509_ca.pem") and install them
// as parameter defaults when no file already exists at those paths.
void CSecurityTLS::setDefaults()
{
  char* homeDir = NULL;

  if (getvnchomedir(&homeDir) == -1) {
    vlog.error("Could not obtain VNC home directory path");
    return;
  }

  int len = strlen(homeDir) + 1;
  CharArray caDefault(len + 11);   // room for "x509_ca.pem" + NUL
  CharArray crlDefault(len + 12);  // room for "x509_crl.pem" + NUL
  sprintf(caDefault.buf, "%sx509_ca.pem", homeDir);
  // BUG FIX: the format string was "%s509_crl.pem" (leading 'x' missing),
  // which produced a wrong default path such as "~/.vnc/509_crl.pem".
  sprintf(crlDefault.buf, "%sx509_crl.pem", homeDir);
  delete [] homeDir;

  if (!fileexists(caDefault.buf))
    X509CA.setDefaultStr(strdup(caDefault.buf));
  if (!fileexists(crlDefault.buf))
    X509CRL.setDefaultStr(strdup(crlDefault.buf));
}
// Tear down the TLS session and release the GnuTLS credential objects.
// When 'needbye' is set, a TLS close_notify is exchanged first so the
// peer sees a graceful shutdown; a failing gnutls_bye() is only logged.
void CSecurityTLS::shutdown(bool needbye)
{
  if (session != 0 && needbye) {
    if (gnutls_bye(session, GNUTLS_SHUT_RDWR) != GNUTLS_E_SUCCESS)
      vlog.error("gnutls_bye failed");
  }

  if (anon_cred != 0) {
    gnutls_anon_free_client_credentials(anon_cred);
    anon_cred = 0;
  }

  if (cert_cred != 0) {
    gnutls_certificate_free_credentials(cert_cred);
    cert_cred = 0;
  }

  if (session != 0) {
    gnutls_deinit(session);
    session = 0;
  }
}
// Destructor: send close_notify via shutdown(true), free the TLS stream
// wrappers and parameter strings, then drop the GnuTLS global state
// acquired in the constructor.
CSecurityTLS::~CSecurityTLS()
{
  shutdown(true);
  if (fis)
    delete fis;
  if (fos)
    delete fos;
  delete[] cafile;
  delete[] crlfile;
  gnutls_global_deinit();
}
// Drive the client side of the VeNCrypt TLS sub-handshake.  Called
// repeatedly until it returns true; on completion the connection's
// streams are replaced with TLS-wrapping streams.  Returns false when
// more data is needed (would-block), throws on protocol/auth failure.
bool CSecurityTLS::processMsg(CConnection* cc)
{
  rdr::InStream* is = cc->getInStream();
  rdr::OutStream* os = cc->getOutStream();
  client = cc;
  if (!session) {
    // First pass: wait for the server's one-byte "TLS ready" reply.
    if (!is->checkNoWait(1))
      return false;
    if (is->readU8() == 0) {
      // Server refused TLS; a result code and possibly a reason follow.
      rdr::U32 result = is->readU32();
      CharArray reason;
      if (result == secResultFailed || result == secResultTooMany)
        reason.buf = is->readString();
      else
        reason.buf = strDup("Authentication failure (protocol error)");
      throw AuthFailureException(reason.buf);
    }
    if (gnutls_init(&session, GNUTLS_CLIENT) != GNUTLS_E_SUCCESS)
      throw AuthFailureException("gnutls_init failed");
    if (gnutls_set_default_priority(session) != GNUTLS_E_SUCCESS)
      throw AuthFailureException("gnutls_set_default_priority failed");
    setParam();
  }
  // Wrap the raw streams so gnutls_handshake() can push/pull through them.
  rdr::TLSInStream *tlsis = new rdr::TLSInStream(is, session);
  rdr::TLSOutStream *tlsos = new rdr::TLSOutStream(os, session);
  int err;
  err = gnutls_handshake(session);
  if (err != GNUTLS_E_SUCCESS) {
    delete tlsis;
    delete tlsos;
    // Non-fatal errors (e.g. would-block) mean "call processMsg again".
    if (!gnutls_error_is_fatal(err))
      return false;
    vlog.error("TLS Handshake failed: %s\n", gnutls_strerror (err));
    shutdown(false);  // no close_notify: the handshake never completed
    throw AuthFailureException("TLS Handshake failed");
  }
  // Verify the server certificate (no-op for anonymous TLS).
  checkSession();
  cc->setStreams(fis = tlsis, fos = tlsos);
  return true;
}
// Configure the client TLS session: the priority string (optionally
// extended with anonymous key-exchange methods) and the matching
// credentials.  In X509 mode the configured CA file, any previously
// user-approved server certificates, and the optional CRL are loaded
// into the trust store.
void CSecurityTLS::setParam()
{
  static const char kx_anon_priority[] = ":+ANON-ECDH:+ANON-DH";
  int ret;
  char *prio;
  const char *err;
  // Build "<configured priority>[:+ANON-ECDH:+ANON-DH]".
  prio = (char*)malloc(strlen(Security::GnuTLSPriority) +
                       strlen(kx_anon_priority) + 1);
  if (prio == NULL)
    throw AuthFailureException("Not enough memory for GnuTLS priority string");
  strcpy(prio, Security::GnuTLSPriority);
  if (anon)
    strcat(prio, kx_anon_priority);
  ret = gnutls_priority_set_direct(session, prio, &err);
  free(prio);
  if (ret != GNUTLS_E_SUCCESS) {
    // 'err' points into the priority string at the offending token.
    if (ret == GNUTLS_E_INVALID_REQUEST)
      vlog.error("GnuTLS priority syntax error at: %s", err);
    throw AuthFailureException("gnutls_set_priority_direct failed");
  }
  if (anon) {
    if (gnutls_anon_allocate_client_credentials(&anon_cred) != GNUTLS_E_SUCCESS)
      throw AuthFailureException("gnutls_anon_allocate_client_credentials failed");
    if (gnutls_credentials_set(session, GNUTLS_CRD_ANON, anon_cred) != GNUTLS_E_SUCCESS)
      throw AuthFailureException("gnutls_credentials_set failed");
    vlog.debug("Anonymous session has been set");
  } else {
    if (gnutls_certificate_allocate_credentials(&cert_cred) != GNUTLS_E_SUCCESS)
      throw AuthFailureException("gnutls_certificate_allocate_credentials failed");
    if (*cafile && gnutls_certificate_set_x509_trust_file(cert_cred,cafile,GNUTLS_X509_FMT_PEM) < 0)
      throw AuthFailureException("load of CA cert failed");
    /* Load previously saved certs */
    char *homeDir = NULL;
    int err;  // NOTE: intentionally shadows the outer 'const char *err'
    if (getvnchomedir(&homeDir) == -1)
      vlog.error("Could not obtain VNC home directory path");
    else {
      CharArray caSave(strlen(homeDir) + 19 + 1);  // "x509_savedcerts.pem" is 19 chars
      sprintf(caSave.buf, "%sx509_savedcerts.pem", homeDir);
      delete [] homeDir;
      // Failure here is expected when no certificate has been saved yet.
      err = gnutls_certificate_set_x509_trust_file(cert_cred, caSave.buf,
                                                   GNUTLS_X509_FMT_PEM);
      if (err < 0)
        vlog.debug("Failed to load saved server certificates from %s", caSave.buf);
    }
    if (*crlfile && gnutls_certificate_set_x509_crl_file(cert_cred,crlfile,GNUTLS_X509_FMT_PEM) < 0)
      throw AuthFailureException("load of CRL failed");
    if (gnutls_credentials_set(session, GNUTLS_CRD_CERTIFICATE, cert_cred) != GNUTLS_E_SUCCESS)
      throw AuthFailureException("gnutls_credentials_set failed");
    vlog.debug("X509 session has been set");
  }
}
// Validate the server's X509 certificate after the TLS handshake.
// Certain verification failures (expired certificate, unknown issuer,
// hostname mismatch) may be overridden interactively by the user; an
// accepted unknown-issuer certificate is appended to the local
// "x509_savedcerts.pem" file so it is trusted on later connections.
// Throws AuthFailureException on any unrecoverable or rejected failure.
void CSecurityTLS::checkSession()
{
  const unsigned allowed_errors = GNUTLS_CERT_INVALID |
                                  GNUTLS_CERT_SIGNER_NOT_FOUND |
                                  GNUTLS_CERT_SIGNER_NOT_CA;
  unsigned int status;
  const gnutls_datum_t *cert_list;
  unsigned int cert_list_size = 0;
  int err;
  gnutls_datum_t info;
  if (anon)
    return;  // anonymous TLS has no certificate to verify
  if (gnutls_certificate_type_get(session) != GNUTLS_CRT_X509)
    throw AuthFailureException("unsupported certificate type");
  err = gnutls_certificate_verify_peers2(session, &status);
  if (err != 0) {
    vlog.error("server certificate verification failed: %s", gnutls_strerror(err));
    throw AuthFailureException("server certificate verification failed");
  }
  if (status & GNUTLS_CERT_REVOKED)
    throw AuthFailureException("server certificate has been revoked");
#ifndef WITHOUT_X509_TIMES
  if (status & GNUTLS_CERT_NOT_ACTIVATED)
    throw AuthFailureException("server certificate has not been activated");
  if (status & GNUTLS_CERT_EXPIRED) {
    vlog.debug("server certificate has expired");
    if (!msg->showMsgBox(UserMsgBox::M_YESNO, "certificate has expired",
                         "The certificate of the server has expired, "
                         "do you want to continue?"))
      throw AuthFailureException("server certificate has expired");
  }
#endif
  /* Process other errors later */
  cert_list = gnutls_certificate_get_peers(session, &cert_list_size);
  if (!cert_list_size)
    throw AuthFailureException("empty certificate chain");
  /* Process only server's certificate, not issuer's certificate */
  gnutls_x509_crt_t crt;
  gnutls_x509_crt_init(&crt);
  if (gnutls_x509_crt_import(crt, &cert_list[0], GNUTLS_X509_FMT_DER) < 0)
    throw AuthFailureException("decoding of certificate failed");
  if (gnutls_x509_crt_check_hostname(crt, client->getServerName()) == 0) {
    char buf[255];
    vlog.debug("hostname mismatch");
    snprintf(buf, sizeof(buf), "Hostname (%s) does not match any certificate, "
                               "do you want to continue?", client->getServerName());
    buf[sizeof(buf) - 1] = '\0';
    if (!msg->showMsgBox(UserMsgBox::M_YESNO, "hostname mismatch", buf))
      throw AuthFailureException("hostname mismatch");
  }
  if (status == 0) {
    /* Everything is fine (hostname + verification) */
    gnutls_x509_crt_deinit(crt);
    return;
  }
  if (status & GNUTLS_CERT_INVALID)
    vlog.debug("server certificate invalid");
  if (status & GNUTLS_CERT_SIGNER_NOT_FOUND)
    vlog.debug("server cert signer not found");
  if (status & GNUTLS_CERT_SIGNER_NOT_CA)
    vlog.debug("server cert signer not CA");
  if ((status & (~allowed_errors)) != 0) {
    /* No other errors are allowed */
    vlog.debug("GNUTLS status of certificate verification: %u", status);
    throw AuthFailureException("Invalid status of server certificate verification");
  }
  vlog.debug("Saved server certificates don't match");
  if (gnutls_x509_crt_print(crt, GNUTLS_CRT_PRINT_ONELINE, &info)) {
    /*
     * GNUTLS doesn't correctly export gnutls_free symbol which is
     * a function pointer. Linking with Visual Studio 2008 Express will
     * fail when you call gnutls_free().
     */
#if WIN32
    free(info.data);
#else
    gnutls_free(info.data);
#endif
    throw AuthFailureException("Could not find certificate to display");
  }
  size_t out_size = 0;
  char *out_buf = NULL;
  char *certinfo = NULL;
  int len = 0;
  vlog.debug("certificate issuer unknown");
  /* Measure the prompt with the SAME format string used for the actual
   * write below.  snprintf(NULL, 0, ...) returns the length excluding
   * the terminating NUL, so one extra byte is allocated.
   * BUG FIX: the old code measured with a longer probe string ("...\n ")
   * and then allocated new char[len] while writing a shorter string —
   * an off-by-one scheme that only worked by accident. */
  len = snprintf(NULL, 0, "This certificate has been signed by an unknown "
                          "authority:\n\n%s\n\nDo you want to save it and "
                          "continue? ", info.data);
  if (len < 0)
    throw AuthFailureException("certificate decoding error");  // BUG FIX: 'throw' was missing
  vlog.debug("%s", info.data);
  certinfo = new char[len + 1];  // new[] throws std::bad_alloc on failure
  snprintf(certinfo, len + 1, "This certificate has been signed by an unknown "
                              "authority:\n\n%s\n\nDo you want to save it and "
                              "continue? ", info.data);
  /* Break the one-line subject/issuer string up for readable display */
  for (int i = 0; i < len - 1; i++)
    if (certinfo[i] == ',' && certinfo[i + 1] == ' ')
      certinfo[i] = '\n';
  if (!msg->showMsgBox(UserMsgBox::M_YESNO, "certificate issuer unknown",
                       certinfo)) {
    delete [] certinfo;
    throw AuthFailureException("certificate issuer unknown");
  }
  delete [] certinfo;
  /* First export call only queries the required buffer size; returning
   * GNUTLS_E_SHORT_MEMORY_BUFFER here is the expected outcome.
   * BUG FIX: the old code paired that expected return with a discarded
   * AuthFailureException temporary — dead code that would have broken
   * saving entirely had the missing 'throw' ever been added. */
  gnutls_x509_crt_export(crt, GNUTLS_X509_FMT_PEM, NULL, &out_size);
  // Save cert
  out_buf = new char[out_size];
  if (gnutls_x509_crt_export(crt, GNUTLS_X509_FMT_PEM, out_buf, &out_size) < 0) {
    delete [] out_buf;  // BUG FIX: 'throw' was missing; also avoid leaking out_buf
    throw AuthFailureException("certificate issuer unknown, and certificate "
                               "export failed");
  }
  char *homeDir = NULL;
  if (getvnchomedir(&homeDir) == -1)
    vlog.error("Could not obtain VNC home directory path");
  else {
    FILE *f;
    CharArray caSave(strlen(homeDir) + 1 + 19);
    sprintf(caSave.buf, "%sx509_savedcerts.pem", homeDir);
    delete [] homeDir;
    f = fopen(caSave.buf, "a+");
    if (!f)
      msg->showMsgBox(UserMsgBox::M_OK, "certificate save failed",
                      "Could not save the certificate");
    else {
      fprintf(f, "%s\n", out_buf);
      fclose(f);
    }
  }
  delete [] out_buf;
  gnutls_x509_crt_deinit(crt);
  /*
   * GNUTLS doesn't correctly export gnutls_free symbol which is
   * a function pointer. Linking with Visual Studio 2008 Express will
   * fail when you call gnutls_free().
   */
#if WIN32
  free(info.data);
#else
  gnutls_free(info.data);
#endif
}
|
4842_0
|
crossvul
|
cxx
|
CWE-119
|
Improper Restriction of Operations within the Bounds of a Memory Buffer - Certain languages allow direct addressing of memory locations and do not automatically ensure that these locations are valid for the memory buffer that is being referenced.
|
cpp
|
/*
* Copyright (C) 2004 Red Hat Inc.
* Copyright (C) 2005 Martin Koegler
* Copyright (C) 2010 TigerVNC Team
*
* This is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This software is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this software; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
* USA.
*/
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif
#ifndef HAVE_GNUTLS
#error "This source should not be compiled without HAVE_GNUTLS defined"
#endif
#include <stdlib.h>
#include <rfb/SSecurityTLS.h>
#include <rfb/SConnection.h>
#include <rfb/LogWriter.h>
#include <rfb/Exception.h>
#include <rdr/TLSInStream.h>
#include <rdr/TLSOutStream.h>
#define DH_BITS 1024 /* XXX This should be configurable! */
using namespace rfb;
StringParameter SSecurityTLS::X509_CertFile
("X509Cert", "Path to the X509 certificate in PEM format", "", ConfServer);
StringParameter SSecurityTLS::X509_KeyFile
("X509Key", "Path to the key of the X509 certificate in PEM format", "", ConfServer);
static LogWriter vlog("TLS");
// One-time, process-wide GnuTLS initialisation; subsequent calls are
// no-ops.  If gnutls_global_init() throws, the guard flag stays false
// so a later call will retry.
void SSecurityTLS::initGlobal()
{
  static bool globalInitDone = false;
  if (globalInitDone)
    return;
  if (gnutls_global_init() != GNUTLS_E_SUCCESS)
    throw AuthFailureException("gnutls_global_init failed");
  globalInitDone = true;
}
// Server-side TLS security type.  _anon selects anonymous key exchange
// instead of X509 certificate authentication.
SSecurityTLS::SSecurityTLS(bool _anon) : session(0), dh_params(0),
                                         anon_cred(0), cert_cred(0),
                                         anon(_anon), fis(0), fos(0)
{
  // Snapshot configured certificate/key paths; freed in the destructor.
  certfile = X509_CertFile.getData();
  keyfile = X509_KeyFile.getData();
}
// Gracefully close the TLS session and free every GnuTLS object that
// setParams() allocated (DH parameters and server credentials).
void SSecurityTLS::shutdown()
{
  if (session) {
    if (gnutls_bye(session, GNUTLS_SHUT_RDWR) != GNUTLS_E_SUCCESS) {
      /* FIXME: Treat as non-fatal error */
      vlog.error("TLS session wasn't terminated gracefully");
    }
  }
  if (dh_params) {
    gnutls_dh_params_deinit(dh_params);
    dh_params = 0;
  }
  if (anon_cred) {
    gnutls_anon_free_server_credentials(anon_cred);
    anon_cred = 0;
  }
  if (cert_cred) {
    gnutls_certificate_free_credentials(cert_cred);
    cert_cred = 0;
  }
  if (session) {
    gnutls_deinit(session);
    session = 0;
    // NOTE(review): initGlobal() calls gnutls_global_init() only once per
    // process, yet this deinit runs for every session that shuts down —
    // verify the init/deinit pairing is balanced for multiple sessions.
    gnutls_global_deinit();
  }
}
// Destructor: close the session (sending close_notify via shutdown())
// and release the stream wrappers and parameter strings.
SSecurityTLS::~SSecurityTLS()
{
  shutdown();
  if (fis)
    delete fis;
  if (fos)
    delete fos;
  delete[] keyfile;
  delete[] certfile;
}
// Drive the server side of the VeNCrypt TLS sub-handshake.  Returns
// false while the handshake is still in progress (non-fatal GnuTLS
// errors) and true once it has completed and the connection's streams
// have been swapped for TLS-wrapping ones.
bool SSecurityTLS::processMsg(SConnection *sc)
{
  rdr::InStream* is = sc->getInStream();
  rdr::OutStream* os = sc->getOutStream();
  vlog.debug("Process security message (session %p)", session);
  if (!session) {
    initGlobal();
    if (gnutls_init(&session, GNUTLS_SERVER) != GNUTLS_E_SUCCESS)
      throw AuthFailureException("gnutls_init failed");
    if (gnutls_set_default_priority(session) != GNUTLS_E_SUCCESS)
      throw AuthFailureException("gnutls_set_default_priority failed");
    try {
      setParams(session);
    }
    catch(...) {
      // Tell the client TLS setup failed before propagating the error.
      os->writeU8(0);
      throw;
    }
    // One-byte "TLS ready" acknowledgement expected by the client.
    os->writeU8(1);
    os->flush();
  }
  // Wrap the raw streams so gnutls_handshake() can push/pull through them.
  rdr::TLSInStream *tlsis = new rdr::TLSInStream(is, session);
  rdr::TLSOutStream *tlsos = new rdr::TLSOutStream(os, session);
  int err;
  err = gnutls_handshake(session);
  if (err != GNUTLS_E_SUCCESS) {
    delete tlsis;
    delete tlsos;
    // Non-fatal errors (e.g. would-block) mean "call processMsg again".
    if (!gnutls_error_is_fatal(err)) {
      vlog.debug("Deferring completion of TLS handshake: %s", gnutls_strerror(err));
      return false;
    }
    vlog.error("TLS Handshake failed: %s", gnutls_strerror (err));
    shutdown();
    throw AuthFailureException("TLS Handshake failed");
  }
  vlog.debug("Handshake completed");
  sc->setStreams(fis = tlsis, fos = tlsos);
  return true;
}
// Configure the server TLS session: the priority string (optionally
// extended with anonymous key-exchange methods), freshly generated DH
// parameters, and either anonymous or X509 certificate credentials.
void SSecurityTLS::setParams(gnutls_session_t session)
{
  static const char kx_anon_priority[] = ":+ANON-ECDH:+ANON-DH";
  int ret;
  char *prio;
  const char *err;
  // Build "<configured priority>[:+ANON-ECDH:+ANON-DH]".
  prio = (char*)malloc(strlen(Security::GnuTLSPriority) +
                       strlen(kx_anon_priority) + 1);
  if (prio == NULL)
    throw AuthFailureException("Not enough memory for GnuTLS priority string");
  strcpy(prio, Security::GnuTLSPriority);
  if (anon)
    strcat(prio, kx_anon_priority);
  ret = gnutls_priority_set_direct(session, prio, &err);
  free(prio);
  if (ret != GNUTLS_E_SUCCESS) {
    // 'err' points into the priority string at the offending token.
    if (ret == GNUTLS_E_INVALID_REQUEST)
      vlog.error("GnuTLS priority syntax error at: %s", err);
    throw AuthFailureException("gnutls_set_priority_direct failed");
  }
  // Fresh DH parameters per session; generation can be slow.
  if (gnutls_dh_params_init(&dh_params) != GNUTLS_E_SUCCESS)
    throw AuthFailureException("gnutls_dh_params_init failed");
  if (gnutls_dh_params_generate2(dh_params, DH_BITS) != GNUTLS_E_SUCCESS)
    throw AuthFailureException("gnutls_dh_params_generate2 failed");
  if (anon) {
    if (gnutls_anon_allocate_server_credentials(&anon_cred) != GNUTLS_E_SUCCESS)
      throw AuthFailureException("gnutls_anon_allocate_server_credentials failed");
    gnutls_anon_set_server_dh_params(anon_cred, dh_params);
    if (gnutls_credentials_set(session, GNUTLS_CRD_ANON, anon_cred)
        != GNUTLS_E_SUCCESS)
      throw AuthFailureException("gnutls_credentials_set failed");
    vlog.debug("Anonymous session has been set");
  } else {
    if (gnutls_certificate_allocate_credentials(&cert_cred) != GNUTLS_E_SUCCESS)
      throw AuthFailureException("gnutls_certificate_allocate_credentials failed");
    gnutls_certificate_set_dh_params(cert_cred, dh_params);
    if (gnutls_certificate_set_x509_key_file(cert_cred, certfile, keyfile,
                                             GNUTLS_X509_FMT_PEM) != GNUTLS_E_SUCCESS)
      throw AuthFailureException("load of key failed");
    if (gnutls_credentials_set(session, GNUTLS_CRD_CERTIFICATE, cert_cred)
        != GNUTLS_E_SUCCESS)
      throw AuthFailureException("gnutls_credentials_set failed");
    vlog.debug("X509 session has been set");
  }
}
|
/*
* Copyright (C) 2004 Red Hat Inc.
* Copyright (C) 2005 Martin Koegler
* Copyright (C) 2010 TigerVNC Team
*
* This is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This software is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this software; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
* USA.
*/
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif
#ifndef HAVE_GNUTLS
#error "This source should not be compiled without HAVE_GNUTLS defined"
#endif
#include <stdlib.h>
#include <rfb/SSecurityTLS.h>
#include <rfb/SConnection.h>
#include <rfb/LogWriter.h>
#include <rfb/Exception.h>
#include <rdr/TLSInStream.h>
#include <rdr/TLSOutStream.h>
#define DH_BITS 1024 /* XXX This should be configurable! */
using namespace rfb;
StringParameter SSecurityTLS::X509_CertFile
("X509Cert", "Path to the X509 certificate in PEM format", "", ConfServer);
StringParameter SSecurityTLS::X509_KeyFile
("X509Key", "Path to the key of the X509 certificate in PEM format", "", ConfServer);
static LogWriter vlog("TLS");
// Server-side TLS security type.  _anon selects anonymous key exchange
// instead of X509 certificate authentication.  Throws if GnuTLS cannot
// be initialised.
SSecurityTLS::SSecurityTLS(bool _anon) : session(0), dh_params(0),
                                         anon_cred(0), cert_cred(0),
                                         anon(_anon), fis(0), fos(0)
{
  // Snapshot configured certificate/key paths; freed in the destructor.
  certfile = X509_CertFile.getData();
  keyfile = X509_KeyFile.getData();
  // Balanced by gnutls_global_deinit() in the destructor.
  if (gnutls_global_init() != GNUTLS_E_SUCCESS)
    throw AuthFailureException("gnutls_global_init failed");
}
// Gracefully close the TLS session and free every GnuTLS object that
// setParams() allocated (DH parameters and server credentials).
// Global GnuTLS teardown is left to the destructor.
void SSecurityTLS::shutdown()
{
  if (session) {
    if (gnutls_bye(session, GNUTLS_SHUT_RDWR) != GNUTLS_E_SUCCESS) {
      /* FIXME: Treat as non-fatal error */
      vlog.error("TLS session wasn't terminated gracefully");
    }
  }
  if (dh_params) {
    gnutls_dh_params_deinit(dh_params);
    dh_params = 0;
  }
  if (anon_cred) {
    gnutls_anon_free_server_credentials(anon_cred);
    anon_cred = 0;
  }
  if (cert_cred) {
    gnutls_certificate_free_credentials(cert_cred);
    cert_cred = 0;
  }
  if (session) {
    gnutls_deinit(session);
    session = 0;
  }
}
// Destructor: close the session via shutdown(), release the stream
// wrappers and parameter strings, then drop the GnuTLS global state
// acquired in the constructor.
SSecurityTLS::~SSecurityTLS()
{
  shutdown();
  if (fis)
    delete fis;
  if (fos)
    delete fos;
  delete[] keyfile;
  delete[] certfile;
  gnutls_global_deinit();
}
// Drive the server side of the VeNCrypt TLS sub-handshake.  Returns
// false while the handshake is still in progress (non-fatal GnuTLS
// errors) and true once it has completed and the connection's streams
// have been swapped for TLS-wrapping ones.
bool SSecurityTLS::processMsg(SConnection *sc)
{
  rdr::InStream* is = sc->getInStream();
  rdr::OutStream* os = sc->getOutStream();
  vlog.debug("Process security message (session %p)", session);
  if (!session) {
    if (gnutls_init(&session, GNUTLS_SERVER) != GNUTLS_E_SUCCESS)
      throw AuthFailureException("gnutls_init failed");
    if (gnutls_set_default_priority(session) != GNUTLS_E_SUCCESS)
      throw AuthFailureException("gnutls_set_default_priority failed");
    try {
      setParams(session);
    }
    catch(...) {
      // Tell the client TLS setup failed before propagating the error.
      os->writeU8(0);
      throw;
    }
    // One-byte "TLS ready" acknowledgement expected by the client.
    os->writeU8(1);
    os->flush();
  }
  // Wrap the raw streams so gnutls_handshake() can push/pull through them.
  rdr::TLSInStream *tlsis = new rdr::TLSInStream(is, session);
  rdr::TLSOutStream *tlsos = new rdr::TLSOutStream(os, session);
  int err;
  err = gnutls_handshake(session);
  if (err != GNUTLS_E_SUCCESS) {
    delete tlsis;
    delete tlsos;
    // Non-fatal errors (e.g. would-block) mean "call processMsg again".
    if (!gnutls_error_is_fatal(err)) {
      vlog.debug("Deferring completion of TLS handshake: %s", gnutls_strerror(err));
      return false;
    }
    vlog.error("TLS Handshake failed: %s", gnutls_strerror (err));
    shutdown();
    throw AuthFailureException("TLS Handshake failed");
  }
  vlog.debug("Handshake completed");
  sc->setStreams(fis = tlsis, fos = tlsos);
  return true;
}
// Configure the server TLS session: the priority string (optionally
// extended with anonymous key-exchange methods), freshly generated DH
// parameters, and either anonymous or X509 certificate credentials.
void SSecurityTLS::setParams(gnutls_session_t session)
{
  static const char kx_anon_priority[] = ":+ANON-ECDH:+ANON-DH";
  int ret;
  char *prio;
  const char *err;
  // Build "<configured priority>[:+ANON-ECDH:+ANON-DH]".
  prio = (char*)malloc(strlen(Security::GnuTLSPriority) +
                       strlen(kx_anon_priority) + 1);
  if (prio == NULL)
    throw AuthFailureException("Not enough memory for GnuTLS priority string");
  strcpy(prio, Security::GnuTLSPriority);
  if (anon)
    strcat(prio, kx_anon_priority);
  ret = gnutls_priority_set_direct(session, prio, &err);
  free(prio);
  if (ret != GNUTLS_E_SUCCESS) {
    // 'err' points into the priority string at the offending token.
    if (ret == GNUTLS_E_INVALID_REQUEST)
      vlog.error("GnuTLS priority syntax error at: %s", err);
    throw AuthFailureException("gnutls_set_priority_direct failed");
  }
  // Fresh DH parameters per session; generation can be slow.
  if (gnutls_dh_params_init(&dh_params) != GNUTLS_E_SUCCESS)
    throw AuthFailureException("gnutls_dh_params_init failed");
  if (gnutls_dh_params_generate2(dh_params, DH_BITS) != GNUTLS_E_SUCCESS)
    throw AuthFailureException("gnutls_dh_params_generate2 failed");
  if (anon) {
    if (gnutls_anon_allocate_server_credentials(&anon_cred) != GNUTLS_E_SUCCESS)
      throw AuthFailureException("gnutls_anon_allocate_server_credentials failed");
    gnutls_anon_set_server_dh_params(anon_cred, dh_params);
    if (gnutls_credentials_set(session, GNUTLS_CRD_ANON, anon_cred)
        != GNUTLS_E_SUCCESS)
      throw AuthFailureException("gnutls_credentials_set failed");
    vlog.debug("Anonymous session has been set");
  } else {
    if (gnutls_certificate_allocate_credentials(&cert_cred) != GNUTLS_E_SUCCESS)
      throw AuthFailureException("gnutls_certificate_allocate_credentials failed");
    gnutls_certificate_set_dh_params(cert_cred, dh_params);
    if (gnutls_certificate_set_x509_key_file(cert_cred, certfile, keyfile,
                                             GNUTLS_X509_FMT_PEM) != GNUTLS_E_SUCCESS)
      throw AuthFailureException("load of key failed");
    if (gnutls_credentials_set(session, GNUTLS_CRD_CERTIFICATE, cert_cred)
        != GNUTLS_E_SUCCESS)
      throw AuthFailureException("gnutls_credentials_set failed");
    vlog.debug("X509 session has been set");
  }
}
|
4842_2
|
crossvul
|
cxx
|
CWE-119
|
Improper Restriction of Operations within the Bounds of a Memory Buffer - Certain languages allow direct addressing of memory locations and do not automatically ensure that these locations are valid for the memory buffer that is being referenced.
|
java
|
/*
* Copyright 2012 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package org.jboss.netty.handler.ssl;
import org.jboss.netty.buffer.ChannelBuffer;
import org.jboss.netty.buffer.ChannelBufferFactory;
import org.jboss.netty.buffer.ChannelBuffers;
import org.jboss.netty.channel.Channel;
import org.jboss.netty.channel.ChannelDownstreamHandler;
import org.jboss.netty.channel.ChannelEvent;
import org.jboss.netty.channel.ChannelFuture;
import org.jboss.netty.channel.ChannelFutureListener;
import org.jboss.netty.channel.ChannelHandlerContext;
import org.jboss.netty.channel.ChannelPipeline;
import org.jboss.netty.channel.ChannelStateEvent;
import org.jboss.netty.channel.Channels;
import org.jboss.netty.channel.DefaultChannelFuture;
import org.jboss.netty.channel.DownstreamMessageEvent;
import org.jboss.netty.channel.ExceptionEvent;
import org.jboss.netty.channel.MessageEvent;
import org.jboss.netty.handler.codec.frame.FrameDecoder;
import org.jboss.netty.logging.InternalLogger;
import org.jboss.netty.logging.InternalLoggerFactory;
import org.jboss.netty.util.Timeout;
import org.jboss.netty.util.Timer;
import org.jboss.netty.util.TimerTask;
import org.jboss.netty.util.internal.DetectionUtil;
import org.jboss.netty.util.internal.NonReentrantLock;
import javax.net.ssl.SSLEngine;
import javax.net.ssl.SSLEngineResult;
import javax.net.ssl.SSLEngineResult.HandshakeStatus;
import javax.net.ssl.SSLEngineResult.Status;
import javax.net.ssl.SSLException;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.ClosedChannelException;
import java.nio.channels.DatagramChannel;
import java.nio.channels.SocketChannel;
import java.util.ArrayList;
import java.util.LinkedList;
import java.util.List;
import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.Executor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicIntegerFieldUpdater;
import java.util.regex.Pattern;
import static org.jboss.netty.channel.Channels.*;
/**
* Adds <a href="http://en.wikipedia.org/wiki/Transport_Layer_Security">SSL
* · TLS</a> and StartTLS support to a {@link Channel}. Please refer
* to the <strong>"SecureChat"</strong> example in the distribution or the web
* site for the detailed usage.
*
* <h3>Beginning the handshake</h3>
* <p>
* You must make sure not to write a message while the
* {@linkplain #handshake() handshake} is in progress unless you are
* renegotiating. You will be notified by the {@link ChannelFuture} which is
* returned by the {@link #handshake()} method when the handshake
* process succeeds or fails.
*
* <h3>Handshake</h3>
* <p>
* If {@link #isIssueHandshake()} is {@code false}
* (default) you will need to take care of calling {@link #handshake()} by your own. In most
* situations were {@link SslHandler} is used in 'client mode' you want to issue a handshake once
* the connection was established. if {@link #setIssueHandshake(boolean)} is set to {@code true}
* you don't need to worry about this as the {@link SslHandler} will take care of it.
* <p>
*
* <h3>Renegotiation</h3>
* <p>
* If {@link #isEnableRenegotiation() enableRenegotiation} is {@code true}
* (default) and the initial handshake has been done successfully, you can call
* {@link #handshake()} to trigger the renegotiation.
* <p>
* If {@link #isEnableRenegotiation() enableRenegotiation} is {@code false},
* an attempt to trigger renegotiation will result in the connection closure.
* <p>
* Please note that TLS renegotiation had a security issue before. If your
* runtime environment did not fix it, please make sure to disable TLS
* renegotiation by calling {@link #setEnableRenegotiation(boolean)} with
* {@code false}. For more information, please refer to the following documents:
* <ul>
* <li><a href="http://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2009-3555">CVE-2009-3555</a></li>
* <li><a href="http://www.ietf.org/rfc/rfc5746.txt">RFC5746</a></li>
* <li><a href="http://www.oracle.com/technetwork/java/javase/documentation/tlsreadme2-176330.html">Phased
* Approach to Fixing the TLS Renegotiation Issue</a></li>
* </ul>
*
* <h3>Closing the session</h3>
* <p>
* To close the SSL session, the {@link #close()} method should be
* called to send the {@code close_notify} message to the remote peer. One
* exception is when you close the {@link Channel} - {@link SslHandler}
* intercepts the close request and send the {@code close_notify} message
* before the channel closure automatically. Once the SSL session is closed,
* it is not reusable, and consequently you should create a new
* {@link SslHandler} with a new {@link SSLEngine} as explained in the
* following section.
*
* <h3>Restarting the session</h3>
* <p>
* To restart the SSL session, you must remove the existing closed
* {@link SslHandler} from the {@link ChannelPipeline}, insert a new
* {@link SslHandler} with a new {@link SSLEngine} into the pipeline,
* and start the handshake process as described in the first section.
*
* <h3>Implementing StartTLS</h3>
* <p>
* <a href="http://en.wikipedia.org/wiki/STARTTLS">StartTLS</a> is the
* communication pattern that secures the wire in the middle of the plaintext
* connection. Please note that it is different from SSL · TLS, that
* secures the wire from the beginning of the connection. Typically, StartTLS
* is composed of three steps:
* <ol>
* <li>Client sends a StartTLS request to server.</li>
* <li>Server sends a StartTLS response to client.</li>
* <li>Client begins SSL handshake.</li>
* </ol>
* If you implement a server, you need to:
* <ol>
* <li>create a new {@link SslHandler} instance with {@code startTls} flag set
* to {@code true},</li>
* <li>insert the {@link SslHandler} to the {@link ChannelPipeline}, and</li>
* <li>write a StartTLS response.</li>
* </ol>
* Please note that you must insert {@link SslHandler} <em>before</em> sending
* the StartTLS response. Otherwise the client can send begin SSL handshake
* before {@link SslHandler} is inserted to the {@link ChannelPipeline}, causing
* data corruption.
* <p>
* The client-side implementation is much simpler.
* <ol>
* <li>Write a StartTLS request,</li>
* <li>wait for the StartTLS response,</li>
* <li>create a new {@link SslHandler} instance with {@code startTls} flag set
* to {@code false},</li>
* <li>insert the {@link SslHandler} to the {@link ChannelPipeline}, and</li>
* <li>Initiate SSL handshake by calling {@link SslHandler#handshake()}.</li>
* </ol>
*
* <h3>Known issues</h3>
* <p>
* Because of a known issue with the current implementation of the SslEngine that comes
* with Java it may be possible that you see blocked IO-Threads while a full GC is done.
* <p>
* So if you are affected you can workaround this problem by adjust the cache settings
* like shown below:
*
* <pre>
* SslContext context = ...;
* context.getServerSessionContext().setSessionCacheSize(someSaneSize);
* context.getServerSessionContext().setSessionTime(someSameTimeout);
* </pre>
* <p>
* What values to use here depends on the nature of your application and should be set
* based on monitoring and debugging of it.
* For more details see
* <a href="https://github.com/netty/netty/issues/832">#832</a> in our issue tracker.
* @apiviz.landmark
* @apiviz.uses org.jboss.netty.handler.ssl.SslBufferPool
*/
public class SslHandler extends FrameDecoder
implements ChannelDownstreamHandler {
private static final InternalLogger logger = InternalLoggerFactory.getInstance(SslHandler.class);
private static final ByteBuffer EMPTY_BUFFER = ByteBuffer.allocate(0);
private static final Pattern IGNORABLE_CLASS_IN_STACK = Pattern.compile(
"^.*(?:Socket|Datagram|Sctp|Udt)Channel.*$");
private static final Pattern IGNORABLE_ERROR_MESSAGE = Pattern.compile(
"^.*(?:connection.*(?:reset|closed|abort|broken)|broken.*pipe).*$", Pattern.CASE_INSENSITIVE);
private static SslBufferPool defaultBufferPool;
/**
* Returns the default {@link SslBufferPool} used when no pool is
* specified in the constructor.
*/
public static synchronized SslBufferPool getDefaultBufferPool() {
    // Lazily create the shared pool; the method-level synchronization
    // guarantees at most one instance is ever constructed.
    if (defaultBufferPool != null) {
        return defaultBufferPool;
    }
    defaultBufferPool = new SslBufferPool();
    return defaultBufferPool;
}
    private volatile ChannelHandlerContext ctx;
    private final SSLEngine engine;
    private final SslBufferPool bufferPool;
    private final Executor delegatedTaskExecutor;
    // When true, the very first write request bypasses encryption (StartTLS).
    private final boolean startTls;
    private volatile boolean enableRenegotiation = true;

    // Guards handshake state transitions.
    final Object handshakeLock = new Object();
    private boolean handshaking;
    private volatile boolean handshaken;
    private volatile ChannelFuture handshakeFuture;

    // CAS-updated one-shot flags (see the field updaters below).
    @SuppressWarnings("UnusedDeclaration")
    private volatile int sentFirstMessage;
    @SuppressWarnings("UnusedDeclaration")
    private volatile int sentCloseNotify;
    @SuppressWarnings("UnusedDeclaration")
    private volatile int closedOutboundAndChannel;

    private static final AtomicIntegerFieldUpdater<SslHandler> SENT_FIRST_MESSAGE_UPDATER =
            AtomicIntegerFieldUpdater.newUpdater(SslHandler.class, "sentFirstMessage");
    private static final AtomicIntegerFieldUpdater<SslHandler> SENT_CLOSE_NOTIFY_UPDATER =
            AtomicIntegerFieldUpdater.newUpdater(SslHandler.class, "sentCloseNotify");
    private static final AtomicIntegerFieldUpdater<SslHandler> CLOSED_OUTBOUND_AND_CHANNEL_UPDATER =
            AtomicIntegerFieldUpdater.newUpdater(SslHandler.class, "closedOutboundAndChannel");

    int ignoreClosedChannelException;
    final Object ignoreClosedChannelExceptionLock = new Object();

    // Writes queued before/while the handshake completes, plus their locks.
    private final Queue<PendingWrite> pendingUnencryptedWrites = new LinkedList<PendingWrite>();
    private final NonReentrantLock pendingUnencryptedWritesLock = new NonReentrantLock();
    private final Queue<MessageEvent> pendingEncryptedWrites = new ConcurrentLinkedQueue<MessageEvent>();
    private final NonReentrantLock pendingEncryptedWritesLock = new NonReentrantLock();

    private volatile boolean issueHandshake;
    private volatile boolean writeBeforeHandshakeDone;
    private final SSLEngineInboundCloseFuture sslEngineCloseFuture = new SSLEngineInboundCloseFuture();
    private boolean closeOnSslException;
    // Length of the SSL record currently being accumulated by the decoder.
    private int packetLength;

    // Optional handshake timeout support.
    private final Timer timer;
    private final long handshakeTimeoutInMillis;
    private Timeout handshakeTimeout;
    /**
     * Creates a new instance using the shared default buffer pool, with
     * startTLS disabled and no handshake timeout.
     *
     * @param engine  the {@link SSLEngine} this handler will use
     */
    public SslHandler(SSLEngine engine) {
        this(engine, getDefaultBufferPool(), false, null, 0);
    }
    /**
     * Creates a new instance with startTLS disabled and no handshake timeout.
     *
     * @param engine      the {@link SSLEngine} this handler will use
     * @param bufferPool  the {@link SslBufferPool} where this handler will
     *                    acquire the buffers required by the {@link SSLEngine}
     */
    public SslHandler(SSLEngine engine, SslBufferPool bufferPool) {
        this(engine, bufferPool, false, null, 0);
    }
    /**
     * Creates a new instance using the shared default buffer pool.
     *
     * @param engine    the {@link SSLEngine} this handler will use
     * @param startTls  {@code true} if the first write request shouldn't be
     *                  encrypted by the {@link SSLEngine}
     */
    public SslHandler(SSLEngine engine, boolean startTls) {
        this(engine, getDefaultBufferPool(), startTls);
    }
    /**
     * Creates a new instance with no handshake timeout.
     *
     * @param engine      the {@link SSLEngine} this handler will use
     * @param bufferPool  the {@link SslBufferPool} where this handler will
     *                    acquire the buffers required by the {@link SSLEngine}
     * @param startTls    {@code true} if the first write request shouldn't be
     *                    encrypted by the {@link SSLEngine}
     */
    public SslHandler(SSLEngine engine, SslBufferPool bufferPool, boolean startTls) {
        this(engine, bufferPool, startTls, null, 0);
    }
    /**
     * Creates a new instance.
     *
     * @param engine
     *        the {@link SSLEngine} this handler will use
     * @param bufferPool
     *        the {@link SslBufferPool} where this handler will acquire
     *        the buffers required by the {@link SSLEngine}
     * @param startTls
     *        {@code true} if the first write request shouldn't be encrypted
     *        by the {@link SSLEngine}
     * @param timer
     *        the {@link Timer} which will be used to process the timeout of the {@link #handshake()}.
     *        Be aware that the given {@link Timer} will not get stopped automatically, so it is up to you to clean it
     *        up once you do not need it anymore
     * @param handshakeTimeoutInMillis
     *        the time in milliseconds after which the {@link #handshake()} will be failed, and so the future notified
     */
    @SuppressWarnings("deprecation")
    public SslHandler(SSLEngine engine, SslBufferPool bufferPool, boolean startTls,
                      Timer timer, long handshakeTimeoutInMillis) {
        // Delegates to the deprecated full constructor with an executor that
        // runs delegated SSL tasks inline on the calling thread.
        this(engine, bufferPool, startTls, ImmediateExecutor.INSTANCE, timer, handshakeTimeoutInMillis);
    }
    /**
     * @deprecated Use {@link #SslHandler(SSLEngine)} instead.
     */
    @Deprecated
    public SslHandler(SSLEngine engine, Executor delegatedTaskExecutor) {
        this(engine, getDefaultBufferPool(), delegatedTaskExecutor);
    }
    /**
     * @deprecated Use {@link #SslHandler(SSLEngine, boolean)} instead.
     */
    @Deprecated
    public SslHandler(SSLEngine engine, SslBufferPool bufferPool, Executor delegatedTaskExecutor) {
        this(engine, bufferPool, false, delegatedTaskExecutor);
    }
    /**
     * @deprecated Use {@link #SslHandler(SSLEngine, boolean)} instead.
     */
    @Deprecated
    public SslHandler(SSLEngine engine, boolean startTls, Executor delegatedTaskExecutor) {
        this(engine, getDefaultBufferPool(), startTls, delegatedTaskExecutor);
    }
    /**
     * @deprecated Use {@link #SslHandler(SSLEngine, SslBufferPool, boolean)} instead.
     */
    @Deprecated
    public SslHandler(SSLEngine engine, SslBufferPool bufferPool, boolean startTls, Executor delegatedTaskExecutor) {
        this(engine, bufferPool, startTls, delegatedTaskExecutor, null, 0);
    }
    /**
     * Canonical constructor that all other constructors delegate to.
     *
     * @deprecated Use {@link #SslHandler(SSLEngine engine, SslBufferPool bufferPool, boolean startTls, Timer timer,
     *             long handshakeTimeoutInMillis)} instead.
     */
    @Deprecated
    public SslHandler(SSLEngine engine, SslBufferPool bufferPool, boolean startTls, Executor delegatedTaskExecutor,
                      Timer timer, long handshakeTimeoutInMillis) {
        // Validate mandatory collaborators up front so a misconfigured
        // handler fails at construction time, not mid-handshake.
        if (engine == null) {
            throw new NullPointerException("engine");
        }
        if (bufferPool == null) {
            throw new NullPointerException("bufferPool");
        }
        if (delegatedTaskExecutor == null) {
            throw new NullPointerException("delegatedTaskExecutor");
        }
        // A timeout without a timer cannot be enforced.
        if (timer == null && handshakeTimeoutInMillis > 0) {
            throw new IllegalArgumentException("No Timer was given but a handshakeTimeoutInMillis, need both or none");
        }
        this.engine = engine;
        this.bufferPool = bufferPool;
        this.delegatedTaskExecutor = delegatedTaskExecutor;
        this.startTls = startTls;
        this.timer = timer;
        this.handshakeTimeoutInMillis = handshakeTimeoutInMillis;
    }
/**
* Returns the {@link SSLEngine} which is used by this handler.
*/
public SSLEngine getEngine() {
return engine;
}
    /**
     * Starts an SSL / TLS handshake for the specified channel.
     * If a handshake is already in progress the existing future is returned;
     * if the engine has already handshaken, renegotiation must be enabled or
     * an {@link IllegalStateException} is thrown.
     *
     * @return a {@link ChannelFuture} which is notified when the handshake
     *         succeeds or fails.
     */
    public ChannelFuture handshake() {
        synchronized (handshakeLock) {
            if (handshaken && !isEnableRenegotiation()) {
                throw new IllegalStateException("renegotiation disabled");
            }
            final ChannelHandlerContext ctx = this.ctx;
            final Channel channel = ctx.getChannel();
            ChannelFuture handshakeFuture;
            Exception exception = null;
            // Only one handshake may run at a time; return the in-flight future.
            if (handshaking) {
                return this.handshakeFuture;
            }
            handshaking = true;
            try {
                engine.beginHandshake();
                runDelegatedTasks();
                handshakeFuture = this.handshakeFuture = future(channel);
                if (handshakeTimeoutInMillis > 0) {
                    // Schedule a task that fails the handshake future if it is
                    // still pending after the configured timeout.
                    handshakeTimeout = timer.newTimeout(new TimerTask() {
                            public void run(Timeout timeout) throws Exception {
                                ChannelFuture future = SslHandler.this.handshakeFuture;
                                if (future != null && future.isDone()) {
                                    return;
                                }
                                setHandshakeFailure(channel, new SSLException("Handshake did not complete within " +
                                        handshakeTimeoutInMillis + "ms"));
                            }
                        }, handshakeTimeoutInMillis, TimeUnit.MILLISECONDS);
                }
            } catch (Exception e) {
                handshakeFuture = this.handshakeFuture = failedFuture(channel, e);
                exception = e;
            }
            if (exception == null) { // Began handshake successfully.
                try {
                    final ChannelFuture hsFuture = handshakeFuture;
                    // Flush the initial handshake records; propagate any write
                    // failure to the handshake future.
                    wrapNonAppData(ctx, channel).addListener(new ChannelFutureListener() {
                        public void operationComplete(ChannelFuture future) throws Exception {
                            if (!future.isSuccess()) {
                                Throwable cause = future.getCause();
                                hsFuture.setFailure(cause);
                                fireExceptionCaught(ctx, cause);
                                if (closeOnSslException) {
                                    Channels.close(ctx, future(channel));
                                }
                            }
                        }
                    });
                } catch (SSLException e) {
                    handshakeFuture.setFailure(e);
                    fireExceptionCaught(ctx, e);
                    if (closeOnSslException) {
                        Channels.close(ctx, future(channel));
                    }
                }
            } else { // Failed to initiate handshake.
                fireExceptionCaught(ctx, exception);
                if (closeOnSslException) {
                    Channels.close(ctx, future(channel));
                }
            }
            return handshakeFuture;
        }
    }
/**
* @deprecated Use {@link #handshake()} instead.
*/
@Deprecated
public ChannelFuture handshake(@SuppressWarnings("unused") Channel channel) {
return handshake();
}
    /**
     * Sends an SSL {@code close_notify} message to the specified channel and
     * destroys the underlying {@link SSLEngine}.
     *
     * @return a future notified when the close_notify record has been written,
     *         or a failed future if wrapping it raised an {@link SSLException}
     */
    public ChannelFuture close() {
        ChannelHandlerContext ctx = this.ctx;
        Channel channel = ctx.getChannel();
        try {
            // Mark the outbound side closed so wrap() produces close_notify.
            engine.closeOutbound();
            return wrapNonAppData(ctx, channel);
        } catch (SSLException e) {
            fireExceptionCaught(ctx, e);
            if (closeOnSslException) {
                Channels.close(ctx, future(channel));
            }
            return failedFuture(channel, e);
        }
    }
/**
* @deprecated Use {@link #close()} instead.
*/
@Deprecated
public ChannelFuture close(@SuppressWarnings("unused") Channel channel) {
return close();
}
/**
* Returns {@code true} if and only if TLS renegotiation is enabled.
*/
public boolean isEnableRenegotiation() {
return enableRenegotiation;
}
/**
* Enables or disables TLS renegotiation.
*/
public void setEnableRenegotiation(boolean enableRenegotiation) {
this.enableRenegotiation = enableRenegotiation;
}
/**
* Enables or disables the automatic handshake once the {@link Channel} is
* connected. The value will only have affect if its set before the
* {@link Channel} is connected.
*/
public void setIssueHandshake(boolean issueHandshake) {
this.issueHandshake = issueHandshake;
}
/**
* Returns {@code true} if the automatic handshake is enabled
*/
public boolean isIssueHandshake() {
return issueHandshake;
}
/**
* Return the {@link ChannelFuture} that will get notified if the inbound of the {@link SSLEngine} will get closed.
*
* This method will return the same {@link ChannelFuture} all the time.
*
* For more informations see the apidocs of {@link SSLEngine}
*
*/
public ChannelFuture getSSLEngineInboundCloseFuture() {
return sslEngineCloseFuture;
}
/**
* Return the timeout (in ms) after which the {@link ChannelFuture} of {@link #handshake()} will be failed, while
* a handshake is in progress
*/
public long getHandshakeTimeout() {
return handshakeTimeoutInMillis;
}
    /**
     * If set to {@code true}, the {@link Channel} will automatically get closed
     * once a {@link SSLException} was caught. This is most times what you want, as after this
     * it's almost impossible to recover.
     *
     * Anyway the default is {@code false} to not break compatibility with older releases. This
     * will be changed to {@code true} in the next major release.
     *
     * @throws IllegalStateException if the handler is already attached to a
     *         {@link ChannelPipeline}; the flag must be set beforehand
     */
    public void setCloseOnSSLException(boolean closeOnSslException) {
        // ctx is assigned when the handler is added to a pipeline, so a
        // non-null ctx means it is too late to change this setting.
        if (ctx != null) {
            throw new IllegalStateException("Can only get changed before attached to ChannelPipeline");
        }
        this.closeOnSslException = closeOnSslException;
    }
public boolean getCloseOnSSLException() {
return closeOnSslException;
}
    /**
     * Intercepts downstream (outbound) events: close/disconnect/unbind events
     * trigger an orderly SSL shutdown, and writable {@link ChannelBuffer}
     * messages are queued for encryption by {@link #wrap}.
     */
    public void handleDownstream(
            final ChannelHandlerContext context, final ChannelEvent evt) throws Exception {
        if (evt instanceof ChannelStateEvent) {
            ChannelStateEvent e = (ChannelStateEvent) evt;
            switch (e.getState()) {
            case OPEN:
            case CONNECTED:
            case BOUND:
                // A FALSE/null value on these states means close, disconnect
                // or unbind: shut down the SSL outbound side first.
                if (Boolean.FALSE.equals(e.getValue()) || e.getValue() == null) {
                    closeOutboundAndChannel(context, e);
                    return;
                }
            }
        }
        if (!(evt instanceof MessageEvent)) {
            context.sendDownstream(evt);
            return;
        }
        MessageEvent e = (MessageEvent) evt;
        if (!(e.getMessage() instanceof ChannelBuffer)) {
            context.sendDownstream(evt);
            return;
        }
        // Do not encrypt the first write request if this handler is
        // created with startTLS flag turned on.
        if (startTls && SENT_FIRST_MESSAGE_UPDATER.compareAndSet(this, 0, 1)) {
            context.sendDownstream(evt);
            return;
        }
        // Otherwise, all messages are encrypted.
        ChannelBuffer msg = (ChannelBuffer) e.getMessage();
        PendingWrite pendingWrite;
        if (msg.readable()) {
            pendingWrite = new PendingWrite(evt.getFuture(), msg.toByteBuffer(msg.readerIndex(), msg.readableBytes()));
        } else {
            // Empty writes carry a null buffer; wrap() passes them through.
            pendingWrite = new PendingWrite(evt.getFuture(), null);
        }
        pendingUnencryptedWritesLock.lock();
        try {
            pendingUnencryptedWrites.add(pendingWrite);
        } finally {
            pendingUnencryptedWritesLock.unlock();
        }
        // Remember writes issued before the handshake finished so they get
        // flushed once it completes.
        if (handshakeFuture == null || !handshakeFuture.isDone()) {
            writeBeforeHandshakeDone = true;
        }
        wrap(context, evt.getChannel());
    }
    // Cancels the pending handshake-timeout task, if one was scheduled, so it
    // does not fire after the handshake future has already been completed.
    private void cancelHandshakeTimeout() {
        if (handshakeTimeout != null) {
            // cancel the task as we will fail the handshake future now
            handshakeTimeout.cancel();
        }
    }
    /**
     * Fails a pending handshake when the connection drops, then lets the
     * event propagate and finally drains remaining handshake data and closes
     * the {@link SSLEngine}.
     */
    @Override
    public void channelDisconnected(ChannelHandlerContext ctx, ChannelStateEvent e) throws Exception {
        // Make sure the handshake future is notified when a connection has
        // been closed during handshake.
        synchronized (handshakeLock) {
            if (handshaking) {
                cancelHandshakeTimeout();
                handshakeFuture.setFailure(new ClosedChannelException());
            }
        }
        try {
            super.channelDisconnected(ctx, e);
        } finally {
            // Always attempt to process any buffered handshake data and shut
            // the engine down, even if the upstream notification threw.
            unwrapNonAppData(ctx, e.getChannel());
            closeEngine();
        }
    }
    // Closes both sides of the SSLEngine. The inbound side is only closed when
    // no close_notify was sent but a handshake completed; closeInbound() may
    // complain (SSLException) about a truncated close, which is only logged.
    private void closeEngine() {
        engine.closeOutbound();
        if (sentCloseNotify == 0 && handshaken) {
            try {
                engine.closeInbound();
            } catch (SSLException ex) {
                if (logger.isDebugEnabled()) {
                    logger.debug("Failed to clean up SSLEngine.", ex);
                }
            }
        }
    }
    /**
     * Filters I/O exceptions that are an expected part of SSL teardown:
     * a bounded number of {@link ClosedChannelException}s raised while
     * writing non-app data, and connection-reset-style errors recognized by
     * {@link #ignoreException(Throwable)}. Everything else goes upstream.
     */
    @Override
    public void exceptionCaught(ChannelHandlerContext ctx, ExceptionEvent e)
            throws Exception {
        Throwable cause = e.getCause();
        if (cause instanceof IOException) {
            if (cause instanceof ClosedChannelException) {
                synchronized (ignoreClosedChannelExceptionLock) {
                    // Budget incremented by wrapNonAppData()'s write listener.
                    if (ignoreClosedChannelException > 0) {
                        ignoreClosedChannelException --;
                        if (logger.isDebugEnabled()) {
                            logger.debug(
                                    "Swallowing an exception raised while " +
                                    "writing non-app data", cause);
                        }
                        return;
                    }
                }
            } else {
                if (ignoreException(cause)) {
                    return;
                }
            }
        }
        ctx.sendUpstream(e);
    }
    /**
     * Checks if the given {@link Throwable} can be ignored and just "swallowed".
     *
     * When an ssl connection is closed a close_notify message is sent.
     * After that the peer also sends close_notify however, it's not mandatory to receive
     * the close_notify. The party who sent the initial close_notify can close the connection immediately
     * then the peer will get connection reset error.
     *
     * @param t  the caught throwable
     * @return {@code true} if the throwable looks like an ordinary
     *         connection-reset during teardown and may be swallowed
     */
    private boolean ignoreException(Throwable t) {
        // Only consider plain IOExceptions after we have closed our outbound
        // side; SSLExceptions are always real errors.
        if (!(t instanceof SSLException) && t instanceof IOException && engine.isOutboundDone()) {
            String message = String.valueOf(t.getMessage()).toLowerCase();
            // first try to match connection reset / broke peer based on the regex. This is the fastest way
            // but may fail on different jdk impls or OS's
            if (IGNORABLE_ERROR_MESSAGE.matcher(message).matches()) {
                return true;
            }
            // Inspect the StackTraceElements to see if it was a connection reset / broken pipe or not
            StackTraceElement[] elements = t.getStackTrace();
            for (StackTraceElement element: elements) {
                String classname = element.getClassName();
                String methodname = element.getMethodName();
                // skip all classes that belong to the io.netty package
                if (classname.startsWith("org.jboss.netty.")) {
                    continue;
                }
                // check if the method name is read if not skip it
                if (!"read".equals(methodname)) {
                    continue;
                }
                // This will also match against SocketInputStream which is used by openjdk 7 and maybe
                // also others
                if (IGNORABLE_CLASS_IN_STACK.matcher(classname).matches()) {
                    return true;
                }
                try {
                    // No match by now.. Try to load the class via classloader and inspect it.
                    // This is mainly done as other JDK implementations may differ in name of
                    // the impl.
                    Class<?> clazz = getClass().getClassLoader().loadClass(classname);
                    if (SocketChannel.class.isAssignableFrom(clazz)
                            || DatagramChannel.class.isAssignableFrom(clazz)) {
                        return true;
                    }
                    // also match against SctpChannel via String matching as it may not present.
                    if (DetectionUtil.javaVersion() >= 7
                            && "com.sun.nio.sctp.SctpChannel".equals(clazz.getSuperclass().getName())) {
                        return true;
                    }
                } catch (ClassNotFoundException e) {
                    // This should not happen just ignore
                }
            }
        }
        return false;
    }
/**
* Returns {@code true} if the given {@link ChannelBuffer} is encrypted. Be aware that this method
* will not increase the readerIndex of the given {@link ChannelBuffer}.
*
* @param buffer
* The {@link ChannelBuffer} to read from. Be aware that it must have at least 5 bytes to read,
* otherwise it will throw an {@link IllegalArgumentException}.
* @return encrypted
* {@code true} if the {@link ChannelBuffer} is encrypted, {@code false} otherwise.
* @throws IllegalArgumentException
* Is thrown if the given {@link ChannelBuffer} has not at least 5 bytes to read.
*/
public static boolean isEncrypted(ChannelBuffer buffer) {
return getEncryptedPacketLength(buffer, buffer.readerIndex()) != -1;
}
    /**
     * Return how much bytes can be read out of the encrypted data. Be aware that this method will not increase
     * the readerIndex of the given {@link ChannelBuffer}.
     *
     * @param buffer
     *        The {@link ChannelBuffer} to read from. Be aware that it must have at least 5 bytes to read,
     *        otherwise it will throw an {@link IllegalArgumentException}.
     * @param offset
     *        The index within {@code buffer} at which the record header starts.
     * @return length
     *         The length of the encrypted packet that is included in the buffer. This will
     *         return {@code -1} if the given {@link ChannelBuffer} is not encrypted at all.
     * @throws IllegalArgumentException
     *         Is thrown if the given {@link ChannelBuffer} has not at least 5 bytes to read.
     */
    private static int getEncryptedPacketLength(ChannelBuffer buffer, int offset) {
        int packetLength = 0;
        // SSLv3 or TLS - Check ContentType
        boolean tls;
        switch (buffer.getUnsignedByte(offset)) {
        case 20: // change_cipher_spec
        case 21: // alert
        case 22: // handshake
        case 23: // application_data
            tls = true;
            break;
        default:
            // SSLv2 or bad data
            tls = false;
        }
        if (tls) {
            // SSLv3 or TLS - Check ProtocolVersion
            int majorVersion = buffer.getUnsignedByte(offset + 1);
            if (majorVersion == 3) {
                // SSLv3 or TLS: record length field + 5-byte header
                packetLength = (getShort(buffer, offset + 3) & 0xFFFF) + 5;
                if (packetLength <= 5) {
                    // Neither SSLv3 or TLSv1 (i.e. SSLv2 or bad data)
                    tls = false;
                }
            } else {
                // Neither SSLv3 or TLSv1 (i.e. SSLv2 or bad data)
                tls = false;
            }
        }
        if (!tls) {
            // SSLv2 or bad data - Check the version
            boolean sslv2 = true;
            // High bit of first byte selects the 2- or 3-byte SSLv2 header.
            int headerLength = (buffer.getUnsignedByte(offset) & 0x80) != 0 ? 2 : 3;
            int majorVersion = buffer.getUnsignedByte(offset + headerLength + 1);
            if (majorVersion == 2 || majorVersion == 3) {
                // SSLv2
                if (headerLength == 2) {
                    packetLength = (getShort(buffer, offset) & 0x7FFF) + 2;
                } else {
                    packetLength = (getShort(buffer, offset) & 0x3FFF) + 3;
                }
                if (packetLength <= headerLength) {
                    sslv2 = false;
                }
            } else {
                sslv2 = false;
            }
            if (!sslv2) {
                return -1;
            }
        }
        return packetLength;
    }
    /**
     * Frames inbound bytes into complete SSL records and decrypts them.
     * Accumulates as many whole records as fit under the maximum encrypted
     * packet length, unwraps them in one batch, and raises
     * {@link NotSslRecordException} on non-SSL data.
     */
    @Override
    protected Object decode(
            final ChannelHandlerContext ctx, Channel channel, ChannelBuffer in) throws Exception {
        final int startOffset = in.readerIndex();
        final int endOffset = in.writerIndex();
        int offset = startOffset;
        int totalLength = 0;
        // If we calculated the length of the current SSL record before, use that information.
        if (packetLength > 0) {
            if (endOffset - startOffset < packetLength) {
                // Still waiting for the rest of the record.
                return null;
            } else {
                offset += packetLength;
                totalLength = packetLength;
                packetLength = 0;
            }
        }
        boolean nonSslRecord = false;
        while (totalLength < OpenSslEngine.MAX_ENCRYPTED_PACKET_LENGTH) {
            final int readableBytes = endOffset - offset;
            // An SSL record header is 5 bytes; wait for at least that much.
            if (readableBytes < 5) {
                break;
            }
            final int packetLength = getEncryptedPacketLength(in, offset);
            if (packetLength == -1) {
                nonSslRecord = true;
                break;
            }
            assert packetLength > 0;
            if (packetLength > readableBytes) {
                // wait until the whole packet can be read
                this.packetLength = packetLength;
                break;
            }
            int newTotalLength = totalLength + packetLength;
            if (newTotalLength > OpenSslEngine.MAX_ENCRYPTED_PACKET_LENGTH) {
                // Don't read too much.
                break;
            }
            // We have a whole packet.
            // Increment the offset to handle the next packet.
            offset += packetLength;
            totalLength = newTotalLength;
        }
        ChannelBuffer unwrapped = null;
        if (totalLength > 0) {
            // The buffer contains one or more full SSL records.
            // Slice out the whole packet so unwrap will only be called with complete packets.
            // Also directly reset the packetLength. This is needed as unwrap(..) may trigger
            // decode(...) again via:
            // 1) unwrap(..) is called
            // 2) wrap(...) is called from within unwrap(...)
            // 3) wrap(...) calls unwrapLater(...)
            // 4) unwrapLater(...) calls decode(...)
            //
            // See https://github.com/netty/netty/issues/1534
            final ByteBuffer inNetBuf = in.toByteBuffer(in.readerIndex(), totalLength);
            unwrapped = unwrap(ctx, channel, in, inNetBuf, totalLength);
            assert !inNetBuf.hasRemaining() || engine.isInboundDone();
        }
        if (nonSslRecord) {
            // Not an SSL/TLS packet
            NotSslRecordException e = new NotSslRecordException(
                    "not an SSL/TLS record: " + ChannelBuffers.hexDump(in));
            in.skipBytes(in.readableBytes());
            if (closeOnSslException) {
                // first trigger the exception and then close the channel
                fireExceptionCaught(ctx, e);
                Channels.close(ctx, future(channel));
                // just return null as we closed the channel before, that
                // will take care of cleanup etc
                return null;
            } else {
                throw e;
            }
        }
        return unwrapped;
    }
/**
* Reads a big-endian short integer from the buffer. Please note that we do not use
* {@link ChannelBuffer#getShort(int)} because it might be a little-endian buffer.
*/
private static short getShort(ChannelBuffer buf, int offset) {
return (short) (buf.getByte(offset) << 8 | buf.getByte(offset + 1) & 0xFF);
}
    /**
     * Drains the pending-unencrypted-write queue through
     * {@link SSLEngine#wrap}, queueing the resulting ciphertext as encrypted
     * write requests and flushing them downstream.  On engine closure all
     * remaining pending writes are failed.  Lock order here (unencrypted-writes
     * lock, then handshakeLock) is load-bearing; see the deadlock notes in
     * {@code unwrap}.
     *
     * @throws SSLException if the engine fails to wrap; the handshake future
     *         is failed before the exception propagates
     */
    private void wrap(ChannelHandlerContext context, Channel channel) throws SSLException {
        ChannelBuffer msg;
        ByteBuffer outNetBuf = bufferPool.acquireBuffer();
        boolean success = true;
        boolean offered = false;
        boolean needsUnwrap = false;
        PendingWrite pendingWrite = null;
        try {
            loop:
            for (;;) {
                // Acquire a lock to make sure unencrypted data is polled
                // in order and their encrypted counterpart is offered in
                // order.
                pendingUnencryptedWritesLock.lock();
                try {
                    pendingWrite = pendingUnencryptedWrites.peek();
                    if (pendingWrite == null) {
                        break;
                    }
                    ByteBuffer outAppBuf = pendingWrite.outAppBuf;
                    if (outAppBuf == null) {
                        // A write request with an empty buffer
                        pendingUnencryptedWrites.remove();
                        offerEncryptedWriteRequest(
                                new DownstreamMessageEvent(
                                        channel, pendingWrite.future,
                                        ChannelBuffers.EMPTY_BUFFER,
                                        channel.getRemoteAddress()));
                        offered = true;
                    } else {
                        synchronized (handshakeLock) {
                            SSLEngineResult result = null;
                            try {
                                result = engine.wrap(outAppBuf, outNetBuf);
                            } finally {
                                // Only dequeue once the engine consumed the
                                // whole plaintext buffer.
                                if (!outAppBuf.hasRemaining()) {
                                    pendingUnencryptedWrites.remove();
                                }
                            }
                            if (result.bytesProduced() > 0) {
                                outNetBuf.flip();
                                int remaining = outNetBuf.remaining();
                                msg = ctx.getChannel().getConfig().getBufferFactory().getBuffer(remaining);
                                // Transfer the bytes to the new ChannelBuffer using some safe method that will also
                                // work with "non" heap buffers
                                //
                                // See https://github.com/netty/netty/issues/329
                                msg.writeBytes(outNetBuf);
                                outNetBuf.clear();
                                ChannelFuture future;
                                if (pendingWrite.outAppBuf.hasRemaining()) {
                                    // pendingWrite's future shouldn't be notified if
                                    // only partial data is written.
                                    future = succeededFuture(channel);
                                } else {
                                    future = pendingWrite.future;
                                }
                                MessageEvent encryptedWrite = new DownstreamMessageEvent(
                                        channel, future, msg, channel.getRemoteAddress());
                                offerEncryptedWriteRequest(encryptedWrite);
                                offered = true;
                            } else if (result.getStatus() == Status.CLOSED) {
                                // SSLEngine has been closed already.
                                // Any further write attempts should be denied.
                                success = false;
                                break;
                            } else {
                                final HandshakeStatus handshakeStatus = result.getHandshakeStatus();
                                handleRenegotiation(handshakeStatus);
                                switch (handshakeStatus) {
                                case NEED_WRAP:
                                    if (outAppBuf.hasRemaining()) {
                                        break;
                                    } else {
                                        break loop;
                                    }
                                case NEED_UNWRAP:
                                    needsUnwrap = true;
                                    break loop;
                                case NEED_TASK:
                                    runDelegatedTasks();
                                    break;
                                case FINISHED:
                                    setHandshakeSuccess(channel);
                                    if (result.getStatus() == Status.CLOSED) {
                                        success = false;
                                    }
                                    break loop;
                                case NOT_HANDSHAKING:
                                    setHandshakeSuccessIfStillHandshaking(channel);
                                    if (result.getStatus() == Status.CLOSED) {
                                        success = false;
                                    }
                                    break loop;
                                default:
                                    throw new IllegalStateException(
                                            "Unknown handshake status: " +
                                            handshakeStatus);
                                }
                            }
                        }
                    }
                } finally {
                    pendingUnencryptedWritesLock.unlock();
                }
            }
        } catch (SSLException e) {
            success = false;
            setHandshakeFailure(channel, e);
            throw e;
        } finally {
            bufferPool.releaseBuffer(outNetBuf);
            if (offered) {
                flushPendingEncryptedWrites(context);
            }
            if (!success) {
                IllegalStateException cause =
                        new IllegalStateException("SSLEngine already closed");
                // Check if we had a pendingWrite in process, if so we need to also notify as otherwise
                // the ChannelFuture will never get notified
                if (pendingWrite != null) {
                    pendingWrite.future.setFailure(cause);
                }
                // Mark all remaining pending writes as failure if anything
                // wrong happened before the write requests are wrapped.
                // Please note that we do not call setFailure while a lock is
                // acquired, to avoid a potential dead lock.
                for (;;) {
                    pendingUnencryptedWritesLock.lock();
                    try {
                        pendingWrite = pendingUnencryptedWrites.poll();
                        if (pendingWrite == null) {
                            break;
                        }
                    } finally {
                        pendingUnencryptedWritesLock.unlock();
                    }
                    pendingWrite.future.setFailure(cause);
                }
            }
        }
        if (needsUnwrap) {
            unwrapNonAppData(ctx, channel);
        }
    }
    // Adds an encrypted write to the outbound queue. tryLock (instead of
    // lock) keeps ordering with a concurrent flush while avoiding a deadlock
    // when the current thread is already flushing; the queue itself is a
    // ConcurrentLinkedQueue, so the add is safe either way.
    private void offerEncryptedWriteRequest(MessageEvent encryptedWrite) {
        final boolean locked = pendingEncryptedWritesLock.tryLock();
        try {
            pendingEncryptedWrites.add(encryptedWrite);
        } finally {
            if (locked) {
                pendingEncryptedWritesLock.unlock();
            }
        }
    }
    // Sends all queued encrypted writes downstream. Uses tryLock so that if
    // another thread already holds the flush lock we simply return — that
    // thread will drain anything we queued.
    private void flushPendingEncryptedWrites(ChannelHandlerContext ctx) {
        while (!pendingEncryptedWrites.isEmpty()) {
            // Avoid possible dead lock and data integrity issue
            // which is caused by cross communication between more than one channel
            // in the same VM.
            if (!pendingEncryptedWritesLock.tryLock()) {
                return;
            }
            try {
                MessageEvent e;
                while ((e = pendingEncryptedWrites.poll()) != null) {
                    ctx.sendDownstream(e);
                }
            } finally {
                pendingEncryptedWritesLock.unlock();
            }
            // Other thread might have added more elements at this point, so we loop again if the queue got unempty.
        }
    }
    /**
     * Produces and writes pure handshake (non-application) records by calling
     * {@link SSLEngine#wrap} with an empty source buffer until the engine
     * stops producing bytes, driving the handshake state machine along the way.
     *
     * @return the future of the last handshake record written, or a succeeded
     *         future if nothing was produced
     * @throws SSLException if wrapping fails; the handshake future is failed first
     */
    private ChannelFuture wrapNonAppData(ChannelHandlerContext ctx, Channel channel) throws SSLException {
        ChannelFuture future = null;
        ByteBuffer outNetBuf = bufferPool.acquireBuffer();
        SSLEngineResult result;
        try {
            for (;;) {
                synchronized (handshakeLock) {
                    result = engine.wrap(EMPTY_BUFFER, outNetBuf);
                }
                if (result.bytesProduced() > 0) {
                    outNetBuf.flip();
                    ChannelBuffer msg =
                            ctx.getChannel().getConfig().getBufferFactory().getBuffer(outNetBuf.remaining());
                    // Transfer the bytes to the new ChannelBuffer using some safe method that will also
                    // work with "non" heap buffers
                    //
                    // See https://github.com/netty/netty/issues/329
                    msg.writeBytes(outNetBuf);
                    outNetBuf.clear();
                    future = future(channel);
                    // If this handshake write fails because the channel is
                    // already closed, budget one swallowed exception in
                    // exceptionCaught().
                    future.addListener(new ChannelFutureListener() {
                        public void operationComplete(ChannelFuture future)
                                throws Exception {
                            if (future.getCause() instanceof ClosedChannelException) {
                                synchronized (ignoreClosedChannelExceptionLock) {
                                    ignoreClosedChannelException ++;
                                }
                            }
                        }
                    });
                    write(ctx, future, msg);
                }
                final HandshakeStatus handshakeStatus = result.getHandshakeStatus();
                handleRenegotiation(handshakeStatus);
                switch (handshakeStatus) {
                case FINISHED:
                    setHandshakeSuccess(channel);
                    runDelegatedTasks();
                    break;
                case NEED_TASK:
                    runDelegatedTasks();
                    break;
                case NEED_UNWRAP:
                    if (!Thread.holdsLock(handshakeLock)) {
                        // unwrap shouldn't be called when this method was
                        // called by unwrap - unwrap will keep running after
                        // this method returns.
                        unwrapNonAppData(ctx, channel);
                    }
                    break;
                case NOT_HANDSHAKING:
                    if (setHandshakeSuccessIfStillHandshaking(channel)) {
                        runDelegatedTasks();
                    }
                    break;
                case NEED_WRAP:
                    break;
                default:
                    throw new IllegalStateException(
                            "Unexpected handshake status: " + handshakeStatus);
                }
                // Stop once the engine has nothing more to emit.
                if (result.bytesProduced() == 0) {
                    break;
                }
            }
        } catch (SSLException e) {
            setHandshakeFailure(channel, e);
            throw e;
        } finally {
            bufferPool.releaseBuffer(outNetBuf);
        }
        if (future == null) {
            future = succeededFuture(channel);
        }
        return future;
    }
    /**
     * Calls {@link SSLEngine#unwrap(ByteBuffer, ByteBuffer)} with an empty buffer to handle handshakes, etc.
     * The {@code -1} capacity hint tells {@link #unwrap} that no application
     * output buffer needs to be sized for this call.
     */
    private void unwrapNonAppData(ChannelHandlerContext ctx, Channel channel) throws SSLException {
        unwrap(ctx, channel, ChannelBuffers.EMPTY_BUFFER, EMPTY_BUFFER, -1);
    }
    /**
     * Unwraps inbound SSL records.
     *
     * Decrypts the records in {@code nioInNetBuf} into an application buffer,
     * keeping {@code nettyInNetBuf}'s readerIndex in sync, and drives the
     * handshake state machine (wrapping, delegated tasks, renegotiation) as
     * required by the engine.  The deadlock-avoidance checks before calling
     * {@code wrap} are essential; see the inline comments.
     *
     * @param nettyInNetBuf   the Netty view of the inbound ciphertext
     * @param nioInNetBuf     the NIO view of the same bytes, consumed by the engine
     * @param initialNettyOutAppBufCapacity  sizing hint for the output buffer
     *        ({@code -1} when called for non-app data only)
     * @return a buffer of decrypted application data, or {@code null} if none
     *         was produced
     * @throws SSLException if the engine fails; the handshake future is failed first
     */
    private ChannelBuffer unwrap(
            ChannelHandlerContext ctx, Channel channel,
            ChannelBuffer nettyInNetBuf, ByteBuffer nioInNetBuf,
            int initialNettyOutAppBufCapacity) throws SSLException {
        final int nettyInNetBufStartOffset = nettyInNetBuf.readerIndex();
        final int nioInNetBufStartOffset = nioInNetBuf.position();
        final ByteBuffer nioOutAppBuf = bufferPool.acquireBuffer();
        ChannelBuffer nettyOutAppBuf = null;
        try {
            boolean needsWrap = false;
            for (;;) {
                SSLEngineResult result;
                boolean needsHandshake = false;
                synchronized (handshakeLock) {
                    // A server-mode engine that has not started handshaking
                    // yet must be kicked off before it can unwrap anything.
                    if (!handshaken && !handshaking &&
                        !engine.getUseClientMode() &&
                        !engine.isInboundDone() && !engine.isOutboundDone()) {
                        needsHandshake = true;
                    }
                }
                if (needsHandshake) {
                    handshake();
                }
                synchronized (handshakeLock) {
                    // Decrypt at least one record in the inbound network buffer.
                    // It is impossible to consume no record here because we made sure the inbound network buffer
                    // always contain at least one record in decode(). Therefore, if SSLEngine.unwrap() returns
                    // BUFFER_OVERFLOW, it is always resolved by retrying after emptying the application buffer.
                    for (;;) {
                        try {
                            result = engine.unwrap(nioInNetBuf, nioOutAppBuf);
                            switch (result.getStatus()) {
                            case CLOSED:
                                // notify about the CLOSED state of the SSLEngine. See #137
                                sslEngineCloseFuture.setClosed();
                                break;
                            case BUFFER_OVERFLOW:
                                // Flush the unwrapped data in the outAppBuf into frame and try again.
                                // See the finally block.
                                continue;
                            }
                            break;
                        } finally {
                            nioOutAppBuf.flip();
                            // Sync the offset of the inbound buffer.
                            nettyInNetBuf.readerIndex(
                                    nettyInNetBufStartOffset + nioInNetBuf.position() - nioInNetBufStartOffset);
                            // Copy the unwrapped data into a smaller buffer.
                            if (nioOutAppBuf.hasRemaining()) {
                                if (nettyOutAppBuf == null) {
                                    ChannelBufferFactory factory = ctx.getChannel().getConfig().getBufferFactory();
                                    nettyOutAppBuf = factory.getBuffer(initialNettyOutAppBufCapacity);
                                }
                                nettyOutAppBuf.writeBytes(nioOutAppBuf);
                            }
                            nioOutAppBuf.clear();
                        }
                    }
                    final HandshakeStatus handshakeStatus = result.getHandshakeStatus();
                    handleRenegotiation(handshakeStatus);
                    switch (handshakeStatus) {
                    case NEED_UNWRAP:
                        break;
                    case NEED_WRAP:
                        wrapNonAppData(ctx, channel);
                        break;
                    case NEED_TASK:
                        runDelegatedTasks();
                        break;
                    case FINISHED:
                        setHandshakeSuccess(channel);
                        needsWrap = true;
                        continue;
                    case NOT_HANDSHAKING:
                        if (setHandshakeSuccessIfStillHandshaking(channel)) {
                            needsWrap = true;
                            continue;
                        }
                        if (writeBeforeHandshakeDone) {
                            // We need to call wrap(...) in case there was a flush done before the handshake completed.
                            //
                            // See https://github.com/netty/netty/pull/2437
                            writeBeforeHandshakeDone = false;
                            needsWrap = true;
                        }
                        break;
                    default:
                        throw new IllegalStateException(
                                "Unknown handshake status: " + handshakeStatus);
                    }
                    // No more progress possible: either underflow (need more
                    // input) or the engine neither consumed nor produced bytes.
                    if (result.getStatus() == Status.BUFFER_UNDERFLOW ||
                        result.bytesConsumed() == 0 && result.bytesProduced() == 0) {
                        break;
                    }
                }
            }
            if (needsWrap) {
                // wrap() acquires pendingUnencryptedWrites first and then
                // handshakeLock. If handshakeLock is already hold by the
                // current thread, calling wrap() will lead to a dead lock
                // i.e. pendingUnencryptedWrites -> handshakeLock vs.
                // handshakeLock -> pendingUnencryptedLock -> handshakeLock
                //
                // There is also the same issue between pendingEncryptedWrites
                // and pendingUnencryptedWrites.
                if (!Thread.holdsLock(handshakeLock) && !pendingEncryptedWritesLock.isHeldByCurrentThread()) {
                    wrap(ctx, channel);
                }
            }
        } catch (SSLException e) {
            setHandshakeFailure(channel, e);
            throw e;
        } finally {
            bufferPool.releaseBuffer(nioOutAppBuf);
        }
        if (nettyOutAppBuf != null && nettyOutAppBuf.readable()) {
            return nettyOutAppBuf;
        } else {
            return null;
        }
    }
    /**
     * Detects a peer-initiated renegotiation and either restarts the
     * handshake (when renegotiation is enabled) or raises an exception and
     * closes the connection (when it is disabled).
     */
    private void handleRenegotiation(HandshakeStatus handshakeStatus) {
        synchronized (handshakeLock) {
            if (handshakeStatus == HandshakeStatus.NOT_HANDSHAKING ||
                handshakeStatus == HandshakeStatus.FINISHED) {
                // Not handshaking
                return;
            }
            if (!handshaken) {
                // Not renegotiation
                return;
            }
            final boolean renegotiate;
            if (handshaking) {
                // Renegotiation in progress or failed already.
                // i.e. Renegotiation check has been done already below.
                return;
            }
            if (engine.isInboundDone() || engine.isOutboundDone()) {
                // Not handshaking but closing.
                return;
            }
            if (isEnableRenegotiation()) {
                // Continue renegotiation.
                renegotiate = true;
            } else {
                // Do not renegotiate.
                renegotiate = false;
                // Prevent reentrance of this method.
                handshaking = true;
            }
            if (renegotiate) {
                // Renegotiate.
                handshake();
            } else {
                // Raise an exception.
                fireExceptionCaught(
                        ctx, new SSLException(
                                "renegotiation attempted by peer; " +
                                "closing the connection"));
                // Close the connection to stop renegotiation.
                Channels.close(ctx, succeededFuture(ctx.getChannel()));
            }
        }
    }
/**
* Fetches all delegated tasks from the {@link SSLEngine} and runs them via the {@link #delegatedTaskExecutor}.
* If the {@link #delegatedTaskExecutor} is {@link ImmediateExecutor}, just call {@link Runnable#run()} directly
* instead of using {@link Executor#execute(Runnable)}. Otherwise, run the tasks via
* the {@link #delegatedTaskExecutor} and wait until the tasks are finished.
*/
    private void runDelegatedTasks() {
        if (delegatedTaskExecutor == ImmediateExecutor.INSTANCE) {
            // Inline execution: fetch and run each task one by one on the
            // calling thread.
            for (;;) {
                final Runnable task;
                synchronized (handshakeLock) {
                    // SSLEngine access is guarded by handshakeLock, consistent
                    // with the rest of this handler.
                    task = engine.getDelegatedTask();
                }
                if (task == null) {
                    break;
                }
                delegatedTaskExecutor.execute(task);
            }
        } else {
            // Collect all currently pending tasks first, then submit them as
            // one batch and block until the batch has completed.
            final List<Runnable> tasks = new ArrayList<Runnable>(2);
            for (;;) {
                final Runnable task;
                synchronized (handshakeLock) {
                    task = engine.getDelegatedTask();
                }
                if (task == null) {
                    break;
                }
                tasks.add(task);
            }
            if (tasks.isEmpty()) {
                return;
            }
            final CountDownLatch latch = new CountDownLatch(1);
            delegatedTaskExecutor.execute(new Runnable() {
                public void run() {
                    try {
                        for (Runnable task: tasks) {
                            task.run();
                        }
                    } catch (Exception e) {
                        fireExceptionCaught(ctx, e);
                    } finally {
                        // Always release the waiting thread, even on failure.
                        latch.countDown();
                    }
                }
            });
            // Wait uninterruptibly; re-assert the interrupt status afterwards
            // so the caller still observes it.
            boolean interrupted = false;
            while (latch.getCount() != 0) {
                try {
                    latch.await();
                } catch (InterruptedException e) {
                    // Interrupt later.
                    interrupted = true;
                }
            }
            if (interrupted) {
                Thread.currentThread().interrupt();
            }
        }
    }
/**
* Works around some Android {@link SSLEngine} implementations that skip {@link HandshakeStatus#FINISHED} and
* go straight into {@link HandshakeStatus#NOT_HANDSHAKING} when handshake is finished.
*
* @return {@code true} if and only if the workaround has been applied and thus {@link #handshakeFuture} has been
* marked as success by this method
*/
private boolean setHandshakeSuccessIfStillHandshaking(Channel channel) {
if (handshaking && !handshakeFuture.isDone()) {
setHandshakeSuccess(channel);
return true;
}
return false;
}
    /**
     * Marks the handshake as completed successfully and cancels the timeout
     * task.  The handshake future is created lazily here if {@link #handshake()}
     * was never called explicitly (e.g. a peer-initiated handshake).
     */
    private void setHandshakeSuccess(Channel channel) {
        synchronized (handshakeLock) {
            handshaking = false;
            handshaken = true;
            if (handshakeFuture == null) {
                handshakeFuture = future(channel);
            }
            cancelHandshakeTimeout();
        }
        if (logger.isDebugEnabled()) {
            logger.debug(channel + " HANDSHAKEN: " + engine.getSession().getCipherSuite());
        }
        // Listener notification happens outside of the synchronized block.
        handshakeFuture.setSuccess();
    }
    /**
     * Marks an in-progress handshake as failed with the given cause, cancels
     * the timeout task and releases the {@link SSLEngine} resources.
     * No-op when no handshake is in progress.
     */
    private void setHandshakeFailure(Channel channel, SSLException cause) {
        synchronized (handshakeLock) {
            if (!handshaking) {
                return;
            }
            handshaking = false;
            handshaken = false;
            if (handshakeFuture == null) {
                handshakeFuture = future(channel);
            }
            // cancel the timeout now
            cancelHandshakeTimeout();
            // Release all resources such as internal buffers that SSLEngine
            // is managing.
            engine.closeOutbound();
            try {
                engine.closeInbound();
            } catch (SSLException e) {
                if (logger.isDebugEnabled()) {
                    logger.debug(
                            "SSLEngine.closeInbound() raised an exception after " +
                            "a handshake failure.", e);
                }
            }
        }
        // Listener notification happens outside of the synchronized block.
        handshakeFuture.setFailure(cause);
        if (closeOnSslException) {
            Channels.close(ctx, future(channel));
        }
    }
    /**
     * Sends a {@code close_notify} message (if the engine's outbound side is
     * still open) before forwarding a close / unbind / disconnect request
     * downstream.  The {@code closedOutboundAndChannel} CAS guarantees the
     * tear-down logic runs at most once even under concurrent close attempts.
     */
    private void closeOutboundAndChannel(
            final ChannelHandlerContext context, final ChannelStateEvent e) {
        if (!e.getChannel().isConnected()) {
            context.sendDownstream(e);
            return;
        }
        // Ensure that the tear-down logic beyond this point is never invoked concurrently nor multiple times.
        if (!CLOSED_OUTBOUND_AND_CHANNEL_UPDATER.compareAndSet(this, 0, 1)) {
            // The other thread called this method already, and thus the connection will be closed eventually.
            // So, just wait until the connection is closed, and then forward the event so that the sink handles
            // the duplicate close attempt.
            e.getChannel().getCloseFuture().addListener(new ChannelFutureListener() {
                public void operationComplete(ChannelFuture future) throws Exception {
                    context.sendDownstream(e);
                }
            });
            return;
        }
        boolean passthrough = true;
        try {
            try {
                // Best-effort unwrap of pending non-application data first; a
                // failure here is logged but does not abort the close.
                unwrapNonAppData(ctx, e.getChannel());
            } catch (SSLException ex) {
                if (logger.isDebugEnabled()) {
                    logger.debug("Failed to unwrap before sending a close_notify message", ex);
                }
            }
            if (!engine.isOutboundDone()) {
                if (SENT_CLOSE_NOTIFY_UPDATER.compareAndSet(this, 0, 1)) {
                    engine.closeOutbound();
                    try {
                        // Defer the actual channel close until close_notify
                        // has been written (see ClosingChannelFutureListener).
                        ChannelFuture closeNotifyFuture = wrapNonAppData(context, e.getChannel());
                        closeNotifyFuture.addListener(
                                new ClosingChannelFutureListener(context, e));
                        passthrough = false;
                    } catch (SSLException ex) {
                        if (logger.isDebugEnabled()) {
                            logger.debug("Failed to encode a close_notify message", ex);
                        }
                    }
                }
            }
        } finally {
            // If close_notify was not (or could not be) sent, forward the
            // original close request unchanged.
            if (passthrough) {
                context.sendDownstream(e);
            }
        }
    }
    /**
     * A queued write request that has not been encrypted yet.
     * {@code outAppBuf} is {@code null} when the original message had no
     * readable bytes (see handleDownstream()).
     */
    private static final class PendingWrite {
        final ChannelFuture future;
        final ByteBuffer outAppBuf;
        PendingWrite(ChannelFuture future, ByteBuffer outAppBuf) {
            this.future = future;
            this.outAppBuf = outAppBuf;
        }
    }
    /**
     * Closes the channel once the {@code close_notify} write has completed.
     * If that write failed because the channel was already closed
     * ({@link ClosedChannelException}), the original close request is simply
     * marked as successful instead of issuing another close.
     */
    private static final class ClosingChannelFutureListener implements ChannelFutureListener {
        private final ChannelHandlerContext context;
        private final ChannelStateEvent e;
        ClosingChannelFutureListener(
                ChannelHandlerContext context, ChannelStateEvent e) {
            this.context = context;
            this.e = e;
        }
        public void operationComplete(ChannelFuture closeNotifyFuture) throws Exception {
            if (!(closeNotifyFuture.getCause() instanceof ClosedChannelException)) {
                Channels.close(context, e.getFuture());
            } else {
                e.getFuture().setSuccess();
            }
        }
    }
    @Override
    public void beforeAdd(ChannelHandlerContext ctx) throws Exception {
        super.beforeAdd(ctx);
        // Capture the context; most of this handler's methods (handshake(),
        // close(), ...) rely on the ctx field being set.
        this.ctx = ctx;
    }
/**
* Fail all pending writes which we were not able to flush out
*/
@Override
public void afterRemove(ChannelHandlerContext ctx) throws Exception {
closeEngine();
// there is no need for synchronization here as we do not receive downstream events anymore
Throwable cause = null;
for (;;) {
PendingWrite pw = pendingUnencryptedWrites.poll();
if (pw == null) {
break;
}
if (cause == null) {
cause = new IOException("Unable to write data");
}
pw.future.setFailure(cause);
}
for (;;) {
MessageEvent ev = pendingEncryptedWrites.poll();
if (ev == null) {
break;
}
if (cause == null) {
cause = new IOException("Unable to write data");
}
ev.getFuture().setFailure(cause);
}
if (cause != null) {
fireExceptionCaughtLater(ctx, cause);
}
}
/**
* Calls {@link #handshake()} once the {@link Channel} is connected
*/
    @Override
    public void channelConnected(final ChannelHandlerContext ctx, final ChannelStateEvent e) throws Exception {
        if (issueHandshake) {
            // issue and handshake and add a listener to it which will fire an exception event if
            // an exception was thrown while doing the handshake
            handshake().addListener(new ChannelFutureListener() {
                public void operationComplete(ChannelFuture future) throws Exception {
                    if (future.isSuccess()) {
                        // Send the event upstream after the handshake was completed without an error.
                        //
                        // See https://github.com/netty/netty/issues/358
                        ctx.sendUpstream(e);
                    }
                    // On failure the connected event is intentionally not
                    // propagated; the handshake machinery reports the error.
                }
            });
        } else {
            // Automatic handshake disabled: behave like a plain handler.
            super.channelConnected(ctx, e);
        }
    }
/**
* Loop over all the pending writes and fail them.
*
* See <a href="https://github.com/netty/netty/issues/305">#305</a> for more details.
*/
    @Override
    public void channelClosed(final ChannelHandlerContext ctx, ChannelStateEvent e) throws Exception {
        // Move the fail of the writes to the IO-Thread to prevent possible deadlock
        // See https://github.com/netty/netty/issues/989
        ctx.getPipeline().execute(new Runnable() {
            public void run() {
                // tryLock (not lock): if another thread holds the lock, skip
                // draining here to avoid the deadlock described in #989.
                if (!pendingUnencryptedWritesLock.tryLock()) {
                    return;
                }
                Throwable cause = null;
                try {
                    // Fail every queued unencrypted write ...
                    for (;;) {
                        PendingWrite pw = pendingUnencryptedWrites.poll();
                        if (pw == null) {
                            break;
                        }
                        if (cause == null) {
                            cause = new ClosedChannelException();
                        }
                        pw.future.setFailure(cause);
                    }
                    // ... and every queued encrypted write.
                    for (;;) {
                        MessageEvent ev = pendingEncryptedWrites.poll();
                        if (ev == null) {
                            break;
                        }
                        if (cause == null) {
                            cause = new ClosedChannelException();
                        }
                        ev.getFuture().setFailure(cause);
                    }
                } finally {
                    pendingUnencryptedWritesLock.unlock();
                }
                if (cause != null) {
                    fireExceptionCaught(ctx, cause);
                }
            }
        });
        super.channelClosed(ctx, e);
    }
    /**
     * Future returned by getSSLEngineInboundCloseFuture().  It can only be
     * completed through the package-private {@link #setClosed()}; the public
     * {@link #setSuccess()} / {@link #setFailure(Throwable)} are no-ops so
     * external callers cannot complete it.
     */
    private final class SSLEngineInboundCloseFuture extends DefaultChannelFuture {
        SSLEngineInboundCloseFuture() {
            // No channel is known at construction time; getChannel() resolves
            // it lazily from ctx.
            super(null, true);
        }
        void setClosed() {
            // Bypass the disabled public setSuccess() below.
            super.setSuccess();
        }
        @Override
        public Channel getChannel() {
            if (ctx == null) {
                // Maybe we should better throw an IllegalStateException() ?
                return null;
            } else {
                return ctx.getChannel();
            }
        }
        @Override
        public boolean setSuccess() {
            return false;
        }
        @Override
        public boolean setFailure(Throwable cause) {
            return false;
        }
    }
}
|
/*
* Copyright 2012 The Netty Project
*
* The Netty Project licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package org.jboss.netty.handler.ssl;
import org.jboss.netty.buffer.ChannelBuffer;
import org.jboss.netty.buffer.ChannelBufferFactory;
import org.jboss.netty.buffer.ChannelBuffers;
import org.jboss.netty.channel.Channel;
import org.jboss.netty.channel.ChannelDownstreamHandler;
import org.jboss.netty.channel.ChannelEvent;
import org.jboss.netty.channel.ChannelFuture;
import org.jboss.netty.channel.ChannelFutureListener;
import org.jboss.netty.channel.ChannelHandlerContext;
import org.jboss.netty.channel.ChannelPipeline;
import org.jboss.netty.channel.ChannelStateEvent;
import org.jboss.netty.channel.Channels;
import org.jboss.netty.channel.DefaultChannelFuture;
import org.jboss.netty.channel.DownstreamMessageEvent;
import org.jboss.netty.channel.ExceptionEvent;
import org.jboss.netty.channel.MessageEvent;
import org.jboss.netty.handler.codec.frame.FrameDecoder;
import org.jboss.netty.logging.InternalLogger;
import org.jboss.netty.logging.InternalLoggerFactory;
import org.jboss.netty.util.Timeout;
import org.jboss.netty.util.Timer;
import org.jboss.netty.util.TimerTask;
import org.jboss.netty.util.internal.DetectionUtil;
import org.jboss.netty.util.internal.NonReentrantLock;
import javax.net.ssl.SSLEngine;
import javax.net.ssl.SSLEngineResult;
import javax.net.ssl.SSLEngineResult.HandshakeStatus;
import javax.net.ssl.SSLEngineResult.Status;
import javax.net.ssl.SSLException;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.ClosedChannelException;
import java.nio.channels.DatagramChannel;
import java.nio.channels.SocketChannel;
import java.util.ArrayList;
import java.util.LinkedList;
import java.util.List;
import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.Executor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicIntegerFieldUpdater;
import java.util.regex.Pattern;
import static org.jboss.netty.channel.Channels.*;
/**
* Adds <a href="http://en.wikipedia.org/wiki/Transport_Layer_Security">SSL
* · TLS</a> and StartTLS support to a {@link Channel}. Please refer
* to the <strong>"SecureChat"</strong> example in the distribution or the web
* site for the detailed usage.
*
* <h3>Beginning the handshake</h3>
* <p>
* You must make sure not to write a message while the
* {@linkplain #handshake() handshake} is in progress unless you are
* renegotiating. You will be notified by the {@link ChannelFuture} which is
* returned by the {@link #handshake()} method when the handshake
* process succeeds or fails.
*
* <h3>Handshake</h3>
* <p>
* If {@link #isIssueHandshake()} is {@code false}
* (default) you will need to take care of calling {@link #handshake()} by your own. In most
 * situations where {@link SslHandler} is used in 'client mode' you want to issue a handshake once
 * the connection was established. If {@link #setIssueHandshake(boolean)} is set to {@code true}
* you don't need to worry about this as the {@link SslHandler} will take care of it.
* <p>
*
* <h3>Renegotiation</h3>
* <p>
* If {@link #isEnableRenegotiation() enableRenegotiation} is {@code true}
* (default) and the initial handshake has been done successfully, you can call
* {@link #handshake()} to trigger the renegotiation.
* <p>
* If {@link #isEnableRenegotiation() enableRenegotiation} is {@code false},
* an attempt to trigger renegotiation will result in the connection closure.
* <p>
* Please note that TLS renegotiation had a security issue before. If your
* runtime environment did not fix it, please make sure to disable TLS
* renegotiation by calling {@link #setEnableRenegotiation(boolean)} with
* {@code false}. For more information, please refer to the following documents:
* <ul>
* <li><a href="http://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2009-3555">CVE-2009-3555</a></li>
* <li><a href="http://www.ietf.org/rfc/rfc5746.txt">RFC5746</a></li>
* <li><a href="http://www.oracle.com/technetwork/java/javase/documentation/tlsreadme2-176330.html">Phased
* Approach to Fixing the TLS Renegotiation Issue</a></li>
* </ul>
*
* <h3>Closing the session</h3>
* <p>
* To close the SSL session, the {@link #close()} method should be
* called to send the {@code close_notify} message to the remote peer. One
* exception is when you close the {@link Channel} - {@link SslHandler}
* intercepts the close request and send the {@code close_notify} message
* before the channel closure automatically. Once the SSL session is closed,
* it is not reusable, and consequently you should create a new
* {@link SslHandler} with a new {@link SSLEngine} as explained in the
* following section.
*
* <h3>Restarting the session</h3>
* <p>
* To restart the SSL session, you must remove the existing closed
* {@link SslHandler} from the {@link ChannelPipeline}, insert a new
* {@link SslHandler} with a new {@link SSLEngine} into the pipeline,
* and start the handshake process as described in the first section.
*
* <h3>Implementing StartTLS</h3>
* <p>
* <a href="http://en.wikipedia.org/wiki/STARTTLS">StartTLS</a> is the
* communication pattern that secures the wire in the middle of the plaintext
* connection. Please note that it is different from SSL · TLS, that
* secures the wire from the beginning of the connection. Typically, StartTLS
* is composed of three steps:
* <ol>
* <li>Client sends a StartTLS request to server.</li>
* <li>Server sends a StartTLS response to client.</li>
* <li>Client begins SSL handshake.</li>
* </ol>
* If you implement a server, you need to:
* <ol>
* <li>create a new {@link SslHandler} instance with {@code startTls} flag set
* to {@code true},</li>
* <li>insert the {@link SslHandler} to the {@link ChannelPipeline}, and</li>
* <li>write a StartTLS response.</li>
* </ol>
* Please note that you must insert {@link SslHandler} <em>before</em> sending
 * the StartTLS response. Otherwise the client may begin the SSL handshake
* before {@link SslHandler} is inserted to the {@link ChannelPipeline}, causing
* data corruption.
* <p>
* The client-side implementation is much simpler.
* <ol>
* <li>Write a StartTLS request,</li>
* <li>wait for the StartTLS response,</li>
* <li>create a new {@link SslHandler} instance with {@code startTls} flag set
* to {@code false},</li>
* <li>insert the {@link SslHandler} to the {@link ChannelPipeline}, and</li>
* <li>Initiate SSL handshake by calling {@link SslHandler#handshake()}.</li>
* </ol>
*
* <h3>Known issues</h3>
* <p>
* Because of a known issue with the current implementation of the SslEngine that comes
* with Java it may be possible that you see blocked IO-Threads while a full GC is done.
* <p>
* So if you are affected you can workaround this problem by adjust the cache settings
* like shown below:
*
* <pre>
* SslContext context = ...;
* context.getServerSessionContext().setSessionCacheSize(someSaneSize);
 * context.getServerSessionContext().setSessionTimeout(someSaneTimeout);
* </pre>
* <p>
* What values to use here depends on the nature of your application and should be set
* based on monitoring and debugging of it.
* For more details see
* <a href="https://github.com/netty/netty/issues/832">#832</a> in our issue tracker.
* @apiviz.landmark
* @apiviz.uses org.jboss.netty.handler.ssl.SslBufferPool
*/
public class SslHandler extends FrameDecoder
implements ChannelDownstreamHandler {
    private static final InternalLogger logger = InternalLoggerFactory.getInstance(SslHandler.class);
    private static final ByteBuffer EMPTY_BUFFER = ByteBuffer.allocate(0);
    // Channel implementation class names whose read() failures can be treated
    // as a normal connection teardown (see ignoreException()).
    private static final Pattern IGNORABLE_CLASS_IN_STACK = Pattern.compile(
            "^.*(?:Socket|Datagram|Sctp|Udt)Channel.*$");
    // "connection reset" / "broken pipe"-style messages, matched
    // case-insensitively (see ignoreException()).
    private static final Pattern IGNORABLE_ERROR_MESSAGE = Pattern.compile(
            "^.*(?:connection.*(?:reset|closed|abort|broken)|broken.*pipe).*$", Pattern.CASE_INSENSITIVE);
    // Shared fallback pool, lazily created by getDefaultBufferPool().
    private static SslBufferPool defaultBufferPool;
/**
* Returns the default {@link SslBufferPool} used when no pool is
* specified in the constructor.
*/
    public static synchronized SslBufferPool getDefaultBufferPool() {
        // Lazy init; thread safety comes from the synchronized method
        // (class-level lock).
        if (defaultBufferPool == null) {
            defaultBufferPool = new SslBufferPool();
        }
        return defaultBufferPool;
    }
    // Set in beforeAdd(); used by handshake(), close() and the inner future.
    private volatile ChannelHandlerContext ctx;
    private final SSLEngine engine;
    private final SslBufferPool bufferPool;
    private final Executor delegatedTaskExecutor;
    // If true, the very first write request is passed through unencrypted.
    private final boolean startTls;
    private volatile boolean enableRenegotiation = true;
    // Guards all handshake state and SSLEngine access in this handler.
    final Object handshakeLock = new Object();
    private boolean handshaking;            // guarded by handshakeLock
    private volatile boolean handshaken;
    private volatile ChannelFuture handshakeFuture;
    // The int fields below are flags (0/1) mutated via the field updaters.
    @SuppressWarnings("UnusedDeclaration")
    private volatile int sentFirstMessage;
    @SuppressWarnings("UnusedDeclaration")
    private volatile int sentCloseNotify;
    @SuppressWarnings("UnusedDeclaration")
    private volatile int closedOutboundAndChannel;
    private static final AtomicIntegerFieldUpdater<SslHandler> SENT_FIRST_MESSAGE_UPDATER =
            AtomicIntegerFieldUpdater.newUpdater(SslHandler.class, "sentFirstMessage");
    private static final AtomicIntegerFieldUpdater<SslHandler> SENT_CLOSE_NOTIFY_UPDATER =
            AtomicIntegerFieldUpdater.newUpdater(SslHandler.class, "sentCloseNotify");
    private static final AtomicIntegerFieldUpdater<SslHandler> CLOSED_OUTBOUND_AND_CHANNEL_UPDATER =
            AtomicIntegerFieldUpdater.newUpdater(SslHandler.class, "closedOutboundAndChannel");
    // Counts ClosedChannelExceptions to swallow (see exceptionCaught()).
    int ignoreClosedChannelException;
    final Object ignoreClosedChannelExceptionLock = new Object();
    // Write queues: plain LinkedList guarded by its NonReentrantLock vs. a
    // lock-free queue for already-encrypted writes.
    private final Queue<PendingWrite> pendingUnencryptedWrites = new LinkedList<PendingWrite>();
    private final NonReentrantLock pendingUnencryptedWritesLock = new NonReentrantLock();
    private final Queue<MessageEvent> pendingEncryptedWrites = new ConcurrentLinkedQueue<MessageEvent>();
    private final NonReentrantLock pendingEncryptedWritesLock = new NonReentrantLock();
    private volatile boolean issueHandshake;
    private volatile boolean writeBeforeHandshakeDone;
    private final SSLEngineInboundCloseFuture sslEngineCloseFuture = new SSLEngineInboundCloseFuture();
    private boolean closeOnSslException;
    private int packetLength;
    // Timer/timeout for failing a handshake that takes too long; both may be
    // absent (null timer / 0 timeout).
    private final Timer timer;
    private final long handshakeTimeoutInMillis;
    private Timeout handshakeTimeout;
/**
* Creates a new instance.
*
* @param engine the {@link SSLEngine} this handler will use
*/
    public SslHandler(SSLEngine engine) {
        // Defaults: shared buffer pool, no startTls, no handshake timeout.
        this(engine, getDefaultBufferPool(), false, null, 0);
    }
/**
* Creates a new instance.
*
* @param engine the {@link SSLEngine} this handler will use
* @param bufferPool the {@link SslBufferPool} where this handler will
* acquire the buffers required by the {@link SSLEngine}
*/
    public SslHandler(SSLEngine engine, SslBufferPool bufferPool) {
        // Defaults: no startTls, no handshake timeout.
        this(engine, bufferPool, false, null, 0);
    }
/**
* Creates a new instance.
*
* @param engine the {@link SSLEngine} this handler will use
* @param startTls {@code true} if the first write request shouldn't be
* encrypted by the {@link SSLEngine}
*/
    public SslHandler(SSLEngine engine, boolean startTls) {
        // Uses the shared default buffer pool.
        this(engine, getDefaultBufferPool(), startTls);
    }
/**
* Creates a new instance.
*
* @param engine the {@link SSLEngine} this handler will use
* @param bufferPool the {@link SslBufferPool} where this handler will
* acquire the buffers required by the {@link SSLEngine}
* @param startTls {@code true} if the first write request shouldn't be
* encrypted by the {@link SSLEngine}
*/
    public SslHandler(SSLEngine engine, SslBufferPool bufferPool, boolean startTls) {
        // No handshake timeout (null timer, 0 ms).
        this(engine, bufferPool, startTls, null, 0);
    }
/**
* Creates a new instance.
*
* @param engine
* the {@link SSLEngine} this handler will use
* @param bufferPool
* the {@link SslBufferPool} where this handler will acquire
* the buffers required by the {@link SSLEngine}
* @param startTls
* {@code true} if the first write request shouldn't be encrypted
* by the {@link SSLEngine}
* @param timer
* the {@link Timer} which will be used to process the timeout of the {@link #handshake()}.
     *            Be aware that the given {@link Timer} will not get stopped automatically, so it is up to you
     *            to clean it up once you no longer need it
     * @param handshakeTimeoutInMillis
     *            the time in milliseconds after which the {@link #handshake()} will be failed, and so the future notified
*/
    @SuppressWarnings("deprecation")
    public SslHandler(SSLEngine engine, SslBufferPool bufferPool, boolean startTls,
                      Timer timer, long handshakeTimeoutInMillis) {
        // Delegated SSLEngine tasks run inline on the calling thread.
        this(engine, bufferPool, startTls, ImmediateExecutor.INSTANCE, timer, handshakeTimeoutInMillis);
    }
/**
* @deprecated Use {@link #SslHandler(SSLEngine)} instead.
*/
    @Deprecated
    public SslHandler(SSLEngine engine, Executor delegatedTaskExecutor) {
        // Deprecated variant: uses the shared default buffer pool.
        this(engine, getDefaultBufferPool(), delegatedTaskExecutor);
    }
/**
* @deprecated Use {@link #SslHandler(SSLEngine, boolean)} instead.
*/
    @Deprecated
    public SslHandler(SSLEngine engine, SslBufferPool bufferPool, Executor delegatedTaskExecutor) {
        // Deprecated variant: startTls disabled.
        this(engine, bufferPool, false, delegatedTaskExecutor);
    }
/**
* @deprecated Use {@link #SslHandler(SSLEngine, boolean)} instead.
*/
    @Deprecated
    public SslHandler(SSLEngine engine, boolean startTls, Executor delegatedTaskExecutor) {
        // Deprecated variant: uses the shared default buffer pool.
        this(engine, getDefaultBufferPool(), startTls, delegatedTaskExecutor);
    }
/**
* @deprecated Use {@link #SslHandler(SSLEngine, SslBufferPool, boolean)} instead.
*/
    @Deprecated
    public SslHandler(SSLEngine engine, SslBufferPool bufferPool, boolean startTls, Executor delegatedTaskExecutor) {
        // Deprecated variant: no handshake timeout.
        this(engine, bufferPool, startTls, delegatedTaskExecutor, null, 0);
    }
/**
* @deprecated Use {@link #SslHandler(SSLEngine engine, SslBufferPool bufferPool, boolean startTls, Timer timer,
* long handshakeTimeoutInMillis)} instead.
*/
    @Deprecated
    public SslHandler(SSLEngine engine, SslBufferPool bufferPool, boolean startTls, Executor delegatedTaskExecutor,
                      Timer timer, long handshakeTimeoutInMillis) {
        // Fail fast on missing mandatory collaborators.
        if (engine == null) {
            throw new NullPointerException("engine");
        }
        if (bufferPool == null) {
            throw new NullPointerException("bufferPool");
        }
        if (delegatedTaskExecutor == null) {
            throw new NullPointerException("delegatedTaskExecutor");
        }
        // A timeout without a timer cannot be scheduled; reject the combination.
        if (timer == null && handshakeTimeoutInMillis > 0) {
            throw new IllegalArgumentException("No Timer was given but a handshakeTimeoutInMillis, need both or none");
        }
        this.engine = engine;
        this.bufferPool = bufferPool;
        this.delegatedTaskExecutor = delegatedTaskExecutor;
        this.startTls = startTls;
        this.timer = timer;
        this.handshakeTimeoutInMillis = handshakeTimeoutInMillis;
    }
/**
* Returns the {@link SSLEngine} which is used by this handler.
*/
    public SSLEngine getEngine() {
        // Plain accessor; the engine is final and set in the constructor.
        return engine;
    }
/**
* Starts an SSL / TLS handshake for the specified channel.
*
* @return a {@link ChannelFuture} which is notified when the handshake
* succeeds or fails.
*/
    public ChannelFuture handshake() {
        synchronized (handshakeLock) {
            if (handshaken && !isEnableRenegotiation()) {
                throw new IllegalStateException("renegotiation disabled");
            }
            final ChannelHandlerContext ctx = this.ctx;
            final Channel channel = ctx.getChannel();
            // NOTE: this local deliberately shadows the handshakeFuture field;
            // both are assigned together below.
            ChannelFuture handshakeFuture;
            Exception exception = null;
            if (handshaking) {
                // A handshake is already in progress - return its future.
                return this.handshakeFuture;
            }
            handshaking = true;
            try {
                engine.beginHandshake();
                runDelegatedTasks();
                handshakeFuture = this.handshakeFuture = future(channel);
                if (handshakeTimeoutInMillis > 0) {
                    // Schedule a task that fails the handshake future if it is
                    // still incomplete when the timeout fires.
                    handshakeTimeout = timer.newTimeout(new TimerTask() {
                            public void run(Timeout timeout) throws Exception {
                                ChannelFuture future = SslHandler.this.handshakeFuture;
                                if (future != null && future.isDone()) {
                                    return;
                                }
                                setHandshakeFailure(channel, new SSLException("Handshake did not complete within " +
                                        handshakeTimeoutInMillis + "ms"));
                            }
                        }, handshakeTimeoutInMillis, TimeUnit.MILLISECONDS);
                }
            } catch (Exception e) {
                handshakeFuture = this.handshakeFuture = failedFuture(channel, e);
                exception = e;
            }
            if (exception == null) { // Began handshake successfully.
                try {
                    final ChannelFuture hsFuture = handshakeFuture;
                    // Flush the initial handshake data; propagate any write
                    // failure to the handshake future.
                    wrapNonAppData(ctx, channel).addListener(new ChannelFutureListener() {
                        public void operationComplete(ChannelFuture future) throws Exception {
                            if (!future.isSuccess()) {
                                Throwable cause = future.getCause();
                                hsFuture.setFailure(cause);
                                fireExceptionCaught(ctx, cause);
                                if (closeOnSslException) {
                                    Channels.close(ctx, future(channel));
                                }
                            }
                        }
                    });
                } catch (SSLException e) {
                    handshakeFuture.setFailure(e);
                    fireExceptionCaught(ctx, e);
                    if (closeOnSslException) {
                        Channels.close(ctx, future(channel));
                    }
                }
            } else { // Failed to initiate handshake.
                fireExceptionCaught(ctx, exception);
                if (closeOnSslException) {
                    Channels.close(ctx, future(channel));
                }
            }
            return handshakeFuture;
        }
    }
/**
* @deprecated Use {@link #handshake()} instead.
*/
    @Deprecated
    public ChannelFuture handshake(@SuppressWarnings("unused") Channel channel) {
        // The channel argument is ignored; the handler uses its own context.
        return handshake();
    }
/**
* Sends an SSL {@code close_notify} message to the specified channel and
* destroys the underlying {@link SSLEngine}.
*/
    public ChannelFuture close() {
        ChannelHandlerContext ctx = this.ctx;
        Channel channel = ctx.getChannel();
        try {
            // Mark the outbound side closed and flush the resulting
            // close_notify message.
            engine.closeOutbound();
            return wrapNonAppData(ctx, channel);
        } catch (SSLException e) {
            fireExceptionCaught(ctx, e);
            if (closeOnSslException) {
                Channels.close(ctx, future(channel));
            }
            return failedFuture(channel, e);
        }
    }
/**
* @deprecated Use {@link #close()} instead.
*/
    @Deprecated
    public ChannelFuture close(@SuppressWarnings("unused") Channel channel) {
        // The channel argument is ignored; the handler uses its own context.
        return close();
    }
/**
* Returns {@code true} if and only if TLS renegotiation is enabled.
*/
    public boolean isEnableRenegotiation() {
        // Volatile read; may be toggled at runtime via the setter.
        return enableRenegotiation;
    }
/**
* Enables or disables TLS renegotiation.
*/
    public void setEnableRenegotiation(boolean enableRenegotiation) {
        // Volatile write; consulted by handleRenegotiation() and handshake().
        this.enableRenegotiation = enableRenegotiation;
    }
/**
* Enables or disables the automatic handshake once the {@link Channel} is
* connected. The value will only have affect if its set before the
* {@link Channel} is connected.
*/
    public void setIssueHandshake(boolean issueHandshake) {
        // Consulted in channelConnected(); only effective if set before the
        // channel is connected.
        this.issueHandshake = issueHandshake;
    }
/**
* Returns {@code true} if the automatic handshake is enabled
*/
    public boolean isIssueHandshake() {
        // Volatile read of the auto-handshake flag.
        return issueHandshake;
    }
/**
* Return the {@link ChannelFuture} that will get notified if the inbound of the {@link SSLEngine} will get closed.
*
* This method will return the same {@link ChannelFuture} all the time.
*
* For more informations see the apidocs of {@link SSLEngine}
*
*/
    public ChannelFuture getSSLEngineInboundCloseFuture() {
        // Always the same instance; completed via its setClosed() only.
        return sslEngineCloseFuture;
    }
/**
* Return the timeout (in ms) after which the {@link ChannelFuture} of {@link #handshake()} will be failed, while
* a handshake is in progress
*/
    public long getHandshakeTimeout() {
        // 0 means no timeout is scheduled.
        return handshakeTimeoutInMillis;
    }
/**
* If set to {@code true}, the {@link Channel} will automatically get closed
     * once a {@link SSLException} was caught. This is usually what you want, as after this
     * it is almost impossible to recover.
*
* Anyway the default is {@code false} to not break compatibility with older releases. This
* will be changed to {@code true} in the next major release.
*
*/
    public void setCloseOnSSLException(boolean closeOnSslException) {
        // ctx is assigned in beforeAdd(); a non-null ctx therefore means the
        // handler is already attached and the flag must not change anymore.
        if (ctx != null) {
            throw new IllegalStateException("Can only get changed before attached to ChannelPipeline");
        }
        this.closeOnSslException = closeOnSslException;
    }
    public boolean getCloseOnSSLException() {
        // See setCloseOnSSLException(boolean) for semantics.
        return closeOnSslException;
    }
    /**
     * Intercepts downstream events: close / unbind / disconnect requests
     * trigger the SSL tear-down via closeOutboundAndChannel(), and
     * {@link ChannelBuffer} writes are queued for encryption by wrap().
     * Everything else is passed through unchanged.
     */
    public void handleDownstream(
            final ChannelHandlerContext context, final ChannelEvent evt) throws Exception {
        if (evt instanceof ChannelStateEvent) {
            ChannelStateEvent e = (ChannelStateEvent) evt;
            switch (e.getState()) {
            case OPEN:
            case CONNECTED:
            case BOUND:
                // FALSE or null for any of these states is a request to
                // close / disconnect / unbind.
                if (Boolean.FALSE.equals(e.getValue()) || e.getValue() == null) {
                    closeOutboundAndChannel(context, e);
                    return;
                }
            }
        }
        if (!(evt instanceof MessageEvent)) {
            context.sendDownstream(evt);
            return;
        }
        MessageEvent e = (MessageEvent) evt;
        if (!(e.getMessage() instanceof ChannelBuffer)) {
            context.sendDownstream(evt);
            return;
        }
        // Do not encrypt the first write request if this handler is
        // created with startTLS flag turned on.
        if (startTls && SENT_FIRST_MESSAGE_UPDATER.compareAndSet(this, 0, 1)) {
            context.sendDownstream(evt);
            return;
        }
        // Otherwise, all messages are encrypted.
        ChannelBuffer msg = (ChannelBuffer) e.getMessage();
        PendingWrite pendingWrite;
        if (msg.readable()) {
            pendingWrite = new PendingWrite(evt.getFuture(), msg.toByteBuffer(msg.readerIndex(), msg.readableBytes()));
        } else {
            // Empty message: queue a marker so the future is still completed.
            pendingWrite = new PendingWrite(evt.getFuture(), null);
        }
        pendingUnencryptedWritesLock.lock();
        try {
            pendingUnencryptedWrites.add(pendingWrite);
        } finally {
            pendingUnencryptedWritesLock.unlock();
        }
        if (handshakeFuture == null || !handshakeFuture.isDone()) {
            // Remember that application data was written before the handshake
            // completed.
            writeBeforeHandshakeDone = true;
        }
        wrap(context, evt.getChannel());
    }
    /**
     * Cancels the scheduled handshake-timeout task, if any.  Called from
     * setHandshakeSuccess(), setHandshakeFailure() and channelDisconnected().
     */
    private void cancelHandshakeTimeout() {
        if (handshakeTimeout != null) {
            // cancel the task as we will fail the handshake future now
            handshakeTimeout.cancel();
        }
    }
    @Override
    public void channelDisconnected(ChannelHandlerContext ctx, ChannelStateEvent e) throws Exception {
        // Make sure the handshake future is notified when a connection has
        // been closed during handshake.
        synchronized (handshakeLock) {
            if (handshaking) {
                cancelHandshakeTimeout();
                handshakeFuture.setFailure(new ClosedChannelException());
            }
        }
        try {
            super.channelDisconnected(ctx, e);
        } finally {
            // Even if the superclass throws, drain remaining non-app data and
            // release the engine.
            unwrapNonAppData(ctx, e.getChannel());
            closeEngine();
        }
    }
private void closeEngine() {
engine.closeOutbound();
if (sentCloseNotify == 0 && handshaken) {
try {
engine.closeInbound();
} catch (SSLException ex) {
if (logger.isDebugEnabled()) {
logger.debug("Failed to clean up SSLEngine.", ex);
}
}
}
}
    /**
     * Swallows IO errors that are an expected consequence of an SSL teardown:
     * {@link ClosedChannelException}s explicitly marked to be ignored (via
     * {@link #ignoreClosedChannelException}) and errors that
     * {@link #ignoreException(Throwable)} classifies as harmless.  Everything
     * else is forwarded upstream.
     */
    @Override
    public void exceptionCaught(ChannelHandlerContext ctx, ExceptionEvent e)
            throws Exception {
        Throwable cause = e.getCause();
        if (cause instanceof IOException) {
            if (cause instanceof ClosedChannelException) {
                synchronized (ignoreClosedChannelExceptionLock) {
                    if (ignoreClosedChannelException > 0) {
                        // Consume one "expected" closed-channel exception.
                        ignoreClosedChannelException --;
                        if (logger.isDebugEnabled()) {
                            logger.debug(
                                    "Swallowing an exception raised while " +
                                    "writing non-app data", cause);
                        }
                        return;
                    }
                }
            } else {
                if (ignoreException(cause)) {
                    return;
                }
            }
        }
        ctx.sendUpstream(e);
    }
/**
* Checks if the given {@link Throwable} can be ignore and just "swallowed"
*
* When an ssl connection is closed a close_notify message is sent.
* After that the peer also sends close_notify however, it's not mandatory to receive
* the close_notify. The party who sent the initial close_notify can close the connection immediately
* then the peer will get connection reset error.
*
*/
    /**
     * @param t the throwable raised by the transport
     * @return {@code true} if {@code t} is a non-SSL {@link IOException} that
     *         happened after our outbound side was closed and looks like a
     *         plain connection reset / broken pipe - i.e. safe to swallow
     */
    private boolean ignoreException(Throwable t) {
        // Only consider plain IO errors after we already closed our outbound
        // side; real SSL errors are never ignored.
        if (!(t instanceof SSLException) && t instanceof IOException && engine.isOutboundDone()) {
            String message = String.valueOf(t.getMessage()).toLowerCase();
            // first try to match connection reset / broke peer based on the regex. This is the fastest way
            // but may fail on different jdk impls or OS's
            if (IGNORABLE_ERROR_MESSAGE.matcher(message).matches()) {
                return true;
            }
            // Inspect the StackTraceElements to see if it was a connection reset / broken pipe or not
            StackTraceElement[] elements = t.getStackTrace();
            for (StackTraceElement element: elements) {
                String classname = element.getClassName();
                String methodname = element.getMethodName();
                // skip all classes that belong to the io.netty package
                if (classname.startsWith("org.jboss.netty.")) {
                    continue;
                }
                // check if the method name is read if not skip it
                if (!"read".equals(methodname)) {
                    continue;
                }
                // This will also match against SocketInputStream which is used by openjdk 7 and maybe
                // also others
                if (IGNORABLE_CLASS_IN_STACK.matcher(classname).matches()) {
                    return true;
                }
                try {
                    // No match by now.. Try to load the class via classloader and inspect it.
                    // This is mainly done as other JDK implementations may differ in name of
                    // the impl.
                    Class<?> clazz = getClass().getClassLoader().loadClass(classname);
                    if (SocketChannel.class.isAssignableFrom(clazz)
                            || DatagramChannel.class.isAssignableFrom(clazz)) {
                        return true;
                    }
                    // also match against SctpChannel via String matching as it may not present.
                    if (DetectionUtil.javaVersion() >= 7
                            && "com.sun.nio.sctp.SctpChannel".equals(clazz.getSuperclass().getName())) {
                        return true;
                    }
                } catch (ClassNotFoundException e) {
                    // This should not happen just ignore
                }
            }
        }
        return false;
    }
/**
 * Tells whether the given {@link ChannelBuffer} starts with an SSL/TLS record.
 * The readerIndex of the buffer is left untouched.
 *
 * @param buffer
 *        the {@link ChannelBuffer} to inspect; it must contain at least 5
 *        readable bytes, otherwise an {@link IllegalArgumentException} is thrown
 * @return {@code true} if the buffer content looks like an encrypted record,
 *         {@code false} otherwise
 * @throws IllegalArgumentException
 *         if fewer than 5 bytes are readable
 */
public static boolean isEncrypted(ChannelBuffer buffer) {
    final int packetLength = getEncryptedPacketLength(buffer, buffer.readerIndex());
    return packetLength != -1;
}
/**
 * Returns the total number of bytes occupied by the encrypted packet that starts
 * at {@code offset}, or {@code -1} if the data does not look like an SSL/TLS
 * record at all. The readerIndex of the given {@link ChannelBuffer} is not
 * modified.
 *
 * @param buffer
 *        The {@link ChannelBuffer} to read from. Be aware that it must have at
 *        least 5 bytes to read, otherwise it will throw an
 *        {@link IllegalArgumentException}.
 * @return the length of the encrypted packet contained in the buffer, or
 *         {@code -1} if the buffer is not encrypted at all
 * @throws IllegalArgumentException
 *         if the given {@link ChannelBuffer} has fewer than 5 readable bytes
 */
private static int getEncryptedPacketLength(ChannelBuffer buffer, int offset) {
    int packetLength = 0;

    // SSLv3/TLS records start with a ContentType byte:
    // 20 = change_cipher_spec, 21 = alert, 22 = handshake, 23 = application_data.
    // Anything else means SSLv2 or bad data.
    final int contentType = buffer.getUnsignedByte(offset);
    boolean tls = contentType >= 20 && contentType <= 23;

    if (tls) {
        // SSLv3 or TLS - check the major ProtocolVersion byte.
        if (buffer.getUnsignedByte(offset + 1) == 3) {
            // Record length field plus the 5-byte record header.
            packetLength = (getShort(buffer, offset + 3) & 0xFFFF) + 5;
            if (packetLength <= 5) {
                // Neither SSLv3 nor TLSv1 (i.e. SSLv2 or bad data)
                tls = false;
            }
        } else {
            // Neither SSLv3 nor TLSv1 (i.e. SSLv2 or bad data)
            tls = false;
        }
    }

    if (!tls) {
        // Maybe SSLv2 - check the version field.
        final int headerLength = (buffer.getUnsignedByte(offset) & 0x80) != 0 ? 2 : 3;
        final int majorVersion = buffer.getUnsignedByte(offset + headerLength + 1);
        boolean sslv2 = majorVersion == 2 || majorVersion == 3;
        if (sslv2) {
            // SSLv2: length is masked differently depending on the header size.
            packetLength = headerLength == 2
                    ? (getShort(buffer, offset) & 0x7FFF) + 2
                    : (getShort(buffer, offset) & 0x3FFF) + 3;
            if (packetLength <= headerLength) {
                sslv2 = false;
            }
        }
        if (!sslv2) {
            return -1;
        }
    }
    return packetLength;
}
/**
 * Accumulates as many complete SSL/TLS records as are available (bounded by the
 * maximum encrypted packet length) and hands them to {@link #unwrap} in one go,
 * so the engine is only ever fed whole records. Returns the decrypted
 * application data, or {@code null} if more input is needed.
 */
@Override
protected Object decode(
        final ChannelHandlerContext ctx, Channel channel, ChannelBuffer in) throws Exception {

    final int startOffset = in.readerIndex();
    final int endOffset = in.writerIndex();
    int offset = startOffset;
    int totalLength = 0;

    // If we calculated the length of the current SSL record before, use that information.
    if (packetLength > 0) {
        if (endOffset - startOffset < packetLength) {
            // The previously announced record is still incomplete.
            return null;
        } else {
            offset += packetLength;
            totalLength = packetLength;
            packetLength = 0;
        }
    }

    boolean nonSslRecord = false;

    while (totalLength < OpenSslEngine.MAX_ENCRYPTED_PACKET_LENGTH) {
        final int readableBytes = endOffset - offset;
        if (readableBytes < 5) {
            // Need at least the 5-byte record header to determine the length.
            break;
        }

        final int packetLength = getEncryptedPacketLength(in, offset);
        if (packetLength == -1) {
            nonSslRecord = true;
            break;
        }

        assert packetLength > 0;

        if (packetLength > readableBytes) {
            // wait until the whole packet can be read
            this.packetLength = packetLength;
            break;
        }

        int newTotalLength = totalLength + packetLength;
        if (newTotalLength > OpenSslEngine.MAX_ENCRYPTED_PACKET_LENGTH) {
            // Don't read too much.
            break;
        }

        // We have a whole packet.
        // Increment the offset to handle the next packet.
        offset += packetLength;
        totalLength = newTotalLength;
    }

    ChannelBuffer unwrapped = null;
    if (totalLength > 0) {
        // The buffer contains one or more full SSL records.
        // Slice out the whole packet so unwrap will only be called with complete packets.
        // Also directly reset the packetLength. This is needed as unwrap(..) may trigger
        // decode(...) again via:
        // 1) unwrap(..) is called
        // 2) wrap(...) is called from within unwrap(...)
        // 3) wrap(...) calls unwrapLater(...)
        // 4) unwrapLater(...) calls decode(...)
        //
        // See https://github.com/netty/netty/issues/1534
        final ByteBuffer inNetBuf = in.toByteBuffer(in.readerIndex(), totalLength);
        unwrapped = unwrap(ctx, channel, in, inNetBuf, totalLength);
        assert !inNetBuf.hasRemaining() || engine.isInboundDone();
    }

    if (nonSslRecord) {
        // Not an SSL/TLS packet
        NotSslRecordException e = new NotSslRecordException(
                "not an SSL/TLS record: " + ChannelBuffers.hexDump(in));
        in.skipBytes(in.readableBytes());
        if (closeOnSslException) {
            // first trigger the exception and then close the channel
            fireExceptionCaught(ctx, e);
            Channels.close(ctx, future(channel));

            // just return null as we closed the channel before, that
            // will take care of cleanup etc
            return null;
        } else {
            throw e;
        }
    }
    return unwrapped;
}
/**
 * Reads a big-endian 16-bit value from the buffer and returns it as a short.
 * {@link ChannelBuffer#getShort(int)} is deliberately not used here because the
 * buffer might be little-endian.
 */
private static short getShort(ChannelBuffer buf, int offset) {
    final int high = buf.getByte(offset) & 0xFF;
    final int low = buf.getByte(offset + 1) & 0xFF;
    return (short) (high << 8 | low);
}
/**
 * Polls pending unencrypted write requests, encrypts them with the
 * {@link SSLEngine} and queues/flushes their encrypted counterparts downstream.
 * The unencrypted-writes lock guarantees that plaintext is polled and its
 * encrypted counterpart offered in the same order. If the engine reports
 * NEED_UNWRAP, inbound handshake data is processed after the loop.
 */
private void wrap(ChannelHandlerContext context, Channel channel) throws SSLException {

    ChannelBuffer msg;
    ByteBuffer outNetBuf = bufferPool.acquireBuffer();
    boolean success = true;
    boolean offered = false;
    boolean needsUnwrap = false;
    PendingWrite pendingWrite = null;

    try {
        loop:
        for (;;) {
            // Acquire a lock to make sure unencrypted data is polled
            // in order and their encrypted counterpart is offered in
            // order.
            pendingUnencryptedWritesLock.lock();
            try {
                pendingWrite = pendingUnencryptedWrites.peek();
                if (pendingWrite == null) {
                    break;
                }

                ByteBuffer outAppBuf = pendingWrite.outAppBuf;
                if (outAppBuf == null) {
                    // A write request with an empty buffer
                    pendingUnencryptedWrites.remove();
                    offerEncryptedWriteRequest(
                            new DownstreamMessageEvent(
                                    channel, pendingWrite.future,
                                    ChannelBuffers.EMPTY_BUFFER,
                                    channel.getRemoteAddress()));
                    offered = true;
                } else {
                    synchronized (handshakeLock) {
                        SSLEngineResult result = null;
                        try {
                            result = engine.wrap(outAppBuf, outNetBuf);
                        } finally {
                            if (!outAppBuf.hasRemaining()) {
                                // Fully consumed - drop the request from the queue.
                                pendingUnencryptedWrites.remove();
                            }
                        }

                        if (result.bytesProduced() > 0) {
                            outNetBuf.flip();
                            int remaining = outNetBuf.remaining();
                            msg = ctx.getChannel().getConfig().getBufferFactory().getBuffer(remaining);

                            // Transfer the bytes to the new ChannelBuffer using some safe method that will also
                            // work with "non" heap buffers
                            //
                            // See https://github.com/netty/netty/issues/329
                            msg.writeBytes(outNetBuf);
                            outNetBuf.clear();

                            ChannelFuture future;
                            if (pendingWrite.outAppBuf.hasRemaining()) {
                                // pendingWrite's future shouldn't be notified if
                                // only partial data is written.
                                future = succeededFuture(channel);
                            } else {
                                future = pendingWrite.future;
                            }

                            MessageEvent encryptedWrite = new DownstreamMessageEvent(
                                    channel, future, msg, channel.getRemoteAddress());
                            offerEncryptedWriteRequest(encryptedWrite);
                            offered = true;
                        } else if (result.getStatus() == Status.CLOSED) {
                            // SSLEngine has been closed already.
                            // Any further write attempts should be denied.
                            success = false;
                            break;
                        } else {
                            final HandshakeStatus handshakeStatus = result.getHandshakeStatus();
                            handleRenegotiation(handshakeStatus);
                            switch (handshakeStatus) {
                            case NEED_WRAP:
                                if (outAppBuf.hasRemaining()) {
                                    break;
                                } else {
                                    break loop;
                                }
                            case NEED_UNWRAP:
                                needsUnwrap = true;
                                break loop;
                            case NEED_TASK:
                                runDelegatedTasks();
                                break;
                            case FINISHED:
                                setHandshakeSuccess(channel);
                                if (result.getStatus() == Status.CLOSED) {
                                    success = false;
                                }
                                break loop;
                            case NOT_HANDSHAKING:
                                setHandshakeSuccessIfStillHandshaking(channel);
                                if (result.getStatus() == Status.CLOSED) {
                                    success = false;
                                }
                                break loop;
                            default:
                                throw new IllegalStateException(
                                        "Unknown handshake status: " +
                                        handshakeStatus);
                            }
                        }
                    }
                }
            } finally {
                pendingUnencryptedWritesLock.unlock();
            }
        }
    } catch (SSLException e) {
        success = false;
        setHandshakeFailure(channel, e);
        throw e;
    } finally {
        bufferPool.releaseBuffer(outNetBuf);

        if (offered) {
            flushPendingEncryptedWrites(context);
        }

        if (!success) {
            IllegalStateException cause =
                    new IllegalStateException("SSLEngine already closed");

            // Check if we had a pendingWrite in process, if so we need to also notify as otherwise
            // the ChannelFuture will never get notified
            if (pendingWrite != null) {
                pendingWrite.future.setFailure(cause);
            }

            // Mark all remaining pending writes as failure if anything
            // wrong happened before the write requests are wrapped.
            // Please note that we do not call setFailure while a lock is
            // acquired, to avoid a potential dead lock.
            for (;;) {
                pendingUnencryptedWritesLock.lock();
                try {
                    pendingWrite = pendingUnencryptedWrites.poll();
                    if (pendingWrite == null) {
                        break;
                    }
                } finally {
                    pendingUnencryptedWritesLock.unlock();
                }

                pendingWrite.future.setFailure(cause);
            }
        }
    }

    if (needsUnwrap) {
        unwrapNonAppData(ctx, channel);
    }
}
/**
 * Appends an encrypted write to the pending queue. The lock is only taken
 * opportunistically: the add happens either way, but unlocking is skipped when
 * another thread already holds the lock.
 */
private void offerEncryptedWriteRequest(MessageEvent encryptedWrite) {
    final boolean acquired = pendingEncryptedWritesLock.tryLock();
    try {
        pendingEncryptedWrites.add(encryptedWrite);
    } finally {
        if (acquired) {
            pendingEncryptedWritesLock.unlock();
        }
    }
}
/**
 * Drains the queue of already-encrypted writes downstream. Never blocks on the
 * lock: if another thread holds it, this method simply returns and leaves the
 * flushing to that thread.
 */
private void flushPendingEncryptedWrites(ChannelHandlerContext ctx) {
    for (;;) {
        if (pendingEncryptedWrites.isEmpty()) {
            return;
        }

        // Avoid a possible dead lock and data integrity issue caused by cross
        // communication between more than one channel in the same VM.
        if (!pendingEncryptedWritesLock.tryLock()) {
            return;
        }

        try {
            MessageEvent e;
            while ((e = pendingEncryptedWrites.poll()) != null) {
                ctx.sendDownstream(e);
            }
        } finally {
            pendingEncryptedWritesLock.unlock();
        }

        // Another thread might have queued more writes in the meantime,
        // so re-check the queue before giving up.
    }
}
/**
 * Wraps non-application (handshake) data produced by the {@link SSLEngine} and
 * writes it downstream until the engine stops producing bytes.
 *
 * @return the future of the last write issued, or a succeeded future if
 *         nothing was written
 */
private ChannelFuture wrapNonAppData(ChannelHandlerContext ctx, Channel channel) throws SSLException {
    ChannelFuture future = null;
    ByteBuffer outNetBuf = bufferPool.acquireBuffer();

    SSLEngineResult result;
    try {
        for (;;) {
            synchronized (handshakeLock) {
                result = engine.wrap(EMPTY_BUFFER, outNetBuf);
            }

            if (result.bytesProduced() > 0) {
                outNetBuf.flip();
                ChannelBuffer msg =
                        ctx.getChannel().getConfig().getBufferFactory().getBuffer(outNetBuf.remaining());

                // Transfer the bytes to the new ChannelBuffer using some safe method that will also
                // work with "non" heap buffers
                //
                // See https://github.com/netty/netty/issues/329
                msg.writeBytes(outNetBuf);
                outNetBuf.clear();

                future = future(channel);
                future.addListener(new ChannelFutureListener() {
                    public void operationComplete(ChannelFuture future)
                            throws Exception {
                        if (future.getCause() instanceof ClosedChannelException) {
                            // Remember to swallow the matching ClosedChannelException
                            // later, as a non-app-data write may race with the
                            // channel being closed.
                            synchronized (ignoreClosedChannelExceptionLock) {
                                ignoreClosedChannelException ++;
                            }
                        }
                    }
                });

                write(ctx, future, msg);
            }

            final HandshakeStatus handshakeStatus = result.getHandshakeStatus();
            handleRenegotiation(handshakeStatus);
            switch (handshakeStatus) {
            case FINISHED:
                setHandshakeSuccess(channel);
                runDelegatedTasks();
                break;
            case NEED_TASK:
                runDelegatedTasks();
                break;
            case NEED_UNWRAP:
                if (!Thread.holdsLock(handshakeLock)) {
                    // unwrap shouldn't be called when this method was
                    // called by unwrap - unwrap will keep running after
                    // this method returns.
                    unwrapNonAppData(ctx, channel);
                }
                break;
            case NOT_HANDSHAKING:
                if (setHandshakeSuccessIfStillHandshaking(channel)) {
                    runDelegatedTasks();
                }
                break;
            case NEED_WRAP:
                break;
            default:
                throw new IllegalStateException(
                        "Unexpected handshake status: " + handshakeStatus);
            }

            if (result.bytesProduced() == 0) {
                // The engine produced nothing - handshake output is drained.
                break;
            }
        }
    } catch (SSLException e) {
        setHandshakeFailure(channel, e);
        throw e;
    } finally {
        bufferPool.releaseBuffer(outNetBuf);
    }

    if (future == null) {
        future = succeededFuture(channel);
    }

    return future;
}
/**
 * Calls {@link SSLEngine#unwrap(ByteBuffer, ByteBuffer)} with an empty buffer to handle handshakes, etc.
 */
private void unwrapNonAppData(ChannelHandlerContext ctx, Channel channel) throws SSLException {
    // NOTE(review): the -1 output capacity presumably never matters because an
    // empty inbound buffer produces no application data - confirm against unwrap().
    unwrap(ctx, channel, ChannelBuffers.EMPTY_BUFFER, EMPTY_BUFFER, -1);
}
/**
 * Unwraps inbound SSL records.
 *
 * @param nettyInNetBuf
 *        the inbound network buffer; its readerIndex is advanced past the bytes
 *        the engine consumed
 * @param nioInNetBuf
 *        NIO view of the same inbound bytes, handed to the engine
 * @param initialNettyOutAppBufCapacity
 *        initial capacity used when the decrypted output buffer is first allocated
 * @return a buffer containing decrypted application data, or {@code null} if none
 */
private ChannelBuffer unwrap(
        ChannelHandlerContext ctx, Channel channel,
        ChannelBuffer nettyInNetBuf, ByteBuffer nioInNetBuf,
        int initialNettyOutAppBufCapacity) throws SSLException {

    final int nettyInNetBufStartOffset = nettyInNetBuf.readerIndex();
    final int nioInNetBufStartOffset = nioInNetBuf.position();
    final ByteBuffer nioOutAppBuf = bufferPool.acquireBuffer();

    ChannelBuffer nettyOutAppBuf = null;

    try {
        boolean needsWrap = false;
        for (;;) {
            SSLEngineResult result;
            boolean needsHandshake = false;
            synchronized (handshakeLock) {
                // A server-mode engine that has neither started nor finished a
                // handshake needs an explicit handshake() before unwrapping.
                if (!handshaken && !handshaking &&
                    !engine.getUseClientMode() &&
                    !engine.isInboundDone() && !engine.isOutboundDone()) {
                    needsHandshake = true;
                }
            }
            if (needsHandshake) {
                handshake();
            }

            synchronized (handshakeLock) {
                // Decrypt at least one record in the inbound network buffer.
                // It is impossible to consume no record here because we made sure the inbound network buffer
                // always contain at least one record in decode(). Therefore, if SSLEngine.unwrap() returns
                // BUFFER_OVERFLOW, it is always resolved by retrying after emptying the application buffer.
                for (;;) {
                    final int outAppBufSize = engine.getSession().getApplicationBufferSize();
                    final ByteBuffer outAppBuf;
                    if (nioOutAppBuf.capacity() < outAppBufSize) {
                        // SSLEngine wants a buffer larger than what the pool can provide.
                        // Allocate a temporary heap buffer.
                        outAppBuf = ByteBuffer.allocate(outAppBufSize);
                    } else {
                        outAppBuf = nioOutAppBuf;
                    }

                    try {
                        result = engine.unwrap(nioInNetBuf, outAppBuf);
                        switch (result.getStatus()) {
                        case CLOSED:
                            // notify about the CLOSED state of the SSLEngine. See #137
                            sslEngineCloseFuture.setClosed();
                            break;
                        case BUFFER_OVERFLOW:
                            // Flush the unwrapped data in the outAppBuf into frame and try again.
                            // See the finally block.
                            continue;
                        }

                        break;
                    } finally {
                        outAppBuf.flip();

                        // Sync the offset of the inbound buffer.
                        nettyInNetBuf.readerIndex(
                                nettyInNetBufStartOffset + nioInNetBuf.position() - nioInNetBufStartOffset);

                        // Copy the unwrapped data into a smaller buffer.
                        if (outAppBuf.hasRemaining()) {
                            if (nettyOutAppBuf == null) {
                                ChannelBufferFactory factory = ctx.getChannel().getConfig().getBufferFactory();
                                nettyOutAppBuf = factory.getBuffer(initialNettyOutAppBufCapacity);
                            }
                            nettyOutAppBuf.writeBytes(outAppBuf);
                        }
                        outAppBuf.clear();
                    }
                }
            }

            final HandshakeStatus handshakeStatus = result.getHandshakeStatus();
            handleRenegotiation(handshakeStatus);
            switch (handshakeStatus) {
            case NEED_UNWRAP:
                break;
            case NEED_WRAP:
                wrapNonAppData(ctx, channel);
                break;
            case NEED_TASK:
                runDelegatedTasks();
                break;
            case FINISHED:
                setHandshakeSuccess(channel);
                needsWrap = true;
                continue;
            case NOT_HANDSHAKING:
                if (setHandshakeSuccessIfStillHandshaking(channel)) {
                    needsWrap = true;
                    continue;
                }
                if (writeBeforeHandshakeDone) {
                    // We need to call wrap(...) in case there was a flush done before the handshake completed.
                    //
                    // See https://github.com/netty/netty/pull/2437
                    writeBeforeHandshakeDone = false;
                    needsWrap = true;
                }
                break;
            default:
                throw new IllegalStateException(
                        "Unknown handshake status: " + handshakeStatus);
            }

            if (result.getStatus() == Status.BUFFER_UNDERFLOW ||
                result.bytesConsumed() == 0 && result.bytesProduced() == 0) {
                // No more progress can be made with the data at hand.
                break;
            }
        }

        if (needsWrap) {
            // wrap() acquires pendingUnencryptedWrites first and then
            // handshakeLock. If handshakeLock is already held by the
            // current thread, calling wrap() will lead to a dead lock
            // i.e. pendingUnencryptedWrites -> handshakeLock vs.
            // handshakeLock -> pendingUnencryptedLock -> handshakeLock
            //
            // There is also the same issue between pendingEncryptedWrites
            // and pendingUnencryptedWrites.
            if (!Thread.holdsLock(handshakeLock) && !pendingEncryptedWritesLock.isHeldByCurrentThread()) {
                wrap(ctx, channel);
            }
        }
    } catch (SSLException e) {
        setHandshakeFailure(channel, e);
        throw e;
    } finally {
        bufferPool.releaseBuffer(nioOutAppBuf);
    }

    if (nettyOutAppBuf != null && nettyOutAppBuf.readable()) {
        return nettyOutAppBuf;
    } else {
        return null;
    }
}
/**
 * Handles a renegotiation attempt signalled by the given handshake status.
 * If renegotiation is enabled the handshake is simply restarted; otherwise an
 * {@link SSLException} is fired and the connection is closed.
 */
private void handleRenegotiation(HandshakeStatus handshakeStatus) {
    synchronized (handshakeLock) {
        if (handshakeStatus == HandshakeStatus.NOT_HANDSHAKING ||
            handshakeStatus == HandshakeStatus.FINISHED) {
            // Not handshaking
            return;
        }

        if (!handshaken) {
            // Not renegotiation
            return;
        }

        final boolean renegotiate;
        if (handshaking) {
            // Renegotiation in progress or failed already.
            // i.e. Renegotiation check has been done already below.
            return;
        }

        if (engine.isInboundDone() || engine.isOutboundDone()) {
            // Not handshaking but closing.
            return;
        }

        if (isEnableRenegotiation()) {
            // Continue renegotiation.
            renegotiate = true;
        } else {
            // Do not renegotiate.
            renegotiate = false;
            // Prevent reentrance of this method.
            handshaking = true;
        }

        if (renegotiate) {
            // Renegotiate.
            handshake();
        } else {
            // Raise an exception.
            fireExceptionCaught(
                    ctx, new SSLException(
                            "renegotiation attempted by peer; " +
                            "closing the connection"));

            // Close the connection to stop renegotiation.
            Channels.close(ctx, succeededFuture(ctx.getChannel()));
        }
    }
}
/**
 * Fetches all delegated tasks from the {@link SSLEngine} and runs them via the {@link #delegatedTaskExecutor}.
 * If the {@link #delegatedTaskExecutor} is {@link ImmediateExecutor}, just call {@link Runnable#run()} directly
 * instead of using {@link Executor#execute(Runnable)}. Otherwise, run the tasks via
 * the {@link #delegatedTaskExecutor} and wait until the tasks are finished.
 */
private void runDelegatedTasks() {
    if (delegatedTaskExecutor == ImmediateExecutor.INSTANCE) {
        // Fast path: execute each task inline as it is fetched.
        for (;;) {
            final Runnable task;
            synchronized (handshakeLock) {
                task = engine.getDelegatedTask();
            }

            if (task == null) {
                break;
            }

            delegatedTaskExecutor.execute(task);
        }
    } else {
        // Collect all tasks first so the executor is invoked only once.
        final List<Runnable> tasks = new ArrayList<Runnable>(2);
        for (;;) {
            final Runnable task;
            synchronized (handshakeLock) {
                task = engine.getDelegatedTask();
            }

            if (task == null) {
                break;
            }

            tasks.add(task);
        }

        if (tasks.isEmpty()) {
            return;
        }

        // Run the batch on the executor and block until it has finished.
        final CountDownLatch latch = new CountDownLatch(1);
        delegatedTaskExecutor.execute(new Runnable() {
            public void run() {
                try {
                    for (Runnable task: tasks) {
                        task.run();
                    }
                } catch (Exception e) {
                    fireExceptionCaught(ctx, e);
                } finally {
                    latch.countDown();
                }
            }
        });

        boolean interrupted = false;
        while (latch.getCount() != 0) {
            try {
                latch.await();
            } catch (InterruptedException e) {
                // Interrupt later.
                interrupted = true;
            }
        }

        if (interrupted) {
            // Restore the interrupt status now that waiting is done.
            Thread.currentThread().interrupt();
        }
    }
}
/**
 * Works around some Android {@link SSLEngine} implementations that skip
 * {@link HandshakeStatus#FINISHED} and go straight into
 * {@link HandshakeStatus#NOT_HANDSHAKING} when the handshake is finished.
 *
 * @return {@code true} if and only if the workaround has been applied and thus
 *         {@link #handshakeFuture} has been marked as success by this method
 */
private boolean setHandshakeSuccessIfStillHandshaking(Channel channel) {
    // Nothing to do unless a handshake is in flight and its future is still open.
    if (!handshaking || handshakeFuture.isDone()) {
        return false;
    }
    setHandshakeSuccess(channel);
    return true;
}
/**
 * Marks the handshake as complete: clears the handshaking flags, cancels the
 * handshake timeout and notifies {@link #handshakeFuture}.
 */
private void setHandshakeSuccess(Channel channel) {
    synchronized (handshakeLock) {
        handshaking = false;
        handshaken = true;

        if (handshakeFuture == null) {
            handshakeFuture = future(channel);
        }
        cancelHandshakeTimeout();
    }
    if (logger.isDebugEnabled()) {
        logger.debug(channel + " HANDSHAKEN: " + engine.getSession().getCipherSuite());
    }
    // Future is notified outside the synchronized block.
    handshakeFuture.setSuccess();
}
/**
 * Marks the in-progress handshake as failed: tears down both sides of the
 * engine, cancels the timeout and fails {@link #handshakeFuture}. Does nothing
 * if no handshake is currently running.
 */
private void setHandshakeFailure(Channel channel, SSLException cause) {
    synchronized (handshakeLock) {
        if (!handshaking) {
            return;
        }
        handshaking = false;
        handshaken = false;

        if (handshakeFuture == null) {
            handshakeFuture = future(channel);
        }

        // cancel the timeout now
        cancelHandshakeTimeout();

        // Release all resources such as internal buffers that SSLEngine
        // is managing.
        engine.closeOutbound();

        try {
            engine.closeInbound();
        } catch (SSLException e) {
            if (logger.isDebugEnabled()) {
                logger.debug(
                        "SSLEngine.closeInbound() raised an exception after " +
                        "a handshake failure.", e);
            }
        }
    }

    handshakeFuture.setFailure(cause);
    if (closeOnSslException) {
        // If configured, also close the channel on SSL failure.
        Channels.close(ctx, future(channel));
    }
}
/**
 * Sends a close_notify to the peer (if the outbound side is still open) and
 * then closes the channel. Atomic updaters ensure the tear-down logic runs at
 * most once even when invoked concurrently.
 */
private void closeOutboundAndChannel(
        final ChannelHandlerContext context, final ChannelStateEvent e) {
    if (!e.getChannel().isConnected()) {
        context.sendDownstream(e);
        return;
    }

    // Ensure that the tear-down logic beyond this point is never invoked concurrently nor multiple times.
    if (!CLOSED_OUTBOUND_AND_CHANNEL_UPDATER.compareAndSet(this, 0, 1)) {
        // The other thread called this method already, and thus the connection will be closed eventually.
        // So, just wait until the connection is closed, and then forward the event so that the sink handles
        // the duplicate close attempt.
        e.getChannel().getCloseFuture().addListener(new ChannelFutureListener() {
            public void operationComplete(ChannelFuture future) throws Exception {
                context.sendDownstream(e);
            }
        });
        return;
    }

    boolean passthrough = true;
    try {
        try {
            // Process any pending inbound handshake data before shutting down.
            unwrapNonAppData(ctx, e.getChannel());
        } catch (SSLException ex) {
            if (logger.isDebugEnabled()) {
                logger.debug("Failed to unwrap before sending a close_notify message", ex);
            }
        }

        if (!engine.isOutboundDone()) {
            if (SENT_CLOSE_NOTIFY_UPDATER.compareAndSet(this, 0, 1)) {
                engine.closeOutbound();
                try {
                    // Write the close_notify and close the channel once it went out.
                    ChannelFuture closeNotifyFuture = wrapNonAppData(context, e.getChannel());
                    closeNotifyFuture.addListener(
                            new ClosingChannelFutureListener(context, e));
                    passthrough = false;
                } catch (SSLException ex) {
                    if (logger.isDebugEnabled()) {
                        logger.debug("Failed to encode a close_notify message", ex);
                    }
                }
            }
        }
    } finally {
        if (passthrough) {
            context.sendDownstream(e);
        }
    }
}
/**
 * A write request that has not been encrypted yet. Pairs the future to notify
 * with the plaintext buffer; {@code outAppBuf} is {@code null} for a write
 * request with an empty buffer (see {@code wrap}).
 */
private static final class PendingWrite {
    final ChannelFuture future;
    final ByteBuffer outAppBuf;

    PendingWrite(ChannelFuture future, ByteBuffer outAppBuf) {
        this.future = future;
        this.outAppBuf = outAppBuf;
    }
}
/**
 * Closes the channel once the close_notify write completed - unless the write
 * already failed because the channel was closed, in which case the original
 * close future is simply marked as success.
 */
private static final class ClosingChannelFutureListener implements ChannelFutureListener {

    private final ChannelHandlerContext context;
    private final ChannelStateEvent e;

    ClosingChannelFutureListener(
            ChannelHandlerContext context, ChannelStateEvent e) {
        this.context = context;
        this.e = e;
    }

    public void operationComplete(ChannelFuture closeNotifyFuture) throws Exception {
        if (!(closeNotifyFuture.getCause() instanceof ClosedChannelException)) {
            Channels.close(context, e.getFuture());
        } else {
            e.getFuture().setSuccess();
        }
    }
}
/**
 * Captures the {@link ChannelHandlerContext} when the handler is added to a
 * pipeline so other code paths in this handler can use it.
 */
@Override
public void beforeAdd(ChannelHandlerContext ctx) throws Exception {
    super.beforeAdd(ctx);
    this.ctx = ctx;
}
/**
 * Fails all pending writes which we were not able to flush out before the
 * handler was removed from the pipeline.
 */
@Override
public void afterRemove(ChannelHandlerContext ctx) throws Exception {
    closeEngine();

    // there is no need for synchronization here as we do not receive downstream events anymore
    Throwable cause = null;

    PendingWrite pendingWrite;
    while ((pendingWrite = pendingUnencryptedWrites.poll()) != null) {
        if (cause == null) {
            cause = new IOException("Unable to write data");
        }
        pendingWrite.future.setFailure(cause);
    }

    MessageEvent event;
    while ((event = pendingEncryptedWrites.poll()) != null) {
        if (cause == null) {
            cause = new IOException("Unable to write data");
        }
        event.getFuture().setFailure(cause);
    }

    if (cause != null) {
        fireExceptionCaughtLater(ctx, cause);
    }
}
/**
 * Calls {@link #handshake()} once the {@link Channel} is connected
 * (only when automatic handshaking is enabled).
 */
@Override
public void channelConnected(final ChannelHandlerContext ctx, final ChannelStateEvent e) throws Exception {
    if (issueHandshake) {
        // issue a handshake and add a listener to it which will fire an exception event if
        // an exception was thrown while doing the handshake
        handshake().addListener(new ChannelFutureListener() {

            public void operationComplete(ChannelFuture future) throws Exception {
                if (future.isSuccess()) {
                    // Send the event upstream after the handshake was completed without an error.
                    //
                    // See https://github.com/netty/netty/issues/358
                    ctx.sendUpstream(e);
                }
            }

        });
    } else {
        super.channelConnected(ctx, e);
    }
}
/**
 * Loop over all the pending writes and fail them.
 *
 * See <a href="https://github.com/netty/netty/issues/305">#305</a> for more details.
 */
@Override
public void channelClosed(final ChannelHandlerContext ctx, ChannelStateEvent e) throws Exception {
    // Move the fail of the writes to the IO-Thread to prevent possible deadlock
    // See https://github.com/netty/netty/issues/989
    ctx.getPipeline().execute(new Runnable() {
        public void run() {
            // Non-blocking: bail out if another thread currently holds the lock.
            if (!pendingUnencryptedWritesLock.tryLock()) {
                return;
            }
            Throwable cause = null;
            try {
                for (;;) {
                    PendingWrite pw = pendingUnencryptedWrites.poll();
                    if (pw == null) {
                        break;
                    }
                    if (cause == null) {
                        cause = new ClosedChannelException();
                    }
                    pw.future.setFailure(cause);
                }

                for (;;) {
                    MessageEvent ev = pendingEncryptedWrites.poll();
                    if (ev == null) {
                        break;
                    }
                    if (cause == null) {
                        cause = new ClosedChannelException();
                    }
                    ev.getFuture().setFailure(cause);
                }
            } finally {
                pendingUnencryptedWritesLock.unlock();
            }

            if (cause != null) {
                fireExceptionCaught(ctx, cause);
            }
        }
    });

    super.channelClosed(ctx, e);
}
/**
 * Future that is completed when the inbound side of the {@link SSLEngine}
 * reports CLOSED during unwrap. It can only be completed via
 * {@link #setClosed()}; the public {@code setSuccess()} and
 * {@code setFailure(Throwable)} are no-ops.
 */
private final class SSLEngineInboundCloseFuture extends DefaultChannelFuture {
    SSLEngineInboundCloseFuture() {
        super(null, true);
    }

    void setClosed() {
        super.setSuccess();
    }

    @Override
    public Channel getChannel() {
        if (ctx == null) {
            // Maybe we should better throw an IllegalStateException() ?
            return null;
        } else {
            return ctx.getChannel();
        }
    }

    @Override
    public boolean setSuccess() {
        return false;
    }

    @Override
    public boolean setFailure(Throwable cause) {
        return false;
    }
}
}
|
2139_0
|
crossvul
|
java
|
CWE-119
|
Improper Restriction of Operations within the Bounds of a Memory Buffer - Certain languages allow direct addressing of memory locations and do not automatically ensure that these locations are valid for the memory buffer that is being referenced.
|
java
|
/*
* Copyright (C) 2016 Southern Storm Software, Pty Ltd.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
package com.southernstorm.noise.protocol;
import java.util.Arrays;
import javax.crypto.BadPaddingException;
import javax.crypto.ShortBufferException;
import com.southernstorm.noise.crypto.GHASH;
import com.southernstorm.noise.crypto.RijndaelAES;
/**
* Fallback implementation of "AESGCM" on platforms where
* the JCA/JCE does not have a suitable GCM or CTR provider.
*/
/**
 * Fallback implementation of "AESGCM" on platforms where
 * the JCA/JCE does not have a suitable GCM or CTR provider.
 */
class AESGCMFallbackCipherState implements CipherState {

    private RijndaelAES aes;    // block cipher used to generate the CTR keystream
    private long n;             // 64-bit packet counter / nonce
    private byte[] iv;          // 16-byte counter/IV block for CTR mode
    private byte[] enciv;       // 16-byte scratch buffer for encrypted counter blocks
    private byte[] hashKey;     // per-packet GHASH finalization mask (and initial hash key)
    private GHASH ghash;        // incremental GHASH state
    private boolean haskey;     // true once initializeKey() has been called

    /**
     * Constructs a new cipher state for the "AESGCM" algorithm.
     */
    public AESGCMFallbackCipherState()
    {
        aes = new RijndaelAES();
        n = 0;
        iv = new byte [16];
        enciv = new byte [16];
        hashKey = new byte [16];
        ghash = new GHASH();
        haskey = false;
    }

    /**
     * Destroys all sensitive state held by this object.
     */
    @Override
    public void destroy() {
        aes.destroy();
        ghash.destroy();
        Noise.destroy(hashKey);
        Noise.destroy(iv);
        Noise.destroy(enciv);
    }

    @Override
    public String getCipherName() {
        return "AESGCM";
    }

    @Override
    public int getKeyLength() {
        return 32;
    }

    @Override
    public int getMACLength() {
        // Without a key no authentication tag is appended.
        return haskey ? 16 : 0;
    }

    @Override
    public void initializeKey(byte[] key, int offset) {
        // Set up the AES key.
        aes.setupEnc(key, offset, 256);
        haskey = true;

        // Generate the hashing key by encrypting a block of zeroes.
        Arrays.fill(hashKey, (byte)0);
        aes.encrypt(hashKey, 0, hashKey, 0);
        ghash.reset(hashKey, 0);

        // Reset the nonce.
        n = 0;
    }

    @Override
    public boolean hasKey() {
        return haskey;
    }

    /**
     * Set up to encrypt or decrypt the next packet.
     *
     * @param ad The associated data for the packet.
     */
    private void setup(byte[] ad)
    {
        // Check for nonce wrap-around.
        if (n == -1L)
            throw new IllegalStateException("Nonce has wrapped around");

        // Format the counter/IV block.
        iv[0] = 0;
        iv[1] = 0;
        iv[2] = 0;
        iv[3] = 0;
        iv[4] = (byte)(n >> 56);
        iv[5] = (byte)(n >> 48);
        iv[6] = (byte)(n >> 40);
        iv[7] = (byte)(n >> 32);
        iv[8] = (byte)(n >> 24);
        iv[9] = (byte)(n >> 16);
        iv[10] = (byte)(n >> 8);
        iv[11] = (byte)n;
        iv[12] = 0;
        iv[13] = 0;
        iv[14] = 0;
        iv[15] = 1;
        ++n;

        // Encrypt a block of zeroes to generate the hash key to XOR
        // the GHASH tag with at the end of the encrypt/decrypt operation.
        Arrays.fill(hashKey, (byte)0);
        aes.encrypt(iv, 0, hashKey, 0);

        // Initialize the GHASH with the associated data value.
        ghash.reset();
        if (ad != null) {
            ghash.update(ad, 0, ad.length);
            ghash.pad();
        }
    }

    /**
     * Encrypts a block in CTR mode.
     *
     * @param plaintext The plaintext to encrypt.
     * @param plaintextOffset Offset of the first plaintext byte.
     * @param ciphertext The resulting ciphertext.
     * @param ciphertextOffset Offset of the first ciphertext byte.
     * @param length The number of bytes to encrypt.
     *
     * This function can also be used to decrypt.
     */
    private void encryptCTR(byte[] plaintext, int plaintextOffset, byte[] ciphertext, int ciphertextOffset, int length)
    {
        while (length > 0) {
            // Increment the IV and encrypt it to get the next keystream block.
            if (++(iv[15]) == 0)
                if (++(iv[14]) == 0)
                    if (++(iv[13]) == 0)
                        ++(iv[12]);
            aes.encrypt(iv, 0, enciv, 0);

            // XOR the keystream block with the plaintext to create the ciphertext.
            int temp = length;
            if (temp > 16)
                temp = 16;
            for (int index = 0; index < temp; ++index)
                ciphertext[ciphertextOffset + index] = (byte)(plaintext[plaintextOffset + index] ^ enciv[index]);

            // Advance to the next block.
            plaintextOffset += temp;
            ciphertextOffset += temp;
            length -= temp;
        }
    }

    @Override
    public int encryptWithAd(byte[] ad, byte[] plaintext, int plaintextOffset,
            byte[] ciphertext, int ciphertextOffset, int length)
            throws ShortBufferException {
        // Reject negative offsets/lengths and plaintext ranges that fall outside
        // the input array. The original code only validated the ciphertext
        // output space, so a hostile length/offset combination could index
        // outside the caller's buffers (out-of-bounds read of the plaintext).
        // The subtraction is overflow-safe: if length > plaintext.length the
        // right side is negative and the non-negative offset fails the test.
        if (length < 0 || plaintextOffset < 0 || ciphertextOffset < 0
                || plaintextOffset > plaintext.length - length)
            throw new ShortBufferException();
        int space;
        if (ciphertextOffset > ciphertext.length)
            space = 0;
        else
            space = ciphertext.length - ciphertextOffset;
        if (!haskey) {
            // The key is not set yet - return the plaintext as-is.
            if (length > space)
                throw new ShortBufferException();
            if (plaintext != ciphertext || plaintextOffset != ciphertextOffset)
                System.arraycopy(plaintext, plaintextOffset, ciphertext, ciphertextOffset, length);
            return length;
        }
        // Need room for the ciphertext plus the 16-byte authentication tag.
        if (space < 16 || length > (space - 16))
            throw new ShortBufferException();
        setup(ad);
        encryptCTR(plaintext, plaintextOffset, ciphertext, ciphertextOffset, length);
        ghash.update(ciphertext, ciphertextOffset, length);
        ghash.pad(ad != null ? ad.length : 0, length);
        ghash.finish(ciphertext, ciphertextOffset + length, 16);
        // XOR the GHASH output with the encrypted counter block to form the tag.
        for (int index = 0; index < 16; ++index)
            ciphertext[ciphertextOffset + length + index] ^= hashKey[index];
        return length + 16;
    }

    @Override
    public int decryptWithAd(byte[] ad, byte[] ciphertext,
            int ciphertextOffset, byte[] plaintext, int plaintextOffset,
            int length) throws ShortBufferException, BadPaddingException {
        // Reject negative offsets/lengths up front; the space computations
        // below assume non-negative values.
        if (length < 0 || ciphertextOffset < 0 || plaintextOffset < 0)
            throw new ShortBufferException();
        int space;
        if (ciphertextOffset > ciphertext.length)
            space = 0;
        else
            space = ciphertext.length - ciphertextOffset;
        if (length > space)
            throw new ShortBufferException();
        if (plaintextOffset > plaintext.length)
            space = 0;
        else
            space = plaintext.length - plaintextOffset;
        if (!haskey) {
            // The key is not set yet - return the ciphertext as-is.
            if (length > space)
                throw new ShortBufferException();
            if (plaintext != ciphertext || plaintextOffset != ciphertextOffset)
                System.arraycopy(ciphertext, ciphertextOffset, plaintext, plaintextOffset, length);
            return length;
        }
        if (length < 16)
            Noise.throwBadTagException();
        int dataLen = length - 16;
        if (dataLen > space)
            throw new ShortBufferException();
        setup(ad);
        ghash.update(ciphertext, ciphertextOffset, dataLen);
        ghash.pad(ad != null ? ad.length : 0, dataLen);
        ghash.finish(enciv, 0, 16);
        // Constant-time tag comparison: accumulate all differences before testing.
        int temp = 0;
        for (int index = 0; index < 16; ++index)
            temp |= (hashKey[index] ^ enciv[index] ^ ciphertext[ciphertextOffset + dataLen + index]);
        if ((temp & 0xFF) != 0)
            Noise.throwBadTagException();
        // Tag verified - decrypt only now.
        encryptCTR(ciphertext, ciphertextOffset, plaintext, plaintextOffset, dataLen);
        return dataLen;
    }

    @Override
    public CipherState fork(byte[] key, int offset) {
        CipherState cipher;
        cipher = new AESGCMFallbackCipherState();
        cipher.initializeKey(key, offset);
        return cipher;
    }

    @Override
    public void setNonce(long nonce) {
        // Caller is responsible for never reusing a nonce with the same key.
        n = nonce;
    }
}
|
/*
* Copyright (C) 2016 Southern Storm Software, Pty Ltd.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
package com.southernstorm.noise.protocol;
import java.util.Arrays;
import javax.crypto.BadPaddingException;
import javax.crypto.ShortBufferException;
import com.southernstorm.noise.crypto.GHASH;
import com.southernstorm.noise.crypto.RijndaelAES;
/**
* Fallback implementation of "AESGCM" on platforms where
* the JCA/JCE does not have a suitable GCM or CTR provider.
*/
class AESGCMFallbackCipherState implements CipherState {
	private RijndaelAES aes;	// AES block cipher used to generate the CTR keystream.
	private long n;			// 64-bit nonce counter for the next packet.
	private byte[] iv;		// 16-byte counter/IV block.
	private byte[] enciv;		// Scratch block: encrypted IV / keystream output.
	private byte[] hashKey;		// Per-packet value XOR'ed into the GHASH tag.
	private GHASH ghash;		// GHASH accumulator over AD and ciphertext.
	private boolean haskey;		// True once initializeKey() has been called.
	/**
	 * Constructs a new cipher state for the "AESGCM" algorithm.
	 */
	public AESGCMFallbackCipherState()
	{
		aes = new RijndaelAES();
		n = 0;
		iv = new byte [16];
		enciv = new byte [16];
		hashKey = new byte [16];
		ghash = new GHASH();
		haskey = false;
	}
	/**
	 * Destroys all key-dependent state held by this object.
	 */
	@Override
	public void destroy() {
		aes.destroy();
		ghash.destroy();
		Noise.destroy(hashKey);
		Noise.destroy(iv);
		Noise.destroy(enciv);
	}
	@Override
	public String getCipherName() {
		return "AESGCM";
	}
	@Override
	public int getKeyLength() {
		// AES-256 key.
		return 32;
	}
	@Override
	public int getMACLength() {
		// 16-byte GCM tag; no tag is produced before a key is set.
		return haskey ? 16 : 0;
	}
	/**
	 * Sets the 256-bit AES key, derives the GHASH key, and resets the nonce.
	 */
	@Override
	public void initializeKey(byte[] key, int offset) {
		// Set up the AES key.
		aes.setupEnc(key, offset, 256);
		haskey = true;
		// Generate the hashing key by encrypting a block of zeroes.
		Arrays.fill(hashKey, (byte)0);
		aes.encrypt(hashKey, 0, hashKey, 0);
		ghash.reset(hashKey, 0);
		// Reset the nonce.
		n = 0;
	}
	@Override
	public boolean hasKey() {
		return haskey;
	}
	/**
	 * Set up to encrypt or decrypt the next packet.
	 *
	 * @param ad The associated data for the packet.
	 */
	private void setup(byte[] ad)
	{
		// Check for nonce wrap-around.
		if (n == -1L)
			throw new IllegalStateException("Nonce has wrapped around");
		// Format the counter/IV block.  Bytes 4..11 hold the 64-bit nonce
		// big-endian; byte 15 starts the block counter at 1.
		iv[0] = 0;
		iv[1] = 0;
		iv[2] = 0;
		iv[3] = 0;
		iv[4] = (byte)(n >> 56);
		iv[5] = (byte)(n >> 48);
		iv[6] = (byte)(n >> 40);
		iv[7] = (byte)(n >> 32);
		iv[8] = (byte)(n >> 24);
		iv[9] = (byte)(n >> 16);
		iv[10] = (byte)(n >> 8);
		iv[11] = (byte)n;
		iv[12] = 0;
		iv[13] = 0;
		iv[14] = 0;
		iv[15] = 1;
		++n;
		// Encrypt a block of zeroes to generate the hash key to XOR
		// the GHASH tag with at the end of the encrypt/decrypt operation.
		Arrays.fill(hashKey, (byte)0);
		aes.encrypt(iv, 0, hashKey, 0);
		// Initialize the GHASH with the associated data value.
		ghash.reset();
		if (ad != null) {
			ghash.update(ad, 0, ad.length);
			ghash.pad();
		}
	}
	/**
	 * Encrypts a block in CTR mode.
	 *
	 * @param plaintext The plaintext to encrypt.
	 * @param plaintextOffset Offset of the first plaintext byte.
	 * @param ciphertext The resulting ciphertext.
	 * @param ciphertextOffset Offset of the first ciphertext byte.
	 * @param length The number of bytes to encrypt.
	 *
	 * This function can also be used to decrypt.
	 */
	private void encryptCTR(byte[] plaintext, int plaintextOffset, byte[] ciphertext, int ciphertextOffset, int length)
	{
		while (length > 0) {
			// Increment the IV and encrypt it to get the next keystream block.
			// The counter is the big-endian value in iv[12..15].
			if (++(iv[15]) == 0)
				if (++(iv[14]) == 0)
					if (++(iv[13]) == 0)
						++(iv[12]);
			aes.encrypt(iv, 0, enciv, 0);
			// XOR the keystream block with the plaintext to create the ciphertext.
			int temp = length;
			if (temp > 16)
				temp = 16;
			for (int index = 0; index < temp; ++index)
				ciphertext[ciphertextOffset + index] = (byte)(plaintext[plaintextOffset + index] ^ enciv[index]);
			// Advance to the next block.
			plaintextOffset += temp;
			ciphertextOffset += temp;
			length -= temp;
		}
	}
	/**
	 * Encrypts with associated data, appending a 16-byte GCM tag.
	 * All offsets/lengths are validated before the buffers are touched.
	 */
	@Override
	public int encryptWithAd(byte[] ad, byte[] plaintext, int plaintextOffset,
			byte[] ciphertext, int ciphertextOffset, int length)
			throws ShortBufferException {
		int space;
		// Reject out-of-range offsets and negative lengths up front.
		if (ciphertextOffset < 0 || ciphertextOffset > ciphertext.length)
			throw new IllegalArgumentException();
		if (length < 0 || plaintextOffset < 0 || plaintextOffset > plaintext.length)
			throw new IllegalArgumentException();
		space = ciphertext.length - ciphertextOffset;
		if (!haskey) {
			// The key is not set yet - return the plaintext as-is.
			if (length > space)
				throw new ShortBufferException();
			if (plaintext != ciphertext || plaintextOffset != ciphertextOffset)
				System.arraycopy(plaintext, plaintextOffset, ciphertext, ciphertextOffset, length);
			return length;
		}
		// Need room for the ciphertext plus the 16-byte tag.
		if (space < 16 || length > (space - 16))
			throw new ShortBufferException();
		setup(ad);
		encryptCTR(plaintext, plaintextOffset, ciphertext, ciphertextOffset, length);
		ghash.update(ciphertext, ciphertextOffset, length);
		ghash.pad(ad != null ? ad.length : 0, length);
		ghash.finish(ciphertext, ciphertextOffset + length, 16);
		for (int index = 0; index < 16; ++index)
			ciphertext[ciphertextOffset + length + index] ^= hashKey[index];
		return length + 16;
	}
	/**
	 * Verifies the trailing 16-byte tag and then decrypts.  Throws a
	 * bad-tag exception (via Noise.throwBadTagException) on failure.
	 */
	@Override
	public int decryptWithAd(byte[] ad, byte[] ciphertext,
			int ciphertextOffset, byte[] plaintext, int plaintextOffset,
			int length) throws ShortBufferException, BadPaddingException {
		int space;
		// Reject out-of-range offsets and negative lengths up front.
		if (ciphertextOffset < 0 || ciphertextOffset > ciphertext.length)
			throw new IllegalArgumentException();
		else
			space = ciphertext.length - ciphertextOffset;
		if (length > space)
			throw new ShortBufferException();
		if (length < 0 || plaintextOffset < 0 || plaintextOffset > plaintext.length)
			throw new IllegalArgumentException();
		space = plaintext.length - plaintextOffset;
		if (!haskey) {
			// The key is not set yet - return the ciphertext as-is.
			if (length > space)
				throw new ShortBufferException();
			if (plaintext != ciphertext || plaintextOffset != ciphertextOffset)
				System.arraycopy(ciphertext, ciphertextOffset, plaintext, plaintextOffset, length);
			return length;
		}
		if (length < 16)
			Noise.throwBadTagException();
		int dataLen = length - 16;
		if (dataLen > space)
			throw new ShortBufferException();
		setup(ad);
		// Authenticate the ciphertext before decrypting it.
		ghash.update(ciphertext, ciphertextOffset, dataLen);
		ghash.pad(ad != null ? ad.length : 0, dataLen);
		ghash.finish(enciv, 0, 16);
		// Compare computed and received tags without early exit.
		int temp = 0;
		for (int index = 0; index < 16; ++index)
			temp |= (hashKey[index] ^ enciv[index] ^ ciphertext[ciphertextOffset + dataLen + index]);
		if ((temp & 0xFF) != 0)
			Noise.throwBadTagException();
		encryptCTR(ciphertext, ciphertextOffset, plaintext, plaintextOffset, dataLen);
		return dataLen;
	}
	/**
	 * Creates a sibling cipher state keyed with the given key material.
	 */
	@Override
	public CipherState fork(byte[] key, int offset) {
		CipherState cipher;
		cipher = new AESGCMFallbackCipherState();
		cipher.initializeKey(key, offset);
		return cipher;
	}
	@Override
	public void setNonce(long nonce) {
		n = nonce;
	}
}
|
4291_1
|
crossvul
|
java
|
CWE-119
|
Improper Restriction of Operations within the Bounds of a Memory Buffer - Certain languages allow direct addressing of memory locations and do not automatically ensure that these locations are valid for the memory buffer that is being referenced.
|
java
|
/*
* Copyright (C) 2016 Southern Storm Software, Pty Ltd.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
package com.southernstorm.noise.protocol;
import java.security.InvalidAlgorithmParameterException;
import java.security.InvalidKeyException;
import java.security.NoSuchAlgorithmException;
import java.util.Arrays;
import javax.crypto.BadPaddingException;
import javax.crypto.Cipher;
import javax.crypto.IllegalBlockSizeException;
import javax.crypto.NoSuchPaddingException;
import javax.crypto.ShortBufferException;
import javax.crypto.spec.IvParameterSpec;
import javax.crypto.spec.SecretKeySpec;
import com.southernstorm.noise.crypto.GHASH;
/**
* Emulates the "AESGCM" cipher for Noise using the "AES/CTR/NoPadding"
* transformation from JCA/JCE.
*
* This class is used on platforms that don't have "AES/GCM/NoPadding",
* but which do have the older "AES/CTR/NoPadding".
*/
class AESGCMOnCtrCipherState implements CipherState {
	private Cipher cipher;		// JCE "AES/CTR/NoPadding" transformation.
	private SecretKeySpec keySpec;	// Current AES-256 key, or null before initializeKey().
	private long n;			// 64-bit nonce counter for the next packet.
	private byte[] iv;		// 16-byte counter/IV block.
	private byte[] hashKey;		// Per-packet value XOR'ed into the GHASH tag.
	private GHASH ghash;		// GHASH accumulator over AD and ciphertext.
	/**
	 * Constructs a new cipher state for the "AESGCM" algorithm.
	 *
	 * @throws NoSuchAlgorithmException The system does not have a
	 * provider for this algorithm.
	 */
	public AESGCMOnCtrCipherState() throws NoSuchAlgorithmException
	{
		try {
			cipher = Cipher.getInstance("AES/CTR/NoPadding");
		} catch (NoSuchPaddingException e) {
			// AES/CTR is available, but not the unpadded version? Huh?
			throw new NoSuchAlgorithmException("AES/CTR/NoPadding not available", e);
		}
		keySpec = null;
		n = 0;
		iv = new byte [16];
		hashKey = new byte [16];
		ghash = new GHASH();
		// Try to set a 256-bit key on the cipher. Some JCE's are
		// configured to disallow 256-bit AES if an extra policy
		// file has not been installed.
		try {
			SecretKeySpec spec = new SecretKeySpec(new byte [32], "AES");
			IvParameterSpec params = new IvParameterSpec(iv);
			cipher.init(Cipher.ENCRYPT_MODE, spec, params);
		} catch (InvalidKeyException e) {
			throw new NoSuchAlgorithmException("AES/CTR/NoPadding does not support 256-bit keys", e);
		} catch (InvalidAlgorithmParameterException e) {
			throw new NoSuchAlgorithmException("AES/CTR/NoPadding does not support 256-bit keys", e);
		}
	}
	@Override
	public void destroy() {
		// There doesn't seem to be a standard API to clean out a Cipher.
		// So we instead set the key and IV to all-zeroes to hopefully
		// destroy the sensitive data in the cipher instance.
		ghash.destroy();
		Noise.destroy(hashKey);
		Noise.destroy(iv);
		keySpec = new SecretKeySpec(new byte [32], "AES");
		IvParameterSpec params = new IvParameterSpec(iv);
		try {
			cipher.init(Cipher.ENCRYPT_MODE, keySpec, params);
		} catch (InvalidKeyException e) {
			// Shouldn't happen.
		} catch (InvalidAlgorithmParameterException e) {
			// Shouldn't happen.
		}
	}
	@Override
	public String getCipherName() {
		return "AESGCM";
	}
	@Override
	public int getKeyLength() {
		// AES-256 key.
		return 32;
	}
	@Override
	public int getMACLength() {
		// 16-byte GCM tag; no tag is produced before a key is set.
		return keySpec != null ? 16 : 0;
	}
	/**
	 * Sets the 256-bit AES key, derives the GHASH key, and resets the nonce.
	 */
	@Override
	public void initializeKey(byte[] key, int offset) {
		// Set the encryption key.
		keySpec = new SecretKeySpec(key, offset, 32, "AES");
		// Generate the hashing key by encrypting a block of zeroes.
		Arrays.fill(iv, (byte)0);
		Arrays.fill(hashKey, (byte)0);
		try {
			cipher.init(Cipher.ENCRYPT_MODE, keySpec, new IvParameterSpec(iv));
		} catch (InvalidKeyException e) {
			// Shouldn't happen.
			throw new IllegalStateException(e);
		} catch (InvalidAlgorithmParameterException e) {
			// Shouldn't happen.
			throw new IllegalStateException(e);
		}
		try {
			int result = cipher.update(hashKey, 0, 16, hashKey, 0);
			cipher.doFinal(hashKey, result);
		} catch (ShortBufferException e) {
			// Shouldn't happen.
			throw new IllegalStateException(e);
		} catch (IllegalBlockSizeException e) {
			// Shouldn't happen.
			throw new IllegalStateException(e);
		} catch (BadPaddingException e) {
			// Shouldn't happen.
			throw new IllegalStateException(e);
		}
		ghash.reset(hashKey, 0);
		// Reset the nonce.
		n = 0;
	}
	@Override
	public boolean hasKey() {
		return keySpec != null;
	}
	/**
	 * Set up to encrypt or decrypt the next packet.
	 *
	 * @param ad The associated data for the packet.
	 */
	private void setup(byte[] ad) throws InvalidKeyException, InvalidAlgorithmParameterException
	{
		// Check for nonce wrap-around.
		if (n == -1L)
			throw new IllegalStateException("Nonce has wrapped around");
		// Format the counter/IV block for AES/CTR/NoPadding.  Bytes 4..11
		// hold the 64-bit nonce big-endian; byte 15 starts the counter at 1.
		iv[0] = 0;
		iv[1] = 0;
		iv[2] = 0;
		iv[3] = 0;
		iv[4] = (byte)(n >> 56);
		iv[5] = (byte)(n >> 48);
		iv[6] = (byte)(n >> 40);
		iv[7] = (byte)(n >> 32);
		iv[8] = (byte)(n >> 24);
		iv[9] = (byte)(n >> 16);
		iv[10] = (byte)(n >> 8);
		iv[11] = (byte)n;
		iv[12] = 0;
		iv[13] = 0;
		iv[14] = 0;
		iv[15] = 1;
		++n;
		// Initialize the CTR mode cipher with the key and IV.
		cipher.init(Cipher.ENCRYPT_MODE, keySpec, new IvParameterSpec(iv));
		// Encrypt a block of zeroes to generate the hash key to XOR
		// the GHASH tag with at the end of the encrypt/decrypt operation.
		Arrays.fill(hashKey, (byte)0);
		try {
			cipher.update(hashKey, 0, 16, hashKey, 0);
		} catch (ShortBufferException e) {
			// Shouldn't happen.
			throw new IllegalStateException(e);
		}
		// Initialize the GHASH with the associated data value.
		ghash.reset();
		if (ad != null) {
			ghash.update(ad, 0, ad.length);
			ghash.pad();
		}
	}
	/**
	 * Encrypts with associated data, appending a 16-byte GCM tag.
	 *
	 * Fix: offsets and length are now validated up front.  The previous
	 * code clamped an out-of-range ciphertextOffset to space = 0 and never
	 * checked plaintextOffset or length at all, so negative values could
	 * flow into the array arithmetic below and index outside the buffers
	 * (CWE-119).
	 */
	@Override
	public int encryptWithAd(byte[] ad, byte[] plaintext, int plaintextOffset,
			byte[] ciphertext, int ciphertextOffset, int length)
			throws ShortBufferException {
		int space;
		if (ciphertextOffset < 0 || ciphertextOffset > ciphertext.length)
			throw new IllegalArgumentException();
		if (length < 0 || plaintextOffset < 0 || plaintextOffset > plaintext.length)
			throw new IllegalArgumentException();
		space = ciphertext.length - ciphertextOffset;
		if (keySpec == null) {
			// The key is not set yet - return the plaintext as-is.
			if (length > space)
				throw new ShortBufferException();
			if (plaintext != ciphertext || plaintextOffset != ciphertextOffset)
				System.arraycopy(plaintext, plaintextOffset, ciphertext, ciphertextOffset, length);
			return length;
		}
		// Need room for the ciphertext plus the 16-byte tag.
		if (space < 16 || length > (space - 16))
			throw new ShortBufferException();
		try {
			setup(ad);
			int result = cipher.update(plaintext, plaintextOffset, length, ciphertext, ciphertextOffset);
			cipher.doFinal(ciphertext, ciphertextOffset + result);
		} catch (InvalidKeyException e) {
			// Shouldn't happen.
			throw new IllegalStateException(e);
		} catch (InvalidAlgorithmParameterException e) {
			// Shouldn't happen.
			throw new IllegalStateException(e);
		} catch (IllegalBlockSizeException e) {
			// Shouldn't happen.
			throw new IllegalStateException(e);
		} catch (BadPaddingException e) {
			// Shouldn't happen.
			throw new IllegalStateException(e);
		}
		ghash.update(ciphertext, ciphertextOffset, length);
		ghash.pad(ad != null ? ad.length : 0, length);
		ghash.finish(ciphertext, ciphertextOffset + length, 16);
		for (int index = 0; index < 16; ++index)
			ciphertext[ciphertextOffset + length + index] ^= hashKey[index];
		return length + 16;
	}
	/**
	 * Verifies the trailing 16-byte tag and then decrypts.
	 *
	 * Fix: offsets and length are now validated up front instead of being
	 * clamped to space = 0, so negative values can no longer reach the
	 * array arithmetic below (CWE-119).
	 */
	@Override
	public int decryptWithAd(byte[] ad, byte[] ciphertext,
			int ciphertextOffset, byte[] plaintext, int plaintextOffset,
			int length) throws ShortBufferException, BadPaddingException {
		int space;
		if (ciphertextOffset < 0 || ciphertextOffset > ciphertext.length)
			throw new IllegalArgumentException();
		else
			space = ciphertext.length - ciphertextOffset;
		if (length > space)
			throw new ShortBufferException();
		if (length < 0 || plaintextOffset < 0 || plaintextOffset > plaintext.length)
			throw new IllegalArgumentException();
		space = plaintext.length - plaintextOffset;
		if (keySpec == null) {
			// The key is not set yet - return the ciphertext as-is.
			if (length > space)
				throw new ShortBufferException();
			if (plaintext != ciphertext || plaintextOffset != ciphertextOffset)
				System.arraycopy(ciphertext, ciphertextOffset, plaintext, plaintextOffset, length);
			return length;
		}
		if (length < 16)
			Noise.throwBadTagException();
		int dataLen = length - 16;
		if (dataLen > space)
			throw new ShortBufferException();
		try {
			setup(ad);
		} catch (InvalidKeyException e) {
			// Shouldn't happen.
			throw new IllegalStateException(e);
		} catch (InvalidAlgorithmParameterException e) {
			// Shouldn't happen.
			throw new IllegalStateException(e);
		}
		// Authenticate the ciphertext before decrypting it.
		ghash.update(ciphertext, ciphertextOffset, dataLen);
		ghash.pad(ad != null ? ad.length : 0, dataLen);
		ghash.finish(iv, 0, 16);
		// Compare computed and received tags without early exit.
		int temp = 0;
		for (int index = 0; index < 16; ++index)
			temp |= (hashKey[index] ^ iv[index] ^ ciphertext[ciphertextOffset + dataLen + index]);
		if ((temp & 0xFF) != 0)
			Noise.throwBadTagException();
		try {
			int result = cipher.update(ciphertext, ciphertextOffset, dataLen, plaintext, plaintextOffset);
			cipher.doFinal(plaintext, plaintextOffset + result);
		} catch (IllegalBlockSizeException e) {
			// Shouldn't happen.
			throw new IllegalStateException(e);
		} catch (BadPaddingException e) {
			// Shouldn't happen.
			throw new IllegalStateException(e);
		}
		return dataLen;
	}
	/**
	 * Creates a sibling cipher state keyed with the given key material.
	 */
	@Override
	public CipherState fork(byte[] key, int offset) {
		CipherState cipher;
		try {
			cipher = new AESGCMOnCtrCipherState();
		} catch (NoSuchAlgorithmException e) {
			// Shouldn't happen.
			return null;
		}
		cipher.initializeKey(key, offset);
		return cipher;
	}
	@Override
	public void setNonce(long nonce) {
		n = nonce;
	}
}
|
/*
* Copyright (C) 2016 Southern Storm Software, Pty Ltd.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
package com.southernstorm.noise.protocol;
import java.security.InvalidAlgorithmParameterException;
import java.security.InvalidKeyException;
import java.security.NoSuchAlgorithmException;
import java.util.Arrays;
import javax.crypto.BadPaddingException;
import javax.crypto.Cipher;
import javax.crypto.IllegalBlockSizeException;
import javax.crypto.NoSuchPaddingException;
import javax.crypto.ShortBufferException;
import javax.crypto.spec.IvParameterSpec;
import javax.crypto.spec.SecretKeySpec;
import com.southernstorm.noise.crypto.GHASH;
/**
* Emulates the "AESGCM" cipher for Noise using the "AES/CTR/NoPadding"
* transformation from JCA/JCE.
*
* This class is used on platforms that don't have "AES/GCM/NoPadding",
* but which do have the older "AES/CTR/NoPadding".
*/
class AESGCMOnCtrCipherState implements CipherState {
	private Cipher cipher;		// JCE "AES/CTR/NoPadding" transformation.
	private SecretKeySpec keySpec;	// Current AES-256 key, or null before initializeKey().
	private long n;			// 64-bit nonce counter for the next packet.
	private byte[] iv;		// 16-byte counter/IV block.
	private byte[] hashKey;		// Per-packet value XOR'ed into the GHASH tag.
	private GHASH ghash;		// GHASH accumulator over AD and ciphertext.
	/**
	 * Constructs a new cipher state for the "AESGCM" algorithm.
	 *
	 * @throws NoSuchAlgorithmException The system does not have a
	 * provider for this algorithm.
	 */
	public AESGCMOnCtrCipherState() throws NoSuchAlgorithmException
	{
		try {
			cipher = Cipher.getInstance("AES/CTR/NoPadding");
		} catch (NoSuchPaddingException e) {
			// AES/CTR is available, but not the unpadded version? Huh?
			throw new NoSuchAlgorithmException("AES/CTR/NoPadding not available", e);
		}
		keySpec = null;
		n = 0;
		iv = new byte [16];
		hashKey = new byte [16];
		ghash = new GHASH();
		// Try to set a 256-bit key on the cipher. Some JCE's are
		// configured to disallow 256-bit AES if an extra policy
		// file has not been installed.
		try {
			SecretKeySpec spec = new SecretKeySpec(new byte [32], "AES");
			IvParameterSpec params = new IvParameterSpec(iv);
			cipher.init(Cipher.ENCRYPT_MODE, spec, params);
		} catch (InvalidKeyException e) {
			throw new NoSuchAlgorithmException("AES/CTR/NoPadding does not support 256-bit keys", e);
		} catch (InvalidAlgorithmParameterException e) {
			throw new NoSuchAlgorithmException("AES/CTR/NoPadding does not support 256-bit keys", e);
		}
	}
	@Override
	public void destroy() {
		// There doesn't seem to be a standard API to clean out a Cipher.
		// So we instead set the key and IV to all-zeroes to hopefully
		// destroy the sensitive data in the cipher instance.
		ghash.destroy();
		Noise.destroy(hashKey);
		Noise.destroy(iv);
		keySpec = new SecretKeySpec(new byte [32], "AES");
		IvParameterSpec params = new IvParameterSpec(iv);
		try {
			cipher.init(Cipher.ENCRYPT_MODE, keySpec, params);
		} catch (InvalidKeyException e) {
			// Shouldn't happen.
		} catch (InvalidAlgorithmParameterException e) {
			// Shouldn't happen.
		}
	}
	@Override
	public String getCipherName() {
		return "AESGCM";
	}
	@Override
	public int getKeyLength() {
		// AES-256 key.
		return 32;
	}
	@Override
	public int getMACLength() {
		// 16-byte GCM tag; no tag is produced before a key is set.
		return keySpec != null ? 16 : 0;
	}
	/**
	 * Sets the 256-bit AES key, derives the GHASH key, and resets the nonce.
	 */
	@Override
	public void initializeKey(byte[] key, int offset) {
		// Set the encryption key.
		keySpec = new SecretKeySpec(key, offset, 32, "AES");
		// Generate the hashing key by encrypting a block of zeroes.
		Arrays.fill(iv, (byte)0);
		Arrays.fill(hashKey, (byte)0);
		try {
			cipher.init(Cipher.ENCRYPT_MODE, keySpec, new IvParameterSpec(iv));
		} catch (InvalidKeyException e) {
			// Shouldn't happen.
			throw new IllegalStateException(e);
		} catch (InvalidAlgorithmParameterException e) {
			// Shouldn't happen.
			throw new IllegalStateException(e);
		}
		try {
			int result = cipher.update(hashKey, 0, 16, hashKey, 0);
			cipher.doFinal(hashKey, result);
		} catch (ShortBufferException e) {
			// Shouldn't happen.
			throw new IllegalStateException(e);
		} catch (IllegalBlockSizeException e) {
			// Shouldn't happen.
			throw new IllegalStateException(e);
		} catch (BadPaddingException e) {
			// Shouldn't happen.
			throw new IllegalStateException(e);
		}
		ghash.reset(hashKey, 0);
		// Reset the nonce.
		n = 0;
	}
	@Override
	public boolean hasKey() {
		return keySpec != null;
	}
	/**
	 * Set up to encrypt or decrypt the next packet.
	 *
	 * @param ad The associated data for the packet.
	 */
	private void setup(byte[] ad) throws InvalidKeyException, InvalidAlgorithmParameterException
	{
		// Check for nonce wrap-around.
		if (n == -1L)
			throw new IllegalStateException("Nonce has wrapped around");
		// Format the counter/IV block for AES/CTR/NoPadding.  Bytes 4..11
		// hold the 64-bit nonce big-endian; byte 15 starts the counter at 1.
		iv[0] = 0;
		iv[1] = 0;
		iv[2] = 0;
		iv[3] = 0;
		iv[4] = (byte)(n >> 56);
		iv[5] = (byte)(n >> 48);
		iv[6] = (byte)(n >> 40);
		iv[7] = (byte)(n >> 32);
		iv[8] = (byte)(n >> 24);
		iv[9] = (byte)(n >> 16);
		iv[10] = (byte)(n >> 8);
		iv[11] = (byte)n;
		iv[12] = 0;
		iv[13] = 0;
		iv[14] = 0;
		iv[15] = 1;
		++n;
		// Initialize the CTR mode cipher with the key and IV.
		cipher.init(Cipher.ENCRYPT_MODE, keySpec, new IvParameterSpec(iv));
		// Encrypt a block of zeroes to generate the hash key to XOR
		// the GHASH tag with at the end of the encrypt/decrypt operation.
		Arrays.fill(hashKey, (byte)0);
		try {
			cipher.update(hashKey, 0, 16, hashKey, 0);
		} catch (ShortBufferException e) {
			// Shouldn't happen.
			throw new IllegalStateException(e);
		}
		// Initialize the GHASH with the associated data value.
		ghash.reset();
		if (ad != null) {
			ghash.update(ad, 0, ad.length);
			ghash.pad();
		}
	}
	/**
	 * Encrypts with associated data, appending a 16-byte GCM tag.
	 * All offsets/lengths are validated before the buffers are touched.
	 */
	@Override
	public int encryptWithAd(byte[] ad, byte[] plaintext, int plaintextOffset,
			byte[] ciphertext, int ciphertextOffset, int length)
			throws ShortBufferException {
		int space;
		// Reject out-of-range offsets and negative lengths up front.
		if (ciphertextOffset < 0 || ciphertextOffset > ciphertext.length)
			throw new IllegalArgumentException();
		if (length < 0 || plaintextOffset < 0 || plaintextOffset > plaintext.length)
			throw new IllegalArgumentException();
		space = ciphertext.length - ciphertextOffset;
		if (keySpec == null) {
			// The key is not set yet - return the plaintext as-is.
			if (length > space)
				throw new ShortBufferException();
			if (plaintext != ciphertext || plaintextOffset != ciphertextOffset)
				System.arraycopy(plaintext, plaintextOffset, ciphertext, ciphertextOffset, length);
			return length;
		}
		// Need room for the ciphertext plus the 16-byte tag.
		if (space < 16 || length > (space - 16))
			throw new ShortBufferException();
		try {
			setup(ad);
			int result = cipher.update(plaintext, plaintextOffset, length, ciphertext, ciphertextOffset);
			cipher.doFinal(ciphertext, ciphertextOffset + result);
		} catch (InvalidKeyException e) {
			// Shouldn't happen.
			throw new IllegalStateException(e);
		} catch (InvalidAlgorithmParameterException e) {
			// Shouldn't happen.
			throw new IllegalStateException(e);
		} catch (IllegalBlockSizeException e) {
			// Shouldn't happen.
			throw new IllegalStateException(e);
		} catch (BadPaddingException e) {
			// Shouldn't happen.
			throw new IllegalStateException(e);
		}
		ghash.update(ciphertext, ciphertextOffset, length);
		ghash.pad(ad != null ? ad.length : 0, length);
		ghash.finish(ciphertext, ciphertextOffset + length, 16);
		for (int index = 0; index < 16; ++index)
			ciphertext[ciphertextOffset + length + index] ^= hashKey[index];
		return length + 16;
	}
	/**
	 * Verifies the trailing 16-byte tag and then decrypts.  Throws a
	 * bad-tag exception (via Noise.throwBadTagException) on failure.
	 */
	@Override
	public int decryptWithAd(byte[] ad, byte[] ciphertext,
			int ciphertextOffset, byte[] plaintext, int plaintextOffset,
			int length) throws ShortBufferException, BadPaddingException {
		int space;
		// Reject out-of-range offsets and negative lengths up front.
		if (ciphertextOffset < 0 || ciphertextOffset > ciphertext.length)
			throw new IllegalArgumentException();
		else
			space = ciphertext.length - ciphertextOffset;
		if (length > space)
			throw new ShortBufferException();
		if (length < 0 || plaintextOffset < 0 || plaintextOffset > plaintext.length)
			throw new IllegalArgumentException();
		space = plaintext.length - plaintextOffset;
		if (keySpec == null) {
			// The key is not set yet - return the ciphertext as-is.
			if (length > space)
				throw new ShortBufferException();
			if (plaintext != ciphertext || plaintextOffset != ciphertextOffset)
				System.arraycopy(ciphertext, ciphertextOffset, plaintext, plaintextOffset, length);
			return length;
		}
		if (length < 16)
			Noise.throwBadTagException();
		int dataLen = length - 16;
		if (dataLen > space)
			throw new ShortBufferException();
		try {
			setup(ad);
		} catch (InvalidKeyException e) {
			// Shouldn't happen.
			throw new IllegalStateException(e);
		} catch (InvalidAlgorithmParameterException e) {
			// Shouldn't happen.
			throw new IllegalStateException(e);
		}
		// Authenticate the ciphertext before decrypting it.
		ghash.update(ciphertext, ciphertextOffset, dataLen);
		ghash.pad(ad != null ? ad.length : 0, dataLen);
		ghash.finish(iv, 0, 16);
		// Compare computed and received tags without early exit.
		int temp = 0;
		for (int index = 0; index < 16; ++index)
			temp |= (hashKey[index] ^ iv[index] ^ ciphertext[ciphertextOffset + dataLen + index]);
		if ((temp & 0xFF) != 0)
			Noise.throwBadTagException();
		try {
			int result = cipher.update(ciphertext, ciphertextOffset, dataLen, plaintext, plaintextOffset);
			cipher.doFinal(plaintext, plaintextOffset + result);
		} catch (IllegalBlockSizeException e) {
			// Shouldn't happen.
			throw new IllegalStateException(e);
		} catch (BadPaddingException e) {
			// Shouldn't happen.
			throw new IllegalStateException(e);
		}
		return dataLen;
	}
	/**
	 * Creates a sibling cipher state keyed with the given key material.
	 */
	@Override
	public CipherState fork(byte[] key, int offset) {
		CipherState cipher;
		try {
			cipher = new AESGCMOnCtrCipherState();
		} catch (NoSuchAlgorithmException e) {
			// Shouldn't happen.
			return null;
		}
		cipher.initializeKey(key, offset);
		return cipher;
	}
	@Override
	public void setNonce(long nonce) {
		n = nonce;
	}
}
|
4291_2
|
crossvul
|
java
|
CWE-119
|
Improper Restriction of Operations within the Bounds of a Memory Buffer - Certain languages allow direct addressing of memory locations and do not automatically ensure that these locations are valid for the memory buffer that is being referenced.
|
java
|
/*
* Copyright (C) 2016 Southern Storm Software, Pty Ltd.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
package com.southernstorm.noise.protocol;
import java.util.Arrays;
import javax.crypto.BadPaddingException;
import javax.crypto.ShortBufferException;
import com.southernstorm.noise.crypto.ChaChaCore;
import com.southernstorm.noise.crypto.Poly1305;
/**
* Implements the ChaChaPoly cipher for Noise.
*/
class ChaChaPolyCipherState implements CipherState {
private Poly1305 poly;
private int[] input;
private int[] output;
private byte[] polyKey;
long n;
private boolean haskey;
/**
* Constructs a new cipher state for the "ChaChaPoly" algorithm.
*/
public ChaChaPolyCipherState()
{
poly = new Poly1305();
input = new int [16];
output = new int [16];
polyKey = new byte [32];
n = 0;
haskey = false;
}
@Override
public void destroy() {
poly.destroy();
Arrays.fill(input, 0);
Arrays.fill(output, 0);
Noise.destroy(polyKey);
}
	@Override
	public String getCipherName() {
		// Noise protocol name for this cipher function.
		return "ChaChaPoly";
	}
	@Override
	public int getKeyLength() {
		// 256-bit (32-byte) ChaCha20 key.
		return 32;
	}
	@Override
	public int getMACLength() {
		// 16-byte Poly1305 tag; no tag is appended before a key is set.
		return haskey ? 16 : 0;
	}
	@Override
	public void initializeKey(byte[] key, int offset) {
		// Load the 256-bit key into the ChaCha20 input block and
		// restart the nonce sequence from zero.
		ChaChaCore.initKey256(input, key, offset);
		n = 0;
		haskey = true;
	}
	@Override
	public boolean hasKey() {
		// True once initializeKey() has been called.
		return haskey;
	}
	/**
	 * XOR's the output of ChaCha20 with a byte buffer.
	 *
	 * @param input The input byte buffer.
	 * @param inputOffset The offset of the first input byte.
	 * @param output The output byte buffer (can be the same as the input).
	 * @param outputOffset The offset of the first output byte.
	 * @param length The number of bytes to XOR between 1 and 64.
	 * @param block The ChaCha20 output block.
	 */
	private static void xorBlock(byte[] input, int inputOffset, byte[] output, int outputOffset, int length, int[] block)
	{
		int posn = 0;
		int value;
		// Consume whole 32-bit keystream words, emitting bytes little-endian.
		while (length >= 4) {
			value = block[posn++];
			output[outputOffset] = (byte)(input[inputOffset] ^ value);
			output[outputOffset + 1] = (byte)(input[inputOffset + 1] ^ (value >> 8));
			output[outputOffset + 2] = (byte)(input[inputOffset + 2] ^ (value >> 16));
			output[outputOffset + 3] = (byte)(input[inputOffset + 3] ^ (value >> 24));
			inputOffset += 4;
			outputOffset += 4;
			length -= 4;
		}
		// Handle the final 1-3 bytes of a partial keystream word.
		if (length == 3) {
			value = block[posn];
			output[outputOffset] = (byte)(input[inputOffset] ^ value);
			output[outputOffset + 1] = (byte)(input[inputOffset + 1] ^ (value >> 8));
			output[outputOffset + 2] = (byte)(input[inputOffset + 2] ^ (value >> 16));
		} else if (length == 2) {
			value = block[posn];
			output[outputOffset] = (byte)(input[inputOffset] ^ value);
			output[outputOffset + 1] = (byte)(input[inputOffset + 1] ^ (value >> 8));
		} else if (length == 1) {
			value = block[posn];
			output[outputOffset] = (byte)(input[inputOffset] ^ value);
		}
	}
/**
* Set up to encrypt or decrypt the next packet.
*
* @param ad The associated data for the packet.
*/
private void setup(byte[] ad)
{
if (n == -1L)
throw new IllegalStateException("Nonce has wrapped around");
ChaChaCore.initIV(input, n++);
ChaChaCore.hash(output, input);
Arrays.fill(polyKey, (byte)0);
xorBlock(polyKey, 0, polyKey, 0, 32, output);
poly.reset(polyKey, 0);
if (ad != null) {
poly.update(ad, 0, ad.length);
poly.pad();
}
if (++(input[12]) == 0)
++(input[13]);
}
/**
* Puts a 64-bit integer into a buffer in little-endian order.
*
* @param output The output buffer.
* @param offset The offset into the output buffer.
* @param value The 64-bit integer value.
*/
private static void putLittleEndian64(byte[] output, int offset, long value)
{
output[offset] = (byte)value;
output[offset + 1] = (byte)(value >> 8);
output[offset + 2] = (byte)(value >> 16);
output[offset + 3] = (byte)(value >> 24);
output[offset + 4] = (byte)(value >> 32);
output[offset + 5] = (byte)(value >> 40);
output[offset + 6] = (byte)(value >> 48);
output[offset + 7] = (byte)(value >> 56);
}
/**
* Finishes up the authentication tag for a packet.
*
* @param ad The associated data.
* @param length The length of the plaintext data.
*/
private void finish(byte[] ad, int length)
{
poly.pad();
putLittleEndian64(polyKey, 0, ad != null ? ad.length : 0);
putLittleEndian64(polyKey, 8, length);
poly.update(polyKey, 0, 16);
poly.finish(polyKey, 0);
}
/**
* Encrypts or decrypts a buffer of bytes for the active packet.
*
* @param plaintext The plaintext data to be encrypted.
* @param plaintextOffset The offset to the first plaintext byte.
* @param ciphertext The ciphertext data that results from encryption.
* @param ciphertextOffset The offset to the first ciphertext byte.
* @param length The number of bytes to encrypt.
*/
private void encrypt(byte[] plaintext, int plaintextOffset,
byte[] ciphertext, int ciphertextOffset, int length) {
while (length > 0) {
int tempLen = 64;
if (tempLen > length)
tempLen = length;
ChaChaCore.hash(output, input);
xorBlock(plaintext, plaintextOffset, ciphertext, ciphertextOffset, tempLen, output);
if (++(input[12]) == 0)
++(input[13]);
plaintextOffset += tempLen;
ciphertextOffset += tempLen;
length -= tempLen;
}
}
@Override
public int encryptWithAd(byte[] ad, byte[] plaintext, int plaintextOffset,
byte[] ciphertext, int ciphertextOffset, int length) throws ShortBufferException {
int space;
if (ciphertextOffset > ciphertext.length)
space = 0;
else
space = ciphertext.length - ciphertextOffset;
if (!haskey) {
// The key is not set yet - return the plaintext as-is.
if (length > space)
throw new ShortBufferException();
if (plaintext != ciphertext || plaintextOffset != ciphertextOffset)
System.arraycopy(plaintext, plaintextOffset, ciphertext, ciphertextOffset, length);
return length;
}
if (space < 16 || length > (space - 16))
throw new ShortBufferException();
setup(ad);
encrypt(plaintext, plaintextOffset, ciphertext, ciphertextOffset, length);
poly.update(ciphertext, ciphertextOffset, length);
finish(ad, length);
System.arraycopy(polyKey, 0, ciphertext, ciphertextOffset + length, 16);
return length + 16;
}
@Override
public int decryptWithAd(byte[] ad, byte[] ciphertext,
int ciphertextOffset, byte[] plaintext, int plaintextOffset,
int length) throws ShortBufferException, BadPaddingException {
int space;
if (ciphertextOffset > ciphertext.length)
space = 0;
else
space = ciphertext.length - ciphertextOffset;
if (length > space)
throw new ShortBufferException();
if (plaintextOffset > plaintext.length)
space = 0;
else
space = plaintext.length - plaintextOffset;
if (!haskey) {
// The key is not set yet - return the ciphertext as-is.
if (length > space)
throw new ShortBufferException();
if (plaintext != ciphertext || plaintextOffset != ciphertextOffset)
System.arraycopy(ciphertext, ciphertextOffset, plaintext, plaintextOffset, length);
return length;
}
if (length < 16)
Noise.throwBadTagException();
int dataLen = length - 16;
if (dataLen > space)
throw new ShortBufferException();
setup(ad);
poly.update(ciphertext, ciphertextOffset, dataLen);
finish(ad, dataLen);
int temp = 0;
for (int index = 0; index < 16; ++index)
temp |= (polyKey[index] ^ ciphertext[ciphertextOffset + dataLen + index]);
if ((temp & 0xFF) != 0)
Noise.throwBadTagException();
encrypt(ciphertext, ciphertextOffset, plaintext, plaintextOffset, dataLen);
return dataLen;
}
@Override
public CipherState fork(byte[] key, int offset) {
CipherState cipher = new ChaChaPolyCipherState();
cipher.initializeKey(key, offset);
return cipher;
}
@Override
public void setNonce(long nonce) {
n = nonce;
}
}
|
/*
* Copyright (C) 2016 Southern Storm Software, Pty Ltd.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
package com.southernstorm.noise.protocol;
import java.util.Arrays;
import javax.crypto.BadPaddingException;
import javax.crypto.ShortBufferException;
import com.southernstorm.noise.crypto.ChaChaCore;
import com.southernstorm.noise.crypto.Poly1305;
/**
* Implements the ChaChaPoly cipher for Noise.
*/
class ChaChaPolyCipherState implements CipherState {
private Poly1305 poly;
private int[] input;
private int[] output;
private byte[] polyKey;
long n;
private boolean haskey;
/**
* Constructs a new cipher state for the "ChaChaPoly" algorithm.
*/
public ChaChaPolyCipherState()
{
poly = new Poly1305();
input = new int [16];
output = new int [16];
polyKey = new byte [32];
n = 0;
haskey = false;
}
@Override
public void destroy() {
poly.destroy();
Arrays.fill(input, 0);
Arrays.fill(output, 0);
Noise.destroy(polyKey);
}
@Override
public String getCipherName() {
return "ChaChaPoly";
}
@Override
public int getKeyLength() {
return 32;
}
@Override
public int getMACLength() {
return haskey ? 16 : 0;
}
@Override
public void initializeKey(byte[] key, int offset) {
ChaChaCore.initKey256(input, key, offset);
n = 0;
haskey = true;
}
@Override
public boolean hasKey() {
return haskey;
}
/**
* XOR's the output of ChaCha20 with a byte buffer.
*
* @param input The input byte buffer.
* @param inputOffset The offset of the first input byte.
* @param output The output byte buffer (can be the same as the input).
* @param outputOffset The offset of the first output byte.
* @param length The number of bytes to XOR between 1 and 64.
* @param block The ChaCha20 output block.
*/
private static void xorBlock(byte[] input, int inputOffset, byte[] output, int outputOffset, int length, int[] block)
{
int posn = 0;
int value;
while (length >= 4) {
value = block[posn++];
output[outputOffset] = (byte)(input[inputOffset] ^ value);
output[outputOffset + 1] = (byte)(input[inputOffset + 1] ^ (value >> 8));
output[outputOffset + 2] = (byte)(input[inputOffset + 2] ^ (value >> 16));
output[outputOffset + 3] = (byte)(input[inputOffset + 3] ^ (value >> 24));
inputOffset += 4;
outputOffset += 4;
length -= 4;
}
if (length == 3) {
value = block[posn];
output[outputOffset] = (byte)(input[inputOffset] ^ value);
output[outputOffset + 1] = (byte)(input[inputOffset + 1] ^ (value >> 8));
output[outputOffset + 2] = (byte)(input[inputOffset + 2] ^ (value >> 16));
} else if (length == 2) {
value = block[posn];
output[outputOffset] = (byte)(input[inputOffset] ^ value);
output[outputOffset + 1] = (byte)(input[inputOffset + 1] ^ (value >> 8));
} else if (length == 1) {
value = block[posn];
output[outputOffset] = (byte)(input[inputOffset] ^ value);
}
}
/**
* Set up to encrypt or decrypt the next packet.
*
* @param ad The associated data for the packet.
*/
private void setup(byte[] ad)
{
if (n == -1L)
throw new IllegalStateException("Nonce has wrapped around");
ChaChaCore.initIV(input, n++);
ChaChaCore.hash(output, input);
Arrays.fill(polyKey, (byte)0);
xorBlock(polyKey, 0, polyKey, 0, 32, output);
poly.reset(polyKey, 0);
if (ad != null) {
poly.update(ad, 0, ad.length);
poly.pad();
}
if (++(input[12]) == 0)
++(input[13]);
}
/**
* Puts a 64-bit integer into a buffer in little-endian order.
*
* @param output The output buffer.
* @param offset The offset into the output buffer.
* @param value The 64-bit integer value.
*/
private static void putLittleEndian64(byte[] output, int offset, long value)
{
output[offset] = (byte)value;
output[offset + 1] = (byte)(value >> 8);
output[offset + 2] = (byte)(value >> 16);
output[offset + 3] = (byte)(value >> 24);
output[offset + 4] = (byte)(value >> 32);
output[offset + 5] = (byte)(value >> 40);
output[offset + 6] = (byte)(value >> 48);
output[offset + 7] = (byte)(value >> 56);
}
/**
* Finishes up the authentication tag for a packet.
*
* @param ad The associated data.
* @param length The length of the plaintext data.
*/
private void finish(byte[] ad, int length)
{
poly.pad();
putLittleEndian64(polyKey, 0, ad != null ? ad.length : 0);
putLittleEndian64(polyKey, 8, length);
poly.update(polyKey, 0, 16);
poly.finish(polyKey, 0);
}
/**
* Encrypts or decrypts a buffer of bytes for the active packet.
*
* @param plaintext The plaintext data to be encrypted.
* @param plaintextOffset The offset to the first plaintext byte.
* @param ciphertext The ciphertext data that results from encryption.
* @param ciphertextOffset The offset to the first ciphertext byte.
* @param length The number of bytes to encrypt.
*/
private void encrypt(byte[] plaintext, int plaintextOffset,
byte[] ciphertext, int ciphertextOffset, int length) {
while (length > 0) {
int tempLen = 64;
if (tempLen > length)
tempLen = length;
ChaChaCore.hash(output, input);
xorBlock(plaintext, plaintextOffset, ciphertext, ciphertextOffset, tempLen, output);
if (++(input[12]) == 0)
++(input[13]);
plaintextOffset += tempLen;
ciphertextOffset += tempLen;
length -= tempLen;
}
}
@Override
public int encryptWithAd(byte[] ad, byte[] plaintext, int plaintextOffset,
byte[] ciphertext, int ciphertextOffset, int length) throws ShortBufferException {
int space;
if (ciphertextOffset < 0 || ciphertextOffset > ciphertext.length)
throw new IllegalArgumentException();
if (length < 0 || plaintextOffset < 0 || plaintextOffset > plaintext.length)
throw new IllegalArgumentException();
space = ciphertext.length - ciphertextOffset;
if (!haskey) {
// The key is not set yet - return the plaintext as-is.
if (length > space)
throw new ShortBufferException();
if (plaintext != ciphertext || plaintextOffset != ciphertextOffset)
System.arraycopy(plaintext, plaintextOffset, ciphertext, ciphertextOffset, length);
return length;
}
if (space < 16 || length > (space - 16))
throw new ShortBufferException();
setup(ad);
encrypt(plaintext, plaintextOffset, ciphertext, ciphertextOffset, length);
poly.update(ciphertext, ciphertextOffset, length);
finish(ad, length);
System.arraycopy(polyKey, 0, ciphertext, ciphertextOffset + length, 16);
return length + 16;
}
@Override
public int decryptWithAd(byte[] ad, byte[] ciphertext,
int ciphertextOffset, byte[] plaintext, int plaintextOffset,
int length) throws ShortBufferException, BadPaddingException {
int space;
if (ciphertextOffset < 0 || ciphertextOffset > ciphertext.length)
throw new IllegalArgumentException();
else
space = ciphertext.length - ciphertextOffset;
if (length > space)
throw new ShortBufferException();
if (length < 0 || plaintextOffset < 0 || plaintextOffset > plaintext.length)
throw new IllegalArgumentException();
space = plaintext.length - plaintextOffset;
if (!haskey) {
// The key is not set yet - return the ciphertext as-is.
if (length > space)
throw new ShortBufferException();
if (plaintext != ciphertext || plaintextOffset != ciphertextOffset)
System.arraycopy(ciphertext, ciphertextOffset, plaintext, plaintextOffset, length);
return length;
}
if (length < 16)
Noise.throwBadTagException();
int dataLen = length - 16;
if (dataLen > space)
throw new ShortBufferException();
setup(ad);
poly.update(ciphertext, ciphertextOffset, dataLen);
finish(ad, dataLen);
int temp = 0;
for (int index = 0; index < 16; ++index)
temp |= (polyKey[index] ^ ciphertext[ciphertextOffset + dataLen + index]);
if ((temp & 0xFF) != 0)
Noise.throwBadTagException();
encrypt(ciphertext, ciphertextOffset, plaintext, plaintextOffset, dataLen);
return dataLen;
}
@Override
public CipherState fork(byte[] key, int offset) {
CipherState cipher = new ChaChaPolyCipherState();
cipher.initializeKey(key, offset);
return cipher;
}
@Override
public void setNonce(long nonce) {
n = nonce;
}
}
|
4291_3
|
crossvul
|
java
|
CWE-119
|
Improper Restriction of Operations within the Bounds of a Memory Buffer - Certain languages allow direct addressing of memory locations and do not automatically ensure that these locations are valid for the memory buffer that is being referenced.
|
java
|
/*
* Copyright (C) 2016 Southern Storm Software, Pty Ltd.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
package com.southernstorm.noise.protocol;
import javax.crypto.BadPaddingException;
import javax.crypto.ShortBufferException;
/**
* Interface to an authenticated cipher for use in the Noise protocol.
*
* CipherState objects are used to encrypt or decrypt data during a
* session. Once the handshake has completed, HandshakeState.split()
* will create two CipherState objects for encrypting packets sent to
* the other party, and decrypting packets received from the other party.
*/
public interface CipherState extends Destroyable {
/**
* Gets the Noise protocol name for this cipher.
*
* @return The cipher name.
*/
String getCipherName();
/**
* Gets the length of the key values for this cipher.
*
* @return The length of the key in bytes; usually 32.
*/
int getKeyLength();
/**
* Gets the length of the MAC values for this cipher.
*
* @return The length of MAC values in bytes, or zero if the
* key has not yet been initialized.
*/
int getMACLength();
/**
* Initializes the key on this cipher object.
*
* @param key Points to a buffer that contains the key.
* @param offset The offset of the key in the key buffer.
*
* The key buffer must contain at least getKeyLength() bytes
* starting at offset.
*
* @see #hasKey()
*/
void initializeKey(byte[] key, int offset);
/**
* Determine if this cipher object has been configured with a key.
*
* @return true if this cipher object has a key; false if the
* key has not yet been set with initializeKey().
*
* @see #initializeKey(byte[], int)
*/
boolean hasKey();
/**
* Encrypts a plaintext buffer using the cipher and a block of associated data.
*
* @param ad The associated data, or null if there is none.
* @param plaintext The buffer containing the plaintext to encrypt.
* @param plaintextOffset The offset within the plaintext buffer of the
* first byte or plaintext data.
* @param ciphertext The buffer to place the ciphertext in. This can
* be the same as the plaintext buffer.
* @param ciphertextOffset The first offset within the ciphertext buffer
* to place the ciphertext and the MAC tag.
* @param length The length of the plaintext.
* @return The length of the ciphertext plus the MAC tag, or -1 if the
* ciphertext buffer is not large enough to hold the result.
*
* @throws ShortBufferException The ciphertext buffer does not have
* enough space to hold the ciphertext plus MAC.
*
* @throws IllegalStateException The nonce has wrapped around.
*
* The plaintext and ciphertext buffers can be the same for in-place
* encryption. In that case, plaintextOffset must be identical to
* ciphertextOffset.
*
* There must be enough space in the ciphertext buffer to accomodate
* length + getMACLength() bytes of data starting at ciphertextOffset.
*/
int encryptWithAd(byte[] ad, byte[] plaintext, int plaintextOffset, byte[] ciphertext, int ciphertextOffset, int length) throws ShortBufferException;
/**
* Decrypts a ciphertext buffer using the cipher and a block of associated data.
*
* @param ad The associated data, or null if there is none.
* @param ciphertext The buffer containing the ciphertext to decrypt.
* @param ciphertextOffset The offset within the ciphertext buffer of
* the first byte of ciphertext data.
* @param plaintext The buffer to place the plaintext in. This can be
* the same as the ciphertext buffer.
* @param plaintextOffset The first offset within the plaintext buffer
* to place the plaintext.
* @param length The length of the incoming ciphertext plus the MAC tag.
* @return The length of the plaintext with the MAC tag stripped off.
*
* @throws ShortBufferException The plaintext buffer does not have
* enough space to store the decrypted data.
*
* @throws BadPaddingException The MAC value failed to verify.
*
* @throws IllegalStateException The nonce has wrapped around.
*
* The plaintext and ciphertext buffers can be the same for in-place
* decryption. In that case, ciphertextOffset must be identical to
* plaintextOffset.
*/
int decryptWithAd(byte[] ad, byte[] ciphertext, int ciphertextOffset, byte[] plaintext, int plaintextOffset, int length) throws ShortBufferException, BadPaddingException;
/**
* Creates a new instance of this cipher and initializes it with a key.
*
* @param key The buffer containing the key.
* @param offset The offset into the key buffer of the first key byte.
* @return A new CipherState of the same class as this one.
*/
CipherState fork(byte[] key, int offset);
/**
* Sets the nonce value.
*
* @param nonce The new nonce value, which must be greater than or equal
* to the current value.
*
* This function is intended for testing purposes only. If the nonce
* value goes backwards then security may be compromised.
*/
void setNonce(long nonce);
}
|
/*
* Copyright (C) 2016 Southern Storm Software, Pty Ltd.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
package com.southernstorm.noise.protocol;
import javax.crypto.BadPaddingException;
import javax.crypto.ShortBufferException;
/**
* Interface to an authenticated cipher for use in the Noise protocol.
*
* CipherState objects are used to encrypt or decrypt data during a
* session. Once the handshake has completed, HandshakeState.split()
* will create two CipherState objects for encrypting packets sent to
* the other party, and decrypting packets received from the other party.
*/
public interface CipherState extends Destroyable {
/**
* Gets the Noise protocol name for this cipher.
*
* @return The cipher name.
*/
String getCipherName();
/**
* Gets the length of the key values for this cipher.
*
* @return The length of the key in bytes; usually 32.
*/
int getKeyLength();
/**
* Gets the length of the MAC values for this cipher.
*
* @return The length of MAC values in bytes, or zero if the
* key has not yet been initialized.
*/
int getMACLength();
/**
* Initializes the key on this cipher object.
*
* @param key Points to a buffer that contains the key.
* @param offset The offset of the key in the key buffer.
*
* The key buffer must contain at least getKeyLength() bytes
* starting at offset.
*
* @see #hasKey()
*/
void initializeKey(byte[] key, int offset);
/**
* Determine if this cipher object has been configured with a key.
*
* @return true if this cipher object has a key; false if the
* key has not yet been set with initializeKey().
*
* @see #initializeKey(byte[], int)
*/
boolean hasKey();
/**
* Encrypts a plaintext buffer using the cipher and a block of associated data.
*
* @param ad The associated data, or null if there is none.
* @param plaintext The buffer containing the plaintext to encrypt.
* @param plaintextOffset The offset within the plaintext buffer of the
* first byte or plaintext data.
* @param ciphertext The buffer to place the ciphertext in. This can
* be the same as the plaintext buffer.
* @param ciphertextOffset The first offset within the ciphertext buffer
* to place the ciphertext and the MAC tag.
* @param length The length of the plaintext.
* @return The length of the ciphertext plus the MAC tag, or -1 if the
* ciphertext buffer is not large enough to hold the result.
*
* @throws ShortBufferException The ciphertext buffer does not have
* enough space to hold the ciphertext plus MAC.
*
* @throws IllegalStateException The nonce has wrapped around.
*
* @throws IllegalArgumentException One of the parameters is out of range.
*
* The plaintext and ciphertext buffers can be the same for in-place
* encryption. In that case, plaintextOffset must be identical to
* ciphertextOffset.
*
* There must be enough space in the ciphertext buffer to accomodate
* length + getMACLength() bytes of data starting at ciphertextOffset.
*/
int encryptWithAd(byte[] ad, byte[] plaintext, int plaintextOffset, byte[] ciphertext, int ciphertextOffset, int length) throws ShortBufferException;
/**
* Decrypts a ciphertext buffer using the cipher and a block of associated data.
*
* @param ad The associated data, or null if there is none.
* @param ciphertext The buffer containing the ciphertext to decrypt.
* @param ciphertextOffset The offset within the ciphertext buffer of
* the first byte of ciphertext data.
* @param plaintext The buffer to place the plaintext in. This can be
* the same as the ciphertext buffer.
* @param plaintextOffset The first offset within the plaintext buffer
* to place the plaintext.
* @param length The length of the incoming ciphertext plus the MAC tag.
* @return The length of the plaintext with the MAC tag stripped off.
*
* @throws ShortBufferException The plaintext buffer does not have
* enough space to store the decrypted data.
*
* @throws BadPaddingException The MAC value failed to verify.
*
* @throws IllegalStateException The nonce has wrapped around.
*
* @throws IllegalArgumentException One of the parameters is out of range.
*
* The plaintext and ciphertext buffers can be the same for in-place
* decryption. In that case, ciphertextOffset must be identical to
* plaintextOffset.
*/
int decryptWithAd(byte[] ad, byte[] ciphertext, int ciphertextOffset, byte[] plaintext, int plaintextOffset, int length) throws ShortBufferException, BadPaddingException;
/**
* Creates a new instance of this cipher and initializes it with a key.
*
* @param key The buffer containing the key.
* @param offset The offset into the key buffer of the first key byte.
* @return A new CipherState of the same class as this one.
*/
CipherState fork(byte[] key, int offset);
/**
* Sets the nonce value.
*
* @param nonce The new nonce value, which must be greater than or equal
* to the current value.
*
* This function is intended for testing purposes only. If the nonce
* value goes backwards then security may be compromised.
*/
void setNonce(long nonce);
}
|
4291_4
|
crossvul
|
java
|
CWE-119
|
Improper Restriction of Operations within the Bounds of a Memory Buffer - Certain languages allow direct addressing of memory locations and do not automatically ensure that these locations are valid for the memory buffer that is being referenced.
|
rust
|
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! A double-ended queue implemented with a growable ring buffer.
//!
//! This queue has `O(1)` amortized inserts and removals from both ends of the
//! container. It also has `O(1)` indexing like a vector. The contained elements
//! are not required to be copyable, and the queue will be sendable if the
//! contained type is sendable.
#![stable(feature = "rust1", since = "1.0.0")]
use core::cmp::Ordering;
use core::fmt;
use core::iter::{repeat, FromIterator, FusedIterator};
use core::mem;
use core::ops::{Index, IndexMut, Place, Placer, InPlace};
use core::ptr;
use core::ptr::Shared;
use core::slice;
use core::hash::{Hash, Hasher};
use core::cmp;
use raw_vec::RawVec;
use super::range::RangeArgument;
use Bound::{Excluded, Included, Unbounded};
use super::vec::Vec;
const INITIAL_CAPACITY: usize = 7; // 2^3 - 1
const MINIMUM_CAPACITY: usize = 1; // 2 - 1
#[cfg(target_pointer_width = "32")]
const MAXIMUM_ZST_CAPACITY: usize = 1 << (32 - 1); // Largest possible power of two
#[cfg(target_pointer_width = "64")]
const MAXIMUM_ZST_CAPACITY: usize = 1 << (64 - 1); // Largest possible power of two
/// A double-ended queue implemented with a growable ring buffer.
///
/// The "default" usage of this type as a queue is to use [`push_back`] to add to
/// the queue, and [`pop_front`] to remove from the queue. [`extend`] and [`append`]
/// push onto the back in this manner, and iterating over `VecDeque` goes front
/// to back.
///
/// [`push_back`]: #method.push_back
/// [`pop_front`]: #method.pop_front
/// [`extend`]: #method.extend
/// [`append`]: #method.append
#[stable(feature = "rust1", since = "1.0.0")]
pub struct VecDeque<T> {
// tail and head are pointers into the buffer. Tail always points
// to the first element that could be read, Head always points
// to where data should be written.
// If tail == head the buffer is empty. The length of the ringbuffer
// is defined as the distance between the two.
tail: usize,
head: usize,
buf: RawVec<T>,
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: Clone> Clone for VecDeque<T> {
fn clone(&self) -> VecDeque<T> {
self.iter().cloned().collect()
}
}
#[stable(feature = "rust1", since = "1.0.0")]
unsafe impl<#[may_dangle] T> Drop for VecDeque<T> {
fn drop(&mut self) {
let (front, back) = self.as_mut_slices();
unsafe {
// use drop for [T]
ptr::drop_in_place(front);
ptr::drop_in_place(back);
}
// RawVec handles deallocation
}
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> Default for VecDeque<T> {
/// Creates an empty `VecDeque<T>`.
#[inline]
fn default() -> VecDeque<T> {
VecDeque::new()
}
}
impl<T> VecDeque<T> {
/// Marginally more convenient
#[inline]
fn ptr(&self) -> *mut T {
self.buf.ptr()
}
/// Marginally more convenient
#[inline]
fn cap(&self) -> usize {
if mem::size_of::<T>() == 0 {
// For zero sized types, we are always at maximum capacity
MAXIMUM_ZST_CAPACITY
} else {
self.buf.cap()
}
}
/// Turn ptr into a slice
#[inline]
unsafe fn buffer_as_slice(&self) -> &[T] {
slice::from_raw_parts(self.ptr(), self.cap())
}
/// Turn ptr into a mut slice
#[inline]
unsafe fn buffer_as_mut_slice(&mut self) -> &mut [T] {
slice::from_raw_parts_mut(self.ptr(), self.cap())
}
/// Moves an element out of the buffer
#[inline]
unsafe fn buffer_read(&mut self, off: usize) -> T {
ptr::read(self.ptr().offset(off as isize))
}
/// Writes an element into the buffer, moving it.
#[inline]
unsafe fn buffer_write(&mut self, off: usize, value: T) {
ptr::write(self.ptr().offset(off as isize), value);
}
/// Returns `true` if and only if the buffer is at full capacity.
#[inline]
fn is_full(&self) -> bool {
self.cap() - self.len() == 1
}
/// Returns the index in the underlying buffer for a given logical element
/// index.
#[inline]
fn wrap_index(&self, idx: usize) -> usize {
wrap_index(idx, self.cap())
}
/// Returns the index in the underlying buffer for a given logical element
/// index + addend.
#[inline]
fn wrap_add(&self, idx: usize, addend: usize) -> usize {
wrap_index(idx.wrapping_add(addend), self.cap())
}
/// Returns the index in the underlying buffer for a given logical element
/// index - subtrahend.
#[inline]
fn wrap_sub(&self, idx: usize, subtrahend: usize) -> usize {
wrap_index(idx.wrapping_sub(subtrahend), self.cap())
}
/// Copies a contiguous block of memory len long from src to dst
#[inline]
unsafe fn copy(&self, dst: usize, src: usize, len: usize) {
debug_assert!(dst + len <= self.cap(),
"cpy dst={} src={} len={} cap={}",
dst,
src,
len,
self.cap());
debug_assert!(src + len <= self.cap(),
"cpy dst={} src={} len={} cap={}",
dst,
src,
len,
self.cap());
ptr::copy(self.ptr().offset(src as isize),
self.ptr().offset(dst as isize),
len);
}
/// Copies a contiguous block of memory len long from src to dst
#[inline]
unsafe fn copy_nonoverlapping(&self, dst: usize, src: usize, len: usize) {
debug_assert!(dst + len <= self.cap(),
"cno dst={} src={} len={} cap={}",
dst,
src,
len,
self.cap());
debug_assert!(src + len <= self.cap(),
"cno dst={} src={} len={} cap={}",
dst,
src,
len,
self.cap());
ptr::copy_nonoverlapping(self.ptr().offset(src as isize),
self.ptr().offset(dst as isize),
len);
}
/// Copies a potentially wrapping block of memory len long from src to dest.
/// (abs(dst - src) + len) must be no larger than cap() (There must be at
/// most one continuous overlapping region between src and dest).
///
/// NOTE(review): the 2x2x2 case split below is order-sensitive — within each
/// arm the sub-copies are sequenced so that overlapping source bytes are
/// read before they are overwritten. Do not reorder the `self.copy` calls.
unsafe fn wrap_copy(&self, dst: usize, src: usize, len: usize) {
    #[allow(dead_code)]
    fn diff(a: usize, b: usize) -> usize {
        if a <= b { b - a } else { a - b }
    }
    debug_assert!(cmp::min(diff(dst, src), self.cap() - diff(dst, src)) + len <= self.cap(),
                  "wrc dst={} src={} len={} cap={}",
                  dst,
                  src,
                  len,
                  self.cap());
    if src == dst || len == 0 {
        return;
    }
    let dst_after_src = self.wrap_sub(dst, src) < len;
    let src_pre_wrap_len = self.cap() - src;
    let dst_pre_wrap_len = self.cap() - dst;
    let src_wraps = src_pre_wrap_len < len;
    let dst_wraps = dst_pre_wrap_len < len;
    match (dst_after_src, src_wraps, dst_wraps) {
        (_, false, false) => {
            // src doesn't wrap, dst doesn't wrap
            //
            // S . . .
            // 1 [_ _ A A B B C C _]
            // 2 [_ _ A A A A B B _]
            // D . . .
            //
            self.copy(dst, src, len);
        }
        (false, false, true) => {
            // dst before src, src doesn't wrap, dst wraps
            //
            // S . . .
            // 1 [A A B B _ _ _ C C]
            // 2 [A A B B _ _ _ A A]
            // 3 [B B B B _ _ _ A A]
            // . . D .
            //
            self.copy(dst, src, dst_pre_wrap_len);
            self.copy(0, src + dst_pre_wrap_len, len - dst_pre_wrap_len);
        }
        (true, false, true) => {
            // src before dst, src doesn't wrap, dst wraps
            //
            // S . . .
            // 1 [C C _ _ _ A A B B]
            // 2 [B B _ _ _ A A B B]
            // 3 [B B _ _ _ A A A A]
            // . . D .
            //
            self.copy(0, src + dst_pre_wrap_len, len - dst_pre_wrap_len);
            self.copy(dst, src, dst_pre_wrap_len);
        }
        (false, true, false) => {
            // dst before src, src wraps, dst doesn't wrap
            //
            // . . S .
            // 1 [C C _ _ _ A A B B]
            // 2 [C C _ _ _ B B B B]
            // 3 [C C _ _ _ B B C C]
            // D . . .
            //
            self.copy(dst, src, src_pre_wrap_len);
            self.copy(dst + src_pre_wrap_len, 0, len - src_pre_wrap_len);
        }
        (true, true, false) => {
            // src before dst, src wraps, dst doesn't wrap
            //
            // . . S .
            // 1 [A A B B _ _ _ C C]
            // 2 [A A A A _ _ _ C C]
            // 3 [C C A A _ _ _ C C]
            // D . . .
            //
            self.copy(dst + src_pre_wrap_len, 0, len - src_pre_wrap_len);
            self.copy(dst, src, src_pre_wrap_len);
        }
        (false, true, true) => {
            // dst before src, src wraps, dst wraps
            //
            // . . . S .
            // 1 [A B C D _ E F G H]
            // 2 [A B C D _ E G H H]
            // 3 [A B C D _ E G H A]
            // 4 [B C C D _ E G H A]
            // . . D . .
            //
            debug_assert!(dst_pre_wrap_len > src_pre_wrap_len);
            let delta = dst_pre_wrap_len - src_pre_wrap_len;
            self.copy(dst, src, src_pre_wrap_len);
            self.copy(dst + src_pre_wrap_len, 0, delta);
            self.copy(0, delta, len - dst_pre_wrap_len);
        }
        (true, true, true) => {
            // src before dst, src wraps, dst wraps
            //
            // . . S . .
            // 1 [A B C D _ E F G H]
            // 2 [A A B D _ E F G H]
            // 3 [H A B D _ E F G H]
            // 4 [H A B D _ E F F G]
            // . . . D .
            //
            debug_assert!(src_pre_wrap_len > dst_pre_wrap_len);
            let delta = src_pre_wrap_len - dst_pre_wrap_len;
            self.copy(delta, 0, len - src_pre_wrap_len);
            self.copy(0, self.cap() - delta, delta);
            self.copy(dst, src, dst_pre_wrap_len);
        }
    }
}
/// Frobs the head and tail sections around to handle the fact that we
/// just reallocated. Unsafe because it trusts old_cap.
///
/// Precondition: the backing buffer really has grown from `old_cap` to
/// `self.cap()` slots. If the buffer did NOT actually grow (old_cap ==
/// cap()), the B/C branches below copy past the end of the allocation —
/// the caller must never invoke this without a genuine capacity increase.
#[inline]
unsafe fn handle_cap_increase(&mut self, old_cap: usize) {
    let new_cap = self.cap();
    // Move the shortest contiguous section of the ring buffer
    // T H
    // [o o o o o o o . ]
    // T H
    // A [o o o o o o o . . . . . . . . . ]
    // H T
    // [o o . o o o o o ]
    // T H
    // B [. . . o o o o o o o . . . . . . ]
    // H T
    // [o o o o o . o o ]
    // H T
    // C [o o o o o . . . . . . . . . o o ]
    if self.tail <= self.head {
        // A
        // Nop
    } else if self.head < old_cap - self.tail {
        // B
        self.copy_nonoverlapping(old_cap, 0, self.head);
        self.head += old_cap;
        debug_assert!(self.head > self.tail);
    } else {
        // C
        let new_tail = new_cap - (old_cap - self.tail);
        self.copy_nonoverlapping(new_tail, self.tail, old_cap - self.tail);
        self.tail = new_tail;
        debug_assert!(self.head < self.tail);
    }
    debug_assert!(self.head < self.cap());
    debug_assert!(self.tail < self.cap());
    debug_assert!(self.cap().count_ones() == 1);
}
}
impl<T> VecDeque<T> {
/// Creates an empty `VecDeque`.
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
///
/// let vector: VecDeque<u32> = VecDeque::new();
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn new() -> VecDeque<T> {
    // Allocates eagerly with a small default capacity (INITIAL_CAPACITY).
    VecDeque::with_capacity(INITIAL_CAPACITY)
}
/// Creates an empty `VecDeque` with space for at least `n` elements.
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
///
/// let vector: VecDeque<u32> = VecDeque::with_capacity(10);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn with_capacity(n: usize) -> VecDeque<T> {
    // +1 since the ringbuffer always leaves one space empty
    // The buffer size is always a power of two so index wrapping can use
    // a mask instead of a modulo.
    let cap = cmp::max(n + 1, MINIMUM_CAPACITY + 1).next_power_of_two();
    assert!(cap > n, "capacity overflow");
    VecDeque {
        tail: 0,
        head: 0,
        buf: RawVec::with_capacity(cap),
    }
}
/// Retrieves an element in the `VecDeque` by index.
///
/// Element at index 0 is the front of the queue.
///
/// Returns `None` when `index` is out of bounds.
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
///
/// let mut buf = VecDeque::new();
/// buf.push_back(3);
/// buf.push_back(4);
/// buf.push_back(5);
/// assert_eq!(buf.get(1), Some(&4));
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn get(&self, index: usize) -> Option<&T> {
    // Bail out early on out-of-range indices; otherwise translate the
    // logical index into a physical slot and read it.
    if index >= self.len() {
        return None;
    }
    let slot = self.wrap_add(self.tail, index);
    unsafe { Some(&*self.ptr().offset(slot as isize)) }
}
/// Retrieves an element in the `VecDeque` mutably by index.
///
/// Element at index 0 is the front of the queue.
///
/// Returns `None` when `index` is out of bounds.
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
///
/// let mut buf = VecDeque::new();
/// buf.push_back(3);
/// buf.push_back(4);
/// buf.push_back(5);
/// if let Some(elem) = buf.get_mut(1) {
///     *elem = 7;
/// }
///
/// assert_eq!(buf[1], 7);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn get_mut(&mut self, index: usize) -> Option<&mut T> {
    // Same shape as `get`, but hands out a unique reference.
    if index >= self.len() {
        return None;
    }
    let slot = self.wrap_add(self.tail, index);
    unsafe { Some(&mut *self.ptr().offset(slot as isize)) }
}
/// Swaps elements at indices `i` and `j`.
///
/// `i` and `j` may be equal.
///
/// Element at index 0 is the front of the queue.
///
/// # Panics
///
/// Panics if either index is out of bounds.
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
///
/// let mut buf = VecDeque::new();
/// buf.push_back(3);
/// buf.push_back(4);
/// buf.push_back(5);
/// assert_eq!(buf, [3, 4, 5]);
/// buf.swap(0, 2);
/// assert_eq!(buf, [5, 4, 3]);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn swap(&mut self, i: usize, j: usize) {
    assert!(i < self.len());
    assert!(j < self.len());
    // Translate both logical indices into physical buffer slots, then
    // swap in place. ptr::swap is safe for i == j (it handles aliasing).
    let ri = self.wrap_add(self.tail, i);
    let rj = self.wrap_add(self.tail, j);
    unsafe {
        ptr::swap(self.ptr().offset(ri as isize),
                  self.ptr().offset(rj as isize))
    }
}
/// Returns the number of elements the `VecDeque` can hold without
/// reallocating.
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
///
/// let buf: VecDeque<i32> = VecDeque::with_capacity(10);
/// assert!(buf.capacity() >= 10);
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn capacity(&self) -> usize {
    // One buffer slot is always kept empty, so usable capacity is cap - 1.
    self.cap() - 1
}
/// Reserves the minimum capacity for exactly `additional` more elements to be inserted in the
/// given `VecDeque`. Does nothing if the capacity is already sufficient.
///
/// Note that the allocator may give the collection more space than it requests. Therefore
/// capacity can not be relied upon to be precisely minimal. Prefer [`reserve`] if future
/// insertions are expected.
///
/// # Panics
///
/// Panics if the new capacity overflows `usize`.
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
///
/// let mut buf: VecDeque<i32> = vec![1].into_iter().collect();
/// buf.reserve_exact(10);
/// assert!(buf.capacity() >= 11);
/// ```
///
/// [`reserve`]: #method.reserve
#[stable(feature = "rust1", since = "1.0.0")]
pub fn reserve_exact(&mut self, additional: usize) {
    // The ring buffer must stay a power of two, so an "exact" reserve
    // cannot do better than the regular rounding-up reserve.
    self.reserve(additional);
}
/// Reserves capacity for at least `additional` more elements to be inserted in the given
/// `VecDeque`. The collection may reserve more space to avoid frequent reallocations.
///
/// # Panics
///
/// Panics if the new capacity overflows `usize`.
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
///
/// let mut buf: VecDeque<i32> = vec![1].into_iter().collect();
/// buf.reserve(10);
/// assert!(buf.capacity() >= 11);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn reserve(&mut self, additional: usize) {
    let old_cap = self.cap();
    // +1 for the always-empty slot the ring buffer keeps.
    let used_cap = self.len() + 1;
    let new_cap = used_cap.checked_add(additional)
        .and_then(|needed_cap| needed_cap.checked_next_power_of_two())
        .expect("capacity overflow");
    // Compare against the true buffer size (`old_cap`), NOT `capacity()`
    // (which is cap - 1). With the former `new_cap > self.capacity()` test,
    // the case `new_cap == old_cap` wrongly entered this branch: RawVec
    // would not grow the allocation, but `handle_cap_increase` would still
    // relocate elements as if it had, writing past the end of the buffer
    // (CVE-2018-1000657). `new_cap > old_cap` grows exactly when needed.
    if new_cap > old_cap {
        self.buf.reserve_exact(used_cap, new_cap - used_cap);
        unsafe {
            self.handle_cap_increase(old_cap);
        }
    }
}
/// Shrinks the capacity of the `VecDeque` as much as possible.
///
/// It will drop down as close as possible to the length but the allocator may still inform the
/// `VecDeque` that there is space for a few more elements.
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
///
/// let mut buf = VecDeque::with_capacity(15);
/// buf.extend(0..4);
/// assert_eq!(buf.capacity(), 15);
/// buf.shrink_to_fit();
/// assert!(buf.capacity() >= 4);
/// ```
#[stable(feature = "deque_extras_15", since = "1.5.0")]
pub fn shrink_to_fit(&mut self) {
    // +1 since the ringbuffer always leaves one space empty
    // len + 1 can't overflow for an existing, well-formed ringbuffer.
    let target_cap = cmp::max(self.len() + 1, MINIMUM_CAPACITY + 1).next_power_of_two();
    if target_cap < self.cap() {
        // NOTE(review): elements are compacted into [0, target_cap) BEFORE the
        // allocation is shrunk, so no live element is ever outside the buffer.
        //
        // There are three cases of interest:
        // All elements are out of desired bounds
        // Elements are contiguous, and head is out of desired bounds
        // Elements are discontiguous, and tail is out of desired bounds
        //
        // At all other times, element positions are unaffected.
        //
        // Indicates that elements at the head should be moved.
        let head_outside = self.head == 0 || self.head >= target_cap;
        // Move elements from out of desired bounds (positions after target_cap)
        if self.tail >= target_cap && head_outside {
            // T H
            // [. . . . . . . . o o o o o o o . ]
            // T H
            // [o o o o o o o . ]
            unsafe {
                self.copy_nonoverlapping(0, self.tail, self.len());
            }
            self.head = self.len();
            self.tail = 0;
        } else if self.tail != 0 && self.tail < target_cap && head_outside {
            // T H
            // [. . . o o o o o o o . . . . . . ]
            // H T
            // [o o . o o o o o ]
            let len = self.wrap_sub(self.head, target_cap);
            unsafe {
                self.copy_nonoverlapping(0, target_cap, len);
            }
            self.head = len;
            debug_assert!(self.head < self.tail);
        } else if self.tail >= target_cap {
            // H T
            // [o o o o o . . . . . . . . . o o ]
            // H T
            // [o o o o o . o o ]
            debug_assert!(self.wrap_sub(self.head, 1) < target_cap);
            let len = self.cap() - self.tail;
            let new_tail = target_cap - len;
            unsafe {
                self.copy_nonoverlapping(new_tail, self.tail, len);
            }
            self.tail = new_tail;
            debug_assert!(self.head < self.tail);
        }
        self.buf.shrink_to_fit(target_cap);
        debug_assert!(self.head < self.cap());
        debug_assert!(self.tail < self.cap());
        debug_assert!(self.cap().count_ones() == 1);
    }
}
/// Shortens the `VecDeque`, dropping excess elements from the back.
///
/// If `len` is greater than the `VecDeque`'s current length, this has no
/// effect.
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
///
/// let mut buf = VecDeque::new();
/// buf.push_back(5);
/// buf.push_back(10);
/// buf.push_back(15);
/// assert_eq!(buf, [5, 10, 15]);
/// buf.truncate(1);
/// assert_eq!(buf, [5]);
/// ```
#[stable(feature = "deque_extras", since = "1.16.0")]
pub fn truncate(&mut self, len: usize) {
    // Pop (and drop) back elements until at most `len` remain. Each pop
    // runs the element's destructor immediately.
    while self.len() > len {
        self.pop_back();
    }
}
/// Returns a front-to-back iterator.
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
///
/// let mut buf = VecDeque::new();
/// buf.push_back(5);
/// buf.push_back(3);
/// buf.push_back(4);
/// let b: &[_] = &[&5, &3, &4];
/// let c: Vec<&i32> = buf.iter().collect();
/// assert_eq!(&c[..], b);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn iter(&self) -> Iter<T> {
    // The iterator borrows the whole backing buffer and carries its own
    // tail/head cursors; the shared borrow keeps the deque immutable.
    Iter {
        tail: self.tail,
        head: self.head,
        ring: unsafe { self.buffer_as_slice() },
    }
}
/// Returns a front-to-back iterator that returns mutable references.
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
///
/// let mut buf = VecDeque::new();
/// buf.push_back(5);
/// buf.push_back(3);
/// buf.push_back(4);
/// for num in buf.iter_mut() {
///     *num = *num - 2;
/// }
/// let b: &[_] = &[&mut 3, &mut 1, &mut 2];
/// assert_eq!(&buf.iter_mut().collect::<Vec<&mut i32>>()[..], b);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn iter_mut(&mut self) -> IterMut<T> {
    // Same layout as `iter`, but the unique borrow of the buffer lets the
    // iterator hand out &mut references.
    IterMut {
        tail: self.tail,
        head: self.head,
        ring: unsafe { self.buffer_as_mut_slice() },
    }
}
/// Returns a pair of slices which contain, in order, the contents of the
/// `VecDeque`.
///
/// When the elements are contiguous in the buffer the second slice is empty.
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
///
/// let mut vector = VecDeque::new();
///
/// vector.push_back(0);
/// vector.push_back(1);
/// vector.push_back(2);
///
/// assert_eq!(vector.as_slices(), (&[0, 1, 2][..], &[][..]));
///
/// vector.push_front(10);
/// vector.push_front(9);
///
/// assert_eq!(vector.as_slices(), (&[9, 10][..], &[0, 1, 2][..]));
/// ```
#[inline]
#[stable(feature = "deque_extras_15", since = "1.5.0")]
pub fn as_slices(&self) -> (&[T], &[T]) {
    unsafe {
        let buf = self.buffer_as_slice();
        RingSlices::ring_slices(buf, self.head, self.tail)
    }
}
/// Returns a pair of slices which contain, in order, the contents of the
/// `VecDeque`.
///
/// Mutable counterpart of [`as_slices`](#method.as_slices).
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
///
/// let mut vector = VecDeque::new();
///
/// vector.push_back(0);
/// vector.push_back(1);
///
/// vector.push_front(10);
/// vector.push_front(9);
///
/// vector.as_mut_slices().0[0] = 42;
/// vector.as_mut_slices().1[0] = 24;
/// assert_eq!(vector.as_slices(), (&[42, 10][..], &[24, 1][..]));
/// ```
#[inline]
#[stable(feature = "deque_extras_15", since = "1.5.0")]
pub fn as_mut_slices(&mut self) -> (&mut [T], &mut [T]) {
    unsafe {
        // head/tail are copied out first because `buffer_as_mut_slice`
        // takes a unique borrow of `self`.
        let head = self.head;
        let tail = self.tail;
        let buf = self.buffer_as_mut_slice();
        RingSlices::ring_slices(buf, head, tail)
    }
}
/// Returns the number of elements in the `VecDeque`.
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
///
/// let mut v = VecDeque::new();
/// assert_eq!(v.len(), 0);
/// v.push_back(1);
/// assert_eq!(v.len(), 1);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn len(&self) -> usize {
    // `count` computes the ring distance from tail to head modulo cap.
    count(self.tail, self.head, self.cap())
}
/// Returns `true` if the `VecDeque` is empty.
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
///
/// let mut v = VecDeque::new();
/// assert!(v.is_empty());
/// v.push_front(1);
/// assert!(!v.is_empty());
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn is_empty(&self) -> bool {
    // A length of zero is exactly the `tail == head` configuration.
    self.len() == 0
}
/// Create a draining iterator that removes the specified range in the
/// `VecDeque` and yields the removed items.
///
/// Note 1: The element range is removed even if the iterator is not
/// consumed until the end.
///
/// Note 2: It is unspecified how many elements are removed from the deque,
/// if the `Drain` value is not dropped, but the borrow it holds expires
/// (eg. due to mem::forget).
///
/// # Panics
///
/// Panics if the starting point is greater than the end point or if
/// the end point is greater than the length of the vector.
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
///
/// let mut v: VecDeque<_> = vec![1, 2, 3].into_iter().collect();
/// let drained = v.drain(2..).collect::<VecDeque<_>>();
/// assert_eq!(drained, [3]);
/// assert_eq!(v, [1, 2]);
///
/// // A full range clears all contents
/// v.drain(..);
/// assert!(v.is_empty());
/// ```
#[inline]
#[stable(feature = "drain", since = "1.6.0")]
pub fn drain<R>(&mut self, range: R) -> Drain<T>
    where R: RangeArgument<usize>
{
    // Memory safety
    //
    // When the Drain is first created, the source deque is shortened to
    // make sure no uninitialized or moved-from elements are accessible at
    // all if the Drain's destructor never gets to run.
    //
    // Drain will ptr::read out the values to remove.
    // When finished, the remaining data will be copied back to cover the hole,
    // and the head/tail values will be restored correctly.
    //
    let len = self.len();
    // Normalize the generic range bounds into a half-open [start, end).
    let start = match range.start() {
        Included(&n) => n,
        Excluded(&n) => n + 1,
        Unbounded => 0,
    };
    let end = match range.end() {
        Included(&n) => n + 1,
        Excluded(&n) => n,
        Unbounded => len,
    };
    assert!(start <= end, "drain lower bound was too large");
    assert!(end <= len, "drain upper bound was too large");
    // The deque's elements are parted into three segments:
    // * self.tail -> drain_tail
    // * drain_tail -> drain_head
    // * drain_head -> self.head
    //
    // T = self.tail; H = self.head; t = drain_tail; h = drain_head
    //
    // We store drain_tail as self.head, and drain_head and self.head as
    // after_tail and after_head respectively on the Drain. This also
    // truncates the effective array such that if the Drain is leaked, we
    // have forgotten about the potentially moved values after the start of
    // the drain.
    //
    // T t h H
    // [. . . o o x x o o . . .]
    //
    let drain_tail = self.wrap_add(self.tail, start);
    let drain_head = self.wrap_add(self.tail, end);
    let head = self.head;
    // "forget" about the values after the start of the drain until after
    // the drain is complete and the Drain destructor is run.
    self.head = drain_tail;
    Drain {
        deque: Shared::from(&mut *self),
        after_tail: drain_head,
        after_head: head,
        iter: Iter {
            tail: drain_tail,
            head: drain_head,
            ring: unsafe { self.buffer_as_mut_slice() },
        },
    }
}
/// Clears the buffer, removing all values.
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
///
/// let mut v = VecDeque::new();
/// v.push_back(1);
/// v.clear();
/// assert!(v.is_empty());
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn clear(&mut self) {
    // A full-range drain drops every element; capacity is retained.
    self.drain(..);
}
/// Returns `true` if the `VecDeque` contains an element equal to the
/// given value.
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
///
/// let mut vector: VecDeque<u32> = VecDeque::new();
///
/// vector.push_back(0);
/// vector.push_back(1);
///
/// assert_eq!(vector.contains(&1), true);
/// assert_eq!(vector.contains(&10), false);
/// ```
#[stable(feature = "vec_deque_contains", since = "1.12.0")]
pub fn contains(&self, x: &T) -> bool
    where T: PartialEq<T>
{
    // Front-to-back linear scan, stopping at the first match.
    self.iter().any(|elem| elem == x)
}
/// Provides a reference to the front element, or `None` if the `VecDeque` is
/// empty.
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
///
/// let mut d = VecDeque::new();
/// assert_eq!(d.front(), None);
///
/// d.push_back(1);
/// d.push_back(2);
/// assert_eq!(d.front(), Some(&1));
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn front(&self) -> Option<&T> {
    // The front is simply logical index 0; `get` already handles the
    // empty case by returning None.
    self.get(0)
}
/// Provides a mutable reference to the front element, or `None` if the
/// `VecDeque` is empty.
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
///
/// let mut d = VecDeque::new();
/// assert_eq!(d.front_mut(), None);
///
/// d.push_back(1);
/// d.push_back(2);
/// match d.front_mut() {
///     Some(x) => *x = 9,
///     None => (),
/// }
/// assert_eq!(d.front(), Some(&9));
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn front_mut(&mut self) -> Option<&mut T> {
    // Logical index 0 is the front; `get_mut` returns None when empty.
    self.get_mut(0)
}
/// Provides a reference to the back element, or `None` if the `VecDeque` is
/// empty.
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
///
/// let mut d = VecDeque::new();
/// assert_eq!(d.back(), None);
///
/// d.push_back(1);
/// d.push_back(2);
/// assert_eq!(d.back(), Some(&2));
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn back(&self) -> Option<&T> {
    // Guard against the empty deque before computing len() - 1.
    if self.is_empty() {
        return None;
    }
    Some(&self[self.len() - 1])
}
/// Provides a mutable reference to the back element, or `None` if the
/// `VecDeque` is empty.
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
///
/// let mut d = VecDeque::new();
/// assert_eq!(d.back(), None);
///
/// d.push_back(1);
/// d.push_back(2);
/// match d.back_mut() {
///     Some(x) => *x = 9,
///     None => (),
/// }
/// assert_eq!(d.back(), Some(&9));
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn back_mut(&mut self) -> Option<&mut T> {
    // Guard against the empty deque, then index the last logical slot.
    if self.is_empty() {
        return None;
    }
    let last = self.len() - 1;
    Some(&mut self[last])
}
/// Removes the first element and returns it, or `None` if the `VecDeque` is
/// empty.
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
///
/// let mut d = VecDeque::new();
/// d.push_back(1);
/// d.push_back(2);
///
/// assert_eq!(d.pop_front(), Some(1));
/// assert_eq!(d.pop_front(), Some(2));
/// assert_eq!(d.pop_front(), None);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn pop_front(&mut self) -> Option<T> {
    if self.is_empty() {
        return None;
    }
    // Advance the tail past the slot first, then move the value out of
    // the now-logically-dead slot.
    let old_tail = self.tail;
    self.tail = self.wrap_add(old_tail, 1);
    unsafe { Some(self.buffer_read(old_tail)) }
}
/// Prepends an element to the `VecDeque`.
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
///
/// let mut d = VecDeque::new();
/// d.push_front(1);
/// d.push_front(2);
/// assert_eq!(d.front(), Some(&2));
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn push_front(&mut self, value: T) {
    // Ensure one free slot exists, step the tail back one position, and
    // move the value into the freshly claimed slot.
    self.grow_if_necessary();
    let new_tail = self.wrap_sub(self.tail, 1);
    self.tail = new_tail;
    unsafe {
        self.buffer_write(new_tail, value);
    }
}
/// Appends an element to the back of the `VecDeque`.
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
///
/// let mut buf = VecDeque::new();
/// buf.push_back(1);
/// buf.push_back(3);
/// assert_eq!(3, *buf.back().unwrap());
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn push_back(&mut self, value: T) {
    // Ensure one free slot exists, claim the current head slot, and move
    // the value in; the head then points one past the new element.
    self.grow_if_necessary();
    let slot = self.head;
    self.head = self.wrap_add(slot, 1);
    unsafe {
        self.buffer_write(slot, value);
    }
}
/// Removes the last element from the `VecDeque` and returns it, or `None` if
/// it is empty.
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
///
/// let mut buf = VecDeque::new();
/// assert_eq!(buf.pop_back(), None);
/// buf.push_back(1);
/// buf.push_back(3);
/// assert_eq!(buf.pop_back(), Some(3));
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn pop_back(&mut self) -> Option<T> {
    if self.is_empty() {
        return None;
    }
    // Step the head back one slot and move that element out.
    let new_head = self.wrap_sub(self.head, 1);
    self.head = new_head;
    unsafe { Some(self.buffer_read(new_head)) }
}
// Returns `true` when the elements occupy a single unbroken run in the
// buffer (tail..head) rather than wrapping around the end.
#[inline]
fn is_contiguous(&self) -> bool {
    self.tail <= self.head
}
/// Removes an element from anywhere in the `VecDeque` and returns it, replacing it with the
/// last element.
///
/// This does not preserve ordering, but is O(1).
///
/// Returns `None` if `index` is out of bounds.
///
/// Element at index 0 is the front of the queue.
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
///
/// let mut buf = VecDeque::new();
/// assert_eq!(buf.swap_remove_back(0), None);
/// buf.push_back(1);
/// buf.push_back(2);
/// buf.push_back(3);
/// assert_eq!(buf, [1, 2, 3]);
///
/// assert_eq!(buf.swap_remove_back(0), Some(1));
/// assert_eq!(buf, [3, 2]);
/// ```
#[stable(feature = "deque_extras_15", since = "1.5.0")]
pub fn swap_remove_back(&mut self, index: usize) -> Option<T> {
    let length = self.len();
    // Out-of-range indices (including any index on an empty deque) fail.
    if index >= length {
        return None;
    }
    // Bring the target to the back (unless it already is), then pop it.
    if index < length - 1 {
        self.swap(index, length - 1);
    }
    self.pop_back()
}
/// Removes an element from anywhere in the `VecDeque` and returns it,
/// replacing it with the first element.
///
/// This does not preserve ordering, but is O(1).
///
/// Returns `None` if `index` is out of bounds.
///
/// Element at index 0 is the front of the queue.
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
///
/// let mut buf = VecDeque::new();
/// assert_eq!(buf.swap_remove_front(0), None);
/// buf.push_back(1);
/// buf.push_back(2);
/// buf.push_back(3);
/// assert_eq!(buf, [1, 2, 3]);
///
/// assert_eq!(buf.swap_remove_front(2), Some(3));
/// assert_eq!(buf, [2, 1]);
/// ```
#[stable(feature = "deque_extras_15", since = "1.5.0")]
pub fn swap_remove_front(&mut self, index: usize) -> Option<T> {
    let length = self.len();
    // Out-of-range indices (including any index on an empty deque) fail.
    if index >= length {
        return None;
    }
    // Bring the target to the front (unless it already is), then pop it.
    if index != 0 {
        self.swap(index, 0);
    }
    self.pop_front()
}
/// Inserts an element at `index` within the `VecDeque`, shifting all elements with indices
/// greater than or equal to `index` towards the back.
///
/// Element at index 0 is the front of the queue.
///
/// # Panics
///
/// Panics if `index` is greater than `VecDeque`'s length
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
///
/// let mut vec_deque = VecDeque::new();
/// vec_deque.push_back('a');
/// vec_deque.push_back('b');
/// vec_deque.push_back('c');
/// assert_eq!(vec_deque, &['a', 'b', 'c']);
///
/// vec_deque.insert(1, 'd');
/// assert_eq!(vec_deque, &['a', 'd', 'b', 'c']);
/// ```
#[stable(feature = "deque_extras_15", since = "1.5.0")]
pub fn insert(&mut self, index: usize, value: T) {
    assert!(index <= self.len(), "index out of bounds");
    self.grow_if_necessary();
    // Move the least number of elements in the ring buffer and insert
    // the given object
    //
    // At most len/2 - 1 elements will be moved. O(min(n, n-i))
    //
    // There are three main cases:
    // Elements are contiguous
    // - special case when tail is 0
    // Elements are discontiguous and the insert is in the tail section
    // Elements are discontiguous and the insert is in the head section
    //
    // For each of those there are two more cases:
    // Insert is closer to tail
    // Insert is closer to head
    //
    // Key: H - self.head
    // T - self.tail
    // o - Valid element
    // I - Insertion element
    // A - The element that should be after the insertion point
    // M - Indicates element was moved
    //
    // NOTE(review): within each arm the copies are order-sensitive; do not
    // reorder them. The actual write of `value` happens once, after the
    // match, at the recomputed physical index.
    let idx = self.wrap_add(self.tail, index);
    let distance_to_tail = index;
    let distance_to_head = self.len() - index;
    let contiguous = self.is_contiguous();
    match (contiguous, distance_to_tail <= distance_to_head, idx >= self.tail) {
        (true, true, _) if index == 0 => {
            // push_front
            //
            // T
            // I H
            // [A o o o o o o . . . . . . . . .]
            //
            // H T
            // [A o o o o o o o . . . . . I]
            //
            self.tail = self.wrap_sub(self.tail, 1);
        }
        (true, true, _) => {
            unsafe {
                // contiguous, insert closer to tail:
                //
                // T I H
                // [. . . o o A o o o o . . . . . .]
                //
                // T H
                // [. . o o I A o o o o . . . . . .]
                // M M
                //
                // contiguous, insert closer to tail and tail is 0:
                //
                //
                // T I H
                // [o o A o o o o . . . . . . . . .]
                //
                // H T
                // [o I A o o o o o . . . . . . . o]
                // M M
                let new_tail = self.wrap_sub(self.tail, 1);
                self.copy(new_tail, self.tail, 1);
                // Already moved the tail, so we only copy `index - 1` elements.
                self.copy(self.tail, self.tail + 1, index - 1);
                self.tail = new_tail;
            }
        }
        (true, false, _) => {
            unsafe {
                // contiguous, insert closer to head:
                //
                // T I H
                // [. . . o o o o A o o . . . . . .]
                //
                // T H
                // [. . . o o o o I A o o . . . . .]
                // M M M
                self.copy(idx + 1, idx, self.head - idx);
                self.head = self.wrap_add(self.head, 1);
            }
        }
        (false, true, true) => {
            unsafe {
                // discontiguous, insert closer to tail, tail section:
                //
                // H T I
                // [o o o o o o . . . . . o o A o o]
                //
                // H T
                // [o o o o o o . . . . o o I A o o]
                // M M
                self.copy(self.tail - 1, self.tail, index);
                self.tail -= 1;
            }
        }
        (false, false, true) => {
            unsafe {
                // discontiguous, insert closer to head, tail section:
                //
                // H T I
                // [o o . . . . . . . o o o o o A o]
                //
                // H T
                // [o o o . . . . . . o o o o o I A]
                // M M M M
                // copy elements up to new head
                self.copy(1, 0, self.head);
                // copy last element into empty spot at bottom of buffer
                self.copy(0, self.cap() - 1, 1);
                // move elements from idx to end forward not including ^ element
                self.copy(idx + 1, idx, self.cap() - 1 - idx);
                self.head += 1;
            }
        }
        (false, true, false) if idx == 0 => {
            unsafe {
                // discontiguous, insert is closer to tail, head section,
                // and is at index zero in the internal buffer:
                //
                // I H T
                // [A o o o o o o o o o . . . o o o]
                //
                // H T
                // [A o o o o o o o o o . . o o o I]
                // M M M
                // copy elements up to new tail
                self.copy(self.tail - 1, self.tail, self.cap() - self.tail);
                // copy last element into empty spot at bottom of buffer
                self.copy(self.cap() - 1, 0, 1);
                self.tail -= 1;
            }
        }
        (false, true, false) => {
            unsafe {
                // discontiguous, insert closer to tail, head section:
                //
                // I H T
                // [o o o A o o o o o o . . . o o o]
                //
                // H T
                // [o o I A o o o o o o . . o o o o]
                // M M M M M M
                // copy elements up to new tail
                self.copy(self.tail - 1, self.tail, self.cap() - self.tail);
                // copy last element into empty spot at bottom of buffer
                self.copy(self.cap() - 1, 0, 1);
                // move elements from idx-1 to end forward not including ^ element
                self.copy(0, 1, idx - 1);
                self.tail -= 1;
            }
        }
        (false, false, false) => {
            unsafe {
                // discontiguous, insert closer to head, head section:
                //
                // I H T
                // [o o o o A o o . . . . . . o o o]
                //
                // H T
                // [o o o o I A o o . . . . . o o o]
                // M M M
                self.copy(idx + 1, idx, self.head - idx);
                self.head += 1;
            }
        }
    }
    // tail might've been changed so we need to recalculate
    let new_idx = self.wrap_add(self.tail, index);
    unsafe {
        self.buffer_write(new_idx, value);
    }
}
/// Removes and returns the element at `index` from the `VecDeque`.
/// Whichever end is closer to the removal point will be moved to make
/// room, and all the affected elements will be moved to new positions.
/// Returns `None` if `index` is out of bounds.
///
/// Element at index 0 is the front of the queue.
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
///
/// let mut buf = VecDeque::new();
/// buf.push_back(1);
/// buf.push_back(2);
/// buf.push_back(3);
/// assert_eq!(buf, [1, 2, 3]);
///
/// assert_eq!(buf.remove(1), Some(2));
/// assert_eq!(buf, [1, 3]);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn remove(&mut self, index: usize) -> Option<T> {
    if self.is_empty() || self.len() <= index {
        return None;
    }
    // There are three main cases:
    // Elements are contiguous
    // Elements are discontiguous and the removal is in the tail section
    // Elements are discontiguous and the removal is in the head section
    // - special case when elements are technically contiguous,
    // but self.head = 0
    //
    // For each of those there are two more cases:
    // Insert is closer to tail
    // Insert is closer to head
    //
    // Key: H - self.head
    // T - self.tail
    // o - Valid element
    // x - Element marked for removal
    // R - Indicates element that is being removed
    // M - Indicates element was moved
    //
    // NOTE(review): the element is moved out with `buffer_read` BEFORE the
    // hole is closed; the copies in each arm then shift neighbors over the
    // dead slot. The copy order within each arm is significant.
    let idx = self.wrap_add(self.tail, index);
    let elem = unsafe { Some(self.buffer_read(idx)) };
    let distance_to_tail = index;
    let distance_to_head = self.len() - index;
    let contiguous = self.is_contiguous();
    match (contiguous, distance_to_tail <= distance_to_head, idx >= self.tail) {
        (true, true, _) => {
            unsafe {
                // contiguous, remove closer to tail:
                //
                // T R H
                // [. . . o o x o o o o . . . . . .]
                //
                // T H
                // [. . . . o o o o o o . . . . . .]
                // M M
                self.copy(self.tail + 1, self.tail, index);
                self.tail += 1;
            }
        }
        (true, false, _) => {
            unsafe {
                // contiguous, remove closer to head:
                //
                // T R H
                // [. . . o o o o x o o . . . . . .]
                //
                // T H
                // [. . . o o o o o o . . . . . . .]
                // M M
                self.copy(idx, idx + 1, self.head - idx - 1);
                self.head -= 1;
            }
        }
        (false, true, true) => {
            unsafe {
                // discontiguous, remove closer to tail, tail section:
                //
                // H T R
                // [o o o o o o . . . . . o o x o o]
                //
                // H T
                // [o o o o o o . . . . . . o o o o]
                // M M
                self.copy(self.tail + 1, self.tail, index);
                self.tail = self.wrap_add(self.tail, 1);
            }
        }
        (false, false, false) => {
            unsafe {
                // discontiguous, remove closer to head, head section:
                //
                // R H T
                // [o o o o x o o . . . . . . o o o]
                //
                // H T
                // [o o o o o o . . . . . . . o o o]
                // M M
                self.copy(idx, idx + 1, self.head - idx - 1);
                self.head -= 1;
            }
        }
        (false, false, true) => {
            unsafe {
                // discontiguous, remove closer to head, tail section:
                //
                // H T R
                // [o o o . . . . . . o o o o o x o]
                //
                // H T
                // [o o . . . . . . . o o o o o o o]
                // M M M M
                //
                // or quasi-discontiguous, remove next to head, tail section:
                //
                // H T R
                // [. . . . . . . . . o o o o o x o]
                //
                // T H
                // [. . . . . . . . . o o o o o o .]
                // M
                // draw in elements in the tail section
                self.copy(idx, idx + 1, self.cap() - idx - 1);
                // Prevents underflow.
                if self.head != 0 {
                    // copy first element into empty spot
                    self.copy(self.cap() - 1, 0, 1);
                    // move elements in the head section backwards
                    self.copy(0, 1, self.head - 1);
                }
                self.head = self.wrap_sub(self.head, 1);
            }
        }
        (false, true, false) => {
            unsafe {
                // discontiguous, remove closer to tail, head section:
                //
                // R H T
                // [o o x o o o o o o o . . . o o o]
                //
                // H T
                // [o o o o o o o o o o . . . . o o]
                // M M M M M
                // draw in elements up to idx
                self.copy(1, 0, idx);
                // copy last element into empty spot
                self.copy(0, self.cap() - 1, 1);
                // move elements from tail to end forward, excluding the last one
                self.copy(self.tail + 1, self.tail, self.cap() - self.tail - 1);
                self.tail = self.wrap_add(self.tail, 1);
            }
        }
    }
    return elem;
}
/// Splits the collection into two at the given index.
///
/// Returns a newly allocated `Self`. `self` contains elements `[0, at)`,
/// and the returned `Self` contains elements `[at, len)`.
///
/// Note that the capacity of `self` does not change.
///
/// Element at index 0 is the front of the queue.
///
/// # Panics
///
/// Panics if `at > len`
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
///
/// let mut buf: VecDeque<_> = vec![1,2,3].into_iter().collect();
/// let buf2 = buf.split_off(1);
/// assert_eq!(buf, [1]);
/// assert_eq!(buf2, [2, 3]);
/// ```
#[inline]
#[stable(feature = "split_off", since = "1.4.0")]
pub fn split_off(&mut self, at: usize) -> Self {
    let len = self.len();
    assert!(at <= len, "`at` out of bounds");
    let other_len = len - at;
    let mut other = VecDeque::with_capacity(other_len);
    unsafe {
        // The deque's contents are (at most) two contiguous runs; copy the
        // `[at, len)` suffix into `other`'s fresh buffer, which is filled
        // contiguously starting at physical index 0.
        let (first_half, second_half) = self.as_slices();
        let first_len = first_half.len();
        let second_len = second_half.len();
        if at < first_len {
            // `at` lies in the first half.
            let amount_in_first = first_len - at;
            ptr::copy_nonoverlapping(first_half.as_ptr().offset(at as isize),
                                     other.ptr(),
                                     amount_in_first);
            // just take all of the second half.
            ptr::copy_nonoverlapping(second_half.as_ptr(),
                                     other.ptr().offset(amount_in_first as isize),
                                     second_len);
        } else {
            // `at` lies in the second half, need to factor in the elements we skipped
            // in the first half.
            let offset = at - first_len;
            let amount_in_second = second_len - offset;
            ptr::copy_nonoverlapping(second_half.as_ptr().offset(offset as isize),
                                     other.ptr(),
                                     amount_in_second);
        }
    }
    // Cleanup where the ends of the buffers are
    // `self` keeps its tail; pulling `head` back by `other_len` removes the
    // copied suffix from its logical range. `other` is contiguous from 0.
    self.head = self.wrap_sub(self.head, other_len);
    other.head = other.wrap_index(other_len);
    other
}
/// Moves all the elements of `other` into `Self`, leaving `other` empty.
///
/// # Panics
///
/// Panics if the new number of elements in self overflows a `usize`.
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
///
/// let mut buf: VecDeque<_> = vec![1, 2].into_iter().collect();
/// let mut buf2: VecDeque<_> = vec![3, 4].into_iter().collect();
/// buf.append(&mut buf2);
/// assert_eq!(buf, [1, 2, 3, 4]);
/// assert_eq!(buf2, []);
/// ```
#[inline]
#[stable(feature = "append", since = "1.4.0")]
pub fn append(&mut self, other: &mut Self) {
    // Naive implementation: drain `other` front-to-back, pushing each
    // element onto the back of `self` so the relative order is kept.
    for elem in other.drain(..) {
        self.push_back(elem);
    }
}
/// Retains only the elements specified by the predicate.
///
/// In other words, remove all elements `e` such that `f(&e)` returns false.
/// This method operates in place and preserves the order of the retained
/// elements.
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
///
/// let mut buf = VecDeque::new();
/// buf.extend(1..5);
/// buf.retain(|&x| x%2 == 0);
/// assert_eq!(buf, [2, 4]);
/// ```
#[stable(feature = "vec_deque_retain", since = "1.4.0")]
pub fn retain<F>(&mut self, mut f: F)
    where F: FnMut(&T) -> bool
{
    // Single pass: count rejected elements and slide each kept element
    // backwards over the holes they left, then drop the stale tail.
    let original_len = self.len();
    let mut removed = 0;
    for idx in 0..original_len {
        if !f(&self[idx]) {
            removed += 1;
        } else if removed > 0 {
            self.swap(idx - removed, idx);
        }
    }
    if removed > 0 {
        self.truncate(original_len - removed);
    }
}
// This may panic or abort
// Ensures at least one free slot exists by doubling the backing buffer
// when the ring is full.
#[inline]
fn grow_if_necessary(&mut self) {
    if self.is_full() {
        let old_cap = self.cap();
        // `RawVec::double` may panic or abort on allocation failure.
        self.buf.double();
        unsafe {
            // Re-establish the ring invariants for the enlarged buffer.
            self.handle_cap_increase(old_cap);
        }
        debug_assert!(!self.is_full());
    }
}
/// Returns a place for insertion at the back of the `VecDeque`.
///
/// Using this method with placement syntax is equivalent to [`push_back`](#method.push_back),
/// but may be more efficient.
///
/// # Examples
///
/// ```
/// #![feature(collection_placement)]
/// #![feature(placement_in_syntax)]
///
/// use std::collections::VecDeque;
///
/// let mut buf = VecDeque::new();
/// buf.place_back() <- 3;
/// buf.place_back() <- 4;
/// assert_eq!(&buf, &[3, 4]);
/// ```
#[unstable(feature = "collection_placement",
           reason = "placement protocol is subject to change",
           issue = "30172")]
pub fn place_back(&mut self) -> PlaceBack<T> {
    // The returned place borrows the deque mutably; the actual growth and
    // head advance happen in PlaceBack's Placer/InPlace impls.
    PlaceBack { vec_deque: self }
}
/// Returns a place for insertion at the front of the `VecDeque`.
///
/// Using this method with placement syntax is equivalent to [`push_front`](#method.push_front),
/// but may be more efficient.
///
/// # Examples
///
/// ```
/// #![feature(collection_placement)]
/// #![feature(placement_in_syntax)]
///
/// use std::collections::VecDeque;
///
/// let mut buf = VecDeque::new();
/// buf.place_front() <- 3;
/// buf.place_front() <- 4;
/// assert_eq!(&buf, &[4, 3]);
/// ```
#[unstable(feature = "collection_placement",
           reason = "placement protocol is subject to change",
           issue = "30172")]
pub fn place_front(&mut self) -> PlaceFront<T> {
    // The returned place borrows the deque mutably; the actual growth and
    // tail retreat happen in PlaceFront's Placer/InPlace impls.
    PlaceFront { vec_deque: self }
}
}
impl<T: Clone> VecDeque<T> {
    /// Modifies the `VecDeque` in-place so that `len()` is equal to new_len,
    /// either by removing excess elements or by appending clones of `value` to the back.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::collections::VecDeque;
    ///
    /// let mut buf = VecDeque::new();
    /// buf.push_back(5);
    /// buf.push_back(10);
    /// buf.push_back(15);
    /// assert_eq!(buf, [5, 10, 15]);
    ///
    /// buf.resize(2, 0);
    /// assert_eq!(buf, [5, 10]);
    ///
    /// buf.resize(5, 20);
    /// assert_eq!(buf, [5, 10, 20, 20, 20]);
    /// ```
    #[stable(feature = "deque_extras", since = "1.16.0")]
    pub fn resize(&mut self, new_len: usize, value: T) {
        let current = self.len();
        if new_len <= current {
            // Shrinking (or equal length): drop everything past `new_len`.
            self.truncate(new_len);
        } else {
            // Growing: append the required number of clones of `value`.
            self.extend(repeat(value).take(new_len - current));
        }
    }
}
/// Returns the index in the underlying buffer for a given logical element index.
#[inline]
fn wrap_index(index: usize, size: usize) -> usize {
    // `size` is always a power of two, so masking with `size - 1` is
    // equivalent to `index % size` but avoids a division.
    debug_assert!(size.is_power_of_two());
    let mask = size - 1;
    index & mask
}
/// Returns the two slices that cover the `VecDeque`'s valid range
trait RingSlices: Sized {
    // Sub-view of `self` over `[from, to)`.
    fn slice(self, from: usize, to: usize) -> Self;
    // Split `self` at `i` into `(left, right)`.
    fn split_at(self, i: usize) -> (Self, Self);
    // Partitions `buf` into the (front, back) logical runs given the ring
    // cursors; `tail <= head` means the data is one contiguous run.
    fn ring_slices(buf: Self, head: usize, tail: usize) -> (Self, Self) {
        let contiguous = tail <= head;
        if contiguous {
            // Contiguous: the front run is `[tail, head)`, the back run is empty.
            let (empty, buf) = buf.split_at(0);
            (buf.slice(tail, head), empty)
        } else {
            // Wrapped: the front run is `[tail, cap)`, the back run is `[0, head)`.
            let (mid, right) = buf.split_at(tail);
            let (left, _) = mid.split_at(head);
            (right, left)
        }
    }
}
// Shared-slice implementation of the ring-view helpers.
impl<'a, T> RingSlices for &'a [T] {
    fn slice(self, from: usize, to: usize) -> Self {
        &self[from..to]
    }
    fn split_at(self, i: usize) -> (Self, Self) {
        (*self).split_at(i)
    }
}
// Mutable-slice implementation of the ring-view helpers.
impl<'a, T> RingSlices for &'a mut [T] {
    fn slice(self, from: usize, to: usize) -> Self {
        &mut self[from..to]
    }
    fn split_at(self, i: usize) -> (Self, Self) {
        (*self).split_at_mut(i)
    }
}
/// Calculate the number of elements left to be read in the buffer
#[inline]
fn count(tail: usize, head: usize, size: usize) -> usize {
    // `size` is always a power of two, so the mask yields the ring
    // distance from `tail` forward to `head`, wrapping correctly even
    // when `head < tail`.
    let mask = size - 1;
    head.wrapping_sub(tail) & mask
}
/// An iterator over the elements of a `VecDeque`.
///
/// This `struct` is created by the [`iter`] method on [`VecDeque`]. See its
/// documentation for more.
///
/// [`iter`]: struct.VecDeque.html#method.iter
/// [`VecDeque`]: struct.VecDeque.html
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Iter<'a, T: 'a> {
    // Borrowed view over the whole backing buffer (including unused slots).
    ring: &'a [T],
    // Ring index of the next element to yield from the front.
    tail: usize,
    // Ring index one past the last element to yield from the back.
    head: usize,
}
#[stable(feature = "collection_debug", since = "1.17.0")]
impl<'a, T: 'a + fmt::Debug> fmt::Debug for Iter<'a, T> {
    // Renders the raw buffer plus both cursors, e.g. `Iter([...], 0, 3)`.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_tuple("Iter")
            .field(&self.ring)
            .field(&self.tail)
            .field(&self.head)
            .finish()
    }
}
// FIXME(#19839) Remove in favor of `#[derive(Clone)]`
// Manual impl: a derive would add an unnecessary `T: Clone` bound even
// though only the shared reference and the two cursors are copied.
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T> Clone for Iter<'a, T> {
    fn clone(&self) -> Iter<'a, T> {
        Iter {
            ring: self.ring,
            tail: self.tail,
            head: self.head,
        }
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T> Iterator for Iter<'a, T> {
    type Item = &'a T;
    #[inline]
    fn next(&mut self) -> Option<&'a T> {
        if self.tail == self.head {
            // Cursors met: the iterator is exhausted.
            return None;
        }
        let tail = self.tail;
        // Advance `tail` one slot, wrapping around the ring.
        self.tail = wrap_index(self.tail.wrapping_add(1), self.ring.len());
        // `tail` indexes a live element strictly between the cursors.
        unsafe { Some(self.ring.get_unchecked(tail)) }
    }
    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        // The exact length is the ring distance from `tail` to `head`.
        let len = count(self.tail, self.head, self.ring.len());
        (len, Some(len))
    }
    fn fold<Acc, F>(self, mut accum: Acc, mut f: F) -> Acc
        where F: FnMut(Acc, Self::Item) -> Acc
    {
        // Fold each contiguous run with the slice iterator's (fast) fold.
        let (front, back) = RingSlices::ring_slices(self.ring, self.head, self.tail);
        accum = front.iter().fold(accum, &mut f);
        back.iter().fold(accum, &mut f)
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T> DoubleEndedIterator for Iter<'a, T> {
    #[inline]
    fn next_back(&mut self) -> Option<&'a T> {
        if self.tail == self.head {
            return None;
        }
        // Retreat `head` one slot (wrapping); it then indexes the element
        // to yield, since `head` points one past the last live element.
        self.head = wrap_index(self.head.wrapping_sub(1), self.ring.len());
        unsafe { Some(self.ring.get_unchecked(self.head)) }
    }
    fn rfold<Acc, F>(self, mut accum: Acc, mut f: F) -> Acc
        where F: FnMut(Acc, Self::Item) -> Acc
    {
        // Reverse-fold the back run first, then the front run.
        let (front, back) = RingSlices::ring_slices(self.ring, self.head, self.tail);
        accum = back.iter().rfold(accum, &mut f);
        front.iter().rfold(accum, &mut f)
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T> ExactSizeIterator for Iter<'a, T> {
    fn is_empty(&self) -> bool {
        // Nothing remains exactly when the two cursors coincide.
        self.tail == self.head
    }
}
// `Iter::next` keeps returning `None` once the cursors meet, so it is fused.
#[unstable(feature = "fused", issue = "35602")]
impl<'a, T> FusedIterator for Iter<'a, T> {}
/// A mutable iterator over the elements of a `VecDeque`.
///
/// This `struct` is created by the [`iter_mut`] method on [`VecDeque`]. See its
/// documentation for more.
///
/// [`iter_mut`]: struct.VecDeque.html#method.iter_mut
/// [`VecDeque`]: struct.VecDeque.html
#[stable(feature = "rust1", since = "1.0.0")]
pub struct IterMut<'a, T: 'a> {
    // Exclusive view over the whole backing buffer (including unused slots).
    ring: &'a mut [T],
    // Ring index of the next element to yield from the front.
    tail: usize,
    // Ring index one past the last element to yield from the back.
    head: usize,
}
#[stable(feature = "collection_debug", since = "1.17.0")]
impl<'a, T: 'a + fmt::Debug> fmt::Debug for IterMut<'a, T> {
    // Renders the raw buffer plus both cursors, mirroring `Iter`'s Debug.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_tuple("IterMut")
            .field(&self.ring)
            .field(&self.tail)
            .field(&self.head)
            .finish()
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T> Iterator for IterMut<'a, T> {
    type Item = &'a mut T;
    #[inline]
    fn next(&mut self) -> Option<&'a mut T> {
        if self.tail == self.head {
            return None;
        }
        let tail = self.tail;
        self.tail = wrap_index(self.tail.wrapping_add(1), self.ring.len());
        unsafe {
            // Detach the element's lifetime from `&mut self` so callers may
            // hold references from successive `next` calls; each slot is
            // yielded at most once, so the `&mut`s never alias.
            let elem = self.ring.get_unchecked_mut(tail);
            Some(&mut *(elem as *mut _))
        }
    }
    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        // The exact length is the ring distance from `tail` to `head`.
        let len = count(self.tail, self.head, self.ring.len());
        (len, Some(len))
    }
    fn fold<Acc, F>(self, mut accum: Acc, mut f: F) -> Acc
        where F: FnMut(Acc, Self::Item) -> Acc
    {
        // Fold each contiguous run with the slice iterator's fold.
        let (front, back) = RingSlices::ring_slices(self.ring, self.head, self.tail);
        accum = front.iter_mut().fold(accum, &mut f);
        back.iter_mut().fold(accum, &mut f)
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T> DoubleEndedIterator for IterMut<'a, T> {
    #[inline]
    fn next_back(&mut self) -> Option<&'a mut T> {
        if self.tail == self.head {
            return None;
        }
        // Retreat `head` one slot (wrapping); it then indexes the element to yield.
        self.head = wrap_index(self.head.wrapping_sub(1), self.ring.len());
        unsafe {
            // Same lifetime-detaching reborrow as in `next`; slots are
            // yielded at most once so the `&mut`s never alias.
            let elem = self.ring.get_unchecked_mut(self.head);
            Some(&mut *(elem as *mut _))
        }
    }
    fn rfold<Acc, F>(self, mut accum: Acc, mut f: F) -> Acc
        where F: FnMut(Acc, Self::Item) -> Acc
    {
        // Reverse-fold the back run first, then the front run.
        let (front, back) = RingSlices::ring_slices(self.ring, self.head, self.tail);
        accum = back.iter_mut().rfold(accum, &mut f);
        front.iter_mut().rfold(accum, &mut f)
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T> ExactSizeIterator for IterMut<'a, T> {
    // Nothing remains exactly when the two cursors coincide.
    fn is_empty(&self) -> bool {
        self.head == self.tail
    }
}
// `IterMut::next` keeps returning `None` once the cursors meet, so it is fused.
#[unstable(feature = "fused", issue = "35602")]
impl<'a, T> FusedIterator for IterMut<'a, T> {}
/// An owning iterator over the elements of a `VecDeque`.
///
/// This `struct` is created by the [`into_iter`] method on [`VecDeque`][`VecDeque`]
/// (provided by the `IntoIterator` trait). See its documentation for more.
///
/// [`into_iter`]: struct.VecDeque.html#method.into_iter
/// [`VecDeque`]: struct.VecDeque.html
#[derive(Clone)]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct IntoIter<T> {
    // The deque being consumed; iteration pops from its front.
    inner: VecDeque<T>,
}
#[stable(feature = "collection_debug", since = "1.17.0")]
impl<T: fmt::Debug> fmt::Debug for IntoIter<T> {
    // Shows the not-yet-consumed remainder of the deque.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_tuple("IntoIter")
            .field(&self.inner)
            .finish()
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> Iterator for IntoIter<T> {
    type Item = T;
    #[inline]
    fn next(&mut self) -> Option<T> {
        // Owning iteration simply pops from the front of the inner deque.
        self.inner.pop_front()
    }
    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        // The remaining length is known exactly.
        let remaining = self.inner.len();
        (remaining, Some(remaining))
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> DoubleEndedIterator for IntoIter<T> {
    // Back-to-front owned iteration pops from the back of the inner deque.
    #[inline]
    fn next_back(&mut self) -> Option<T> {
        self.inner.pop_back()
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> ExactSizeIterator for IntoIter<T> {
    // Delegates directly to the inner deque's emptiness check.
    fn is_empty(&self) -> bool {
        self.inner.is_empty()
    }
}
// `pop_front` on an empty deque keeps returning `None`, so this is fused.
#[unstable(feature = "fused", issue = "35602")]
impl<T> FusedIterator for IntoIter<T> {}
/// A draining iterator over the elements of a `VecDeque`.
///
/// This `struct` is created by the [`drain`] method on [`VecDeque`]. See its
/// documentation for more.
///
/// [`drain`]: struct.VecDeque.html#method.drain
/// [`VecDeque`]: struct.VecDeque.html
#[stable(feature = "drain", since = "1.6.0")]
pub struct Drain<'a, T: 'a> {
    // Ring index of the first element after the drained range
    // (used as `drain_head` by the Drop impl).
    after_tail: usize,
    // The source deque's original `head`, restored by the Drop impl.
    after_head: usize,
    // Borrowing iterator over the drained range itself.
    iter: Iter<'a, T>,
    // Raw pointer back to the source deque, fixed up in Drop.
    deque: Shared<VecDeque<T>>,
}
#[stable(feature = "collection_debug", since = "1.17.0")]
impl<'a, T: 'a + fmt::Debug> fmt::Debug for Drain<'a, T> {
    // The raw `deque` pointer is deliberately omitted from the output.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_tuple("Drain")
            .field(&self.after_tail)
            .field(&self.after_head)
            .field(&self.iter)
            .finish()
    }
}
// NOTE(review): the raw `Shared<VecDeque<T>>` field suppresses the auto
// impls; these assert the pointer is used like the `&mut VecDeque<T>` it
// was created from, so the usual Send/Sync bounds on `T` suffice.
#[stable(feature = "drain", since = "1.6.0")]
unsafe impl<'a, T: Sync> Sync for Drain<'a, T> {}
#[stable(feature = "drain", since = "1.6.0")]
unsafe impl<'a, T: Send> Send for Drain<'a, T> {}
#[stable(feature = "drain", since = "1.6.0")]
impl<'a, T: 'a> Drop for Drain<'a, T> {
    fn drop(&mut self) {
        // Exhaust the iterator first so every drained element is dropped.
        for _ in self.by_ref() {}
        let source_deque = unsafe { self.deque.as_mut() };
        // T = source_deque_tail; H = source_deque_head; t = drain_tail; h = drain_head
        //
        //        T   t   h   H
        // [. . . o o x x o o . . .]
        //
        let orig_tail = source_deque.tail;
        let drain_tail = source_deque.head;
        let drain_head = self.after_tail;
        let orig_head = self.after_head;
        // Lengths of the untouched runs before and after the drained gap.
        let tail_len = count(orig_tail, drain_tail, source_deque.cap());
        let head_len = count(drain_head, orig_head, source_deque.cap());
        // Restore the original head value
        source_deque.head = orig_head;
        match (tail_len, head_len) {
            (0, 0) => {
                // Everything was drained: reset to the canonical empty state.
                source_deque.head = 0;
                source_deque.tail = 0;
            }
            (0, _) => {
                // No front remainder: the deque now starts where the drain ended.
                source_deque.tail = drain_head;
            }
            (_, 0) => {
                // No back remainder: the deque now ends where the drain began.
                source_deque.head = drain_tail;
            }
            _ => unsafe {
                // Both remainders exist: move the shorter run across the gap.
                if tail_len <= head_len {
                    source_deque.tail = source_deque.wrap_sub(drain_head, tail_len);
                    source_deque.wrap_copy(source_deque.tail, orig_tail, tail_len);
                } else {
                    source_deque.head = source_deque.wrap_add(drain_tail, head_len);
                    source_deque.wrap_copy(drain_tail, drain_head, head_len);
                }
            },
        }
    }
}
#[stable(feature = "drain", since = "1.6.0")]
impl<'a, T: 'a> Iterator for Drain<'a, T> {
    type Item = T;
    #[inline]
    fn next(&mut self) -> Option<T> {
        // `ptr::read` moves the element out of the buffer; the slot is
        // logically dead and is excluded from the deque by the Drop impl.
        self.iter.next().map(|elt| unsafe { ptr::read(elt) })
    }
    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        self.iter.size_hint()
    }
}
#[stable(feature = "drain", since = "1.6.0")]
impl<'a, T: 'a> DoubleEndedIterator for Drain<'a, T> {
    // Same move-out-by-read as `next`, from the back of the drained range.
    #[inline]
    fn next_back(&mut self) -> Option<T> {
        self.iter.next_back().map(|elt| unsafe { ptr::read(elt) })
    }
}
// Exact length and fusedness both follow from the inner `Iter`.
#[stable(feature = "drain", since = "1.6.0")]
impl<'a, T: 'a> ExactSizeIterator for Drain<'a, T> {}
#[unstable(feature = "fused", issue = "35602")]
impl<'a, T: 'a> FusedIterator for Drain<'a, T> {}
#[stable(feature = "rust1", since = "1.0.0")]
impl<A: PartialEq> PartialEq for VecDeque<A> {
    // Compares element-wise without allocating: each deque is at most two
    // contiguous slices, so the comparison is split at slice boundaries.
    fn eq(&self, other: &VecDeque<A>) -> bool {
        if self.len() != other.len() {
            return false;
        }
        let (sa, sb) = self.as_slices();
        let (oa, ob) = other.as_slices();
        if sa.len() == oa.len() {
            // Boundaries line up: compare run-for-run.
            sa == oa && sb == ob
        } else if sa.len() < oa.len() {
            // Always divisible in three sections, for example:
            // self:  [a b c|d e f]
            // other: [0 1 2 3|4 5]
            // front = 3, mid = 1,
            // [a b c] == [0 1 2] && [d] == [3] && [e f] == [4 5]
            let front = sa.len();
            let mid = oa.len() - front;
            let (oa_front, oa_mid) = oa.split_at(front);
            let (sb_mid, sb_back) = sb.split_at(mid);
            debug_assert_eq!(sa.len(), oa_front.len());
            debug_assert_eq!(sb_mid.len(), oa_mid.len());
            debug_assert_eq!(sb_back.len(), ob.len());
            sa == oa_front && sb_mid == oa_mid && sb_back == ob
        } else {
            // Mirror image of the case above, with the roles swapped.
            let front = oa.len();
            let mid = sa.len() - front;
            let (sa_front, sa_mid) = sa.split_at(front);
            let (ob_mid, ob_back) = ob.split_at(mid);
            debug_assert_eq!(sa_front.len(), oa.len());
            debug_assert_eq!(sa_mid.len(), ob_mid.len());
            debug_assert_eq!(sb.len(), ob_back.len());
            sa_front == oa && sa_mid == ob_mid && sb == ob_back
        }
    }
}
// Full equivalence holds whenever the element type provides it.
#[stable(feature = "rust1", since = "1.0.0")]
impl<A: Eq> Eq for VecDeque<A> {}
// Generates `PartialEq<$Rhs>` for a deque-like `$Lhs` against slice-like
// right-hand sides, comparing via the deque's two contiguous runs.
macro_rules! __impl_slice_eq1 {
    ($Lhs: ty, $Rhs: ty) => {
        __impl_slice_eq1! { $Lhs, $Rhs, Sized }
    };
    ($Lhs: ty, $Rhs: ty, $Bound: ident) => {
        #[stable(feature = "vec-deque-partial-eq-slice", since = "1.17.0")]
        impl<'a, 'b, A: $Bound, B> PartialEq<$Rhs> for $Lhs where A: PartialEq<B> {
            fn eq(&self, other: &$Rhs) -> bool {
                if self.len() != other.len() {
                    return false;
                }
                // Split `other` to line up with the deque's two runs.
                let (sa, sb) = self.as_slices();
                let (oa, ob) = other[..].split_at(sa.len());
                sa == oa && sb == ob
            }
        }
    }
}
// Comparisons against vectors and (mutable) slices.
__impl_slice_eq1! { VecDeque<A>, Vec<B> }
__impl_slice_eq1! { VecDeque<A>, &'b [B] }
__impl_slice_eq1! { VecDeque<A>, &'b mut [B] }
// Comparisons against fixed-size arrays, one impl per length 0 through 32.
macro_rules! array_impls {
    ($($N: expr)+) => {
        $(
            __impl_slice_eq1! { VecDeque<A>, [B; $N] }
            __impl_slice_eq1! { VecDeque<A>, &'b [B; $N] }
            __impl_slice_eq1! { VecDeque<A>, &'b mut [B; $N] }
        )+
    }
}
array_impls! {
    0  1  2  3  4  5  6  7  8  9
    10 11 12 13 14 15 16 17 18 19
    20 21 22 23 24 25 26 27 28 29
    30 31 32
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<A: PartialOrd> PartialOrd for VecDeque<A> {
    // Lexicographical comparison via the element iterators.
    fn partial_cmp(&self, other: &VecDeque<A>) -> Option<Ordering> {
        self.iter().partial_cmp(other.iter())
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<A: Ord> Ord for VecDeque<A> {
    // Total lexicographical ordering via the element iterators.
    #[inline]
    fn cmp(&self, other: &VecDeque<A>) -> Ordering {
        self.iter().cmp(other.iter())
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<A: Hash> Hash for VecDeque<A> {
    fn hash<H: Hasher>(&self, state: &mut H) {
        // Feed the length first (prefix-freedom), then both contiguous
        // runs in logical order so equal deques hash identically
        // regardless of where they sit in the ring buffer.
        self.len().hash(state);
        let (front, back) = self.as_slices();
        Hash::hash_slice(front, state);
        Hash::hash_slice(back, state);
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<A> Index<usize> for VecDeque<A> {
    type Output = A;
    // Panics with "Out of bounds access" when `index >= len`.
    #[inline]
    fn index(&self, index: usize) -> &A {
        self.get(index).expect("Out of bounds access")
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<A> IndexMut<usize> for VecDeque<A> {
    // Mutable counterpart of `Index`; same panic message on out-of-bounds.
    #[inline]
    fn index_mut(&mut self, index: usize) -> &mut A {
        self.get_mut(index).expect("Out of bounds access")
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<A> FromIterator<A> for VecDeque<A> {
    fn from_iter<T: IntoIterator<Item = A>>(iter: T) -> VecDeque<A> {
        // Pre-size from the iterator's lower size bound, then push the rest.
        let it = iter.into_iter();
        let (lower_bound, _) = it.size_hint();
        let mut deque = VecDeque::with_capacity(lower_bound);
        deque.extend(it);
        deque
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> IntoIterator for VecDeque<T> {
    type Item = T;
    type IntoIter = IntoIter<T>;
    /// Consumes the list into a front-to-back iterator yielding elements by
    /// value.
    fn into_iter(self) -> IntoIter<T> {
        IntoIter { inner: self }
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T> IntoIterator for &'a VecDeque<T> {
    type Item = &'a T;
    type IntoIter = Iter<'a, T>;
    // By-reference iteration delegates to `iter`.
    fn into_iter(self) -> Iter<'a, T> {
        self.iter()
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T> IntoIterator for &'a mut VecDeque<T> {
    type Item = &'a mut T;
    type IntoIter = IterMut<'a, T>;
    // By-mutable-reference iteration delegates to `iter_mut`.
    fn into_iter(self) -> IterMut<'a, T> {
        self.iter_mut()
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<A> Extend<A> for VecDeque<A> {
    // Pushes every item of `iter` onto the back of the deque, in order.
    fn extend<T: IntoIterator<Item = A>>(&mut self, iter: T) {
        let iter = iter.into_iter();
        // Reserve the iterator's lower size bound up front so a
        // well-sized iterator triggers at most one reallocation instead
        // of repeated doubling inside `push_back` (mirrors the
        // pre-sizing already done by the `FromIterator` impl).
        let (lower, _) = iter.size_hint();
        self.reserve(lower);
        for elt in iter {
            self.push_back(elt);
        }
    }
}
#[stable(feature = "extend_ref", since = "1.2.0")]
impl<'a, T: 'a + Copy> Extend<&'a T> for VecDeque<T> {
    // Copy-out extension: dereference-copies each referenced element and
    // delegates to the by-value `Extend` impl.
    fn extend<I: IntoIterator<Item = &'a T>>(&mut self, iter: I) {
        self.extend(iter.into_iter().cloned());
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: fmt::Debug> fmt::Debug for VecDeque<T> {
    // Formats like a list, e.g. `[1, 2, 3]`, in logical front-to-back order.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_list().entries(self).finish()
    }
}
#[stable(feature = "vecdeque_vec_conversions", since = "1.10.0")]
impl<T> From<Vec<T>> for VecDeque<T> {
    // Takes over the Vec's allocation and reuses it as the ring buffer,
    // growing only when the ring invariants demand it.
    fn from(mut other: Vec<T>) -> Self {
        unsafe {
            let other_buf = other.as_mut_ptr();
            let mut buf = RawVec::from_raw_parts(other_buf, other.capacity());
            let len = other.len();
            // `buf` now owns the allocation; keep `other` from freeing it.
            mem::forget(other);
            // We need to extend the buf if it's not a power of two, too small
            // or doesn't have at least one free space
            if !buf.cap().is_power_of_two() || (buf.cap() < (MINIMUM_CAPACITY + 1)) ||
               (buf.cap() == len) {
                let cap = cmp::max(buf.cap() + 1, MINIMUM_CAPACITY + 1).next_power_of_two();
                buf.reserve_exact(len, cap - len);
            }
            // The elements already occupy `[0, len)`, so the ring starts
            // contiguous with tail = 0 and head = len.
            VecDeque {
                tail: 0,
                head: len,
                buf,
            }
        }
    }
}
#[stable(feature = "vecdeque_vec_conversions", since = "1.10.0")]
impl<T> From<VecDeque<T>> for Vec<T> {
    // Rotates the ring contents to the front of the buffer in place, then
    // reassembles the allocation as a Vec without copying to a new buffer.
    fn from(other: VecDeque<T>) -> Self {
        unsafe {
            let buf = other.buf.ptr();
            let len = other.len();
            let tail = other.tail;
            let head = other.head;
            let cap = other.cap();
            // Need to move the ring to the front of the buffer, as vec will expect this.
            if other.is_contiguous() {
                // Single run: one (possibly overlapping) shift to index 0.
                ptr::copy(buf.offset(tail as isize), buf, len);
            } else {
                if (tail - head) >= cmp::min((cap - tail), head) {
                    // There is enough free space in the centre for the shortest block so we can
                    // do this in at most three copy moves.
                    if (cap - tail) > head {
                        // right hand block is the long one; move that enough for the left
                        ptr::copy(buf.offset(tail as isize),
                                  buf.offset((tail - head) as isize),
                                  cap - tail);
                        // copy left in the end
                        ptr::copy(buf, buf.offset((cap - head) as isize), head);
                        // shift the new thing to the start
                        ptr::copy(buf.offset((tail - head) as isize), buf, len);
                    } else {
                        // left hand block is the long one, we can do it in two!
                        ptr::copy(buf, buf.offset((cap - tail) as isize), head);
                        ptr::copy(buf.offset(tail as isize), buf, cap - tail);
                    }
                } else {
                    // Need to use N swaps to move the ring
                    // We can use the space at the end of the ring as a temp store
                    let mut left_edge: usize = 0;
                    let mut right_edge: usize = tail;
                    // The general problem looks like this
                    // GHIJKLM...ABCDEF - before any swaps
                    // ABCDEFM...GHIJKL - after 1 pass of swaps
                    // ABCDEFGHIJM...KL - swap until the left edge reaches the temp store
                    //                  - then restart the algorithm with a new (smaller) store
                    // Sometimes the temp store is reached when the right edge is at the end
                    // of the buffer - this means we've hit the right order with fewer swaps!
                    // E.g
                    // EF..ABCD
                    // ABCDEF.. - after four only swaps we've finished
                    while left_edge < len && right_edge != cap {
                        let mut right_offset = 0;
                        for i in left_edge..right_edge {
                            right_offset = (i - left_edge) % (cap - right_edge);
                            let src: isize = (right_edge + right_offset) as isize;
                            ptr::swap(buf.offset(i as isize), buf.offset(src));
                        }
                        let n_ops = right_edge - left_edge;
                        left_edge += n_ops;
                        right_edge += right_offset + 1;
                    }
                }
            }
            // Reconstruct the Vec over the (now front-packed) buffer and
            // keep `other` from freeing the allocation it no longer owns.
            let out = Vec::from_raw_parts(buf, len, cap);
            mem::forget(other);
            out
        }
    }
}
/// A place for insertion at the back of a `VecDeque`.
///
/// See [`VecDeque::place_back`](struct.VecDeque.html#method.place_back) for details.
#[must_use = "places do nothing unless written to with `<-` syntax"]
#[unstable(feature = "collection_placement",
           reason = "struct name and placement protocol are subject to change",
           issue = "30172")]
#[derive(Debug)]
pub struct PlaceBack<'a, T: 'a> {
    // Exclusive borrow of the target deque for the duration of the placement.
    vec_deque: &'a mut VecDeque<T>,
}
#[unstable(feature = "collection_placement",
           reason = "placement protocol is subject to change",
           issue = "30172")]
impl<'a, T> Placer<T> for PlaceBack<'a, T> {
    type Place = PlaceBack<'a, T>;
    // Makes room for one element before the write happens.
    fn make_place(self) -> Self {
        self.vec_deque.grow_if_necessary();
        self
    }
}
#[unstable(feature = "collection_placement",
           reason = "placement protocol is subject to change",
           issue = "30172")]
impl<'a, T> Place<T> for PlaceBack<'a, T> {
    // The slot at `head` is the next free slot at the back of the ring.
    fn pointer(&mut self) -> *mut T {
        unsafe { self.vec_deque.ptr().offset(self.vec_deque.head as isize) }
    }
}
#[unstable(feature = "collection_placement",
           reason = "placement protocol is subject to change",
           issue = "30172")]
impl<'a, T> InPlace<T> for PlaceBack<'a, T> {
    type Owner = &'a mut T;
    // Commits the write: advance `head` past the freshly written slot and
    // hand back a reference to the new element.
    unsafe fn finalize(self) -> &'a mut T {
        let head = self.vec_deque.head;
        self.vec_deque.head = self.vec_deque.wrap_add(head, 1);
        &mut *(self.vec_deque.ptr().offset(head as isize))
    }
}
/// A place for insertion at the front of a `VecDeque`.
///
/// See [`VecDeque::place_front`](struct.VecDeque.html#method.place_front) for details.
#[must_use = "places do nothing unless written to with `<-` syntax"]
#[unstable(feature = "collection_placement",
           reason = "struct name and placement protocol are subject to change",
           issue = "30172")]
#[derive(Debug)]
pub struct PlaceFront<'a, T: 'a> {
    // Exclusive borrow of the target deque for the duration of the placement.
    vec_deque: &'a mut VecDeque<T>,
}
#[unstable(feature = "collection_placement",
           reason = "placement protocol is subject to change",
           issue = "30172")]
impl<'a, T> Placer<T> for PlaceFront<'a, T> {
    type Place = PlaceFront<'a, T>;
    // Makes room for one element before the write happens.
    fn make_place(self) -> Self {
        self.vec_deque.grow_if_necessary();
        self
    }
}
#[unstable(feature = "collection_placement",
           reason = "placement protocol is subject to change",
           issue = "30172")]
impl<'a, T> Place<T> for PlaceFront<'a, T> {
    // The slot just before `tail` (wrapping) is the next free front slot;
    // `tail` itself is only moved once the write is finalized.
    fn pointer(&mut self) -> *mut T {
        let tail = self.vec_deque.wrap_sub(self.vec_deque.tail, 1);
        unsafe { self.vec_deque.ptr().offset(tail as isize) }
    }
}
#[unstable(feature = "collection_placement",
           reason = "placement protocol is subject to change",
           issue = "30172")]
impl<'a, T> InPlace<T> for PlaceFront<'a, T> {
    type Owner = &'a mut T;
    // Commits the write: retreat `tail` onto the freshly written slot and
    // hand back a reference to the new element.
    unsafe fn finalize(self) -> &'a mut T {
        self.vec_deque.tail = self.vec_deque.wrap_sub(self.vec_deque.tail, 1);
        &mut *(self.vec_deque.ptr().offset(self.vec_deque.tail as isize))
    }
}
#[cfg(test)]
mod tests {
use test;
use super::VecDeque;
#[bench]
// Measures 100 back-pushes; cursors are reset directly (test-private
// fields) so no reallocation or drop cost is measured across iterations.
fn bench_push_back_100(b: &mut test::Bencher) {
    let mut deq = VecDeque::with_capacity(101);
    b.iter(|| {
        for i in 0..100 {
            deq.push_back(i);
        }
        deq.head = 0;
        deq.tail = 0;
    })
}
#[bench]
// Measures 100 front-pushes; cursors are reset directly between iterations.
fn bench_push_front_100(b: &mut test::Bencher) {
    let mut deq = VecDeque::with_capacity(101);
    b.iter(|| {
        for i in 0..100 {
            deq.push_front(i);
        }
        deq.head = 0;
        deq.tail = 0;
    })
}
#[bench]
// Measures 100 back-pops; the cursors are force-set so each iteration
// "refills" the deque without paying for pushes.
fn bench_pop_back_100(b: &mut test::Bencher) {
    let mut deq = VecDeque::<i32>::with_capacity(101);
    b.iter(|| {
        deq.head = 100;
        deq.tail = 0;
        while !deq.is_empty() {
            test::black_box(deq.pop_back());
        }
    })
}
#[bench]
// Measures 100 front-pops; same cursor-forcing trick as bench_pop_back_100.
fn bench_pop_front_100(b: &mut test::Bencher) {
    let mut deq = VecDeque::<i32>::with_capacity(101);
    b.iter(|| {
        deq.head = 100;
        deq.tail = 0;
        while !deq.is_empty() {
            test::black_box(deq.pop_front());
        }
    })
}
#[test]
// Exercises swap_remove_back/swap_remove_front for every tail position and
// length, checking the surviving elements and the cursor invariants.
fn test_swap_front_back_remove() {
    fn test(back: bool) {
        // This test checks that every single combination of tail position and length is tested.
        // Capacity 15 should be large enough to cover every case.
        let mut tester = VecDeque::with_capacity(15);
        let usable_cap = tester.capacity();
        let final_len = usable_cap / 2;
        for len in 0..final_len {
            let expected: VecDeque<_> = if back {
                (0..len).collect()
            } else {
                (0..len).rev().collect()
            };
            for tail_pos in 0..usable_cap {
                tester.tail = tail_pos;
                tester.head = tail_pos;
                if back {
                    for i in 0..len * 2 {
                        tester.push_front(i);
                    }
                    for i in 0..len {
                        assert_eq!(tester.swap_remove_back(i), Some(len * 2 - 1 - i));
                    }
                } else {
                    for i in 0..len * 2 {
                        tester.push_back(i);
                    }
                    for i in 0..len {
                        let idx = tester.len() - 1 - i;
                        assert_eq!(tester.swap_remove_front(idx), Some(len * 2 - 1 - i));
                    }
                }
                assert!(tester.tail < tester.cap());
                assert!(tester.head < tester.cap());
                assert_eq!(tester, expected);
            }
        }
    }
    test(true);
    test(false);
}
#[test]
// Exhaustively exercises `insert` over tail position x length x insert index.
fn test_insert() {
    // This test checks that every single combination of tail position, length, and
    // insertion position is tested. Capacity 15 should be large enough to cover every case.
    let mut tester = VecDeque::with_capacity(15);
    // can't guarantee we got 15, so have to get what we got.
    // 15 would be great, but we will definitely get 2^k - 1, for k >= 4, or else
    // this test isn't covering what it wants to
    let cap = tester.capacity();
    // len is the length *after* insertion
    for len in 1..cap {
        // 0, 1, 2, .., len - 1
        let expected = (0..).take(len).collect::<VecDeque<_>>();
        for tail_pos in 0..cap {
            for to_insert in 0..len {
                tester.tail = tail_pos;
                tester.head = tail_pos;
                // Push everything except the value to insert, leaving its
                // slot to be filled by `insert` below.
                for i in 0..len {
                    if i != to_insert {
                        tester.push_back(i);
                    }
                }
                tester.insert(to_insert, to_insert);
                assert!(tester.tail < tester.cap());
                assert!(tester.head < tester.cap());
                assert_eq!(tester, expected);
            }
        }
    }
}
#[test]
// Exhaustively exercises `remove` over tail position x length x remove index.
fn test_remove() {
    // This test checks that every single combination of tail position, length, and
    // removal position is tested. Capacity 15 should be large enough to cover every case.
    let mut tester = VecDeque::with_capacity(15);
    // can't guarantee we got 15, so have to get what we got.
    // 15 would be great, but we will definitely get 2^k - 1, for k >= 4, or else
    // this test isn't covering what it wants to
    let cap = tester.capacity();
    // len is the length *after* removal
    for len in 0..cap - 1 {
        // 0, 1, 2, .., len - 1
        let expected = (0..).take(len).collect::<VecDeque<_>>();
        for tail_pos in 0..cap {
            for to_remove in 0..len + 1 {
                tester.tail = tail_pos;
                tester.head = tail_pos;
                // Plant sentinel 1234 at the removal index so `remove`
                // must delete exactly that element.
                for i in 0..len {
                    if i == to_remove {
                        tester.push_back(1234);
                    }
                    tester.push_back(i);
                }
                if to_remove == len {
                    tester.push_back(1234);
                }
                tester.remove(to_remove);
                assert!(tester.tail < tester.cap());
                assert!(tester.head < tester.cap());
                assert_eq!(tester, expected);
            }
        }
    }
}
#[test]
// Exhaustively exercises `drain` over length x tail position x drain range,
// checking both the drained values and the deque left behind.
fn test_drain() {
    let mut tester: VecDeque<usize> = VecDeque::with_capacity(7);
    let cap = tester.capacity();
    for len in 0..cap + 1 {
        for tail in 0..cap + 1 {
            for drain_start in 0..len + 1 {
                for drain_end in drain_start..len + 1 {
                    tester.tail = tail;
                    tester.head = tail;
                    for i in 0..len {
                        tester.push_back(i);
                    }
                    // Check that we drain the correct values
                    let drained: VecDeque<_> = tester.drain(drain_start..drain_end).collect();
                    let drained_expected: VecDeque<_> = (drain_start..drain_end).collect();
                    assert_eq!(drained, drained_expected);
                    // We shouldn't have changed the capacity or made the
                    // head or tail out of bounds
                    assert_eq!(tester.capacity(), cap);
                    assert!(tester.tail < tester.cap());
                    assert!(tester.head < tester.cap());
                    // We should see the correct values in the VecDeque
                    let expected: VecDeque<_> = (0..drain_start)
                        .chain(drain_end..len)
                        .collect();
                    assert_eq!(expected, tester);
                }
            }
        }
    }
}
#[test]
// Exercises `shrink_to_fit` from an over-reserved buffer across every
// cursor position and length, checking capacity and contents afterwards.
fn test_shrink_to_fit() {
    // This test checks that every single combination of head and tail position,
    // is tested. Capacity 15 should be large enough to cover every case.
    let mut tester = VecDeque::with_capacity(15);
    // can't guarantee we got 15, so have to get what we got.
    // 15 would be great, but we will definitely get 2^k - 1, for k >= 4, or else
    // this test isn't covering what it wants to
    let cap = tester.capacity();
    tester.reserve(63);
    let max_cap = tester.capacity();
    for len in 0..cap + 1 {
        // 0, 1, 2, .., len - 1
        let expected = (0..).take(len).collect::<VecDeque<_>>();
        for tail_pos in 0..max_cap + 1 {
            tester.tail = tail_pos;
            tester.head = tail_pos;
            tester.reserve(63);
            for i in 0..len {
                tester.push_back(i);
            }
            tester.shrink_to_fit();
            assert!(tester.capacity() <= cap);
            assert!(tester.tail < tester.cap());
            assert!(tester.head < tester.cap());
            assert_eq!(tester, expected);
        }
    }
}
#[test]
fn test_split_off() {
    // This test checks that every single combination of tail position, length, and
    // split position is tested. Capacity 15 should be large enough to cover every case.
    let mut tester = VecDeque::with_capacity(15);
    // can't guarantee we got 15, so have to get what we got.
    // 15 would be great, but we will definitely get 2^k - 1, for k >= 4, or else
    // this test isn't covering what it wants to
    let cap = tester.capacity();
    // len is the length *before* splitting
    for len in 0..cap {
        // index to split at
        for at in 0..len + 1 {
            // 0, 1, 2, .., at - 1 (may be empty)
            let expected_self = (0..).take(at).collect::<VecDeque<_>>();
            // at, at + 1, .., len - 1 (may be empty)
            let expected_other = (at..).take(len - at).collect::<VecDeque<_>>();
            for tail_pos in 0..cap {
                // Offset the occupied region via the private fields, refill,
                // then split and check both halves plus their invariants.
                tester.tail = tail_pos;
                tester.head = tail_pos;
                for i in 0..len {
                    tester.push_back(i);
                }
                let result = tester.split_off(at);
                assert!(tester.tail < tester.cap());
                assert!(tester.head < tester.cap());
                assert!(result.tail < result.cap());
                assert!(result.head < result.cap());
                assert_eq!(tester, expected_self);
                assert_eq!(result, expected_other);
            }
        }
    }
}
#[test]
fn test_from_vec() {
    use super::super::vec::Vec;
    // Converting a Vec into a VecDeque must preserve length and element
    // order, and the deque's ring allocation must be a power of two.
    for capacity in 0..35 {
        for count in 0..capacity + 1 {
            let mut source = Vec::with_capacity(capacity);
            source.extend(0..count);
            let deque = VecDeque::from(source.clone());
            assert!(deque.cap().is_power_of_two());
            assert_eq!(source.len(), deque.len());
            assert!(deque.into_iter().eq(source));
        }
    }
}
#[test]
fn test_vec_from_vecdeque() {
    use super::super::vec::Vec;
    // Round-trips a VecDeque of `len` elements whose ring storage starts at
    // `offset` through Vec, and checks the contents survive unchanged.
    fn create_vec_and_test_convert(cap: usize, offset: usize, len: usize) {
        let mut vd = VecDeque::with_capacity(cap);
        // Rotate the internal start position forward without changing length.
        for _ in 0..offset {
            vd.push_back(0);
            vd.pop_front();
        }
        vd.extend(0..len);
        let vec: Vec<_> = Vec::from(vd.clone());
        assert_eq!(vec.len(), vd.len());
        assert!(vec.into_iter().eq(vd));
    }
    for cap_pwr in 0..7 {
        // Make capacity a (2^x) - 1, so that the ring size is 2^x.
        let cap = (2i32.pow(cap_pwr) - 1) as usize;
        // Lengths below (cap + 1) / 2 leave enough free space to straighten
        // the ring with plain copies; longer lengths force the conversion
        // onto its swapping path whenever both the left and the right block
        // exceed the free space:
        //   (cap + 1 - offset) > (cap + 1 - len) &&
        //   (len - (cap + 1 - offset)) > (cap + 1 - len)
        // Both regimes run through the same offset partitions below.
        for len in 0..cap {
            // Contiguous layouts.
            for offset in 0..(cap - len) {
                create_vec_and_test_convert(cap, offset, len)
            }
            // Wrapped: the block at the end of the buffer is the bigger one.
            for offset in (cap - len)..(cap - (len / 2)) {
                create_vec_and_test_convert(cap, offset, len)
            }
            // Wrapped: the block at the start of the buffer is the bigger one.
            for offset in (cap - (len / 2))..cap {
                create_vec_and_test_convert(cap, offset, len)
            }
        }
    }
}
}
|
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! A double-ended queue implemented with a growable ring buffer.
//!
//! This queue has `O(1)` amortized inserts and removals from both ends of the
//! container. It also has `O(1)` indexing like a vector. The contained elements
//! are not required to be copyable, and the queue will be sendable if the
//! contained type is sendable.
#![stable(feature = "rust1", since = "1.0.0")]
use core::cmp::Ordering;
use core::fmt;
use core::iter::{repeat, FromIterator, FusedIterator};
use core::mem;
use core::ops::{Index, IndexMut, Place, Placer, InPlace};
use core::ptr;
use core::ptr::Shared;
use core::slice;
use core::hash::{Hash, Hasher};
use core::cmp;
use raw_vec::RawVec;
use super::range::RangeArgument;
use Bound::{Excluded, Included, Unbounded};
use super::vec::Vec;
// The ring size is always a power of two and one slot is kept empty, so a
// stored capacity of 2^k - 1 corresponds to an allocation of 2^k elements.
const INITIAL_CAPACITY: usize = 7; // 2^3 - 1
const MINIMUM_CAPACITY: usize = 1; // 2 - 1
// For zero-sized element types no storage is needed, so capacity is pinned
// at the largest power of two representable in a usize.
#[cfg(target_pointer_width = "32")]
const MAXIMUM_ZST_CAPACITY: usize = 1 << (32 - 1); // Largest possible power of two
#[cfg(target_pointer_width = "64")]
const MAXIMUM_ZST_CAPACITY: usize = 1 << (64 - 1); // Largest possible power of two
/// A double-ended queue implemented with a growable ring buffer.
///
/// The "default" usage of this type as a queue is to use [`push_back`] to add to
/// the queue, and [`pop_front`] to remove from the queue. [`extend`] and [`append`]
/// push onto the back in this manner, and iterating over `VecDeque` goes front
/// to back.
///
/// [`push_back`]: #method.push_back
/// [`pop_front`]: #method.pop_front
/// [`extend`]: #method.extend
/// [`append`]: #method.append
#[stable(feature = "rust1", since = "1.0.0")]
pub struct VecDeque<T> {
    // tail and head are pointers into the buffer. Tail always points
    // to the first element that could be read, Head always points
    // to where data should be written.
    // If tail == head the buffer is empty. The length of the ringbuffer
    // is defined as the distance between the two.
    // Invariant: the buffer's capacity is a power of two, so wrapping
    // indices reduces to masking (see `wrap_index`).
    tail: usize,
    head: usize,
    buf: RawVec<T>,
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: Clone> Clone for VecDeque<T> {
fn clone(&self) -> VecDeque<T> {
self.iter().cloned().collect()
}
}
#[stable(feature = "rust1", since = "1.0.0")]
unsafe impl<#[may_dangle] T> Drop for VecDeque<T> {
    fn drop(&mut self) {
        // The live elements occupy at most two contiguous regions of the
        // ring; drop each region as a slice (front first, then back).
        let (front, back) = self.as_mut_slices();
        unsafe {
            // use drop for [T]
            ptr::drop_in_place(front);
            ptr::drop_in_place(back);
        }
        // RawVec handles deallocation
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> Default for VecDeque<T> {
    /// Creates an empty `VecDeque<T>`.
    #[inline]
    fn default() -> VecDeque<T> {
        Self::new()
    }
}
impl<T> VecDeque<T> {
    /// Marginally more convenient access to the buffer's base pointer.
    #[inline]
    fn ptr(&self) -> *mut T {
        self.buf.ptr()
    }
    /// Marginally more convenient access to the ring size (always a power
    /// of two).
    #[inline]
    fn cap(&self) -> usize {
        if mem::size_of::<T>() == 0 {
            // For zero sized types, we are always at maximum capacity
            MAXIMUM_ZST_CAPACITY
        } else {
            self.buf.cap()
        }
    }
    /// Turn ptr into a slice over the whole ring, including empty slots.
    #[inline]
    unsafe fn buffer_as_slice(&self) -> &[T] {
        slice::from_raw_parts(self.ptr(), self.cap())
    }
    /// Turn ptr into a mut slice over the whole ring, including empty slots.
    #[inline]
    unsafe fn buffer_as_mut_slice(&mut self) -> &mut [T] {
        slice::from_raw_parts_mut(self.ptr(), self.cap())
    }
    /// Moves an element out of the buffer
    #[inline]
    unsafe fn buffer_read(&mut self, off: usize) -> T {
        ptr::read(self.ptr().offset(off as isize))
    }
    /// Writes an element into the buffer, moving it.
    #[inline]
    unsafe fn buffer_write(&mut self, off: usize, value: T) {
        ptr::write(self.ptr().offset(off as isize), value);
    }
    /// Returns `true` if and only if the buffer is at full capacity.
    /// One slot is always left empty so full and empty are distinguishable.
    #[inline]
    fn is_full(&self) -> bool {
        self.cap() - self.len() == 1
    }
    /// Returns the index in the underlying buffer for a given logical element
    /// index.
    #[inline]
    fn wrap_index(&self, idx: usize) -> usize {
        wrap_index(idx, self.cap())
    }
    /// Returns the index in the underlying buffer for a given logical element
    /// index + addend.
    #[inline]
    fn wrap_add(&self, idx: usize, addend: usize) -> usize {
        wrap_index(idx.wrapping_add(addend), self.cap())
    }
    /// Returns the index in the underlying buffer for a given logical element
    /// index - subtrahend.
    #[inline]
    fn wrap_sub(&self, idx: usize, subtrahend: usize) -> usize {
        wrap_index(idx.wrapping_sub(subtrahend), self.cap())
    }
    /// Copies a contiguous block of memory len long from src to dst
    /// (regions may overlap).
    #[inline]
    unsafe fn copy(&self, dst: usize, src: usize, len: usize) {
        debug_assert!(dst + len <= self.cap(),
                      "cpy dst={} src={} len={} cap={}",
                      dst,
                      src,
                      len,
                      self.cap());
        debug_assert!(src + len <= self.cap(),
                      "cpy dst={} src={} len={} cap={}",
                      dst,
                      src,
                      len,
                      self.cap());
        ptr::copy(self.ptr().offset(src as isize),
                  self.ptr().offset(dst as isize),
                  len);
    }
    /// Copies a contiguous block of memory len long from src to dst
    /// (regions must not overlap).
    #[inline]
    unsafe fn copy_nonoverlapping(&self, dst: usize, src: usize, len: usize) {
        debug_assert!(dst + len <= self.cap(),
                      "cno dst={} src={} len={} cap={}",
                      dst,
                      src,
                      len,
                      self.cap());
        debug_assert!(src + len <= self.cap(),
                      "cno dst={} src={} len={} cap={}",
                      dst,
                      src,
                      len,
                      self.cap());
        ptr::copy_nonoverlapping(self.ptr().offset(src as isize),
                                 self.ptr().offset(dst as isize),
                                 len);
    }
    /// Copies a potentially wrapping block of memory len long from src to dest.
    /// (abs(dst - src) + len) must be no larger than cap() (There must be at
    /// most one continuous overlapping region between src and dest).
    unsafe fn wrap_copy(&self, dst: usize, src: usize, len: usize) {
        #[allow(dead_code)]
        fn diff(a: usize, b: usize) -> usize {
            if a <= b { b - a } else { a - b }
        }
        debug_assert!(cmp::min(diff(dst, src), self.cap() - diff(dst, src)) + len <= self.cap(),
                      "wrc dst={} src={} len={} cap={}",
                      dst,
                      src,
                      len,
                      self.cap());
        if src == dst || len == 0 {
            return;
        }
        // Classify the copy by whether the source and/or destination region
        // wraps past the end of the buffer, and by their relative order;
        // each match arm below handles one of the resulting layouts.
        let dst_after_src = self.wrap_sub(dst, src) < len;
        let src_pre_wrap_len = self.cap() - src;
        let dst_pre_wrap_len = self.cap() - dst;
        let src_wraps = src_pre_wrap_len < len;
        let dst_wraps = dst_pre_wrap_len < len;
        match (dst_after_src, src_wraps, dst_wraps) {
            (_, false, false) => {
                // src doesn't wrap, dst doesn't wrap
                //
                // S . . .
                // 1 [_ _ A A B B C C _]
                // 2 [_ _ A A A A B B _]
                // D . . .
                //
                self.copy(dst, src, len);
            }
            (false, false, true) => {
                // dst before src, src doesn't wrap, dst wraps
                //
                // S . . .
                // 1 [A A B B _ _ _ C C]
                // 2 [A A B B _ _ _ A A]
                // 3 [B B B B _ _ _ A A]
                // . . D .
                //
                self.copy(dst, src, dst_pre_wrap_len);
                self.copy(0, src + dst_pre_wrap_len, len - dst_pre_wrap_len);
            }
            (true, false, true) => {
                // src before dst, src doesn't wrap, dst wraps
                //
                // S . . .
                // 1 [C C _ _ _ A A B B]
                // 2 [B B _ _ _ A A B B]
                // 3 [B B _ _ _ A A A A]
                // . . D .
                //
                self.copy(0, src + dst_pre_wrap_len, len - dst_pre_wrap_len);
                self.copy(dst, src, dst_pre_wrap_len);
            }
            (false, true, false) => {
                // dst before src, src wraps, dst doesn't wrap
                //
                // . . S .
                // 1 [C C _ _ _ A A B B]
                // 2 [C C _ _ _ B B B B]
                // 3 [C C _ _ _ B B C C]
                // D . . .
                //
                self.copy(dst, src, src_pre_wrap_len);
                self.copy(dst + src_pre_wrap_len, 0, len - src_pre_wrap_len);
            }
            (true, true, false) => {
                // src before dst, src wraps, dst doesn't wrap
                //
                // . . S .
                // 1 [A A B B _ _ _ C C]
                // 2 [A A A A _ _ _ C C]
                // 3 [C C A A _ _ _ C C]
                // D . . .
                //
                self.copy(dst + src_pre_wrap_len, 0, len - src_pre_wrap_len);
                self.copy(dst, src, src_pre_wrap_len);
            }
            (false, true, true) => {
                // dst before src, src wraps, dst wraps
                //
                // . . . S .
                // 1 [A B C D _ E F G H]
                // 2 [A B C D _ E G H H]
                // 3 [A B C D _ E G H A]
                // 4 [B C C D _ E G H A]
                // . . D . .
                //
                debug_assert!(dst_pre_wrap_len > src_pre_wrap_len);
                let delta = dst_pre_wrap_len - src_pre_wrap_len;
                self.copy(dst, src, src_pre_wrap_len);
                self.copy(dst + src_pre_wrap_len, 0, delta);
                self.copy(0, delta, len - dst_pre_wrap_len);
            }
            (true, true, true) => {
                // src before dst, src wraps, dst wraps
                //
                // . . S . .
                // 1 [A B C D _ E F G H]
                // 2 [A A B D _ E F G H]
                // 3 [H A B D _ E F G H]
                // 4 [H A B D _ E F F G]
                // . . . D .
                //
                debug_assert!(src_pre_wrap_len > dst_pre_wrap_len);
                let delta = src_pre_wrap_len - dst_pre_wrap_len;
                self.copy(delta, 0, len - src_pre_wrap_len);
                self.copy(0, self.cap() - delta, delta);
                self.copy(dst, src, dst_pre_wrap_len);
            }
        }
    }
    /// Frobs the head and tail sections around to handle the fact that we
    /// just reallocated. Unsafe because it trusts old_cap.
    #[inline]
    unsafe fn handle_cap_increase(&mut self, old_cap: usize) {
        let new_cap = self.cap();
        // Move the shortest contiguous section of the ring buffer
        // T H
        // [o o o o o o o . ]
        // T H
        // A [o o o o o o o . . . . . . . . . ]
        // H T
        // [o o . o o o o o ]
        // T H
        // B [. . . o o o o o o o . . . . . . ]
        // H T
        // [o o o o o . o o ]
        // H T
        // C [o o o o o . . . . . . . . . o o ]
        if self.tail <= self.head {
            // A
            // Nop
        } else if self.head < old_cap - self.tail {
            // B
            self.copy_nonoverlapping(old_cap, 0, self.head);
            self.head += old_cap;
            debug_assert!(self.head > self.tail);
        } else {
            // C
            let new_tail = new_cap - (old_cap - self.tail);
            self.copy_nonoverlapping(new_tail, self.tail, old_cap - self.tail);
            self.tail = new_tail;
            debug_assert!(self.head < self.tail);
        }
        debug_assert!(self.head < self.cap());
        debug_assert!(self.tail < self.cap());
        debug_assert!(self.cap().count_ones() == 1);
    }
}
impl<T> VecDeque<T> {
/// Creates an empty `VecDeque`.
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
///
/// let vector: VecDeque<u32> = VecDeque::new();
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn new() -> VecDeque<T> {
VecDeque::with_capacity(INITIAL_CAPACITY)
}
/// Creates an empty `VecDeque` with space for at least `n` elements.
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
///
/// let vector: VecDeque<u32> = VecDeque::with_capacity(10);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn with_capacity(n: usize) -> VecDeque<T> {
    // One slot always stays empty so head == tail unambiguously means
    // "empty"; hence the + 1 before rounding up to a power of two.
    let minimum = cmp::max(n + 1, MINIMUM_CAPACITY + 1);
    let cap = minimum.next_power_of_two();
    assert!(cap > n, "capacity overflow");
    VecDeque {
        tail: 0,
        head: 0,
        buf: RawVec::with_capacity(cap),
    }
}
/// Retrieves an element in the `VecDeque` by index.
///
/// Element at index 0 is the front of the queue.
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
///
/// let mut buf = VecDeque::new();
/// buf.push_back(3);
/// buf.push_back(4);
/// buf.push_back(5);
/// assert_eq!(buf.get(1), Some(&4));
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn get(&self, index: usize) -> Option<&T> {
    if index >= self.len() {
        return None;
    }
    // Translate the logical index into a physical ring position.
    let idx = self.wrap_add(self.tail, index);
    unsafe { Some(&*self.ptr().offset(idx as isize)) }
}
/// Retrieves an element in the `VecDeque` mutably by index.
///
/// Element at index 0 is the front of the queue.
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
///
/// let mut buf = VecDeque::new();
/// buf.push_back(3);
/// buf.push_back(4);
/// buf.push_back(5);
/// if let Some(elem) = buf.get_mut(1) {
///     *elem = 7;
/// }
///
/// assert_eq!(buf[1], 7);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn get_mut(&mut self, index: usize) -> Option<&mut T> {
    if index >= self.len() {
        return None;
    }
    // Translate the logical index into a physical ring position.
    let idx = self.wrap_add(self.tail, index);
    unsafe { Some(&mut *self.ptr().offset(idx as isize)) }
}
/// Swaps elements at indices `i` and `j`.
///
/// `i` and `j` may be equal.
///
/// Element at index 0 is the front of the queue.
///
/// # Panics
///
/// Panics if either index is out of bounds.
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
///
/// let mut buf = VecDeque::new();
/// buf.push_back(3);
/// buf.push_back(4);
/// buf.push_back(5);
/// assert_eq!(buf, [3, 4, 5]);
/// buf.swap(0, 2);
/// assert_eq!(buf, [5, 4, 3]);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn swap(&mut self, i: usize, j: usize) {
    assert!(i < self.len());
    assert!(j < self.len());
    // Map both logical positions onto the ring, then swap in place.
    let pos_i = self.wrap_add(self.tail, i);
    let pos_j = self.wrap_add(self.tail, j);
    unsafe {
        let base = self.ptr();
        ptr::swap(base.offset(pos_i as isize), base.offset(pos_j as isize))
    }
}
/// Returns the number of elements the `VecDeque` can hold without
/// reallocating.
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
///
/// let buf: VecDeque<i32> = VecDeque::with_capacity(10);
/// assert!(buf.capacity() >= 10);
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn capacity(&self) -> usize {
    // One ring slot is always kept empty, so the usable capacity is one
    // less than the allocated ring size.
    self.cap() - 1
}
/// Reserves the minimum capacity for exactly `additional` more elements to be inserted in the
/// given `VecDeque`. Does nothing if the capacity is already sufficient.
///
/// Note that the allocator may give the collection more space than it requests. Therefore
/// capacity can not be relied upon to be precisely minimal. Prefer [`reserve`] if future
/// insertions are expected.
///
/// # Panics
///
/// Panics if the new capacity overflows `usize`.
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
///
/// let mut buf: VecDeque<i32> = vec![1].into_iter().collect();
/// buf.reserve_exact(10);
/// assert!(buf.capacity() >= 11);
/// ```
///
/// [`reserve`]: #method.reserve
#[stable(feature = "rust1", since = "1.0.0")]
pub fn reserve_exact(&mut self, additional: usize) {
    // The ring size must stay a power of two, so an "exact" reservation
    // cannot do better than the rounding `reserve` already performs.
    self.reserve(additional);
}
/// Reserves capacity for at least `additional` more elements to be inserted in the given
/// `VecDeque`. The collection may reserve more space to avoid frequent reallocations.
///
/// # Panics
///
/// Panics if the new capacity overflows `usize`.
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
///
/// let mut buf: VecDeque<i32> = vec![1].into_iter().collect();
/// buf.reserve(10);
/// assert!(buf.capacity() >= 11);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn reserve(&mut self, additional: usize) {
    let old_cap = self.cap();
    // +1 for the always-empty slot; round the needed size up to the next
    // power of two to preserve the ring invariant.
    let used_cap = self.len() + 1;
    let new_cap = used_cap.checked_add(additional)
        .and_then(|needed_cap| needed_cap.checked_next_power_of_two())
        .expect("capacity overflow");
    if new_cap > old_cap {
        self.buf.reserve_exact(used_cap, new_cap - used_cap);
        unsafe {
            // Re-establish the head/tail layout in the larger buffer.
            self.handle_cap_increase(old_cap);
        }
    }
}
/// Shrinks the capacity of the `VecDeque` as much as possible.
///
/// It will drop down as close as possible to the length but the allocator may still inform the
/// `VecDeque` that there is space for a few more elements.
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
///
/// let mut buf = VecDeque::with_capacity(15);
/// buf.extend(0..4);
/// assert_eq!(buf.capacity(), 15);
/// buf.shrink_to_fit();
/// assert!(buf.capacity() >= 4);
/// ```
#[stable(feature = "deque_extras_15", since = "1.5.0")]
pub fn shrink_to_fit(&mut self) {
    // +1 since the ringbuffer always leaves one space empty
    // len + 1 can't overflow for an existing, well-formed ringbuffer.
    let target_cap = cmp::max(self.len() + 1, MINIMUM_CAPACITY + 1).next_power_of_two();
    if target_cap < self.cap() {
        // There are three cases of interest:
        // All elements are out of desired bounds
        // Elements are contiguous, and head is out of desired bounds
        // Elements are discontiguous, and tail is out of desired bounds
        //
        // At all other times, element positions are unaffected.
        //
        // Indicates that elements at the head should be moved.
        let head_outside = self.head == 0 || self.head >= target_cap;
        // Move elements from out of desired bounds (positions after target_cap)
        if self.tail >= target_cap && head_outside {
            // T H
            // [. . . . . . . . o o o o o o o . ]
            // T H
            // [o o o o o o o . ]
            unsafe {
                self.copy_nonoverlapping(0, self.tail, self.len());
            }
            self.head = self.len();
            self.tail = 0;
        } else if self.tail != 0 && self.tail < target_cap && head_outside {
            // T H
            // [. . . o o o o o o o . . . . . . ]
            // H T
            // [o o . o o o o o ]
            let len = self.wrap_sub(self.head, target_cap);
            unsafe {
                self.copy_nonoverlapping(0, target_cap, len);
            }
            self.head = len;
            debug_assert!(self.head < self.tail);
        } else if self.tail >= target_cap {
            // H T
            // [o o o o o . . . . . . . . . o o ]
            // H T
            // [o o o o o . o o ]
            debug_assert!(self.wrap_sub(self.head, 1) < target_cap);
            let len = self.cap() - self.tail;
            let new_tail = target_cap - len;
            unsafe {
                self.copy_nonoverlapping(new_tail, self.tail, len);
            }
            self.tail = new_tail;
            debug_assert!(self.head < self.tail);
        }
        self.buf.shrink_to_fit(target_cap);
        // The ring invariants must survive the shrink.
        debug_assert!(self.head < self.cap());
        debug_assert!(self.tail < self.cap());
        debug_assert!(self.cap().count_ones() == 1);
    }
}
/// Shortens the `VecDeque`, dropping excess elements from the back.
///
/// If `len` is greater than the `VecDeque`'s current length, this has no
/// effect.
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
///
/// let mut buf = VecDeque::new();
/// buf.push_back(5);
/// buf.push_back(10);
/// buf.push_back(15);
/// assert_eq!(buf, [5, 10, 15]);
/// buf.truncate(1);
/// assert_eq!(buf, [5]);
/// ```
#[stable(feature = "deque_extras", since = "1.16.0")]
pub fn truncate(&mut self, len: usize) {
    // Pop from the back until no more than `len` elements remain.
    while self.len() > len {
        self.pop_back();
    }
}
/// Returns a front-to-back iterator.
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
///
/// let mut buf = VecDeque::new();
/// buf.push_back(5);
/// buf.push_back(3);
/// buf.push_back(4);
/// let b: &[_] = &[&5, &3, &4];
/// let c: Vec<&i32> = buf.iter().collect();
/// assert_eq!(&c[..], b);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn iter(&self) -> Iter<T> {
    // The iterator walks the raw ring with a snapshot of tail/head.
    let ring = unsafe { self.buffer_as_slice() };
    Iter {
        tail: self.tail,
        head: self.head,
        ring: ring,
    }
}
/// Returns a front-to-back iterator that returns mutable references.
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
///
/// let mut buf = VecDeque::new();
/// buf.push_back(5);
/// buf.push_back(3);
/// buf.push_back(4);
/// for num in buf.iter_mut() {
///     *num = *num - 2;
/// }
/// let b: &[_] = &[&mut 3, &mut 1, &mut 2];
/// assert_eq!(&buf.iter_mut().collect::<Vec<&mut i32>>()[..], b);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn iter_mut(&mut self) -> IterMut<T> {
    // Snapshot the indices before mutably borrowing the raw ring.
    let (tail, head) = (self.tail, self.head);
    IterMut {
        tail: tail,
        head: head,
        ring: unsafe { self.buffer_as_mut_slice() },
    }
}
/// Returns a pair of slices which contain, in order, the contents of the
/// `VecDeque`.
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
///
/// let mut vector = VecDeque::new();
///
/// vector.push_back(0);
/// vector.push_back(1);
/// vector.push_back(2);
///
/// assert_eq!(vector.as_slices(), (&[0, 1, 2][..], &[][..]));
///
/// vector.push_front(10);
/// vector.push_front(9);
///
/// assert_eq!(vector.as_slices(), (&[9, 10][..], &[0, 1, 2][..]));
/// ```
#[inline]
#[stable(feature = "deque_extras_15", since = "1.5.0")]
pub fn as_slices(&self) -> (&[T], &[T]) {
    // Split the raw ring at the wrap point into (front, back) views.
    let (head, tail) = (self.head, self.tail);
    unsafe { RingSlices::ring_slices(self.buffer_as_slice(), head, tail) }
}
/// Returns a pair of slices which contain, in order, the contents of the
/// `VecDeque`.
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
///
/// let mut vector = VecDeque::new();
///
/// vector.push_back(0);
/// vector.push_back(1);
///
/// vector.push_front(10);
/// vector.push_front(9);
///
/// vector.as_mut_slices().0[0] = 42;
/// vector.as_mut_slices().1[0] = 24;
/// assert_eq!(vector.as_slices(), (&[42, 10][..], &[24, 1][..]));
/// ```
#[inline]
#[stable(feature = "deque_extras_15", since = "1.5.0")]
pub fn as_mut_slices(&mut self) -> (&mut [T], &mut [T]) {
    // Snapshot the indices before mutably borrowing the raw ring, then
    // split it at the wrap point into (front, back) views.
    let (head, tail) = (self.head, self.tail);
    unsafe { RingSlices::ring_slices(self.buffer_as_mut_slice(), head, tail) }
}
/// Returns the number of elements in the `VecDeque`.
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
///
/// let mut v = VecDeque::new();
/// assert_eq!(v.len(), 0);
/// v.push_back(1);
/// assert_eq!(v.len(), 1);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn len(&self) -> usize {
    // Distance from tail to head, measured around the power-of-two ring.
    count(self.tail, self.head, self.cap())
}
/// Returns `true` if the `VecDeque` is empty.
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
///
/// let mut v = VecDeque::new();
/// assert!(v.is_empty());
/// v.push_front(1);
/// assert!(!v.is_empty());
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn is_empty(&self) -> bool {
    // The head meeting the tail is the ring's "no elements" marker.
    self.head == self.tail
}
/// Create a draining iterator that removes the specified range in the
/// `VecDeque` and yields the removed items.
///
/// Note 1: The element range is removed even if the iterator is not
/// consumed until the end.
///
/// Note 2: It is unspecified how many elements are removed from the deque,
/// if the `Drain` value is not dropped, but the borrow it holds expires
/// (eg. due to mem::forget).
///
/// # Panics
///
/// Panics if the starting point is greater than the end point or if
/// the end point is greater than the length of the vector.
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
///
/// let mut v: VecDeque<_> = vec![1, 2, 3].into_iter().collect();
/// let drained = v.drain(2..).collect::<VecDeque<_>>();
/// assert_eq!(drained, [3]);
/// assert_eq!(v, [1, 2]);
///
/// // A full range clears all contents
/// v.drain(..);
/// assert!(v.is_empty());
/// ```
#[inline]
#[stable(feature = "drain", since = "1.6.0")]
pub fn drain<R>(&mut self, range: R) -> Drain<T>
    where R: RangeArgument<usize>
{
    // Memory safety
    //
    // When the Drain is first created, the source deque is shortened to
    // make sure no uninitialized or moved-from elements are accessible at
    // all if the Drain's destructor never gets to run.
    //
    // Drain will ptr::read out the values to remove.
    // When finished, the remaining data will be copied back to cover the hole,
    // and the head/tail values will be restored correctly.
    //
    // Normalize the range bounds into half-open [start, end) logical indices.
    let len = self.len();
    let start = match range.start() {
        Included(&n) => n,
        Excluded(&n) => n + 1,
        Unbounded => 0,
    };
    let end = match range.end() {
        Included(&n) => n + 1,
        Excluded(&n) => n,
        Unbounded => len,
    };
    assert!(start <= end, "drain lower bound was too large");
    assert!(end <= len, "drain upper bound was too large");
    // The deque's elements are parted into three segments:
    // * self.tail -> drain_tail
    // * drain_tail -> drain_head
    // * drain_head -> self.head
    //
    // T = self.tail; H = self.head; t = drain_tail; h = drain_head
    //
    // We store drain_tail as self.head, and drain_head and self.head as
    // after_tail and after_head respectively on the Drain. This also
    // truncates the effective array such that if the Drain is leaked, we
    // have forgotten about the potentially moved values after the start of
    // the drain.
    //
    // T t h H
    // [. . . o o x x o o . . .]
    //
    let drain_tail = self.wrap_add(self.tail, start);
    let drain_head = self.wrap_add(self.tail, end);
    let head = self.head;
    // "forget" about the values after the start of the drain until after
    // the drain is complete and the Drain destructor is run.
    self.head = drain_tail;
    Drain {
        deque: Shared::from(&mut *self),
        after_tail: drain_head,
        after_head: head,
        iter: Iter {
            tail: drain_tail,
            head: drain_head,
            ring: unsafe { self.buffer_as_mut_slice() },
        },
    }
}
/// Clears the buffer, removing all values.
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
///
/// let mut v = VecDeque::new();
/// v.push_back(1);
/// v.clear();
/// assert!(v.is_empty());
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[inline]
pub fn clear(&mut self) {
    // Draining the full range drops every element front-to-back while
    // keeping the allocation for reuse.
    self.drain(..);
}
/// Returns `true` if the `VecDeque` contains an element equal to the
/// given value.
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
///
/// let mut vector: VecDeque<u32> = VecDeque::new();
///
/// vector.push_back(0);
/// vector.push_back(1);
///
/// assert_eq!(vector.contains(&1), true);
/// assert_eq!(vector.contains(&10), false);
/// ```
#[stable(feature = "vec_deque_contains", since = "1.12.0")]
pub fn contains(&self, x: &T) -> bool
    where T: PartialEq<T>
{
    // Scan front-to-back, short-circuiting on the first match.
    self.iter().any(|elem| elem == x)
}
/// Provides a reference to the front element, or `None` if the `VecDeque` is
/// empty.
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
///
/// let mut d = VecDeque::new();
/// assert_eq!(d.front(), None);
///
/// d.push_back(1);
/// d.push_back(2);
/// assert_eq!(d.front(), Some(&1));
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn front(&self) -> Option<&T> {
    // Logical index 0 is the front; `get` already handles the empty case.
    self.get(0)
}
/// Provides a mutable reference to the front element, or `None` if the
/// `VecDeque` is empty.
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
///
/// let mut d = VecDeque::new();
/// assert_eq!(d.front_mut(), None);
///
/// d.push_back(1);
/// d.push_back(2);
/// match d.front_mut() {
///     Some(x) => *x = 9,
///     None => (),
/// }
/// assert_eq!(d.front(), Some(&9));
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn front_mut(&mut self) -> Option<&mut T> {
    // Logical index 0 is the front; `get_mut` handles the empty case.
    self.get_mut(0)
}
/// Provides a reference to the back element, or `None` if the `VecDeque` is
/// empty.
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
///
/// let mut d = VecDeque::new();
/// assert_eq!(d.back(), None);
///
/// d.push_back(1);
/// d.push_back(2);
/// assert_eq!(d.back(), Some(&2));
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn back(&self) -> Option<&T> {
    let len = self.len();
    if len == 0 {
        return None;
    }
    // The back element sits at the last logical index.
    self.get(len - 1)
}
/// Provides a mutable reference to the back element, or `None` if the
/// `VecDeque` is empty.
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
///
/// let mut d = VecDeque::new();
/// assert_eq!(d.back(), None);
///
/// d.push_back(1);
/// d.push_back(2);
/// match d.back_mut() {
///     Some(x) => *x = 9,
///     None => (),
/// }
/// assert_eq!(d.back(), Some(&9));
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn back_mut(&mut self) -> Option<&mut T> {
    let len = self.len();
    if len == 0 {
        return None;
    }
    // The back element sits at the last logical index.
    self.get_mut(len - 1)
}
/// Removes the first element and returns it, or `None` if the `VecDeque` is
/// empty.
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
///
/// let mut d = VecDeque::new();
/// d.push_back(1);
/// d.push_back(2);
///
/// assert_eq!(d.pop_front(), Some(1));
/// assert_eq!(d.pop_front(), Some(2));
/// assert_eq!(d.pop_front(), None);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn pop_front(&mut self) -> Option<T> {
    if self.is_empty() {
        return None;
    }
    // Advance the tail past the front slot, then move the value out.
    let old_tail = self.tail;
    self.tail = self.wrap_add(old_tail, 1);
    unsafe { Some(self.buffer_read(old_tail)) }
}
/// Prepends an element to the `VecDeque`.
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
///
/// let mut d = VecDeque::new();
/// d.push_front(1);
/// d.push_front(2);
/// assert_eq!(d.front(), Some(&2));
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn push_front(&mut self, value: T) {
    self.grow_if_necessary();
    // Step the tail back one slot and write the new front there.
    let new_tail = self.wrap_sub(self.tail, 1);
    self.tail = new_tail;
    unsafe {
        self.buffer_write(new_tail, value);
    }
}
/// Appends an element to the back of the `VecDeque`.
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
///
/// let mut buf = VecDeque::new();
/// buf.push_back(1);
/// buf.push_back(3);
/// assert_eq!(3, *buf.back().unwrap());
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn push_back(&mut self, value: T) {
    self.grow_if_necessary();
    // Write into the current head slot, then advance the head past it.
    let old_head = self.head;
    self.head = self.wrap_add(old_head, 1);
    unsafe { self.buffer_write(old_head, value) }
}
/// Removes the last element from the `VecDeque` and returns it, or `None` if
/// it is empty.
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
///
/// let mut buf = VecDeque::new();
/// assert_eq!(buf.pop_back(), None);
/// buf.push_back(1);
/// buf.push_back(3);
/// assert_eq!(buf.pop_back(), Some(3));
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn pop_back(&mut self) -> Option<T> {
    if self.is_empty() {
        return None;
    }
    // Step the head back onto the last element and move the value out.
    let new_head = self.wrap_sub(self.head, 1);
    self.head = new_head;
    unsafe { Some(self.buffer_read(new_head)) }
}
/// Returns `true` when the occupied region does not wrap past the end of
/// the buffer, i.e. it forms a single contiguous slice.
#[inline]
fn is_contiguous(&self) -> bool {
    !(self.head < self.tail)
}
/// Removes an element from anywhere in the `VecDeque` and returns it, replacing it with the
/// last element.
///
/// This does not preserve ordering, but is O(1).
///
/// Returns `None` if `index` is out of bounds.
///
/// Element at index 0 is the front of the queue.
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
///
/// let mut buf = VecDeque::new();
/// assert_eq!(buf.swap_remove_back(0), None);
/// buf.push_back(1);
/// buf.push_back(2);
/// buf.push_back(3);
/// assert_eq!(buf, [1, 2, 3]);
///
/// assert_eq!(buf.swap_remove_back(0), Some(1));
/// assert_eq!(buf, [3, 2]);
/// ```
#[stable(feature = "deque_extras_15", since = "1.5.0")]
pub fn swap_remove_back(&mut self, index: usize) -> Option<T> {
    let length = self.len();
    if index >= length {
        return None;
    }
    // Move the back element into the vacated position (unless the target
    // already is the back), then pop it off the back.
    if index < length - 1 {
        self.swap(index, length - 1);
    }
    self.pop_back()
}
/// Removes an element from anywhere in the `VecDeque` and returns it,
/// replacing it with the first element.
///
/// This does not preserve ordering, but is O(1).
///
/// Returns `None` if `index` is out of bounds.
///
/// Element at index 0 is the front of the queue.
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
///
/// let mut buf = VecDeque::new();
/// assert_eq!(buf.swap_remove_front(0), None);
/// buf.push_back(1);
/// buf.push_back(2);
/// buf.push_back(3);
/// assert_eq!(buf, [1, 2, 3]);
///
/// assert_eq!(buf.swap_remove_front(2), Some(3));
/// assert_eq!(buf, [2, 1]);
/// ```
#[stable(feature = "deque_extras_15", since = "1.5.0")]
pub fn swap_remove_front(&mut self, index: usize) -> Option<T> {
    let len = self.len();
    // Out of bounds (this also covers the empty deque).
    if index >= len {
        return None;
    }
    // Move the target into the front slot unless it already is the
    // front element, then remove it from the front in O(1).
    if index != 0 {
        self.swap(index, 0);
    }
    self.pop_front()
}
/// Inserts an element at `index` within the `VecDeque`, shifting all elements with indices
/// greater than or equal to `index` towards the back.
///
/// Element at index 0 is the front of the queue.
///
/// # Panics
///
/// Panics if `index` is greater than `VecDeque`'s length
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
///
/// let mut vec_deque = VecDeque::new();
/// vec_deque.push_back('a');
/// vec_deque.push_back('b');
/// vec_deque.push_back('c');
/// assert_eq!(vec_deque, &['a', 'b', 'c']);
///
/// vec_deque.insert(1, 'd');
/// assert_eq!(vec_deque, &['a', 'd', 'b', 'c']);
/// ```
#[stable(feature = "deque_extras_15", since = "1.5.0")]
pub fn insert(&mut self, index: usize, value: T) {
    assert!(index <= self.len(), "index out of bounds");
    self.grow_if_necessary();
    // Move the least number of elements in the ring buffer and insert
    // the given object
    //
    // At most len/2 - 1 elements will be moved. O(min(n, n-i))
    //
    // There are three main cases:
    //  Elements are contiguous
    //      - special case when tail is 0
    //  Elements are discontiguous and the insert is in the tail section
    //  Elements are discontiguous and the insert is in the head section
    //
    // For each of those there are two more cases:
    //  Insert is closer to tail
    //  Insert is closer to head
    //
    // Key: H - self.head
    //      T - self.tail
    //      o - Valid element
    //      I - Insertion element
    //      A - The element that should be after the insertion point
    //      M - Indicates element was moved
    // Physical buffer index of the logical insertion point.
    let idx = self.wrap_add(self.tail, index);
    // Cost of shifting towards either end; whichever is smaller wins.
    let distance_to_tail = index;
    let distance_to_head = self.len() - index;
    let contiguous = self.is_contiguous();
    match (contiguous, distance_to_tail <= distance_to_head, idx >= self.tail) {
        (true, true, _) if index == 0 => {
            // push_front
            //
            //       T
            //       I             H
            //      [A o o o o o o . . . . . . . . .]
            //
            //                       H         T
            //      [A o o o o o o o . . . . . I]
            //
            self.tail = self.wrap_sub(self.tail, 1);
        }
        (true, true, _) => {
            unsafe {
                // contiguous, insert closer to tail:
                //
                //             T   I         H
                //      [. . . o o A o o o o . . . . . .]
                //
                //           T               H
                //      [. . o o I A o o o o . . . . . .]
                //           M M
                //
                // contiguous, insert closer to tail and tail is 0:
                //
                //
                //       T   I         H
                //      [o o A o o o o . . . . . . . . .]
                //
                //                       H             T
                //      [o I A o o o o o . . . . . . . o]
                //       M                             M
                let new_tail = self.wrap_sub(self.tail, 1);
                self.copy(new_tail, self.tail, 1);
                // Already moved the tail, so we only copy `index - 1` elements.
                self.copy(self.tail, self.tail + 1, index - 1);
                self.tail = new_tail;
            }
        }
        (true, false, _) => {
            unsafe {
                //  contiguous, insert closer to head:
                //
                //             T       I     H
                //      [. . . o o o o A o o . . . . . .]
                //
                //             T               H
                //      [. . . o o o o I A o o . . . . .]
                //                       M M M
                self.copy(idx + 1, idx, self.head - idx);
                self.head = self.wrap_add(self.head, 1);
            }
        }
        (false, true, true) => {
            unsafe {
                // discontiguous, insert closer to tail, tail section:
                //
                //                   H         T   I
                //      [o o o o o o . . . . . o o A o o]
                //
                //                   H       T
                //      [o o o o o o . . . . o o I A o o]
                //                           M M
                self.copy(self.tail - 1, self.tail, index);
                self.tail -= 1;
            }
        }
        (false, false, true) => {
            unsafe {
                // discontiguous, insert closer to head, tail section:
                //
                //           H             T         I
                //      [o o . . . . . . . o o o o o A o]
                //
                //             H           T
                //      [o o o . . . . . . o o o o o I A]
                //       M M M                         M
                // copy elements up to new head
                self.copy(1, 0, self.head);
                // copy last element into empty spot at bottom of buffer
                self.copy(0, self.cap() - 1, 1);
                // move elements from idx to end forward not including ^ element
                self.copy(idx + 1, idx, self.cap() - 1 - idx);
                self.head += 1;
            }
        }
        (false, true, false) if idx == 0 => {
            unsafe {
                // discontiguous, insert is closer to tail, head section,
                // and is at index zero in the internal buffer:
                //
                //       I                   H     T
                //      [A o o o o o o o o o . . . o o o]
                //
                //                           H   T
                //      [A o o o o o o o o o . . o o o I]
                //                               M M M
                // copy elements up to new tail
                self.copy(self.tail - 1, self.tail, self.cap() - self.tail);
                // copy last element into empty spot at bottom of buffer
                self.copy(self.cap() - 1, 0, 1);
                self.tail -= 1;
            }
        }
        (false, true, false) => {
            unsafe {
                // discontiguous, insert closer to tail, head section:
                //
                //             I             H     T
                //      [o o o A o o o o o o . . . o o o]
                //
                //                           H   T
                //      [o o I A o o o o o o . . o o o o]
                //       M M                     M M M M
                // copy elements up to new tail
                self.copy(self.tail - 1, self.tail, self.cap() - self.tail);
                // copy last element into empty spot at bottom of buffer
                self.copy(self.cap() - 1, 0, 1);
                // move elements from idx-1 to end forward not including ^ element
                self.copy(0, 1, idx - 1);
                self.tail -= 1;
            }
        }
        (false, false, false) => {
            unsafe {
                // discontiguous, insert closer to head, head section:
                //
                //               I     H           T
                //      [o o o o A o o . . . . . . o o o]
                //
                //                     H           T
                //      [o o o o I A o o . . . . . o o o]
                //                 M M M
                self.copy(idx + 1, idx, self.head - idx);
                self.head += 1;
            }
        }
    }
    // tail might've been changed so we need to recalculate
    let new_idx = self.wrap_add(self.tail, index);
    unsafe {
        self.buffer_write(new_idx, value);
    }
}
/// Removes and returns the element at `index` from the `VecDeque`.
/// Whichever end is closer to the removal point will be moved to make
/// room, and all the affected elements will be moved to new positions.
/// Returns `None` if `index` is out of bounds.
///
/// Element at index 0 is the front of the queue.
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
///
/// let mut buf = VecDeque::new();
/// buf.push_back(1);
/// buf.push_back(2);
/// buf.push_back(3);
/// assert_eq!(buf, [1, 2, 3]);
///
/// assert_eq!(buf.remove(1), Some(2));
/// assert_eq!(buf, [1, 3]);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
pub fn remove(&mut self, index: usize) -> Option<T> {
    if self.is_empty() || self.len() <= index {
        return None;
    }
    // There are three main cases:
    //  Elements are contiguous
    //  Elements are discontiguous and the removal is in the tail section
    //  Elements are discontiguous and the removal is in the head section
    //      - special case when elements are technically contiguous,
    //        but self.head = 0
    //
    // For each of those there are two more cases:
    //  Insert is closer to tail
    //  Insert is closer to head
    //
    // Key: H - self.head
    //      T - self.tail
    //      o - Valid element
    //      x - Element marked for removal
    //      R - Indicates element that is being removed
    //      M - Indicates element was moved
    // Physical buffer index of the element being removed.
    let idx = self.wrap_add(self.tail, index);
    // Read the element out first; the shifts below overwrite its slot.
    let elem = unsafe { Some(self.buffer_read(idx)) };
    let distance_to_tail = index;
    let distance_to_head = self.len() - index;
    let contiguous = self.is_contiguous();
    match (contiguous, distance_to_tail <= distance_to_head, idx >= self.tail) {
        (true, true, _) => {
            unsafe {
                // contiguous, remove closer to tail:
                //
                //             T   R         H
                //      [. . . o o x o o o o . . . . . .]
                //
                //               T           H
                //      [. . . . o o o o o o . . . . . .]
                //               M M
                self.copy(self.tail + 1, self.tail, index);
                self.tail += 1;
            }
        }
        (true, false, _) => {
            unsafe {
                // contiguous, remove closer to head:
                //
                //             T       R     H
                //      [. . . o o o o x o o . . . . . .]
                //
                //             T           H
                //      [. . . o o o o o o . . . . . . .]
                //                     M M
                self.copy(idx, idx + 1, self.head - idx - 1);
                self.head -= 1;
            }
        }
        (false, true, true) => {
            unsafe {
                // discontiguous, remove closer to tail, tail section:
                //
                //                   H         T   R
                //      [o o o o o o . . . . . o o x o o]
                //
                //                   H           T
                //      [o o o o o o . . . . . . o o o o]
                //                               M M
                self.copy(self.tail + 1, self.tail, index);
                self.tail = self.wrap_add(self.tail, 1);
            }
        }
        (false, false, false) => {
            unsafe {
                // discontiguous, remove closer to head, head section:
                //
                //               R     H           T
                //      [o o o o x o o . . . . . . o o o]
                //
                //                   H             T
                //      [o o o o o o . . . . . . . o o o]
                //               M M
                self.copy(idx, idx + 1, self.head - idx - 1);
                self.head -= 1;
            }
        }
        (false, false, true) => {
            unsafe {
                // discontiguous, remove closer to head, tail section:
                //
                //             H           T         R
                //      [o o o . . . . . . o o o o o x o]
                //
                //           H             T
                //      [o o . . . . . . . o o o o o o o]
                //       M M                         M M
                //
                // or quasi-discontiguous, remove next to head, tail section:
                //
                //       H                 T         R
                //      [. . . . . . . . . o o o o o x o]
                //
                //                         T           H
                //      [. . . . . . . . . o o o o o o .]
                //                                   M
                // draw in elements in the tail section
                self.copy(idx, idx + 1, self.cap() - idx - 1);
                // Prevents underflow.
                if self.head != 0 {
                    // copy first element into empty spot
                    self.copy(self.cap() - 1, 0, 1);
                    // move elements in the head section backwards
                    self.copy(0, 1, self.head - 1);
                }
                self.head = self.wrap_sub(self.head, 1);
            }
        }
        (false, true, false) => {
            unsafe {
                // discontiguous, remove closer to tail, head section:
                //
                //           R               H     T
                //      [o o x o o o o o o o . . . o o o]
                //
                //                           H       T
                //      [o o o o o o o o o o . . . . o o]
                //       M M M                       M M
                // draw in elements up to idx
                self.copy(1, 0, idx);
                // copy last element into empty spot
                self.copy(0, self.cap() - 1, 1);
                // move elements from tail to end forward, excluding the last one
                self.copy(self.tail + 1, self.tail, self.cap() - self.tail - 1);
                self.tail = self.wrap_add(self.tail, 1);
            }
        }
    }
    return elem;
}
/// Splits the collection into two at the given index.
///
/// Returns a newly allocated `Self`. `self` contains elements `[0, at)`,
/// and the returned `Self` contains elements `[at, len)`.
///
/// Note that the capacity of `self` does not change.
///
/// Element at index 0 is the front of the queue.
///
/// # Panics
///
/// Panics if `at > len`
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
///
/// let mut buf: VecDeque<_> = vec![1,2,3].into_iter().collect();
/// let buf2 = buf.split_off(1);
/// assert_eq!(buf, [1]);
/// assert_eq!(buf2, [2, 3]);
/// ```
#[inline]
#[stable(feature = "split_off", since = "1.4.0")]
pub fn split_off(&mut self, at: usize) -> Self {
    let len = self.len();
    assert!(at <= len, "`at` out of bounds");
    let other_len = len - at;
    let mut other = VecDeque::with_capacity(other_len);
    // Copy the trailing `other_len` elements straight into `other`'s
    // fresh buffer, starting at physical index 0. The source may span
    // one or both of `self`'s slices.
    unsafe {
        let (first_half, second_half) = self.as_slices();
        let first_len = first_half.len();
        let second_len = second_half.len();
        if at < first_len {
            // `at` lies in the first half.
            let amount_in_first = first_len - at;
            ptr::copy_nonoverlapping(first_half.as_ptr().offset(at as isize),
                                     other.ptr(),
                                     amount_in_first);
            // just take all of the second half.
            ptr::copy_nonoverlapping(second_half.as_ptr(),
                                     other.ptr().offset(amount_in_first as isize),
                                     second_len);
        } else {
            // `at` lies in the second half, need to factor in the elements we skipped
            // in the first half.
            let offset = at - first_len;
            let amount_in_second = second_len - offset;
            ptr::copy_nonoverlapping(second_half.as_ptr().offset(offset as isize),
                                     other.ptr(),
                                     amount_in_second);
        }
    }
    // Cleanup where the ends of the buffers are
    // (`self` shrinks by `other_len`; `other` is contiguous from 0).
    self.head = self.wrap_sub(self.head, other_len);
    other.head = other.wrap_index(other_len);
    other
}
/// Moves all the elements of `other` into `Self`, leaving `other` empty.
///
/// # Panics
///
/// Panics if the new number of elements in self overflows a `usize`.
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
///
/// let mut buf: VecDeque<_> = vec![1, 2].into_iter().collect();
/// let mut buf2: VecDeque<_> = vec![3, 4].into_iter().collect();
/// buf.append(&mut buf2);
/// assert_eq!(buf, [1, 2, 3, 4]);
/// assert_eq!(buf2, []);
/// ```
#[inline]
#[stable(feature = "append", since = "1.4.0")]
pub fn append(&mut self, other: &mut Self) {
    // naive impl
    // Draining `other` moves each element out one by one; `other`
    // keeps its allocation but ends up logically empty.
    self.extend(other.drain(..));
}
/// Retains only the elements specified by the predicate.
///
/// In other words, remove all elements `e` such that `f(&e)` returns false.
/// This method operates in place and preserves the order of the retained
/// elements.
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
///
/// let mut buf = VecDeque::new();
/// buf.extend(1..5);
/// buf.retain(|&x| x%2 == 0);
/// assert_eq!(buf, [2, 4]);
/// ```
#[stable(feature = "vec_deque_retain", since = "1.4.0")]
pub fn retain<F>(&mut self, mut f: F)
    where F: FnMut(&T) -> bool
{
    let original_len = self.len();
    // Compact the kept elements towards the front, preserving their
    // relative order; `kept` counts how many survive.
    let mut kept = 0;
    for idx in 0..original_len {
        if f(&self[idx]) {
            if kept != idx {
                self.swap(kept, idx);
            }
            kept += 1;
        }
    }
    // Drop everything past the retained prefix (no-op if all kept).
    self.truncate(kept);
}
// This may panic or abort
// Ensures at least one free slot remains in the ring buffer (the
// buffer is never allowed to be completely full, since head == tail
// would then be ambiguous between empty and full).
#[inline]
fn grow_if_necessary(&mut self) {
    if self.is_full() {
        let old_cap = self.cap();
        // Doubling keeps the capacity a power of two, which the
        // wrap_index masking relies on.
        self.buf.double();
        unsafe {
            self.handle_cap_increase(old_cap);
        }
        debug_assert!(!self.is_full());
    }
}
/// Returns a place for insertion at the back of the `VecDeque`.
///
/// Using this method with placement syntax is equivalent to [`push_back`](#method.push_back),
/// but may be more efficient.
///
/// # Examples
///
/// ```
/// #![feature(collection_placement)]
/// #![feature(placement_in_syntax)]
///
/// use std::collections::VecDeque;
///
/// let mut buf = VecDeque::new();
/// buf.place_back() <- 3;
/// buf.place_back() <- 4;
/// assert_eq!(&buf, &[3, 4]);
/// ```
#[unstable(feature = "collection_placement",
           reason = "placement protocol is subject to change",
           issue = "30172")]
pub fn place_back(&mut self) -> PlaceBack<T> {
    // The returned place borrows the deque mutably until the `<-`
    // write completes.
    PlaceBack { vec_deque: self }
}
/// Returns a place for insertion at the front of the `VecDeque`.
///
/// Using this method with placement syntax is equivalent to [`push_front`](#method.push_front),
/// but may be more efficient.
///
/// # Examples
///
/// ```
/// #![feature(collection_placement)]
/// #![feature(placement_in_syntax)]
///
/// use std::collections::VecDeque;
///
/// let mut buf = VecDeque::new();
/// buf.place_front() <- 3;
/// buf.place_front() <- 4;
/// assert_eq!(&buf, &[4, 3]);
/// ```
#[unstable(feature = "collection_placement",
           reason = "placement protocol is subject to change",
           issue = "30172")]
pub fn place_front(&mut self) -> PlaceFront<T> {
    // Mirror of `place_back` for the front end of the deque.
    PlaceFront { vec_deque: self }
}
}
impl<T: Clone> VecDeque<T> {
/// Modifies the `VecDeque` in-place so that `len()` is equal to new_len,
/// either by removing excess elements or by appending clones of `value` to the back.
///
/// # Examples
///
/// ```
/// use std::collections::VecDeque;
///
/// let mut buf = VecDeque::new();
/// buf.push_back(5);
/// buf.push_back(10);
/// buf.push_back(15);
/// assert_eq!(buf, [5, 10, 15]);
///
/// buf.resize(2, 0);
/// assert_eq!(buf, [5, 10]);
///
/// buf.resize(5, 20);
/// assert_eq!(buf, [5, 10, 20, 20, 20]);
/// ```
#[stable(feature = "deque_extras", since = "1.16.0")]
pub fn resize(&mut self, new_len: usize, value: T) {
    let current = self.len();
    if new_len <= current {
        // Shrinking (or no-op): drop the excess from the back.
        self.truncate(new_len);
    } else {
        // Growing: append clones of `value` until the target length
        // is reached.
        self.extend(repeat(value).take(new_len - current))
    }
}
}
/// Returns the index in the underlying buffer for a given logical element index.
#[inline]
fn wrap_index(index: usize, size: usize) -> usize {
    // size is always a power of 2
    debug_assert!(size.is_power_of_two());
    // Masking with `size - 1` equals `index % size` for power-of-two
    // sizes, but avoids a division.
    index & (size - 1)
}
/// Returns the two slices that cover the `VecDeque`'s valid range
trait RingSlices: Sized {
    fn slice(self, from: usize, to: usize) -> Self;
    fn split_at(self, i: usize) -> (Self, Self);
    fn ring_slices(buf: Self, head: usize, tail: usize) -> (Self, Self) {
        let contiguous = tail <= head;
        if contiguous {
            // Single run `[tail, head)`; the second slice is empty.
            let (empty, buf) = buf.split_at(0);
            (buf.slice(tail, head), empty)
        } else {
            // Wrapped: `[tail, cap)` is the front run, `[0, head)` the back.
            let (mid, right) = buf.split_at(tail);
            let (left, _) = mid.split_at(head);
            (right, left)
        }
    }
}
// Shared and mutable slices both implement the ring-slicing protocol.
impl<'a, T> RingSlices for &'a [T] {
    fn slice(self, from: usize, to: usize) -> Self {
        &self[from..to]
    }
    fn split_at(self, i: usize) -> (Self, Self) {
        (*self).split_at(i)
    }
}
impl<'a, T> RingSlices for &'a mut [T] {
    fn slice(self, from: usize, to: usize) -> Self {
        &mut self[from..to]
    }
    fn split_at(self, i: usize) -> (Self, Self) {
        (*self).split_at_mut(i)
    }
}
/// Calculate the number of elements left to be read in the buffer
#[inline]
fn count(tail: usize, head: usize, size: usize) -> usize {
    // size is always a power of 2
    (head.wrapping_sub(tail)) & (size - 1)
}
/// An iterator over the elements of a `VecDeque`.
///
/// This `struct` is created by the [`iter`] method on [`VecDeque`]. See its
/// documentation for more.
///
/// [`iter`]: struct.VecDeque.html#method.iter
/// [`VecDeque`]: struct.VecDeque.html
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Iter<'a, T: 'a> {
    // Borrow of the whole backing buffer; `tail`/`head` delimit the
    // remaining (wrapping) range to be yielded.
    ring: &'a [T],
    tail: usize,
    head: usize,
}
#[stable(feature = "collection_debug", since = "1.17.0")]
impl<'a, T: 'a + fmt::Debug> fmt::Debug for Iter<'a, T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_tuple("Iter")
            .field(&self.ring)
            .field(&self.tail)
            .field(&self.head)
            .finish()
    }
}
// FIXME(#19839) Remove in favor of `#[derive(Clone)]`
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T> Clone for Iter<'a, T> {
    fn clone(&self) -> Iter<'a, T> {
        Iter {
            ring: self.ring,
            tail: self.tail,
            head: self.head,
        }
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T> Iterator for Iter<'a, T> {
    type Item = &'a T;
    #[inline]
    fn next(&mut self) -> Option<&'a T> {
        // tail == head means the remaining range is empty.
        if self.tail == self.head {
            return None;
        }
        let tail = self.tail;
        self.tail = wrap_index(self.tail.wrapping_add(1), self.ring.len());
        // Safe: `tail` is within the valid (unconsumed) range.
        unsafe { Some(self.ring.get_unchecked(tail)) }
    }
    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        let len = count(self.tail, self.head, self.ring.len());
        (len, Some(len))
    }
    fn fold<Acc, F>(self, mut accum: Acc, mut f: F) -> Acc
        where F: FnMut(Acc, Self::Item) -> Acc
    {
        // Fold the two contiguous runs front-to-back.
        let (front, back) = RingSlices::ring_slices(self.ring, self.head, self.tail);
        accum = front.iter().fold(accum, &mut f);
        back.iter().fold(accum, &mut f)
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T> DoubleEndedIterator for Iter<'a, T> {
    #[inline]
    fn next_back(&mut self) -> Option<&'a T> {
        if self.tail == self.head {
            return None;
        }
        // Step the head backwards (wrapping) onto the last element.
        self.head = wrap_index(self.head.wrapping_sub(1), self.ring.len());
        unsafe { Some(self.ring.get_unchecked(self.head)) }
    }
    fn rfold<Acc, F>(self, mut accum: Acc, mut f: F) -> Acc
        where F: FnMut(Acc, Self::Item) -> Acc
    {
        // Fold the two contiguous runs back-to-front.
        let (front, back) = RingSlices::ring_slices(self.ring, self.head, self.tail);
        accum = back.iter().rfold(accum, &mut f);
        front.iter().rfold(accum, &mut f)
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T> ExactSizeIterator for Iter<'a, T> {
    fn is_empty(&self) -> bool {
        self.head == self.tail
    }
}
#[unstable(feature = "fused", issue = "35602")]
impl<'a, T> FusedIterator for Iter<'a, T> {}
/// A mutable iterator over the elements of a `VecDeque`.
///
/// This `struct` is created by the [`iter_mut`] method on [`VecDeque`]. See its
/// documentation for more.
///
/// [`iter_mut`]: struct.VecDeque.html#method.iter_mut
/// [`VecDeque`]: struct.VecDeque.html
#[stable(feature = "rust1", since = "1.0.0")]
pub struct IterMut<'a, T: 'a> {
    // Mutable borrow of the whole backing buffer; `tail`/`head`
    // delimit the remaining (wrapping) range to be yielded.
    ring: &'a mut [T],
    tail: usize,
    head: usize,
}
#[stable(feature = "collection_debug", since = "1.17.0")]
impl<'a, T: 'a + fmt::Debug> fmt::Debug for IterMut<'a, T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_tuple("IterMut")
            .field(&self.ring)
            .field(&self.tail)
            .field(&self.head)
            .finish()
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T> Iterator for IterMut<'a, T> {
    type Item = &'a mut T;
    #[inline]
    fn next(&mut self) -> Option<&'a mut T> {
        if self.tail == self.head {
            return None;
        }
        let tail = self.tail;
        self.tail = wrap_index(self.tail.wrapping_add(1), self.ring.len());
        unsafe {
            // Raw-pointer round trip detaches the element's lifetime
            // from the iterator borrow; sound because each index is
            // yielded at most once.
            let elem = self.ring.get_unchecked_mut(tail);
            Some(&mut *(elem as *mut _))
        }
    }
    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        let len = count(self.tail, self.head, self.ring.len());
        (len, Some(len))
    }
    fn fold<Acc, F>(self, mut accum: Acc, mut f: F) -> Acc
        where F: FnMut(Acc, Self::Item) -> Acc
    {
        // Fold the two contiguous runs front-to-back.
        let (front, back) = RingSlices::ring_slices(self.ring, self.head, self.tail);
        accum = front.iter_mut().fold(accum, &mut f);
        back.iter_mut().fold(accum, &mut f)
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T> DoubleEndedIterator for IterMut<'a, T> {
    #[inline]
    fn next_back(&mut self) -> Option<&'a mut T> {
        if self.tail == self.head {
            return None;
        }
        self.head = wrap_index(self.head.wrapping_sub(1), self.ring.len());
        unsafe {
            // Same lifetime-detaching trick as in `next`.
            let elem = self.ring.get_unchecked_mut(self.head);
            Some(&mut *(elem as *mut _))
        }
    }
    fn rfold<Acc, F>(self, mut accum: Acc, mut f: F) -> Acc
        where F: FnMut(Acc, Self::Item) -> Acc
    {
        // Fold the two contiguous runs back-to-front.
        let (front, back) = RingSlices::ring_slices(self.ring, self.head, self.tail);
        accum = back.iter_mut().rfold(accum, &mut f);
        front.iter_mut().rfold(accum, &mut f)
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T> ExactSizeIterator for IterMut<'a, T> {
    fn is_empty(&self) -> bool {
        self.head == self.tail
    }
}
#[unstable(feature = "fused", issue = "35602")]
impl<'a, T> FusedIterator for IterMut<'a, T> {}
/// An owning iterator over the elements of a `VecDeque`.
///
/// This `struct` is created by the [`into_iter`] method on [`VecDeque`][`VecDeque`]
/// (provided by the `IntoIterator` trait). See its documentation for more.
///
/// [`into_iter`]: struct.VecDeque.html#method.into_iter
/// [`VecDeque`]: struct.VecDeque.html
#[derive(Clone)]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct IntoIter<T> {
    // Simply wraps the deque itself and pops from either end.
    inner: VecDeque<T>,
}
#[stable(feature = "collection_debug", since = "1.17.0")]
impl<T: fmt::Debug> fmt::Debug for IntoIter<T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_tuple("IntoIter")
            .field(&self.inner)
            .finish()
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> Iterator for IntoIter<T> {
    type Item = T;
    #[inline]
    fn next(&mut self) -> Option<T> {
        // Front-to-back iteration: pop from the front.
        self.inner.pop_front()
    }
    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        let len = self.inner.len();
        (len, Some(len))
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> DoubleEndedIterator for IntoIter<T> {
    #[inline]
    fn next_back(&mut self) -> Option<T> {
        self.inner.pop_back()
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> ExactSizeIterator for IntoIter<T> {
    fn is_empty(&self) -> bool {
        self.inner.is_empty()
    }
}
#[unstable(feature = "fused", issue = "35602")]
impl<T> FusedIterator for IntoIter<T> {}
/// A draining iterator over the elements of a `VecDeque`.
///
/// This `struct` is created by the [`drain`] method on [`VecDeque`]. See its
/// documentation for more.
///
/// [`drain`]: struct.VecDeque.html#method.drain
/// [`VecDeque`]: struct.VecDeque.html
#[stable(feature = "drain", since = "1.6.0")]
pub struct Drain<'a, T: 'a> {
    // Saved positions of the region that follows the drained range,
    // used by `Drop` to stitch the deque back together.
    after_tail: usize,
    after_head: usize,
    iter: Iter<'a, T>,
    // Raw access back to the source deque (no borrow, hence the
    // manual Send/Sync impls below).
    deque: Shared<VecDeque<T>>,
}
#[stable(feature = "collection_debug", since = "1.17.0")]
impl<'a, T: 'a + fmt::Debug> fmt::Debug for Drain<'a, T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_tuple("Drain")
            .field(&self.after_tail)
            .field(&self.after_head)
            .field(&self.iter)
            .finish()
    }
}
#[stable(feature = "drain", since = "1.6.0")]
unsafe impl<'a, T: Sync> Sync for Drain<'a, T> {}
#[stable(feature = "drain", since = "1.6.0")]
unsafe impl<'a, T: Send> Send for Drain<'a, T> {}
#[stable(feature = "drain", since = "1.6.0")]
impl<'a, T: 'a> Drop for Drain<'a, T> {
    fn drop(&mut self) {
        // Exhaust the iterator so every drained element is dropped.
        for _ in self.by_ref() {}
        let source_deque = unsafe { self.deque.as_mut() };
        // T = source_deque_tail; H = source_deque_head; t = drain_tail; h = drain_head
        //
        //        T   t   h   H
        // [. . . o o x x o o . . .]
        //
        // NOTE(review): while draining, the source deque's head was set
        // to the start of the drained range, so `source_deque.head`
        // here is the drain's tail.
        let orig_tail = source_deque.tail;
        let drain_tail = source_deque.head;
        let drain_head = self.after_tail;
        let orig_head = self.after_head;
        let tail_len = count(orig_tail, drain_tail, source_deque.cap());
        let head_len = count(drain_head, orig_head, source_deque.cap());
        // Restore the original head value
        source_deque.head = orig_head;
        match (tail_len, head_len) {
            (0, 0) => {
                // Everything was drained: reset to the canonical empty state.
                source_deque.head = 0;
                source_deque.tail = 0;
            }
            (0, _) => {
                // Nothing before the drained range: just advance the tail.
                source_deque.tail = drain_head;
            }
            (_, 0) => {
                // Nothing after the drained range: just pull back the head.
                source_deque.head = drain_tail;
            }
            _ => unsafe {
                // Close the gap by shifting whichever side is shorter.
                if tail_len <= head_len {
                    source_deque.tail = source_deque.wrap_sub(drain_head, tail_len);
                    source_deque.wrap_copy(source_deque.tail, orig_tail, tail_len);
                } else {
                    source_deque.head = source_deque.wrap_add(drain_tail, head_len);
                    source_deque.wrap_copy(drain_tail, drain_head, head_len);
                }
            },
        }
    }
}
#[stable(feature = "drain", since = "1.6.0")]
impl<'a, T: 'a> Iterator for Drain<'a, T> {
    type Item = T;
    #[inline]
    fn next(&mut self) -> Option<T> {
        // Move the element out by value from the borrowed slot.
        self.iter.next().map(|elt| unsafe { ptr::read(elt) })
    }
    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        self.iter.size_hint()
    }
}
#[stable(feature = "drain", since = "1.6.0")]
impl<'a, T: 'a> DoubleEndedIterator for Drain<'a, T> {
    #[inline]
    fn next_back(&mut self) -> Option<T> {
        self.iter.next_back().map(|elt| unsafe { ptr::read(elt) })
    }
}
#[stable(feature = "drain", since = "1.6.0")]
impl<'a, T: 'a> ExactSizeIterator for Drain<'a, T> {}
#[unstable(feature = "fused", issue = "35602")]
impl<'a, T: 'a> FusedIterator for Drain<'a, T> {}
#[stable(feature = "rust1", since = "1.0.0")]
impl<A: PartialEq> PartialEq for VecDeque<A> {
    fn eq(&self, other: &VecDeque<A>) -> bool {
        if self.len() != other.len() {
            return false;
        }
        // Each deque exposes up to two contiguous slices; the split
        // points generally differ, so compare in three aligned pieces.
        let (sa, sb) = self.as_slices();
        let (oa, ob) = other.as_slices();
        if sa.len() == oa.len() {
            sa == oa && sb == ob
        } else if sa.len() < oa.len() {
            // Always divisible in three sections, for example:
            // self:  [a b c|d e f]
            // other: [0 1 2 3|4 5]
            // front = 3, mid = 1,
            // [a b c] == [0 1 2] && [d] == [3] && [e f] == [4 5]
            let front = sa.len();
            let mid = oa.len() - front;
            let (oa_front, oa_mid) = oa.split_at(front);
            let (sb_mid, sb_back) = sb.split_at(mid);
            debug_assert_eq!(sa.len(), oa_front.len());
            debug_assert_eq!(sb_mid.len(), oa_mid.len());
            debug_assert_eq!(sb_back.len(), ob.len());
            sa == oa_front && sb_mid == oa_mid && sb_back == ob
        } else {
            // Mirror image of the branch above (self's first slice is
            // the longer one).
            let front = oa.len();
            let mid = sa.len() - front;
            let (sa_front, sa_mid) = sa.split_at(front);
            let (ob_mid, ob_back) = ob.split_at(mid);
            debug_assert_eq!(sa_front.len(), oa.len());
            debug_assert_eq!(sa_mid.len(), ob_mid.len());
            debug_assert_eq!(sb.len(), ob_back.len());
            sa_front == oa && sa_mid == ob_mid && sb == ob_back
        }
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<A: Eq> Eq for VecDeque<A> {}
// Generates `PartialEq<$Rhs>` for a `VecDeque`-like `$Lhs` by comparing
// against the right-hand side split at the deque's internal seam.
macro_rules! __impl_slice_eq1 {
    ($Lhs: ty, $Rhs: ty) => {
        __impl_slice_eq1! { $Lhs, $Rhs, Sized }
    };
    ($Lhs: ty, $Rhs: ty, $Bound: ident) => {
        #[stable(feature = "vec-deque-partial-eq-slice", since = "1.17.0")]
        impl<'a, 'b, A: $Bound, B> PartialEq<$Rhs> for $Lhs where A: PartialEq<B> {
            fn eq(&self, other: &$Rhs) -> bool {
                if self.len() != other.len() {
                    return false;
                }
                // Split the (contiguous) RHS at the deque's seam so the
                // two halves line up for direct slice comparison.
                let (sa, sb) = self.as_slices();
                let (oa, ob) = other[..].split_at(sa.len());
                sa == oa && sb == ob
            }
        }
    }
}
__impl_slice_eq1! { VecDeque<A>, Vec<B> }
__impl_slice_eq1! { VecDeque<A>, &'b [B] }
__impl_slice_eq1! { VecDeque<A>, &'b mut [B] }
// Pre-const-generics era: arrays need one impl per length, 0 through 32.
macro_rules! array_impls {
    ($($N: expr)+) => {
        $(
            __impl_slice_eq1! { VecDeque<A>, [B; $N] }
            __impl_slice_eq1! { VecDeque<A>, &'b [B; $N] }
            __impl_slice_eq1! { VecDeque<A>, &'b mut [B; $N] }
        )+
    }
}
array_impls! {
    0  1  2  3  4  5  6  7  8  9
    10 11 12 13 14 15 16 17 18 19
    20 21 22 23 24 25 26 27 28 29
    30 31 32
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<A: PartialOrd> PartialOrd for VecDeque<A> {
    fn partial_cmp(&self, other: &VecDeque<A>) -> Option<Ordering> {
        // Lexicographic comparison, delegated to the iterators.
        self.iter().partial_cmp(other.iter())
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<A: Ord> Ord for VecDeque<A> {
    #[inline]
    fn cmp(&self, other: &VecDeque<A>) -> Ordering {
        self.iter().cmp(other.iter())
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<A: Hash> Hash for VecDeque<A> {
    fn hash<H: Hasher>(&self, state: &mut H) {
        // Hash length plus both contiguous runs; equal deques hash
        // identically regardless of where the internal seam falls.
        self.len().hash(state);
        let (a, b) = self.as_slices();
        Hash::hash_slice(a, state);
        Hash::hash_slice(b, state);
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<A> Index<usize> for VecDeque<A> {
    type Output = A;
    // Panicking indexed access; `get` is the non-panicking variant.
    #[inline]
    fn index(&self, index: usize) -> &A {
        self.get(index).expect("Out of bounds access")
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<A> IndexMut<usize> for VecDeque<A> {
    #[inline]
    fn index_mut(&mut self, index: usize) -> &mut A {
        self.get_mut(index).expect("Out of bounds access")
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<A> FromIterator<A> for VecDeque<A> {
    fn from_iter<T: IntoIterator<Item = A>>(iter: T) -> VecDeque<A> {
        let iterator = iter.into_iter();
        // Pre-size using the iterator's lower bound to cut reallocations.
        let (lower, _) = iterator.size_hint();
        let mut deq = VecDeque::with_capacity(lower);
        deq.extend(iterator);
        deq
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> IntoIterator for VecDeque<T> {
    type Item = T;
    type IntoIter = IntoIter<T>;
    /// Consumes the list into a front-to-back iterator yielding elements by
    /// value.
    fn into_iter(self) -> IntoIter<T> {
        IntoIter { inner: self }
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T> IntoIterator for &'a VecDeque<T> {
    type Item = &'a T;
    type IntoIter = Iter<'a, T>;
    fn into_iter(self) -> Iter<'a, T> {
        self.iter()
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T> IntoIterator for &'a mut VecDeque<T> {
    type Item = &'a mut T;
    type IntoIter = IterMut<'a, T>;
    fn into_iter(self) -> IterMut<'a, T> {
        self.iter_mut()
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<A> Extend<A> for VecDeque<A> {
    fn extend<T: IntoIterator<Item = A>>(&mut self, iter: T) {
        // Each push grows the buffer on demand.
        for elt in iter {
            self.push_back(elt);
        }
    }
}
#[stable(feature = "extend_ref", since = "1.2.0")]
impl<'a, T: 'a + Copy> Extend<&'a T> for VecDeque<T> {
    fn extend<I: IntoIterator<Item = &'a T>>(&mut self, iter: I) {
        // Copy out of the references and reuse the by-value impl.
        self.extend(iter.into_iter().cloned());
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: fmt::Debug> fmt::Debug for VecDeque<T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // Renders as a list, e.g. `[1, 2, 3]`, via the by-ref iterator.
        f.debug_list().entries(self).finish()
    }
}
#[stable(feature = "vecdeque_vec_conversions", since = "1.10.0")]
impl<T> From<Vec<T>> for VecDeque<T> {
    fn from(mut other: Vec<T>) -> Self {
        unsafe {
            // Take ownership of the Vec's allocation without copying
            // the elements; `mem::forget` prevents a double free.
            let other_buf = other.as_mut_ptr();
            let mut buf = RawVec::from_raw_parts(other_buf, other.capacity());
            let len = other.len();
            mem::forget(other);
            // We need to extend the buf if it's not a power of two, too small
            // or doesn't have at least one free space
            // (the ring-buffer invariants: power-of-two capacity, at
            // least MINIMUM_CAPACITY + 1, and one slot always free).
            if !buf.cap().is_power_of_two() || (buf.cap() < (MINIMUM_CAPACITY + 1)) ||
               (buf.cap() == len) {
                let cap = cmp::max(buf.cap() + 1, MINIMUM_CAPACITY + 1).next_power_of_two();
                buf.reserve_exact(len, cap - len);
            }
            // The Vec's elements already sit contiguously at offset 0.
            VecDeque {
                tail: 0,
                head: len,
                buf,
            }
        }
    }
}
#[stable(feature = "vecdeque_vec_conversions", since = "1.10.0")]
impl<T> From<VecDeque<T>> for Vec<T> {
    fn from(other: VecDeque<T>) -> Self {
        unsafe {
            let buf = other.buf.ptr();
            let len = other.len();
            let tail = other.tail;
            let head = other.head;
            let cap = other.cap();
            // Need to move the ring to the front of the buffer, as vec will expect this.
            if other.is_contiguous() {
                // Single run: one shift to offset 0 suffices.
                ptr::copy(buf.offset(tail as isize), buf, len);
            } else {
                if (tail - head) >= cmp::min((cap - tail), head) {
                    // There is enough free space in the centre for the shortest block so we can
                    // do this in at most three copy moves.
                    if (cap - tail) > head {
                        // right hand block is the long one; move that enough for the left
                        ptr::copy(buf.offset(tail as isize),
                                  buf.offset((tail - head) as isize),
                                  cap - tail);
                        // copy left in the end
                        ptr::copy(buf, buf.offset((cap - head) as isize), head);
                        // shift the new thing to the start
                        ptr::copy(buf.offset((tail - head) as isize), buf, len);
                    } else {
                        // left hand block is the long one, we can do it in two!
                        ptr::copy(buf, buf.offset((cap - tail) as isize), head);
                        ptr::copy(buf.offset(tail as isize), buf, cap - tail);
                    }
                } else {
                    // Need to use N swaps to move the ring
                    // We can use the space at the end of the ring as a temp store
                    let mut left_edge: usize = 0;
                    let mut right_edge: usize = tail;
                    // The general problem looks like this
                    // GHIJKLM...ABCDEF - before any swaps
                    // ABCDEFM...GHIJKL - after 1 pass of swaps
                    // ABCDEFGHIJM...KL - swap until the left edge reaches the temp store
                    //                  - then restart the algorithm with a new (smaller) store
                    // Sometimes the temp store is reached when the right edge is at the end
                    // of the buffer - this means we've hit the right order with fewer swaps!
                    // E.g
                    // EF..ABCD
                    // ABCDEF.. - after four only swaps we've finished
                    while left_edge < len && right_edge != cap {
                        let mut right_offset = 0;
                        for i in left_edge..right_edge {
                            right_offset = (i - left_edge) % (cap - right_edge);
                            let src: isize = (right_edge + right_offset) as isize;
                            ptr::swap(buf.offset(i as isize), buf.offset(src));
                        }
                        let n_ops = right_edge - left_edge;
                        left_edge += n_ops;
                        right_edge += right_offset + 1;
                    }
                }
            }
            // Elements are now contiguous at offset 0; hand the raw
            // allocation to Vec and forget the deque so nothing is
            // dropped twice.
            let out = Vec::from_raw_parts(buf, len, cap);
            mem::forget(other);
            out
        }
    }
}
/// A place for insertion at the back of a `VecDeque`.
///
/// See [`VecDeque::place_back`](struct.VecDeque.html#method.place_back) for details.
#[must_use = "places do nothing unless written to with `<-` syntax"]
#[unstable(feature = "collection_placement",
reason = "struct name and placement protocol are subject to change",
issue = "30172")]
#[derive(Debug)]
pub struct PlaceBack<'a, T: 'a> {
vec_deque: &'a mut VecDeque<T>,
}
#[unstable(feature = "collection_placement",
reason = "placement protocol is subject to change",
issue = "30172")]
impl<'a, T> Placer<T> for PlaceBack<'a, T> {
type Place = PlaceBack<'a, T>;
fn make_place(self) -> Self {
self.vec_deque.grow_if_necessary();
self
}
}
#[unstable(feature = "collection_placement",
reason = "placement protocol is subject to change",
issue = "30172")]
impl<'a, T> Place<T> for PlaceBack<'a, T> {
fn pointer(&mut self) -> *mut T {
unsafe { self.vec_deque.ptr().offset(self.vec_deque.head as isize) }
}
}
#[unstable(feature = "collection_placement",
reason = "placement protocol is subject to change",
issue = "30172")]
impl<'a, T> InPlace<T> for PlaceBack<'a, T> {
type Owner = &'a mut T;
unsafe fn finalize(self) -> &'a mut T {
let head = self.vec_deque.head;
self.vec_deque.head = self.vec_deque.wrap_add(head, 1);
&mut *(self.vec_deque.ptr().offset(head as isize))
}
}
/// A place for insertion at the front of a `VecDeque`.
///
/// See [`VecDeque::place_front`](struct.VecDeque.html#method.place_front) for details.
#[must_use = "places do nothing unless written to with `<-` syntax"]
#[unstable(feature = "collection_placement",
reason = "struct name and placement protocol are subject to change",
issue = "30172")]
#[derive(Debug)]
pub struct PlaceFront<'a, T: 'a> {
vec_deque: &'a mut VecDeque<T>,
}
#[unstable(feature = "collection_placement",
reason = "placement protocol is subject to change",
issue = "30172")]
impl<'a, T> Placer<T> for PlaceFront<'a, T> {
type Place = PlaceFront<'a, T>;
fn make_place(self) -> Self {
self.vec_deque.grow_if_necessary();
self
}
}
#[unstable(feature = "collection_placement",
reason = "placement protocol is subject to change",
issue = "30172")]
impl<'a, T> Place<T> for PlaceFront<'a, T> {
fn pointer(&mut self) -> *mut T {
let tail = self.vec_deque.wrap_sub(self.vec_deque.tail, 1);
unsafe { self.vec_deque.ptr().offset(tail as isize) }
}
}
#[unstable(feature = "collection_placement",
reason = "placement protocol is subject to change",
issue = "30172")]
impl<'a, T> InPlace<T> for PlaceFront<'a, T> {
type Owner = &'a mut T;
unsafe fn finalize(self) -> &'a mut T {
self.vec_deque.tail = self.vec_deque.wrap_sub(self.vec_deque.tail, 1);
&mut *(self.vec_deque.ptr().offset(self.vec_deque.tail as isize))
}
}
#[cfg(test)]
mod tests {
use test;
use super::VecDeque;
#[bench]
fn bench_push_back_100(b: &mut test::Bencher) {
let mut deq = VecDeque::with_capacity(101);
b.iter(|| {
for i in 0..100 {
deq.push_back(i);
}
deq.head = 0;
deq.tail = 0;
})
}
#[bench]
fn bench_push_front_100(b: &mut test::Bencher) {
let mut deq = VecDeque::with_capacity(101);
b.iter(|| {
for i in 0..100 {
deq.push_front(i);
}
deq.head = 0;
deq.tail = 0;
})
}
#[bench]
fn bench_pop_back_100(b: &mut test::Bencher) {
let mut deq = VecDeque::<i32>::with_capacity(101);
b.iter(|| {
deq.head = 100;
deq.tail = 0;
while !deq.is_empty() {
test::black_box(deq.pop_back());
}
})
}
#[bench]
fn bench_pop_front_100(b: &mut test::Bencher) {
let mut deq = VecDeque::<i32>::with_capacity(101);
b.iter(|| {
deq.head = 100;
deq.tail = 0;
while !deq.is_empty() {
test::black_box(deq.pop_front());
}
})
}
#[test]
fn test_swap_front_back_remove() {
fn test(back: bool) {
// This test checks that every single combination of tail position and length is tested.
// Capacity 15 should be large enough to cover every case.
let mut tester = VecDeque::with_capacity(15);
let usable_cap = tester.capacity();
let final_len = usable_cap / 2;
for len in 0..final_len {
let expected: VecDeque<_> = if back {
(0..len).collect()
} else {
(0..len).rev().collect()
};
for tail_pos in 0..usable_cap {
tester.tail = tail_pos;
tester.head = tail_pos;
if back {
for i in 0..len * 2 {
tester.push_front(i);
}
for i in 0..len {
assert_eq!(tester.swap_remove_back(i), Some(len * 2 - 1 - i));
}
} else {
for i in 0..len * 2 {
tester.push_back(i);
}
for i in 0..len {
let idx = tester.len() - 1 - i;
assert_eq!(tester.swap_remove_front(idx), Some(len * 2 - 1 - i));
}
}
assert!(tester.tail < tester.cap());
assert!(tester.head < tester.cap());
assert_eq!(tester, expected);
}
}
}
test(true);
test(false);
}
#[test]
fn test_insert() {
// This test checks that every single combination of tail position, length, and
// insertion position is tested. Capacity 15 should be large enough to cover every case.
let mut tester = VecDeque::with_capacity(15);
// can't guarantee we got 15, so have to get what we got.
// 15 would be great, but we will definitely get 2^k - 1, for k >= 4, or else
// this test isn't covering what it wants to
let cap = tester.capacity();
// len is the length *after* insertion
for len in 1..cap {
// 0, 1, 2, .., len - 1
let expected = (0..).take(len).collect::<VecDeque<_>>();
for tail_pos in 0..cap {
for to_insert in 0..len {
tester.tail = tail_pos;
tester.head = tail_pos;
for i in 0..len {
if i != to_insert {
tester.push_back(i);
}
}
tester.insert(to_insert, to_insert);
assert!(tester.tail < tester.cap());
assert!(tester.head < tester.cap());
assert_eq!(tester, expected);
}
}
}
}
#[test]
fn test_remove() {
// This test checks that every single combination of tail position, length, and
// removal position is tested. Capacity 15 should be large enough to cover every case.
let mut tester = VecDeque::with_capacity(15);
// can't guarantee we got 15, so have to get what we got.
// 15 would be great, but we will definitely get 2^k - 1, for k >= 4, or else
// this test isn't covering what it wants to
let cap = tester.capacity();
// len is the length *after* removal
for len in 0..cap - 1 {
// 0, 1, 2, .., len - 1
let expected = (0..).take(len).collect::<VecDeque<_>>();
for tail_pos in 0..cap {
for to_remove in 0..len + 1 {
tester.tail = tail_pos;
tester.head = tail_pos;
for i in 0..len {
if i == to_remove {
tester.push_back(1234);
}
tester.push_back(i);
}
if to_remove == len {
tester.push_back(1234);
}
tester.remove(to_remove);
assert!(tester.tail < tester.cap());
assert!(tester.head < tester.cap());
assert_eq!(tester, expected);
}
}
}
}
#[test]
fn test_drain() {
let mut tester: VecDeque<usize> = VecDeque::with_capacity(7);
let cap = tester.capacity();
for len in 0..cap + 1 {
for tail in 0..cap + 1 {
for drain_start in 0..len + 1 {
for drain_end in drain_start..len + 1 {
tester.tail = tail;
tester.head = tail;
for i in 0..len {
tester.push_back(i);
}
// Check that we drain the correct values
let drained: VecDeque<_> = tester.drain(drain_start..drain_end).collect();
let drained_expected: VecDeque<_> = (drain_start..drain_end).collect();
assert_eq!(drained, drained_expected);
// We shouldn't have changed the capacity or made the
// head or tail out of bounds
assert_eq!(tester.capacity(), cap);
assert!(tester.tail < tester.cap());
assert!(tester.head < tester.cap());
// We should see the correct values in the VecDeque
let expected: VecDeque<_> = (0..drain_start)
.chain(drain_end..len)
.collect();
assert_eq!(expected, tester);
}
}
}
}
}
#[test]
fn test_shrink_to_fit() {
// This test checks that every single combination of head and tail position,
// is tested. Capacity 15 should be large enough to cover every case.
let mut tester = VecDeque::with_capacity(15);
// can't guarantee we got 15, so have to get what we got.
// 15 would be great, but we will definitely get 2^k - 1, for k >= 4, or else
// this test isn't covering what it wants to
let cap = tester.capacity();
tester.reserve(63);
let max_cap = tester.capacity();
for len in 0..cap + 1 {
// 0, 1, 2, .., len - 1
let expected = (0..).take(len).collect::<VecDeque<_>>();
for tail_pos in 0..max_cap + 1 {
tester.tail = tail_pos;
tester.head = tail_pos;
tester.reserve(63);
for i in 0..len {
tester.push_back(i);
}
tester.shrink_to_fit();
assert!(tester.capacity() <= cap);
assert!(tester.tail < tester.cap());
assert!(tester.head < tester.cap());
assert_eq!(tester, expected);
}
}
}
#[test]
fn test_split_off() {
// This test checks that every single combination of tail position, length, and
// split position is tested. Capacity 15 should be large enough to cover every case.
let mut tester = VecDeque::with_capacity(15);
// can't guarantee we got 15, so have to get what we got.
// 15 would be great, but we will definitely get 2^k - 1, for k >= 4, or else
// this test isn't covering what it wants to
let cap = tester.capacity();
// len is the length *before* splitting
for len in 0..cap {
// index to split at
for at in 0..len + 1 {
// 0, 1, 2, .., at - 1 (may be empty)
let expected_self = (0..).take(at).collect::<VecDeque<_>>();
// at, at + 1, .., len - 1 (may be empty)
let expected_other = (at..).take(len - at).collect::<VecDeque<_>>();
for tail_pos in 0..cap {
tester.tail = tail_pos;
tester.head = tail_pos;
for i in 0..len {
tester.push_back(i);
}
let result = tester.split_off(at);
assert!(tester.tail < tester.cap());
assert!(tester.head < tester.cap());
assert!(result.tail < result.cap());
assert!(result.head < result.cap());
assert_eq!(tester, expected_self);
assert_eq!(result, expected_other);
}
}
}
}
#[test]
fn test_from_vec() {
use super::super::vec::Vec;
for cap in 0..35 {
for len in 0..cap + 1 {
let mut vec = Vec::with_capacity(cap);
vec.extend(0..len);
let vd = VecDeque::from(vec.clone());
assert!(vd.cap().is_power_of_two());
assert_eq!(vd.len(), vec.len());
assert!(vd.into_iter().eq(vec));
}
}
}
#[test]
fn test_vec_from_vecdeque() {
use super::super::vec::Vec;
fn create_vec_and_test_convert(cap: usize, offset: usize, len: usize) {
let mut vd = VecDeque::with_capacity(cap);
for _ in 0..offset {
vd.push_back(0);
vd.pop_front();
}
vd.extend(0..len);
let vec: Vec<_> = Vec::from(vd.clone());
assert_eq!(vec.len(), vd.len());
assert!(vec.into_iter().eq(vd));
}
for cap_pwr in 0..7 {
// Make capacity as a (2^x)-1, so that the ring size is 2^x
let cap = (2i32.pow(cap_pwr) - 1) as usize;
// In these cases there is enough free space to solve it with copies
for len in 0..((cap + 1) / 2) {
// Test contiguous cases
for offset in 0..(cap - len) {
create_vec_and_test_convert(cap, offset, len)
}
// Test cases where block at end of buffer is bigger than block at start
for offset in (cap - len)..(cap - (len / 2)) {
create_vec_and_test_convert(cap, offset, len)
}
// Test cases where block at start of buffer is bigger than block at end
for offset in (cap - (len / 2))..cap {
create_vec_and_test_convert(cap, offset, len)
}
}
// Now there's not (necessarily) space to straighten the ring with simple copies,
// the ring will use swapping when:
// (cap + 1 - offset) > (cap + 1 - len) && (len - (cap + 1 - offset)) > (cap + 1 - len))
// right block size > free space && left block size > free space
for len in ((cap + 1) / 2)..cap {
// Test contiguous cases
for offset in 0..(cap - len) {
create_vec_and_test_convert(cap, offset, len)
}
// Test cases where block at end of buffer is bigger than block at start
for offset in (cap - len)..(cap - (len / 2)) {
create_vec_and_test_convert(cap, offset, len)
}
// Test cases where block at start of buffer is bigger than block at end
for offset in (cap - (len / 2))..cap {
create_vec_and_test_convert(cap, offset, len)
}
}
}
}
}
|
45_0
|
crossvul
|
rs
|
CWE-119
|
Improper Restriction of Operations within the Bounds of a Memory Buffer - Certain languages allow direct addressing of memory locations and do not automatically ensure that these locations are valid for the memory buffer that is being referenced.
|
php
|
<?hh
function nop($unused) { /* value sink */ }
function testApc($before) {
apc_delete("indep");
if (!apc_add("indep", $before)) {
echo "add failure. weird.\n";
exit(1);
}
# fetch.
$after = __hhvm_intrinsics\apc_fetch_no_check("indep");
var_dump($after);
if (!$after) {
echo "fetch failure. surprising.\n";
exit(2);
}
# cgetm.
foreach ($before as $k => $v) {
var_dump($after[$k]);
if ($after[$k] != $v) {
echo "fetched dubious values " . $after[$k] . " != " . "$v\n";
var_dump($after[$k]);
exit(3);
}
if (!isset($after[$k])) {
echo "expected key not set. devestating.\n";
var_dump($after[$k]);
exit(4);
}
}
# iterate over the APC array, too
foreach ($after as $k => $v) {
var_dump($after[$k]);
if ($after[$k] != $v) {
echo "incoherent APC iteration. lamentable.\n";
var_dump($v);
exit(5);
}
}
# setM
$after['newKey'] = array();
var_dump($after);
# unsetm
foreach($after as $k => $v) {
unset($after[$k]);
}
var_dump($after);
}
function testKeyTypes() {
apc_add("keysarray", array(2 => 'two', '3' => 'three'));
$arr = __hhvm_intrinsics\apc_fetch_no_check("keysarray");
foreach (array(2, 3, '2', '3') as $k) {
try { var_dump($arr[$k]); } catch (Exception $e) { echo $e->getMessage()."\n"; }
}
}
<<__EntryPoint>> function main(): void {
testApc(array(7, 4, 1776));
testApc(array("sv0", "sv1"));
testApc(array("sk0" => "sv0", "sk1" => "sv1"));
// Also check that foreign arrays work for indirect calls
apc_store('foo', array("a"));
$a = __hhvm_intrinsics\apc_fetch_no_check('foo');
$b = call_user_func_array(fun("strtoupper"), $a);
var_dump($b);
testKeyTypes();
}
|
<?hh
function nop($unused) { /* value sink */ }
function testApc($before) {
apc_delete("indep");
if (!apc_add("indep", $before)) {
echo "add failure. weird.\n";
exit(1);
}
# fetch.
$after = __hhvm_intrinsics\apc_fetch_no_check("indep");
var_dump($after);
if (!$after) {
echo "fetch failure. surprising.\n";
exit(2);
}
# cgetm.
foreach ($before as $k => $v) {
var_dump($after[$k]);
if ($after[$k] != $v) {
echo "fetched dubious values " . $after[$k] . " != " . "$v\n";
var_dump($after[$k]);
exit(3);
}
if (!isset($after[$k])) {
echo "expected key not set. devestating.\n";
var_dump($after[$k]);
exit(4);
}
}
# iterate over the APC array, too
foreach ($after as $k => $v) {
var_dump($after[$k]);
if ($after[$k] != $v) {
echo "incoherent APC iteration. lamentable.\n";
var_dump($v);
exit(5);
}
}
# setM
$after['newKey'] = array();
var_dump($after);
# unsetm
foreach($after as $k => $v) {
unset($after[$k]);
}
var_dump($after);
}
function testKeyTypes() {
apc_add("keysarray", array(2 => 'two', '3' => 'three'));
$arr = __hhvm_intrinsics\apc_fetch_no_check("keysarray");
foreach (array(2, 3, '2', '3') as $k) {
try { var_dump($arr[$k]); } catch (Exception $e) { echo $e->getMessage()."\n"; }
}
}
function testInvalidKeys() {
// Reject keys with null bytes
apc_add("bar\x00baz", 10);
apc_store("test\x00xyz", "hello");
apc_store(array("validkey" => "validvalue", "invalid\x00key" => "value"));
foreach (array('bar', 'test', 'validkey', 'invalid') as $k) {
var_dump(__hhvm_intrinsics\apc_fetch_no_check($k));
}
}
<<__EntryPoint>> function main(): void {
testApc(array(7, 4, 1776));
testApc(array("sv0", "sv1"));
testApc(array("sk0" => "sv0", "sk1" => "sv1"));
// Also check that foreign arrays work for indirect calls
apc_store('foo', array("a"));
$a = __hhvm_intrinsics\apc_fetch_no_check('foo');
$b = call_user_func_array(fun("strtoupper"), $a);
var_dump($b);
testKeyTypes();
testInvalidKeys();
}
|
851_1
|
crossvul
|
php
|
CWE-119
|
Improper Restriction of Operations within the Bounds of a Memory Buffer - Certain languages allow direct addressing of memory locations and do not automatically ensure that these locations are valid for the memory buffer that is being referenced.
|
c
|
/* Copyright (C) 2000-2012 by George Williams */
/*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
* EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef FONTFORGE_VIEWS_H
#define FONTFORGE_VIEWS_H
#include <fontforge-config.h>
#include "ttfinstrs.h"
#include "baseviews.h"
#include "ffglib.h"
#include "dlist.h"
#include "ggadget.h"
#include "search.h"
struct gfi_data;
struct contextchaindlg;
struct statemachinedlg;
extern struct cvshows {
int showfore, showback, showgrids, showhhints, showvhints, showdhints;
int showpoints, showfilled;
int showrulers;
int showrounds; /* 0=>no, 1=>auto, 2=>always */
int showmdx, showmdy; /* minimum distances x,y */
int showhmetrics, showvmetrics; /* show advance width, baseline, etc. */
int markextrema;
int markpoi; /* Points of inflection */
int showblues, showfamilyblues;
int showanchor;
int showcpinfo;
int showtabs; /* with the names of former glyphs */
int showsidebearings;
int showrefnames;
int snapoutlines;
int showalmosthvlines;
int showalmosthvcurves;
int hvoffset;
int checkselfintersects; /* Not really something shown, but convenient to keep it here */
int showdebugchanges; /* Changes the way changing rasters are displayed in tt debug mode */
int alwaysshowcontrolpoints; //< Always show the BCP even when their splinepoint is not selected
} CVShows;
extern struct bvshows {
int showfore, showoutline, showgrid;
int lastpixelsize;
} BVShows;
enum debug_wins { dw_registers=0x1, dw_stack=0x2, dw_storage=0x4, dw_points=0x8,
dw_cvt=0x10, dw_raster=0x20, dw_gloss=0x40 };
struct instrinfo {
int isel_pos;
int16 lheight,lpos;
char *scroll, *offset;
GWindow v;
GGadget *vsb;
int16 sbw;
int16 vheight, vwidth;
int16 lstopped;
int16 as, fh;
struct instrdata *instrdata;
GFont *gfont;
unsigned int showaddr: 1;
unsigned int showhex: 1;
unsigned int mousedown: 1;
void *userdata;
void (*selection_callback)(struct instrinfo *,int ip);
int (*bpcheck)(struct instrinfo *,int ip);
int (*handle_char)(struct instrinfo *,GEvent *e);
};
struct reflist {
RefChar *ref;
struct reflist *parent;
};
typedef struct debugview {
struct debugger_context *dc; /* Local to freetype.c */
GWindow dv, v;
/* Windows for twilight points, cvt, registers, stack, storage, stack gloss */
GWindow regs, stack, storage, points, cvt, raster, gloss; /* order matters */
GWindow points_v;
GGadget *cvtsb;
GGadget *pts_vsb;
GGadget *glosssb;
GGadget *storagesb;
GGadget *regsb;
GGadget *stacksb;
struct instrdata id;
struct instrinfo ii;
int dwidth, toph;
struct charview *cv;
double scalex, scaley;
int pts_head, cvt_offtop, gloss_offtop, storage_offtop, stack_offtop, reg_offtop;
int points_offtop;
int codeSize;
uint8 initialbytes[4];
struct reflist *active_refs;
int last_npoints;
int layer;
} DebugView;
/* The number of tabs allowed in the outline glyph view of former glyphs */
#define FORMER_MAX 10
enum dv_coderange { cr_none=0, cr_fpgm, cr_prep, cr_glyph }; /* cleverly chosen to match ttobjs.h */
struct freehand {
struct tracedata *head, *last; /* for the freehand tool */
SplinePointList *current_trace;
int ignore_wobble; /* Ignore wiggles smaller than this */
int skip_cnt;
};
enum expandedge { ee_none, ee_nw, ee_up, ee_ne, ee_right, ee_se, ee_down,
ee_sw, ee_left, ee_max };
enum { charviewtab_charselectedsz = 1024 };
typedef struct charviewtab
{
char charselected[ charviewtab_charselectedsz + 1 ];
char tablabeltxt[ charviewtab_charselectedsz + 1 ];
float xoff, yoff;
real scale;
} CharViewTab;
enum { charview_cvtabssz = 100 };
/* approximately BACK_LAYER_MAX / 32 */
#define BACK_LAYERS_VIEW_MAX 8
typedef struct charview {
CharViewBase b;
uint32 showback[BACK_LAYERS_VIEW_MAX];
unsigned int showfore:1;
unsigned int showgrids:1;
unsigned int showhhints:1;
unsigned int showvhints:1;
unsigned int showdhints:1;
unsigned int showpoints:1;
unsigned int alwaysshowcontrolpoints:1;
unsigned int showfilled:1;
unsigned int showrulers:1;
unsigned int showrounds:2; /* 0=>no, 1=>auto, 2=>always */
unsigned int showmdx:1;
unsigned int showmdy:1;
unsigned int showhmetrics:1;
unsigned int showvmetrics:1;
unsigned int showblues:1; /* 16 */
unsigned int showfamilyblues:1;
unsigned int showanchor:1;
unsigned int showpointnumbers:2;
unsigned int markextrema:1;
unsigned int markpoi:1;
unsigned int needsrasterize:1; /* Rasterization (of fill or fontview) needed on mouse up */
unsigned int recentchange:1; /* a change happened in the grids or background. don't need to rasterize */
unsigned int info_within: 1; /* cursor is within main window */
unsigned int back_img_out_of_date: 1; /* Force redraw of back image pixmap */
unsigned int cntrldown:1;
unsigned int joinvalid:1;
unsigned int widthsel:1;
unsigned int vwidthsel:1;
unsigned int lbearingsel:1;
unsigned int icsel:1;
unsigned int tah_sel:1;
unsigned int inactive:1; /* When in a search view (32) */
unsigned int show_ft_results: 1;
unsigned int show_ft_results_live_update : 1;
unsigned int coderange: 2; /* For the debugger */
unsigned int autonomous_ruler_w: 1;
unsigned int showcpinfo: 1;
unsigned int showtabs: 1;
unsigned int showsidebearings: 1;
unsigned int showing_spiro_pt_menu: 1;
unsigned int ruler_pressed: 1;
unsigned int ruler_pressedv: 1;
unsigned int showrefnames: 1;
unsigned int snapoutlines: 1;
unsigned int showalmosthvlines: 1;
unsigned int showalmosthvcurves: 1;
unsigned int checkselfintersects: 1;
unsigned int showdebugchanges: 1;
unsigned int inPreviewMode: 1;
unsigned int inDraggingComparisonOutline: 1;
unsigned int activeModifierControl: 1; //< Is control being held right now?
unsigned int activeModifierAlt: 1; //< Is alt being held right now?
unsigned int changedActiveGlyph: 1; //< Set in CVSwitchActiveSC() cleared in cvmouseup()
int hvoffset; /* for showalmosthvlines */
int layers_off_top;
GWindow gw, v;
GWindow hruler, vruler; /* Ruler pixmaps */
GGadget *vsb, *hsb, *mb, *tabs;
GFont *small, *normal;
GWindow icon;
GWindow ruler_w;
GWindow ruler_linger_w;
unichar_t ruler_linger_lines[40][80];
int ruler_linger_num_lines;
int num_ruler_intersections;
int allocated_ruler_intersections;
BasePoint *ruler_intersections;
int start_intersection_snapped;
int end_intersection_snapped;
GFont *rfont;
GTimer *pressed;
GWindow backimgs;
GIC *gic;
GIC *gwgic;
int width, height;
int mbh; //< menu bar height
int charselectorh; //< char selection input box height
int infoh; //< info bar height
int rulerh; //< ruler height
int16 sas, sfh, sdh, nas, nfh;
BasePoint info;
SplinePoint *info_sp;
Spline *info_spline;
real info_t;
GPoint e; /* mouse location */
GPoint olde;
BasePoint last_c;
BDFChar *filled;
GImage gi; /* used for fill bitmap only */
int enc;
EncMap *map_of_enc; /* Only use for comparison against fontview's map to see if our enc be valid */
/* Will not be updated when fontview is reencoded */
PressedOn p;
SplinePoint *lastselpt;
spiro_cp *lastselcp;
/*GWindow tools, layers;*/
int8 b1_tool, cb1_tool, b2_tool, cb2_tool; /* Button 3 does a popup */
int8 b1_tool_old; /* Used by mingw port */
int8 s1_tool, s2_tool, er_tool; /* Bindings for wacom stylus and eraser */
int8 showing_tool, pressed_tool, pressed_display, had_control, active_tool;
int8 spacebar_hold; /* spacebar is held down */
SplinePointList *active_spl;
SplinePoint *active_sp;
spiro_cp *active_cp;
IPoint handscroll_base;
uint16 rfh, ras;
BasePoint lastknife;
struct freehand freehand;
enum expandedge expandedge;
BasePoint expandorigin;
real expandwidth, expandheight;
SplinePointList *active_shape;
SplinePoint joinpos;
spiro_cp joincp;
SplineChar *template1, *template2;
#if HANYANG
struct jamodisplay *jamodisplay;
#endif
real oldwidth, oldvwidth;
real oldlbearing;
int16 oldic, oldtah;
#if _ModKeysAutoRepeat
GTimer *autorpt;
int keysym, oldstate;
int oldkeyx, oldkeyy;
GWindow oldkeyw;
#endif
PST *lcarets;
int16 nearcaret;
/* freetype results display */
int16 ft_dpi, ft_ppemy, ft_ppemx, ft_depth;
real ft_pointsizey, ft_pointsizex;
struct freetype_raster *raster, *oldraster;
DebugView *dv;
uint32 mmvisible;
char *former_names[FORMER_MAX];
int former_cnt;
AnchorPoint *apmine, *apmatch;
SplineChar *apsc;
int guide_pos;
struct qg_data *qg;
int16 note_x, note_y;
struct dlistnode* pointInfoDialogs;
GGadget* charselector; //< let the user type in more than one char to view at once.
GGadget* charselectorNext; //< move to next word in charselector
GGadget* charselectorPrev; //< move to prev word in charselector
int charselectoridx;
SplineChar* additionalCharsToShow [51]; //< additionalCharsToShowLimit + 1 in size
int additionalCharsToShowActiveIndex;
CharViewTab cvtabs[ charview_cvtabssz+1 ];
int oldtabnum;
} CharView;
typedef struct bitmapview {
BDFChar *bc;
BDFFont *bdf;
struct fontview *fv;
EncMap *map_of_enc;
int enc;
GWindow gw, v;
GGadget *vsb, *hsb, *mb;
GGadget *recalc;
GFont *small;
int xoff, yoff;
int width, height;
int infoh, mbh;
int scale;
real scscale;
struct bitmapview *next;
unsigned int showfore:1;
unsigned int showoutline:1;
unsigned int showgrid:1;
unsigned int cntrldown:1;
unsigned int recentchange:1;
unsigned int clearing:1;
unsigned int shades_hidden:1;
unsigned int shades_down:1;
/*GWindow tools, layers;*/
int8 b1_tool, cb1_tool, b2_tool, cb2_tool; /* Button 3 does a popup */
int8 s1_tool, s2_tool, er_tool; /* Bindings for wacom stylus and eraser */
int8 showing_tool, pressed_tool, pressed_display, had_control, active_tool;
int pressed_x, pressed_y;
int info_x, info_y;
int event_x, event_y;
int16 sas, sfh;
#if _ModKeysAutoRepeat
GTimer *autorpt;
int keysym, oldstate;
#endif
int color; /* for greyscale fonts (between 0,255) */
int color_under_cursor;
} BitmapView;
struct aplist { AnchorPoint *ap; int connected_to, selected; struct aplist *next; };
enum mv_grids { mv_hidegrid, mv_showgrid, mv_partialgrid, mv_hidemovinggrid };
enum mv_type { mv_kernonly, mv_widthonly, mv_kernwidth };
struct metricchar {
int16 dx, dwidth; /* position and width of the displayed char */
int16 dy, dheight; /* displayed info for vertical metrics */
int xoff, yoff;
int16 mx, mwidth; /* position and width of the text underneath */
int16 kernafter;
unsigned int selected: 1;
GGadget *width, *lbearing, *rbearing, *kern, *name;
GGadget* updownkparray[10]; /* Cherry picked elements from width...kern allowing up/down key navigation */
};
typedef struct metricsview {
struct fontview *fv;
SplineFont *sf;
int pixelsize; /* If the user has manually requested a pixelsize */
/* then rasterize at that size no matter how large */
/* the font is zoomed. For non-user requesed sizes */
/* this is the pixelsize * zoom-factor */
BDFFont *bdf; /* We can also see metric info on a bitmap font */
BDFFont *show; /* Or the rasterized version of the outline font */
GWindow gw, v;
GFont *font;
GGadget *hsb, *vsb, *mb, *text, *textPrev, *textNext, *script, *features, *subtable_list;
GGadget *namelab, *widthlab, *lbearinglab, *rbearinglab, *kernlab;
int16 xstart;
int16 width, height, dwidth;
int16 vwidth, vheight;
int16 mbh,sbh;
int16 topend; /* y value of the end of the region containing the text field */
int16 displayend; /* y value of the end of the region showing filled characters */
int16 fh, as;
int16 cmax, clen;
SplineChar **chars; /* Character input stream */
struct opentype_str *glyphs;/* after going through the various gsub/gpos transformations */
struct metricchar *perchar; /* One for each glyph above */
SplineChar **sstr; /* Character input stream */
int16 mwidth, mbase;
int16 glyphcnt, max;
int16 pressed_x, pressed_y;
int16 activeoff;
int xoff, coff, yoff;
struct metricsview *next;
unsigned int right_to_left: 1;
unsigned int pressed: 1;
unsigned int pressedwidth: 1;
unsigned int pressedkern: 1;
unsigned int showgrid: 2;
unsigned int antialias: 1;
unsigned int vertical: 1;
unsigned int type: 2; /* enum mv_type */
unsigned int usehinting: 1; /* should the hints be used during the render */
unsigned int pixelsize_set_by_window;
int xp, yp, ap_owner;
BasePoint ap_start;
int cursor;
int scale_index;
struct lookup_subtable *cur_subtable;
GTextInfo *scriptlangs;
int word_index;
int layer;
int fake_unicode_base;
GIC *gwgic;
int ptsize, dpi;
int ybaseline;
int oldscript, oldlang;
} MetricsView;
enum fv_metrics { fvm_baseline=1, fvm_origin=2, fvm_advanceat=4, fvm_advanceto=8 };
typedef struct fontview {
FontViewBase b;
BDFFont *show, *filled;
GWindow gw, v;
GFont **fontset;
GGadget *vsb, *mb;
GTimer *pressed;
GTimer *resize;
GEvent resize_event;
GIC *gic;
GIC *gwgic;
int width, height; /* of v */
int16 infoh,mbh;
int16 lab_height, lab_as;
int16 colcnt, rowcnt; /* of display window */
int32 rowoff, rowltot; /* Can be really big in full unicode */
int16 cbw,cbh; /* width/height of a character box */
int pressed_pos, end_pos;
unsigned int antialias:1;
unsigned int bbsized:1; /* displayed bitmap should be scaled by bounding box rather than emsize */
unsigned int wasonlybitmaps:1;
/*unsigned int refstate: 3;*/ /* 0x1 => paste orig of all non exist refs, 0x2=>don't, 0x3 => don't warn about non-exist refs with no source font */
unsigned int touched: 1;
unsigned int showhmetrics: 4;
unsigned int showvmetrics: 4;
unsigned int drag_and_drop: 1;
unsigned int has_dd_no_cursor: 1;
unsigned int any_dd_events_sent: 1;
unsigned int resize_expected: 1;
/* Some window managers do not honour my resize requests (if window is*/
/* maximized for example), but we depend on the resize request to */
/* fix up the window. We do get a configure notify, but the window */
/* stays the same size, so kludge things */
unsigned int glyphlabel: 2;
unsigned int notactive:1; /* When embedded in a dlg */
int16 magnify;
int16 user_requested_magnify;
struct searchview *sv;
SplineChar *sc_near_top;
int sel_index;
struct lookup_subtable *cur_subtable;
struct qg_data *qg;
GPid pid_webfontserver;
} FontView;
/* Hit-testing parameters used when interpreting a mouse event in a CharView:
 * fudge rectangles around the pointer plus flags saying whether control
 * points participate in the search. */
typedef struct findsel {
    GEvent *e;			/* the mouse event being resolved */
    real fudge;		/* One pixel fudge factor */
    real xl,xh, yl, yh;		/* One pixel fudge factor */
    real c_xl,c_xh, c_yl, c_yh;	/* fudge rectangle for control points, larger than above if alt is depressed */
    unsigned int select_controls: 1;	/* notice control points */
    unsigned int seek_controls: 1;	/* notice control points before base points */
    unsigned int all_controls: 1;	/* notice control points even if the base points aren't selected (in truetype point numbering mode where all cps are visible) */
    unsigned int alwaysshowcontrolpoints:1; /* if the BCP are forced on, then we want the selection code paths
					     * to also know that so the user can drag the BCP of a non selected splinepoint */
    real scale;			/* view scale factor -- presumably pixels per em unit, TODO confirm */
    PressedOn *p;		/* receives what was hit */
} FindSel;
/* The find/replace-outline dialog: two embedded CharViews (search pattern
 * and replacement) built around a dummy font so the generic glyph-editing
 * code can be reused inside the dialog. */
typedef struct searchview {
    struct cvcontainer base;
    FontView dummy_fv;		/* stand-in view backing the embedded CharViews */
    SplineFont dummy_sf;
    LayerInfo layerinfo[2];
    SplineChar *chars[2];	/* the search and replace glyphs */
    EncMap dummy_map;
    int32 map[2], backmap[2];
    uint8 sel[2];
    CharView cv_srch, cv_rpl;	/* embedded editors: search pattern / replacement */
    CharView *lastcv;
/* ****** */
    GWindow gw;
    GGadget *mb;
    GFont *plain, *bold;
    int mbh;			/* menu bar height */
    int fh, as;			/* font height and ascent for dialog text */
    int rpl_x, cv_y;
    int cv_width, cv_height;
    short button_height, button_width;
/* ****** */
    SearchData sd;		/* the actual search state */
    unsigned int showsfindnext: 1;
    unsigned int findenabled: 1;
    unsigned int rplallenabled: 1;
    unsigned int rplenabled: 1;
    unsigned int isvisible: 1;
} SearchView;
/* Math-kern dialog: four embedded CharViews editing the per-corner
 * (top/bottom x left/right) math kerning of a glyph. */
typedef struct mathkernview {
    struct cvcontainer base;
    FontView dummy_fv;		/* stand-in view backing the embedded CharViews */
    SplineFont dummy_sf;
    LayerInfo layerinfo[2];
    SplineChar sc_topright, sc_topleft, sc_bottomright, sc_bottomleft;
    SplineChar *chars[4];
    EncMap dummy_map;
    int32 map[4], backmap[4];
    uint8 sel[4];
    CharView cv_topright, cv_topleft, cv_bottomright, cv_bottomleft;
    CharView *lastcv;
/* ****** */
    GWindow gw;
    GWindow cvparent_w;
    GGadget *mb;
    GFont *plain, *bold;
    int mbh;
    int fh, as;
    int mid_space, cv_y;
    int cv_width, cv_height;
    short button_height, button_width;
/* ****** */
    SplineChar *cursc;		/* glyph whose math kerning is being edited */
    int def_layer;
    struct mathkern *orig_mathkern;	/* saved copy for cancel/undo */
    uint8 saved_mathkern;	/* Can't just check if orig is non-NULL, because NULL is a perfectly valid initial state */
    uint8 last_aspect;
    uint8 done;
} MathKernDlg;
# ifdef FONTFORGE_CONFIG_TILEPATH
/* Tile-path dialog: four embedded CharViews editing the first/medial/final/
 * isolated tiles used when tiling a pattern along a path. */
typedef struct tilepathdlg {
    struct cvcontainer base;
    FontView dummy_fv;		/* stand-in view backing the embedded CharViews */
    SplineFont dummy_sf;
    LayerInfo layerinfo[2];
    SplineChar sc_first, sc_medial, sc_final, sc_isolated;
    SplineChar *chars[4];
    EncMap dummy_map;
    int32 map[4], backmap[4];
    uint8 sel[4];
    CharView cv_first, cv_medial, cv_final, cv_isolated;
    CharView *lastcv;
/* ****** */
    GWindow gw;
    GGadget *mb;
    GFont *plain, *bold;
    int mbh;
    int fh, as;
    int mid_space, cv_y;
    int cv_width, cv_height;
/* ****** */
    struct tiledata *td;	/* the tile set being produced */
    SplineFont *base_sf;
    uint8 done, oked;		/* dialog-finished / OK-pressed flags */
} TilePathDlg;
extern void TPDCharViewInits(TilePathDlg *tpd, int cid);
extern void PTDCharViewInits(TilePathDlg *tpd, int cid);
#endif /* Tile Path */
/* Gradient editor dialog: one embedded CharView in which the gradient's
 * vector/stops are manipulated graphically. */
typedef struct gradientdlg {
    struct cvcontainer base;
    FontView dummy_fv;		/* stand-in view backing the embedded CharView */
    SplineFont dummy_sf;
    LayerInfo layerinfo[2];
    SplineChar sc_grad;
    SplineChar *chars[1];
    EncMap dummy_map;
    int32 map[1], backmap[1];
    uint8 sel[1];
    CharView cv_grad;
/* ****** */
    GWindow gw;
    GGadget *mb;
    GFont *plain, *bold;
    int mbh;
    int fh, as;
    int mid_space, cv_y;
    int cv_width, cv_height;
/* ****** */
    uint8 done, oked;		/* dialog-finished / OK-pressed flags */
    struct gradient *active;	/* gradient being edited */
} GradientDlg;
extern void GDDCharViewInits(GradientDlg *gdd,int cid);
/* Expand-stroke / freehand-stroke dialog.  Contains an embedded CharView
 * previewing the stroked path plus the StrokeInfo being edited. */
typedef struct strokedlg {
    struct cvcontainer base;
    FontView dummy_fv;		/* stand-in view backing the embedded CharView */
    SplineFont dummy_sf;
    LayerInfo layerinfo[2];
    SplineChar sc_stroke;
    SplineChar *chars[1];
    EncMap dummy_map;
    int32 map[1], backmap[1];
    uint8 sel[1];
    CharView cv_stroke;		/* embedded preview editor */
    int cv_width, cv_height;
    GGadget *mb;
    int mbh;
    SplineSet *old_poly;
/* ****** */
    int done;
    GWindow gw;
    CharView *cv;		/* target char view, if invoked from one -- TODO confirm may be NULL */
    FontView *fv;		/* target font view, if invoked from one */
    SplineFont *sf;
    void (*strokeit)(void *,StrokeInfo *,int);	/* callback that applies the stroke */
    StrokeInfo *si;		/* the stroke parameters being edited */
    GRect r1, r2;
    int up[2];
    int dontexpand;
} StrokeDlg;
extern void StrokeCharViewInits(StrokeDlg *sd,int cid);
struct lksubinfo {
struct lookup_subtable *subtable;
unsigned int deleted: 1;
unsigned int new: 1;
unsigned int selected: 1;
unsigned int moved: 1;
};
/* Per-lookup bookkeeping for the lookups pane of the FontInfo dialog. */
struct lkinfo {
    OTLookup *lookup;
    unsigned int open: 1;	/* subtable list expanded in the UI */
    unsigned int deleted: 1;
    unsigned int new: 1;	/* NOTE(review): "new" is a C++ keyword; this header cannot be included from C++ as-is */
    unsigned int selected: 1;
    unsigned int moved: 1;
    int16 subtable_cnt, subtable_max;	/* used / allocated entries in subtables[] */
    struct lksubinfo *subtables;
};
/* Scrollable list of lookups (one per GSUB/GPOS pane) shown in FontInfo. */
struct lkdata {
    int cnt, max;		/* used / allocated entries in all[] */
    int off_top, off_left;	/* scroll offsets */
    struct lkinfo *all;
};
/* Remembers which CharView is displaying an anchored glyph so the FontInfo
 * dialog can refresh/reuse it (see gfi_data.anchor_shows). */
struct anchor_shows {
    CharView *cv;
    SplineChar *sc;
    int restart;
};
/* State for the Font Info dialog. */
struct gfi_data { /* FontInfo */
    SplineFont *sf;		/* font being edited */
    int def_layer;
    GWindow gw;
    int tn_active;
    int private_aspect, ttfv_aspect, tn_aspect, tx_aspect, unicode_aspect;	/* tab (aspect) indices */
    int old_sel, old_aspect, old_lang, old_strid;
    int ttf_set, names_set, tex_set;
    int langlocalecode;	/* MS code for the current locale */
    unsigned int family_untitled: 1;
    unsigned int human_untitled: 1;
    unsigned int done: 1;
    unsigned int mpdone: 1;
    unsigned int lk_drag_and_drop: 1;	/* lookup-list drag-and-drop in progress */
    unsigned int lk_dropablecursor: 1;
    struct anchor_shows anchor_shows[2];
    struct texdata texdata;
    GFont *font;
    int as, fh;			/* ascent and height of `font' */
    struct lkdata tables[2];	/* the two lookup lists -- presumably GSUB/GPOS, TODO confirm order */
    int lkwidth, lkheight;
    int first_sel_lookup, first_sel_subtable;
    int last_panose_family;
};
/* Kern-format dialog: embeds two FontViews so the user can choose glyph
 * sets; acts as an fvcontainer (see the inheritance note below). */
struct kf_dlg /* : fvcontainer */ {
    struct fvcontainer base;
    struct lookup_subtable *sub;
    GWindow gw, dw;
    GFont *plain, *bold;
    int fh, as;			/* font height and ascent for dialog text */
    GGadget *mb, *guts, *topbox;
    int mbh, label2_y, infoh;
    SplineFont *sf;
    int def_layer;
    struct kf_results *results;
    int done;
    FontView *active;		/* whichever embedded view currently has focus */
    FontView *first_fv;
    FontView *second_fv;
};
/* Family-generation mode for _FVMenuGenerate(): single font, Mac family, or TTC. */
enum genfam { gf_none, gf_macfamily, gf_ttc };
extern void FVMarkHintsOutOfDate(SplineChar *sc);
extern void FVRefreshChar(FontView *fv,int gid);
extern void _FVMenuOpen(FontView *fv);
extern int _FVMenuSave(FontView *fv);
extern int _FVMenuSaveAs(FontView *fv);
extern int _FVMenuGenerate(FontView *fv,int family);
extern void _FVCloseWindows(FontView *fv);
extern char *GetPostScriptFontName(char *defdir,int mult);
extern void MergeKernInfo(SplineFont *sf,EncMap *map);
extern int SFGenerateFont(SplineFont *sf,int layer, int family,EncMap *map);
extern void NonLinearDlg(FontView *fv,struct charview *cv);
extern void FVChangeChar(FontView *fv,int encoding);
extern void FVMergeFonts(FontView *fv);
extern void FVInterpolateFonts(FontView *fv);
extern void FVDeselectAll(FontView *fv);
extern void FVAutoWidth2(FontView *fv);
/*extern void FVAutoKern(FontView *fv);*/
/*extern void FVAutoWidth(FontView *fv);*/
extern void SC_MarkInstrDlgAsChanged(SplineChar *sc);
extern void PythonUI_Init(void);
extern void SCStroke(SplineChar *sc);
extern void PfaEditSetFallback(void);
extern void RecentFilesRemember(char *filename);
extern void LastFonts_Save(void);
struct debugger_context;
extern void DebuggerTerminate(struct debugger_context *dc);
extern void DebuggerReset(struct debugger_context *dc,real pointsizey, real pointsizex,int dpi,int dbg_fpgm, int is_bitmap);
extern struct debugger_context *DebuggerCreate(SplineChar *sc,int layer,real pointsizey,real pointsizex,int dpi,int dbg_fpgm, int is_bitmap);
enum debug_gotype { dgt_continue, dgt_step, dgt_next, dgt_stepout };
extern void DebuggerGo(struct debugger_context *dc,enum debug_gotype,DebugView *dv);
extern struct TT_ExecContextRec_ *DebuggerGetEContext(struct debugger_context *dc);
extern void DebuggerToggleBp(struct debugger_context *dc,int range,int ip);
extern int DebuggerBpCheck(struct debugger_context *dc,int range,int ip);
extern void DebuggerSetWatches(struct debugger_context *dc,int n, uint8 *w);
extern uint8 *DebuggerGetWatches(struct debugger_context *dc, int *n);
extern void DebuggerSetWatchStores(struct debugger_context *dc,int n, uint8 *w);
extern uint8 *DebuggerGetWatchStores(struct debugger_context *dc, int *n);
extern int DebuggerIsStorageSet(struct debugger_context *dc, int index);
extern void DebuggerSetWatchCvts(struct debugger_context *dc,int n, uint8 *w);
extern uint8 *DebuggerGetWatchCvts(struct debugger_context *dc, int *n);
extern int DebuggingFpgm(struct debugger_context *dc);
extern void PrintFFDlg(FontView *fv,SplineChar *sc,MetricsView *mv);
extern void PrintWindowClose(void);
extern void InsertTextDlg(CharView *cv);
extern char *Kern2Text(SplineChar *other,KernPair *kp,int isv);
extern char *PST2Text(PST *pst,SplineFont *sf);
void EmboldenDlg(FontView *fv, CharView *cv);
void CondenseExtendDlg(FontView *fv, CharView *cv);
void ObliqueDlg(FontView *fv, CharView *cv);
void GlyphChangeDlg(FontView *fv, CharView *cv, enum glyphchange_type gc);
void ItalicDlg(FontView *fv, CharView *cv);
void ChangeXHeightDlg(FontView *fv,CharView *cv);
extern int FVParseSelectByPST(FontView *fv,struct lookup_subtable *sub,
int search_type);
extern void DropChars2Text(GWindow gw, GGadget *glyphs,GEvent *event);
extern void FVReplaceOutlineWithReference( FontView *fv, double fudge );
extern void SVDestroy(struct searchview *sv);
extern int SLICount(SplineFont *sf);
extern unichar_t *ClassName(const char *name,uint32 feature_tag,
uint16 flags, int script_lang_index, int merge_with, int act_type,
int macfeature,SplineFont *sf);
extern unichar_t *DecomposeClassName(const unichar_t *clsnm, unichar_t **name,
uint32 *feature_tag, int *macfeature,
uint16 *flags, uint16 *script_lang_index,int *merge_with,int *act_type,
SplineFont *sf);
extern PST *AddSubs(PST *last,uint32 tag,char *name,uint16 flags,
uint16 sli,SplineChar *sc);
extern void FVSetUIToMatch(FontView *destfv,FontView *srcfv);
extern void FVScrollToChar(FontView *fv,int i);
extern void FVRegenChar(FontView *fv,SplineChar *sc);
extern FontView *FontNew(void);
extern void _MenuWarnings(GWindow gw,struct gmenuitem *mi,GEvent *e);
extern void MenuPrefs(GWindow base,struct gmenuitem *mi,GEvent *e);
extern void MenuXRes(GWindow base,struct gmenuitem *mi,GEvent *e);
extern void MenuSaveAll(GWindow base,struct gmenuitem *mi,GEvent *e);
extern void MenuExit(GWindow base,struct gmenuitem *mi,GEvent *e);
extern void MenuHelp(GWindow base,struct gmenuitem *mi,GEvent *e);
extern void MenuIndex(GWindow base,struct gmenuitem *mi,GEvent *e);
extern void MenuAbout(GWindow base,struct gmenuitem *mi,GEvent *e);
extern void MenuLicense(GWindow base,struct gmenuitem *mi,GEvent *e);
extern void MenuNew(GWindow gw,struct gmenuitem *mi,GEvent *e);
extern void WindowMenuBuild(GWindow base,struct gmenuitem *mi,GEvent *);
extern void MenuRecentBuild(GWindow base,struct gmenuitem *mi,GEvent *);
extern void MenuScriptsBuild(GWindow base,struct gmenuitem *mi,GEvent *);
extern void mb2FreeGetText(GMenuItem2 *mb);
extern void mb2DoGetText(GMenuItem2 *mb);
extern void mbFreeGetText(GMenuItem *mb);
extern void mbDoGetText(GMenuItem *mb);
extern int RecentFilesAny(void);
extern void _aplistbuild(struct gmenuitem *mi,SplineFont *sf,
void (*func)(GWindow,struct gmenuitem *,GEvent *));
extern int32 *ParseBitmapSizes(GGadget *g,char *msg,int *err);
extern GTextInfo *AddMacFeatures(GTextInfo *opentype,enum possub_type type,SplineFont *sf);
extern unichar_t *AskNameTag(char *title,unichar_t *def,uint32 def_tag,uint16 flags,
int script_lang_index, enum possub_type type, SplineFont *sf, SplineChar *default_script,
int merge_with,int act_type);
extern unichar_t *ShowScripts(unichar_t *usedef);
extern GTextInfo *SFLangList(SplineFont *sf,int addfinal,SplineChar *default_script);
extern GTextInfo **SFLangArray(SplineFont *sf,int addfinal);
extern int ScriptLangList(SplineFont *sf,GGadget *list,int sli);
extern void GListDelSelected(GGadget *list);
extern void GListMoveSelected(GGadget *list,int offset);
extern GTextInfo *GListChangeLine(GGadget *list,int pos, const unichar_t *line);
extern GTextInfo *GListAppendLine(GGadget *list,const unichar_t *line,int select);
extern GTextInfo *GListChangeLine8(GGadget *list,int pos, const char *line);
extern GTextInfo *GListAppendLine8(GGadget *list,const char *line,int select);
extern void CharInfoInit(void);
extern void SCLigCaretCheck(SplineChar *sc,int clean);
extern char *DevTab_Dlg(GGadget *g, int r, int c);
extern int DeviceTableOK(char *dvstr, int *_low, int *_high);
extern void VRDevTabParse(struct vr *vr,struct matrix_data *md);
extern DeviceTable *DeviceTableParse(DeviceTable *dv,char *dvstr);
extern void DevTabToString(char **str,DeviceTable *adjust);
extern void ValDevTabToStrings(struct matrix_data *mds,int first_offset,ValDevTab *adjust);
extern void KpMDParse(SplineChar *sc,struct lookup_subtable *sub,
struct matrix_data *possub,int rows,int cols,int i);
extern void GFI_LookupEnableButtons(struct gfi_data *gfi, int isgpos);
extern void GFI_LookupScrollbars(struct gfi_data *gfi, int isgpos, int refresh);
extern void FontInfo(SplineFont *sf,int layer,int aspect,int sync);
extern void FontInfoDestroy(SplineFont *sf);
extern void FontMenuFontInfo(void *fv);
extern struct enc *MakeEncoding(SplineFont *sf, EncMap *map);
extern void LoadEncodingFile(void);
extern void RemoveEncoding(void);
extern void SFPrivateInfo(SplineFont *sf);
extern void FVDelay(FontView *fv,void (*func)(FontView *));
extern void GFI_FinishContextNew(struct gfi_data *d,FPST *fpst, int success);
extern void SCPreparePopup(GWindow gw,SplineChar *sc, struct remap *remap, int enc, int actualuni);
/* Flags selecting how CVDrawSplineSet*() renders an outline. */
enum outlinesfm_flags {
    sfm_stroke=0x1,
    sfm_fill=0x2,
    sfm_nothing=0x4,
    sfm_stroke_trans = 0x8,
    sfm_clip = 0x16	/* NOTE(review): breaks the power-of-two pattern; 0x16 (0b10110)
			 * overlaps the sfm_fill and sfm_nothing bits, so `flags & sfm_clip`
			 * also fires for those.  Probably intended as 0x10 — left unchanged
			 * because callers may depend on the literal value; verify and fix
			 * project-wide. */
};
extern void CVDrawSplineSetSpecialized( CharView *cv, GWindow pixmap, SplinePointList *set,
Color fg, int dopoints, DRect *clip,
enum outlinesfm_flags strokeFillMode,
Color AlphaChannelOverride );
extern void CVDrawSplineSet(CharView *cv, GWindow pixmap, SplinePointList *set,
Color fg, int dopoints, DRect *clip );
extern void CVDrawSplineSetOutlineOnly(CharView *cv, GWindow pixmap, SplinePointList *set,
Color fg, int dopoints, DRect *clip, enum outlinesfm_flags strokeFillMode );
extern GWindow CVMakeTools(CharView *cv);
extern GWindow CVMakeLayers(CharView *cv);
extern GWindow BVMakeTools(BitmapView *bv);
extern GWindow BVMakeLayers(BitmapView *bv);
extern void CVSetLayer(CharView *cv,int layer);
extern int CVPaletteMnemonicCheck(GEvent *event);
extern int TrueCharState(GEvent *event);
extern void CVToolsPopup(CharView *cv, GEvent *event);
extern void BVToolsPopup(BitmapView *bv, GEvent *event);
extern real CVRoundRectRadius(void);
extern int CVRectElipseCenter(void);
extern void CVRectEllipsePosDlg(CharView *cv);
extern real CVStarRatio(void);
extern int CVPolyStarPoints(void);
extern StrokeInfo *CVFreeHandInfo(void);
extern void BVToolsSetCursor(BitmapView *bv, int state,char *device);
extern void CVToolsSetCursor(CharView *cv, int state,char *device);
extern int CVPaletteIsVisible(CharView *cv,int which);
extern void CVPaletteSetVisible(CharView *cv,int which,int visible);
extern void CVPalettesRaise(CharView *cv);
extern void CVLayersSet(CharView *cv);
extern void _CVPaletteActivate(CharView *cv,int force,int docking_changed);
extern void CVPaletteActivate(CharView *cv);
extern void CV_LayerPaletteCheck(SplineFont *sf);
extern void CVPalettesHideIfMine(CharView *cv);
extern int BVPaletteIsVisible(BitmapView *bv,int which);
extern void BVPaletteSetVisible(BitmapView *bv,int which,int visible);
extern void BVPaletteActivate(BitmapView *bv);
extern void BVPalettesHideIfMine(BitmapView *bv);
extern void BVPaletteColorChange(BitmapView *bv);
extern void BVPaletteColorUnderChange(BitmapView *bv,int color);
extern void BVPaletteChangedChar(BitmapView *bv);
extern void CVPaletteDeactivate(void);
extern void PalettesChangeDocking(void);
extern int CVPalettesWidth(void);
extern int BVPalettesWidth(void);
extern int CVInSpiro( CharView *cv );
extern void CVDoTransform(CharView *cv, enum cvtools cvt );
// apply transform to specified layer
extern void CVTransFuncLayer(CharView *cv,Layer *ly,real transform[6], enum fvtrans_flags flags);
// apply transform to the current layer only
extern void CVTransFunc(CharView *cv,real transform[6],enum fvtrans_flags);
// apply transform to all layers
extern void CVTransFuncAllLayers(CharView *cv,real transform[6], enum fvtrans_flags flags);
enum transdlg_flags { tdf_enableback=0x1, tdf_enablekerns=0x2,
tdf_defaultkerns=0x4, tdf_addapply=0x8 };
extern void TransformDlgCreate(void *data,void (*transfunc)(void *,real *,int,BVTFunc *,enum fvtrans_flags),
int (*getorigin)(void *,BasePoint *,int), enum transdlg_flags flags,
enum cvtools cvt);
extern void BitmapDlg(FontView *fv,SplineChar *sc, int isavail);
extern int SimplifyDlg(SplineFont *sf,struct simplifyinfo *smpl);
extern void CVReviewHints(CharView *cv);
extern void CVCreateHint(CharView *cv,int ishstem,int preserveundoes);
extern void SCRemoveSelectedMinimumDistances(SplineChar *sc,int inx);
extern int CVExport(CharView *cv);
extern int BVExport(BitmapView *bv);
extern void DrawAnchorPoint(GWindow pixmap,int x, int y,int selected);
extern void DefaultY(GRect *pos);
extern void CVDrawRubberRect(GWindow pixmap, CharView *cv);
extern void CVInfoDraw(CharView *cv, GWindow pixmap );
extern void CVChar(CharView *cv, GEvent *event );
extern void PI_ShowHints(SplineChar *sc, GGadget *list, int set);
extern GTextInfo *SCHintList(SplineChar *sc,HintMask *);
extern void CVResize(CharView *cv );
extern CharView *CharViewCreate(SplineChar *sc,FontView *fv,int enc);
extern void CharViewFinishNonStatic();
/**
* Extended version of CharViewCreate() which allows a window to be created but
* not displayed.
*/
extern CharView *CharViewCreateExtended(SplineChar *sc, FontView *fv,int enc, int show );
extern CharViewTab *CVGetActiveTab(CharView *cv);
extern void CharViewFree(CharView *cv);
extern int CVValid(SplineFont *sf, SplineChar *sc, CharView *cv);
extern void CVSetCharChanged(CharView *cv,int changed);
extern int CVAnySel(CharView *cv, int *anyp, int *anyr, int *anyi, int *anya);
extern int CVAnySelPoints(CharView *cv);
/**
* Get all the selected points in the current cv.
* Caller must g_list_free() the returned value.
*/
extern GList_Glib* CVGetSelectedPoints(CharView *cv);
extern void CVSelectPointAt(CharView *cv);
extern int CVClearSel(CharView *cv);
extern int CVSetSel(CharView *cv,int mask);
extern void CVInvertSel(CharView *cv);
extern int CVAllSelected(CharView *cv);
extern SplinePointList *CVAnySelPointList(CharView *cv);
extern int CVAnySelPoint(CharView *cv, SplinePoint **selsp, spiro_cp **selcp);
extern int CVOneThingSel(CharView *cv, SplinePoint **sp, SplinePointList **spl,
RefChar **ref, ImageList **img, AnchorPoint **ap, spiro_cp **cp);
extern int CVOneContourSel(CharView *cv, SplinePointList **_spl,
RefChar **ref, ImageList **img);
extern void CVInfoDrawText(CharView *cv, GWindow pixmap );
extern void CVImport(CharView *cv);
extern void BVImport(BitmapView *bv);
extern void FVImport(FontView *bv);
extern void CVFindCenter(CharView *cv, BasePoint *bp, int nosel);
extern void CVStroke(CharView *cv);
extern void FVStroke(FontView *fv);
extern void FreeHandStrokeDlg(StrokeInfo *si);
extern void OutlineDlg(FontView *fv, CharView *cv,MetricsView *mv,int isinline);
extern void ShadowDlg(FontView *fv, CharView *cv,MetricsView *mv,int wireframe);
extern void CVTile(CharView *cv);
extern void FVTile(FontView *fv);
extern void CVPatternTile(CharView *cv);
extern void FVPatternTile(FontView *fv);
extern void SCCharInfo(SplineChar *sc,int deflayer,EncMap *map,int enc);
extern void CharInfoDestroy(struct charinfo *ci);
extern SplineChar *SuffixCheck(SplineChar *sc,char *suffix);
extern void SCSubtableDefaultSubsCheck(SplineChar *sc, struct lookup_subtable *sub, struct matrix_data *possub, int col_cnt, int r,int layer);
extern GImage *PST_GetImage(GGadget *pstk,SplineFont *sf,int def_layer,
struct lookup_subtable *sub,int popup_r, SplineChar *sc );
extern GImage *NameList_GetImage(SplineFont *sf,SplineChar *sc,int def_layer,
char *namelist, int isliga );
extern GImage *GV_GetConstructedImage(SplineChar *sc,int def_layer, struct glyphvariants *gv,
int is_horiz);
extern GImage *SC_GetLinedImage(SplineChar *sc, int def_layer, int pos, int is_italic_cor);
extern struct glyphvariants *GV_ParseConstruction(struct glyphvariants *gv,
struct matrix_data *stuff, int rows, int cols);
extern void GV_ToMD(GGadget *g, struct glyphvariants *gv);
extern void CVGetInfo(CharView *cv);
extern void CVPGetInfo(CharView *cv);
extern int SCUsedBySubs(SplineChar *sc);
extern void SCSubBy(SplineChar *sc);
extern void SCRefBy(SplineChar *sc);
extern void ApGetInfo(CharView *cv, AnchorPoint *ap);
extern void CVMakeClipPath(CharView *cv);
extern void CVAddAnchor(CharView *cv);
extern AnchorClass *AnchorClassUnused(SplineChar *sc,int *waslig);
extern void FVSetWidth(FontView *fv,enum widthtype wtype);
extern void CVSetWidth(CharView *cv,enum widthtype wtype);
extern void GenericVSetWidth(FontView *fv,SplineChar* sc,enum widthtype wtype);
extern void CVChangeSC(CharView *cv, SplineChar *sc );
extern Undoes *CVPreserveTState(CharView *cv);
/**
* If isTState > 0 then CVPreserveTState(cv)
* otherwise CVPreserveState(cv)
*/
extern Undoes *CVPreserveMaybeState(CharView *cv, int isTState );
extern void CVRestoreTOriginalState(CharView *cv);
extern void CVUndoCleanup(CharView *cv);
extern void AdjustControls(SplinePoint *sp);
extern void CVAdjustPoint(CharView *cv, SplinePoint *sp);
extern void CVMergeSplineSets(CharView *cv, SplinePoint *active, SplineSet *activess,
SplinePoint *merge, SplineSet *mergess);
extern void CVAdjustControl(CharView *cv,BasePoint *cp, BasePoint *to);
extern int CVMoveSelection(CharView *cv, real dx, real dy, uint32 input_state);
extern int CVTestSelectFromEvent(CharView *cv,GEvent *event);
extern void CVMouseMovePen(CharView *cv, PressedOn *p, GEvent *event);
extern void CVMouseUpPoint(CharView *cv,GEvent *event);
extern int CVMouseMovePointer(CharView *cv, GEvent *event);
extern void CVMouseDownPointer(CharView *cv, FindSel *fs, GEvent *event);
extern void CVMouseDownRuler(CharView *cv, GEvent *event);
extern void CVMouseMoveRuler(CharView *cv, GEvent *event);
extern int CVMouseAtSpline(CharView *cv,GEvent *event);
extern void CVMouseUpRuler(CharView *cv, GEvent *event);
extern void CVMouseMoveHand(CharView *cv, GEvent *event);
extern void CVMouseDownFreeHand(CharView *cv, GEvent *event);
extern void CVMouseMoveFreeHand(CharView *cv, GEvent *event);
extern void CVMouseUpFreeHand(CharView *cv, GEvent *event);
extern void CVMouseDownShape(CharView *cv,GEvent *event);
extern void CPStartInfo(CharView *cv, GEvent *event);
extern void CPUpdateInfo(CharView *cv, GEvent *event);
extern void CPEndInfo(CharView *cv);
extern void BVChar(BitmapView *cv, GEvent *event );
extern void CVMouseDownPoint(CharView *cv,GEvent *event);
extern void CVMouseMovePoint(CharView *cv,PressedOn *);
extern void CVMouseUpPointer(CharView *cv );
extern void CVCheckResizeCursors(CharView *cv);
extern void CVMouseDownHand(CharView *cv);
extern void CVMouseUpHand(CharView *cv);
extern void CVMouseDownTransform(CharView *cv);
extern void CVMouseMoveTransform(CharView *cv);
extern void CVMouseUpTransform(CharView *cv);
extern void CVMouseDownKnife(CharView *cv);
extern void CVMouseMoveKnife(CharView *cv,PressedOn *);
extern void CVMouseUpKnife(CharView *cv,GEvent *event);
extern void CVMouseMoveShape(CharView *cv);
extern void CVMouseUpShape(CharView *cv);
extern void LogoExpose(GWindow pixmap,GEvent *event, GRect *r,enum drawmode dm);
extern void CVDebugPointPopup(CharView *cv);
extern int GotoChar(SplineFont *sf,EncMap *map, int *merge_with_selection);
extern void CVShowPoint(CharView *cv, BasePoint *me);
extern void BitmapViewFinishNonStatic();
extern BitmapView *BitmapViewCreate(BDFChar *bc, BDFFont *bdf, FontView *fv,int enc);
extern BitmapView *BitmapViewCreatePick(int enc, FontView *fv);
extern void BitmapViewFree(BitmapView *bv);
extern void BVMenuRotateInvoked(GWindow gw,struct gmenuitem *mi, GEvent *e);
extern void BVRotateBitmap(BitmapView *bv,enum bvtools type );
extern int BVColor(BitmapView *bv);
extern void BCGeneralFunction(BitmapView *bv,
void (*SetPoint)(BitmapView *,int x, int y, void *data),void *data);
extern char *BVFlipNames[];
extern void BVChangeBC(BitmapView *bv, BDFChar *bc, int fitit );
extern void MVSetSCs(MetricsView *mv, SplineChar **scs);
extern void MVRefreshChar(MetricsView *mv, SplineChar *sc);
extern void MVRegenChar(MetricsView *mv, SplineChar *sc);
extern void MVReKern(MetricsView *mv);
extern void MetricsViewFinishNonStatic();
extern MetricsView *MetricsViewCreate(FontView *fv,SplineChar *sc,BDFFont *bdf);
extern void MetricsViewFree(MetricsView *mv);
extern void MVRefreshAll(MetricsView *mv);
extern void MV_FriendlyFeatures(GGadget *g, int pos);
extern GTextInfo *SLOfFont(SplineFont *sf);
extern void DoPrefs(void);
extern void DoXRes(void);
extern void PointerDlg(CharView *cv);
extern void GListAddStr(GGadget *list,unichar_t *str, void *ud);
extern void GListReplaceStr(GGadget *list,int index, unichar_t *str, void *ud);
extern struct macname *NameGadgetsGetNames( GWindow gw );
extern void NameGadgetsSetEnabled( GWindow gw, int enable );
extern int GCDBuildNames(GGadgetCreateData *gcd,GTextInfo *label,int pos,struct macname *names);
extern void GCDFillMacFeat(GGadgetCreateData *mfgcd,GTextInfo *mflabels, int width,
MacFeat *all, int fromprefs, GGadgetCreateData *boxes,
GGadgetCreateData **array);
extern void Prefs_ReplaceMacFeatures(GGadget *list);
extern unichar_t *FVOpenFont(char *title, const char *defaultfile, int mult);
extern void ShowAboutScreen(void);
extern void DelayEvent(void (*func)(void *), void *data);
extern void FindProblems(FontView *fv,CharView *cv,SplineChar *sc);
/* Constraint modes accepted by CVConstrainSelection(); exact semantics are
 * defined by that function's implementation. */
typedef enum
{
    constrainSelection_AveragePoints = 0,
    constrainSelection_SpacePoints = 1,
    constrainSelection_SpaceSelectedRegions = 2
} constrainSelection_t;
extern void CVConstrainSelection(CharView *cv, constrainSelection_t type);
extern void CVMakeParallel(CharView *cv);
extern void ScriptDlg(FontView *fv,CharView *cv);
# if HANYANG
extern void MenuNewComposition(GWindow gw, struct gmenuitem *, GEvent *);
extern void CVDisplayCompositions(GWindow gw, struct gmenuitem *, GEvent *);
extern void Disp_DoFinish(struct jamodisplay *d, int cancel);
extern void Disp_RefreshChar(SplineFont *sf,SplineChar *sc);
extern void Disp_DefaultTemplate(CharView *cv);
# endif
extern SearchView *SVCreate(FontView *fv);
extern void SVCharViewInits(SearchView *sv);
extern void SV_DoClose(struct cvcontainer *cvc);
extern void SVMakeActive(SearchView *sv,CharView *cv);
extern int SVAttachFV(FontView *fv,int ask_if_difficult);
extern void SVDetachFV(FontView *fv);
extern void MKDMakeActive(MathKernDlg *mkd,CharView *cv);
extern void MKD_DoClose(struct cvcontainer *cvc);
extern void MKDCharViewInits(MathKernDlg *mkd);
extern void MathKernDialog(SplineChar *sc,int def_layer);
extern void ShowAtt(SplineFont *sf,int def_layer);
extern void FontCompareDlg(FontView *fv);
extern void SFShowKernPairs(SplineFont *sf,SplineChar *sc,AnchorClass *ac,int layer);
extern void SFShowLigatures(SplineFont *sf,SplineChar *sc);
extern void SCEditInstructions(SplineChar *sc);
extern void SFEditTable(SplineFont *sf, uint32 tag);
extern void IIScrollTo(struct instrinfo *ii,int ip,int mark_stop);
extern void IIReinit(struct instrinfo *ii,int ip);
extern int ii_v_e_h(GWindow gw, GEvent *event);
extern void instr_scroll(struct instrinfo *ii,struct sbevent *sb);
extern void CVGridFitChar(CharView *cv);
/**
* If a live preview of grid fit is somehow in effect, call CVGridFitChar() for us.
* A caller can call here after a change and any CVGridFitChar() will be updated if need be.
*/
extern void CVGridHandlePossibleFitChar(CharView *cv);
extern void CVFtPpemDlg(CharView *cv,int debug);
extern void SCDeGridFit(SplineChar *sc);
extern void SCReGridFit(SplineChar *sc,int layer);
extern void CVDebugReInit(CharView *cv,int restart_debug,int dbg_fpgm);
extern void CVDebugFree(DebugView *dv);
extern int DVChar(DebugView *dv, GEvent *e);
extern void KernClassD(KernClass *kc, SplineFont *sf, int layer, int isv);
extern void ShowKernClasses(SplineFont *sf,MetricsView *mv,int layer,int isv);
extern void KCLD_End(struct kernclasslistdlg *kcld);
extern void KCLD_MvDetach(struct kernclasslistdlg *kcld,MetricsView *mv);
extern void KernPairD(SplineFont *sf,SplineChar *sc1,SplineChar *sc2,int layer, int isv);
extern void KCD_DrawGlyph(GWindow pixmap,int x,int baseline,BDFChar *bdfc,int mag);
extern GTextInfo *BuildFontList(FontView *except);
extern void TFFree(GTextInfo *tf);
extern void AnchorControl(SplineChar *sc,AnchorPoint *ap,int layer);
extern void AnchorControlClass(SplineFont *_sf,AnchorClass *ac,int layer);
extern void FVSelectByPST(FontView *fv);
/* Which histogram SFHistogram() displays: hstem widths, vstem widths, or blue zones. */
enum hist_type { hist_hstem, hist_vstem, hist_blues };
struct psdict;
extern void SFHistogram(SplineFont *sf,int layer, struct psdict *private,uint8 *selected,
EncMap *map, enum hist_type which);
extern void ContextChainEdit(SplineFont *sf,FPST *fpst,
struct gfi_data *gfi,unichar_t *newname,int layer);
extern char *cu_copybetween(const unichar_t *start, const unichar_t *end);
extern void StateMachineEdit(SplineFont *sf,ASM *sm,struct gfi_data *d);
extern void GFI_FinishSMNew(struct gfi_data *d,ASM *sm, int success,int isnew);
extern void MMChangeBlend(MMSet *mm,FontView *fv,int tonew);
extern void MMWizard(MMSet *mm);
extern int LayerDialog(Layer *layer,SplineFont *sf);
extern void CVLayerChange(CharView *cv);
extern int PointOfViewDlg(struct pov_data *pov,SplineFont *sf,int flags);
extern SplineChar *FVMakeChar(FontView *fv,int i);
extern void CVPointOfView(CharView *cv,struct pov_data *);
extern void DVCreateGloss(DebugView *dv);
extern void DVMarkPts(DebugView *dv,SplineSet *ss);
extern int CVXPos(DebugView *dv,int offset,int width);
extern GMenuItem *GetEncodingMenu(void (*func)(GWindow,GMenuItem *,GEvent *),
Encoding *current);
extern GTextInfo *TIFromName(const char *name);
/* Hints passed (via struct subtable_data) to the subtable-creation dialogs.
 * Expressed as flags rather than a plain enum so "unspecified" (no bit set)
 * is representable. */
enum subtable_data_flags {
    /* I have flags for each alternative because I want "unspecified" to be */
    /* an option */
    sdf_kernclass = 0x01,
    sdf_kernpair = 0x02,
    sdf_verticalkern = 0x04,
    sdf_horizontalkern = 0x08,
    sdf_dontedit = 0x10
};
/* Optional context handed to SFNewLookupSubtableOfType() and the subtable
 * editing dialogs. */
struct subtable_data {
    int flags;		/* mask of subtable_data_flags */
    SplineChar *sc;
};
extern GTextInfo **SFLookupListFromType(SplineFont *sf, int lookup_type );
extern GTextInfo *SFLookupArrayFromType(SplineFont *sf, int lookup_type );
extern GTextInfo *SFLookupArrayFromMask(SplineFont *sf, int lookup_mask );
extern GTextInfo **SFSubtablesOfType(SplineFont *sf, int lookup_type, int kernclass, int add_none);
extern GTextInfo *SFSubtableListOfType(SplineFont *sf, int lookup_type, int kernclass, int add_none);
extern struct lookup_subtable *SFNewLookupSubtableOfType(SplineFont *sf, int lookup_type, struct subtable_data *sd, int def_layer );
extern int EditLookup(OTLookup *otl,int isgpos,SplineFont *sf);
extern int EditSubtable(struct lookup_subtable *sub,int isgpos,SplineFont *sf,
struct subtable_data *sd,int def_layer);
extern void _LookupSubtableContents(SplineFont *sf, struct lookup_subtable *sub,
struct subtable_data *sd,int def_layer);
extern char *SCNameUniStr(SplineChar *sc);
extern unichar_t *uSCNameUniStr(SplineChar *sc);
extern char *SFNameList2NameUni(SplineFont *sf, char *str);
extern unichar_t **SFGlyphNameCompletion(SplineFont *sf,GGadget *t,int from_tab,
int new_name_after_space);
extern char *GlyphNameListDeUnicode( char *str );
extern void AddRmLang(SplineFont *sf, struct lkdata *lk,int add_lang);
extern void FVMassGlyphRename(FontView *fv);
extern void SFBdfProperties(SplineFont *sf, EncMap *map, BDFFont *thisone);
extern GMenuItem2 helplist[];
extern BasePoint last_ruler_offset[];
extern void CVCopyLayerToLayer(CharView *cv);
extern void FVCopyLayerToLayer(FontView *fv);
extern void CVCompareLayerToLayer(CharView *cv);
extern void FVCompareLayerToLayer(FontView *fv);
extern void MathInit(void);
extern void SFMathDlg(SplineFont *sf,int def_layer);
extern GMenuItem2 *cvpy_menu, *fvpy_menu;
extern void cvpy_tllistcheck(GWindow gw,struct gmenuitem *mi,GEvent *e);
extern void fvpy_tllistcheck(GWindow gw,struct gmenuitem *mi,GEvent *e);
extern GMenuItem2 *cv_menu, *fv_menu;
extern void cv_tl2listcheck(GWindow gw,struct gmenuitem *mi,GEvent *e);
extern void fv_tl2listcheck(GWindow gw,struct gmenuitem *mi,GEvent *e);
extern void SFValidationWindow(SplineFont *sf,int layer, enum fontformat format);
extern void ValidationDestroy(SplineFont *sf);
extern const char *UI_TTFNameIds(int id);
extern const char *UI_MSLangString(int language);
extern void FontInfoInit(void);
extern void LookupUIInit(void);
extern enum psstrokeflags Ps_StrokeFlagsDlg(void);
extern struct cidmap *AskUserForCIDMap(void);
extern void DefineGroups(struct fontview *fv);
extern void DisplayGroups(struct fontview *fv);
extern struct Base *SFBaselines(SplineFont *sf,struct Base *old,int is_vertical);
extern void JustifyDlg(SplineFont *sf);
extern char *GlyphListDlg(SplineFont *sf, char *glyphstr);
extern void DeltaSuggestionDlg(FontView *fv,CharView *cv);
extern void QGRmFontView(struct qg_data *qg,FontView *fv);
extern void QGRmCharView(struct qg_data *qg,CharView *cv);
extern struct hslrgb *SFFontCols(SplineFont *sf,struct hslrgb fontcols[6]);
extern Color view_bgcol; /* Background color for views */
extern void MVColInit(void);
extern void CVColInit( void );
extern void FontViewRemove(FontView *fv);
extern void FontViewFinishNonStatic();
extern void FVChar(FontView *fv,GEvent *event);
extern void FVDrawInfo(FontView *fv,GWindow pixmap,GEvent *event);
extern void FVRedrawAllCharViews(FontView *fv);
extern void KFFontViewInits(struct kf_dlg *kf,GGadget *drawable);
extern char *GlyphSetFromSelection(SplineFont *sf,int def_layer,char *current);
extern void ME_ListCheck(GGadget *g,int r, int c, SplineFont *sf);
extern void ME_SetCheckUnique(GGadget *g,int r, int c, SplineFont *sf);
extern void ME_ClassCheckUnique(GGadget *g,int r, int c, SplineFont *sf);
extern void PI_Destroy(struct dlistnode *node);
struct gidata;
extern void PIChangePoint(struct gidata *ci);
extern void CVRegenFill(CharView *cv);
extern void RulerDlg(CharView *cv);
extern int CVCountSelectedPoints(CharView *cv);
extern void _CVMenuInsertPt(CharView *cv);
extern void _CVMenuNamePoint(CharView *cv, SplinePoint *sp);
extern void _CVMenuNameContour(CharView *cv);
extern void Prefs_LoadDefaultPreferences( void );
extern void SPSelectNextPoint( SplinePoint *sp, int state );
extern void SPSelectPrevPoint( SplinePoint *sp, int state );
/**
* Is the next BCP for the sp selected, and is it the primary BCP for the selection
* @see SPIsNextCPSelected
*/
extern bool SPIsNextCPSelectedSingle( SplinePoint *sp, CharView *cv );
/**
* Is the prev BCP for the sp selected, and is it the primary BCP for the selection
* @see SPIsNextCPSelected
*/
extern bool SPIsPrevCPSelectedSingle( SplinePoint *sp, CharView *cv );
/**
* Is the next BCP for the sp selected, it can be the primary or any
* of the secondary selected BCP
*
* The last selected BCP is the 'primary' selected BCP. Code which
* only handles a single selected BCP will only honor the primary
* selected BCP
*
* There can also be one or more seconday selected BCP. These might be
* drawn with slightly less highlight graphically and are only handled
* by code which has been updated to allow mutliple selected BCP to be
* operated on at once.
*/
extern bool SPIsNextCPSelected( SplinePoint *sp, CharView *cv );
/**
* Is the prev BCP for the sp selected, it can be the primary or any of the secondary selected BCP
*
* @see SPIsNextCPSelected
*/
extern bool SPIsPrevCPSelected( SplinePoint *sp, CharView *cv );
typedef struct FE_adjustBCPByDeltaDataS
{
CharView *cv; //< used to update view
real dx; //< Add this to the BCP x
real dy; //< Add this to the BCP y
int keyboarddx;
} FE_adjustBCPByDeltaData;
/**
* Visitor function type for visitSelectedControlPoints()
*/
typedef void (*visitSelectedControlPointsVisitor) ( void* key,    /* hash-table key for this entry */
void* value,                                                      /* hash-table value for this entry */
SplinePoint* sp,                                                  /* the point whose BCP is being visited */
BasePoint *which,                                                 /* the control point itself (next or prev) */
bool isnext,                                                      /* true => sp->nextcp, false => sp->prevcp */
void* udata );                                                    /* caller-supplied data, passed through untouched */
/**
* Visitor function to move each BCP by data->dx/data->dy
*
*
* Visitor: visitSelectedControlPointsVisitor
* UsedBy: CVFindAndVisitSelectedControlPoints
*/
extern void FE_adjustBCPByDelta( void* key,
void* value,
SplinePoint* sp,
BasePoint *which,
bool isnext,
void* udata );
extern void FE_adjustBCPByDeltaWhilePreservingBCPAngle( void* key,
void* value,
SplinePoint* sp,
BasePoint *which,
bool isnext,
void* udata );
/**
* Visitor function to unselect every BCP passed
*
* Visitor: visitSelectedControlPointsVisitor
* UsedBy: CVFindAndVisitSelectedControlPoints
* CVUnselectAllBCP
*
* @see SPIsNextCPSelected
*/
extern void FE_unselectBCP( void* key,
void* value,
SplinePoint* sp,
BasePoint *which,
bool isnext,
void* udata );
extern void FE_touchControlPoint( void* key,
void* value,
SplinePoint* sp,
BasePoint *which,
bool isnext,
void* udata );
/**
* Find all the selected BCP and apply the visitor function f to them
* passing the user data pointer udata to the 'f' visitor.
*
* This function doesn't use udata at all, it simply passes it on to
* your visitor function so it may do something with it like record
* results or take optional parameters.
*
* If preserveState is true and there are selected BCP then
* CVPreserveState() is called before the visitor function.
*/
extern void CVFindAndVisitSelectedControlPoints( CharView *cv, bool preserveState,
visitSelectedControlPointsVisitor f, void* udata );
/**
* NOTE: doesn't do all, just all on selected spline.
*/
extern void CVVisitAllControlPoints( CharView *cv, bool preserveState,
visitSelectedControlPointsVisitor f, void* udata );
/**
* Unselect all the BCP which are currently selected.
*/
extern void CVUnselectAllBCP( CharView *cv );
/**
* This will call your visitor function 'f' on any selected BCP. This
* is regardless of if the BCP is the next or prev BCP for it's
* splinepoint.
*
* This function doesn't use udata at all, it simply passes it on to
* your visitor function so it may do something with it like record
* results or take optional parameters.
*/
extern void visitSelectedControlPoints( GHashTable *col, visitSelectedControlPointsVisitor f, gpointer udata );
/**
* NOTE: doesn't do all, just all on selected spline.
*/
extern void visitAllControlPoints( GHashTable *col, visitSelectedControlPointsVisitor f, gpointer udata );
extern void CVVisitAdjacentToSelectedControlPoints( CharView *cv, bool preserveState,
visitSelectedControlPointsVisitor f, void* udata );
extern void CVFreePreTransformSPL( CharView* cv );
extern bool CVShouldInterpolateCPsOnMotion( CharView* cv );
extern int CVNearRBearingLine( CharView* cv, real x, real fudge );
extern int CVNearLBearingLine( CharView* cv, real x, real fudge );
extern void CVMenuConstrain(GWindow gw, struct gmenuitem *mi, GEvent *UNUSED(e));
#endif /* FONTFORGE_VIEWS_H */
/* ---- file boundary (concatenation artifact) ---- */
/* Copyright (C) 2000-2012 by George Williams */
/*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
* EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef FONTFORGE_VIEWS_H
#define FONTFORGE_VIEWS_H
#include <fontforge-config.h>
#include "ttfinstrs.h"
#include "baseviews.h"
#include "ffglib.h"
#include "dlist.h"
#include "ggadget.h"
#include "search.h"
struct gfi_data;
struct contextchaindlg;
struct statemachinedlg;
/* Global display preferences for outline-glyph (CharView) windows.        */
/* Field names mirror the per-window bitfields in struct charview; these   */
/* hold the defaults a new view starts from.                               */
extern struct cvshows {
int showfore, showback, showgrids, showhhints, showvhints, showdhints;
int showpoints, showfilled;
int showrulers;
int showrounds; /* 0=>no, 1=>auto, 2=>always */
int showmdx, showmdy; /* minimum distances x,y */
int showhmetrics, showvmetrics; /* show advance width, baseline, etc. */
int markextrema;
int markpoi; /* Points of inflection */
int showblues, showfamilyblues;
int showanchor;
int showcpinfo;
int showtabs; /* with the names of former glyphs */
int showsidebearings;
int showrefnames;
int snapoutlines;
int showalmosthvlines;
int showalmosthvcurves;
int hvoffset;
int checkselfintersects; /* Not really something shown, but convenient to keep it here */
int showdebugchanges; /* Changes the way changing rasters are displayed in tt debug mode */
int alwaysshowcontrolpoints; //< Always show the BCP even when their splinepoint is not selected
} CVShows;
/* Global display preferences for bitmap (BitmapView) windows. */
extern struct bvshows {
int showfore, showoutline, showgrid;
int lastpixelsize; /* pixel size last used, remembered across views */
} BVShows;
/* Bit flags identifying the individual sub-windows of the truetype        */
/* debugger (DebugView); combinable into a mask.                           */
enum debug_wins { dw_registers=0x1, dw_stack=0x2, dw_storage=0x4, dw_points=0x8,
dw_cvt=0x10, dw_raster=0x20, dw_gloss=0x40 };
/* State for a truetype-instruction listing pane: geometry, scroll state,  */
/* display options, and callbacks the owner installs for selection,        */
/* breakpoint checks, and key handling.                                    */
struct instrinfo {
int isel_pos; /* currently selected instruction position */
int16 lheight,lpos; /* line height / top line position of the listing */
char *scroll, *offset;
GWindow v; /* the listing window itself */
GGadget *vsb; /* its vertical scrollbar */
int16 sbw; /* scrollbar width */
int16 vheight, vwidth;
int16 lstopped;
int16 as, fh; /* font ascent and line height used for layout */
struct instrdata *instrdata; /* the instructions being displayed */
GFont *gfont;
unsigned int showaddr: 1; /* show instruction addresses */
unsigned int showhex: 1; /* show raw hex bytes */
unsigned int mousedown: 1;
void *userdata;
void (*selection_callback)(struct instrinfo *,int ip); /* invoked when the selection moves to instruction ip */
int (*bpcheck)(struct instrinfo *,int ip); /* returns whether a breakpoint is set at ip */
int (*handle_char)(struct instrinfo *,GEvent *e); /* key-event hook */
};
/* Linked chain of references, innermost first, used by the debugger to    */
/* track which nested reference is being executed.                         */
struct reflist {
RefChar *ref;
struct reflist *parent;
};
/* The truetype instruction debugger attached to a CharView: one window    */
/* per inspectable datum (registers, stack, storage, points, cvt, raster,  */
/* gloss) plus the instruction listing (ii) and scroll offsets for each.   */
typedef struct debugview {
struct debugger_context *dc; /* Local to freetype.c */
GWindow dv, v;
/* Windows for twilight points, cvt, registers, stack, storage, stack gloss */
GWindow regs, stack, storage, points, cvt, raster, gloss; /* order matters */
GWindow points_v;
GGadget *cvtsb;
GGadget *pts_vsb;
GGadget *glosssb;
GGadget *storagesb;
GGadget *regsb;
GGadget *stacksb;
struct instrdata id;
struct instrinfo ii;
int dwidth, toph;
struct charview *cv; /* the glyph window being debugged */
double scalex, scaley;
int pts_head, cvt_offtop, gloss_offtop, storage_offtop, stack_offtop, reg_offtop; /* scroll offsets, one per pane */
int points_offtop;
int codeSize;
uint8 initialbytes[4];
struct reflist *active_refs; /* chain of references currently being executed */
int last_npoints;
int layer;
} DebugView;
/* The number of tabs allowed in the outline glyph view of former glyphs */
#define FORMER_MAX 10
/* Which truetype code region the debugger is stopped in. */
enum dv_coderange { cr_none=0, cr_fpgm, cr_prep, cr_glyph }; /* cleverly chosen to match ttobjs.h */
/* Working state for the freehand drawing tool while a stroke is in progress. */
struct freehand {
struct tracedata *head, *last; /* for the freehand tool */
SplinePointList *current_trace; /* the spline being traced from the raw input */
int ignore_wobble; /* Ignore wiggles smaller than this */
int skip_cnt;
};
/* Which edge/corner of a selection is being dragged during an expand/resize. */
enum expandedge { ee_none, ee_nw, ee_up, ee_ne, ee_right, ee_se, ee_down,
ee_sw, ee_left, ee_max };
enum { charviewtab_charselectedsz = 1024 };
/* One tab of a CharView window: the text typed into the char selector,    */
/* the label shown on the tab, and the per-tab pan/zoom state.             */
typedef struct charviewtab
{
char charselected[ charviewtab_charselectedsz + 1 ]; /* chars the user asked to view on this tab */
char tablabeltxt[ charviewtab_charselectedsz + 1 ]; /* text drawn on the tab itself */
float xoff, yoff; /* pan offsets */
real scale; /* zoom factor */
} CharViewTab;
enum { charview_cvtabssz = 100 };
/* approximately BACK_LAYER_MAX / 32 */
#define BACK_LAYERS_VIEW_MAX 8
/* CharView: the outline glyph editing window.  Wraps the non-UI           */
/* CharViewBase (b) with all GUI state: per-view display flags (largely    */
/* mirroring struct cvshows), window/gadget handles, tool and mouse state, */
/* ruler and freetype-raster data, the optional debugger, and tabs.        */
typedef struct charview {
CharViewBase b;
uint32 showback[BACK_LAYERS_VIEW_MAX]; /* visibility bitmask for background layers */
/* --- display option flags; defaults come from CVShows --- */
unsigned int showfore:1;
unsigned int showgrids:1;
unsigned int showhhints:1;
unsigned int showvhints:1;
unsigned int showdhints:1;
unsigned int showpoints:1;
unsigned int alwaysshowcontrolpoints:1;
unsigned int showfilled:1;
unsigned int showrulers:1;
unsigned int showrounds:2; /* 0=>no, 1=>auto, 2=>always */
unsigned int showmdx:1;
unsigned int showmdy:1;
unsigned int showhmetrics:1;
unsigned int showvmetrics:1;
unsigned int showblues:1; /* 16 */
unsigned int showfamilyblues:1;
unsigned int showanchor:1;
unsigned int showpointnumbers:2;
unsigned int markextrema:1;
unsigned int markpoi:1;
/* --- transient UI state flags --- */
unsigned int needsrasterize:1; /* Rasterization (of fill or fontview) needed on mouse up */
unsigned int recentchange:1; /* a change happened in the grids or background. don't need to rasterize */
unsigned int info_within: 1; /* cursor is within main window */
unsigned int back_img_out_of_date: 1; /* Force redraw of back image pixmap */
unsigned int cntrldown:1;
unsigned int joinvalid:1;
unsigned int widthsel:1; /* the advance-width line is selected */
unsigned int vwidthsel:1; /* the vertical-advance line is selected */
unsigned int lbearingsel:1; /* the left-bearing line is selected */
unsigned int icsel:1;
unsigned int tah_sel:1;
unsigned int inactive:1; /* When in a search view (32) */
unsigned int show_ft_results: 1;
unsigned int show_ft_results_live_update : 1;
unsigned int coderange: 2; /* For the debugger */
unsigned int autonomous_ruler_w: 1;
unsigned int showcpinfo: 1;
unsigned int showtabs: 1;
unsigned int showsidebearings: 1;
unsigned int showing_spiro_pt_menu: 1;
unsigned int ruler_pressed: 1;
unsigned int ruler_pressedv: 1;
unsigned int showrefnames: 1;
unsigned int snapoutlines: 1;
unsigned int showalmosthvlines: 1;
unsigned int showalmosthvcurves: 1;
unsigned int checkselfintersects: 1;
unsigned int showdebugchanges: 1;
unsigned int inPreviewMode: 1;
unsigned int inDraggingComparisonOutline: 1;
unsigned int activeModifierControl: 1; //< Is control being held right now?
unsigned int activeModifierAlt: 1; //< Is alt being held right now?
unsigned int changedActiveGlyph: 1; //< Set in CVSwitchActiveSC() cleared in cvmouseup()
int hvoffset; /* for showalmosthvlines */
int layers_off_top;
/* --- windows, gadgets and fonts --- */
GWindow gw, v; /* outer window / drawing area */
GWindow hruler, vruler; /* Ruler pixmaps */
GGadget *vsb, *hsb, *mb, *tabs;
GFont *small, *normal;
GWindow icon;
GWindow ruler_w;
GWindow ruler_linger_w;
unichar_t ruler_linger_lines[40][80];
int ruler_linger_num_lines;
int num_ruler_intersections;
int allocated_ruler_intersections;
BasePoint *ruler_intersections;
int start_intersection_snapped;
int end_intersection_snapped;
GFont *rfont;
GTimer *pressed;
GWindow backimgs;
GIC *gic;
GIC *gwgic;
int width, height;
int mbh; //< menu bar height
int charselectorh; //< char selection input box height
int infoh; //< info bar height
int rulerh; //< ruler height
int16 sas, sfh, sdh, nas, nfh;
/* --- info about the point/spline under the cursor --- */
BasePoint info;
SplinePoint *info_sp;
Spline *info_spline;
real info_t;
GPoint e; /* mouse location */
GPoint olde;
BasePoint last_c;
BDFChar *filled;
GImage gi; /* used for fill bitmap only */
int enc;
EncMap *map_of_enc; /* Only use for comparison against fontview's map to see if our enc be valid */
/* Will not be updated when fontview is reencoded */
PressedOn p;
SplinePoint *lastselpt;
spiro_cp *lastselcp;
/*GWindow tools, layers;*/
/* --- tool bindings and tool state --- */
int8 b1_tool, cb1_tool, b2_tool, cb2_tool; /* Button 3 does a popup */
int8 b1_tool_old; /* Used by mingw port */
int8 s1_tool, s2_tool, er_tool; /* Bindings for wacom stylus and eraser */
int8 showing_tool, pressed_tool, pressed_display, had_control, active_tool;
int8 spacebar_hold; /* spacebar is held down */
SplinePointList *active_spl;
SplinePoint *active_sp;
spiro_cp *active_cp;
IPoint handscroll_base;
uint16 rfh, ras;
BasePoint lastknife;
struct freehand freehand;
enum expandedge expandedge;
BasePoint expandorigin;
real expandwidth, expandheight;
SplinePointList *active_shape;
SplinePoint joinpos;
spiro_cp joincp;
SplineChar *template1, *template2;
#if HANYANG
struct jamodisplay *jamodisplay;
#endif
real oldwidth, oldvwidth;
real oldlbearing;
int16 oldic, oldtah;
#if _ModKeysAutoRepeat
GTimer *autorpt;
int keysym, oldstate;
int oldkeyx, oldkeyy;
GWindow oldkeyw;
#endif
PST *lcarets;
int16 nearcaret;
/* freetype results display */
int16 ft_dpi, ft_ppemy, ft_ppemx, ft_depth;
real ft_pointsizey, ft_pointsizex;
struct freetype_raster *raster, *oldraster;
DebugView *dv; /* non-NULL while the truetype debugger is attached */
uint32 mmvisible;
char *former_names[FORMER_MAX]; /* names of former glyphs shown as tabs (see FORMER_MAX) */
int former_cnt;
AnchorPoint *apmine, *apmatch;
SplineChar *apsc;
int guide_pos;
struct qg_data *qg;
int16 note_x, note_y;
struct dlistnode* pointInfoDialogs;
GGadget* charselector; //< let the user type in more than one char to view at once.
GGadget* charselectorNext; //< move to next word in charselector
GGadget* charselectorPrev; //< move to prev word in charselector
int charselectoridx;
SplineChar* additionalCharsToShow [51]; //< additionalCharsToShowLimit + 1 in size
int additionalCharsToShowActiveIndex;
CharViewTab cvtabs[ charview_cvtabssz+1 ]; /* per-tab pan/zoom and selection text */
int oldtabnum;
} CharView;
/* BitmapView: the bitmap glyph editing window for one BDFChar of a        */
/* BDFFont.  Holds window handles, pan/zoom state, tool bindings and the   */
/* display flags whose defaults come from BVShows.                         */
typedef struct bitmapview {
BDFChar *bc; /* the bitmap glyph being edited */
BDFFont *bdf; /* the bitmap font it belongs to */
struct fontview *fv; /* owning font view */
EncMap *map_of_enc;
int enc;
GWindow gw, v; /* outer window / drawing area */
GGadget *vsb, *hsb, *mb;
GGadget *recalc;
GFont *small;
int xoff, yoff; /* pan offsets */
int width, height;
int infoh, mbh; /* info bar / menu bar heights */
int scale; /* pixels per bitmap unit */
real scscale;
struct bitmapview *next; /* linked list of views on the same glyph */
unsigned int showfore:1;
unsigned int showoutline:1;
unsigned int showgrid:1;
unsigned int cntrldown:1;
unsigned int recentchange:1;
unsigned int clearing:1;
unsigned int shades_hidden:1;
unsigned int shades_down:1;
/*GWindow tools, layers;*/
int8 b1_tool, cb1_tool, b2_tool, cb2_tool; /* Button 3 does a popup */
int8 s1_tool, s2_tool, er_tool; /* Bindings for wacom stylus and eraser */
int8 showing_tool, pressed_tool, pressed_display, had_control, active_tool;
int pressed_x, pressed_y;
int info_x, info_y;
int event_x, event_y;
int16 sas, sfh;
#if _ModKeysAutoRepeat
GTimer *autorpt;
int keysym, oldstate;
#endif
int color; /* for greyscale fonts (between 0,255) */
int color_under_cursor;
} BitmapView;
/* A list of anchor points together with selection/connection state. */
struct aplist { AnchorPoint *ap; int connected_to, selected; struct aplist *next; };
/* Grid display modes for the metrics view. */
enum mv_grids { mv_hidegrid, mv_showgrid, mv_partialgrid, mv_hidemovinggrid };
/* Which quantities the metrics view lets the user edit. */
enum mv_type { mv_kernonly, mv_widthonly, mv_kernwidth };
/* Per-glyph layout and edit gadgets within a MetricsView row. */
struct metricchar {
int16 dx, dwidth; /* position and width of the displayed char */
int16 dy, dheight; /* displayed info for vertical metrics */
int xoff, yoff;
int16 mx, mwidth; /* position and width of the text underneath */
int16 kernafter;
unsigned int selected: 1;
GGadget *width, *lbearing, *rbearing, *kern, *name;
GGadget* updownkparray[10]; /* Cherry picked elements from width...kern allowing up/down key navigation */
};
/* MetricsView: the window showing a run of glyphs with editable metrics   */
/* (widths, bearings, kerning), optionally rendered through the OpenType   */
/* gsub/gpos machinery (glyphs) and rasterized at a chosen pixel size.     */
typedef struct metricsview {
struct fontview *fv; /* owning font view */
SplineFont *sf;
int pixelsize; /* If the user has manually requested a pixelsize */
/* then rasterize at that size no matter how large */
/* the font is zoomed. For non-user requesed sizes */
/* this is the pixelsize * zoom-factor */
BDFFont *bdf; /* We can also see metric info on a bitmap font */
BDFFont *show; /* Or the rasterized version of the outline font */
GWindow gw, v;
GFont *font;
GGadget *hsb, *vsb, *mb, *text, *textPrev, *textNext, *script, *features, *subtable_list;
GGadget *namelab, *widthlab, *lbearinglab, *rbearinglab, *kernlab;
int16 xstart;
int16 width, height, dwidth;
int16 vwidth, vheight;
int16 mbh,sbh; /* menu bar / scroll bar heights */
int16 topend; /* y value of the end of the region containing the text field */
int16 displayend; /* y value of the end of the region showing filled characters */
int16 fh, as; /* font height and ascent */
int16 cmax, clen; /* capacity and length of chars[] */
SplineChar **chars; /* Character input stream */
struct opentype_str *glyphs;/* after going through the various gsub/gpos transformations */
struct metricchar *perchar; /* One for each glyph above */
SplineChar **sstr; /* Character input stream */
int16 mwidth, mbase;
int16 glyphcnt, max;
int16 pressed_x, pressed_y;
int16 activeoff;
int xoff, coff, yoff;
struct metricsview *next; /* linked list of metrics views */
unsigned int right_to_left: 1;
unsigned int pressed: 1;
unsigned int pressedwidth: 1;
unsigned int pressedkern: 1;
unsigned int showgrid: 2; /* enum mv_grids */
unsigned int antialias: 1;
unsigned int vertical: 1;
unsigned int type: 2; /* enum mv_type */
unsigned int usehinting: 1; /* should the hints be used during the render */
unsigned int pixelsize_set_by_window; /* NOTE(review): no bit-width unlike sibling flags; likely meant ":1" -- confirm before changing (ABI) */
int xp, yp, ap_owner;
BasePoint ap_start;
int cursor;
int scale_index;
struct lookup_subtable *cur_subtable;
GTextInfo *scriptlangs;
int word_index;
int layer;
int fake_unicode_base;
GIC *gwgic;
int ptsize, dpi;
int ybaseline;
int oldscript, oldlang;
} MetricsView;
/* Bit flags: which metric guide lines to draw in the font view cells. */
enum fv_metrics { fvm_baseline=1, fvm_origin=2, fvm_advanceat=4, fvm_advanceto=8 };
/* FontView: the main font window showing the glyph grid.  Wraps the       */
/* non-UI FontViewBase (b) with window handles, cell geometry, scroll      */
/* position, selection/drag state and display options.                     */
typedef struct fontview {
FontViewBase b;
BDFFont *show, *filled; /* the rasterization displayed / the filled version */
GWindow gw, v; /* outer window / glyph-grid drawing area */
GFont **fontset;
GGadget *vsb, *mb;
GTimer *pressed;
GTimer *resize;
GEvent resize_event;
GIC *gic;
GIC *gwgic;
int width, height; /* of v */
int16 infoh,mbh; /* info bar / menu bar heights */
int16 lab_height, lab_as;
int16 colcnt, rowcnt; /* of display window */
int32 rowoff, rowltot; /* Can be really big in full unicode */
int16 cbw,cbh; /* width/height of a character box */
int pressed_pos, end_pos;
unsigned int antialias:1;
unsigned int bbsized:1; /* displayed bitmap should be scaled by bounding box rather than emsize */
unsigned int wasonlybitmaps:1;
/*unsigned int refstate: 3;*/ /* 0x1 => paste orig of all non exist refs, 0x2=>don't, 0x3 => don't warn about non-exist refs with no source font */
unsigned int touched: 1;
unsigned int showhmetrics: 4; /* mask of enum fv_metrics */
unsigned int showvmetrics: 4; /* mask of enum fv_metrics */
unsigned int drag_and_drop: 1;
unsigned int has_dd_no_cursor: 1;
unsigned int any_dd_events_sent: 1;
unsigned int resize_expected: 1;
/* Some window managers do not honour my resize requests (if window is*/
/* maximized for example), but we depend on the resize request to */
/* fix up the window. We do get a configure notify, but the window */
/* stays the same size, so kludge things */
unsigned int glyphlabel: 2;
unsigned int notactive:1; /* When embedded in a dlg */
int16 magnify;
int16 user_requested_magnify;
struct searchview *sv;
SplineChar *sc_near_top;
int sel_index;
struct lookup_subtable *cur_subtable;
struct qg_data *qg;
GPid pid_webfontserver;
bool script_unsaved; // Whether or not there's an unsaved script in script dialog
} FontView;
/* Hit-testing parameters used when looking for points/control points near */
/* a mouse event, with a pixel-sized fudge rectangle around the cursor.    */
typedef struct findsel {
GEvent *e; /* the mouse event being tested */
real fudge; /* One pixel fudge factor */
real xl,xh, yl, yh; /* One pixel fudge factor */
real c_xl,c_xh, c_yl, c_yh; /* fudge rectangle for control points, larger than above if alt is depressed */
unsigned int select_controls: 1; /* notice control points */
unsigned int seek_controls: 1; /* notice control points before base points */
unsigned int all_controls: 1; /* notice control points even if the base points aren't selected (in truetype point numbering mode where all cps are visible) */
unsigned int alwaysshowcontrolpoints:1; /* if the BCP are forced on, then we want the selection code paths
 * to also know that so the user can drag the BCP of a non selected splinepoint */
real scale;
PressedOn *p; /* where the results of the hit test are recorded */
} FindSel;
/* SearchView: the find/replace-outline dialog.  Embeds two CharViews      */
/* (search pattern and replacement) backed by a dummy font/fontview so the */
/* normal glyph-editing machinery can be reused inside the dialog.         */
typedef struct searchview {
struct cvcontainer base;
FontView dummy_fv; /* fake fontview the embedded charviews hang off */
SplineFont dummy_sf;
LayerInfo layerinfo[2];
SplineChar *chars[2]; /* the search and replace glyphs */
EncMap dummy_map;
int32 map[2], backmap[2];
uint8 sel[2];
CharView cv_srch, cv_rpl; /* embedded editors for pattern / replacement */
CharView *lastcv;
/* ****** */
GWindow gw;
GGadget *mb;
GFont *plain, *bold;
int mbh;
int fh, as;
int rpl_x, cv_y;
int cv_width, cv_height;
short button_height, button_width;
/* ****** */
SearchData sd; /* the actual search state/results */
unsigned int showsfindnext: 1;
unsigned int findenabled: 1;
unsigned int rplallenabled: 1;
unsigned int rplenabled: 1;
unsigned int isvisible: 1;
} SearchView;
/* MathKernDlg: dialog for editing the four math-kerning corners of a      */
/* glyph.  Like SearchView, it embeds CharViews over dummy glyphs, one per */
/* corner (top-right, top-left, bottom-right, bottom-left).                */
typedef struct mathkernview {
struct cvcontainer base;
FontView dummy_fv;
SplineFont dummy_sf;
LayerInfo layerinfo[2];
SplineChar sc_topright, sc_topleft, sc_bottomright, sc_bottomleft; /* dummy glyphs, one per corner */
SplineChar *chars[4];
EncMap dummy_map;
int32 map[4], backmap[4];
uint8 sel[4];
CharView cv_topright, cv_topleft, cv_bottomright, cv_bottomleft; /* embedded editors, one per corner */
CharView *lastcv;
/* ****** */
GWindow gw;
GWindow cvparent_w;
GGadget *mb;
GFont *plain, *bold;
int mbh;
int fh, as;
int mid_space, cv_y;
int cv_width, cv_height;
short button_height, button_width;
/* ****** */
SplineChar *cursc; /* the real glyph whose math kerning is being edited */
int def_layer;
struct mathkern *orig_mathkern;
uint8 saved_mathkern; /* Can't just check if orig is non-NULL, because NULL is a perfectly valid initial state */
uint8 last_aspect;
uint8 done;
} MathKernDlg;
# ifdef FONTFORGE_CONFIG_TILEPATH
/* TilePathDlg: dialog for the tile-path feature.  Embeds four CharViews   */
/* over dummy glyphs for the first/medial/final/isolated tiles.  Only      */
/* built when FONTFORGE_CONFIG_TILEPATH is defined.                        */
typedef struct tilepathdlg {
struct cvcontainer base;
FontView dummy_fv;
SplineFont dummy_sf;
LayerInfo layerinfo[2];
SplineChar sc_first, sc_medial, sc_final, sc_isolated; /* dummy glyphs, one per tile position */
SplineChar *chars[4];
EncMap dummy_map;
int32 map[4], backmap[4];
uint8 sel[4];
CharView cv_first, cv_medial, cv_final, cv_isolated; /* embedded editors, one per tile */
CharView *lastcv;
/* ****** */
GWindow gw;
GGadget *mb;
GFont *plain, *bold;
int mbh;
int fh, as;
int mid_space, cv_y;
int cv_width, cv_height;
/* ****** */
struct tiledata *td; /* the tiling parameters being edited */
SplineFont *base_sf; /* the real font the tiles will be applied to */
uint8 done, oked;
} TilePathDlg;
extern void TPDCharViewInits(TilePathDlg *tpd, int cid);
extern void PTDCharViewInits(TilePathDlg *tpd, int cid);
#endif /* Tile Path */
/* GradientDlg: dialog for editing a gradient fill.  Embeds one CharView   */
/* over a dummy glyph in which the gradient line/stops are manipulated.    */
typedef struct gradientdlg {
struct cvcontainer base;
FontView dummy_fv;
SplineFont dummy_sf;
LayerInfo layerinfo[2];
SplineChar sc_grad; /* dummy glyph hosting the gradient editing */
SplineChar *chars[1];
EncMap dummy_map;
int32 map[1], backmap[1];
uint8 sel[1];
CharView cv_grad; /* embedded editor */
/* ****** */
GWindow gw;
GGadget *mb;
GFont *plain, *bold;
int mbh;
int fh, as;
int mid_space, cv_y;
int cv_width, cv_height;
/* ****** */
uint8 done, oked;
struct gradient *active; /* the gradient being edited */
} GradientDlg;
extern void GDDCharViewInits(GradientDlg *gdd,int cid);
/* StrokeDlg: the expand-stroke dialog.  Embeds one CharView previewing    */
/* the stroke result; strokeit is the callback applied on OK with the      */
/* chosen StrokeInfo.                                                      */
typedef struct strokedlg {
struct cvcontainer base;
FontView dummy_fv;
SplineFont dummy_sf;
LayerInfo layerinfo[2];
SplineChar sc_stroke; /* dummy glyph used for the preview */
SplineChar *chars[1];
EncMap dummy_map;
int32 map[1], backmap[1];
uint8 sel[1];
CharView cv_stroke; /* embedded preview editor */
int cv_width, cv_height;
GGadget *mb;
int mbh;
SplineSet *old_poly;
/* ****** */
int done;
GWindow gw;
CharView *cv; /* the real charview the stroke applies to (may be via fv) */
FontView *fv;
SplineFont *sf;
void (*strokeit)(void *,StrokeInfo *,int); /* invoked to perform the stroke when the user accepts */
StrokeInfo *si; /* the stroke parameters being edited */
GRect r1, r2;
int up[2];
int dontexpand;
} StrokeDlg;
extern void StrokeCharViewInits(StrokeDlg *sd,int cid);
/* UI bookkeeping for one lookup subtable row in the FontInfo lookups pane. */
/* NOTE(review): the field name "new" is valid C but makes this header     */
/* unusable from C++ -- confirm whether C++ inclusion matters here.        */
struct lksubinfo {
struct lookup_subtable *subtable;
unsigned int deleted: 1;
unsigned int new: 1;
unsigned int selected: 1;
unsigned int moved: 1;
};
/* UI bookkeeping for one OpenType lookup row, with its subtable rows. */
struct lkinfo {
OTLookup *lookup;
unsigned int open: 1; /* row is expanded to show its subtables */
unsigned int deleted: 1;
unsigned int new: 1;
unsigned int selected: 1;
unsigned int moved: 1;
int16 subtable_cnt, subtable_max; /* used / allocated entries in subtables */
struct lksubinfo *subtables;
};
/* The whole scrollable lookup list: rows plus scroll offsets. */
struct lkdata {
int cnt, max; /* used / allocated entries in all */
int off_top, off_left; /* scroll position */
struct lkinfo *all;
};
/* Tracks the anchor-point dialog(s) shown from FontInfo. */
struct anchor_shows {
CharView *cv;
SplineChar *sc;
int restart;
};
/* State of the FontInfo dialog: the font being edited, the active tab     */
/* (aspect) indices, dirty flags per section, and the lookup-list data     */
/* (tables[0]=GSUB-like, tables[1]=GPOS-like per GFI_Lookup* helpers).     */
struct gfi_data { /* FontInfo */
SplineFont *sf; /* the font whose info is being edited */
int def_layer;
GWindow gw;
int tn_active;
int private_aspect, ttfv_aspect, tn_aspect, tx_aspect, unicode_aspect; /* tab indices of the named panes */
int old_sel, old_aspect, old_lang, old_strid;
int ttf_set, names_set, tex_set; /* which sections have been initialized/edited */
int langlocalecode; /* MS code for the current locale */
unsigned int family_untitled: 1;
unsigned int human_untitled: 1;
unsigned int done: 1;
unsigned int mpdone: 1;
unsigned int lk_drag_and_drop: 1; /* a lookup row is being dragged */
unsigned int lk_dropablecursor: 1;
struct anchor_shows anchor_shows[2];
struct texdata texdata;
GFont *font;
int as, fh;
struct lkdata tables[2]; /* lookup lists (one per GSUB/GPOS side; see GFI_LookupEnableButtons's isgpos) */
int lkwidth, lkheight;
int first_sel_lookup, first_sel_subtable;
int last_panose_family;
};
/* The kern-format dialog: hosts two embedded FontViews (first/second      */
/* glyph sets) inside an fvcontainer and records the user's choices in     */
/* results.                                                                */
struct kf_dlg /* : fvcontainer */ {
struct fvcontainer base;
struct lookup_subtable *sub; /* the kerning subtable being built */
GWindow gw, dw;
GFont *plain, *bold;
int fh, as;
GGadget *mb, *guts, *topbox;
int mbh, label2_y, infoh;
SplineFont *sf;
int def_layer;
struct kf_results *results; /* where the dialog's outcome is stored */
int done;
FontView *active; /* whichever of the two embedded fontviews has focus */
FontView *first_fv;
FontView *second_fv;
};
/* What kind of family/collection to generate (see _FVMenuGenerate). */
enum genfam { gf_none, gf_macfamily, gf_ttc };
extern void FVMarkHintsOutOfDate(SplineChar *sc);
extern void FVRefreshChar(FontView *fv,int gid);
extern void _FVMenuOpen(FontView *fv);
extern int _FVMenuSave(FontView *fv);
extern int _FVMenuSaveAs(FontView *fv);
extern int _FVMenuGenerate(FontView *fv,int family);
extern void _FVCloseWindows(FontView *fv);
extern char *GetPostScriptFontName(char *defdir,int mult);
extern void MergeKernInfo(SplineFont *sf,EncMap *map);
extern int SFGenerateFont(SplineFont *sf,int layer, int family,EncMap *map);
extern void NonLinearDlg(FontView *fv,struct charview *cv);
extern void FVChangeChar(FontView *fv,int encoding);
extern void FVMergeFonts(FontView *fv);
extern void FVInterpolateFonts(FontView *fv);
extern void FVDeselectAll(FontView *fv);
extern void FVAutoWidth2(FontView *fv);
/*extern void FVAutoKern(FontView *fv);*/
/*extern void FVAutoWidth(FontView *fv);*/
extern void SC_MarkInstrDlgAsChanged(SplineChar *sc);
extern void PythonUI_Init(void);
extern void SCStroke(SplineChar *sc);
extern void PfaEditSetFallback(void);
extern void RecentFilesRemember(char *filename);
extern void LastFonts_Save(void);
struct debugger_context;
extern void DebuggerTerminate(struct debugger_context *dc);
extern void DebuggerReset(struct debugger_context *dc,real pointsizey, real pointsizex,int dpi,int dbg_fpgm, int is_bitmap);
extern struct debugger_context *DebuggerCreate(SplineChar *sc,int layer,real pointsizey,real pointsizex,int dpi,int dbg_fpgm, int is_bitmap);
enum debug_gotype { dgt_continue, dgt_step, dgt_next, dgt_stepout };
extern void DebuggerGo(struct debugger_context *dc,enum debug_gotype,DebugView *dv);
extern struct TT_ExecContextRec_ *DebuggerGetEContext(struct debugger_context *dc);
extern void DebuggerToggleBp(struct debugger_context *dc,int range,int ip);
extern int DebuggerBpCheck(struct debugger_context *dc,int range,int ip);
extern void DebuggerSetWatches(struct debugger_context *dc,int n, uint8 *w);
extern uint8 *DebuggerGetWatches(struct debugger_context *dc, int *n);
extern void DebuggerSetWatchStores(struct debugger_context *dc,int n, uint8 *w);
extern uint8 *DebuggerGetWatchStores(struct debugger_context *dc, int *n);
extern int DebuggerIsStorageSet(struct debugger_context *dc, int index);
extern void DebuggerSetWatchCvts(struct debugger_context *dc,int n, uint8 *w);
extern uint8 *DebuggerGetWatchCvts(struct debugger_context *dc, int *n);
extern int DebuggingFpgm(struct debugger_context *dc);
extern void PrintFFDlg(FontView *fv,SplineChar *sc,MetricsView *mv);
extern void PrintWindowClose(void);
extern void InsertTextDlg(CharView *cv);
extern char *Kern2Text(SplineChar *other,KernPair *kp,int isv);
extern char *PST2Text(PST *pst,SplineFont *sf);
void EmboldenDlg(FontView *fv, CharView *cv);
void CondenseExtendDlg(FontView *fv, CharView *cv);
void ObliqueDlg(FontView *fv, CharView *cv);
void GlyphChangeDlg(FontView *fv, CharView *cv, enum glyphchange_type gc);
void ItalicDlg(FontView *fv, CharView *cv);
void ChangeXHeightDlg(FontView *fv,CharView *cv);
extern int FVParseSelectByPST(FontView *fv,struct lookup_subtable *sub,
int search_type);
extern void DropChars2Text(GWindow gw, GGadget *glyphs,GEvent *event);
extern void FVReplaceOutlineWithReference( FontView *fv, double fudge );
extern void SVDestroy(struct searchview *sv);
extern int SLICount(SplineFont *sf);
extern unichar_t *ClassName(const char *name,uint32 feature_tag,
uint16 flags, int script_lang_index, int merge_with, int act_type,
int macfeature,SplineFont *sf);
extern unichar_t *DecomposeClassName(const unichar_t *clsnm, unichar_t **name,
uint32 *feature_tag, int *macfeature,
uint16 *flags, uint16 *script_lang_index,int *merge_with,int *act_type,
SplineFont *sf);
extern PST *AddSubs(PST *last,uint32 tag,char *name,uint16 flags,
uint16 sli,SplineChar *sc);
extern void FVSetUIToMatch(FontView *destfv,FontView *srcfv);
extern void FVScrollToChar(FontView *fv,int i);
extern void FVRegenChar(FontView *fv,SplineChar *sc);
extern FontView *FontNew(void);
extern void _MenuWarnings(GWindow gw,struct gmenuitem *mi,GEvent *e);
extern void MenuPrefs(GWindow base,struct gmenuitem *mi,GEvent *e);
extern void MenuXRes(GWindow base,struct gmenuitem *mi,GEvent *e);
extern void MenuSaveAll(GWindow base,struct gmenuitem *mi,GEvent *e);
extern void MenuExit(GWindow base,struct gmenuitem *mi,GEvent *e);
extern void MenuHelp(GWindow base,struct gmenuitem *mi,GEvent *e);
extern void MenuIndex(GWindow base,struct gmenuitem *mi,GEvent *e);
extern void MenuAbout(GWindow base,struct gmenuitem *mi,GEvent *e);
extern void MenuLicense(GWindow base,struct gmenuitem *mi,GEvent *e);
extern void MenuNew(GWindow gw,struct gmenuitem *mi,GEvent *e);
extern void WindowMenuBuild(GWindow base,struct gmenuitem *mi,GEvent *);
extern void MenuRecentBuild(GWindow base,struct gmenuitem *mi,GEvent *);
extern void MenuScriptsBuild(GWindow base,struct gmenuitem *mi,GEvent *);
extern void mb2FreeGetText(GMenuItem2 *mb);
extern void mb2DoGetText(GMenuItem2 *mb);
extern void mbFreeGetText(GMenuItem *mb);
extern void mbDoGetText(GMenuItem *mb);
extern int RecentFilesAny(void);
extern void _aplistbuild(struct gmenuitem *mi,SplineFont *sf,
void (*func)(GWindow,struct gmenuitem *,GEvent *));
extern int32 *ParseBitmapSizes(GGadget *g,char *msg,int *err);
extern GTextInfo *AddMacFeatures(GTextInfo *opentype,enum possub_type type,SplineFont *sf);
extern unichar_t *AskNameTag(char *title,unichar_t *def,uint32 def_tag,uint16 flags,
int script_lang_index, enum possub_type type, SplineFont *sf, SplineChar *default_script,
int merge_with,int act_type);
extern unichar_t *ShowScripts(unichar_t *usedef);
extern GTextInfo *SFLangList(SplineFont *sf,int addfinal,SplineChar *default_script);
extern GTextInfo **SFLangArray(SplineFont *sf,int addfinal);
extern int ScriptLangList(SplineFont *sf,GGadget *list,int sli);
extern void GListDelSelected(GGadget *list);
extern void GListMoveSelected(GGadget *list,int offset);
extern GTextInfo *GListChangeLine(GGadget *list,int pos, const unichar_t *line);
extern GTextInfo *GListAppendLine(GGadget *list,const unichar_t *line,int select);
extern GTextInfo *GListChangeLine8(GGadget *list,int pos, const char *line);
extern GTextInfo *GListAppendLine8(GGadget *list,const char *line,int select);
extern void CharInfoInit(void);
extern void SCLigCaretCheck(SplineChar *sc,int clean);
extern char *DevTab_Dlg(GGadget *g, int r, int c);
extern int DeviceTableOK(char *dvstr, int *_low, int *_high);
extern void VRDevTabParse(struct vr *vr,struct matrix_data *md);
extern DeviceTable *DeviceTableParse(DeviceTable *dv,char *dvstr);
extern void DevTabToString(char **str,DeviceTable *adjust);
extern void ValDevTabToStrings(struct matrix_data *mds,int first_offset,ValDevTab *adjust);
extern void KpMDParse(SplineChar *sc,struct lookup_subtable *sub,
struct matrix_data *possub,int rows,int cols,int i);
extern void GFI_LookupEnableButtons(struct gfi_data *gfi, int isgpos);
extern void GFI_LookupScrollbars(struct gfi_data *gfi, int isgpos, int refresh);
extern void FontInfo(SplineFont *sf,int layer,int aspect,int sync);
extern void FontInfoDestroy(SplineFont *sf);
extern void FontMenuFontInfo(void *fv);
extern struct enc *MakeEncoding(SplineFont *sf, EncMap *map);
extern void LoadEncodingFile(void);
extern void RemoveEncoding(void);
extern void SFPrivateInfo(SplineFont *sf);
extern void FVDelay(FontView *fv,void (*func)(FontView *));
extern void GFI_FinishContextNew(struct gfi_data *d,FPST *fpst, int success);
extern void SCPreparePopup(GWindow gw,SplineChar *sc, struct remap *remap, int enc, int actualuni);
/* Rendering-mode flags for CVDrawSplineSetSpecialized() and related
 * spline-drawing routines: how a SplinePointList is painted. */
enum outlinesfm_flags {
sfm_stroke=0x1,
sfm_fill=0x2,
sfm_nothing=0x4,
sfm_stroke_trans = 0x8,
sfm_clip = 0x16 /* NOTE(review): 0x16 is not a single bit (0x16 = 0x10|0x04|0x02)
                 * while every sibling is a power of two; 0x10 was possibly
                 * intended. Verify any callers that bit-test these flags
                 * before changing the value — it would be an ABI/behavior
                 * change. */
};
extern void CVDrawSplineSetSpecialized( CharView *cv, GWindow pixmap, SplinePointList *set,
Color fg, int dopoints, DRect *clip,
enum outlinesfm_flags strokeFillMode,
Color AlphaChannelOverride );
extern void CVDrawSplineSet(CharView *cv, GWindow pixmap, SplinePointList *set,
Color fg, int dopoints, DRect *clip );
extern void CVDrawSplineSetOutlineOnly(CharView *cv, GWindow pixmap, SplinePointList *set,
Color fg, int dopoints, DRect *clip, enum outlinesfm_flags strokeFillMode );
extern GWindow CVMakeTools(CharView *cv);
extern GWindow CVMakeLayers(CharView *cv);
extern GWindow BVMakeTools(BitmapView *bv);
extern GWindow BVMakeLayers(BitmapView *bv);
extern void CVSetLayer(CharView *cv,int layer);
extern int CVPaletteMnemonicCheck(GEvent *event);
extern int TrueCharState(GEvent *event);
extern void CVToolsPopup(CharView *cv, GEvent *event);
extern void BVToolsPopup(BitmapView *bv, GEvent *event);
extern real CVRoundRectRadius(void);
extern int CVRectElipseCenter(void);
extern void CVRectEllipsePosDlg(CharView *cv);
extern real CVStarRatio(void);
extern int CVPolyStarPoints(void);
extern StrokeInfo *CVFreeHandInfo(void);
extern void BVToolsSetCursor(BitmapView *bv, int state,char *device);
extern void CVToolsSetCursor(CharView *cv, int state,char *device);
extern int CVPaletteIsVisible(CharView *cv,int which);
extern void CVPaletteSetVisible(CharView *cv,int which,int visible);
extern void CVPalettesRaise(CharView *cv);
extern void CVLayersSet(CharView *cv);
extern void _CVPaletteActivate(CharView *cv,int force,int docking_changed);
extern void CVPaletteActivate(CharView *cv);
extern void CV_LayerPaletteCheck(SplineFont *sf);
extern void CVPalettesHideIfMine(CharView *cv);
extern int BVPaletteIsVisible(BitmapView *bv,int which);
extern void BVPaletteSetVisible(BitmapView *bv,int which,int visible);
extern void BVPaletteActivate(BitmapView *bv);
extern void BVPalettesHideIfMine(BitmapView *bv);
extern void BVPaletteColorChange(BitmapView *bv);
extern void BVPaletteColorUnderChange(BitmapView *bv,int color);
extern void BVPaletteChangedChar(BitmapView *bv);
extern void CVPaletteDeactivate(void);
extern void PalettesChangeDocking(void);
extern int CVPalettesWidth(void);
extern int BVPalettesWidth(void);
extern int CVInSpiro( CharView *cv );
extern void CVDoTransform(CharView *cv, enum cvtools cvt );
// apply transform to specified layer
extern void CVTransFuncLayer(CharView *cv,Layer *ly,real transform[6], enum fvtrans_flags flags);
// apply transform to the current layer only
extern void CVTransFunc(CharView *cv,real transform[6],enum fvtrans_flags);
// apply transform to all layers
extern void CVTransFuncAllLayers(CharView *cv,real transform[6], enum fvtrans_flags flags);
/* Option bit-flags for the Transform dialog (may be OR'd together). */
enum transdlg_flags { tdf_enableback=0x1, tdf_enablekerns=0x2,
tdf_defaultkerns=0x4, tdf_addapply=0x8 };
/* Create the Transform dialog.
 *  transfunc  — callback applying the chosen transform to `data`
 *  getorigin  — callback reporting the transform origin into a BasePoint
 *  flags      — transdlg_flags controlling which options are offered
 *  cvt        — which CharView tool invoked the dialog */
extern void TransformDlgCreate(void *data,void (*transfunc)(void *,real *,int,BVTFunc *,enum fvtrans_flags),
int (*getorigin)(void *,BasePoint *,int), enum transdlg_flags flags,
enum cvtools cvt);
extern void BitmapDlg(FontView *fv,SplineChar *sc, int isavail);
extern int SimplifyDlg(SplineFont *sf,struct simplifyinfo *smpl);
extern void CVReviewHints(CharView *cv);
extern void CVCreateHint(CharView *cv,int ishstem,int preserveundoes);
extern void SCRemoveSelectedMinimumDistances(SplineChar *sc,int inx);
extern int CVExport(CharView *cv);
extern int BVExport(BitmapView *bv);
extern void DrawAnchorPoint(GWindow pixmap,int x, int y,int selected);
extern void DefaultY(GRect *pos);
extern void CVDrawRubberRect(GWindow pixmap, CharView *cv);
extern void CVInfoDraw(CharView *cv, GWindow pixmap );
extern void CVChar(CharView *cv, GEvent *event );
extern void PI_ShowHints(SplineChar *sc, GGadget *list, int set);
extern GTextInfo *SCHintList(SplineChar *sc,HintMask *);
extern void CVResize(CharView *cv );
extern CharView *CharViewCreate(SplineChar *sc,FontView *fv,int enc);
extern void CharViewFinishNonStatic();
/**
* Extended version of CharViewCreate() which allows a window to be created but
* not displayed.
*/
extern CharView *CharViewCreateExtended(SplineChar *sc, FontView *fv,int enc, int show );
extern CharViewTab *CVGetActiveTab(CharView *cv);
extern void CharViewFree(CharView *cv);
extern int CVValid(SplineFont *sf, SplineChar *sc, CharView *cv);
extern void CVSetCharChanged(CharView *cv,int changed);
extern int CVAnySel(CharView *cv, int *anyp, int *anyr, int *anyi, int *anya);
extern int CVAnySelPoints(CharView *cv);
/**
* Get all the selected points in the current cv.
* Caller must g_list_free() the returned value.
*/
extern GList_Glib* CVGetSelectedPoints(CharView *cv);
extern void CVSelectPointAt(CharView *cv);
extern int CVClearSel(CharView *cv);
extern int CVSetSel(CharView *cv,int mask);
extern void CVInvertSel(CharView *cv);
extern int CVAllSelected(CharView *cv);
extern SplinePointList *CVAnySelPointList(CharView *cv);
extern int CVAnySelPoint(CharView *cv, SplinePoint **selsp, spiro_cp **selcp);
extern int CVOneThingSel(CharView *cv, SplinePoint **sp, SplinePointList **spl,
RefChar **ref, ImageList **img, AnchorPoint **ap, spiro_cp **cp);
extern int CVOneContourSel(CharView *cv, SplinePointList **_spl,
RefChar **ref, ImageList **img);
extern void CVInfoDrawText(CharView *cv, GWindow pixmap );
extern void CVImport(CharView *cv);
extern void BVImport(BitmapView *bv);
extern void FVImport(FontView *bv);
extern void CVFindCenter(CharView *cv, BasePoint *bp, int nosel);
extern void CVStroke(CharView *cv);
extern void FVStroke(FontView *fv);
extern void FreeHandStrokeDlg(StrokeInfo *si);
extern void OutlineDlg(FontView *fv, CharView *cv,MetricsView *mv,int isinline);
extern void ShadowDlg(FontView *fv, CharView *cv,MetricsView *mv,int wireframe);
extern void CVTile(CharView *cv);
extern void FVTile(FontView *fv);
extern void CVPatternTile(CharView *cv);
extern void FVPatternTile(FontView *fv);
extern void SCCharInfo(SplineChar *sc,int deflayer,EncMap *map,int enc);
extern void CharInfoDestroy(struct charinfo *ci);
extern SplineChar *SuffixCheck(SplineChar *sc,char *suffix);
extern void SCSubtableDefaultSubsCheck(SplineChar *sc, struct lookup_subtable *sub, struct matrix_data *possub, int col_cnt, int r,int layer);
extern GImage *PST_GetImage(GGadget *pstk,SplineFont *sf,int def_layer,
struct lookup_subtable *sub,int popup_r, SplineChar *sc );
extern GImage *NameList_GetImage(SplineFont *sf,SplineChar *sc,int def_layer,
char *namelist, int isliga );
extern GImage *GV_GetConstructedImage(SplineChar *sc,int def_layer, struct glyphvariants *gv,
int is_horiz);
extern GImage *SC_GetLinedImage(SplineChar *sc, int def_layer, int pos, int is_italic_cor);
extern struct glyphvariants *GV_ParseConstruction(struct glyphvariants *gv,
struct matrix_data *stuff, int rows, int cols);
extern void GV_ToMD(GGadget *g, struct glyphvariants *gv);
extern void CVGetInfo(CharView *cv);
extern void CVPGetInfo(CharView *cv);
extern int SCUsedBySubs(SplineChar *sc);
extern void SCSubBy(SplineChar *sc);
extern void SCRefBy(SplineChar *sc);
extern void ApGetInfo(CharView *cv, AnchorPoint *ap);
extern void CVMakeClipPath(CharView *cv);
extern void CVAddAnchor(CharView *cv);
extern AnchorClass *AnchorClassUnused(SplineChar *sc,int *waslig);
extern void FVSetWidth(FontView *fv,enum widthtype wtype);
extern void CVSetWidth(CharView *cv,enum widthtype wtype);
extern void GenericVSetWidth(FontView *fv,SplineChar* sc,enum widthtype wtype);
extern void CVChangeSC(CharView *cv, SplineChar *sc );
extern Undoes *CVPreserveTState(CharView *cv);
/**
* If isTState > 0 then CVPreserveTState(cv)
* otherwise CVPreserveState(cv)
*/
extern Undoes *CVPreserveMaybeState(CharView *cv, int isTState );
extern void CVRestoreTOriginalState(CharView *cv);
extern void CVUndoCleanup(CharView *cv);
extern void AdjustControls(SplinePoint *sp);
extern void CVAdjustPoint(CharView *cv, SplinePoint *sp);
extern void CVMergeSplineSets(CharView *cv, SplinePoint *active, SplineSet *activess,
SplinePoint *merge, SplineSet *mergess);
extern void CVAdjustControl(CharView *cv,BasePoint *cp, BasePoint *to);
extern int CVMoveSelection(CharView *cv, real dx, real dy, uint32 input_state);
extern int CVTestSelectFromEvent(CharView *cv,GEvent *event);
extern void CVMouseMovePen(CharView *cv, PressedOn *p, GEvent *event);
extern void CVMouseUpPoint(CharView *cv,GEvent *event);
extern int CVMouseMovePointer(CharView *cv, GEvent *event);
extern void CVMouseDownPointer(CharView *cv, FindSel *fs, GEvent *event);
extern void CVMouseDownRuler(CharView *cv, GEvent *event);
extern void CVMouseMoveRuler(CharView *cv, GEvent *event);
extern int CVMouseAtSpline(CharView *cv,GEvent *event);
extern void CVMouseUpRuler(CharView *cv, GEvent *event);
extern void CVMouseMoveHand(CharView *cv, GEvent *event);
extern void CVMouseDownFreeHand(CharView *cv, GEvent *event);
extern void CVMouseMoveFreeHand(CharView *cv, GEvent *event);
extern void CVMouseUpFreeHand(CharView *cv, GEvent *event);
extern void CVMouseDownShape(CharView *cv,GEvent *event);
extern void CPStartInfo(CharView *cv, GEvent *event);
extern void CPUpdateInfo(CharView *cv, GEvent *event);
extern void CPEndInfo(CharView *cv);
extern void BVChar(BitmapView *cv, GEvent *event );
extern void CVMouseDownPoint(CharView *cv,GEvent *event);
extern void CVMouseMovePoint(CharView *cv,PressedOn *);
extern void CVMouseUpPointer(CharView *cv );
extern void CVCheckResizeCursors(CharView *cv);
extern void CVMouseDownHand(CharView *cv);
extern void CVMouseUpHand(CharView *cv);
extern void CVMouseDownTransform(CharView *cv);
extern void CVMouseMoveTransform(CharView *cv);
extern void CVMouseUpTransform(CharView *cv);
extern void CVMouseDownKnife(CharView *cv);
extern void CVMouseMoveKnife(CharView *cv,PressedOn *);
extern void CVMouseUpKnife(CharView *cv,GEvent *event);
extern void CVMouseMoveShape(CharView *cv);
extern void CVMouseUpShape(CharView *cv);
extern void LogoExpose(GWindow pixmap,GEvent *event, GRect *r,enum drawmode dm);
extern void CVDebugPointPopup(CharView *cv);
extern int GotoChar(SplineFont *sf,EncMap *map, int *merge_with_selection);
extern void CVShowPoint(CharView *cv, BasePoint *me);
extern void BitmapViewFinishNonStatic();
extern BitmapView *BitmapViewCreate(BDFChar *bc, BDFFont *bdf, FontView *fv,int enc);
extern BitmapView *BitmapViewCreatePick(int enc, FontView *fv);
extern void BitmapViewFree(BitmapView *bv);
extern void BVMenuRotateInvoked(GWindow gw,struct gmenuitem *mi, GEvent *e);
extern void BVRotateBitmap(BitmapView *bv,enum bvtools type );
extern int BVColor(BitmapView *bv);
extern void BCGeneralFunction(BitmapView *bv,
void (*SetPoint)(BitmapView *,int x, int y, void *data),void *data);
extern char *BVFlipNames[];
extern void BVChangeBC(BitmapView *bv, BDFChar *bc, int fitit );
extern void MVSetSCs(MetricsView *mv, SplineChar **scs);
extern void MVRefreshChar(MetricsView *mv, SplineChar *sc);
extern void MVRegenChar(MetricsView *mv, SplineChar *sc);
extern void MVReKern(MetricsView *mv);
extern void MetricsViewFinishNonStatic();
extern MetricsView *MetricsViewCreate(FontView *fv,SplineChar *sc,BDFFont *bdf);
extern void MetricsViewFree(MetricsView *mv);
extern void MVRefreshAll(MetricsView *mv);
extern void MV_FriendlyFeatures(GGadget *g, int pos);
extern GTextInfo *SLOfFont(SplineFont *sf);
extern void DoPrefs(void);
extern void DoXRes(void);
extern void PointerDlg(CharView *cv);
extern void GListAddStr(GGadget *list,unichar_t *str, void *ud);
extern void GListReplaceStr(GGadget *list,int index, unichar_t *str, void *ud);
extern struct macname *NameGadgetsGetNames( GWindow gw );
extern void NameGadgetsSetEnabled( GWindow gw, int enable );
extern int GCDBuildNames(GGadgetCreateData *gcd,GTextInfo *label,int pos,struct macname *names);
extern void GCDFillMacFeat(GGadgetCreateData *mfgcd,GTextInfo *mflabels, int width,
MacFeat *all, int fromprefs, GGadgetCreateData *boxes,
GGadgetCreateData **array);
extern void Prefs_ReplaceMacFeatures(GGadget *list);
extern unichar_t *FVOpenFont(char *title, const char *defaultfile, int mult);
extern void ShowAboutScreen(void);
extern void DelayEvent(void (*func)(void *), void *data);
extern void FindProblems(FontView *fv,CharView *cv,SplineChar *sc);
/* Which operation the "constrain selection" command performs on the
 * currently selected points/regions. */
typedef enum
{
constrainSelection_AveragePoints = 0,
constrainSelection_SpacePoints = 1,
constrainSelection_SpaceSelectedRegions = 2
} constrainSelection_t;
/* Apply the requested constraint operation to the selection in `cv`. */
extern void CVConstrainSelection(CharView *cv, constrainSelection_t type);
extern void CVMakeParallel(CharView *cv);
extern void ScriptDlg(FontView *fv,CharView *cv);
# if HANYANG
extern void MenuNewComposition(GWindow gw, struct gmenuitem *, GEvent *);
extern void CVDisplayCompositions(GWindow gw, struct gmenuitem *, GEvent *);
extern void Disp_DoFinish(struct jamodisplay *d, int cancel);
extern void Disp_RefreshChar(SplineFont *sf,SplineChar *sc);
extern void Disp_DefaultTemplate(CharView *cv);
# endif
extern SearchView *SVCreate(FontView *fv);
extern void SVCharViewInits(SearchView *sv);
extern void SV_DoClose(struct cvcontainer *cvc);
extern void SVMakeActive(SearchView *sv,CharView *cv);
extern int SVAttachFV(FontView *fv,int ask_if_difficult);
extern void SVDetachFV(FontView *fv);
extern void MKDMakeActive(MathKernDlg *mkd,CharView *cv);
extern void MKD_DoClose(struct cvcontainer *cvc);
extern void MKDCharViewInits(MathKernDlg *mkd);
extern void MathKernDialog(SplineChar *sc,int def_layer);
extern void ShowAtt(SplineFont *sf,int def_layer);
extern void FontCompareDlg(FontView *fv);
extern void SFShowKernPairs(SplineFont *sf,SplineChar *sc,AnchorClass *ac,int layer);
extern void SFShowLigatures(SplineFont *sf,SplineChar *sc);
extern void SCEditInstructions(SplineChar *sc);
extern void SFEditTable(SplineFont *sf, uint32 tag);
extern void IIScrollTo(struct instrinfo *ii,int ip,int mark_stop);
extern void IIReinit(struct instrinfo *ii,int ip);
extern int ii_v_e_h(GWindow gw, GEvent *event);
extern void instr_scroll(struct instrinfo *ii,struct sbevent *sb);
extern void CVGridFitChar(CharView *cv);
/**
* If a live preview of grid fit is somehow in effect, call CVGridFitChar() for us.
* A caller can call here after a change and any CVGridFitChar() will be updated if need be.
*/
extern void CVGridHandlePossibleFitChar(CharView *cv);
extern void CVFtPpemDlg(CharView *cv,int debug);
extern void SCDeGridFit(SplineChar *sc);
extern void SCReGridFit(SplineChar *sc,int layer);
extern void CVDebugReInit(CharView *cv,int restart_debug,int dbg_fpgm);
extern void CVDebugFree(DebugView *dv);
extern int DVChar(DebugView *dv, GEvent *e);
extern void KernClassD(KernClass *kc, SplineFont *sf, int layer, int isv);
extern void ShowKernClasses(SplineFont *sf,MetricsView *mv,int layer,int isv);
extern void KCLD_End(struct kernclasslistdlg *kcld);
extern void KCLD_MvDetach(struct kernclasslistdlg *kcld,MetricsView *mv);
extern void KernPairD(SplineFont *sf,SplineChar *sc1,SplineChar *sc2,int layer, int isv);
extern void KCD_DrawGlyph(GWindow pixmap,int x,int baseline,BDFChar *bdfc,int mag);
extern GTextInfo *BuildFontList(FontView *except);
extern void TFFree(GTextInfo *tf);
extern void AnchorControl(SplineChar *sc,AnchorPoint *ap,int layer);
extern void AnchorControlClass(SplineFont *_sf,AnchorClass *ac,int layer);
extern void FVSelectByPST(FontView *fv);
/* Which hint data the histogram dialog displays: horizontal stems,
 * vertical stems, or blue values. */
enum hist_type { hist_hstem, hist_vstem, hist_blues };
struct psdict; /* forward declaration; full definition lives elsewhere */
/* Show the histogram dialog for `sf`. `selected` (if non-NULL) restricts the
 * data to the glyphs marked selected in `map`; `which` picks the data set. */
extern void SFHistogram(SplineFont *sf,int layer, struct psdict *private,uint8 *selected,
EncMap *map, enum hist_type which);
extern void ContextChainEdit(SplineFont *sf,FPST *fpst,
struct gfi_data *gfi,unichar_t *newname,int layer);
extern char *cu_copybetween(const unichar_t *start, const unichar_t *end);
extern void StateMachineEdit(SplineFont *sf,ASM *sm,struct gfi_data *d);
extern void GFI_FinishSMNew(struct gfi_data *d,ASM *sm, int success,int isnew);
extern void MMChangeBlend(MMSet *mm,FontView *fv,int tonew);
extern void MMWizard(MMSet *mm);
extern int LayerDialog(Layer *layer,SplineFont *sf);
extern void CVLayerChange(CharView *cv);
extern int PointOfViewDlg(struct pov_data *pov,SplineFont *sf,int flags);
extern SplineChar *FVMakeChar(FontView *fv,int i);
extern void CVPointOfView(CharView *cv,struct pov_data *);
extern void DVCreateGloss(DebugView *dv);
extern void DVMarkPts(DebugView *dv,SplineSet *ss);
extern int CVXPos(DebugView *dv,int offset,int width);
extern GMenuItem *GetEncodingMenu(void (*func)(GWindow,GMenuItem *,GEvent *),
Encoding *current);
extern GTextInfo *TIFromName(const char *name);
/* Bit-flags describing a lookup subtable being created/edited. Each
 * alternative has both a "yes" and a "no" flag so that leaving both
 * clear can mean "unspecified". */
enum subtable_data_flags {
/* I have flags for each alternative because I want "unspecified" to be */
/* an option */
sdf_kernclass = 0x01,
sdf_kernpair = 0x02,
sdf_verticalkern = 0x04,
sdf_horizontalkern = 0x08,
sdf_dontedit = 0x10
};
/* Context handed to the subtable-creation/edit dialogs. */
struct subtable_data {
int flags; /* OR of subtable_data_flags above */
SplineChar *sc; /* glyph the dialog was invoked for, when relevant */
};
extern GTextInfo **SFLookupListFromType(SplineFont *sf, int lookup_type );
extern GTextInfo *SFLookupArrayFromType(SplineFont *sf, int lookup_type );
extern GTextInfo *SFLookupArrayFromMask(SplineFont *sf, int lookup_mask );
extern GTextInfo **SFSubtablesOfType(SplineFont *sf, int lookup_type, int kernclass, int add_none);
extern GTextInfo *SFSubtableListOfType(SplineFont *sf, int lookup_type, int kernclass, int add_none);
extern struct lookup_subtable *SFNewLookupSubtableOfType(SplineFont *sf, int lookup_type, struct subtable_data *sd, int def_layer );
extern int EditLookup(OTLookup *otl,int isgpos,SplineFont *sf);
extern int EditSubtable(struct lookup_subtable *sub,int isgpos,SplineFont *sf,
struct subtable_data *sd,int def_layer);
extern void _LookupSubtableContents(SplineFont *sf, struct lookup_subtable *sub,
struct subtable_data *sd,int def_layer);
extern char *SCNameUniStr(SplineChar *sc);
extern unichar_t *uSCNameUniStr(SplineChar *sc);
extern char *SFNameList2NameUni(SplineFont *sf, char *str);
extern unichar_t **SFGlyphNameCompletion(SplineFont *sf,GGadget *t,int from_tab,
int new_name_after_space);
extern char *GlyphNameListDeUnicode( char *str );
extern void AddRmLang(SplineFont *sf, struct lkdata *lk,int add_lang);
extern void FVMassGlyphRename(FontView *fv);
extern void SFBdfProperties(SplineFont *sf, EncMap *map, BDFFont *thisone);
extern GMenuItem2 helplist[];
extern BasePoint last_ruler_offset[];
extern void CVCopyLayerToLayer(CharView *cv);
extern void FVCopyLayerToLayer(FontView *fv);
extern void CVCompareLayerToLayer(CharView *cv);
extern void FVCompareLayerToLayer(FontView *fv);
extern void MathInit(void);
extern void SFMathDlg(SplineFont *sf,int def_layer);
extern GMenuItem2 *cvpy_menu, *fvpy_menu;
extern void cvpy_tllistcheck(GWindow gw,struct gmenuitem *mi,GEvent *e);
extern void fvpy_tllistcheck(GWindow gw,struct gmenuitem *mi,GEvent *e);
extern GMenuItem2 *cv_menu, *fv_menu;
extern void cv_tl2listcheck(GWindow gw,struct gmenuitem *mi,GEvent *e);
extern void fv_tl2listcheck(GWindow gw,struct gmenuitem *mi,GEvent *e);
extern void SFValidationWindow(SplineFont *sf,int layer, enum fontformat format);
extern void ValidationDestroy(SplineFont *sf);
extern const char *UI_TTFNameIds(int id);
extern const char *UI_MSLangString(int language);
extern void FontInfoInit(void);
extern void LookupUIInit(void);
extern enum psstrokeflags Ps_StrokeFlagsDlg(void);
extern struct cidmap *AskUserForCIDMap(void);
extern void DefineGroups(struct fontview *fv);
extern void DisplayGroups(struct fontview *fv);
extern struct Base *SFBaselines(SplineFont *sf,struct Base *old,int is_vertical);
extern void JustifyDlg(SplineFont *sf);
extern char *GlyphListDlg(SplineFont *sf, char *glyphstr);
extern void DeltaSuggestionDlg(FontView *fv,CharView *cv);
extern void QGRmFontView(struct qg_data *qg,FontView *fv);
extern void QGRmCharView(struct qg_data *qg,CharView *cv);
extern struct hslrgb *SFFontCols(SplineFont *sf,struct hslrgb fontcols[6]);
extern Color view_bgcol; /* Background color for views */
extern void MVColInit(void);
extern void CVColInit( void );
extern void FontViewRemove(FontView *fv);
extern void FontViewFinishNonStatic();
extern void FVChar(FontView *fv,GEvent *event);
extern void FVDrawInfo(FontView *fv,GWindow pixmap,GEvent *event);
extern void FVRedrawAllCharViews(FontView *fv);
extern void KFFontViewInits(struct kf_dlg *kf,GGadget *drawable);
extern char *GlyphSetFromSelection(SplineFont *sf,int def_layer,char *current);
extern void ME_ListCheck(GGadget *g,int r, int c, SplineFont *sf);
extern void ME_SetCheckUnique(GGadget *g,int r, int c, SplineFont *sf);
extern void ME_ClassCheckUnique(GGadget *g,int r, int c, SplineFont *sf);
extern void PI_Destroy(struct dlistnode *node);
struct gidata;
extern void PIChangePoint(struct gidata *ci);
extern void CVRegenFill(CharView *cv);
extern void RulerDlg(CharView *cv);
extern int CVCountSelectedPoints(CharView *cv);
extern void _CVMenuInsertPt(CharView *cv);
extern void _CVMenuNamePoint(CharView *cv, SplinePoint *sp);
extern void _CVMenuNameContour(CharView *cv);
extern void Prefs_LoadDefaultPreferences( void );
extern void SPSelectNextPoint( SplinePoint *sp, int state );
extern void SPSelectPrevPoint( SplinePoint *sp, int state );
/**
 * Is the next BCP for the sp selected, and is it the primary BCP for the selection?
 * @see SPIsNextCPSelected
 */
extern bool SPIsNextCPSelectedSingle( SplinePoint *sp, CharView *cv );
/**
 * Is the prev BCP for the sp selected, and is it the primary BCP for the selection?
 * @see SPIsNextCPSelected
 */
extern bool SPIsPrevCPSelectedSingle( SplinePoint *sp, CharView *cv );
/**
 * Is the next BCP for the sp selected? It can be the primary or any
 * of the secondary selected BCP.
 *
 * The last selected BCP is the 'primary' selected BCP. Code which
 * only handles a single selected BCP will only honor the primary
 * selected BCP.
 *
 * There can also be one or more secondary selected BCP. These might be
 * drawn with slightly less highlight graphically and are only handled
 * by code which has been updated to allow multiple selected BCP to be
 * operated on at once.
 */
extern bool SPIsNextCPSelected( SplinePoint *sp, CharView *cv );
/**
 * Is the prev BCP for the sp selected? It can be the primary or any of the secondary selected BCP.
 *
 * @see SPIsNextCPSelected
 */
extern bool SPIsPrevCPSelected( SplinePoint *sp, CharView *cv );
/* User-data payload for the FE_adjustBCPByDelta* visitor functions below. */
typedef struct FE_adjustBCPByDeltaDataS
{
CharView *cv; //< used to update view
real dx; //< Add this to the BCP x
real dy; //< Add this to the BCP y
int keyboarddx; //< presumably set when the delta came from a keyboard nudge — TODO confirm against callers
} FE_adjustBCPByDeltaData;
/**
 * Visitor function type for visitSelectedControlPoints().
 * Invoked once per visited BCP with the hash-table key/value pair, the
 * owning SplinePoint, the control point itself (`which`), whether it is
 * the "next" (as opposed to "prev") control point, and the caller-supplied
 * user-data pointer.
 */
typedef void (*visitSelectedControlPointsVisitor) ( void* key,
void* value,
SplinePoint* sp,
BasePoint *which,
bool isnext,
void* udata );
/**
* Visitor function to move each BCP by data->dx/data->dy
*
*
* Visitor: visitSelectedControlPointsVisitor
* UsedBy: CVFindAndVisitSelectedControlPoints
*/
extern void FE_adjustBCPByDelta( void* key,
void* value,
SplinePoint* sp,
BasePoint *which,
bool isnext,
void* udata );
extern void FE_adjustBCPByDeltaWhilePreservingBCPAngle( void* key,
void* value,
SplinePoint* sp,
BasePoint *which,
bool isnext,
void* udata );
/**
* Visitor function to unselect every BCP passed
*
* Visitor: visitSelectedControlPointsVisitor
* UsedBy: CVFindAndVisitSelectedControlPoints
* CVUnselectAllBCP
*
* @see SPIsNextCPSelected
*/
extern void FE_unselectBCP( void* key,
void* value,
SplinePoint* sp,
BasePoint *which,
bool isnext,
void* udata );
extern void FE_touchControlPoint( void* key,
void* value,
SplinePoint* sp,
BasePoint *which,
bool isnext,
void* udata );
/**
* Find all the selected BCP and apply the visitor function f to them
* passing the user data pointer udata to the 'f' visitor.
*
* This function doesn't use udata at all, it simply passes it on to
* your visitor function so it may do something with it like record
* results or take optional parameters.
*
* If preserveState is true and there are selected BCP then
* CVPreserveState() is called before the visitor function.
*/
extern void CVFindAndVisitSelectedControlPoints( CharView *cv, bool preserveState,
visitSelectedControlPointsVisitor f, void* udata );
/**
* NOTE: doesn't do all, just all on selected spline.
*/
extern void CVVisitAllControlPoints( CharView *cv, bool preserveState,
visitSelectedControlPointsVisitor f, void* udata );
/**
* Unselect all the BCP which are currently selected.
*/
extern void CVUnselectAllBCP( CharView *cv );
/**
 * This will call your visitor function 'f' on every selected BCP in `col`,
 * regardless of whether the BCP is the next or prev BCP for its
 * splinepoint.
 *
 * This function doesn't use udata at all; it simply passes it on to
 * your visitor function so it may do something with it, like record
 * results or take optional parameters.
 */
extern void visitSelectedControlPoints( GHashTable *col, visitSelectedControlPointsVisitor f, gpointer udata );
/**
* NOTE: doesn't do all, just all on selected spline.
*/
extern void visitAllControlPoints( GHashTable *col, visitSelectedControlPointsVisitor f, gpointer udata );
extern void CVVisitAdjacentToSelectedControlPoints( CharView *cv, bool preserveState,
visitSelectedControlPointsVisitor f, void* udata );
extern void CVFreePreTransformSPL( CharView* cv );
extern bool CVShouldInterpolateCPsOnMotion( CharView* cv );
extern int CVNearRBearingLine( CharView* cv, real x, real fudge );
extern int CVNearLBearingLine( CharView* cv, real x, real fudge );
extern void CVMenuConstrain(GWindow gw, struct gmenuitem *mi, GEvent *UNUSED(e));
#endif /* FONTFORGE_VIEWS_H */
|
1050_0
|
crossvul
|
h
|
CWE-119
|
Improper Restriction of Operations within the Bounds of a Memory Buffer - Certain languages allow direct addressing of memory locations and do not automatically ensure that these locations are valid for the memory buffer that is being referenced.
|
c
|
/* Copyright (C) 2000-2012 by George Williams */
/*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
* EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef FONTFORGE_GDRAW_H
#define FONTFORGE_GDRAW_H
#include "charset.h"
#include "gimage.h"
/* Font style modifiers (bit-flags, may be OR'd together). */
enum font_style { fs_none, fs_italic=1, fs_smallcaps=2, fs_condensed=4, fs_extended=8, fs_vertical=16 };
/* Broad font family classification. */
enum font_type { ft_unknown, ft_serif, ft_sans, ft_mono, ft_cursive, ft_max };
/* Text transformation flags applied when rendering. */
enum text_mods { tm_none, tm_upper=1, tm_lower=2, tm_initialcaps=4, tm_showsofthyphen=8 };
/* Text decoration line flags. */
enum text_lines { tl_none, tl_under=1, tl_strike=2, tl_over=4, tl_dash=8 };
/* Description of a desired font, used to request a concrete font instance. */
typedef struct {
const unichar_t *family_name; /* may be more than one */
int16 point_size; /* negative values are in pixels */
int16 weight;
enum font_style style; /* OR of font_style flags */
char *utf8_family_name; /* UTF-8 alternative to family_name */
} FontRequest;
/* Opaque handle to an instantiated font. */
typedef struct font_instance FontInstance, GFont;
/* Input-context placement styles. gic_type masks the placement bits
 * (gic_hidden/gic_root/gic_overspot); gic_orlesser is a modifier. */
enum gic_style { gic_overspot=2, gic_root=1, gic_hidden=0, gic_orlesser=4, gic_type=3 };
/* Opaque input-context handle. */
typedef struct ginput_context GIC;
/* Graphics context: per-window drawing state (colors, clip, dash pattern,
 * line width, tile/stipple offsets, current font). */
typedef struct ggc {
struct gwindow *w; /* window this GC draws into */
int32 xor_base;
Color fg; /* foreground color */
Color bg; /* background color */
GRect clip; /* current clip rectangle */
unsigned int copy_through_sub_windows: 1;
unsigned int bitmap_col: 1; /* window is mapped for bitmap */
int16 skip_len, dash_len; /* dash pattern: drawn/skipped segment lengths */
int16 line_width;
int16 ts; /* tile/stipple selector — TODO confirm semantics */
int32 ts_xoff, ts_yoff; /* tile/stipple origin offsets */
int dash_offset; /* phase offset into the dash pattern */
GFont *fi; /* current font instance */
} GGC;
/* Bounding-box metrics returned when measuring a run of text. */
typedef struct gtextbounds {
int16 lbearing; /* of first character */
/* origin to left edge of first char's raster */
int32 rbearing; /* origin to right edge of last char's raster */
int16 as,ds; /* maximum ascent and maximum descent */
/* (both numbers will be positive for "g" */
/* so total height = as+ds */
int16 fas, fds; /* font ascent and descent */
/* total width = rbearing-lbearing */
int32 width; /* above are for the bounding rect, not the text */
/* "width" which may be totally different */
} GTextBounds;
/* The selections (in the X11 sense) that GDraw knows about. */
enum selnames { sn_primary, sn_clipboard, sn_drag_and_drop, sn_user1, sn_user2, sn_max };
/* Opaque handles for the core GDraw objects. */
typedef struct gwindow *GWindow;
typedef struct gdisplay GDisplay;
typedef struct gtimer GTimer;
/* Keyboard-modifier and mouse-button state masks, as found in event `state`
 * fields. Several values are platform-dependent aliases (see comments). */
enum keystate_mask { ksm_shift=1, ksm_capslock=2, ksm_control=4, ksm_meta=8,
ksm_cmdsuse=0x8,
/* Suse X on a Mac maps command to meta. As of Mac 10.2, the command key is 0x10 */
/* In 10.0 the command key was 0x20 */
ksm_cmdmacosx=0x10, /* But not the command key under suse ppc linux*/
ksm_numlock=0x10, /* It's numlock on my 386 system */
ksm_super=0x40, /* RedHat mask for the key with the windows flag on it */
ksm_hyper=0x80,
/* Both Suse and Mac OS/X.2 now map option to 0x2000, but under 10.0 it was meta */
/* Under 10.4 it is the meta mask again */
/* Under 10.6 it is 0x2000 again. I wish they'd be consistent */
ksm_option=0x2000, /* sometimes */
ksm_menumask=(ksm_control|ksm_meta|ksm_cmdmacosx|0xf0),
ksm_button1=(1<<8), ksm_button2=(1<<9), ksm_button3=(1<<10),
ksm_button4=(1<<11), ksm_button5=(1<<12),
ksm_buttons=(ksm_button1|ksm_button2|ksm_button3|ksm_button4|ksm_button5)
};
/* How a window/gadget came to receive focus. */
enum mnemonic_focus { mf_normal, mf_tab, mf_mnemonic, mf_shortcut };
/* Event types delivered in GEvent.type; each selects a member of GEvent.u. */
enum event_type { et_noevent = -1, et_char, et_charup, // is 1
et_mousemove, et_mousedown, et_mouseup,
et_crossing, /* these four are assumed to be consecutive */
et_focus, // is 7
et_expose, et_visibility, et_resize, et_timer,
et_close/*request by user*/, et_create,
et_map, et_destroy/*window being freed*/,
et_selclear,
et_drag, et_dragout, et_drop,
et_lastnativeevent=et_drop,
et_controlevent, et_user };
/* Window visibility, reported by et_visibility events. */
enum visibility_state { vs_unobscured, vs_partially, vs_obscured };
/* Sub-types of et_controlevent (events synthesized by gadgets). */
enum et_subtype { et_buttonpress, et_buttonactivate, et_radiochanged,
et_listselected, et_listdoubleclick,
et_scrollbarchange,
et_textchanged, et_textfocuschanged,
et_lastsubtype };
/* Scrollbar actions; left/right alias up/down for horizontal scrollbars. */
enum sb { et_sb_top, et_sb_uppage, et_sb_up, et_sb_left=et_sb_up,
et_sb_down, et_sb_right=et_sb_down, et_sb_downpage,
et_sb_bottom,
et_sb_thumb, et_sb_thumbrelease };
/* Payload of a scrollbar change notification. */
struct sbevent {
enum sb type; /* which scrollbar action occurred */
int32 pos; /* new position (meaningful for thumb events) */
};
/* A single GDraw event. `type` selects which member of the union `u`
 * is valid; `w` is the window the event was delivered to. */
typedef struct gevent {
enum event_type type;
#define _GD_EVT_CHRLEN 10
GWindow w;
union {
struct { /* valid for et_char / et_charup */
char *device; /* for wacom devices */
uint32 time;
uint16 state; /* keystate_mask modifier bits */
int16 x,y;
uint16 keysym;
int16 autorepeat;
unichar_t chars[_GD_EVT_CHRLEN];
} chr;
struct { /* valid for et_mousemove / et_mousedown / et_mouseup */
char *device; /* for wacom devices */
uint32 time;
int16 state; /* keystate_mask modifier/button bits */
int16 x,y;
int16 button;
int16 clicks;
int32 pressure, xtilt, ytilt, separation; /* tablet data */
} mouse;
struct { /* valid for et_expose */
GRect rect; /* area needing redraw */
} expose;
struct { /* valid for et_visibility */
enum visibility_state state;
} visibility;
struct { /* valid for et_resize */
GRect size;
int16 dx, dy, dwidth, dheight; /* deltas from previous geometry */
unsigned int moved: 1;
unsigned int sized: 1;
} resize;
struct { /* valid for et_crossing (pointer enter/leave) */
char *device; /* for wacom devices */
uint32 time;
int16 state;
int16 x,y;
unsigned int entered: 1; /* 1 = entered window, 0 = left it */
} crossing;
struct { /* valid for et_focus */
unsigned int gained_focus: 1;
unsigned int mnemonic_focus: 2; /* a mnemonic_focus value */
} focus;
struct { /* valid for et_map */
unsigned int is_visible: 1;
} map;
struct { /* valid for et_selclear */
enum selnames sel; /* which selection was lost */
} selclear;
struct { /* valid for et_drag / et_dragout / et_drop */
int32 x,y;
} drag_drop;
struct { /* valid for et_timer */
GTimer *timer;
void *userdata;
} timer;
struct { /* valid for et_controlevent (synthesized by gadgets) */
enum et_subtype subtype; /* selects a member of the inner union */
struct ggadget *g; /* gadget that generated the event */
union {
struct sbevent sb;
struct {
int gained_focus;
} tf_focus;
struct {
int from_pulldown; /* -1 normally, else index into pulldown list */
} tf_changed;
struct {
int clicks;
int16 button, state;
} button;
struct {
int from_mouse, changed_index;
} list;
} u;
} control;
struct { /* valid for et_user (application-defined events) */
long subtype;
void *userdata;
} user;
} u;
void *native_window; /* underlying platform window handle */
} GEvent;
typedef enum cursor_types { ct_default, ct_pointer, ct_backpointer, ct_hand,
ct_question, ct_cross, ct_4way, ct_text, ct_watch, ct_draganddrop,
ct_invisible,
ct_user, ct_user2 /* and so on */ } GCursor;
enum window_attr_mask { wam_events=0x2, wam_bordwidth=0x4,
wam_bordcol=0x8, wam_backcol=0x10, wam_cursor=0x20, wam_wtitle=0x40,
wam_ititle=0x80, wam_icon=0x100, wam_nodecor=0x200,
wam_positioned=0x400, wam_centered=0x800, wam_undercursor=0x1000,
wam_noresize=0x2000, wam_restrict=0x4000, wam_redirect=0x8000,
wam_isdlg=0x10000, wam_notrestricted=0x20000,
wam_transient=0x40000,
wam_utf8_wtitle=0x80000, wam_utf8_ititle=0x100000,
wam_nocairo=0x200000, wam_verytransient=0x400000, wam_palette=0x800000 };
/* Attributes for window creation (GDrawCreateTopWindow / GDrawCreateSubWindow).
 * Only fields whose wam_* bit is set in 'mask' are consulted; the rest may be
 * left zeroed (see GWINDOWATTRS_EMPTY below). */
typedef struct gwindow_attrs {
    enum window_attr_mask mask;		/* which of the following fields are valid */
    uint32 event_masks;			/* (1<<et_char) | (1<<et_mouseup) etc */
    int16 border_width;
    Color border_color;			/* Color_UNKNOWN if unspecified */
    Color background_color;
    GCursor cursor;
	/* Remainder is only for top level windows */
    const unichar_t *window_title;
    const unichar_t *icon_title;
    struct gwindow *icon;		/* A bitmap pixmap, or NULL */
    unsigned int nodecoration: 1;	/* no wm decoration */
    unsigned int positioned: 1;		/* position information is important */
    unsigned int centered: 2;		/* center the window on the screen. pos.width&pos.height are used */
    unsigned int undercursor: 1;	/* center the window under the cursor. */
    unsigned int noresize: 1;		/* set min and max sizes to current size */
    unsigned int restrict_input_to_me: 1;/* for dialogs, no input outside of dlg */
    unsigned int redirect_chars_to_me: 1;/* ditto, we get any input outside of us */
    unsigned int is_dlg: 1;		/* 1 for dlg, 0 for main window */
    unsigned int not_restricted: 1;	/* gets events even if a restricted (modal) dlg is up */
    GWindow redirect_from;		/* only redirect input from this window and its children */
    GWindow transient;			/* the Transient_FOR hint */
    const char *utf8_window_title;	/* UTF-8 alternatives to the unichar_t titles above */
    const char *utf8_icon_title;
} GWindowAttrs;
#define GWINDOWATTRS_EMPTY { 0, 0, 0, 0, 0, 0, NULL, NULL, NULL, 0, 0, 0, 0, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL }
enum printer_attr_mask { pam_pagesize=1, pam_margins=2, pam_scale=4,
pam_res=8, pam_copies=0x10, pam_thumbnails=0x20, pam_printername=0x40,
pam_filename=0x80, pam_args=0x100, pam_color=0x200, pam_transparent=0x400,
pam_lpr=0x800, pam_queue=0x1000, pam_eps=0x2000, pam_landscape=0x4000,
pam_title=0x8000 };
enum printer_units { pu_inches, pu_points, pu_mm };
/* Print-job description passed to GPrinterStartJob().  As with GWindowAttrs,
 * only fields whose pam_* bit is set in 'mask' are meaningful. */
typedef struct gprinter_attrs {
    enum printer_attr_mask mask;	/* which of the following fields are valid */
    float width, height;		/* paper size */
    float lmargin, rmargin, tmargin, bmargin;	/* page margins */
    float scale;			/* 1.0 implies no scaling */
    enum printer_units units;		/* units for the sizes/margins above */
    int32 res;				/* printer resolution */
    int16 num_copies;
    int16 thumbnails;			/* linear count of number of thumbnail*/
					/*  pages per edge of real page */
    unsigned int do_color: 1;
    unsigned int do_transparent: 1;	/* try to get transparent images to work*/
    unsigned int use_lpr: 1;
    unsigned int donot_queue: 1;	/* ie. print to file */
    unsigned int landscape: 1;
    unsigned int eps: 1;		/* generate an eps file, not a full doc */
    char *printer_name;			/* only if things are queued */
    char *file_name;			/* only if things aren't queued */
    char *extra_lpr_args;
    unichar_t *title;
    uint16 start_page, end_page;	/* Ignored by printer routines, for programmer */
} GPrinterAttrs;
/* Per-device event request, used by GDrawRequestDeviceEvents(): ask for the
 * events in event_mask from the named input device (e.g. a wacom tablet). */
typedef struct gdeveventmask {
    int event_mask;		/* bitmask of (1<<et_*) events wanted from this device */
    char *device_name;		/* native name of the input device */
} GDevEventMask;
enum gzoom_flags { gzf_pos=1, gzf_size=2 };
/* bit flags for the hasCairo query */
enum gcairo_flags { gc_buildpath=1, /* Has build path commands (postscript, cairo) */
gc_alpha=2, /* Supports alpha channels & translucent colors (cairo, pdf) */
gc_xor=4, /* Cairo can't do the traditional XOR drawing that X11 does */
gc_all = gc_buildpath|gc_alpha
};
typedef int (*GDrawEH)(GWindow,GEvent *);
extern unichar_t *GDrawKeysyms[];
extern GDisplay *screen_display, *printer_display;
extern void GDrawDestroyDisplays(void);
extern void GDrawCreateDisplays(char *displayname,char *programname);
extern void *GDrawNativeDisplay(GDisplay *);
extern void GDrawTerm(GDisplay *disp);
extern int GDrawGetRes(GWindow gw);
extern int GDrawPointsToPixels(GWindow gw,int points);
extern int GDrawPixelsToPoints(GWindow gw,int pixels);
extern void GDrawSetDefaultIcon(GWindow icon);
extern GWindow GDrawCreateTopWindow(GDisplay *gdisp, GRect *pos, int (*eh)(GWindow,GEvent *), void *user_data, GWindowAttrs *wattrs);
extern GWindow GDrawCreateSubWindow(GWindow w, GRect *pos, int (*eh)(GWindow,GEvent *), void *user_data, GWindowAttrs *wattrs);
extern GWindow GDrawCreatePixmap(GDisplay *gdisp, GWindow similar, uint16 width, uint16 height);
extern GWindow GDrawCreateBitmap(GDisplay *gdisp, uint16 width, uint16 height, uint8 *data);
extern GCursor GDrawCreateCursor(GWindow src,GWindow mask,Color fg,Color bg,
int16 x, int16 y );
extern void GDrawDestroyWindow(GWindow w);
extern void GDrawDestroyCursor(GDisplay *gdisp, GCursor ct);
extern int GDrawNativeWindowExists(GDisplay *gdisp, void *native);
extern void GDrawSetZoom(GWindow w, GRect *zoomsize, enum gzoom_flags);
extern void GDrawSetWindowBorder(GWindow w, int width, Color color);
extern void GDrawSetWindowBackground(GWindow w, Color color);
/**
* Set the window type to the given name.
*
* You should not free the 'name' string, it is assumed to exist for
* the lifetime of the fontforge run, for example, as a constant
* string. No copy of name is performed.
*/
extern void GDrawSetWindowTypeName(GWindow w, char* name);
/**
* Get the window type string that was set with GDrawSetWindowTypeName()
* or NULL if no such string was set.
*
* No memory allocations are performed. You should not free the string
* that is returned.
*/
extern char* GDrawGetWindowTypeName(GWindow w);
extern int GDrawSetDither(GDisplay *gdisp, int dither);
extern void GDrawReparentWindow(GWindow child,GWindow newparent, int x,int y);
extern void GDrawSetVisible(GWindow w, int visible);
extern int GDrawIsVisible(GWindow w);
extern void GDrawTrueMove(GWindow w, int32 x, int32 y);
extern void GDrawMove(GWindow w, int32 x, int32 y);
extern void GDrawResize(GWindow w, int32 width, int32 height);
extern void GDrawMoveResize(GWindow w, int32 x, int32 y, int32 width, int32 height);
extern GWindow GDrawGetRoot(GDisplay *);
extern Color GDrawGetDefaultBackground(GDisplay *);
extern Color GDrawGetDefaultForeground(GDisplay *);
extern GRect *GDrawGetSize(GWindow w, GRect *ret);
extern GDrawEH GDrawGetEH(GWindow w);
extern void GDrawSetEH(GWindow w,GDrawEH e_h);
extern void GDrawGetPointerPosition(GWindow w, GEvent *mouse);
extern GWindow GDrawGetPointerWindow(GWindow w);
extern void GDrawRaise(GWindow w);
extern void GDrawRaiseAbove(GWindow w,GWindow below);
extern int GDrawIsAbove(GWindow w,GWindow other);
extern void GDrawLower(GWindow w);
extern void GDrawSetWindowTitles(GWindow w, const unichar_t *title, const unichar_t *icontit);
extern void GDrawSetWindowTitles8(GWindow w, const char *title, const char *icontit);
extern unichar_t *GDrawGetWindowTitle(GWindow w);
extern char *GDrawGetWindowTitle8(GWindow w);
extern void GDrawSetTransientFor(GWindow transient,GWindow owner);
extern void GDrawSetCursor(GWindow w, GCursor ct);
extern GCursor GDrawGetCursor(GWindow w);
extern GWindow GDrawGetRedirectWindow(GDisplay *gd);
extern GWindow GDrawGetParentWindow(GWindow gw);
extern int GDrawWindowIsAncestor(GWindow ancester, GWindow descendent);
extern void GDrawSetUserData(GWindow gw, void *ud);
extern void *GDrawGetUserData(GWindow gw);
extern GDisplay *GDrawGetDisplayOfWindow(GWindow);
extern void GDrawTranslateCoordinates(GWindow from,GWindow to, GPoint *pt);
extern int32 GDrawEventInWindow(GWindow inme,GEvent *event);
extern void GDrawBeep(GDisplay *gdisp);
extern void GDrawFlush(GDisplay *gdisp);
extern void GDrawGetClip(GWindow w, GRect *ret);
extern void GDrawSetClip(GWindow w, GRect *rct);
extern void GDrawPushClip(GWindow w, GRect *rct, GRect *old);
extern void GDrawPopClip(GWindow w, GRect *old);
extern void GDrawPushClipOnly(GWindow w);
extern void GDrawClipPreserve(GWindow w);
extern GGC *GDrawGetWindowGGC(GWindow w);
extern void GDrawSetCopyMode(GWindow w);
extern void GDrawSetDifferenceMode(GWindow w);
extern void GDrawSetCopyThroughSubWindows(GWindow w,int16 through);
extern void GDrawSetDashedLine(GWindow w,int16 dash_len, int16 skip_len, int16 off);
extern void GDrawSetStippled(GWindow w,int16 ts, int32 yoff,int32 xoff);
extern void GDrawSetLineWidth(GWindow w,int16 width);
extern int16 GDrawGetLineWidth( GWindow w );
extern void GDrawSetForeground(GWindow w,Color col);
extern void GDrawSetBackground(GWindow w,Color col);
extern GFont *GDrawSetFont(GWindow gw, GFont *fi);
extern GFont *GDrawInstanciateFont(GWindow gw, FontRequest *rq);
extern GFont *GDrawAttachFont(GWindow gw, FontRequest *rq);
extern FontRequest *GDrawDecomposeFont(GFont *fi, FontRequest *rq);
extern void GDrawWindowFontMetrics(GWindow gw,GFont *fi,int *as, int *ds, int *ld);
extern int32 GDrawGetTextBounds(GWindow gw,const unichar_t *text, int32 cnt, GTextBounds *size);
extern int32 GDrawGetTextWidth(GWindow gw, const unichar_t *text, int32 cnt);
extern int32 GDrawDrawText(GWindow gw, int32 x, int32 y, const unichar_t *txt, int32 cnt, Color col);
/* UTF8 routines */
extern int32 GDrawGetText8Bounds(GWindow gw, const char *text, int32 cnt, GTextBounds *size);
extern int32 GDrawGetText8Width(GWindow gw, const char *text, int32 cnt);
extern int32 GDrawGetText8Height(GWindow gw, const char *text, int32 cnt);
extern int32 GDrawDrawText8(GWindow gw, int32 x, int32 y, const char *txt, int32 cnt, Color col);
extern GIC *GDrawCreateInputContext(GWindow w,enum gic_style def_style);
extern void GDrawSetGIC(GWindow w,GIC *gic,int x, int y);
extern int GDrawKeyState(GWindow w, int keysym);
extern void GDrawClear(GWindow w, GRect *rect);
extern void GDrawDrawLine(GWindow w, int32 x,int32 y, int32 xend,int32 yend, Color col);
extern void GDrawDrawArrow(GWindow w, int32 x,int32 y, int32 xend,int32 yend, int arrows, Color col);
extern void GDrawDrawRect(GWindow w, GRect *rect, Color col);
extern void GDrawFillRect(GWindow w, GRect *rect, Color col);
extern void GDrawFillRoundRect(GWindow w, GRect *rect, int radius, Color col);
extern void GDrawDrawElipse(GWindow w, GRect *rect, Color col);
extern void GDrawFillElipse(GWindow w, GRect *rect, Color col);
extern void GDrawDrawArc(GWindow w, GRect *rect, int32 sangle, int32 tangle, Color col);
extern void GDrawDrawPoly(GWindow w, GPoint *pts, int16 cnt, Color col);
extern void GDrawFillPoly(GWindow w, GPoint *pts, int16 cnt, Color col);
extern void GDrawScroll(GWindow w, GRect *rect, int32 hor, int32 vert);
extern void GDrawDrawImage(GWindow w, GImage *img, GRect *src, int32 x, int32 y);
extern void GDrawDrawGlyph(GWindow w, GImage *img, GRect *src, int32 x, int32 y);
extern void GDrawDrawScaledImage(GWindow w, GImage *img, int32 x, int32 y);
extern void GDrawDrawImageMagnified(GWindow w, GImage *img, GRect *src, int32 x, int32 y,
int32 width, int32 height);
extern void GDrawTileImage(GWindow w, GImage *img, GRect *src, int32 x, int32 y);
extern void GDrawDrawPixmap(GWindow w, GWindow pixmap, GRect *src, int32 x, int32 y);
extern void GDrawTilePixmap(GWindow w, GWindow pixmap, GRect *src, int32 x, int32 y);
extern GImage *GDrawCopyScreenToImage(GWindow w, GRect *rect);
extern void GDrawGrabSelection(GWindow w,enum selnames sel);
extern void GDrawAddSelectionType(GWindow w,enum selnames sel,char *type,
void *data,int32 cnt,int32 unitsize,void *(*gendata)(void *,int32 *len),
void (*freedata)(void *));
extern void *GDrawRequestSelection(GWindow w,enum selnames sn, char *typename, int32 *len);
extern int GDrawSelectionHasType(GWindow w,enum selnames sn, char *typename);
extern void GDrawBindSelection(GDisplay *disp,enum selnames sel, char *atomname);
extern int GDrawSelectionOwned(GDisplay *disp,enum selnames sel);
extern void GDrawPointerUngrab(GDisplay *disp);
extern void GDrawPointerGrab(GWindow w);
extern int GDrawEnableExposeRequests(GWindow w,int enabled);
extern void GDrawRequestExpose(GWindow w, GRect *rect, int doclear);
extern void GDrawSync(GDisplay *gdisp);
extern void GDrawForceUpdate(GWindow w);
extern void GDrawProcessOneEvent(GDisplay *disp);
extern void GDrawProcessPendingEvents(GDisplay *disp);
extern void GDrawProcessWindowEvents(GWindow w);
extern void GDrawSkipMouseMoveEvents(GWindow w,GEvent *last);
extern void GDrawEventLoop(GDisplay *disp);
extern void GDrawPostEvent(GEvent *e);
extern void GDrawPostDragEvent(GWindow gw,GEvent *e,enum event_type);
extern GTimer *GDrawRequestTimer(GWindow w,int32 time_from_now,int32 frequency,
void *userdata);
extern void GDrawCancelTimer(GTimer *timer);
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
//
// Windowless timers used for background activities
//
/**
* Callback which will be called at a nominated frequency with a given
* userdata pointer.
*/
typedef void (* BackgroundTimerFunc )(void*);
/**
* Internal bookkeeping for windowless timers. They are currently
* windowed timers on the inside but we spare the user from creating
* the window, keeping track of it, and having to deal with an event
* handling function which tries to get back a nominated userdata
* pointer from all that jazz.
*/
typedef struct BackgroundTimerstruct
{
    // the callback invoked each time the timer fires; takes userdata as arg1
    BackgroundTimerFunc func;
    // the userdata pointer that should be passed to func
    void *userdata;
    // the internal hidden window used to actually get the timer events
    GWindow w;
    // the GDraw timer associated with the above window
    GTimer* timer;
    // how often to fire the timer, in milliseconds
    int32 BackgroundTimerMS;
} BackgroundTimer_t;
/**
* Create a new windowless timer which will be fired every
* BackgroundTimerMS milliseconds and call func with the supplied
* userdata.
*/
BackgroundTimer_t*
BackgroundTimer_new( int32 BackgroundTimerMS,
BackgroundTimerFunc func,
void *userdata );
/**
* Remove a windowless background timer freeing any resources
* associated with it.
*/
void BackgroundTimer_remove( BackgroundTimer_t* t );
/**
* Make sure the timer fires at it's desired time from now(). For
* example, if a timer will fire every 2 seconds and is about to fire
* in a few ms from now, calling touch() will make it fire 2 seconds
* from now instead.
*
* This way if you have a timer which is to handle background issues
* if something doesn't happen in a scheduled amount of time, you can
* touch() the timer to make sure it fires again after the full
* elapsed time instead of having it fire too soon.
*/
void BackgroundTimer_touch( BackgroundTimer_t* t );
////////////////////////////////////////////////////////////////////////////////
extern void GDrawSyncThread(GDisplay *gd, void (*func)(void *), void *data);
extern GWindow GPrinterStartJob(GDisplay *gdisp,void *user_data,GPrinterAttrs *attrs);
extern void GPrinterNextPage(GWindow w);
extern int GPrinterEndJob(GWindow w,int cancel);
extern void GDrawSetBuildCharHooks(void (*hook)(GDisplay *), void (*inshook)(GDisplay *,unichar_t));
extern int GDrawRequestDeviceEvents(GWindow w,int devcnt,struct gdeveventmask *de);
extern enum gcairo_flags GDrawHasCairo(GWindow w);
extern void GDrawPathStartNew(GWindow w);
extern void GDrawPathStartSubNew(GWindow w);
extern int GDrawFillRuleSetWinding(GWindow w);
extern void GDrawPathClose(GWindow w);
extern void GDrawPathMoveTo(GWindow w,double x, double y);
extern void GDrawPathLineTo(GWindow w,double x, double y);
extern void GDrawPathCurveTo(GWindow w,
double cx1, double cy1,
double cx2, double cy2,
double x, double y);
extern void GDrawPathStroke(GWindow w,Color col);
extern void GDrawPathFill(GWindow w,Color col);
extern void GDrawPathFillAndStroke(GWindow w,Color fillcol, Color strokecol);
extern void GDrawEnableCairo(int on);
extern void GDrawLayoutInit(GWindow w, char *text, int cnt, GFont *fi);
extern void GDrawLayoutDraw(GWindow w, int32 x, int32 y, Color fg);
extern void GDrawLayoutIndexToPos(GWindow w, int index, GRect *pos);
extern int GDrawLayoutXYToIndex(GWindow w, int x, int y);
extern void GDrawLayoutExtents(GWindow w, GRect *size);
extern void GDrawLayoutSetWidth(GWindow w, int width);
extern int GDrawLayoutLineCount(GWindow w);
extern int GDrawLayoutLineStart(GWindow w,int line);
extern void GDrawFatalError(const char *fmt,...);
extern void GDrawIError(const char *fmt,...);
extern void GDrawError(const char *fmt,...);
extern int GImageGetScaledWidth(GWindow gw, GImage *img);
extern int GImageGetScaledHeight(GWindow gw, GImage *img);
extern void GDrawAddReadFD( GDisplay *disp,
int fd, void* udata,
void (*callback)(int fd, void* udata ));
extern void GDrawRemoveReadFD( GDisplay *disp,
int fd, void* udata );
/**
* The Mac OSX build doesn't use the same core event loop as the
* Linux/X build. So inside the timer we can use this to double check
* if any fds that we should monitor for input have changed and if so
* service their messages.
*/
extern void MacServiceReadFDs(void);
#endif /* FONTFORGE_GDRAW_H */
|
/* Copyright (C) 2000-2012 by George Williams */
/*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
* EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef FONTFORGE_GDRAW_H
#define FONTFORGE_GDRAW_H
#include "charset.h"
#include "gimage.h"
enum font_style { fs_none, fs_italic=1, fs_smallcaps=2, fs_condensed=4, fs_extended=8, fs_vertical=16 };
enum font_type { ft_unknown, ft_serif, ft_sans, ft_mono, ft_cursive, ft_max };
enum text_mods { tm_none, tm_upper=1, tm_lower=2, tm_initialcaps=4, tm_showsofthyphen=8 };
enum text_lines { tl_none, tl_under=1, tl_strike=2, tl_over=4, tl_dash=8 };
/* Caller-filled description of a desired font, resolved to a concrete
 * FontInstance by GDrawInstanciateFont() / GDrawAttachFont(). */
typedef struct {
    const unichar_t *family_name;	/* may be more than one */
    int16 point_size;			/* negative values are in pixels */
    int16 weight;			/* font weight -- scale not specified here; TODO confirm (400=normal?) */
    enum font_style style;		/* fs_italic | fs_smallcaps | ... (enum font_style bits) */
    char *utf8_family_name;		/* UTF-8 alternative to family_name */
} FontRequest;
typedef struct font_instance FontInstance, GFont;
enum gic_style { gic_overspot=2, gic_root=1, gic_hidden=0, gic_orlesser=4, gic_type=3 };
typedef struct ginput_context GIC;
/* Graphics context: per-window drawing state (colors, clip rectangle, line
 * style, stipple, current font) manipulated through the GDrawSet* routines. */
typedef struct ggc {
    struct gwindow *w;			/* window this context draws into */
    int32 xor_base;			/* base value for XOR drawing (GDrawSetDifferenceMode) */
    Color fg;				/* foreground (drawing) color */
    Color bg;				/* background color */
    GRect clip;				/* current clip rectangle */
    unsigned int copy_through_sub_windows: 1;	/* see GDrawSetCopyThroughSubWindows() */
    unsigned int bitmap_col: 1;		/* window is mapped for bitmap */
    int16 skip_len, dash_len;		/* dash pattern (GDrawSetDashedLine) */
    int16 line_width;			/* line width (GDrawSetLineWidth) */
    int16 ts;				/* stipple selector (GDrawSetStippled) */
    int32 ts_xoff, ts_yoff;		/* stipple offsets (GDrawSetStippled) */
    int dash_offset;			/* phase offset into the dash pattern */
    GFont *fi;				/* current font (GDrawSetFont) */
} GGC;
/* Text metrics filled in by GDrawGetTextBounds() / GDrawGetText8Bounds(). */
typedef struct gtextbounds {
    int16 lbearing;		/* of first character */
	    /* origin to left edge of first char's raster */
    int32 rbearing;		/* origin to right edge of last char's raster */
    int16 as,ds;		/* maximum ascent and maximum descent */
	    /*  (both numbers will be positive for "g" */
	    /*  so total height = as+ds */
    int16 fas, fds;		/* font ascent and descent */
	    /* total width = rbearing-lbearing */
    int32 width;		/* above are for the bounding rect, not the text */
	    /* "width" which may be totally different */
} GTextBounds;
enum selnames { sn_primary, sn_clipboard, sn_drag_and_drop, sn_user1, sn_user2, sn_max };
typedef struct gwindow *GWindow;
typedef struct gdisplay GDisplay;
typedef struct gtimer GTimer;
enum keystate_mask { ksm_shift=1, ksm_capslock=2, ksm_control=4, ksm_meta=8,
ksm_cmdsuse=0x8,
/* Suse X on a Mac maps command to meta. As of Mac 10.2, the command key is 0x10 */
/* In 10.0 the command key was 0x20 */
ksm_cmdmacosx=0x10, /* But not the command key under suse ppc linux*/
ksm_numlock=0x10, /* It's numlock on my 386 system */
ksm_super=0x40, /* RedHat mask for the key with the windows flag on it */
ksm_hyper=0x80,
/* Both Suse and Mac OS/X.2 now map option to 0x2000, but under 10.0 it was meta */
/* Under 10.4 it is the meta mask again */
/* Under 10.6 it is 0x2000 again. I wish they'd be consistent */
ksm_option=0x2000, /* sometimes */
ksm_menumask=(ksm_control|ksm_meta|ksm_cmdmacosx|0xf0),
ksm_button1=(1<<8), ksm_button2=(1<<9), ksm_button3=(1<<10),
ksm_button4=(1<<11), ksm_button5=(1<<12),
ksm_buttons=(ksm_button1|ksm_button2|ksm_button3|ksm_button4|ksm_button5)
};
enum mnemonic_focus { mf_normal, mf_tab, mf_mnemonic, mf_shortcut };
enum event_type { et_noevent = -1, et_char, et_charup, // is 1
et_mousemove, et_mousedown, et_mouseup,
et_crossing, /* these four are assumed to be consecutive */
et_focus, // is 7
et_expose, et_visibility, et_resize, et_timer,
et_close/*request by user*/, et_create,
et_map, et_destroy/*window being freed*/,
et_selclear,
et_drag, et_dragout, et_drop,
et_lastnativeevent=et_drop,
et_controlevent, et_user };
enum visibility_state { vs_unobscured, vs_partially, vs_obscured };
enum et_subtype { et_buttonpress, et_buttonactivate, et_radiochanged,
et_listselected, et_listdoubleclick,
et_scrollbarchange,
et_textchanged, et_textfocuschanged,
et_save, et_lastsubtype };
enum sb { et_sb_top, et_sb_uppage, et_sb_up, et_sb_left=et_sb_up,
et_sb_down, et_sb_right=et_sb_down, et_sb_downpage,
et_sb_bottom,
et_sb_thumb, et_sb_thumbrelease };
/* Payload for a scrollbar change (delivered in et_controlevent events
 * with subtype et_scrollbarchange; see GEvent.u.control.u.sb). */
struct sbevent {
    enum sb type;	/* which scrollbar action occurred (see enum sb above) */
    int32 pos;		/* scrollbar position associated with the action */
};
typedef struct gevent {
enum event_type type;
#define _GD_EVT_CHRLEN 10
GWindow w;
union {
struct {
char *device; /* for wacom devices */
uint32 time;
uint16 state;
int16 x,y;
uint16 keysym;
int16 autorepeat;
unichar_t chars[_GD_EVT_CHRLEN];
} chr;
struct {
char *device; /* for wacom devices */
uint32 time;
int16 state;
int16 x,y;
int16 button;
int16 clicks;
int32 pressure, xtilt, ytilt, separation;
} mouse;
struct {
GRect rect;
} expose;
struct {
enum visibility_state state;
} visibility;
struct {
GRect size;
int16 dx, dy, dwidth, dheight;
unsigned int moved: 1;
unsigned int sized: 1;
} resize;
struct {
char *device; /* for wacom devices */
uint32 time;
int16 state;
int16 x,y;
unsigned int entered: 1;
} crossing;
struct {
unsigned int gained_focus: 1;
unsigned int mnemonic_focus: 2;
} focus;
struct {
unsigned int is_visible: 1;
} map;
struct {
enum selnames sel;
} selclear;
struct {
int32 x,y;
} drag_drop;
struct {
GTimer *timer;
void *userdata;
} timer;
struct {
enum et_subtype subtype;
struct ggadget *g;
union {
struct sbevent sb;
struct {
int gained_focus;
} tf_focus;
struct {
int from_pulldown; /* -1 normally, else index into pulldown list */
} tf_changed;
struct {
int clicks;
int16 button, state;
} button;
struct {
int from_mouse, changed_index;
} list;
} u;
} control;
struct {
long subtype;
void *userdata;
} user;
} u;
void *native_window;
} GEvent;
typedef enum cursor_types { ct_default, ct_pointer, ct_backpointer, ct_hand,
ct_question, ct_cross, ct_4way, ct_text, ct_watch, ct_draganddrop,
ct_invisible,
ct_user, ct_user2 /* and so on */ } GCursor;
enum window_attr_mask { wam_events=0x2, wam_bordwidth=0x4,
wam_bordcol=0x8, wam_backcol=0x10, wam_cursor=0x20, wam_wtitle=0x40,
wam_ititle=0x80, wam_icon=0x100, wam_nodecor=0x200,
wam_positioned=0x400, wam_centered=0x800, wam_undercursor=0x1000,
wam_noresize=0x2000, wam_restrict=0x4000, wam_redirect=0x8000,
wam_isdlg=0x10000, wam_notrestricted=0x20000,
wam_transient=0x40000,
wam_utf8_wtitle=0x80000, wam_utf8_ititle=0x100000,
wam_nocairo=0x200000, wam_verytransient=0x400000, wam_palette=0x800000 };
typedef struct gwindow_attrs {
enum window_attr_mask mask;
uint32 event_masks; /* (1<<et_char) | (1<<et_mouseup) etc */
int16 border_width;
Color border_color; /* Color_UNKNOWN if unspecified */
Color background_color;
GCursor cursor;
/* Remainder is only for top level windows */
const unichar_t *window_title;
const unichar_t *icon_title;
struct gwindow *icon; /* A bitmap pixmap, or NULL */
unsigned int nodecoration: 1; /* no wm decoration */
unsigned int positioned: 1; /* position information is important */
unsigned int centered: 2; /* center the window on the screen. pos.width&pos.height are used */
unsigned int undercursor: 1; /* center the window under the cursor. */
unsigned int noresize: 1; /* set min and max sizes to current size */
unsigned int restrict_input_to_me: 1;/* for dialogs, no input outside of dlg */
unsigned int redirect_chars_to_me: 1;/* ditto, we get any input outside of us */
unsigned int is_dlg: 1; /* 1 for dlg, 0 for main window */
unsigned int not_restricted: 1; /* gets events if if a restricted (modal) dlg is up */
GWindow redirect_from; /* only redirect input from this window and its children */
GWindow transient; /* the Transient_FOR hint */
const char *utf8_window_title;
const char *utf8_icon_title;
} GWindowAttrs;
#define GWINDOWATTRS_EMPTY { 0, 0, 0, 0, 0, 0, NULL, NULL, NULL, 0, 0, 0, 0, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL }
enum printer_attr_mask { pam_pagesize=1, pam_margins=2, pam_scale=4,
pam_res=8, pam_copies=0x10, pam_thumbnails=0x20, pam_printername=0x40,
pam_filename=0x80, pam_args=0x100, pam_color=0x200, pam_transparent=0x400,
pam_lpr=0x800, pam_queue=0x1000, pam_eps=0x2000, pam_landscape=0x4000,
pam_title=0x8000 };
enum printer_units { pu_inches, pu_points, pu_mm };
typedef struct gprinter_attrs {
enum printer_attr_mask mask;
float width, height; /* paper size */
float lmargin, rmargin, tmargin, bmargin;
float scale; /* 1.0 implies no scaling */
enum printer_units units;
int32 res; /* printer resolution */
int16 num_copies;
int16 thumbnails; /* linear count of number of thumbnail*/
/* pages per edge of real page */
unsigned int do_color: 1;
unsigned int do_transparent: 1; /* try to get transparent images to work*/
unsigned int use_lpr: 1;
unsigned int donot_queue: 1; /* ie. print to file */
unsigned int landscape: 1;
unsigned int eps: 1; /* generate an eps file, not a full doc */
char *printer_name; /* only if things are queued */
char *file_name; /* only if things aren't queued */
char *extra_lpr_args;
unichar_t *title;
uint16 start_page, end_page; /* Ignored by printer routines, for programmer */
} GPrinterAttrs;
/* Per-device event request, used by GDrawRequestDeviceEvents(): ask for the
 * events in event_mask from the named input device (e.g. a wacom tablet). */
typedef struct gdeveventmask {
    int event_mask;		/* bitmask of (1<<et_*) events wanted from this device */
    char *device_name;		/* native name of the input device */
} GDevEventMask;
enum gzoom_flags { gzf_pos=1, gzf_size=2 };
/* bit flags for the hasCairo query */
enum gcairo_flags { gc_buildpath=1, /* Has build path commands (postscript, cairo) */
gc_alpha=2, /* Supports alpha channels & translucent colors (cairo, pdf) */
gc_xor=4, /* Cairo can't do the traditional XOR drawing that X11 does */
gc_all = gc_buildpath|gc_alpha
};
typedef int (*GDrawEH)(GWindow,GEvent *);
extern unichar_t *GDrawKeysyms[];
extern GDisplay *screen_display, *printer_display;
extern void GDrawDestroyDisplays(void);
extern void GDrawCreateDisplays(char *displayname,char *programname);
extern void *GDrawNativeDisplay(GDisplay *);
extern void GDrawTerm(GDisplay *disp);
extern int GDrawGetRes(GWindow gw);
extern int GDrawPointsToPixels(GWindow gw,int points);
extern int GDrawPixelsToPoints(GWindow gw,int pixels);
extern void GDrawSetDefaultIcon(GWindow icon);
extern GWindow GDrawCreateTopWindow(GDisplay *gdisp, GRect *pos, int (*eh)(GWindow,GEvent *), void *user_data, GWindowAttrs *wattrs);
extern GWindow GDrawCreateSubWindow(GWindow w, GRect *pos, int (*eh)(GWindow,GEvent *), void *user_data, GWindowAttrs *wattrs);
extern GWindow GDrawCreatePixmap(GDisplay *gdisp, GWindow similar, uint16 width, uint16 height);
extern GWindow GDrawCreateBitmap(GDisplay *gdisp, uint16 width, uint16 height, uint8 *data);
extern GCursor GDrawCreateCursor(GWindow src,GWindow mask,Color fg,Color bg,
int16 x, int16 y );
extern void GDrawDestroyWindow(GWindow w);
extern void GDrawDestroyCursor(GDisplay *gdisp, GCursor ct);
extern int GDrawNativeWindowExists(GDisplay *gdisp, void *native);
extern void GDrawSetZoom(GWindow w, GRect *zoomsize, enum gzoom_flags);
extern void GDrawSetWindowBorder(GWindow w, int width, Color color);
extern void GDrawSetWindowBackground(GWindow w, Color color);
/**
* Set the window type to the given name.
*
* You should not free the 'name' string, it is assumed to exist for
* the lifetime of the fontforge run, for example, as a constant
* string. No copy of name is performed.
*/
extern void GDrawSetWindowTypeName(GWindow w, char* name);
/**
* Get the window type string that was set with GDrawSetWindowTypeName()
* or NULL if no such string was set.
*
* No memory allocations are performed. You should not free the string
* that is returned.
*/
extern char* GDrawGetWindowTypeName(GWindow w);
extern int GDrawSetDither(GDisplay *gdisp, int dither);
extern void GDrawReparentWindow(GWindow child,GWindow newparent, int x,int y);
extern void GDrawSetVisible(GWindow w, int visible);
extern int GDrawIsVisible(GWindow w);
extern void GDrawTrueMove(GWindow w, int32 x, int32 y);
extern void GDrawMove(GWindow w, int32 x, int32 y);
extern void GDrawResize(GWindow w, int32 width, int32 height);
extern void GDrawMoveResize(GWindow w, int32 x, int32 y, int32 width, int32 height);
extern GWindow GDrawGetRoot(GDisplay *);
extern Color GDrawGetDefaultBackground(GDisplay *);
extern Color GDrawGetDefaultForeground(GDisplay *);
extern GRect *GDrawGetSize(GWindow w, GRect *ret);
extern GDrawEH GDrawGetEH(GWindow w);
extern void GDrawSetEH(GWindow w,GDrawEH e_h);
extern void GDrawGetPointerPosition(GWindow w, GEvent *mouse);
extern GWindow GDrawGetPointerWindow(GWindow w);
extern void GDrawRaise(GWindow w);
extern void GDrawRaiseAbove(GWindow w,GWindow below);
extern int GDrawIsAbove(GWindow w,GWindow other);
extern void GDrawLower(GWindow w);
extern void GDrawSetWindowTitles(GWindow w, const unichar_t *title, const unichar_t *icontit);
extern void GDrawSetWindowTitles8(GWindow w, const char *title, const char *icontit);
extern unichar_t *GDrawGetWindowTitle(GWindow w);
extern char *GDrawGetWindowTitle8(GWindow w);
extern void GDrawSetTransientFor(GWindow transient,GWindow owner);
extern void GDrawSetCursor(GWindow w, GCursor ct);
extern GCursor GDrawGetCursor(GWindow w);
extern GWindow GDrawGetRedirectWindow(GDisplay *gd);
extern GWindow GDrawGetParentWindow(GWindow gw);
extern int GDrawWindowIsAncestor(GWindow ancester, GWindow descendent);
extern void GDrawSetUserData(GWindow gw, void *ud);
extern void *GDrawGetUserData(GWindow gw);
extern GDisplay *GDrawGetDisplayOfWindow(GWindow);
extern void GDrawTranslateCoordinates(GWindow from,GWindow to, GPoint *pt);
extern int32 GDrawEventInWindow(GWindow inme,GEvent *event);
extern void GDrawBeep(GDisplay *gdisp);
extern void GDrawFlush(GDisplay *gdisp);
extern void GDrawGetClip(GWindow w, GRect *ret);
extern void GDrawSetClip(GWindow w, GRect *rct);
extern void GDrawPushClip(GWindow w, GRect *rct, GRect *old);
extern void GDrawPopClip(GWindow w, GRect *old);
extern void GDrawPushClipOnly(GWindow w);
extern void GDrawClipPreserve(GWindow w);
extern GGC *GDrawGetWindowGGC(GWindow w);
extern void GDrawSetCopyMode(GWindow w);
extern void GDrawSetDifferenceMode(GWindow w);
extern void GDrawSetCopyThroughSubWindows(GWindow w,int16 through);
extern void GDrawSetDashedLine(GWindow w,int16 dash_len, int16 skip_len, int16 off);
extern void GDrawSetStippled(GWindow w,int16 ts, int32 yoff,int32 xoff);
extern void GDrawSetLineWidth(GWindow w,int16 width);
extern int16 GDrawGetLineWidth( GWindow w );
extern void GDrawSetForeground(GWindow w,Color col);
extern void GDrawSetBackground(GWindow w,Color col);
extern GFont *GDrawSetFont(GWindow gw, GFont *fi);
extern GFont *GDrawInstanciateFont(GWindow gw, FontRequest *rq);
extern GFont *GDrawAttachFont(GWindow gw, FontRequest *rq);
extern FontRequest *GDrawDecomposeFont(GFont *fi, FontRequest *rq);
extern void GDrawWindowFontMetrics(GWindow gw,GFont *fi,int *as, int *ds, int *ld);
extern int32 GDrawGetTextBounds(GWindow gw,const unichar_t *text, int32 cnt, GTextBounds *size);
extern int32 GDrawGetTextWidth(GWindow gw, const unichar_t *text, int32 cnt);
extern int32 GDrawDrawText(GWindow gw, int32 x, int32 y, const unichar_t *txt, int32 cnt, Color col);
/* UTF8 routines */
extern int32 GDrawGetText8Bounds(GWindow gw, const char *text, int32 cnt, GTextBounds *size);
extern int32 GDrawGetText8Width(GWindow gw, const char *text, int32 cnt);
extern int32 GDrawGetText8Height(GWindow gw, const char *text, int32 cnt);
extern int32 GDrawDrawText8(GWindow gw, int32 x, int32 y, const char *txt, int32 cnt, Color col);
extern GIC *GDrawCreateInputContext(GWindow w,enum gic_style def_style);
extern void GDrawSetGIC(GWindow w,GIC *gic,int x, int y);
extern int GDrawKeyState(GWindow w, int keysym);
extern void GDrawClear(GWindow w, GRect *rect);
extern void GDrawDrawLine(GWindow w, int32 x,int32 y, int32 xend,int32 yend, Color col);
extern void GDrawDrawArrow(GWindow w, int32 x,int32 y, int32 xend,int32 yend, int arrows, Color col);
extern void GDrawDrawRect(GWindow w, GRect *rect, Color col);
extern void GDrawFillRect(GWindow w, GRect *rect, Color col);
extern void GDrawFillRoundRect(GWindow w, GRect *rect, int radius, Color col);
extern void GDrawDrawElipse(GWindow w, GRect *rect, Color col);
extern void GDrawFillElipse(GWindow w, GRect *rect, Color col);
extern void GDrawDrawArc(GWindow w, GRect *rect, int32 sangle, int32 tangle, Color col);
extern void GDrawDrawPoly(GWindow w, GPoint *pts, int16 cnt, Color col);
extern void GDrawFillPoly(GWindow w, GPoint *pts, int16 cnt, Color col);
extern void GDrawScroll(GWindow w, GRect *rect, int32 hor, int32 vert);
extern void GDrawDrawImage(GWindow w, GImage *img, GRect *src, int32 x, int32 y);
extern void GDrawDrawGlyph(GWindow w, GImage *img, GRect *src, int32 x, int32 y);
extern void GDrawDrawScaledImage(GWindow w, GImage *img, int32 x, int32 y);
extern void GDrawDrawImageMagnified(GWindow w, GImage *img, GRect *src, int32 x, int32 y,
int32 width, int32 height);
extern void GDrawTileImage(GWindow w, GImage *img, GRect *src, int32 x, int32 y);
extern void GDrawDrawPixmap(GWindow w, GWindow pixmap, GRect *src, int32 x, int32 y);
extern void GDrawTilePixmap(GWindow w, GWindow pixmap, GRect *src, int32 x, int32 y);
extern GImage *GDrawCopyScreenToImage(GWindow w, GRect *rect);
extern void GDrawGrabSelection(GWindow w,enum selnames sel);
extern void GDrawAddSelectionType(GWindow w,enum selnames sel,char *type,
void *data,int32 cnt,int32 unitsize,void *(*gendata)(void *,int32 *len),
void (*freedata)(void *));
extern void *GDrawRequestSelection(GWindow w,enum selnames sn, char *typename, int32 *len);
extern int GDrawSelectionHasType(GWindow w,enum selnames sn, char *typename);
extern void GDrawBindSelection(GDisplay *disp,enum selnames sel, char *atomname);
extern int GDrawSelectionOwned(GDisplay *disp,enum selnames sel);
extern void GDrawPointerUngrab(GDisplay *disp);
extern void GDrawPointerGrab(GWindow w);
extern int GDrawEnableExposeRequests(GWindow w,int enabled);
extern void GDrawRequestExpose(GWindow w, GRect *rect, int doclear);
extern void GDrawSync(GDisplay *gdisp);
extern void GDrawForceUpdate(GWindow w);
extern void GDrawProcessOneEvent(GDisplay *disp);
extern void GDrawProcessPendingEvents(GDisplay *disp);
extern void GDrawProcessWindowEvents(GWindow w);
extern void GDrawSkipMouseMoveEvents(GWindow w,GEvent *last);
extern void GDrawEventLoop(GDisplay *disp);
extern void GDrawPostEvent(GEvent *e);
extern void GDrawPostDragEvent(GWindow gw,GEvent *e,enum event_type);
extern GTimer *GDrawRequestTimer(GWindow w,int32 time_from_now,int32 frequency,
void *userdata);
extern void GDrawCancelTimer(GTimer *timer);
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
//
// Windowless timers used for background activities
//
/**
* Callback which will be called at a nominated frequency with a given
* userdata pointer.
*/
typedef void (* BackgroundTimerFunc )(void*);
/**
* Internal bookkeeping for windowless timers. They are currently
* windowed timers on the inside but we spare the user from creating
* the window, keeping track of it, and having to deal with an event
* handling function which tries to get back a nominated userdata
* pointer from all that jazz.
*/
typedef struct BackgroundTimerstruct
{
// callback invoked on each timer tick; takes userdata as its only argument
BackgroundTimerFunc func;
// the userdata pointer that should be passed to func
void *userdata;
// the internal hidden window used to actually get the timer events
GWindow w;
// the GDraw timer associated with the above window
GTimer* timer;
// how often to fire the timer, in milliseconds
int32 BackgroundTimerMS;
} BackgroundTimer_t;
/**
* Create a new windowless timer which will be fired every
* BackgroundTimerMS milliseconds and call func with the supplied
* userdata.
*/
BackgroundTimer_t*
BackgroundTimer_new( int32 BackgroundTimerMS,
BackgroundTimerFunc func,
void *userdata );
/**
* Remove a windowless background timer freeing any resources
* associated with it.
*/
void BackgroundTimer_remove( BackgroundTimer_t* t );
/**
 * Make sure the timer fires at its desired time from now(). For
* example, if a timer will fire every 2 seconds and is about to fire
* in a few ms from now, calling touch() will make it fire 2 seconds
* from now instead.
*
* This way if you have a timer which is to handle background issues
* if something doesn't happen in a scheduled amount of time, you can
* touch() the timer to make sure it fires again after the full
* elapsed time instead of having it fire too soon.
*/
void BackgroundTimer_touch( BackgroundTimer_t* t );
////////////////////////////////////////////////////////////////////////////////
extern void GDrawSyncThread(GDisplay *gd, void (*func)(void *), void *data);
extern GWindow GPrinterStartJob(GDisplay *gdisp,void *user_data,GPrinterAttrs *attrs);
extern void GPrinterNextPage(GWindow w);
extern int GPrinterEndJob(GWindow w,int cancel);
extern void GDrawSetBuildCharHooks(void (*hook)(GDisplay *), void (*inshook)(GDisplay *,unichar_t));
extern int GDrawRequestDeviceEvents(GWindow w,int devcnt,struct gdeveventmask *de);
extern enum gcairo_flags GDrawHasCairo(GWindow w);
extern void GDrawPathStartNew(GWindow w);
extern void GDrawPathStartSubNew(GWindow w);
extern int GDrawFillRuleSetWinding(GWindow w);
extern void GDrawPathClose(GWindow w);
extern void GDrawPathMoveTo(GWindow w,double x, double y);
extern void GDrawPathLineTo(GWindow w,double x, double y);
extern void GDrawPathCurveTo(GWindow w,
double cx1, double cy1,
double cx2, double cy2,
double x, double y);
extern void GDrawPathStroke(GWindow w,Color col);
extern void GDrawPathFill(GWindow w,Color col);
extern void GDrawPathFillAndStroke(GWindow w,Color fillcol, Color strokecol);
extern void GDrawEnableCairo(int on);
extern void GDrawLayoutInit(GWindow w, char *text, int cnt, GFont *fi);
extern void GDrawLayoutDraw(GWindow w, int32 x, int32 y, Color fg);
extern void GDrawLayoutIndexToPos(GWindow w, int index, GRect *pos);
extern int GDrawLayoutXYToIndex(GWindow w, int x, int y);
extern void GDrawLayoutExtents(GWindow w, GRect *size);
extern void GDrawLayoutSetWidth(GWindow w, int width);
extern int GDrawLayoutLineCount(GWindow w);
extern int GDrawLayoutLineStart(GWindow w,int line);
extern void GDrawFatalError(const char *fmt,...);
extern void GDrawIError(const char *fmt,...);
extern void GDrawError(const char *fmt,...);
extern int GImageGetScaledWidth(GWindow gw, GImage *img);
extern int GImageGetScaledHeight(GWindow gw, GImage *img);
extern void GDrawAddReadFD( GDisplay *disp,
int fd, void* udata,
void (*callback)(int fd, void* udata ));
extern void GDrawRemoveReadFD( GDisplay *disp,
int fd, void* udata );
/**
* The Mac OSX build doesn't use the same core event loop as the
* Linux/X build. So inside the timer we can use this to double check
* if any fds that we should monitor for input have changed and if so
* service their messages.
*/
extern void MacServiceReadFDs(void);
#endif /* FONTFORGE_GDRAW_H */
|
1050_5
|
crossvul
|
h
|
CWE-119
|
Improper Restriction of Operations within the Bounds of a Memory Buffer - Certain languages allow direct addressing of memory locations and do not automatically ensure that these locations are valid for the memory buffer that is being referenced.
|
c
|
/* Copyright (C) 2000-2012 by George Williams */
/*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
* EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef FONTFORGE_GGADGET_H
#define FONTFORGE_GGADGET_H
#include "gdraw.h"
#include "intl.h"
struct giocontrol;
#ifndef MAX
#define MAX(x,y) (((x) > (y)) ? (x) : (y))
#endif
#ifndef MIN
#define MIN(x,y) (((x) < (y)) ? (x) : (y))
#endif
/* A generic item descriptor used by labels, lists, menus and row/column
 * widgets: some text and/or an image, plus display and selection state. */
typedef struct gtextinfo {
unichar_t *text; /* item text; wide chars unless text_is_1byte is set */
GImage *image; /* optional image shown with (or instead of) the text */
Color fg; /* text foreground colour */
Color bg; /* text background colour */
void *userdata; /* opaque client data; not interpreted by the gadget code */
GFont *font; /* font override for this item; NULL presumably means the widget default -- NOTE(review): confirm */
unsigned int disabled: 1; /* item is greyed/unselectable */
unsigned int image_precedes: 1; /* draw the image before (left of) the text */
unsigned int checkable: 1; /* Only for menus */
unsigned int checked: 1; /* Only for menus */
unsigned int selected: 1; /* Only for lists (used internally for menu(bar)s, when cursor is on the line) */
unsigned int line: 1; /* Only for menus */
unsigned int text_is_1byte: 1; /* If passed in as 1byte (ie. iso-8859-1) text, will be converted */
unsigned int text_in_resource: 1; /* the text field is actually an index into the string resource table */
unsigned int changed: 1; /* If a row/column widget changed this */
unichar_t mnemonic; /* Only for menus and menubars */
/* should really be in menuitem, but that wastes space and complicates GTextInfoDraw */
char* text_untranslated; /* used to simplify hotkey lookup for menus. */
/*
 * text_untranslated is either the GMenuItem2 shortcut or the GTextInfo prior
 * to translation occurring. The shortcut text is considered first
 * to allow the code to make the value explicit. This is useful in
 * cases where the menu text to be translated (GTextInfo.text
 * prior to calling sgettext() on it) is specially designed for
 * translation, like File|New is. Having the hotkey of "New|No
 * Shortcut" will give a text_untranslated of "New|No Shortcut".
 * See HKTextInfoToUntranslatedText() for stripping out any
 * potential underscore and the trailing "|Rest" string.
 *
 * Using a pointer like this relies on the GMenuItems used to make
 * the menus are a static structure that outlasts the
 * menu/gtextinfo itself.
 **/
} GTextInfo;
#define GTEXTINFO_EMPTY { NULL, NULL, 0x000000, 0x000000, NULL, NULL, 0, 0, 0, 0, 0, 0, 0, 0, 0, '\0' }
/* Variant of GTextInfo with an extra sort hint and without the
 * text_untranslated pointer; layout otherwise parallels GTextInfo. */
typedef struct gtextinfo2 {
unichar_t *text; /* item text; wide chars unless text_is_1byte is set */
GImage *image; /* optional image shown with (or instead of) the text */
Color fg; /* text foreground colour */
Color bg; /* text background colour */
void *userdata; /* opaque client data; not interpreted by the gadget code */
GFont *font; /* font override for this item */
unsigned int disabled: 1; /* item is greyed/unselectable */
unsigned int image_precedes: 1; /* draw the image before (left of) the text */
unsigned int checkable: 1; /* Only for menus */
unsigned int checked: 1; /* Only for menus */
unsigned int selected: 1; /* Only for lists (used internally for menu(bar)s, when cursor is on the line) */
unsigned int line: 1; /* Only for menus */
unsigned int text_is_1byte: 1; /* If passed in as 1byte (ie. iso-8859-1) text, will be converted */
unsigned int text_in_resource: 1; /* the text field is actually an index into the string resource table */
unsigned int changed: 1; /* If a row/column widget changed this */
unsigned int sort_me_first_in_list: 1; /* used for directories in file chooser widgets */
unichar_t mnemonic; /* Only for menus and menubars */
/* should really be in menuitem, but that wastes space and complicates GTextInfoDraw */
} GTextInfo2;
#define GTEXTINFO2_EMPTY { NULL, NULL, 0x000000, 0x000000, NULL, NULL, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, '\0' }
/* One entry in a menu: label data plus shortcut and callbacks. */
typedef struct gmenuitem {
GTextInfo ti; /* label text/image and checked/disabled state for this entry */
unichar_t shortcut; /* keyboard shortcut character */
short short_mask; /* modifier mask that must accompany the shortcut */
struct gmenuitem *sub; /* submenu items, or NULL for a plain entry */
void (*moveto)(struct gwindow *base,struct gmenuitem *mi,GEvent *); /* called before creating submenu */
void (*invoke)(struct gwindow *base,struct gmenuitem *mi,GEvent *); /* called on mouse release */
int mid; /* menu id; used to find this item via GMenuBarSetItemChecked/Enabled/Name */
} GMenuItem;
#define GMENUITEM_LINE { { NULL, NULL, COLOR_DEFAULT, COLOR_DEFAULT, NULL, NULL, 0, 1, 0, 0, 0, 1, 0, 0, 0, '\0' }, '\0', 0, NULL, NULL, NULL, 0 }
#define GMENUITEM_EMPTY { GTEXTINFO_EMPTY, '\0', 0, NULL, NULL, NULL, 0 }
/* Like GMenuItem but with a textual shortcut specification (parsed by
 * GMenuItemParseShortCut) instead of a char + modifier-mask pair. */
typedef struct gmenuitem2 {
GTextInfo ti; /* label text/image and checked/disabled state for this entry */
char *shortcut; /* shortcut as text, e.g. for GMenuItemParseShortCut() */
struct gmenuitem2 *sub; /* submenu items, or NULL for a plain entry */
void (*moveto)(struct gwindow *base,struct gmenuitem *mi,GEvent *); /* called before creating submenu */
void (*invoke)(struct gwindow *base,struct gmenuitem *mi,GEvent *); /* called on mouse release */
int mid; /* menu id; used to find this item via GMenuBarSetItem* calls */
} GMenuItem2;
#define GMENUITEM2_LINE { { NULL, NULL, COLOR_DEFAULT, COLOR_DEFAULT, NULL, NULL, 0, 1, 0, 0, 0, 1, 0, 0, 0, '\0' }, NULL, NULL, NULL, NULL, 0 }
#define GMENUITEM2_EMPTY { GTEXTINFO_EMPTY, NULL, NULL, NULL, NULL, 0 }
/* Describes one tab of a GTabSet: its label and the gadgets it contains. */
typedef struct tabinfo {
unichar_t *text; /* tab label; wide chars unless text_is_1byte is set */
struct ggadgetcreatedata *gcd; /* gadgets to create inside this tab's subwindow */
unsigned int disabled: 1; /* tab cannot be selected */
unsigned int selected: 1; /* tab starts out selected */
unsigned int text_is_1byte: 1; /* If passed in as 1byte (ie. iso-8859-1) text, will be converted */
unsigned int text_in_resource: 1; /* the text field is actually an index into the string resource table */
unsigned char nesting; /* NOTE(review): appears to be an indent/nesting level for the tab label -- confirm against GTabSet implementation */
} GTabInfo;
#define GTABINFO_EMPTY { NULL, NULL, 0, 0, 0, 0, 0 }
enum border_type { bt_none, bt_box, bt_raised, bt_lowered, bt_engraved,
bt_embossed, bt_double };
enum border_shape { bs_rect, bs_roundrect, bs_elipse, bs_diamond };
enum box_flags {
box_foreground_border_inner = 1, /* 1 point line */
box_foreground_border_outer = 2, /* 1 point line */
box_active_border_inner = 4, /* 1 point line */
box_foreground_shadow_outer = 8, /* 1 point line, bottom&right */
box_do_depressed_background = 0x10,
box_draw_default = 0x20, /* if a default button draw a depressed rect around button */
box_generate_colors = 0x40, /* use border_brightest to compute other border cols */
box_gradient_bg = 0x80
};
/* Visual description of a gadget's box: border style, padding and the
 * full palette of colours used when drawing it in its various states. */
typedef struct gbox {
unsigned char border_type; /* one of enum border_type above */
unsigned char border_shape; /* one of enum border_shape above */
unsigned char border_width; /* In points */
unsigned char padding; /* In points */
unsigned char rr_radius; /* In points (round-rect corner radius) */
unsigned char flags; /* combination of enum box_flags above */
Color border_brightest; /* used for left upper part of elipse */
Color border_brighter;
Color border_darkest; /* used for right lower part of elipse */
Color border_darker;
Color main_background;
Color main_foreground;
Color disabled_background;
Color disabled_foreground;
Color active_border;
Color depressed_background;
Color gradient_bg_end; /* far end of the gradient when box_gradient_bg is set */
Color border_inner; /* colour of the inner 1-point border line */
Color border_outer; /* colour of the outer 1-point border line */
} GBox;
#define GBOX_EMPTY { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ,0 ,0 ,0 ,0 }
typedef struct ggadget GGadget;
typedef struct ggadget *GGadgetSet;
enum sb_type { sb_upline, sb_downline, sb_uppage, sb_downpage, sb_track, sb_trackrelease };
struct scrollbarinit { int32 sb_min, sb_max, sb_pagesize, sb_pos; };
typedef int (*GGadgetHandler)(GGadget *,GEvent *);
typedef unichar_t **(*GTextCompletionHandler)(GGadget *,int from_tab);
/* Creation/behaviour flags for GGadgetData.flags. Several bit values are
 * deliberately reused by unrelated widget kinds (see the aliases at the
 * bottom), so interpret them only in the context of the gadget type. */
enum gg_flags { gg_visible=1, gg_enabled=2, gg_pos_in_pixels=4,
gg_sb_vert=8, gg_line_vert=gg_sb_vert,
gg_but_default=0x10, gg_but_cancel=0x20,
gg_cb_on=0x40, gg_rad_startnew=0x80,
gg_rad_continueold=0x100, /* even if not previous */
gg_list_alphabetic=0x100, gg_list_multiplesel=0x200,
gg_list_exactlyone=0x400, gg_list_internal=0x800,
gg_group_prevlabel=0x1000, gg_group_end=0x2000,
gg_textarea_wrap=0x4000,
gg_tabset_scroll=0x8000, gg_tabset_filllines=0x10000, gg_tabset_fill1line = 0x20000,
gg_tabset_nowindow=gg_textarea_wrap,
gg_rowcol_alphabetic=gg_list_alphabetic,
gg_rowcol_vrules=0x40000, gg_rowcol_hrules=0x800000,
gg_rowcol_displayonly=0x1000000,
gg_dontcopybox=0x10000000,
gg_pos_use0=0x20000000, gg_pos_under=0x40000000,
gg_pos_newline = (int) 0x80000000,
/* NOTE(review): 0x100000000 requires 33 bits; on platforms where int is
 * 32 bits the cast truncates this constant to 0, making the flag a no-op.
 * Left unchanged here because altering the value would change the ABI --
 * this needs a fix at the source (e.g. a different spare bit). */
gg_skip_hotkey_processing = (int) 0x100000000,
/* Reuse some flag values for different widgets */
gg_file_pulldown=gg_sb_vert, gg_file_multiple = gg_list_multiplesel,
gg_text_xim = gg_tabset_scroll,
gg_tabset_vert = gg_sb_vert,
gg_utf8_popup = gg_rowcol_displayonly
};
/* Everything needed to create one gadget: geometry, decoration, label,
 * type-specific payload (the union), flags and the event callback. */
typedef struct ggadgetdata {
GRect pos; /* position/size; units depend on gg_pos_in_pixels in flags */
GBox *box; /* visual box description; copied unless gg_dontcopybox is set -- NOTE(review): confirm copy semantics */
unichar_t mnemonic; /* mnemonic character for the gadget */
unichar_t shortcut; /* keyboard shortcut character */
uint8 short_mask; /* modifier mask that must accompany the shortcut */
uint8 cols; /* for rowcol */
short cid; /* control id, retrievable via GGadgetGetCid() */
GTextInfo *label; /* Overloaded with a GGadgetCreateData * for hvboxes (their label is a gadget) */
union {
GTextInfo *list; /* for List Widgets (and ListButtons, RowCols etc) */
GTabInfo *tabs; /* for Tab Widgets */
GMenuItem *menu; /* for menus */
GMenuItem2 *menu2; /* for menus (alternate) */
struct ggadgetcreatedata **boxelements; /* An array of things to go in the box */
struct matrixinit *matrix; /* for matrix-edit widgets */
GDrawEH drawable_e_h; /* Drawable event handler */
GTextCompletionHandler completion; /* for text-completion fields */
struct scrollbarinit *sbinit; /* for scrollbars */
Color col; /* for colour buttons */
int radiogroup; /* for radio buttons */
} u;
enum gg_flags flags; /* combination of enum gg_flags bits */
const unichar_t *popup_msg; /* Brief help message */
GGadgetHandler handle_controlevent; /* callback invoked on control events */
} GGadgetData;
#define GGADGETDATA_EMPTY { GRECT_EMPTY, NULL, '\0', '\0', 0, 0, 0, NULL, { NULL }, 0, NULL, NULL }
/* Binds a gadget constructor to its creation data so arrays of these can
 * be instantiated in one pass by GGadgetsCreate()/CreateGadgets(). */
typedef struct ggadgetcreatedata {
GGadget *(*creator)(struct gwindow *base, GGadgetData *gd,void *data); /* constructor, e.g. GButtonCreate */
GGadgetData gd; /* creation parameters passed to the constructor */
void *data; /* opaque client data forwarded to the constructor */
GGadget *ret; /* filled in with the created gadget */
} GGadgetCreateData;
#define GGADGETCREATEDATA_EMPTY { NULL, GGADGETDATA_EMPTY, NULL, NULL }
#define GCD_Glue ((GGadgetCreateData *) -1) /* Special entries */
#define GCD_ColSpan ((GGadgetCreateData *) -2) /* for box elements */
#define GCD_RowSpan ((GGadgetCreateData *) -3)
#define GCD_HPad10 ((GGadgetCreateData *) -4)
enum ghvbox_expand { gb_expandglue=-4, gb_expandgluesame=-3, gb_samesize=-2,
gb_expandall=-1 };
enum editor_commands { ec_cut, ec_clear, ec_copy, ec_paste, ec_undo, ec_redo,
ec_selectall, ec_search, ec_backsearch, ec_backword, ec_deleteword,
ec_max };
/* return values from file chooser filter functions */
enum fchooserret { fc_hide, fc_show, fc_showdisabled };
enum me_type { me_int, me_enum, me_real, me_string, me_bigstr, me_func,
me_funcedit,
me_stringchoice, me_stringchoicetrans, me_stringchoicetag,
me_button,
me_hex, me_uhex, me_addr, me_onlyfuncedit };
/* Per-column configuration for a matrix-edit widget. */
struct col_init {
enum me_type me_type; /* cell data type for this column (see enum me_type) */
char *(*func)(GGadget *,int r,int c); /* for me_func/me_funcedit cells -- NOTE(review): confirm exact role against GMatrixEdit code */
GTextInfo *enum_vals; /* choices for me_enum / me_stringchoice* columns */
void (*enable_enum)(GGadget *,GMenuItem *, int r, int c); /* hook to enable/disable enum choices per cell */
char *title; /* column header text */
};
/* One cell of a matrix-edit widget; which union member is valid is
 * determined by the column's me_type in struct col_init. */
struct matrix_data {
union {
intpt md_ival; /* me_int / me_enum / me_hex etc. */
double md_real; /* me_real */
char *md_str; /* me_string / me_bigstr and other string kinds */
void *md_addr; /* me_addr */
} u;
uint8 frozen; /* cell may not be edited */
uint8 user_bits; /* free for client use */
uint8 current; /* NOTE(review): appears to mark the active cell -- confirm */
};
/* Initial contents and callbacks for a matrix-edit widget
 * (see GMatrixEditCreate / GMatrixEditSet). */
struct matrixinit {
int col_cnt; /* number of columns */
struct col_init *col_init; /* per-column configuration, col_cnt entries */
int initial_row_cnt; /* number of rows initially present */
struct matrix_data *matrix_data; /* initial cells, row-major, col_cnt * initial_row_cnt entries */
void (*initrow)(GGadget *g,int row); /* called to initialize a newly added row */
int (*candelete)(GGadget *g,int row); /* return non-zero if the row may be deleted */
void (*finishedit)(GGadget *g,int r, int c, int wasnew); /* called after a cell edit completes */
void (*popupmenu)(GGadget *g,GEvent *e,int row,int col); /* context-menu hook for a cell */
int (*handle_key)(GGadget *g,GEvent *e); /* optional key-event override */
char *(*bigedittitle)(GGadget *g,int r, int c); /* title for the big-string edit dialog */
};
#define COL_INIT_EMPTY { 0, NULL, NULL, NULL, NULL }
#define MATRIX_DATA_EMPTY { { 0 }, 0, 0, 0 }
#define MATRIXINIT_EMPTY { 0, NULL, 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL }
#define GME_NoChange 0x80000000
struct gdirentry;
typedef enum fchooserret (*GFileChooserFilterType)(GGadget *g,struct gdirentry *ent,
const unichar_t *dir);
typedef int (*GFileChooserInputFilenameFuncType)( GGadget *g,
const unichar_t ** currentFilename,
unichar_t* oldfilename );
/* Obsolete */
#define _STR_NULL (-1) /* Null string resource */
#define _STR_Language 0
#define _STR_OK 1
#define _STR_Cancel 2
#define _STR_Open 3
#define _STR_Save 4
#define _STR_Filter 5
#define _STR_New 6
#define _STR_Replace 7
#define _STR_Fileexists 8
#define _STR_Fileexistspre 9
#define _STR_Fileexistspost 10
#define _STR_Createdir 11
#define _STR_Dirname 12
#define _STR_Couldntcreatedir 13
#define _STR_SelectAll 14
#define _STR_None 15
#define __STR_LastStd 15
#define _NUM_Buttonsize 0
#define _NUM_ScaleFactor 1
#define __NUM_LastStd 1
extern void GTextInfoFree(GTextInfo *ti);
extern void GTextInfoListFree(GTextInfo *ti);
extern void GTextInfoArrayFree(GTextInfo **ti);
extern GTextInfo **GTextInfoFromChars(char **array, int len);
extern const unichar_t *GStringGetResource(int index,unichar_t *mnemonic);
extern int GGadgetScale(int xpos);
extern int GIntGetResource(int index);
extern int GStringSetResourceFileV(char *filename,uint32 checksum);
extern int GStringSetResourceFile(char *filename); /* returns 1 for success, 0 for failure */
/* fallback string arrays are null terminated. mnemonics is same length as string */
/* fallback integer arrays are terminated by 0x80000000 (negative infinity) */
extern void GStringSetFallbackArray(const unichar_t **array,const unichar_t *mn,
const int *ires);
unichar_t *GStringFileGetResource(char *filename, int index,unichar_t *mnemonic);
extern void GResourceUseGetText(void);
extern void *GResource_font_cvt(char *val, void *def);
extern FontInstance *GResourceFindFont(char *resourcename,FontInstance *deffont);
void GGadgetDestroy(GGadget *g);
void GGadgetSetVisible(GGadget *g,int visible);
int GGadgetIsVisible(GGadget *g);
void GGadgetSetEnabled(GGadget *g,int enabled);
int GGadgetIsEnabled(GGadget *g);
GWindow GGadgetGetWindow(GGadget *g);
void *GGadgetGetUserData(GGadget *g);
void GGadgetSetUserData(GGadget *g, void *d);
void GGadgetSetPopupMsg(GGadget *g, const unichar_t *msg);
int GGadgetContains(GGadget *g, int x, int y );
int GGadgetContainsEventLocation(GGadget *g, GEvent* e );
GRect *GGadgetGetInnerSize(GGadget *g,GRect *rct);
GRect *GGadgetGetSize(GGadget *g,GRect *rct);
void GGadgetSetSize(GGadget *g,GRect *rct);
void GGadgetGetDesiredVisibleSize(GGadget *g,GRect *outer, GRect *inner);
void GGadgetGetDesiredSize(GGadget *g,GRect *outer, GRect *inner);
void GGadgetSetDesiredSize(GGadget *g,GRect *outer, GRect *inner);
int GGadgetGetCid(GGadget *g);
void GGadgetResize(GGadget *g,int32 width, int32 height );
void GGadgetMove(GGadget *g,int32 x, int32 y );
void GGadgetMoveAddToY(GGadget *g, int32 yoffset );
int32 GGadgetGetX(GGadget *g);
int32 GGadgetGetY(GGadget *g);
void GGadgetSetY(GGadget *g, int32 y );
void GGadgetRedraw(GGadget *g);
void GGadgetsCreate(GWindow base, GGadgetCreateData *gcd);
int GGadgetFillsWindow(GGadget *g);
int GGadgetIsDefault(GGadget *g);
void GGadgetSetTitle(GGadget *g,const unichar_t *title);
void GGadgetSetTitle8(GGadget *g,const char *title);
void GGadgetSetTitle8WithMn(GGadget *g,const char *title);
const unichar_t *_GGadgetGetTitle(GGadget *g); /* Do not free!!! */
unichar_t *GGadgetGetTitle(GGadget *g); /* Free the return */
char *GGadgetGetTitle8(GGadget *g); /* Free the return (utf8) */
void GGadgetSetFont(GGadget *g,GFont *font);
GFont *GGadgetGetFont(GGadget *g);
int GGadgetEditCmd(GGadget *g,enum editor_commands cmd);
int GGadgetActiveGadgetEditCmd(GWindow gw,enum editor_commands cmd);
void GGadgetSetHandler(GGadget *g, GGadgetHandler handler);
GGadgetHandler GGadgetGetHandler(GGadget *g);
void GTextFieldSelect(GGadget *g,int sel_start, int sel_end);
void GTextFieldShow(GGadget *g,int pos);
void GTextFieldReplace(GGadget *g,const unichar_t *txt);
void GCompletionFieldSetCompletion(GGadget *g,GTextCompletionHandler completion);
void GCompletionFieldSetCompletionMode(GGadget *g,int enabled);
void GGadgetClearList(GGadget *g);
void GGadgetSetList(GGadget *g, GTextInfo **ti, int32 copyit);
GTextInfo **GGadgetGetList(GGadget *g,int32 *len); /* Do not free!!! */
GTextInfo *GGadgetGetListItem(GGadget *g,int32 pos);
GTextInfo *GGadgetGetListItemSelected(GGadget *g);
void GGadgetSelectListItem(GGadget *g,int32 pos,int32 sel);
void GGadgetSelectOneListItem(GGadget *g,int32 pos);
int32 GGadgetIsListItemSelected(GGadget *g,int32 pos);
int32 GGadgetGetFirstListSelectedItem(GGadget *g);
void GGadgetScrollListToPos(GGadget *g,int32 pos);
void GGadgetScrollListToText(GGadget *g,const unichar_t *lab,int32 sel);
void GGadgetSetListOrderer(GGadget *g,int (*orderer)(const void *, const void *));
void GColorButtonSetColor(GGadget *g, Color col);
Color GColorButtonGetColor(GGadget *g);
void GGadgetSetChecked(GGadget *g, int ison);
int GGadgetIsChecked(GGadget *g);
int GListIndexFromY(GGadget *g,int y);
void GListSetSBAlwaysVisible(GGadget *g,int always);
void GListSetPopupCallback(GGadget *g,void (*callback)(GGadget *,int));
int GTabSetGetSel(GGadget *g);
void GTabSetSetSel(GGadget *g,int sel);
void GTabSetSetEnabled(GGadget *g,int pos, int enabled);
GWindow GTabSetGetSubwindow(GGadget *g,int pos);
int GTabSetGetTabLines(GGadget *g);
void GTabSetSetNestedExpose(GGadget *g, void (*)(GWindow,GGadget *,GEvent *));
void GTabSetSetNestedMouse(GGadget *g, int (*)(GGadget *,GEvent *));
void GTabSetChangeTabName(GGadget *g, const char *name, int pos);
void GTabSetRemetric(GGadget *g);
void GTabSetRemoveTabByPos(GGadget *g, int pos);
void GTabSetRemoveTabByName(GGadget *g, char *name);
int32 GScrollBarGetPos(GGadget *g);
int32 GScrollBarSetPos(GGadget *g,int32 pos);
int32 GScrollBarAddToPos(GGadget *g,int32 offset);
void GScrollBarSetMustShow(GGadget *g, int32 sb_min, int32 sb_max, int32 sb_pagesize,
int32 sb_mustshow);
void GScrollBarSetBounds(GGadget *g, int32 sb_min, int32 sb_max, int32 sb_pagesize );
void GScrollBarGetBounds(GGadget *g, int32 *sb_min, int32 *sb_max, int32 *sb_pagesize );
void GMenuBarSetItemChecked(GGadget *g, int mid, int check);
void GMenuBarSetItemEnabled(GGadget *g, int mid, int enabled);
void GMenuBarSetItemName(GGadget *g, int mid, const unichar_t *name);
void GMenuSetShortcutDomain(char *domain);
const char *GMenuGetShortcutDomain(void);
int GMenuIsCommand(GEvent *event,char *shortcut);
int GMenuMask(void);
int GMenuAnyUnmaskedShortcuts(GGadget *mb1, GGadget *mb2);
void GFileChooserPopupCheck(GGadget *g,GEvent *e);
void GFileChooserFilterIt(GGadget *g);
void GFileChooserRefreshList(GGadget *g);
int GFileChooserFilterEh(GGadget *g,GEvent *e);
void GFileChooserConnectButtons(GGadget *g,GGadget *ok, GGadget *filter);
void GFileChooserSetFilterText(GGadget *g,const unichar_t *filter);
void GFileChooserSetFilterFunc(GGadget *g,GFileChooserFilterType filter);
void GFileChooserSetInputFilenameFunc(GGadget *g,GFileChooserInputFilenameFuncType filter);
int GFileChooserDefInputFilenameFunc( GGadget *g, const unichar_t** currentFilename, unichar_t* oldfilename );
GFileChooserInputFilenameFuncType GFileChooserGetInputFilenameFunc(GGadget *g);
void GFileChooserSetDir(GGadget *g,unichar_t *dir);
struct giocontrol *GFileChooserReplaceIO(GGadget *g,struct giocontrol *gc);
unichar_t *GFileChooserGetDir(GGadget *g);
unichar_t *GFileChooserGetFilterText(GGadget *g);
GFileChooserFilterType GFileChooserGetFilterFunc(GGadget *g);
void GFileChooserSetFilename(GGadget *g,const unichar_t *defaultfile);
void GFileChooserSetMimetypes(GGadget *g,unichar_t **mimetypes);
unichar_t **GFileChooserGetMimetypes(GGadget *g);
void GFileChooserGetChildren(GGadget *g,GGadget **pulldown, GGadget **list, GGadget **tf);
int GFileChooserPosIsDir(GGadget *g, int pos);
unichar_t *GFileChooserFileNameOfPos(GGadget *g, int pos);
void GFileChooserSetShowHidden(int sh);
int GFileChooserGetShowHidden(void);
void GFileChooserSetDirectoryPlacement(int dp);
int GFileChooserGetDirectoryPlacement(void);
void GFileChooserSetBookmarks(unichar_t **b);
void GFileChooserSetPaths(GGadget *g, const char* const* path);
unichar_t **GFileChooserGetBookmarks(void);
void GFileChooserSetPrefsChangedCallback(void *data, void (*p_c)(void *));
void GHVBoxSetExpandableCol(GGadget *g,int col);
void GHVBoxSetExpandableRow(GGadget *g,int row);
void GHVBoxSetPadding(GGadget *g,int hpad, int vpad);
void GHVBoxFitWindow(GGadget *g);
void GHVBoxFitWindowCentered(GGadget *g);
void GHVBoxReflow(GGadget *g);
void GMatrixEditSet(GGadget *g,struct matrix_data *data, int rows, int copy_it);
struct matrix_data *GMatrixEditGet(GGadget *g, int *rows);
struct matrix_data *_GMatrixEditGet(GGadget *g, int *rows);
GGadget *_GMatrixEditGetActiveTextField(GGadget *g);
int GMatrixEditGetColCnt(GGadget *g);
int GMatrixEditGetActiveRow(GGadget *g);
int GMatrixEditGetActiveCol(GGadget *g);
void GMatrixEditActivateRowCol(GGadget *g, int r, int c);
void GMatrixEditDeleteRow(GGadget *g,int row);
void GMatrixEditScrollToRowCol(GGadget *g,int r, int c);
int GMatrixEditStringDlg(GGadget *g,int row,int col);
void GMatrixEditSetNewText(GGadget *g, char *text);
void GMatrixEditSetOtherButtonEnable(GGadget *g, void (*sob)(GGadget *g, int r, int c));
void GMatrixEditSetMouseMoveReporter(GGadget *g, void (*rmm)(GGadget *g, int r, int c));
void GMatrixEditSetTextChangeReporter(GGadget *g, void (*tcr)(GGadget *g, int r, int c, GGadget *text));
void GMatrixEditSetValidateStr(GGadget *g, char *(*validate)(GGadget *g, int r, int c, int wasnew, char *str));
void GMatrixEditSetBeforeDelete(GGadget *g, void (*predelete)(GGadget *g, int r));
void GMatrixEditSetRowMotionCallback(GGadget *g, void (*rowmotion)(GGadget *g, int oldr, int newr));
void GMatrixEditUp(GGadget *g);
void GMatrixEditDown(GGadget *g);
enum gme_updown { ud_up_enabled=1, ud_down_enabled=2 };
void GMatrixEditSetCanUpDown(GGadget *g, enum gme_updown (*canupdown)(GGadget *g, int r));
void GMatrixEditSetUpDownVisible(GGadget *g, int visible);
void GMatrixEditAddButtons(GGadget *g, GGadgetCreateData *gcd);
void GMatrixEditEnableColumn(GGadget *g, int col, int enabled);
void GMatrixEditShowColumn(GGadget *g, int col, int visible);
void GMatrixEditSetColumnChoices(GGadget *g, int col, GTextInfo *ti);
GMenuItem *GMatrixEditGetColumnChoices(GGadget *g, int col);
void GMatrixEditSetColumnCompletion(GGadget *g, int col, GTextCompletionHandler completion);
void GMatrixEditSetEditable(GGadget *g, int editable);
GWindow GDrawableGetWindow(GGadget *g);
extern void GGadgetPreparePopupImage(GWindow base,const unichar_t *msg,
const void *data,
GImage *(*get_image)(const void *data),
void (*free_image)(const void *data,GImage *img));
extern void GGadgetPreparePopup(GWindow base,const unichar_t *msg);
extern void GGadgetPreparePopupR(GWindow base,int msg);
extern void GGadgetPreparePopup8(GWindow base, const char *msg);
extern void GGadgetEndPopup(void);
extern void GGadgetPopupExternalEvent(GEvent *e);
extern int GGadgetDispatchEvent(GGadget *g,GEvent *e);
extern void GGadgetTakesKeyboard(GGadget *g, int takes_keyboard);
/* Handles *?{}[] wildcards */
int GGadgetWildMatch(unichar_t *pattern, unichar_t *name,int ignorecase);
enum fchooserret GFileChooserDefFilter(GGadget *g,struct gdirentry *ent,
const unichar_t *dir);
GWindow GMenuCreatePopupMenu(GWindow owner,GEvent *event, GMenuItem *mi);
GWindow GMenuCreatePopupMenuWithName(GWindow owner,GEvent *event, char* subMenuName,GMenuItem *mi);
GWindow _GMenuCreatePopupMenu(GWindow owner,GEvent *event, GMenuItem *mi,
void (*donecallback)(GWindow owner));
GWindow _GMenuCreatePopupMenuWithName(GWindow owner,GEvent *event, GMenuItem *mi,
char* subMenuName,
void (*donecallback)(GWindow owner));
GGadget *GLineCreate(struct gwindow *base, GGadgetData *gd,void *data);
GGadget *GGroupCreate(struct gwindow *base, GGadgetData *gd,void *data);
GGadget *GSpacerCreate(struct gwindow *base, GGadgetData *gd,void *data);
GGadget *GLabelCreate(struct gwindow *base, GGadgetData *gd,void *data);
GGadget *GButtonCreate(struct gwindow *base, GGadgetData *gd,void *data);
GGadget *GImageButtonCreate(struct gwindow *base, GGadgetData *gd,void *data);
GGadget *GListButtonCreate(struct gwindow *base, GGadgetData *gd,void *data);
GGadget *GColorButtonCreate(struct gwindow *base, GGadgetData *gd,void *data);
GGadget *GRadioCreate(struct gwindow *base, GGadgetData *gd,void *data);
GGadget *GCheckBoxCreate(struct gwindow *base, GGadgetData *gd,void *data);
GGadget *GVisibilityBoxCreate(struct gwindow *base, GGadgetData *gd,void *data);
GGadget *GScrollBarCreate(struct gwindow *base, GGadgetData *gd,void *data);
GGadget *GListCreate(struct gwindow *base, GGadgetData *gd,void *data);
GGadget *GTextFieldCreate(struct gwindow *base, GGadgetData *gd,void *data);
GGadget *GPasswordCreate(struct gwindow *base, GGadgetData *gd,void *data);
GGadget *GNumericFieldCreate(struct gwindow *base, GGadgetData *gd,void *data);
GGadget *GTextCompletionCreate(struct gwindow *base, GGadgetData *gd,void *data);
GGadget *GTextAreaCreate(struct gwindow *base, GGadgetData *gd,void *data);
GGadget *GListFieldCreate(struct gwindow *base, GGadgetData *gd,void *data);
GGadget *GSimpleListFieldCreate(struct gwindow *base, GGadgetData *gd,void *data);
GGadget *GMenuBarCreate(struct gwindow *base, GGadgetData *gd,void *data);
GGadget *GMenu2BarCreate(struct gwindow *base, GGadgetData *gd,void *data);
GGadget *GTabSetCreate(struct gwindow *base, GGadgetData *gd,void *data);
GGadget *GFileChooserCreate(struct gwindow *base, GGadgetData *gd,void *data);
GGadget *GHBoxCreate(struct gwindow *base, GGadgetData *gd,void *data);
GGadget *GVBoxCreate(struct gwindow *base, GGadgetData *gd,void *data);
GGadget *GHVBoxCreate(struct gwindow *base, GGadgetData *gd,void *data);
GGadget *GHVGroupCreate(struct gwindow *base, GGadgetData *gd,void *data);
GGadget *GMatrixEditCreate(struct gwindow *base, GGadgetData *gd,void *data);
GGadget *GDrawableCreate(struct gwindow *base, GGadgetData *gd,void *data);
GGadget *CreateSlider(struct gwindow *base, GGadgetData *gd,void *data);
GGadget *CreateFileChooser(struct gwindow *base, GGadgetData *gd,void *data);
GGadget *CreateGadgets(struct gwindow *base, GGadgetCreateData *gcd);
GTextInfo **GTextInfoArrayFromList(GTextInfo *ti, uint16 *cnt);
/* An image resource: the loaded image plus the file it came from
 * (see GGadgetResourceFindImage). */
typedef struct gresimage {
GImage *image; /* the loaded image */
char *filename; /* file the image was loaded from */
} GResImage;
GResImage *GGadgetResourceFindImage(char *name, GImage *def);
void InitImageCache();
void ClearImageCache();
void GGadgetSetImageDir(char *dir);
void GGadgetSetImagePath(char *path);
GImage *GGadgetImageCache(const char *filename);
int TryGGadgetImageCache(GImage *image, const char *name);
extern unichar_t *utf82u_mncopy(const char *utf8buf,unichar_t *mn);
extern double GetCalmReal8(GWindow gw,int cid,char *namer,int *err);
extern double GetReal8(GWindow gw,int cid,char *namer,int *err);
extern int GetCalmInt8(GWindow gw,int cid,char *name,int *err);
extern int GetInt8(GWindow gw,int cid,char *namer,int *err);
extern int GetUnicodeChar8(GWindow gw,int cid,char *namer,int *err);
extern void GGadgetProtest8(char *labelr);
extern void GMenuItemParseShortCut(GMenuItem *mi,char *shortcut);
extern int GMenuItemParseMask(char *shortcut);
extern int GGadgetUndoMacEnglishOptionCombinations(GEvent *event);
/* Among other things, this routine sets global icon cache up. */
extern void GGadgetInit(void);
extern int GGadgetWithin(GGadget *g, int x, int y);
extern void GMenuItemArrayFree(GMenuItem *mi);
extern void GMenuItem2ArrayFree(GMenuItem2 *mi);
extern GMenuItem *GMenuItemArrayCopy(GMenuItem *mi, uint16 *cnt);
extern GMenuItem *GMenuItem2ArrayCopy(GMenuItem2 *mi, uint16 *cnt);
extern void GVisibilityBoxSetToMinWH(GGadget *g);
extern void GGadgetSetSkipHotkeyProcessing( GGadget *g, int v );
extern int GGadgetGetSkipHotkeyProcessing( GGadget *g );
extern void GGadgetSetSkipUnQualifiedHotkeyProcessing( GGadget *g, int v );
extern int GGadgetGetSkipUnQualifiedHotkeyProcessing( GGadget *g );
#endif /* FONTFORGE_GGADGET_H */
|
/* Copyright (C) 2000-2012 by George Williams */
/*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
* EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef FONTFORGE_GGADGET_H
#define FONTFORGE_GGADGET_H
#include "gdraw.h"
#include "intl.h"
struct giocontrol;
#ifndef MAX
#define MAX(x,y) (((x) > (y)) ? (x) : (y))
#endif
#ifndef MIN
#define MIN(x,y) (((x) < (y)) ? (x) : (y))
#endif
/* Descriptor for one textual/image item as used by labels, buttons, lists,
 * menus and menubars.  Several bit-flags are only meaningful for particular
 * widget kinds -- see the per-field comments. */
typedef struct gtextinfo {
    unichar_t *text;		/* item text (UCS-2/4 unless text_is_1byte is set) */
    GImage *image;		/* optional image shown with (or instead of) the text */
    Color fg;			/* foreground colour; COLOR_DEFAULT to inherit */
    Color bg;			/* background colour; COLOR_DEFAULT to inherit */
    void *userdata;		/* opaque client data attached to the item */
    GFont *font;		/* optional per-item font override */
    unsigned int disabled: 1;
    unsigned int image_precedes: 1;	/* draw image before (left of) the text */
    unsigned int checkable: 1; /* Only for menus */
    unsigned int checked: 1; /* Only for menus */
    unsigned int selected: 1; /* Only for lists (used internally for menu(bar)s, when cursor is on the line) */
    unsigned int line: 1; /* Only for menus */
    unsigned int text_is_1byte: 1; /* If passed in as 1byte (ie. iso-8859-1) text, will be converted */
    unsigned int text_in_resource: 1; /* the text field is actually an index into the string resource table */
    unsigned int changed: 1; /* If a row/column widget changed this */
    unichar_t mnemonic; /* Only for menus and menubars */
    /* should really be in menuitem, but that wastes space and complicates GTextInfoDraw */
    char* text_untranslated; /* used to simplify hotkey lookup for menus. */
    /*
     * text_untranslated is either the GMenuItem2 shortcut or the GTextInfo prior
     * to translation occurring. The shortcut text is considered first
     * to allow the code to make the value explicit. This is useful in
     * cases where the menu text to be translated (GTextInfo.text
     * prior to calling sgettext() on it) is specially designed for
     * translation, like File|New is. Having the hotkey of "New|No
     * Shortcut" will give a text_untranslated of "New|No Shortcut".
     * See HKTextInfoToUntranslatedText() for stripping out any
     * potential underscore and the trailing "|Rest" string.
     *
     * Using a pointer like this relies on the GMenuItems used to make
     * the menus are a static structure that outlasts the
     * menu/gtextinfo itself.
     **/
} GTextInfo;
/* Zero-initializer matching the field order above. */
#define GTEXTINFO_EMPTY { NULL, NULL, 0x000000, 0x000000, NULL, NULL, 0, 0, 0, 0, 0, 0, 0, 0, 0, '\0' }
/* Variant of GTextInfo with an extra sort_me_first_in_list bit and without
 * the text_untranslated bookkeeping; used where directory-first sorting is
 * needed (file chooser lists). */
typedef struct gtextinfo2 {
    unichar_t *text;
    GImage *image;
    Color fg;
    Color bg;
    void *userdata;
    GFont *font;
    unsigned int disabled: 1;
    unsigned int image_precedes: 1;
    unsigned int checkable: 1; /* Only for menus */
    unsigned int checked: 1; /* Only for menus */
    unsigned int selected: 1; /* Only for lists (used internally for menu(bar)s, when cursor is on the line) */
    unsigned int line: 1; /* Only for menus */
    unsigned int text_is_1byte: 1; /* If passed in as 1byte (ie. iso-8859-1) text, will be converted */
    unsigned int text_in_resource: 1; /* the text field is actually an index into the string resource table */
    unsigned int changed: 1; /* If a row/column widget changed this */
    unsigned int sort_me_first_in_list: 1; /* used for directories in file chooser widgets */
    unichar_t mnemonic; /* Only for menus and menubars */
    /* should really be in menuitem, but that wastes space and complicates GTextInfoDraw */
} GTextInfo2;
/* Zero-initializer matching the field order above. */
#define GTEXTINFO2_EMPTY { NULL, NULL, 0x000000, 0x000000, NULL, NULL, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, '\0' }
/* One menu entry: display info, keyboard shortcut, optional submenu and the
 * callbacks fired when the item is highlighted / activated. */
typedef struct gmenuitem {
    GTextInfo ti;		/* label, image, check state, mnemonic */
    unichar_t shortcut;		/* shortcut key (character form) */
    short short_mask;		/* modifier mask required with the shortcut */
    struct gmenuitem *sub;	/* submenu, or NULL for a plain item */
    void (*moveto)(struct gwindow *base,struct gmenuitem *mi,GEvent *); /* called before creating submenu */
    void (*invoke)(struct gwindow *base,struct gmenuitem *mi,GEvent *); /* called on mouse release */
    int mid;			/* menu id used by GMenuBarSetItem* lookups */
} GMenuItem;
/* Initializer for a separator line entry. */
#define GMENUITEM_LINE { { NULL, NULL, COLOR_DEFAULT, COLOR_DEFAULT, NULL, NULL, 0, 1, 0, 0, 0, 1, 0, 0, 0, '\0' }, '\0', 0, NULL, NULL, NULL, 0 }
/* Zero-initializer / list terminator. */
#define GMENUITEM_EMPTY { GTEXTINFO_EMPTY, '\0', 0, NULL, NULL, NULL, 0 }
/* Alternate menu entry whose shortcut is a textual specification (parsed by
 * GMenuItemParseShortCut) rather than a char + modifier mask. */
typedef struct gmenuitem2 {
    GTextInfo ti;		/* label, image, check state, mnemonic */
    char *shortcut;		/* textual shortcut spec, e.g. parsed by GMenuItemParseShortCut */
    struct gmenuitem2 *sub;	/* submenu, or NULL for a plain item */
    void (*moveto)(struct gwindow *base,struct gmenuitem *mi,GEvent *); /* called before creating submenu */
    void (*invoke)(struct gwindow *base,struct gmenuitem *mi,GEvent *); /* called on mouse release */
    int mid;			/* menu id used by GMenuBarSetItem* lookups */
} GMenuItem2;
/* Initializer for a separator line entry. */
#define GMENUITEM2_LINE { { NULL, NULL, COLOR_DEFAULT, COLOR_DEFAULT, NULL, NULL, 0, 1, 0, 0, 0, 1, 0, 0, 0, '\0' }, NULL, NULL, NULL, NULL, 0 }
/* Zero-initializer / list terminator. */
#define GMENUITEM2_EMPTY { GTEXTINFO_EMPTY, NULL, NULL, NULL, NULL, 0 }
/* Describes one tab of a GTabSet: its title and the gadgets placed on it. */
typedef struct tabinfo {
    unichar_t *text;			/* tab title */
    struct ggadgetcreatedata *gcd;	/* gadgets to create inside this tab */
    unsigned int disabled: 1;
    unsigned int selected: 1;
    unsigned int text_is_1byte: 1; /* If passed in as 1byte (ie. iso-8859-1) text, will be converted */
    unsigned int text_in_resource: 1; /* the text field is actually an index into the string resource table */
    unsigned char nesting;		/* indentation level of the tab label */
} GTabInfo;
/* Zero-initializer / list terminator. */
#define GTABINFO_EMPTY { NULL, NULL, 0, 0, 0, 0, 0 }
/* Visual style of a gadget border. */
enum border_type { bt_none, bt_box, bt_raised, bt_lowered, bt_engraved,
	bt_embossed, bt_double };
/* Outline shape of a gadget box.  (bs_elipse is the historical spelling.) */
enum border_shape { bs_rect, bs_roundrect, bs_elipse, bs_diamond };
/* Extra drawing options for a GBox, combined as a bitmask in GBox.flags. */
enum box_flags {
    box_foreground_border_inner = 1,	/* 1 point line */
    box_foreground_border_outer = 2,	/* 1 point line */
    box_active_border_inner = 4,	/* 1 point line */
    box_foreground_shadow_outer = 8,	/* 1 point line, bottom&right */
    box_do_depressed_background = 0x10,
    box_draw_default = 0x20,	/* if a default button draw a depressed rect around button */
    box_generate_colors = 0x40,	/* use border_brightest to compute other border cols */
    box_gradient_bg = 0x80
};
/* Complete visual specification (border, padding, colours) for a gadget.
 * Shared by most widgets via GGadgetData.box. */
typedef struct gbox {
    unsigned char border_type;	/* enum border_type */
    unsigned char border_shape;	/* enum border_shape */
    unsigned char border_width;	/* In points */
    unsigned char padding;	/* In points */
    unsigned char rr_radius;	/* In points; round-rect corner radius */
    unsigned char flags;	/* enum box_flags bitmask */
    Color border_brightest;	/* used for left upper part of ellipse */
    Color border_brighter;
    Color border_darkest;	/* used for right lower part of ellipse */
    Color border_darker;
    Color main_background;
    Color main_foreground;
    Color disabled_background;
    Color disabled_foreground;
    Color active_border;
    Color depressed_background;
    Color gradient_bg_end;	/* end colour when box_gradient_bg is set */
    Color border_inner;
    Color border_outer;
} GBox;
/* Zero-initializer matching the field order above. */
#define GBOX_EMPTY { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ,0 ,0 ,0 ,0 }
/* Opaque gadget handle; the concrete struct lives in the implementation. */
typedef struct ggadget GGadget;
typedef struct ggadget *GGadgetSet;
/* Scrollbar event subtypes reported with sb events. */
enum sb_type { sb_upline, sb_downline, sb_uppage, sb_downpage, sb_track, sb_trackrelease };
/* Initial range/position for a scrollbar gadget. */
struct scrollbarinit { int32 sb_min, sb_max, sb_pagesize, sb_pos; };
/* Callback invoked when a gadget generates a control event. */
typedef int (*GGadgetHandler)(GGadget *,GEvent *);
/* Returns a NULL-terminated list of completions for a text field. */
typedef unichar_t **(*GTextCompletionHandler)(GGadget *,int from_tab);
/* Creation-time flags for GGadgetData.flags.  Several bit values are reused
 * by widgets that can never coexist (see the aliases at the bottom). */
enum gg_flags { gg_visible=1, gg_enabled=2, gg_pos_in_pixels=4,
	gg_sb_vert=8, gg_line_vert=gg_sb_vert,
	gg_but_default=0x10, gg_but_cancel=0x20,
	gg_cb_on=0x40, gg_rad_startnew=0x80,
	gg_rad_continueold=0x100,	/* even if not previous */
	gg_list_alphabetic=0x100, gg_list_multiplesel=0x200,
	gg_list_exactlyone=0x400, gg_list_internal=0x800,
	gg_group_prevlabel=0x1000, gg_group_end=0x2000,
	gg_textarea_wrap=0x4000,
	gg_tabset_scroll=0x8000, gg_tabset_filllines=0x10000, gg_tabset_fill1line = 0x20000,
	gg_tabset_nowindow=gg_textarea_wrap,
	gg_rowcol_alphabetic=gg_list_alphabetic,
	gg_rowcol_vrules=0x40000, gg_rowcol_hrules=0x800000,
	gg_rowcol_displayonly=0x1000000,
	gg_dontcopybox=0x10000000,
	gg_pos_use0=0x20000000, gg_pos_under=0x40000000,
	gg_pos_newline = (int) 0x80000000,
	/* NOTE(review): 0x100000000 is 2^32 and does not fit in a 32-bit int;
	 * on such platforms the cast truncates this constant to 0, making the
	 * flag a no-op when stored in enum gg_flags.  Confirm intended value. */
	gg_skip_hotkey_processing = (int) 0x100000000,
	/* Reuse some flag values for different widgets */
	gg_file_pulldown=gg_sb_vert, gg_file_multiple = gg_list_multiplesel,
	gg_text_xim = gg_tabset_scroll,
	gg_tabset_vert = gg_sb_vert,
	gg_utf8_popup = gg_rowcol_displayonly
};
/* Everything needed to create one gadget: geometry, styling, label, the
 * widget-specific payload in the union, flags and the event callback. */
typedef struct ggadgetdata {
    GRect pos;			/* position/size; units depend on gg_pos_in_pixels */
    GBox *box;			/* visual style; NULL for the widget default */
    unichar_t mnemonic;
    unichar_t shortcut;
    uint8 short_mask;		/* modifier mask for the shortcut */
    uint8 cols;			/* for rowcol */
    short cid;			/* control id for later lookup */
    GTextInfo *label;		/* Overloaded with a GGadgetCreateData * for hvboxes (their label is a gadget) */
    union {			/* widget-specific payload; which member applies depends on the creator used */
        GTextInfo *list;	/* for List Widgets (and ListButtons, RowCols etc) */
        GTabInfo *tabs;		/* for Tab Widgets */
        GMenuItem *menu;	/* for menus */
        GMenuItem2 *menu2;	/* for menus (alternate) */
        struct ggadgetcreatedata **boxelements;	/* An array of things to go in the box */
        struct matrixinit *matrix;
        GDrawEH drawable_e_h;	/* Drawable event handler */
        GTextCompletionHandler completion;
        struct scrollbarinit *sbinit;
        Color col;
        int radiogroup;
    } u;
    enum gg_flags flags;
    const unichar_t *popup_msg;	/* Brief help message */
    GGadgetHandler handle_controlevent;	/* called when the gadget fires a control event */
} GGadgetData;
/* Zero-initializer matching the field order above. */
#define GGADGETDATA_EMPTY { GRECT_EMPTY, NULL, '\0', '\0', 0, 0, 0, NULL, { NULL }, 0, NULL, NULL }
/* One entry of a gadget-creation table: the creator function to call, its
 * GGadgetData, client data, and (filled in on creation) the resulting gadget. */
typedef struct ggadgetcreatedata {
    GGadget *(*creator)(struct gwindow *base, GGadgetData *gd,void *data);	/* e.g. GButtonCreate */
    GGadgetData gd;
    void *data;		/* passed through to the creator */
    GGadget *ret;	/* set to the created gadget */
} GGadgetCreateData;
/* Zero-initializer / list terminator. */
#define GGADGETCREATEDATA_EMPTY { NULL, GGADGETDATA_EMPTY, NULL, NULL }
/* Sentinel pointers recognised inside box-element arrays. */
#define GCD_Glue ((GGadgetCreateData *) -1) /* Special entries */
#define GCD_ColSpan ((GGadgetCreateData *) -2) /* for box elements */
#define GCD_RowSpan ((GGadgetCreateData *) -3)
#define GCD_HPad10 ((GGadgetCreateData *) -4)
/* Special values for GHVBoxSetExpandableCol/Row. */
enum ghvbox_expand { gb_expandglue=-4, gb_expandgluesame=-3, gb_samesize=-2,
	gb_expandall=-1 };
/* Commands accepted by GGadgetEditCmd / GGadgetActiveGadgetEditCmd. */
enum editor_commands { ec_cut, ec_clear, ec_copy, ec_paste, ec_undo, ec_redo,
	ec_selectall, ec_search, ec_backsearch, ec_backword, ec_deleteword,
	ec_max };
/* return values from file chooser filter functions */
enum fchooserret { fc_hide, fc_show, fc_showdisabled };
/* Cell types for the matrix-edit widget (see struct col_init). */
enum me_type { me_int, me_enum, me_real, me_string, me_bigstr, me_func,
	me_funcedit,
	me_stringchoice, me_stringchoicetrans, me_stringchoicetag,
	me_button,
	me_hex, me_uhex, me_addr, me_onlyfuncedit };
/* Per-column description for the matrix-edit widget. */
struct col_init {
    enum me_type me_type;		/* cell type of this column */
    char *(*func)(GGadget *,int r,int c);	/* value generator for me_func/me_funcedit cells */
    GTextInfo *enum_vals;		/* choices for me_enum / me_stringchoice* columns */
    void (*enable_enum)(GGadget *,GMenuItem *, int r, int c);	/* per-cell choice enabling */
    char *title;			/* column header */
};
/* One cell of matrix-edit data; the union member used follows the column's me_type. */
struct matrix_data {
    union {
        intpt md_ival;
        double md_real;
        char *md_str;
        void *md_addr;
    } u;
    uint8 frozen;	/* cell may not be edited */
    uint8 user_bits;	/* free for client use */
    uint8 current;	/* cell is the active one */
};
/* Initial configuration and callbacks for a matrix-edit gadget. */
struct matrixinit {
    int col_cnt;
    struct col_init *col_init;		/* array of col_cnt column descriptors */
    int initial_row_cnt;
    struct matrix_data *matrix_data;	/* initial_row_cnt * col_cnt cells */
    void (*initrow)(GGadget *g,int row);	/* called when a new row is created */
    int (*candelete)(GGadget *g,int row);	/* may the user delete this row? */
    void (*finishedit)(GGadget *g,int r, int c, int wasnew);	/* called after a cell edit */
    void (*popupmenu)(GGadget *g,GEvent *e,int row,int col);
    int (*handle_key)(GGadget *g,GEvent *e);
    char *(*bigedittitle)(GGadget *g,int r, int c);	/* title for the big-edit dialog */
};
/* Zero-initializers matching the structs above. */
#define COL_INIT_EMPTY { 0, NULL, NULL, NULL, NULL }
#define MATRIX_DATA_EMPTY { { 0 }, 0, 0, 0 }
#define MATRIXINIT_EMPTY { 0, NULL, 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL }
/* Sentinel meaning "leave the cell unchanged". */
#define GME_NoChange 0x80000000
struct gdirentry;
/* Decides whether a directory entry is shown in a file chooser. */
typedef enum fchooserret (*GFileChooserFilterType)(GGadget *g,struct gdirentry *ent,
	const unichar_t *dir);
/* Hook allowing a file chooser to rewrite the filename the user typed. */
typedef int (*GFileChooserInputFilenameFuncType)( GGadget *g,
	const unichar_t ** currentFilename,
	unichar_t* oldfilename );
/* Obsolete */
#define _STR_NULL (-1) /* Null string resource */
#define _STR_Language 0
#define _STR_OK 1
#define _STR_Cancel 2
#define _STR_Open 3
#define _STR_Save 4
#define _STR_Filter 5
#define _STR_New 6
#define _STR_Replace 7
#define _STR_Fileexists 8
#define _STR_Fileexistspre 9
#define _STR_Fileexistspost 10
#define _STR_Createdir 11
#define _STR_Dirname 12
#define _STR_Couldntcreatedir 13
#define _STR_SelectAll 14
#define _STR_None 15
#define __STR_LastStd 15
#define _NUM_Buttonsize 0
#define _NUM_ScaleFactor 1
#define __NUM_LastStd 1
extern void GTextInfoFree(GTextInfo *ti);
extern void GTextInfoListFree(GTextInfo *ti);
extern void GTextInfoArrayFree(GTextInfo **ti);
extern GTextInfo **GTextInfoFromChars(char **array, int len);
extern const unichar_t *GStringGetResource(int index,unichar_t *mnemonic);
extern int GGadgetScale(int xpos);
extern int GIntGetResource(int index);
extern int GStringSetResourceFileV(char *filename,uint32 checksum);
extern int GStringSetResourceFile(char *filename); /* returns 1 for success, 0 for failure */
/* fallback string arrays are null terminated. mnemonics is same length as string */
/* fallback integer arrays are terminated by 0x80000000 (negative infinity) */
extern void GStringSetFallbackArray(const unichar_t **array,const unichar_t *mn,
const int *ires);
unichar_t *GStringFileGetResource(char *filename, int index,unichar_t *mnemonic);
extern void GResourceUseGetText(void);
extern void *GResource_font_cvt(char *val, void *def);
extern FontInstance *GResourceFindFont(char *resourcename,FontInstance *deffont);
void GGadgetDestroy(GGadget *g);
void GGadgetSetVisible(GGadget *g,int visible);
int GGadgetIsVisible(GGadget *g);
void GGadgetSetEnabled(GGadget *g,int enabled);
int GGadgetIsEnabled(GGadget *g);
GWindow GGadgetGetWindow(GGadget *g);
void *GGadgetGetUserData(GGadget *g);
void GGadgetSetUserData(GGadget *g, void *d);
void GGadgetSetPopupMsg(GGadget *g, const unichar_t *msg);
int GGadgetContains(GGadget *g, int x, int y );
int GGadgetContainsEventLocation(GGadget *g, GEvent* e );
GRect *GGadgetGetInnerSize(GGadget *g,GRect *rct);
GRect *GGadgetGetSize(GGadget *g,GRect *rct);
void GGadgetSetSize(GGadget *g,GRect *rct);
void GGadgetGetDesiredVisibleSize(GGadget *g,GRect *outer, GRect *inner);
void GGadgetGetDesiredSize(GGadget *g,GRect *outer, GRect *inner);
void GGadgetSetDesiredSize(GGadget *g,GRect *outer, GRect *inner);
int GGadgetGetCid(GGadget *g);
void GGadgetResize(GGadget *g,int32 width, int32 height );
void GGadgetMove(GGadget *g,int32 x, int32 y );
void GGadgetMoveAddToY(GGadget *g, int32 yoffset );
int32 GGadgetGetX(GGadget *g);
int32 GGadgetGetY(GGadget *g);
void GGadgetSetY(GGadget *g, int32 y );
void GGadgetRedraw(GGadget *g);
void GGadgetsCreate(GWindow base, GGadgetCreateData *gcd);
int GGadgetFillsWindow(GGadget *g);
int GGadgetIsDefault(GGadget *g);
void GGadgetSetTitle(GGadget *g,const unichar_t *title);
void GGadgetSetTitle8(GGadget *g,const char *title);
void GGadgetSetTitle8WithMn(GGadget *g,const char *title);
const unichar_t *_GGadgetGetTitle(GGadget *g); /* Do not free!!! */
unichar_t *GGadgetGetTitle(GGadget *g); /* Free the return */
char *GGadgetGetTitle8(GGadget *g); /* Free the return (utf8) */
void GGadgetSetFont(GGadget *g,GFont *font);
GFont *GGadgetGetFont(GGadget *g);
int GGadgetEditCmd(GGadget *g,enum editor_commands cmd);
int GGadgetActiveGadgetEditCmd(GWindow gw,enum editor_commands cmd);
void GGadgetSetHandler(GGadget *g, GGadgetHandler handler);
GGadgetHandler GGadgetGetHandler(GGadget *g);
void GTextFieldSelect(GGadget *g,int sel_start, int sel_end);
void GTextFieldShow(GGadget *g,int pos);
void GTextFieldReplace(GGadget *g,const unichar_t *txt);
bool GTextFieldIsEmpty(GGadget *g);
void GCompletionFieldSetCompletion(GGadget *g,GTextCompletionHandler completion);
void GCompletionFieldSetCompletionMode(GGadget *g,int enabled);
void GGadgetClearList(GGadget *g);
void GGadgetSetList(GGadget *g, GTextInfo **ti, int32 copyit);
GTextInfo **GGadgetGetList(GGadget *g,int32 *len); /* Do not free!!! */
GTextInfo *GGadgetGetListItem(GGadget *g,int32 pos);
GTextInfo *GGadgetGetListItemSelected(GGadget *g);
void GGadgetSelectListItem(GGadget *g,int32 pos,int32 sel);
void GGadgetSelectOneListItem(GGadget *g,int32 pos);
int32 GGadgetIsListItemSelected(GGadget *g,int32 pos);
int32 GGadgetGetFirstListSelectedItem(GGadget *g);
void GGadgetScrollListToPos(GGadget *g,int32 pos);
void GGadgetScrollListToText(GGadget *g,const unichar_t *lab,int32 sel);
void GGadgetSetListOrderer(GGadget *g,int (*orderer)(const void *, const void *));
void GColorButtonSetColor(GGadget *g, Color col);
Color GColorButtonGetColor(GGadget *g);
void GGadgetSetChecked(GGadget *g, int ison);
int GGadgetIsChecked(GGadget *g);
int GListIndexFromY(GGadget *g,int y);
void GListSetSBAlwaysVisible(GGadget *g,int always);
void GListSetPopupCallback(GGadget *g,void (*callback)(GGadget *,int));
int GTabSetGetSel(GGadget *g);
void GTabSetSetSel(GGadget *g,int sel);
void GTabSetSetEnabled(GGadget *g,int pos, int enabled);
GWindow GTabSetGetSubwindow(GGadget *g,int pos);
int GTabSetGetTabLines(GGadget *g);
void GTabSetSetNestedExpose(GGadget *g, void (*)(GWindow,GGadget *,GEvent *));
void GTabSetSetNestedMouse(GGadget *g, int (*)(GGadget *,GEvent *));
void GTabSetChangeTabName(GGadget *g, const char *name, int pos);
void GTabSetRemetric(GGadget *g);
void GTabSetRemoveTabByPos(GGadget *g, int pos);
void GTabSetRemoveTabByName(GGadget *g, char *name);
int32 GScrollBarGetPos(GGadget *g);
int32 GScrollBarSetPos(GGadget *g,int32 pos);
int32 GScrollBarAddToPos(GGadget *g,int32 offset);
void GScrollBarSetMustShow(GGadget *g, int32 sb_min, int32 sb_max, int32 sb_pagesize,
int32 sb_mustshow);
void GScrollBarSetBounds(GGadget *g, int32 sb_min, int32 sb_max, int32 sb_pagesize );
void GScrollBarGetBounds(GGadget *g, int32 *sb_min, int32 *sb_max, int32 *sb_pagesize );
void GMenuBarSetItemChecked(GGadget *g, int mid, int check);
void GMenuBarSetItemEnabled(GGadget *g, int mid, int enabled);
void GMenuBarSetItemName(GGadget *g, int mid, const unichar_t *name);
void GMenuSetShortcutDomain(char *domain);
const char *GMenuGetShortcutDomain(void);
int GMenuIsCommand(GEvent *event,char *shortcut);
int GMenuMask(void);
int GMenuAnyUnmaskedShortcuts(GGadget *mb1, GGadget *mb2);
void GFileChooserPopupCheck(GGadget *g,GEvent *e);
void GFileChooserFilterIt(GGadget *g);
void GFileChooserRefreshList(GGadget *g);
int GFileChooserFilterEh(GGadget *g,GEvent *e);
void GFileChooserConnectButtons(GGadget *g,GGadget *ok, GGadget *filter);
void GFileChooserSetFilterText(GGadget *g,const unichar_t *filter);
void GFileChooserSetFilterFunc(GGadget *g,GFileChooserFilterType filter);
void GFileChooserSetInputFilenameFunc(GGadget *g,GFileChooserInputFilenameFuncType filter);
int GFileChooserDefInputFilenameFunc( GGadget *g, const unichar_t** currentFilename, unichar_t* oldfilename );
GFileChooserInputFilenameFuncType GFileChooserGetInputFilenameFunc(GGadget *g);
void GFileChooserSetDir(GGadget *g,unichar_t *dir);
struct giocontrol *GFileChooserReplaceIO(GGadget *g,struct giocontrol *gc);
unichar_t *GFileChooserGetDir(GGadget *g);
unichar_t *GFileChooserGetFilterText(GGadget *g);
GFileChooserFilterType GFileChooserGetFilterFunc(GGadget *g);
void GFileChooserSetFilename(GGadget *g,const unichar_t *defaultfile);
void GFileChooserSetMimetypes(GGadget *g,unichar_t **mimetypes);
unichar_t **GFileChooserGetMimetypes(GGadget *g);
void GFileChooserGetChildren(GGadget *g,GGadget **pulldown, GGadget **list, GGadget **tf);
int GFileChooserPosIsDir(GGadget *g, int pos);
unichar_t *GFileChooserFileNameOfPos(GGadget *g, int pos);
void GFileChooserSetShowHidden(int sh);
int GFileChooserGetShowHidden(void);
void GFileChooserSetDirectoryPlacement(int dp);
int GFileChooserGetDirectoryPlacement(void);
void GFileChooserSetBookmarks(unichar_t **b);
void GFileChooserSetPaths(GGadget *g, const char* const* path);
unichar_t **GFileChooserGetBookmarks(void);
void GFileChooserSetPrefsChangedCallback(void *data, void (*p_c)(void *));
void GHVBoxSetExpandableCol(GGadget *g,int col);
void GHVBoxSetExpandableRow(GGadget *g,int row);
void GHVBoxSetPadding(GGadget *g,int hpad, int vpad);
void GHVBoxFitWindow(GGadget *g);
void GHVBoxFitWindowCentered(GGadget *g);
void GHVBoxReflow(GGadget *g);
void GMatrixEditSet(GGadget *g,struct matrix_data *data, int rows, int copy_it);
struct matrix_data *GMatrixEditGet(GGadget *g, int *rows);
struct matrix_data *_GMatrixEditGet(GGadget *g, int *rows);
GGadget *_GMatrixEditGetActiveTextField(GGadget *g);
int GMatrixEditGetColCnt(GGadget *g);
int GMatrixEditGetActiveRow(GGadget *g);
int GMatrixEditGetActiveCol(GGadget *g);
void GMatrixEditActivateRowCol(GGadget *g, int r, int c);
void GMatrixEditDeleteRow(GGadget *g,int row);
void GMatrixEditScrollToRowCol(GGadget *g,int r, int c);
int GMatrixEditStringDlg(GGadget *g,int row,int col);
void GMatrixEditSetNewText(GGadget *g, char *text);
void GMatrixEditSetOtherButtonEnable(GGadget *g, void (*sob)(GGadget *g, int r, int c));
void GMatrixEditSetMouseMoveReporter(GGadget *g, void (*rmm)(GGadget *g, int r, int c));
void GMatrixEditSetTextChangeReporter(GGadget *g, void (*tcr)(GGadget *g, int r, int c, GGadget *text));
void GMatrixEditSetValidateStr(GGadget *g, char *(*validate)(GGadget *g, int r, int c, int wasnew, char *str));
void GMatrixEditSetBeforeDelete(GGadget *g, void (*predelete)(GGadget *g, int r));
void GMatrixEditSetRowMotionCallback(GGadget *g, void (*rowmotion)(GGadget *g, int oldr, int newr));
void GMatrixEditUp(GGadget *g);
void GMatrixEditDown(GGadget *g);
enum gme_updown { ud_up_enabled=1, ud_down_enabled=2 };
void GMatrixEditSetCanUpDown(GGadget *g, enum gme_updown (*canupdown)(GGadget *g, int r));
void GMatrixEditSetUpDownVisible(GGadget *g, int visible);
void GMatrixEditAddButtons(GGadget *g, GGadgetCreateData *gcd);
void GMatrixEditEnableColumn(GGadget *g, int col, int enabled);
void GMatrixEditShowColumn(GGadget *g, int col, int visible);
void GMatrixEditSetColumnChoices(GGadget *g, int col, GTextInfo *ti);
GMenuItem *GMatrixEditGetColumnChoices(GGadget *g, int col);
void GMatrixEditSetColumnCompletion(GGadget *g, int col, GTextCompletionHandler completion);
void GMatrixEditSetEditable(GGadget *g, int editable);
GWindow GDrawableGetWindow(GGadget *g);
extern void GGadgetPreparePopupImage(GWindow base,const unichar_t *msg,
const void *data,
GImage *(*get_image)(const void *data),
void (*free_image)(const void *data,GImage *img));
extern void GGadgetPreparePopup(GWindow base,const unichar_t *msg);
extern void GGadgetPreparePopupR(GWindow base,int msg);
extern void GGadgetPreparePopup8(GWindow base, const char *msg);
extern void GGadgetEndPopup(void);
extern void GGadgetPopupExternalEvent(GEvent *e);
extern int GGadgetDispatchEvent(GGadget *g,GEvent *e);
extern void GGadgetTakesKeyboard(GGadget *g, int takes_keyboard);
/* Handles *?{}[] wildcards */
int GGadgetWildMatch(unichar_t *pattern, unichar_t *name,int ignorecase);
enum fchooserret GFileChooserDefFilter(GGadget *g,struct gdirentry *ent,
const unichar_t *dir);
GWindow GMenuCreatePopupMenu(GWindow owner,GEvent *event, GMenuItem *mi);
GWindow GMenuCreatePopupMenuWithName(GWindow owner,GEvent *event, char* subMenuName,GMenuItem *mi);
GWindow _GMenuCreatePopupMenu(GWindow owner,GEvent *event, GMenuItem *mi,
void (*donecallback)(GWindow owner));
GWindow _GMenuCreatePopupMenuWithName(GWindow owner,GEvent *event, GMenuItem *mi,
char* subMenuName,
void (*donecallback)(GWindow owner));
GGadget *GLineCreate(struct gwindow *base, GGadgetData *gd,void *data);
GGadget *GGroupCreate(struct gwindow *base, GGadgetData *gd,void *data);
GGadget *GSpacerCreate(struct gwindow *base, GGadgetData *gd,void *data);
GGadget *GLabelCreate(struct gwindow *base, GGadgetData *gd,void *data);
GGadget *GButtonCreate(struct gwindow *base, GGadgetData *gd,void *data);
GGadget *GImageButtonCreate(struct gwindow *base, GGadgetData *gd,void *data);
GGadget *GListButtonCreate(struct gwindow *base, GGadgetData *gd,void *data);
GGadget *GColorButtonCreate(struct gwindow *base, GGadgetData *gd,void *data);
GGadget *GRadioCreate(struct gwindow *base, GGadgetData *gd,void *data);
GGadget *GCheckBoxCreate(struct gwindow *base, GGadgetData *gd,void *data);
GGadget *GVisibilityBoxCreate(struct gwindow *base, GGadgetData *gd,void *data);
GGadget *GScrollBarCreate(struct gwindow *base, GGadgetData *gd,void *data);
GGadget *GListCreate(struct gwindow *base, GGadgetData *gd,void *data);
GGadget *GTextFieldCreate(struct gwindow *base, GGadgetData *gd,void *data);
GGadget *GPasswordCreate(struct gwindow *base, GGadgetData *gd,void *data);
GGadget *GNumericFieldCreate(struct gwindow *base, GGadgetData *gd,void *data);
GGadget *GTextCompletionCreate(struct gwindow *base, GGadgetData *gd,void *data);
GGadget *GTextAreaCreate(struct gwindow *base, GGadgetData *gd,void *data);
GGadget *GListFieldCreate(struct gwindow *base, GGadgetData *gd,void *data);
GGadget *GSimpleListFieldCreate(struct gwindow *base, GGadgetData *gd,void *data);
GGadget *GMenuBarCreate(struct gwindow *base, GGadgetData *gd,void *data);
GGadget *GMenu2BarCreate(struct gwindow *base, GGadgetData *gd,void *data);
GGadget *GTabSetCreate(struct gwindow *base, GGadgetData *gd,void *data);
GGadget *GFileChooserCreate(struct gwindow *base, GGadgetData *gd,void *data);
GGadget *GHBoxCreate(struct gwindow *base, GGadgetData *gd,void *data);
GGadget *GVBoxCreate(struct gwindow *base, GGadgetData *gd,void *data);
GGadget *GHVBoxCreate(struct gwindow *base, GGadgetData *gd,void *data);
GGadget *GHVGroupCreate(struct gwindow *base, GGadgetData *gd,void *data);
GGadget *GMatrixEditCreate(struct gwindow *base, GGadgetData *gd,void *data);
GGadget *GDrawableCreate(struct gwindow *base, GGadgetData *gd,void *data);
GGadget *CreateSlider(struct gwindow *base, GGadgetData *gd,void *data);
GGadget *CreateFileChooser(struct gwindow *base, GGadgetData *gd,void *data);
GGadget *CreateGadgets(struct gwindow *base, GGadgetCreateData *gcd);
GTextInfo **GTextInfoArrayFromList(GTextInfo *ti, uint16 *cnt);
/* A named image resource: the decoded image together with the filename
 * it was loaded from (see GGadgetResourceFindImage below). */
typedef struct gresimage {
    GImage *image;	/* decoded image (may be the caller-supplied default) */
    char *filename;	/* file the image was read from */
} GResImage;
GResImage *GGadgetResourceFindImage(char *name, GImage *def);
void InitImageCache();
void ClearImageCache();
void GGadgetSetImageDir(char *dir);
void GGadgetSetImagePath(char *path);
GImage *GGadgetImageCache(const char *filename);
int TryGGadgetImageCache(GImage *image, const char *name);
extern unichar_t *utf82u_mncopy(const char *utf8buf,unichar_t *mn);
extern double GetCalmReal8(GWindow gw,int cid,char *namer,int *err);
extern double GetReal8(GWindow gw,int cid,char *namer,int *err);
extern int GetCalmInt8(GWindow gw,int cid,char *name,int *err);
extern int GetInt8(GWindow gw,int cid,char *namer,int *err);
extern int GetUnicodeChar8(GWindow gw,int cid,char *namer,int *err);
extern void GGadgetProtest8(char *labelr);
extern void GMenuItemParseShortCut(GMenuItem *mi,char *shortcut);
extern int GMenuItemParseMask(char *shortcut);
extern int GGadgetUndoMacEnglishOptionCombinations(GEvent *event);
/* Among other things, this routine sets global icon cache up. */
extern void GGadgetInit(void);
extern int GGadgetWithin(GGadget *g, int x, int y);
extern void GMenuItemArrayFree(GMenuItem *mi);
extern void GMenuItem2ArrayFree(GMenuItem2 *mi);
extern GMenuItem *GMenuItemArrayCopy(GMenuItem *mi, uint16 *cnt);
extern GMenuItem *GMenuItem2ArrayCopy(GMenuItem2 *mi, uint16 *cnt);
extern void GVisibilityBoxSetToMinWH(GGadget *g);
extern void GGadgetSetSkipHotkeyProcessing( GGadget *g, int v );
extern int GGadgetGetSkipHotkeyProcessing( GGadget *g );
extern void GGadgetSetSkipUnQualifiedHotkeyProcessing( GGadget *g, int v );
extern int GGadgetGetSkipUnQualifiedHotkeyProcessing( GGadget *g );
#endif /* FONTFORGE_GGADGET_H */
|
1050_6
|
crossvul
|
h
|
CWE-119
|
Improper Restriction of Operations within the Bounds of a Memory Buffer - Certain languages allow direct addressing of memory locations and do not automatically ensure that these locations are valid for the memory buffer that is being referenced.
|
c
|
/**
* @file tree_internal.h
* @author Radek Krejci <rkrejci@cesnet.cz>
* @brief libyang internal functions for manipulating with the data model and
* data trees.
*
* Copyright (c) 2015 CESNET, z.s.p.o.
*
* This source code is licensed under BSD 3-Clause License (the "License").
* You may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://opensource.org/licenses/BSD-3-Clause
*/
#ifndef LY_TREE_INTERNAL_H_
#define LY_TREE_INTERNAL_H_
#include <stdint.h>
#include "libyang.h"
#include "tree_schema.h"
#include "tree_data.h"
#include "resolve.h"
/* this is used to distinguish lyxml_elem * from a YANG temporary parsing structure, the first byte is compared */
#define LY_YANG_STRUCTURE_FLAG 0x80
/**
* @brief YANG namespace
*/
#define LY_NSYANG "urn:ietf:params:xml:ns:yang:1"
/**
* @brief YIN namespace
*/
#define LY_NSYIN "urn:ietf:params:xml:ns:yang:yin:1"
/**
* @brief NETCONF namespace
*/
#define LY_NSNC "urn:ietf:params:xml:ns:netconf:base:1.0"
/**
* @brief NACM namespace
*/
#define LY_NSNACM "urn:ietf:params:xml:ns:yang:ietf-netconf-acm"
/**
* @brief internal parser flag for actions and inline notifications
*/
#define LYD_OPT_ACT_NOTIF 0x100
/**
* @brief Internal list of built-in types
*/
extern struct lys_tpdf *ly_types[LY_DATA_TYPE_COUNT];
/**
* @brief Internal structure for data node sorting.
*/
struct lyd_node_pos {
struct lyd_node *node;
uint32_t pos;
};
/**
* @brief Internal structure for LYB parser/printer.
*/
struct lyb_state {
    size_t *written;		/* bytes written per open chunk (parallel to position; presumably indexed 0..used-1 -- confirm in lyb parser/printer) */
    size_t *position;		/* stream offset of each open chunk's size byte -- TODO confirm */
    uint8_t *inner_chunks;	/* inner-chunk count per open chunk (bounded by LYB_INCHUNK_MAX) */
    int used;			/* number of chunk slots currently in use */
    int size;			/* allocated slots; grown in LYB_STATE_STEP increments */
    const struct lys_module **models;	/* modules referenced by the data being (de)serialized */
    int mod_count;		/* number of entries in models */
    struct ly_ctx *ctx;		/* libyang context owning the schemas */
    /* LYB printer only */
    struct {
        struct lys_node *first_sibling;	/* key identifying this sibling set */
        struct hash_table *ht;		/* schema-hash table for that sibling set */
    } *sib_ht;
    int sib_ht_count;		/* number of entries in sib_ht */
};
/* struct lyb_state allocation step */
#define LYB_STATE_STEP 4
/**
* LYB schema hash constants
*
* Hash is divided to collision ID and hash itself.
*
* First bits are collision ID until 1 is found. The rest is truncated 32b hash.
* 1xxx xxxx - collision ID 0 (no collisions)
* 01xx xxxx - collision ID 1 (collision ID 0 hash collided)
* 001x xxxx - collision ID 2 ...
*/
/* Number of bits the whole hash will take (including hash collision ID) */
#define LYB_HASH_BITS 8
/* Masking 32b hash (collision ID 0) */
#define LYB_HASH_MASK 0x7f
/* Type for storing the whole hash (used only internally, publicly defined directly) */
#define LYB_HASH uint8_t
/* Need to move this first >> collision number (from 0) to get collision ID hash part */
#define LYB_HASH_COLLISION_ID 0x80
/* How many bytes are reserved for one data chunk SIZE (8B is maximum) */
#define LYB_SIZE_BYTES 1
/* Maximum size that will be written into LYB_SIZE_BYTES (must be large enough) */
#define LYB_SIZE_MAX UINT8_MAX
/* How many bytes are reserved for one data chunk inner chunk count */
#define LYB_INCHUNK_BYTES 1
/* Maximum size that will be written into LYB_INCHUNK_BYTES (must be large enough) */
#define LYB_INCHUNK_MAX UINT8_MAX
/* Just a helper macro */
#define LYB_META_BYTES (LYB_INCHUNK_BYTES + LYB_SIZE_BYTES)
/* Type large enough for all meta data */
#define LYB_META uint16_t
LYB_HASH lyb_hash(struct lys_node *sibling, uint8_t collision_id);
int lyb_has_schema_model(struct lys_node *sibling, const struct lys_module **models, int mod_count);
/**
* Macros to work with ::lyd_node#when_status
* +--- bit 1 - some when-stmt connected with the node (resolve_applies_when() is true)
* |+-- bit 2 - when-stmt's condition is resolved and it is true
* ||+- bit 3 - when-stmt's condition is resolved and it is false
* XXX
*
* bit 1 is set when the node is created
* if none of bits 2 and 3 is set, the when condition is not yet resolved
*/
#define LYD_WHEN 0x04
#define LYD_WHEN_TRUE 0x02
#define LYD_WHEN_FALSE 0x01
#define LYD_WHEN_DONE(status) (!((status) & LYD_WHEN) || ((status) & (LYD_WHEN_TRUE | LYD_WHEN_FALSE)))
/**
* @brief Type flag for an unresolved type in a grouping.
*/
#define LY_VALUE_UNRESGRP 0x80
#ifdef LY_ENABLED_CACHE
/**
* @brief Minimum number of children for the parent to create a hash table for them.
*/
# define LY_CACHE_HT_MIN_CHILDREN 4
int lyd_hash(struct lyd_node *node);
void lyd_insert_hash(struct lyd_node *node);
void lyd_unlink_hash(struct lyd_node *node, struct lyd_node *orig_parent);
#endif
/**
* @brief Create submodule structure by reading data from memory.
*
* @param[in] module Schema tree where to connect the submodule, belongs-to value must match.
* @param[in] data String containing the submodule specification in the given \p format.
* @param[in] format Format of the data to read.
* @param[in] unres list of unresolved items
* @return Created submodule structure or NULL in case of error.
*/
struct lys_submodule *lys_sub_parse_mem(struct lys_module *module, const char *data, LYS_INFORMAT format,
struct unres_schema *unres);
/**
* @brief Create submodule structure by reading data from file descriptor.
*
* \note Current implementation supports only reading data from standard (disk) file, not from sockets, pipes, etc.
*
* @param[in] module Schema tree where to connect the submodule, belongs-to value must match.
* @param[in] fd File descriptor of a regular file (e.g. sockets are not supported) containing the submodule
* specification in the given \p format.
* @param[in] format Format of the data to read.
* @param[in] unres list of unresolved items
* @return Created submodule structure or NULL in case of error.
*/
struct lys_submodule *lys_sub_parse_fd(struct lys_module *module, int fd, LYS_INFORMAT format, struct unres_schema *unres);
/**
* @brief Free the submodule structure
*
* @param[in] submodule The structure to free. Do not use the pointer after calling this function.
* @param[in] private_destructor Optional destructor function for private objects assigned
* to the nodes via lys_set_private(). If NULL, the private objects are not freed by libyang.
*/
void lys_submodule_free(struct lys_submodule *submodule, void (*private_destructor)(const struct lys_node *node, void *priv));
/**
* @brief Add child schema tree node at the end of the parent's child list.
*
* If the child is connected somewhere (has a parent), it is completely
* unlinked and none of the following conditions applies.
* If the child has prev sibling(s), they are ignored (child is added at the
* end of the child list).
* If the child has next sibling(s), all of them are connected with the parent.
*
* @param[in] parent Parent node where the \p child will be added.
* @param[in] module Module where the \p child will be added if the \p parent
* parameter is NULL (case of top-level elements). The parameter does not change
* the module of the \p child element. If the \p parent parameter is present,
* the \p module parameter is ignored.
* @param[in] child The schema tree node to be added.
* @param[in] options Parsing options. Only relevant when creating a shorthand case.
* @return 0 on success, nonzero else
*/
int lys_node_addchild(struct lys_node *parent, struct lys_module *module, struct lys_node *child, int options);
/**
* @brief Find a valid grouping definition relative to a node.
*
* Valid definition means a sibling of \p start or a sibling of any of \p start 's parents.
*
* @param[in] name Name of the searched grouping.
* @param[in] start Definition must be valid (visible) for this node.
* @return Matching valid grouping or NULL.
*/
struct lys_node_grp *lys_find_grouping_up(const char *name, struct lys_node *start);
/**
* @brief Check that the \p node being connected into the \p parent has a unique name (identifier).
*
* Function is performed also as part of lys_node_addchild().
*
* @param[in] node The schema tree node to be checked.
* @param[in] parent Parent node where the \p child is supposed to be added.
* @param[in] module Module where the \p child is supposed to be added if the \p parent
* parameter is NULL (case of top-level elements). The parameter does not change
* the module of the \p child element. If the \p parent parameter is present,
* the \p module parameter is ignored.
* @return 0 on success, nonzero else
*/
int lys_check_id(struct lys_node *node, struct lys_node *parent, struct lys_module *module);
/**
 * @brief Find out whether the node contains a must or when statement with an XPath expression
*
* @param[in] node Node to examine.
* @return 1 if contains, 0 otherwise
*/
int lys_has_xpath(const struct lys_node *node);
/**
* @brief Learn if \p type is defined in the local module or from an import.
*
* @param[in] type Type to examine.
* @return non-zero if local, 0 if from an import.
*/
int lys_type_is_local(const struct lys_type *type);
/**
* @brief Create a copy of the specified schema tree \p node
*
* @param[in] module Target module for the duplicated node.
* @param[in] parent Schema tree node where the node is being connected, NULL in case of top level \p node.
* @param[in] node Schema tree node to be duplicated.
* @param[in] unres list of unresolved items
* @param[in] shallow Whether to copy children and connect to parent/module too.
* @return Created copy of the provided schema \p node.
*/
struct lys_node *lys_node_dup(struct lys_module *module, struct lys_node *parent, const struct lys_node *node,
struct unres_schema *unres, int shallow);
/**
* @brief duplicate the list of extension instances.
*
* @param[in] ctx Context to store errors in.
* @param[in] mod Module where we are
* @param[in] orig list of the extension instances to duplicate, the size of the array must correspond with \p size
* @param[in] size number of items in \p old array to duplicate
* @param[in] parent Parent structure of the new extension instances list
 * @param[in] parent_type Type of the provided \p parent
* @param[in,out] new Address where to store the created list of duplicated extension instances
* @param[in] shallow Whether to copy children and connect to parent/module too.
* @param[in] unres list of unresolved items
*
*/
int lys_ext_dup(struct ly_ctx *ctx, struct lys_module *mod, struct lys_ext_instance **orig, uint8_t size, void *parent,
LYEXT_PAR parent_type, struct lys_ext_instance ***new, int shallow, struct unres_schema *unres);
/**
* @brief Iterate over the specified type of the extension instances
*
* @param[in] ext Array of extensions to explore
* @param[in] ext_size Size of the provided \p ext array
* @param[in] start Index in the \p ext array where to start searching (first call with 0, the consequent calls with
* the returned index increased by 1, unless the returned index is -1)
 * @param[in] substmt Type of the extension (its belonging to the specific substatement) to iterate, use
 * #LYEXT_SUBSTMT_ALL to go through all the extensions in the array
* @result index in the ext, -1 if not present
*/
int lys_ext_iter(struct lys_ext_instance **ext, uint8_t ext_size, uint8_t start, LYEXT_SUBSTMT substmt);
/**
* @brief free the array of the extension instances
*/
void lys_extension_instances_free(struct ly_ctx *ctx, struct lys_ext_instance **e, unsigned int size,
void (*private_destructor)(const struct lys_node *node, void *priv));
/**
* @brief Add pointer to \p leafref to \p leafref_target children so that it knows there
* are some leafrefs referring it.
*
* @param[in] leafref_target Leaf that is \p leafref's target.
* @param[in] leafref Leaf or leaflist of type #LY_TYPE_LEAFREF referring \p leafref_target.
* @return 0 on success, -1 on error.
*/
int lys_leaf_add_leafref_target(struct lys_node_leaf *leafref_target, struct lys_node *leafref);
/**
* @brief Free a schema when condition
*
 * @param[in] ctx libyang context where the schema of the condition is used.
* @param[in] w When structure to free.
* @param[in] private_destructor Destructor for priv member in extension instances
*/
void lys_when_free(struct ly_ctx *ctx, struct lys_when *w,
void (*private_destructor)(const struct lys_node *node, void *priv));
/**
* @brief Free the schema tree restriction (must, ...) structure content
*
* @param[in] ctx libyang context where the schema of the restriction is used.
* @param[in] restr The restriction structure to free. The function actually frees only
* the content of the structure, so after using this function, caller is supposed to
* use free(restr). It is done to free the content of structures being allocated as
* part of array, in that case the free() is used on the whole array.
* @param[in] private_destructor Destructor for priv member in extension instances
*/
void lys_restr_free(struct ly_ctx *ctx, struct lys_restr *restr,
void (*private_destructor)(const struct lys_node *node, void *priv));
/**
* @brief Free the schema tree type structure content
*
* @param[in] ctx libyang context where the schema of the type is used.
 * @param[in] type The type structure to free. The function actually frees only
* the content of the structure, so after using this function, caller is supposed to
* use free(type). It is done to free the content of structures being allocated as
* part of array, in that case the free() is used on the whole array.
* @param[in] private_destructor Destructor for priv member in extension instances
*/
void lys_type_free(struct ly_ctx *ctx, struct lys_type *type,
void (*private_destructor)(const struct lys_node *node, void *priv));
/**
* @brief Unlink the schema node from the tree.
*
* @param[in] node Schema tree node to unlink.
*/
void lys_node_unlink(struct lys_node *node);
/**
* @brief Free the schema node structure, includes unlinking it from the tree
*
* @param[in] node Schema tree node to free. Do not use the pointer after calling this function.
* @param[in] private_destructor Optional destructor function for private objects assigned
* to the nodes via lys_set_private(). If NULL, the private objects are not freed by libyang.
* @param[in] shallow Whether to do a shallow free only (on a shallow copy of a node).
*/
void lys_node_free(struct lys_node *node, void (*private_destructor)(const struct lys_node *node, void *priv), int shallow);
/**
* @brief Free (and unlink it from the context) the specified schema.
*
* It is dangerous to call this function on schemas already placed into the context's
* list of modules - there can be many references from other modules and data instances.
*
* @param[in] module Data model to free.
* @param[in] private_destructor Optional destructor function for private objects assigned
* to the nodes via lys_set_private(). If NULL, the private objects are not freed by libyang.
* @param[in] free_subs Whether to free included submodules.
* @param[in] remove_from_ctx Whether to remove this model from context. Always use 1 except
* when removing all the models (in ly_ctx_destroy()).
*/
void lys_free(struct lys_module *module, void (*private_destructor)(const struct lys_node *node, void *priv),
int free_subs, int remove_from_ctx);
/**
* @brief Create a data container knowing it's schema node.
*
* @param[in] parent Data parent of the new node.
* @param[in] schema Schema node of the new node.
* @param[in] dflt Set dflt flag in the created data nodes
* @return New node, NULL on error.
*/
struct lyd_node *_lyd_new(struct lyd_node *parent, const struct lys_node *schema, int dflt);
/**
* @brief Create a dummy node for XPath evaluation. After done using, it should be removed.
*
* The function must be used very carefully:
* - there must not be a list node to create
*
* @param[in] data Any data node of the tree where the dummy node will be created
* @param[in] parent To optimize searching in data tree (and to avoid issues with lists), caller can specify a
* parent node that exists in the data tree.
* @param[in] schema Schema node of the dummy node to create, must be of nodetype that
* appears also in data tree.
* @param[in] value Optional value to be set in the dummy node
* @param[in] dflt Set dflt flag in the created data nodes
*
* @return The first created node needed for the dummy node in the given tree.
*/
struct lyd_node *lyd_new_dummy(struct lyd_node *data, struct lyd_node *parent, const struct lys_node *schema,
const char *value, int dflt);
/**
* @brief Find the parent node of an attribute.
*
* @param[in] root Root element of the data tree with the attribute.
* @param[in] attr Attribute to find.
*
* @return Parent of \p attr, NULL if not found.
*/
const struct lyd_node *lyd_attr_parent(const struct lyd_node *root, struct lyd_attr *attr);
/**
* @brief Internal version of lyd_unlink().
*
* @param[in] node Node to unlink.
* @param[in] permanent 0 - the node will be linked back,
 * 1 - the node is permanently unlinked,
* 2 - the node is being freed.
*
* @return EXIT_SUCCESS on success, EXIT_FAILURE on error.
*/
int lyd_unlink_internal(struct lyd_node *node, int permanent);
/**
* @brief Internal version of lyd_insert() and lyd_insert_sibling().
*
* @param[in] invalidate Whether to invalidate any nodes. Set 0 only if linking back some temporarily internally unlinked nodes.
*/
int lyd_insert_common(struct lyd_node *parent, struct lyd_node **sibling, struct lyd_node *node, int invalidate);
/**
* @brief Internal version of lyd_insert_before() and lyd_insert_after().
*
* @param[in] invalidate Whether to invalidate any nodes. Set 0 only if linking back some temporarily internally unlinked nodes.
*/
int lyd_insert_nextto(struct lyd_node *sibling, struct lyd_node *node, int before, int invalidate);
/**
* @brief Find a specific sibling. Does not log.
*
* Since \p mod_name is mandatory, augments are handled.
*
* @param[in] siblings Siblings to consider. They are first adjusted to
* point to the first sibling.
* @param[in] mod_name Module name, mandatory.
* @param[in] mod_name_len Module name length.
* @param[in] name Node name, mandatory.
* @param[in] nam_len Node name length.
* @param[in] type ORed desired type of the node. 0 means any type.
* Does not return groupings, uses, and augments (but can return augment nodes).
* @param[out] ret Pointer to the node of the desired type. Can be NULL.
*
* @return EXIT_SUCCESS on success, EXIT_FAILURE on forward reference.
*/
int lys_get_sibling(const struct lys_node *siblings, const char *mod_name, int mod_name_len, const char *name,
int nam_len, LYS_NODE type, const struct lys_node **ret);
/**
* @brief Find a specific node that can only appear in the data. Does not log.
*
* @param[in] mod Main module with the node. Must be set if \p parent == NULL (top-level node).
* @param[in] parent Parent of the node. Must be set if \p mod == NULL (nested node).
* @param[in] name Node name.
* @param[in] nam_len Node \p name length.
* @param[in] type ORed desired type of the node. 0 means any (data node) type.
* @param[out] ret Pointer to the node of the desired type. Can be NULL.
*
* @return EXIT_SUCCESS on success, EXIT_FAILURE on fail.
*/
int lys_getnext_data(const struct lys_module *mod, const struct lys_node *parent, const char *name, int nam_len,
LYS_NODE type, const struct lys_node **ret);
int lyd_get_unique_default(const char* unique_expr, struct lyd_node *list, const char **dflt);
int lyd_build_relative_data_path(const struct lys_module *module, const struct lyd_node *node, const char *schema_id,
char *buf);
void lyd_free_value(lyd_val value, LY_DATA_TYPE value_type, uint8_t value_flags, struct lys_type *type, lyd_val *old_val,
LY_DATA_TYPE *old_val_type, uint8_t *old_val_flags);
int lyd_list_equal(struct lyd_node *node1, struct lyd_node *node2, int with_defaults);
int lys_make_implemented_r(struct lys_module *module, struct unres_schema *unres);
/**
* @brief Check for (validate) mandatory nodes of a data tree. Checks recursively whole data tree. Requires all when
* statement to be solved.
*
* @param[in] root Data tree to validate.
* @param[in] ctx libyang context (for the case when the data tree is empty - i.e. root == NULL).
* @param[in] modules Only check mandatory nodes from these modules. If not set, check for all modules in the context.
* @param[in] mod_count Number of modules in \p modules.
* @param[in] options Standard @ref parseroptions.
* @return EXIT_SUCCESS or EXIT_FAILURE.
*/
int lyd_check_mandatory_tree(struct lyd_node *root, struct ly_ctx *ctx, const struct lys_module **modules, int mod_count,
int options);
/**
* @brief Check if the provided node is inside a grouping.
*
* @param[in] node Schema node to check.
* @return 0 as false, 1 as true
*/
int lys_ingrouping(const struct lys_node *node);
int unres_data_diff_new(struct unres_data *unres, struct lyd_node *subtree, struct lyd_node *parent, int created);
void unres_data_diff_rem(struct unres_data *unres, unsigned int idx);
/**
* @brief Process (add/clean) default nodes in the data tree and resolve the unresolved items
*
* @param[in,out] root Pointer to the root node of the complete data tree, the root node can be NULL if the data tree
* is empty
* @param[in] options Parser options to know the data tree type, see @ref parseroptions.
* @param[in] ctx Context for the case the \p root is empty (in that case \p ctx must not be NULL)
* @param[in] modules Only modules that will be traversed when adding default values.
* @param[in] mod_count Number of module names in \p modules.
* @param[in] data_tree Additional data tree for validating RPC/action/notification. The tree is used to satisfy
* possible references to the datastore content.
* @param[in] act_notif In case of nested action/notification, pointer to the subroot of the action/notification. Note
* that in this case the \p root points to the top level data tree node which provides the context
* for the nested action/notification
* @param[in] unres Unresolved data list, the newly added default nodes may need to add some unresolved items
* @param[in] wd Whether to add default values.
* @return EXIT_SUCCESS or EXIT_FAILURE
*/
int lyd_defaults_add_unres(struct lyd_node **root, int options, struct ly_ctx *ctx, const struct lys_module **modules,
int mod_count, const struct lyd_node *data_tree, struct lyd_node *act_notif,
struct unres_data *unres, int wd);
void lys_enable_deviations(struct lys_module *module);
void lys_disable_deviations(struct lys_module *module);
void lys_sub_module_remove_devs_augs(struct lys_module *module);
void lys_sub_module_apply_devs_augs(struct lys_module *module);
int apply_aug(struct lys_node_augment *augment, struct unres_schema *unres);
void lys_submodule_module_data_free(struct lys_submodule *submodule);
int lys_copy_union_leafrefs(struct lys_module *mod, struct lys_node *parent, struct lys_type *type,
struct lys_type *prev_new, struct unres_schema *unres);
const struct lys_module *lys_parse_fd_(struct ly_ctx *ctx, int fd, LYS_INFORMAT format, const char *revision, int implement);
const struct lys_module *lys_parse_mem_(struct ly_ctx *ctx, const char *data, LYS_INFORMAT format, const char *revision,
int internal, int implement);
/**
* @brief Get next augment from \p mod augmenting \p aug_target
*/
struct lys_node_augment *lys_getnext_target_aug(struct lys_node_augment *last, const struct lys_module *mod,
const struct lys_node *aug_target);
LY_STMT lys_snode2stmt(LYS_NODE nodetype);
struct lys_node ** lys_child(const struct lys_node *node, LYS_NODE nodetype);
#endif /* LY_TREE_INTERNAL_H_ */
|
/**
* @file tree_internal.h
* @author Radek Krejci <rkrejci@cesnet.cz>
* @brief libyang internal functions for manipulating with the data model and
* data trees.
*
* Copyright (c) 2015 CESNET, z.s.p.o.
*
* This source code is licensed under BSD 3-Clause License (the "License").
* You may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://opensource.org/licenses/BSD-3-Clause
*/
#ifndef LY_TREE_INTERNAL_H_
#define LY_TREE_INTERNAL_H_
#include <stdint.h>
#include "libyang.h"
#include "tree_schema.h"
#include "tree_data.h"
#include "resolve.h"
/* this is used to distinguish lyxml_elem * from a YANG temporary parsing structure, the first byte is compared */
#define LY_YANG_STRUCTURE_FLAG 0x80
/**
* @brief YANG namespace
*/
#define LY_NSYANG "urn:ietf:params:xml:ns:yang:1"
/**
* @brief YIN namespace
*/
#define LY_NSYIN "urn:ietf:params:xml:ns:yang:yin:1"
/**
* @brief NETCONF namespace
*/
#define LY_NSNC "urn:ietf:params:xml:ns:netconf:base:1.0"
/**
* @brief NACM namespace
*/
#define LY_NSNACM "urn:ietf:params:xml:ns:yang:ietf-netconf-acm"
/**
* @brief internal parser flag for actions and inline notifications
*/
#define LYD_OPT_ACT_NOTIF 0x100
/**
* @brief Internal list of built-in types
*/
extern struct lys_tpdf *ly_types[LY_DATA_TYPE_COUNT];
/**
* @brief Internal structure for data node sorting.
*/
/* Pairs a data node with a numeric position; used as a sort record when ordering sibling data nodes. */
struct lyd_node_pos {
struct lyd_node *node; /* data node being ordered */
uint32_t pos;          /* position used as the sort key (presumably the node's target index among siblings — confirm at use sites) */
};
/**
* @brief Internal structure for LYB parser/printer.
*/
/* Working state shared by the LYB (libyang binary format) parser and printer.
 * Tracks the stack of open data chunks (written sizes, start positions, inner-chunk
 * counts) plus the set of schema modules referenced by the data being processed. */
struct lyb_state {
size_t *written;       /* per open chunk: number of bytes written so far */
size_t *position;      /* per open chunk: start offset of the chunk (for back-patching its size) */
uint8_t *inner_chunks; /* per open chunk: count of nested inner chunks (bounded by LYB_INCHUNK_MAX) */
int used;              /* number of chunk slots currently in use */
int size;              /* allocated number of chunk slots (grown by LYB_STATE_STEP) */
const struct lys_module **models; /* schema modules used by the processed data */
int mod_count;         /* number of entries in models */
struct ly_ctx *ctx;    /* libyang context the data belongs to */
/* LYB printer only */
struct {
struct lys_node *first_sibling; /* first sibling of a schema level whose hashes are cached */
struct hash_table *ht;          /* cached sibling hash table for that level */
} *sib_ht;
int sib_ht_count;      /* number of entries in sib_ht */
};
/* struct lyb_state allocation step */
#define LYB_STATE_STEP 4
/**
* LYB schema hash constants
*
* Hash is divided to collision ID and hash itself.
*
* First bits are collision ID until 1 is found. The rest is truncated 32b hash.
* 1xxx xxxx - collision ID 0 (no collisions)
* 01xx xxxx - collision ID 1 (collision ID 0 hash collided)
* 001x xxxx - collision ID 2 ...
*/
/* Number of bits the whole hash will take (including hash collision ID) */
#define LYB_HASH_BITS 8
/* Masking 32b hash (collision ID 0) */
#define LYB_HASH_MASK 0x7f
/* Type for storing the whole hash (used only internally, publicly defined directly) */
#define LYB_HASH uint8_t
/* Need to move this first >> collision number (from 0) to get collision ID hash part */
#define LYB_HASH_COLLISION_ID 0x80
/* How many bytes are reserved for one data chunk SIZE (8B is maximum) */
#define LYB_SIZE_BYTES 1
/* Maximum size that will be written into LYB_SIZE_BYTES (must be large enough) */
#define LYB_SIZE_MAX UINT8_MAX
/* How many bytes are reserved for one data chunk inner chunk count */
#define LYB_INCHUNK_BYTES 1
/* Maximum size that will be written into LYB_INCHUNK_BYTES (must be large enough) */
#define LYB_INCHUNK_MAX UINT8_MAX
/* Just a helper macro */
#define LYB_META_BYTES (LYB_INCHUNK_BYTES + LYB_SIZE_BYTES)
/* Type large enough for all meta data */
#define LYB_META uint16_t
LYB_HASH lyb_hash(struct lys_node *sibling, uint8_t collision_id);
int lyb_has_schema_model(struct lys_node *sibling, const struct lys_module **models, int mod_count);
/**
* Macros to work with ::lyd_node#when_status
* +--- bit 1 - some when-stmt connected with the node (resolve_applies_when() is true)
* |+-- bit 2 - when-stmt's condition is resolved and it is true
* ||+- bit 3 - when-stmt's condition is resolved and it is false
* XXX
*
* bit 1 is set when the node is created
* if none of bits 2 and 3 is set, the when condition is not yet resolved
*/
#define LYD_WHEN 0x04
#define LYD_WHEN_TRUE 0x02
#define LYD_WHEN_FALSE 0x01
#define LYD_WHEN_DONE(status) (!((status) & LYD_WHEN) || ((status) & (LYD_WHEN_TRUE | LYD_WHEN_FALSE)))
/**
* @brief Type flag for an unresolved type in a grouping.
*/
#define LY_VALUE_UNRESGRP 0x80
#ifdef LY_ENABLED_CACHE
/**
* @brief Minimum number of children for the parent to create a hash table for them.
*/
# define LY_CACHE_HT_MIN_CHILDREN 4
int lyd_hash(struct lyd_node *node);
void lyd_insert_hash(struct lyd_node *node);
void lyd_unlink_hash(struct lyd_node *node, struct lyd_node *orig_parent);
#endif
/**
* @brief Create submodule structure by reading data from memory.
*
* @param[in] module Schema tree where to connect the submodule, belongs-to value must match.
* @param[in] data String containing the submodule specification in the given \p format.
* @param[in] format Format of the data to read.
* @param[in] unres list of unresolved items
* @return Created submodule structure or NULL in case of error.
*/
struct lys_submodule *lys_sub_parse_mem(struct lys_module *module, const char *data, LYS_INFORMAT format,
struct unres_schema *unres);
/**
* @brief Create submodule structure by reading data from file descriptor.
*
* \note Current implementation supports only reading data from standard (disk) file, not from sockets, pipes, etc.
*
* @param[in] module Schema tree where to connect the submodule, belongs-to value must match.
* @param[in] fd File descriptor of a regular file (e.g. sockets are not supported) containing the submodule
* specification in the given \p format.
* @param[in] format Format of the data to read.
* @param[in] unres list of unresolved items
* @return Created submodule structure or NULL in case of error.
*/
struct lys_submodule *lys_sub_parse_fd(struct lys_module *module, int fd, LYS_INFORMAT format, struct unres_schema *unres);
/**
* @brief Free the submodule structure
*
* @param[in] submodule The structure to free. Do not use the pointer after calling this function.
* @param[in] private_destructor Optional destructor function for private objects assigned
* to the nodes via lys_set_private(). If NULL, the private objects are not freed by libyang.
*/
void lys_submodule_free(struct lys_submodule *submodule, void (*private_destructor)(const struct lys_node *node, void *priv));
/**
* @brief Add child schema tree node at the end of the parent's child list.
*
* If the child is connected somewhere (has a parent), it is completely
* unlinked and none of the following conditions applies.
* If the child has prev sibling(s), they are ignored (child is added at the
* end of the child list).
* If the child has next sibling(s), all of them are connected with the parent.
*
* @param[in] parent Parent node where the \p child will be added.
* @param[in] module Module where the \p child will be added if the \p parent
* parameter is NULL (case of top-level elements). The parameter does not change
* the module of the \p child element. If the \p parent parameter is present,
* the \p module parameter is ignored.
* @param[in] child The schema tree node to be added.
* @param[in] options Parsing options. Only relevant when creating a shorthand case.
* @return 0 on success, nonzero else
*/
int lys_node_addchild(struct lys_node *parent, struct lys_module *module, struct lys_node *child, int options);
/**
* @brief Find a valid grouping definition relative to a node.
*
* Valid definition means a sibling of \p start or a sibling of any of \p start 's parents.
*
* @param[in] name Name of the searched grouping.
* @param[in] start Definition must be valid (visible) for this node.
* @return Matching valid grouping or NULL.
*/
struct lys_node_grp *lys_find_grouping_up(const char *name, struct lys_node *start);
/**
* @brief Check that the \p node being connected into the \p parent has a unique name (identifier).
*
* Function is performed also as part of lys_node_addchild().
*
* @param[in] node The schema tree node to be checked.
* @param[in] parent Parent node where the \p child is supposed to be added.
* @param[in] module Module where the \p child is supposed to be added if the \p parent
* parameter is NULL (case of top-level elements). The parameter does not change
* the module of the \p child element. If the \p parent parameter is present,
* the \p module parameter is ignored.
* @return 0 on success, nonzero else
*/
int lys_check_id(struct lys_node *node, struct lys_node *parent, struct lys_module *module);
/**
 * @brief Find out whether the node contains a must or when statement with an XPath expression
*
* @param[in] node Node to examine.
* @return 1 if contains, 0 otherwise
*/
int lys_has_xpath(const struct lys_node *node);
/**
* @brief Learn if \p type is defined in the local module or from an import.
*
* @param[in] type Type to examine.
* @return non-zero if local, 0 if from an import.
*/
int lys_type_is_local(const struct lys_type *type);
/**
* @brief Create a copy of the specified schema tree \p node
*
* @param[in] module Target module for the duplicated node.
* @param[in] parent Schema tree node where the node is being connected, NULL in case of top level \p node.
* @param[in] node Schema tree node to be duplicated.
* @param[in] unres list of unresolved items
* @param[in] shallow Whether to copy children and connect to parent/module too.
* @return Created copy of the provided schema \p node.
*/
struct lys_node *lys_node_dup(struct lys_module *module, struct lys_node *parent, const struct lys_node *node,
struct unres_schema *unres, int shallow);
/**
* @brief duplicate the list of extension instances.
*
* @param[in] ctx Context to store errors in.
* @param[in] mod Module where we are
* @param[in] orig list of the extension instances to duplicate, the size of the array must correspond with \p size
* @param[in] size number of items in \p old array to duplicate
* @param[in] parent Parent structure of the new extension instances list
 * @param[in] parent_type Type of the provided \p parent
* @param[in,out] new Address where to store the created list of duplicated extension instances
* @param[in] shallow Whether to copy children and connect to parent/module too.
* @param[in] unres list of unresolved items
*
*/
int lys_ext_dup(struct ly_ctx *ctx, struct lys_module *mod, struct lys_ext_instance **orig, uint8_t size, void *parent,
LYEXT_PAR parent_type, struct lys_ext_instance ***new, int shallow, struct unres_schema *unres);
/**
* @brief Iterate over the specified type of the extension instances
*
* @param[in] ext Array of extensions to explore
* @param[in] ext_size Size of the provided \p ext array
* @param[in] start Index in the \p ext array where to start searching (first call with 0, the consequent calls with
* the returned index increased by 1, unless the returned index is -1)
 * @param[in] substmt Type of the extension (its belonging to the specific substatement) to iterate, use
 * #LYEXT_SUBSTMT_ALL to go through all the extensions in the array
* @result index in the ext, -1 if not present
*/
int lys_ext_iter(struct lys_ext_instance **ext, uint8_t ext_size, uint8_t start, LYEXT_SUBSTMT substmt);
/**
* @brief free the array of the extension instances
*/
void lys_extension_instances_free(struct ly_ctx *ctx, struct lys_ext_instance **e, unsigned int size,
void (*private_destructor)(const struct lys_node *node, void *priv));
/**
* @brief Add pointer to \p leafref to \p leafref_target children so that it knows there
* are some leafrefs referring it.
*
* @param[in] leafref_target Leaf that is \p leafref's target.
* @param[in] leafref Leaf or leaflist of type #LY_TYPE_LEAFREF referring \p leafref_target.
* @return 0 on success, -1 on error.
*/
int lys_leaf_add_leafref_target(struct lys_node_leaf *leafref_target, struct lys_node *leafref);
/**
* @brief Free a schema when condition
*
 * @param[in] ctx libyang context where the schema of the condition is used.
* @param[in] w When structure to free.
* @param[in] private_destructor Destructor for priv member in extension instances
*/
void lys_when_free(struct ly_ctx *ctx, struct lys_when *w,
void (*private_destructor)(const struct lys_node *node, void *priv));
/**
* @brief Free the schema tree restriction (must, ...) structure content
*
* @param[in] ctx libyang context where the schema of the restriction is used.
* @param[in] restr The restriction structure to free. The function actually frees only
* the content of the structure, so after using this function, caller is supposed to
* use free(restr). It is done to free the content of structures being allocated as
* part of array, in that case the free() is used on the whole array.
* @param[in] private_destructor Destructor for priv member in extension instances
*/
void lys_restr_free(struct ly_ctx *ctx, struct lys_restr *restr,
void (*private_destructor)(const struct lys_node *node, void *priv));
/**
 * @brief Free the schema tree type structure content
 *
 * @param[in] ctx libyang context where the schema of the type is used.
 * @param[in] type The type structure to free. The function actually frees only
 * the content of the structure, so after using this function, caller is supposed to
 * use free(type). It is done to free the content of structures being allocated as
 * part of array, in that case the free() is used on the whole array.
 * @param[in] private_destructor Destructor for priv member in extension instances
 */
void lys_type_free(struct ly_ctx *ctx, struct lys_type *type,
                   void (*private_destructor)(const struct lys_node *node, void *priv));
/**
* @brief Unlink the schema node from the tree.
*
* @param[in] node Schema tree node to unlink.
*/
void lys_node_unlink(struct lys_node *node);
/**
* @brief Free the schema node structure, includes unlinking it from the tree
*
* @param[in] node Schema tree node to free. Do not use the pointer after calling this function.
* @param[in] private_destructor Optional destructor function for private objects assigned
* to the nodes via lys_set_private(). If NULL, the private objects are not freed by libyang.
* @param[in] shallow Whether to do a shallow free only (on a shallow copy of a node).
*/
void lys_node_free(struct lys_node *node, void (*private_destructor)(const struct lys_node *node, void *priv), int shallow);
/**
* @brief Free (and unlink it from the context) the specified schema.
*
* It is dangerous to call this function on schemas already placed into the context's
* list of modules - there can be many references from other modules and data instances.
*
* @param[in] module Data model to free.
* @param[in] private_destructor Optional destructor function for private objects assigned
* to the nodes via lys_set_private(). If NULL, the private objects are not freed by libyang.
* @param[in] free_subs Whether to free included submodules.
* @param[in] remove_from_ctx Whether to remove this model from context. Always use 1 except
* when removing all the models (in ly_ctx_destroy()).
*/
void lys_free(struct lys_module *module, void (*private_destructor)(const struct lys_node *node, void *priv),
int free_subs, int remove_from_ctx);
/**
* @brief Create a data container knowing it's schema node.
*
* @param[in] parent Data parent of the new node.
* @param[in] schema Schema node of the new node.
* @param[in] dflt Set dflt flag in the created data nodes
* @return New node, NULL on error.
*/
struct lyd_node *_lyd_new(struct lyd_node *parent, const struct lys_node *schema, int dflt);
/**
* @brief Create a dummy node for XPath evaluation. After done using, it should be removed.
*
* The function must be used very carefully:
* - there must not be a list node to create
*
* @param[in] data Any data node of the tree where the dummy node will be created
* @param[in] parent To optimize searching in data tree (and to avoid issues with lists), caller can specify a
* parent node that exists in the data tree.
* @param[in] schema Schema node of the dummy node to create, must be of nodetype that
* appears also in data tree.
* @param[in] value Optional value to be set in the dummy node
* @param[in] dflt Set dflt flag in the created data nodes
*
* @return The first created node needed for the dummy node in the given tree.
*/
struct lyd_node *lyd_new_dummy(struct lyd_node *data, struct lyd_node *parent, const struct lys_node *schema,
const char *value, int dflt);
/**
* @brief Find the parent node of an attribute.
*
* @param[in] root Root element of the data tree with the attribute.
* @param[in] attr Attribute to find.
*
* @return Parent of \p attr, NULL if not found.
*/
const struct lyd_node *lyd_attr_parent(const struct lyd_node *root, struct lyd_attr *attr);
/**
 * @brief Internal version of lyd_unlink().
 *
 * @param[in] node Node to unlink.
 * @param[in] permanent 0 - the node will be linked back,
 * 1 - the node is permanently unlinked,
 * 2 - the node is being freed.
 *
 * @return EXIT_SUCCESS on success, EXIT_FAILURE on error.
 */
int lyd_unlink_internal(struct lyd_node *node, int permanent);
/**
* @brief Internal version of lyd_insert() and lyd_insert_sibling().
*
* @param[in] invalidate Whether to invalidate any nodes. Set 0 only if linking back some temporarily internally unlinked nodes.
*/
int lyd_insert_common(struct lyd_node *parent, struct lyd_node **sibling, struct lyd_node *node, int invalidate);
/**
* @brief Internal version of lyd_insert_before() and lyd_insert_after().
*
* @param[in] invalidate Whether to invalidate any nodes. Set 0 only if linking back some temporarily internally unlinked nodes.
*/
int lyd_insert_nextto(struct lyd_node *sibling, struct lyd_node *node, int before, int invalidate);
/**
* @brief Find a specific sibling. Does not log.
*
* Since \p mod_name is mandatory, augments are handled.
*
* @param[in] siblings Siblings to consider. They are first adjusted to
* point to the first sibling.
* @param[in] mod_name Module name, mandatory.
* @param[in] mod_name_len Module name length.
* @param[in] name Node name, mandatory.
* @param[in] nam_len Node name length.
* @param[in] type ORed desired type of the node. 0 means any type.
* Does not return groupings, uses, and augments (but can return augment nodes).
* @param[out] ret Pointer to the node of the desired type. Can be NULL.
*
* @return EXIT_SUCCESS on success, EXIT_FAILURE on forward reference.
*/
int lys_get_sibling(const struct lys_node *siblings, const char *mod_name, int mod_name_len, const char *name,
int nam_len, LYS_NODE type, const struct lys_node **ret);
/**
* @brief Find a specific node that can only appear in the data. Does not log.
*
* @param[in] mod Main module with the node. Must be set if \p parent == NULL (top-level node).
* @param[in] parent Parent of the node. Must be set if \p mod == NULL (nested node).
* @param[in] name Node name.
* @param[in] nam_len Node \p name length.
* @param[in] type ORed desired type of the node. 0 means any (data node) type.
* @param[in] getnext_opts lys_getnext() options to use.
* @param[out] ret Pointer to the node of the desired type. Can be NULL.
*
* @return EXIT_SUCCESS on success, EXIT_FAILURE on fail.
*/
int lys_getnext_data(const struct lys_module *mod, const struct lys_node *parent, const char *name, int nam_len,
LYS_NODE type, int getnext_opts, const struct lys_node **ret);
int lyd_get_unique_default(const char* unique_expr, struct lyd_node *list, const char **dflt);
int lyd_build_relative_data_path(const struct lys_module *module, const struct lyd_node *node, const char *schema_id,
char *buf);
void lyd_free_value(lyd_val value, LY_DATA_TYPE value_type, uint8_t value_flags, struct lys_type *type, lyd_val *old_val,
LY_DATA_TYPE *old_val_type, uint8_t *old_val_flags);
int lyd_list_equal(struct lyd_node *node1, struct lyd_node *node2, int with_defaults);
int lys_make_implemented_r(struct lys_module *module, struct unres_schema *unres);
/**
* @brief Check for (validate) mandatory nodes of a data tree. Checks recursively whole data tree. Requires all when
* statement to be solved.
*
* @param[in] root Data tree to validate.
* @param[in] ctx libyang context (for the case when the data tree is empty - i.e. root == NULL).
* @param[in] modules Only check mandatory nodes from these modules. If not set, check for all modules in the context.
* @param[in] mod_count Number of modules in \p modules.
* @param[in] options Standard @ref parseroptions.
* @return EXIT_SUCCESS or EXIT_FAILURE.
*/
int lyd_check_mandatory_tree(struct lyd_node *root, struct ly_ctx *ctx, const struct lys_module **modules, int mod_count,
int options);
/**
* @brief Check if the provided node is inside a grouping.
*
* @param[in] node Schema node to check.
* @return 0 as false, 1 as true
*/
int lys_ingrouping(const struct lys_node *node);
int unres_data_diff_new(struct unres_data *unres, struct lyd_node *subtree, struct lyd_node *parent, int created);
void unres_data_diff_rem(struct unres_data *unres, unsigned int idx);
/**
* @brief Process (add/clean) default nodes in the data tree and resolve the unresolved items
*
* @param[in,out] root Pointer to the root node of the complete data tree, the root node can be NULL if the data tree
* is empty
* @param[in] options Parser options to know the data tree type, see @ref parseroptions.
* @param[in] ctx Context for the case the \p root is empty (in that case \p ctx must not be NULL)
* @param[in] modules Only modules that will be traversed when adding default values.
* @param[in] mod_count Number of module names in \p modules.
* @param[in] data_tree Additional data tree for validating RPC/action/notification. The tree is used to satisfy
* possible references to the datastore content.
* @param[in] act_notif In case of nested action/notification, pointer to the subroot of the action/notification. Note
* that in this case the \p root points to the top level data tree node which provides the context
* for the nested action/notification
* @param[in] unres Unresolved data list, the newly added default nodes may need to add some unresolved items
* @param[in] wd Whether to add default values.
* @return EXIT_SUCCESS or EXIT_FAILURE
*/
int lyd_defaults_add_unres(struct lyd_node **root, int options, struct ly_ctx *ctx, const struct lys_module **modules,
int mod_count, const struct lyd_node *data_tree, struct lyd_node *act_notif,
struct unres_data *unres, int wd);
void lys_enable_deviations(struct lys_module *module);
void lys_disable_deviations(struct lys_module *module);
void lys_sub_module_remove_devs_augs(struct lys_module *module);
void lys_sub_module_apply_devs_augs(struct lys_module *module);
int apply_aug(struct lys_node_augment *augment, struct unres_schema *unres);
void lys_submodule_module_data_free(struct lys_submodule *submodule);
int lys_copy_union_leafrefs(struct lys_module *mod, struct lys_node *parent, struct lys_type *type,
struct lys_type *prev_new, struct unres_schema *unres);
const struct lys_module *lys_parse_fd_(struct ly_ctx *ctx, int fd, LYS_INFORMAT format, const char *revision, int implement);
const struct lys_module *lys_parse_mem_(struct ly_ctx *ctx, const char *data, LYS_INFORMAT format, const char *revision,
int internal, int implement);
/**
* @brief Get next augment from \p mod augmenting \p aug_target
*/
struct lys_node_augment *lys_getnext_target_aug(struct lys_node_augment *last, const struct lys_module *mod,
const struct lys_node *aug_target);
LY_STMT lys_snode2stmt(LYS_NODE nodetype);
struct lys_node ** lys_child(const struct lys_node *node, LYS_NODE nodetype);
#endif /* LY_TREE_INTERNAL_H_ */
|
1359_2
|
crossvul
|
h
|
CWE-119
|
Improper Restriction of Operations within the Bounds of a Memory Buffer - Certain languages allow direct addressing of memory locations and do not automatically ensure that these locations are valid for the memory buffer that is being referenced.
|
c
|
/* -*- mode: c; indent-tabs-mode: nil -*- */
/*
* Copyright 2000, 2008 by the Massachusetts Institute of Technology.
* All Rights Reserved.
*
* Export of this software from the United States of America may
* require a specific license from the United States Government.
* It is the responsibility of any person or organization contemplating
* export to obtain such a license before exporting.
*
* WITHIN THAT CONSTRAINT, permission to use, copy, modify, and
* distribute this software and its documentation for any purpose and
* without fee is hereby granted, provided that the above copyright
* notice appear in all copies and that both that copyright notice and
* this permission notice appear in supporting documentation, and that
* the name of M.I.T. not be used in advertising or publicity pertaining
* to distribution of the software without specific, written prior
* permission. Furthermore if you modify this software you must label
* your software as modified software and not distribute it in such a
* fashion that it might be confused with the original M.I.T. software.
* M.I.T. makes no representations about the suitability of
* this software for any purpose. It is provided "as is" without express
* or implied warranty.
*
*/
/*
* Copyright 1993 by OpenVision Technologies, Inc.
*
* Permission to use, copy, modify, distribute, and sell this software
* and its documentation for any purpose is hereby granted without fee,
* provided that the above copyright notice appears in all copies and
* that both that copyright notice and this permission notice appear in
* supporting documentation, and that the name of OpenVision not be used
* in advertising or publicity pertaining to distribution of the software
* without specific, written prior permission. OpenVision makes no
* representations about the suitability of this software for any
* purpose. It is provided "as is" without express or implied warranty.
*
* OPENVISION DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
* INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
* EVENT SHALL OPENVISION BE LIABLE FOR ANY SPECIAL, INDIRECT OR
* CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF
* USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
* OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef _GSSAPIP_KRB5_H_
#define _GSSAPIP_KRB5_H_
#include <k5-int.h>
#ifdef HAVE_MEMORY_H
#include <memory.h>
#endif
/* work around sunos braindamage */
#ifdef major
#undef major
#endif
#ifdef minor
#undef minor
#endif
#include "gssapiP_generic.h"
/* The include of gssapi_krb5.h will dtrt with the above #defines in
* effect.
*/
#include "gssapi_krb5.h"
#include "gssapi_err_krb5.h"
#include "gssapi_ext.h"
/* for debugging */
#undef CFX_EXERCISE
/** constants **/
#define GSS_MECH_KRB5_OID_LENGTH 9
#define GSS_MECH_KRB5_OID "\052\206\110\206\367\022\001\002\002"
#define GSS_MECH_KRB5_OLD_OID_LENGTH 5
#define GSS_MECH_KRB5_OLD_OID "\053\005\001\005\002"
/* Incorrect krb5 mech OID emitted by MS. */
#define GSS_MECH_KRB5_WRONG_OID_LENGTH 9
#define GSS_MECH_KRB5_WRONG_OID "\052\206\110\202\367\022\001\002\002"
/* IAKERB variant */
#define GSS_MECH_IAKERB_OID_LENGTH 6
#define GSS_MECH_IAKERB_OID "\053\006\001\005\002\005"
extern const gss_OID_set_desc * const kg_all_mechs;
#define CKSUMTYPE_KG_CB 0x8003
#define KG_TOK_CTX_AP_REQ 0x0100
#define KG_TOK_CTX_AP_REP 0x0200
#define KG_TOK_CTX_ERROR 0x0300
#define KG_TOK_SIGN_MSG 0x0101
#define KG_TOK_SEAL_MSG 0x0201
#define KG_TOK_MIC_MSG 0x0101
#define KG_TOK_WRAP_MSG 0x0201
#define KG_TOK_DEL_CTX 0x0102
#define KG2_TOK_MIC_MSG 0x0404
#define KG2_TOK_WRAP_MSG 0x0504
#define KG2_TOK_DEL_CTX 0x0405
#define IAKERB_TOK_PROXY 0x0501
#define KRB5_GSS_FOR_CREDS_OPTION 1
#define KG2_RESP_FLAG_ERROR 0x0001
#define KG2_RESP_FLAG_DELEG_OK 0x0002
/** CFX flags **/
#define FLAG_SENDER_IS_ACCEPTOR 0x01
#define FLAG_WRAP_CONFIDENTIAL 0x02
#define FLAG_ACCEPTOR_SUBKEY 0x04
/* These are to be stored in little-endian order, i.e., des-mac is
   stored as 02 00. */
/* Signing (integrity checksum) algorithm identifiers used in RFC 1964
 * style token headers. */
enum sgn_alg {
    SGN_ALG_DES_MAC_MD5 = 0x0000,
    SGN_ALG_MD2_5 = 0x0001,
    SGN_ALG_DES_MAC = 0x0002,
    SGN_ALG_3 = 0x0003, /* not published */
    SGN_ALG_HMAC_MD5 = 0x0011, /* microsoft w2k; */
    SGN_ALG_HMAC_SHA1_DES3_KD = 0x0004
};
/* Sealing (confidentiality) algorithm identifiers used in RFC 1964
 * style token headers; SEAL_ALG_NONE marks an integrity-only token. */
enum seal_alg {
    SEAL_ALG_NONE = 0xffff,
    SEAL_ALG_DES = 0x0000,
    SEAL_ALG_1 = 0x0001, /* not published */
    SEAL_ALG_MICROSOFT_RC4 = 0x0010, /* microsoft w2k; */
    SEAL_ALG_DES3KD = 0x0002
};
/* for 3DES */
#define KG_USAGE_SEAL 22
#define KG_USAGE_SIGN 23
#define KG_USAGE_SEQ 24
/* for draft-ietf-krb-wg-gssapi-cfx-01 */
#define KG_USAGE_ACCEPTOR_SEAL 22
#define KG_USAGE_ACCEPTOR_SIGN 23
#define KG_USAGE_INITIATOR_SEAL 24
#define KG_USAGE_INITIATOR_SIGN 25
enum qop {
GSS_KRB5_INTEG_C_QOP_MD5 = 0x0001, /* *partial* MD5 = "MD2.5" */
GSS_KRB5_INTEG_C_QOP_DES_MD5 = 0x0002,
GSS_KRB5_INTEG_C_QOP_DES_MAC = 0x0003,
GSS_KRB5_INTEG_C_QOP_HMAC_SHA1 = 0x0004,
GSS_KRB5_INTEG_C_QOP_MASK = 0x00ff,
GSS_KRB5_CONF_C_QOP_DES = 0x0100,
GSS_KRB5_CONF_C_QOP_DES3_KD = 0x0200,
GSS_KRB5_CONF_C_QOP_MASK = 0xff00
};
/** internal types **/

/* Mechanism-specific GSS name: the krb5 principal plus the service and
 * host strings it may have been imported from.  All fields except
 * ad_context are immutable after creation; the lock guards ad_context. */
typedef struct _krb5_gss_name_rec {
    krb5_principal princ; /* immutable */
    char *service; /* immutable */
    char *host; /* immutable */
    k5_mutex_t lock; /* protects ad_context only for now */
    krb5_authdata_context ad_context;
} krb5_gss_name_rec, *krb5_gss_name_t;
/* Mechanism-specific GSS credential: holds the underlying krb5 objects
 * (ccache/keytab/rcache) for either or both credential usages, plus
 * flags and expiry/refresh bookkeeping.  The lock serializes access. */
typedef struct _krb5_gss_cred_id_rec {
    /* protect against simultaneous accesses */
    k5_mutex_t lock;
    /* name/type of credential */
    gss_cred_usage_t usage;
    krb5_gss_name_t name;
    krb5_principal impersonator;
    unsigned int default_identity : 1;
    unsigned int iakerb_mech : 1;
    unsigned int destroy_ccache : 1;
    unsigned int suppress_ci_flags : 1;
    /* keytab (accept) data */
    krb5_keytab keytab;
    krb5_rcache rcache;
    /* ccache (init) data */
    krb5_ccache ccache;
    krb5_keytab client_keytab;
    krb5_boolean have_tgt;
    krb5_timestamp expire;
    krb5_timestamp refresh_time;
    krb5_enctype *req_enctypes; /* limit negotiated enctypes to this list */
    char *password;
} krb5_gss_cred_id_rec, *krb5_gss_cred_id_t;
/* Extra context-establishment state: currently only the IAKERB
 * conversation data (conv) and whether it has been verified. */
typedef struct _krb5_gss_ctx_ext_rec {
    struct {
        krb5_data *conv;
        int verified;
    } iakerb;
} krb5_gss_ctx_ext_rec, *krb5_gss_ctx_ext_t;
/* Mechanism-specific GSS security context: negotiated keys and
 * algorithms, sequence-number state, ticket times/flags, and the
 * protocol revision (RFC 1964 vs. RFC 4121) used for outgoing tokens. */
typedef struct _krb5_gss_ctx_id_rec {
    krb5_magic magic;
    unsigned int initiate : 1; /* nonzero if initiating, zero if accepting */
    unsigned int established : 1;
    unsigned int have_acceptor_subkey : 1;
    unsigned int seed_init : 1; /* XXX tested but never actually set */
    unsigned int terminated : 1;
    OM_uint32 gss_flags;
    unsigned char seed[16];
    krb5_gss_name_t here;
    krb5_gss_name_t there;
    krb5_key subkey; /* One of two potential keys to use with RFC 4121
                      * packets; this key must always be set. */
    int signalg;
    size_t cksum_size;
    int sealalg;
    krb5_key enc; /* RFC 1964 encryption key; seq xored with a constant
                   * for DES, seq for other RFC 1964 enctypes */
    krb5_key seq; /* RFC 1964 sequencing key */
    krb5_ticket_times krb_times;
    krb5_flags krb_flags;
    /* XXX these used to be signed. the old spec is inspecific, and
       the new spec specifies unsigned. I don't believe that the change
       affects the wire encoding. */
    uint64_t seq_send;
    uint64_t seq_recv;
    g_seqnum_state seqstate;
    krb5_context k5_context;
    krb5_auth_context auth_context;
    gss_OID_desc *mech_used;
    /* Protocol spec revision for sending packets
       0 => RFC 1964 with 3DES and RC4 enhancements
       1 => RFC 4121
       No others defined so far. It is always permitted to receive
       tokens in RFC 4121 format. If enc is non-null, receiving RFC
       1964 tokens is permitted.*/
    int proto;
    krb5_cksumtype cksumtype; /* for "main" subkey */
    krb5_key acceptor_subkey; /* CFX only */
    krb5_cksumtype acceptor_subkey_cksumtype;
    int cred_rcache; /* did we get rcache from creds? */
    krb5_authdata **authdata;
} krb5_gss_ctx_id_rec, *krb5_gss_ctx_id_t;
extern g_set kg_vdb;
#ifndef LEAN_CLIENT
extern k5_mutex_t gssint_krb5_keytab_lock;
#endif /* LEAN_CLIENT */
/** helper functions **/
OM_uint32 kg_get_defcred
(OM_uint32 *minor_status,
gss_cred_id_t *cred);
krb5_error_code kg_checksum_channel_bindings
(krb5_context context, gss_channel_bindings_t cb,
krb5_checksum *cksum);
krb5_error_code kg_make_seq_num (krb5_context context,
krb5_key key,
int direction, krb5_ui_4 seqnum, unsigned char *cksum,
unsigned char *buf);
krb5_error_code kg_get_seq_num (krb5_context context,
krb5_key key,
unsigned char *cksum, unsigned char *buf, int *direction,
krb5_ui_4 *seqnum);
krb5_error_code kg_make_seed (krb5_context context,
krb5_key key,
unsigned char *seed);
krb5_error_code
kg_setup_keys(krb5_context context,
krb5_gss_ctx_id_rec *ctx,
krb5_key subkey,
krb5_cksumtype *cksumtype);
int kg_confounder_size (krb5_context context, krb5_enctype enctype);
krb5_error_code kg_make_confounder (krb5_context context,
krb5_enctype enctype, unsigned char *buf);
krb5_error_code kg_encrypt (krb5_context context,
krb5_key key, int usage,
krb5_pointer iv,
krb5_const_pointer in,
krb5_pointer out,
unsigned int length);
/* Encrypt length bytes at ptr in place, with the given key and usage. If
* iv is not NULL, use it as the cipher state. */
krb5_error_code kg_encrypt_inplace(krb5_context context, krb5_key key,
int usage, krb5_pointer iv,
krb5_pointer ptr, unsigned int length);
krb5_error_code kg_encrypt_iov (krb5_context context,
int proto, int dce_style,
size_t ec, size_t rrc,
krb5_key key, int usage,
krb5_pointer iv,
gss_iov_buffer_desc *iov,
int iov_count);
krb5_error_code
kg_arcfour_docrypt (const krb5_keyblock *keyblock, int usage,
const unsigned char *kd_data, size_t kd_data_len,
const unsigned char *input_buf, size_t input_len,
unsigned char *output_buf);
krb5_error_code
kg_arcfour_docrypt_iov (krb5_context context,
const krb5_keyblock *keyblock, int usage,
const unsigned char *kd_data, size_t kd_data_len,
gss_iov_buffer_desc *iov,
int iov_count);
krb5_error_code kg_decrypt (krb5_context context,
krb5_key key, int usage,
krb5_pointer iv,
krb5_const_pointer in,
krb5_pointer out,
unsigned int length);
krb5_error_code kg_decrypt_iov (krb5_context context,
int proto, int dce_style,
size_t ec, size_t rrc,
krb5_key key, int usage,
krb5_pointer iv,
gss_iov_buffer_desc *iov,
int iov_count);
OM_uint32 kg_seal (OM_uint32 *minor_status,
gss_ctx_id_t context_handle,
int conf_req_flag,
gss_qop_t qop_req,
gss_buffer_t input_message_buffer,
int *conf_state,
gss_buffer_t output_message_buffer,
int toktype);
OM_uint32 kg_unseal (OM_uint32 *minor_status,
gss_ctx_id_t context_handle,
gss_buffer_t input_token_buffer,
gss_buffer_t message_buffer,
int *conf_state,
gss_qop_t *qop_state,
int toktype);
OM_uint32 kg_seal_size (OM_uint32 *minor_status,
gss_ctx_id_t context_handle,
int conf_req_flag,
gss_qop_t qop_req,
OM_uint32 output_size,
OM_uint32 *input_size);
krb5_error_code kg_ctx_size (krb5_context kcontext,
krb5_pointer arg,
size_t *sizep);
krb5_error_code kg_ctx_externalize (krb5_context kcontext,
krb5_pointer arg,
krb5_octet **buffer,
size_t *lenremain);
krb5_error_code kg_ctx_internalize (krb5_context kcontext,
krb5_pointer *argp,
krb5_octet **buffer,
size_t *lenremain);
OM_uint32 kg_sync_ccache_name (krb5_context context, OM_uint32 *minor_status);
OM_uint32 kg_caller_provided_ccache_name (OM_uint32 *minor_status,
int *out_caller_provided_name);
OM_uint32 kg_get_ccache_name (OM_uint32 *minor_status,
const char **out_name);
OM_uint32 kg_set_ccache_name (OM_uint32 *minor_status,
const char *name);
/* AEAD */
krb5_error_code gss_krb5int_make_seal_token_v3_iov(krb5_context context,
krb5_gss_ctx_id_rec *ctx,
int conf_req_flag,
int *conf_state,
gss_iov_buffer_desc *iov,
int iov_count,
int toktype);
OM_uint32 gss_krb5int_unseal_v3_iov(krb5_context context,
OM_uint32 *minor_status,
krb5_gss_ctx_id_rec *ctx,
gss_iov_buffer_desc *iov,
int iov_count,
int *conf_state,
gss_qop_t *qop_state,
int toktype);
gss_iov_buffer_t kg_locate_iov (gss_iov_buffer_desc *iov,
int iov_count,
OM_uint32 type);
gss_iov_buffer_t kg_locate_header_iov(gss_iov_buffer_desc *iov, int iov_count,
int toktype);
void kg_iov_msglen(gss_iov_buffer_desc *iov,
int iov_count,
size_t *data_length,
size_t *assoc_data_length);
void kg_release_iov(gss_iov_buffer_desc *iov,
int iov_count);
krb5_error_code kg_make_checksum_iov_v1(krb5_context context,
krb5_cksumtype type,
size_t token_cksum_len,
krb5_key seq,
krb5_key enc, /* for conf len */
krb5_keyusage sign_usage,
gss_iov_buffer_desc *iov,
int iov_count,
int toktype,
krb5_checksum *checksum);
krb5_error_code kg_make_checksum_iov_v3(krb5_context context,
krb5_cksumtype type,
size_t rrc,
krb5_key key,
krb5_keyusage sign_usage,
gss_iov_buffer_desc *iov,
int iov_count,
int toktype);
krb5_error_code kg_verify_checksum_iov_v3(krb5_context context,
krb5_cksumtype type,
size_t rrc,
krb5_key key,
krb5_keyusage sign_usage,
gss_iov_buffer_desc *iov,
int iov_count,
int toktype,
krb5_boolean *valid);
OM_uint32 kg_seal_iov (OM_uint32 *minor_status,
gss_ctx_id_t context_handle,
int conf_req_flag,
gss_qop_t qop_req,
int *conf_state,
gss_iov_buffer_desc *iov,
int iov_count,
int toktype);
OM_uint32 kg_unseal_iov (OM_uint32 *minor_status,
gss_ctx_id_t context_handle,
int *conf_state,
gss_qop_t *qop_state,
gss_iov_buffer_desc *iov,
int iov_count,
int toktype);
OM_uint32 kg_seal_iov_length(OM_uint32 *minor_status,
gss_ctx_id_t context_handle,
int conf_req_flag,
gss_qop_t qop_req,
int *conf_state,
gss_iov_buffer_desc *iov,
int iov_count,
int toktype);
krb5_cryptotype kg_translate_flag_iov(OM_uint32 type);
OM_uint32 kg_fixup_padding_iov(OM_uint32 *minor_status,
gss_iov_buffer_desc *iov,
int iov_count);
krb5_boolean kg_integ_only_iov(gss_iov_buffer_desc *iov, int iov_count);
krb5_error_code kg_allocate_iov(gss_iov_buffer_t iov, size_t size);
krb5_error_code
krb5_to_gss_cred(krb5_context context,
krb5_creds *creds,
krb5_gss_cred_id_t *out_cred);
krb5_boolean
kg_cred_time_to_refresh(krb5_context context, krb5_gss_cred_id_rec *cred);
void
kg_cred_set_initial_refresh(krb5_context context, krb5_gss_cred_id_rec *cred,
krb5_ticket_times *times);
OM_uint32
kg_cred_resolve(OM_uint32 *minor_status, krb5_context context,
gss_cred_id_t cred_handle, gss_name_t target_name);
/** declarations of internal name mechanism functions **/
OM_uint32 KRB5_CALLCONV krb5_gss_acquire_cred
(OM_uint32*, /* minor_status */
gss_name_t, /* desired_name */
OM_uint32, /* time_req */
gss_OID_set, /* desired_mechs */
gss_cred_usage_t, /* cred_usage */
gss_cred_id_t*, /* output_cred_handle */
gss_OID_set*, /* actual_mechs */
OM_uint32* /* time_rec */
);
OM_uint32 KRB5_CALLCONV iakerb_gss_acquire_cred
(OM_uint32*, /* minor_status */
gss_name_t, /* desired_name */
OM_uint32, /* time_req */
gss_OID_set, /* desired_mechs */
gss_cred_usage_t, /* cred_usage */
gss_cred_id_t*, /* output_cred_handle */
gss_OID_set*, /* actual_mechs */
OM_uint32* /* time_rec */
);
OM_uint32 KRB5_CALLCONV
krb5_gss_acquire_cred_with_password(
OM_uint32 *minor_status,
const gss_name_t desired_name,
const gss_buffer_t password,
OM_uint32 time_req,
const gss_OID_set desired_mechs,
int cred_usage,
gss_cred_id_t *output_cred_handle,
gss_OID_set *actual_mechs,
OM_uint32 *time_rec);
OM_uint32 KRB5_CALLCONV
iakerb_gss_acquire_cred_with_password(
OM_uint32 *minor_status,
const gss_name_t desired_name,
const gss_buffer_t password,
OM_uint32 time_req,
const gss_OID_set desired_mechs,
int cred_usage,
gss_cred_id_t *output_cred_handle,
gss_OID_set *actual_mechs,
OM_uint32 *time_rec);
OM_uint32 KRB5_CALLCONV krb5_gss_release_cred
(OM_uint32*, /* minor_status */
gss_cred_id_t* /* cred_handle */
);
OM_uint32 KRB5_CALLCONV krb5_gss_init_sec_context
(OM_uint32*, /* minor_status */
gss_cred_id_t, /* claimant_cred_handle */
gss_ctx_id_t*, /* context_handle */
gss_name_t, /* target_name */
gss_OID, /* mech_type */
OM_uint32, /* req_flags */
OM_uint32, /* time_req */
gss_channel_bindings_t,
/* input_chan_bindings */
gss_buffer_t, /* input_token */
gss_OID*, /* actual_mech_type */
gss_buffer_t, /* output_token */
OM_uint32*, /* ret_flags */
OM_uint32* /* time_rec */
);
OM_uint32 krb5_gss_init_sec_context_ext
(OM_uint32*, /* minor_status */
gss_cred_id_t, /* claimant_cred_handle */
gss_ctx_id_t*, /* context_handle */
gss_name_t, /* target_name */
gss_OID, /* mech_type */
OM_uint32, /* req_flags */
OM_uint32, /* time_req */
gss_channel_bindings_t,
/* input_chan_bindings */
gss_buffer_t, /* input_token */
gss_OID*, /* actual_mech_type */
gss_buffer_t, /* output_token */
OM_uint32*, /* ret_flags */
OM_uint32*, /* time_rec */
krb5_gss_ctx_ext_t /* exts */
);
#ifndef LEAN_CLIENT
OM_uint32 KRB5_CALLCONV krb5_gss_accept_sec_context
(OM_uint32*, /* minor_status */
gss_ctx_id_t*, /* context_handle */
gss_cred_id_t, /* verifier_cred_handle */
gss_buffer_t, /* input_token_buffer */
gss_channel_bindings_t,
/* input_chan_bindings */
gss_name_t*, /* src_name */
gss_OID*, /* mech_type */
gss_buffer_t, /* output_token */
OM_uint32*, /* ret_flags */
OM_uint32*, /* time_rec */
gss_cred_id_t* /* delegated_cred_handle */
);
OM_uint32 KRB5_CALLCONV krb5_gss_accept_sec_context_ext
(OM_uint32*, /* minor_status */
gss_ctx_id_t*, /* context_handle */
gss_cred_id_t, /* verifier_cred_handle */
gss_buffer_t, /* input_token_buffer */
gss_channel_bindings_t,
/* input_chan_bindings */
gss_name_t*, /* src_name */
gss_OID*, /* mech_type */
gss_buffer_t, /* output_token */
OM_uint32*, /* ret_flags */
OM_uint32*, /* time_rec */
gss_cred_id_t*, /* delegated_cred_handle */
krb5_gss_ctx_ext_t/*exts */
);
#endif /* LEAN_CLIENT */
OM_uint32 KRB5_CALLCONV krb5_gss_inquire_sec_context_by_oid
(OM_uint32*, /* minor_status */
const gss_ctx_id_t,
/* context_handle */
const gss_OID, /* desired_object */
gss_buffer_set_t* /* data_set */
);
OM_uint32 KRB5_CALLCONV krb5_gss_set_sec_context_option
(OM_uint32*, /* minor_status */
gss_ctx_id_t*, /* context_handle */
const gss_OID, /* desired_object */
const gss_buffer_t/* value */
);
OM_uint32 KRB5_CALLCONV krb5_gss_process_context_token
(OM_uint32*, /* minor_status */
gss_ctx_id_t, /* context_handle */
gss_buffer_t /* token_buffer */
);
OM_uint32 KRB5_CALLCONV krb5_gss_delete_sec_context
(OM_uint32*, /* minor_status */
gss_ctx_id_t*, /* context_handle */
gss_buffer_t /* output_token */
);
OM_uint32 KRB5_CALLCONV krb5_gss_context_time
(OM_uint32*, /* minor_status */
gss_ctx_id_t, /* context_handle */
OM_uint32* /* time_rec */
);
OM_uint32 KRB5_CALLCONV krb5_gss_display_status
(OM_uint32*, /* minor_status */
OM_uint32, /* status_value */
int, /* status_type */
gss_OID, /* mech_type */
OM_uint32*, /* message_context */
gss_buffer_t /* status_string */
);
OM_uint32 KRB5_CALLCONV krb5_gss_indicate_mechs
(OM_uint32*, /* minor_status */
gss_OID_set* /* mech_set */
);
OM_uint32 KRB5_CALLCONV krb5_gss_compare_name
(OM_uint32*, /* minor_status */
gss_name_t, /* name1 */
gss_name_t, /* name2 */
int* /* name_equal */
);
OM_uint32 KRB5_CALLCONV krb5_gss_display_name
(OM_uint32*, /* minor_status */
gss_name_t, /* input_name */
gss_buffer_t, /* output_name_buffer */
gss_OID* /* output_name_type */
);
OM_uint32 KRB5_CALLCONV krb5_gss_import_name
(OM_uint32*, /* minor_status */
gss_buffer_t, /* input_name_buffer */
gss_OID, /* input_name_type */
gss_name_t* /* output_name */
);
OM_uint32 KRB5_CALLCONV krb5_gss_release_name
(OM_uint32*, /* minor_status */
gss_name_t* /* input_name */
);
OM_uint32 KRB5_CALLCONV krb5_gss_inquire_cred
(OM_uint32 *, /* minor_status */
gss_cred_id_t, /* cred_handle */
gss_name_t *, /* name */
OM_uint32 *, /* lifetime */
gss_cred_usage_t*,/* cred_usage */
gss_OID_set * /* mechanisms */
);
OM_uint32 KRB5_CALLCONV krb5_gss_inquire_context
(OM_uint32*, /* minor_status */
gss_ctx_id_t, /* context_handle */
gss_name_t*, /* initiator_name */
gss_name_t*, /* acceptor_name */
OM_uint32*, /* lifetime_rec */
gss_OID*, /* mech_type */
OM_uint32*, /* ret_flags */
int*, /* locally_initiated */
int* /* open */
);
/* New V2 entry points */
OM_uint32 KRB5_CALLCONV krb5_gss_get_mic
(OM_uint32 *, /* minor_status */
gss_ctx_id_t, /* context_handle */
gss_qop_t, /* qop_req */
gss_buffer_t, /* message_buffer */
gss_buffer_t /* message_token */
);
OM_uint32 KRB5_CALLCONV krb5_gss_get_mic_iov
(OM_uint32 *, /* minor_status */
gss_ctx_id_t, /* context_handle */
gss_qop_t, /* qop_req */
gss_iov_buffer_desc *, /* iov */
int /* iov_count */
);
OM_uint32 KRB5_CALLCONV krb5_gss_get_mic_iov_length
(OM_uint32 *, /* minor_status */
gss_ctx_id_t, /* context_handle */
gss_qop_t, /* qop_req */
gss_iov_buffer_desc *, /* iov */
int /* iov_count */
);
OM_uint32 KRB5_CALLCONV krb5_gss_verify_mic
(OM_uint32 *, /* minor_status */
gss_ctx_id_t, /* context_handle */
gss_buffer_t, /* message_buffer */
gss_buffer_t, /* message_token */
gss_qop_t * /* qop_state */
);
OM_uint32 KRB5_CALLCONV krb5_gss_verify_mic_iov
(OM_uint32 *, /* minor_status */
gss_ctx_id_t, /* context_handle */
gss_qop_t *, /* qop_state */
gss_iov_buffer_desc *, /* iov */
int /* iov_count */
);
OM_uint32 KRB5_CALLCONV krb5_gss_wrap
(OM_uint32 *, /* minor_status */
gss_ctx_id_t, /* context_handle */
int, /* conf_req_flag */
gss_qop_t, /* qop_req */
gss_buffer_t, /* input_message_buffer */
int *, /* conf_state */
gss_buffer_t /* output_message_buffer */
);
OM_uint32 KRB5_CALLCONV krb5_gss_wrap_iov
(OM_uint32 *, /* minor_status */
gss_ctx_id_t, /* context_handle */
int, /* conf_req_flag */
gss_qop_t, /* qop_req */
int *, /* conf_state */
gss_iov_buffer_desc *, /* iov */
int /* iov_count */
);
OM_uint32 KRB5_CALLCONV krb5_gss_wrap_iov_length
(OM_uint32 *, /* minor_status */
gss_ctx_id_t, /* context_handle */
int, /* conf_req_flag */
gss_qop_t, /* qop_req */
int *, /* conf_state */
gss_iov_buffer_desc *, /* iov */
int /* iov_count */
);
OM_uint32 KRB5_CALLCONV krb5_gss_unwrap
(OM_uint32 *, /* minor_status */
gss_ctx_id_t, /* context_handle */
gss_buffer_t, /* input_message_buffer */
gss_buffer_t, /* output_message_buffer */
int *, /* conf_state */
gss_qop_t * /* qop_state */
);
OM_uint32 KRB5_CALLCONV krb5_gss_unwrap_iov
(OM_uint32 *, /* minor_status */
gss_ctx_id_t, /* context_handle */
int *, /* conf_state */
gss_qop_t *, /* qop_state */
gss_iov_buffer_desc *, /* iov */
int /* iov_count */
);
OM_uint32 KRB5_CALLCONV krb5_gss_wrap_size_limit
(OM_uint32 *, /* minor_status */
gss_ctx_id_t, /* context_handle */
int, /* conf_req_flag */
gss_qop_t, /* qop_req */
OM_uint32, /* req_output_size */
OM_uint32 * /* max_input_size */
);
OM_uint32 KRB5_CALLCONV krb5_gss_import_name_object
(OM_uint32 *, /* minor_status */
void *, /* input_name */
gss_OID, /* input_name_type */
gss_name_t * /* output_name */
);
OM_uint32 KRB5_CALLCONV krb5_gss_export_name_object
(OM_uint32 *, /* minor_status */
gss_name_t, /* input_name */
gss_OID, /* desired_name_type */
void * * /* output_name */
);
OM_uint32 KRB5_CALLCONV krb5_gss_inquire_cred_by_mech
(OM_uint32 *, /* minor_status */
gss_cred_id_t, /* cred_handle */
gss_OID, /* mech_type */
gss_name_t *, /* name */
OM_uint32 *, /* initiator_lifetime */
OM_uint32 *, /* acceptor_lifetime */
gss_cred_usage_t * /* cred_usage */
);
#ifndef LEAN_CLIENT
OM_uint32 KRB5_CALLCONV krb5_gss_export_sec_context
(OM_uint32 *, /* minor_status */
gss_ctx_id_t *, /* context_handle */
gss_buffer_t /* interprocess_token */
);
OM_uint32 KRB5_CALLCONV krb5_gss_import_sec_context
(OM_uint32 *, /* minor_status */
gss_buffer_t, /* interprocess_token */
gss_ctx_id_t * /* context_handle */
);
#endif /* LEAN_CLIENT */
krb5_error_code krb5_gss_ser_init(krb5_context);
OM_uint32 krb5_gss_release_oid
(OM_uint32 *, /* minor_status */
gss_OID * /* oid */
);
OM_uint32 KRB5_CALLCONV krb5_gss_internal_release_oid
(OM_uint32 *, /* minor_status */
gss_OID * /* oid */
);
OM_uint32 KRB5_CALLCONV krb5_gss_inquire_names_for_mech
(OM_uint32 *, /* minor_status */
gss_OID, /* mechanism */
gss_OID_set * /* name_types */
);
OM_uint32 krb5_gss_canonicalize_name
(OM_uint32 *, /* minor_status */
const gss_name_t, /* input_name */
const gss_OID, /* mech_type */
gss_name_t * /* output_name */
);
OM_uint32 KRB5_CALLCONV krb5_gss_export_name
(OM_uint32 *, /* minor_status */
const gss_name_t, /* input_name */
gss_buffer_t /* exported_name */
);
OM_uint32 KRB5_CALLCONV krb5_gss_duplicate_name
(OM_uint32 *, /* minor_status */
const gss_name_t, /* input_name */
gss_name_t * /* dest_name */
);
OM_uint32 krb5_gss_validate_cred
(OM_uint32 *, /* minor_status */
gss_cred_id_t /* cred */
);
OM_uint32 KRB5_CALLCONV krb5_gss_acquire_cred_impersonate_name(
OM_uint32 *, /* minor_status */
const gss_cred_id_t, /* impersonator_cred_handle */
const gss_name_t, /* desired_name */
OM_uint32, /* time_req */
const gss_OID_set, /* desired_mechs */
gss_cred_usage_t, /* cred_usage */
gss_cred_id_t *, /* output_cred_handle */
gss_OID_set *, /* actual_mechs */
OM_uint32 *); /* time_rec */
OM_uint32
krb5_gss_validate_cred_1(OM_uint32 * /* minor_status */,
gss_cred_id_t /* cred_handle */,
krb5_context /* context */);
gss_OID krb5_gss_convert_static_mech_oid(gss_OID oid);
krb5_error_code gss_krb5int_make_seal_token_v3(krb5_context,
krb5_gss_ctx_id_rec *,
const gss_buffer_desc *,
gss_buffer_t,
int, int);
OM_uint32 gss_krb5int_unseal_token_v3(krb5_context *contextptr,
OM_uint32 *minor_status,
krb5_gss_ctx_id_rec *ctx,
unsigned char *ptr,
unsigned int bodysize,
gss_buffer_t message_buffer,
int *conf_state, gss_qop_t *qop_state,
int toktype);
int gss_krb5int_rotate_left (void *ptr, size_t bufsiz, size_t rc);
/* naming_exts.c */
#define KG_INIT_NAME_NO_COPY 0x1
krb5_error_code
kg_init_name(krb5_context context, krb5_principal principal,
char *service, char *host, krb5_authdata_context ad_context,
krb5_flags flags, krb5_gss_name_t *name);
krb5_error_code
kg_release_name(krb5_context context, krb5_gss_name_t *name);
krb5_error_code
kg_duplicate_name(krb5_context context, const krb5_gss_name_t src,
krb5_gss_name_t *dst);
krb5_boolean
kg_compare_name(krb5_context context,
krb5_gss_name_t name1,
krb5_gss_name_t name2);
krb5_boolean
kg_acceptor_princ(krb5_context context, krb5_gss_name_t name,
krb5_principal *princ_out);
OM_uint32 KRB5_CALLCONV
krb5_gss_display_name_ext(OM_uint32 *minor_status,
gss_name_t name,
gss_OID display_as_name_type,
gss_buffer_t display_name);
OM_uint32 KRB5_CALLCONV
krb5_gss_inquire_name(OM_uint32 *minor_status,
gss_name_t name,
int *name_is_MN,
gss_OID *MN_mech,
gss_buffer_set_t *attrs);
OM_uint32 KRB5_CALLCONV
krb5_gss_get_name_attribute(OM_uint32 *minor_status,
gss_name_t name,
gss_buffer_t attr,
int *authenticated,
int *complete,
gss_buffer_t value,
gss_buffer_t display_value,
int *more);
OM_uint32 KRB5_CALLCONV
krb5_gss_set_name_attribute(OM_uint32 *minor_status,
gss_name_t name,
int complete,
gss_buffer_t attr,
gss_buffer_t value);
OM_uint32 KRB5_CALLCONV
krb5_gss_delete_name_attribute(OM_uint32 *minor_status,
gss_name_t name,
gss_buffer_t attr);
OM_uint32 KRB5_CALLCONV
krb5_gss_export_name_composite(OM_uint32 *minor_status,
gss_name_t name,
gss_buffer_t exp_composite_name);
OM_uint32 KRB5_CALLCONV
krb5_gss_map_name_to_any(OM_uint32 *minor_status,
gss_name_t name,
int authenticated,
gss_buffer_t type_id,
gss_any_t *output);
OM_uint32 KRB5_CALLCONV
krb5_gss_release_any_name_mapping(OM_uint32 *minor_status,
gss_name_t name,
gss_buffer_t type_id,
gss_any_t *input);
OM_uint32 KRB5_CALLCONV
krb5_gss_pseudo_random(OM_uint32 *minor_status,
gss_ctx_id_t context,
int prf_key,
const gss_buffer_t prf_in,
ssize_t desired_output_len,
gss_buffer_t prf_out);
OM_uint32 KRB5_CALLCONV
krb5_gss_store_cred(OM_uint32 *minor_status,
gss_cred_id_t input_cred_handle,
gss_cred_usage_t cred_usage,
const gss_OID desired_mech,
OM_uint32 overwrite_cred,
OM_uint32 default_cred,
gss_OID_set *elements_stored,
gss_cred_usage_t *cred_usage_stored);
/* s4u_gss_glue.c */
OM_uint32
kg_compose_deleg_cred(OM_uint32 *minor_status,
krb5_gss_cred_id_t impersonator_cred,
krb5_creds *subject_creds,
OM_uint32 time_req,
krb5_gss_cred_id_t *output_cred,
OM_uint32 *time_rec,
krb5_context context);
/*
* These take unglued krb5-mech-specific contexts.
*/
#define GSS_KRB5_GET_TKT_FLAGS_OID_LENGTH 11
#define GSS_KRB5_GET_TKT_FLAGS_OID "\x2a\x86\x48\x86\xf7\x12\x01\x02\x02\x05\x01"
OM_uint32 gss_krb5int_get_tkt_flags
(OM_uint32 *minor_status,
const gss_ctx_id_t context_handle,
const gss_OID desired_object,
gss_buffer_set_t *data_set);
#define GSS_KRB5_COPY_CCACHE_OID_LENGTH 11
#define GSS_KRB5_COPY_CCACHE_OID "\x2a\x86\x48\x86\xf7\x12\x01\x02\x02\x05\x02"
OM_uint32 gss_krb5int_copy_ccache
(OM_uint32 *minor_status,
gss_cred_id_t *cred_handle,
const gss_OID desired_oid,
const gss_buffer_t value);
#define GSS_KRB5_CCACHE_NAME_OID_LENGTH 11
#define GSS_KRB5_CCACHE_NAME_OID "\x2a\x86\x48\x86\xf7\x12\x01\x02\x02\x05\x03"
/*
 * Argument block for the GSS_KRB5_CCACHE_NAME_OID operation (handled by
 * gss_krb5int_ccache_name below).  "name" is the credential-cache name to
 * establish; "out_name", when non-NULL, presumably receives the previous
 * name -- confirm against the gss_krb5_ccache_name() glue code.
 */
struct krb5_gss_ccache_name_req {
    const char *name;       /* ccache name to set */
    const char **out_name;  /* optional: where to return the old name */
};
OM_uint32
gss_krb5int_ccache_name(OM_uint32 *minor_status, const gss_OID, const gss_OID,
const gss_buffer_t);
#define GSS_KRB5_INQ_SSPI_SESSION_KEY_OID_LENGTH 11
#define GSS_KRB5_INQ_SSPI_SESSION_KEY_OID "\x2a\x86\x48\x86\xf7\x12\x01\x02\x02\x05\x05"
OM_uint32
gss_krb5int_inq_session_key(OM_uint32 *, const gss_ctx_id_t, const gss_OID, gss_buffer_set_t *);
#define GSS_KRB5_SET_ALLOWABLE_ENCTYPES_OID_LENGTH 11
#define GSS_KRB5_SET_ALLOWABLE_ENCTYPES_OID "\x2a\x86\x48\x86\xf7\x12\x01\x02\x02\x05\x04"
/*
 * Argument block for the GSS_KRB5_SET_ALLOWABLE_ENCTYPES_OID operation
 * (handled by gss_krb5int_set_allowable_enctypes below): a list of
 * num_ktypes enctypes to which the credential should be restricted.
 */
struct krb5_gss_set_allowable_enctypes_req {
    OM_uint32 num_ktypes;   /* number of entries in ktypes */
    krb5_enctype *ktypes;   /* enctype array */
};
OM_uint32
gss_krb5int_set_allowable_enctypes(OM_uint32 *minor_status,
gss_cred_id_t *cred,
const gss_OID desired_oid,
const gss_buffer_t value);
#define GSS_KRB5_EXPORT_LUCID_SEC_CONTEXT_OID_LENGTH 11
#define GSS_KRB5_EXPORT_LUCID_SEC_CONTEXT_OID "\x2a\x86\x48\x86\xf7\x12\x01\x02\x02\x05\x06"
OM_uint32
gss_krb5int_export_lucid_sec_context(OM_uint32 *minor_status,
const gss_ctx_id_t context_handle,
const gss_OID desired_object,
gss_buffer_set_t *data_set);
#define GSS_KRB5_FREE_LUCID_SEC_CONTEXT_OID_LENGTH 11
#define GSS_KRB5_FREE_LUCID_SEC_CONTEXT_OID "\x2a\x86\x48\x86\xf7\x12\x01\x02\x02\x05\x07"
OM_uint32
gss_krb5int_free_lucid_sec_context(OM_uint32 *, const gss_OID,
const gss_OID, gss_buffer_t);
extern k5_mutex_t kg_kdc_flag_mutex;
krb5_error_code krb5_gss_init_context (krb5_context *ctxp);
#define GSS_KRB5_USE_KDC_CONTEXT_OID_LENGTH 11
#define GSS_KRB5_USE_KDC_CONTEXT_OID "\x2a\x86\x48\x86\xf7\x12\x01\x02\x02\x05\x08"
OM_uint32 krb5int_gss_use_kdc_context(OM_uint32 *, const gss_OID,
const gss_OID, gss_buffer_t);
krb5_error_code krb5_gss_use_kdc_context(void);
#define GSS_KRB5_REGISTER_ACCEPTOR_IDENTITY_OID_LENGTH 11
#define GSS_KRB5_REGISTER_ACCEPTOR_IDENTITY_OID "\x2a\x86\x48\x86\xf7\x12\x01\x02\x02\x05\x09"
OM_uint32
gss_krb5int_register_acceptor_identity(OM_uint32 *, const gss_OID, const gss_OID, gss_buffer_t);
#define GSS_KRB5_EXTRACT_AUTHZ_DATA_FROM_SEC_CONTEXT_OID_LENGTH 11
#define GSS_KRB5_EXTRACT_AUTHZ_DATA_FROM_SEC_CONTEXT_OID "\x2a\x86\x48\x86\xf7\x12\x01\x02\x02\x05\x0a"
OM_uint32
gss_krb5int_extract_authz_data_from_sec_context(OM_uint32 *minor_status,
const gss_ctx_id_t context_handle,
const gss_OID desired_object,
gss_buffer_set_t *ad_data);
#define GSS_KRB5_SET_CRED_RCACHE_OID_LENGTH 11
#define GSS_KRB5_SET_CRED_RCACHE_OID "\x2a\x86\x48\x86\xf7\x12\x01\x02\x02\x05\x0b"
OM_uint32
gss_krb5int_set_cred_rcache(OM_uint32 *, gss_cred_id_t *, const gss_OID, const gss_buffer_t);
#define GSS_KRB5_EXTRACT_AUTHTIME_FROM_SEC_CONTEXT_OID_LENGTH 11
#define GSS_KRB5_EXTRACT_AUTHTIME_FROM_SEC_CONTEXT_OID "\x2a\x86\x48\x86\xf7\x12\x01\x02\x02\x05\x0c"
OM_uint32
gss_krb5int_extract_authtime_from_sec_context(OM_uint32 *,
const gss_ctx_id_t,
const gss_OID,
gss_buffer_set_t *);
#define GSS_KRB5_IMPORT_CRED_OID_LENGTH 11
#define GSS_KRB5_IMPORT_CRED_OID "\x2a\x86\x48\x86\xf7\x12\x01\x02\x02\x05\x0d"
/*
 * Argument block for the GSS_KRB5_IMPORT_CRED_OID operation (handled by
 * gss_krb5int_import_cred below): a ccache and/or keytab from which to
 * construct a GSS credential.  keytab_principal presumably selects which
 * principal to use from the keytab -- confirm against the handler.
 */
struct krb5_gss_import_cred_req {
    krb5_ccache id;                  /* ccache to import (may be NULL) */
    krb5_principal keytab_principal; /* principal within the keytab */
    krb5_keytab keytab;              /* keytab to import (may be NULL) */
};
OM_uint32
gss_krb5int_import_cred(OM_uint32 *minor_status,
gss_cred_id_t *cred,
const gss_OID desired_oid,
const gss_buffer_t value);
#ifdef _GSS_STATIC_LINK
int gss_krb5int_lib_init(void);
void gss_krb5int_lib_fini(void);
#endif /* _GSS_STATIC_LINK */
OM_uint32 gss_krb5int_initialize_library(void);
void gss_krb5int_cleanup_library(void);
/* For error message handling. */
/* Returns a shared string, not a private copy! */
extern char *
krb5_gss_get_error_message(OM_uint32 minor_code);
extern void
krb5_gss_save_error_string(OM_uint32 minor_code, char *msg);
extern void
krb5_gss_save_error_message(OM_uint32 minor_code, const char *format, ...)
#if !defined(__cplusplus) && (__GNUC__ > 2)
__attribute__((__format__(__printf__, 2, 3)))
#endif
;
extern void
krb5_gss_save_error_info(OM_uint32 minor_code, krb5_context ctx);
#define get_error_message krb5_gss_get_error_message
#define save_error_string krb5_gss_save_error_string
#define save_error_message krb5_gss_save_error_message
#ifdef KRB5_KERNEL
/* Error messages aren't needed in the kernel, so reduce dependencies. */
#define save_error_info(x,y)
#else
#define save_error_info krb5_gss_save_error_info
#endif
extern void krb5_gss_delete_error_info(void *p);
/* Prefix concatenated with Kerberos encryption type */
#define GSS_KRB5_SESSION_KEY_ENCTYPE_OID_LENGTH 10
#define GSS_KRB5_SESSION_KEY_ENCTYPE_OID "\x2a\x86\x48\x86\xf7\x12\x01\x02\x02\x04"
/* IAKERB */
OM_uint32 KRB5_CALLCONV
iakerb_gss_init_sec_context(OM_uint32 *minor_status,
gss_cred_id_t claimant_cred_handle,
gss_ctx_id_t *context_handle,
gss_name_t target_name,
gss_OID mech_type,
OM_uint32 req_flags,
OM_uint32 time_req,
gss_channel_bindings_t input_chan_bindings,
gss_buffer_t input_token,
gss_OID *actual_mech_type,
gss_buffer_t output_token,
OM_uint32 *ret_flags,
OM_uint32 *time_rec);
OM_uint32 KRB5_CALLCONV
iakerb_gss_accept_sec_context(OM_uint32 *minor_status,
gss_ctx_id_t *context_handler,
gss_cred_id_t verifier_cred_handle,
gss_buffer_t input_token,
gss_channel_bindings_t input_chan_bindings,
gss_name_t *src_name,
gss_OID *mech_type,
gss_buffer_t output_token,
OM_uint32 *ret_flags,
OM_uint32 *time_rec,
gss_cred_id_t *delegated_cred_handle);
OM_uint32 KRB5_CALLCONV
iakerb_gss_delete_sec_context(OM_uint32 *minor_status,
gss_ctx_id_t *context_handle,
gss_buffer_t output_token);
krb5_error_code
iakerb_make_finished(krb5_context context,
krb5_key key,
const krb5_data *conv,
krb5_data **finished);
krb5_error_code
iakerb_verify_finished(krb5_context context,
krb5_key key,
const krb5_data *conv,
const krb5_data *finished);
/*
 * Transfer the contents of *input_k5data into *output_buffer and invalidate
 * the source (*input_k5data is reset to empty_data() on return).
 *
 * On Unix this is a simple pointer transfer: the gss_buffer takes ownership
 * of the krb5-allocated memory.  On Windows (or with DEBUG_GSSALLOC), the
 * krb5 and GSS allocators differ, so the bytes are copied into gssalloc
 * memory and the krb5 copy is freed.
 *
 * Returns 0 on success, or ENOMEM if the copy allocation fails.  On failure
 * the output buffer is left empty (length 0, value NULL) rather than with a
 * nonzero length and a NULL value.
 */
static inline krb5_error_code
data_to_gss(krb5_data *input_k5data, gss_buffer_t output_buffer)
{
    krb5_error_code code = 0;

    output_buffer->length = input_k5data->length;
#if defined(_WIN32) || defined(DEBUG_GSSALLOC)
    if (output_buffer->length > 0) {
        output_buffer->value = gssalloc_malloc(output_buffer->length);
        if (output_buffer->value != NULL) {
            memcpy(output_buffer->value, input_k5data->data,
                   output_buffer->length);
        } else {
            /* Keep the buffer self-consistent: no length without a value. */
            output_buffer->length = 0;
            code = ENOMEM;
        }
    } else {
        output_buffer->value = NULL;
    }
    /* The source bytes were either copied or empty; release them. */
    free(input_k5data->data);
#else
    /* Same allocator on both sides; just steal the pointer. */
    output_buffer->value = input_k5data->data;
#endif
    *input_k5data = empty_data();
    return code;
}
#define KRB5_GSS_EXTS_IAKERB_FINISHED 1
/* Credential store extensions */
#define KRB5_CS_CLI_KEYTAB_URN "client_keytab"
#define KRB5_CS_KEYTAB_URN "keytab"
#define KRB5_CS_CCACHE_URN "ccache"
#define KRB5_CS_RCACHE_URN "rcache"
OM_uint32
kg_value_from_cred_store(gss_const_key_value_set_t cred_store,
const char *type, const char **value);
OM_uint32 KRB5_CALLCONV
krb5_gss_acquire_cred_from(
OM_uint32 *, /* minor_status */
const gss_name_t, /* desired_name */
OM_uint32, /* time_req */
const gss_OID_set, /* desired_mechs */
gss_cred_usage_t, /* cred_usage */
gss_const_key_value_set_t, /* cred_store */
gss_cred_id_t *, /* output_cred_handle */
gss_OID_set *, /* actual_mechs */
OM_uint32 *); /* time_rec */
OM_uint32 KRB5_CALLCONV
krb5_gss_store_cred_into(
OM_uint32 *, /* minor_status */
gss_cred_id_t, /* input_cred_handle */
gss_cred_usage_t, /* input_usage */
const gss_OID, /* desired_mech */
OM_uint32, /* overwrite_cred */
OM_uint32, /* default_cred */
gss_const_key_value_set_t, /* cred_store */
gss_OID_set *, /* elements_stored */
gss_cred_usage_t *); /* cred_usage_stored */
OM_uint32 KRB5_CALLCONV
krb5_gss_export_cred(OM_uint32 *minor_status, gss_cred_id_t cred_handle,
gss_buffer_t token);
OM_uint32 KRB5_CALLCONV
krb5_gss_import_cred(OM_uint32 *minor_status, gss_buffer_t token,
gss_cred_id_t *cred_handle);
OM_uint32 KRB5_CALLCONV
iakerb_gss_process_context_token(OM_uint32 *minor_status,
const gss_ctx_id_t context_handle,
const gss_buffer_t token_buffer);
OM_uint32 KRB5_CALLCONV
iakerb_gss_context_time(OM_uint32 *minor_status, gss_ctx_id_t context_handle,
OM_uint32 *time_rec);
OM_uint32 KRB5_CALLCONV
iakerb_gss_inquire_context(OM_uint32 *minor_status,
gss_ctx_id_t context_handle, gss_name_t *src_name,
gss_name_t *targ_name, OM_uint32 *lifetime_rec,
gss_OID *mech_type, OM_uint32 *ctx_flags,
int *locally_initiated, int *opened);
OM_uint32 KRB5_CALLCONV
iakerb_gss_get_mic(OM_uint32 *minor_status, gss_ctx_id_t context_handle,
gss_qop_t qop_req, gss_buffer_t message_buffer,
gss_buffer_t message_token);
OM_uint32 KRB5_CALLCONV
iakerb_gss_get_mic_iov(OM_uint32 *minor_status, gss_ctx_id_t context_handle,
gss_qop_t qop_req, gss_iov_buffer_desc *iov,
int iov_count);
OM_uint32 KRB5_CALLCONV
iakerb_gss_get_mic_iov_length(OM_uint32 *minor_status,
gss_ctx_id_t context_handle, gss_qop_t qop_req,
gss_iov_buffer_desc *iov, int iov_count);
OM_uint32 KRB5_CALLCONV
iakerb_gss_verify_mic(OM_uint32 *minor_status, gss_ctx_id_t context_handle,
gss_buffer_t msg_buffer, gss_buffer_t token_buffer,
gss_qop_t *qop_state);
OM_uint32 KRB5_CALLCONV
iakerb_gss_verify_mic_iov(OM_uint32 *minor_status, gss_ctx_id_t context_handle,
gss_qop_t *qop_state, gss_iov_buffer_desc *iov,
int iov_count);
OM_uint32 KRB5_CALLCONV
iakerb_gss_wrap(OM_uint32 *minor_status, gss_ctx_id_t context_handle,
int conf_req_flag, gss_qop_t qop_req,
gss_buffer_t input_message_buffer, int *conf_state,
gss_buffer_t output_message_buffer);
OM_uint32 KRB5_CALLCONV
iakerb_gss_wrap_iov(OM_uint32 *minor_status, gss_ctx_id_t context_handle,
int conf_req_flag, gss_qop_t qop_req, int *conf_state,
gss_iov_buffer_desc *iov, int iov_count);
OM_uint32 KRB5_CALLCONV
iakerb_gss_wrap_iov_length(OM_uint32 *minor_status,
gss_ctx_id_t context_handle, int conf_req_flag,
gss_qop_t qop_req, int *conf_state,
gss_iov_buffer_desc *iov, int iov_count);
OM_uint32 KRB5_CALLCONV
iakerb_gss_unwrap(OM_uint32 *minor_status, gss_ctx_id_t context_handle,
gss_buffer_t input_message_buffer,
gss_buffer_t output_message_buffer, int *conf_state,
gss_qop_t *qop_state);
OM_uint32 KRB5_CALLCONV
iakerb_gss_unwrap_iov(OM_uint32 *minor_status, gss_ctx_id_t context_handle,
int *conf_state, gss_qop_t *qop_state,
gss_iov_buffer_desc *iov, int iov_count);
OM_uint32 KRB5_CALLCONV
iakerb_gss_wrap_size_limit(OM_uint32 *minor_status,
gss_ctx_id_t context_handle, int conf_req_flag,
gss_qop_t qop_req, OM_uint32 req_output_size,
OM_uint32 *max_input_size);
#ifndef LEAN_CLIENT
OM_uint32 KRB5_CALLCONV
iakerb_gss_export_sec_context(OM_uint32 *minor_status,
gss_ctx_id_t *context_handle,
gss_buffer_t interprocess_token);
#endif /* LEAN_CLIENT */
OM_uint32 KRB5_CALLCONV
iakerb_gss_inquire_sec_context_by_oid(OM_uint32 *minor_status,
const gss_ctx_id_t context_handle,
const gss_OID desired_object,
gss_buffer_set_t *data_set);
OM_uint32 KRB5_CALLCONV
iakerb_gss_set_sec_context_option(OM_uint32 *minor_status,
gss_ctx_id_t *context_handle,
const gss_OID desired_object,
const gss_buffer_t value);
OM_uint32 KRB5_CALLCONV
iakerb_gss_pseudo_random(OM_uint32 *minor_status, gss_ctx_id_t context_handle,
int prf_key, const gss_buffer_t prf_in,
ssize_t desired_output_len, gss_buffer_t prf_out);
/* Magic string to identify exported krb5 GSS credentials. Increment this if
* the format changes. */
#define CRED_EXPORT_MAGIC "K5C1"
#endif /* _GSSAPIP_KRB5_H_ */
/* ---- concatenation artifact: a second copy of this header follows ---- */
/* -*- mode: c; indent-tabs-mode: nil -*- */
/*
* Copyright 2000, 2008 by the Massachusetts Institute of Technology.
* All Rights Reserved.
*
* Export of this software from the United States of America may
* require a specific license from the United States Government.
* It is the responsibility of any person or organization contemplating
* export to obtain such a license before exporting.
*
* WITHIN THAT CONSTRAINT, permission to use, copy, modify, and
* distribute this software and its documentation for any purpose and
* without fee is hereby granted, provided that the above copyright
* notice appear in all copies and that both that copyright notice and
* this permission notice appear in supporting documentation, and that
* the name of M.I.T. not be used in advertising or publicity pertaining
* to distribution of the software without specific, written prior
* permission. Furthermore if you modify this software you must label
* your software as modified software and not distribute it in such a
* fashion that it might be confused with the original M.I.T. software.
* M.I.T. makes no representations about the suitability of
* this software for any purpose. It is provided "as is" without express
* or implied warranty.
*
*/
/*
* Copyright 1993 by OpenVision Technologies, Inc.
*
* Permission to use, copy, modify, distribute, and sell this software
* and its documentation for any purpose is hereby granted without fee,
* provided that the above copyright notice appears in all copies and
* that both that copyright notice and this permission notice appear in
* supporting documentation, and that the name of OpenVision not be used
* in advertising or publicity pertaining to distribution of the software
* without specific, written prior permission. OpenVision makes no
* representations about the suitability of this software for any
* purpose. It is provided "as is" without express or implied warranty.
*
* OPENVISION DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
* INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
* EVENT SHALL OPENVISION BE LIABLE FOR ANY SPECIAL, INDIRECT OR
* CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF
* USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
* OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef _GSSAPIP_KRB5_H_
#define _GSSAPIP_KRB5_H_
#include <k5-int.h>
#ifdef HAVE_MEMORY_H
#include <memory.h>
#endif
/* work around sunos braindamage */
#ifdef major
#undef major
#endif
#ifdef minor
#undef minor
#endif
#include "gssapiP_generic.h"
/* The include of gssapi_krb5.h will dtrt with the above #defines in
* effect.
*/
#include "gssapi_krb5.h"
#include "gssapi_err_krb5.h"
#include "gssapi_ext.h"
/* for debugging */
#undef CFX_EXERCISE
/** constants **/
#define GSS_MECH_KRB5_OID_LENGTH 9
#define GSS_MECH_KRB5_OID "\052\206\110\206\367\022\001\002\002"
#define GSS_MECH_KRB5_OLD_OID_LENGTH 5
#define GSS_MECH_KRB5_OLD_OID "\053\005\001\005\002"
/* Incorrect krb5 mech OID emitted by MS. */
#define GSS_MECH_KRB5_WRONG_OID_LENGTH 9
#define GSS_MECH_KRB5_WRONG_OID "\052\206\110\202\367\022\001\002\002"
/* IAKERB variant */
#define GSS_MECH_IAKERB_OID_LENGTH 6
#define GSS_MECH_IAKERB_OID "\053\006\001\005\002\005"
extern const gss_OID_set_desc * const kg_all_mechs;
#define CKSUMTYPE_KG_CB 0x8003
#define KG_TOK_CTX_AP_REQ 0x0100
#define KG_TOK_CTX_AP_REP 0x0200
#define KG_TOK_CTX_ERROR 0x0300
#define KG_TOK_SIGN_MSG 0x0101
#define KG_TOK_SEAL_MSG 0x0201
#define KG_TOK_MIC_MSG 0x0101
#define KG_TOK_WRAP_MSG 0x0201
#define KG_TOK_DEL_CTX 0x0102
#define KG2_TOK_MIC_MSG 0x0404
#define KG2_TOK_WRAP_MSG 0x0504
#define KG2_TOK_DEL_CTX 0x0405
#define IAKERB_TOK_PROXY 0x0501
#define KRB5_GSS_FOR_CREDS_OPTION 1
#define KG2_RESP_FLAG_ERROR 0x0001
#define KG2_RESP_FLAG_DELEG_OK 0x0002
/** CFX flags **/
#define FLAG_SENDER_IS_ACCEPTOR 0x01
#define FLAG_WRAP_CONFIDENTIAL 0x02
#define FLAG_ACCEPTOR_SUBKEY 0x04
/* These are to be stored in little-endian order, i.e., des-mac is
stored as 02 00. */
/*
 * Signing-algorithm identifiers used in the pre-CFX (RFC 1964-era) token
 * formats.  Per the comment above, these are stored in little-endian order
 * on the wire (e.g. des-mac is emitted as 02 00).
 */
enum sgn_alg {
    SGN_ALG_DES_MAC_MD5 = 0x0000,
    SGN_ALG_MD2_5 = 0x0001,
    SGN_ALG_DES_MAC = 0x0002,
    SGN_ALG_3 = 0x0003,                 /* not published */
    SGN_ALG_HMAC_MD5 = 0x0011,          /* microsoft w2k; */
    SGN_ALG_HMAC_SHA1_DES3_KD = 0x0004
};
/*
 * Sealing (encryption) algorithm identifiers for the pre-CFX token formats.
 * SEAL_ALG_NONE (0xffff) presumably denotes an integrity-only token with no
 * confidentiality -- confirm against the wrap/unwrap token code.
 */
enum seal_alg {
    SEAL_ALG_NONE = 0xffff,
    SEAL_ALG_DES = 0x0000,
    SEAL_ALG_1 = 0x0001,                /* not published */
    SEAL_ALG_MICROSOFT_RC4 = 0x0010,    /* microsoft w2k; */
    SEAL_ALG_DES3KD = 0x0002
};
/* for 3DES */
#define KG_USAGE_SEAL 22
#define KG_USAGE_SIGN 23
#define KG_USAGE_SEQ 24
/* for draft-ietf-krb-wg-gssapi-cfx-01 */
#define KG_USAGE_ACCEPTOR_SEAL 22
#define KG_USAGE_ACCEPTOR_SIGN 23
#define KG_USAGE_INITIATOR_SEAL 24
#define KG_USAGE_INITIATOR_SIGN 25
enum qop {
GSS_KRB5_INTEG_C_QOP_MD5 = 0x0001, /* *partial* MD5 = "MD2.5" */
GSS_KRB5_INTEG_C_QOP_DES_MD5 = 0x0002,
GSS_KRB5_INTEG_C_QOP_DES_MAC = 0x0003,
GSS_KRB5_INTEG_C_QOP_HMAC_SHA1 = 0x0004,
GSS_KRB5_INTEG_C_QOP_MASK = 0x00ff,
GSS_KRB5_CONF_C_QOP_DES = 0x0100,
GSS_KRB5_CONF_C_QOP_DES3_KD = 0x0200,
GSS_KRB5_CONF_C_QOP_MASK = 0xff00
};
/** internal types **/
/*
 * Internal representation of a GSS name: the parsed krb5 principal plus
 * the service/host strings it was built from (all immutable once created),
 * and an optional authorization-data context whose access is serialized by
 * "lock".
 */
typedef struct _krb5_gss_name_rec {
    krb5_principal princ;       /* immutable */
    char *service;              /* immutable */
    char *host;                 /* immutable */
    k5_mutex_t lock;            /* protects ad_context only for now */
    krb5_authdata_context ad_context;
} krb5_gss_name_rec, *krb5_gss_name_t;
/*
 * Internal representation of a GSS credential.  Holds both acceptor-side
 * state (keytab, replay cache) and initiator-side state (ccache, client
 * keytab, expiry/refresh times).  Concurrent access is serialized by
 * "lock".
 */
typedef struct _krb5_gss_cred_id_rec {
    /* protect against simultaneous accesses */
    k5_mutex_t lock;
    /* name/type of credential */
    gss_cred_usage_t usage;
    krb5_gss_name_t name;
    krb5_principal impersonator;        /* set for S4U2Self-derived creds -- confirm */
    unsigned int default_identity : 1;  /* cred refers to the default identity */
    unsigned int iakerb_mech : 1;       /* cred acquired for the IAKERB mech */
    unsigned int destroy_ccache : 1;    /* destroy ccache when cred released -- confirm */
    unsigned int suppress_ci_flags : 1;
    /* keytab (accept) data */
    krb5_keytab keytab;
    krb5_rcache rcache;
    /* ccache (init) data */
    krb5_ccache ccache;
    krb5_keytab client_keytab;
    krb5_boolean have_tgt;
    krb5_timestamp expire;
    krb5_timestamp refresh_time;        /* see kg_cred_time_to_refresh() */
    krb5_enctype *req_enctypes; /* limit negotiated enctypes to this list */
    char *password;
} krb5_gss_cred_id_rec, *krb5_gss_cred_id_t;
/*
 * Extension data threaded through context establishment.  Currently holds
 * only IAKERB state: "conv" is presumably the accumulated conversation
 * (used by iakerb_make_finished/iakerb_verify_finished above), and
 * "verified" records whether the finished check has succeeded -- confirm
 * against the IAKERB implementation.
 */
typedef struct _krb5_gss_ctx_ext_rec {
    struct {
        krb5_data *conv;
        int verified;
    } iakerb;
} krb5_gss_ctx_ext_rec, *krb5_gss_ctx_ext_t;
/*
 * Internal representation of an established (or in-progress) GSS security
 * context for the krb5 mechanism.  Carries the negotiated keys, algorithm
 * selections for both the RFC 1964 and RFC 4121 token formats, sequence
 * state, and the underlying krb5 context/auth-context.
 */
typedef struct _krb5_gss_ctx_id_rec {
    krb5_magic magic;
    unsigned int initiate : 1;   /* nonzero if initiating, zero if accepting */
    unsigned int established : 1;
    unsigned int have_acceptor_subkey : 1;
    unsigned int seed_init : 1;  /* XXX tested but never actually set */
    unsigned int terminated : 1;
    OM_uint32 gss_flags;         /* GSS_C_* request/result flags */
    unsigned char seed[16];
    krb5_gss_name_t here;        /* the local party's name */
    krb5_gss_name_t there;       /* the peer's name */
    krb5_key subkey;    /* One of two potential keys to use with RFC 4121
                         * packets; this key must always be set. */
    int signalg;                 /* enum sgn_alg value (RFC 1964 tokens) */
    size_t cksum_size;
    int sealalg;                 /* enum seal_alg value (RFC 1964 tokens) */
    krb5_key enc;       /* RFC 1964 encryption key; seq xored with a constant
                         * for DES, seq for other RFC 1964 enctypes */
    krb5_key seq;       /* RFC 1964 sequencing key */
    krb5_ticket_times krb_times;
    krb5_flags krb_flags;
    /* XXX these used to be signed.  the old spec is inspecific, and
       the new spec specifies unsigned.  I don't believe that the change
       affects the wire encoding. */
    uint64_t seq_send;           /* next outgoing sequence number */
    uint64_t seq_recv;           /* next expected incoming sequence number */
    g_seqnum_state seqstate;
    krb5_context k5_context;
    krb5_auth_context auth_context;
    gss_OID_desc *mech_used;
    /* Protocol spec revision for sending packets
       0 => RFC 1964 with 3DES and RC4 enhancements
       1 => RFC 4121
       No others defined so far.  It is always permitted to receive
       tokens in RFC 4121 format.  If enc is non-null, receiving RFC
       1964 tokens is permitted. */
    int proto;
    krb5_cksumtype cksumtype;    /* for "main" subkey */
    krb5_key acceptor_subkey;    /* CFX only */
    krb5_cksumtype acceptor_subkey_cksumtype;
    int cred_rcache;             /* did we get rcache from creds? */
    krb5_authdata **authdata;
} krb5_gss_ctx_id_rec, *krb5_gss_ctx_id_t;
extern g_set kg_vdb;
#ifndef LEAN_CLIENT
extern k5_mutex_t gssint_krb5_keytab_lock;
#endif /* LEAN_CLIENT */
/** helper functions **/
OM_uint32 kg_get_defcred
(OM_uint32 *minor_status,
gss_cred_id_t *cred);
krb5_error_code kg_checksum_channel_bindings
(krb5_context context, gss_channel_bindings_t cb,
krb5_checksum *cksum);
krb5_error_code kg_make_seq_num (krb5_context context,
krb5_key key,
int direction, krb5_ui_4 seqnum, unsigned char *cksum,
unsigned char *buf);
krb5_error_code kg_get_seq_num (krb5_context context,
krb5_key key,
unsigned char *cksum, unsigned char *buf, int *direction,
krb5_ui_4 *seqnum);
krb5_error_code kg_make_seed (krb5_context context,
krb5_key key,
unsigned char *seed);
krb5_error_code
kg_setup_keys(krb5_context context,
krb5_gss_ctx_id_rec *ctx,
krb5_key subkey,
krb5_cksumtype *cksumtype);
int kg_confounder_size (krb5_context context, krb5_enctype enctype);
krb5_error_code kg_make_confounder (krb5_context context,
krb5_enctype enctype, unsigned char *buf);
krb5_error_code kg_encrypt (krb5_context context,
krb5_key key, int usage,
krb5_pointer iv,
krb5_const_pointer in,
krb5_pointer out,
unsigned int length);
/* Encrypt length bytes at ptr in place, with the given key and usage. If
* iv is not NULL, use it as the cipher state. */
krb5_error_code kg_encrypt_inplace(krb5_context context, krb5_key key,
int usage, krb5_pointer iv,
krb5_pointer ptr, unsigned int length);
krb5_error_code kg_encrypt_iov (krb5_context context,
int proto, int dce_style,
size_t ec, size_t rrc,
krb5_key key, int usage,
krb5_pointer iv,
gss_iov_buffer_desc *iov,
int iov_count);
krb5_error_code
kg_arcfour_docrypt (const krb5_keyblock *keyblock, int usage,
const unsigned char *kd_data, size_t kd_data_len,
const unsigned char *input_buf, size_t input_len,
unsigned char *output_buf);
krb5_error_code
kg_arcfour_docrypt_iov (krb5_context context,
const krb5_keyblock *keyblock, int usage,
const unsigned char *kd_data, size_t kd_data_len,
gss_iov_buffer_desc *iov,
int iov_count);
krb5_error_code kg_decrypt (krb5_context context,
krb5_key key, int usage,
krb5_pointer iv,
krb5_const_pointer in,
krb5_pointer out,
unsigned int length);
krb5_error_code kg_decrypt_iov (krb5_context context,
int proto, int dce_style,
size_t ec, size_t rrc,
krb5_key key, int usage,
krb5_pointer iv,
gss_iov_buffer_desc *iov,
int iov_count);
OM_uint32 kg_seal (OM_uint32 *minor_status,
gss_ctx_id_t context_handle,
int conf_req_flag,
gss_qop_t qop_req,
gss_buffer_t input_message_buffer,
int *conf_state,
gss_buffer_t output_message_buffer,
int toktype);
OM_uint32 kg_unseal (OM_uint32 *minor_status,
gss_ctx_id_t context_handle,
gss_buffer_t input_token_buffer,
gss_buffer_t message_buffer,
int *conf_state,
gss_qop_t *qop_state,
int toktype);
OM_uint32 kg_seal_size (OM_uint32 *minor_status,
gss_ctx_id_t context_handle,
int conf_req_flag,
gss_qop_t qop_req,
OM_uint32 output_size,
OM_uint32 *input_size);
krb5_error_code kg_ctx_size (krb5_context kcontext,
krb5_pointer arg,
size_t *sizep);
krb5_error_code kg_ctx_externalize (krb5_context kcontext,
krb5_pointer arg,
krb5_octet **buffer,
size_t *lenremain);
krb5_error_code kg_ctx_internalize (krb5_context kcontext,
krb5_pointer *argp,
krb5_octet **buffer,
size_t *lenremain);
OM_uint32 kg_sync_ccache_name (krb5_context context, OM_uint32 *minor_status);
OM_uint32 kg_caller_provided_ccache_name (OM_uint32 *minor_status,
int *out_caller_provided_name);
OM_uint32 kg_get_ccache_name (OM_uint32 *minor_status,
const char **out_name);
OM_uint32 kg_set_ccache_name (OM_uint32 *minor_status,
const char *name);
/* AEAD */
krb5_error_code gss_krb5int_make_seal_token_v3_iov(krb5_context context,
krb5_gss_ctx_id_rec *ctx,
int conf_req_flag,
int *conf_state,
gss_iov_buffer_desc *iov,
int iov_count,
int toktype);
OM_uint32 gss_krb5int_unseal_v3_iov(krb5_context context,
OM_uint32 *minor_status,
krb5_gss_ctx_id_rec *ctx,
gss_iov_buffer_desc *iov,
int iov_count,
int *conf_state,
gss_qop_t *qop_state,
int toktype);
gss_iov_buffer_t kg_locate_iov (gss_iov_buffer_desc *iov,
int iov_count,
OM_uint32 type);
gss_iov_buffer_t kg_locate_header_iov(gss_iov_buffer_desc *iov, int iov_count,
int toktype);
void kg_iov_msglen(gss_iov_buffer_desc *iov,
int iov_count,
size_t *data_length,
size_t *assoc_data_length);
void kg_release_iov(gss_iov_buffer_desc *iov,
int iov_count);
krb5_error_code kg_make_checksum_iov_v1(krb5_context context,
krb5_cksumtype type,
size_t token_cksum_len,
krb5_key seq,
krb5_key enc, /* for conf len */
krb5_keyusage sign_usage,
gss_iov_buffer_desc *iov,
int iov_count,
int toktype,
krb5_checksum *checksum);
krb5_error_code kg_make_checksum_iov_v3(krb5_context context,
krb5_cksumtype type,
size_t rrc,
krb5_key key,
krb5_keyusage sign_usage,
gss_iov_buffer_desc *iov,
int iov_count,
int toktype);
krb5_error_code kg_verify_checksum_iov_v3(krb5_context context,
krb5_cksumtype type,
size_t rrc,
krb5_key key,
krb5_keyusage sign_usage,
gss_iov_buffer_desc *iov,
int iov_count,
int toktype,
krb5_boolean *valid);
OM_uint32 kg_seal_iov (OM_uint32 *minor_status,
gss_ctx_id_t context_handle,
int conf_req_flag,
gss_qop_t qop_req,
int *conf_state,
gss_iov_buffer_desc *iov,
int iov_count,
int toktype);
OM_uint32 kg_unseal_iov (OM_uint32 *minor_status,
gss_ctx_id_t context_handle,
int *conf_state,
gss_qop_t *qop_state,
gss_iov_buffer_desc *iov,
int iov_count,
int toktype);
OM_uint32 kg_seal_iov_length(OM_uint32 *minor_status,
gss_ctx_id_t context_handle,
int conf_req_flag,
gss_qop_t qop_req,
int *conf_state,
gss_iov_buffer_desc *iov,
int iov_count,
int toktype);
krb5_cryptotype kg_translate_flag_iov(OM_uint32 type);
OM_uint32 kg_fixup_padding_iov(OM_uint32 *minor_status,
gss_iov_buffer_desc *iov,
int iov_count);
krb5_boolean kg_integ_only_iov(gss_iov_buffer_desc *iov, int iov_count);
krb5_error_code kg_allocate_iov(gss_iov_buffer_t iov, size_t size);
krb5_error_code
krb5_to_gss_cred(krb5_context context,
krb5_creds *creds,
krb5_gss_cred_id_t *out_cred);
krb5_boolean
kg_cred_time_to_refresh(krb5_context context, krb5_gss_cred_id_rec *cred);
void
kg_cred_set_initial_refresh(krb5_context context, krb5_gss_cred_id_rec *cred,
krb5_ticket_times *times);
OM_uint32
kg_cred_resolve(OM_uint32 *minor_status, krb5_context context,
gss_cred_id_t cred_handle, gss_name_t target_name);
/** declarations of internal name mechanism functions **/
OM_uint32 KRB5_CALLCONV krb5_gss_acquire_cred
(OM_uint32*, /* minor_status */
gss_name_t, /* desired_name */
OM_uint32, /* time_req */
gss_OID_set, /* desired_mechs */
gss_cred_usage_t, /* cred_usage */
gss_cred_id_t*, /* output_cred_handle */
gss_OID_set*, /* actual_mechs */
OM_uint32* /* time_rec */
);
OM_uint32 KRB5_CALLCONV iakerb_gss_acquire_cred
(OM_uint32*, /* minor_status */
gss_name_t, /* desired_name */
OM_uint32, /* time_req */
gss_OID_set, /* desired_mechs */
gss_cred_usage_t, /* cred_usage */
gss_cred_id_t*, /* output_cred_handle */
gss_OID_set*, /* actual_mechs */
OM_uint32* /* time_rec */
);
OM_uint32 KRB5_CALLCONV
krb5_gss_acquire_cred_with_password(
OM_uint32 *minor_status,
const gss_name_t desired_name,
const gss_buffer_t password,
OM_uint32 time_req,
const gss_OID_set desired_mechs,
int cred_usage,
gss_cred_id_t *output_cred_handle,
gss_OID_set *actual_mechs,
OM_uint32 *time_rec);
OM_uint32 KRB5_CALLCONV
iakerb_gss_acquire_cred_with_password(
OM_uint32 *minor_status,
const gss_name_t desired_name,
const gss_buffer_t password,
OM_uint32 time_req,
const gss_OID_set desired_mechs,
int cred_usage,
gss_cred_id_t *output_cred_handle,
gss_OID_set *actual_mechs,
OM_uint32 *time_rec);
OM_uint32 KRB5_CALLCONV krb5_gss_release_cred
(OM_uint32*, /* minor_status */
gss_cred_id_t* /* cred_handle */
);
OM_uint32 KRB5_CALLCONV krb5_gss_init_sec_context
(OM_uint32*, /* minor_status */
gss_cred_id_t, /* claimant_cred_handle */
gss_ctx_id_t*, /* context_handle */
gss_name_t, /* target_name */
gss_OID, /* mech_type */
OM_uint32, /* req_flags */
OM_uint32, /* time_req */
gss_channel_bindings_t,
/* input_chan_bindings */
gss_buffer_t, /* input_token */
gss_OID*, /* actual_mech_type */
gss_buffer_t, /* output_token */
OM_uint32*, /* ret_flags */
OM_uint32* /* time_rec */
);
OM_uint32 krb5_gss_init_sec_context_ext
(OM_uint32*, /* minor_status */
gss_cred_id_t, /* claimant_cred_handle */
gss_ctx_id_t*, /* context_handle */
gss_name_t, /* target_name */
gss_OID, /* mech_type */
OM_uint32, /* req_flags */
OM_uint32, /* time_req */
gss_channel_bindings_t,
/* input_chan_bindings */
gss_buffer_t, /* input_token */
gss_OID*, /* actual_mech_type */
gss_buffer_t, /* output_token */
OM_uint32*, /* ret_flags */
OM_uint32*, /* time_rec */
krb5_gss_ctx_ext_t /* exts */
);
#ifndef LEAN_CLIENT
OM_uint32 KRB5_CALLCONV krb5_gss_accept_sec_context
(OM_uint32*, /* minor_status */
gss_ctx_id_t*, /* context_handle */
gss_cred_id_t, /* verifier_cred_handle */
gss_buffer_t, /* input_token_buffer */
gss_channel_bindings_t,
/* input_chan_bindings */
gss_name_t*, /* src_name */
gss_OID*, /* mech_type */
gss_buffer_t, /* output_token */
OM_uint32*, /* ret_flags */
OM_uint32*, /* time_rec */
gss_cred_id_t* /* delegated_cred_handle */
);
OM_uint32 KRB5_CALLCONV krb5_gss_accept_sec_context_ext
(OM_uint32*, /* minor_status */
gss_ctx_id_t*, /* context_handle */
gss_cred_id_t, /* verifier_cred_handle */
gss_buffer_t, /* input_token_buffer */
gss_channel_bindings_t,
/* input_chan_bindings */
gss_name_t*, /* src_name */
gss_OID*, /* mech_type */
gss_buffer_t, /* output_token */
OM_uint32*, /* ret_flags */
OM_uint32*, /* time_rec */
gss_cred_id_t*, /* delegated_cred_handle */
krb5_gss_ctx_ext_t/*exts */
);
#endif /* LEAN_CLIENT */
OM_uint32 KRB5_CALLCONV krb5_gss_inquire_sec_context_by_oid
(OM_uint32*, /* minor_status */
const gss_ctx_id_t,
/* context_handle */
const gss_OID, /* desired_object */
gss_buffer_set_t* /* data_set */
);
OM_uint32 KRB5_CALLCONV krb5_gss_set_sec_context_option
(OM_uint32*, /* minor_status */
gss_ctx_id_t*, /* context_handle */
const gss_OID, /* desired_object */
const gss_buffer_t/* value */
);
OM_uint32 KRB5_CALLCONV krb5_gss_process_context_token
(OM_uint32*, /* minor_status */
gss_ctx_id_t, /* context_handle */
gss_buffer_t /* token_buffer */
);
OM_uint32 KRB5_CALLCONV krb5_gss_delete_sec_context
(OM_uint32*, /* minor_status */
gss_ctx_id_t*, /* context_handle */
gss_buffer_t /* output_token */
);
OM_uint32 KRB5_CALLCONV krb5_gss_context_time
(OM_uint32*, /* minor_status */
gss_ctx_id_t, /* context_handle */
OM_uint32* /* time_rec */
);
OM_uint32 KRB5_CALLCONV krb5_gss_display_status
(OM_uint32*, /* minor_status */
OM_uint32, /* status_value */
int, /* status_type */
gss_OID, /* mech_type */
OM_uint32*, /* message_context */
gss_buffer_t /* status_string */
);
OM_uint32 KRB5_CALLCONV krb5_gss_indicate_mechs
(OM_uint32*, /* minor_status */
gss_OID_set* /* mech_set */
);
OM_uint32 KRB5_CALLCONV krb5_gss_compare_name
(OM_uint32*, /* minor_status */
gss_name_t, /* name1 */
gss_name_t, /* name2 */
int* /* name_equal */
);
OM_uint32 KRB5_CALLCONV krb5_gss_display_name
(OM_uint32*, /* minor_status */
gss_name_t, /* input_name */
gss_buffer_t, /* output_name_buffer */
gss_OID* /* output_name_type */
);
OM_uint32 KRB5_CALLCONV krb5_gss_import_name
(OM_uint32*, /* minor_status */
gss_buffer_t, /* input_name_buffer */
gss_OID, /* input_name_type */
gss_name_t* /* output_name */
);
OM_uint32 KRB5_CALLCONV krb5_gss_release_name
(OM_uint32*, /* minor_status */
gss_name_t* /* input_name */
);
OM_uint32 KRB5_CALLCONV krb5_gss_inquire_cred
(OM_uint32 *, /* minor_status */
gss_cred_id_t, /* cred_handle */
gss_name_t *, /* name */
OM_uint32 *, /* lifetime */
gss_cred_usage_t*,/* cred_usage */
gss_OID_set * /* mechanisms */
);
OM_uint32 KRB5_CALLCONV krb5_gss_inquire_context
(OM_uint32*, /* minor_status */
gss_ctx_id_t, /* context_handle */
gss_name_t*, /* initiator_name */
gss_name_t*, /* acceptor_name */
OM_uint32*, /* lifetime_rec */
gss_OID*, /* mech_type */
OM_uint32*, /* ret_flags */
int*, /* locally_initiated */
int* /* open */
);
/* New V2 entry points */
OM_uint32 KRB5_CALLCONV krb5_gss_get_mic
(OM_uint32 *, /* minor_status */
gss_ctx_id_t, /* context_handle */
gss_qop_t, /* qop_req */
gss_buffer_t, /* message_buffer */
gss_buffer_t /* message_token */
);
OM_uint32 KRB5_CALLCONV krb5_gss_get_mic_iov
(OM_uint32 *, /* minor_status */
gss_ctx_id_t, /* context_handle */
gss_qop_t, /* qop_req */
gss_iov_buffer_desc *, /* iov */
int /* iov_count */
);
OM_uint32 KRB5_CALLCONV krb5_gss_get_mic_iov_length
(OM_uint32 *, /* minor_status */
gss_ctx_id_t, /* context_handle */
gss_qop_t, /* qop_req */
gss_iov_buffer_desc *, /* iov */
int /* iov_count */
);
OM_uint32 KRB5_CALLCONV krb5_gss_verify_mic
(OM_uint32 *, /* minor_status */
gss_ctx_id_t, /* context_handle */
gss_buffer_t, /* message_buffer */
gss_buffer_t, /* message_token */
gss_qop_t * /* qop_state */
);
OM_uint32 KRB5_CALLCONV krb5_gss_verify_mic_iov
(OM_uint32 *, /* minor_status */
gss_ctx_id_t, /* context_handle */
gss_qop_t *, /* qop_state */
gss_iov_buffer_desc *, /* iov */
int /* iov_count */
);
OM_uint32 KRB5_CALLCONV krb5_gss_wrap
(OM_uint32 *, /* minor_status */
gss_ctx_id_t, /* context_handle */
int, /* conf_req_flag */
gss_qop_t, /* qop_req */
gss_buffer_t, /* input_message_buffer */
int *, /* conf_state */
gss_buffer_t /* output_message_buffer */
);
OM_uint32 KRB5_CALLCONV krb5_gss_wrap_iov
(OM_uint32 *, /* minor_status */
gss_ctx_id_t, /* context_handle */
int, /* conf_req_flag */
gss_qop_t, /* qop_req */
int *, /* conf_state */
gss_iov_buffer_desc *, /* iov */
int /* iov_count */
);
OM_uint32 KRB5_CALLCONV krb5_gss_wrap_iov_length
(OM_uint32 *, /* minor_status */
gss_ctx_id_t, /* context_handle */
int, /* conf_req_flag */
gss_qop_t, /* qop_req */
int *, /* conf_state */
gss_iov_buffer_desc *, /* iov */
int /* iov_count */
);
OM_uint32 KRB5_CALLCONV krb5_gss_unwrap
(OM_uint32 *, /* minor_status */
gss_ctx_id_t, /* context_handle */
gss_buffer_t, /* input_message_buffer */
gss_buffer_t, /* output_message_buffer */
int *, /* conf_state */
gss_qop_t * /* qop_state */
);
OM_uint32 KRB5_CALLCONV krb5_gss_unwrap_iov
(OM_uint32 *, /* minor_status */
gss_ctx_id_t, /* context_handle */
int *, /* conf_state */
gss_qop_t *, /* qop_state */
gss_iov_buffer_desc *, /* iov */
int /* iov_count */
);
OM_uint32 KRB5_CALLCONV krb5_gss_wrap_size_limit
(OM_uint32 *, /* minor_status */
gss_ctx_id_t, /* context_handle */
int, /* conf_req_flag */
gss_qop_t, /* qop_req */
OM_uint32, /* req_output_size */
OM_uint32 * /* max_input_size */
);
OM_uint32 KRB5_CALLCONV krb5_gss_import_name_object
(OM_uint32 *, /* minor_status */
void *, /* input_name */
gss_OID, /* input_name_type */
gss_name_t * /* output_name */
);
OM_uint32 KRB5_CALLCONV krb5_gss_export_name_object
(OM_uint32 *, /* minor_status */
gss_name_t, /* input_name */
gss_OID, /* desired_name_type */
void * * /* output_name */
);
OM_uint32 KRB5_CALLCONV krb5_gss_inquire_cred_by_mech
(OM_uint32 *, /* minor_status */
gss_cred_id_t, /* cred_handle */
gss_OID, /* mech_type */
gss_name_t *, /* name */
OM_uint32 *, /* initiator_lifetime */
OM_uint32 *, /* acceptor_lifetime */
gss_cred_usage_t * /* cred_usage */
);
#ifndef LEAN_CLIENT
OM_uint32 KRB5_CALLCONV krb5_gss_export_sec_context
(OM_uint32 *, /* minor_status */
gss_ctx_id_t *, /* context_handle */
gss_buffer_t /* interprocess_token */
);
OM_uint32 KRB5_CALLCONV krb5_gss_import_sec_context
(OM_uint32 *, /* minor_status */
gss_buffer_t, /* interprocess_token */
gss_ctx_id_t * /* context_handle */
);
#endif /* LEAN_CLIENT */
krb5_error_code krb5_gss_ser_init(krb5_context);
OM_uint32 krb5_gss_release_oid
(OM_uint32 *, /* minor_status */
gss_OID * /* oid */
);
OM_uint32 KRB5_CALLCONV krb5_gss_internal_release_oid
(OM_uint32 *, /* minor_status */
gss_OID * /* oid */
);
OM_uint32 KRB5_CALLCONV krb5_gss_inquire_names_for_mech
(OM_uint32 *, /* minor_status */
gss_OID, /* mechanism */
gss_OID_set * /* name_types */
);
OM_uint32 krb5_gss_canonicalize_name
(OM_uint32 *, /* minor_status */
const gss_name_t, /* input_name */
const gss_OID, /* mech_type */
gss_name_t * /* output_name */
);
OM_uint32 KRB5_CALLCONV krb5_gss_export_name
(OM_uint32 *, /* minor_status */
const gss_name_t, /* input_name */
gss_buffer_t /* exported_name */
);
OM_uint32 KRB5_CALLCONV krb5_gss_duplicate_name
(OM_uint32 *, /* minor_status */
const gss_name_t, /* input_name */
gss_name_t * /* dest_name */
);
OM_uint32 krb5_gss_validate_cred
(OM_uint32 *, /* minor_status */
gss_cred_id_t /* cred */
);
OM_uint32 KRB5_CALLCONV krb5_gss_acquire_cred_impersonate_name(
OM_uint32 *, /* minor_status */
const gss_cred_id_t, /* impersonator_cred_handle */
const gss_name_t, /* desired_name */
OM_uint32, /* time_req */
const gss_OID_set, /* desired_mechs */
gss_cred_usage_t, /* cred_usage */
gss_cred_id_t *, /* output_cred_handle */
gss_OID_set *, /* actual_mechs */
OM_uint32 *); /* time_rec */
OM_uint32
krb5_gss_validate_cred_1(OM_uint32 * /* minor_status */,
gss_cred_id_t /* cred_handle */,
krb5_context /* context */);
gss_OID krb5_gss_convert_static_mech_oid(gss_OID oid);
krb5_error_code gss_krb5int_make_seal_token_v3(krb5_context,
krb5_gss_ctx_id_rec *,
const gss_buffer_desc *,
gss_buffer_t,
int, int);
OM_uint32 gss_krb5int_unseal_token_v3(krb5_context *contextptr,
OM_uint32 *minor_status,
krb5_gss_ctx_id_rec *ctx,
unsigned char *ptr,
unsigned int bodysize,
gss_buffer_t message_buffer,
int *conf_state, gss_qop_t *qop_state,
int toktype);
int gss_krb5int_rotate_left (void *ptr, size_t bufsiz, size_t rc);
/* naming_exts.c */
#define KG_INIT_NAME_NO_COPY 0x1
krb5_error_code
kg_init_name(krb5_context context, krb5_principal principal,
char *service, char *host, krb5_authdata_context ad_context,
krb5_flags flags, krb5_gss_name_t *name);
krb5_error_code
kg_release_name(krb5_context context, krb5_gss_name_t *name);
krb5_error_code
kg_duplicate_name(krb5_context context, const krb5_gss_name_t src,
krb5_gss_name_t *dst);
krb5_boolean
kg_compare_name(krb5_context context,
krb5_gss_name_t name1,
krb5_gss_name_t name2);
krb5_boolean
kg_acceptor_princ(krb5_context context, krb5_gss_name_t name,
krb5_principal *princ_out);
OM_uint32 KRB5_CALLCONV
krb5_gss_display_name_ext(OM_uint32 *minor_status,
gss_name_t name,
gss_OID display_as_name_type,
gss_buffer_t display_name);
OM_uint32 KRB5_CALLCONV
krb5_gss_inquire_name(OM_uint32 *minor_status,
gss_name_t name,
int *name_is_MN,
gss_OID *MN_mech,
gss_buffer_set_t *attrs);
OM_uint32 KRB5_CALLCONV
krb5_gss_get_name_attribute(OM_uint32 *minor_status,
gss_name_t name,
gss_buffer_t attr,
int *authenticated,
int *complete,
gss_buffer_t value,
gss_buffer_t display_value,
int *more);
OM_uint32 KRB5_CALLCONV
krb5_gss_set_name_attribute(OM_uint32 *minor_status,
gss_name_t name,
int complete,
gss_buffer_t attr,
gss_buffer_t value);
OM_uint32 KRB5_CALLCONV
krb5_gss_delete_name_attribute(OM_uint32 *minor_status,
gss_name_t name,
gss_buffer_t attr);
OM_uint32 KRB5_CALLCONV
krb5_gss_export_name_composite(OM_uint32 *minor_status,
gss_name_t name,
gss_buffer_t exp_composite_name);
OM_uint32 KRB5_CALLCONV
krb5_gss_map_name_to_any(OM_uint32 *minor_status,
gss_name_t name,
int authenticated,
gss_buffer_t type_id,
gss_any_t *output);
OM_uint32 KRB5_CALLCONV
krb5_gss_release_any_name_mapping(OM_uint32 *minor_status,
gss_name_t name,
gss_buffer_t type_id,
gss_any_t *input);
OM_uint32 KRB5_CALLCONV
krb5_gss_pseudo_random(OM_uint32 *minor_status,
gss_ctx_id_t context,
int prf_key,
const gss_buffer_t prf_in,
ssize_t desired_output_len,
gss_buffer_t prf_out);
OM_uint32 KRB5_CALLCONV
krb5_gss_store_cred(OM_uint32 *minor_status,
gss_cred_id_t input_cred_handle,
gss_cred_usage_t cred_usage,
const gss_OID desired_mech,
OM_uint32 overwrite_cred,
OM_uint32 default_cred,
gss_OID_set *elements_stored,
gss_cred_usage_t *cred_usage_stored);
/* s4u_gss_glue.c */
OM_uint32
kg_compose_deleg_cred(OM_uint32 *minor_status,
krb5_gss_cred_id_t impersonator_cred,
krb5_creds *subject_creds,
OM_uint32 time_req,
krb5_gss_cred_id_t *output_cred,
OM_uint32 *time_rec,
krb5_context context);
/*
* These take unglued krb5-mech-specific contexts.
*/
#define GSS_KRB5_GET_TKT_FLAGS_OID_LENGTH 11
#define GSS_KRB5_GET_TKT_FLAGS_OID "\x2a\x86\x48\x86\xf7\x12\x01\x02\x02\x05\x01"
OM_uint32 gss_krb5int_get_tkt_flags
(OM_uint32 *minor_status,
const gss_ctx_id_t context_handle,
const gss_OID desired_object,
gss_buffer_set_t *data_set);
#define GSS_KRB5_COPY_CCACHE_OID_LENGTH 11
#define GSS_KRB5_COPY_CCACHE_OID "\x2a\x86\x48\x86\xf7\x12\x01\x02\x02\x05\x02"
OM_uint32 gss_krb5int_copy_ccache
(OM_uint32 *minor_status,
gss_cred_id_t *cred_handle,
const gss_OID desired_oid,
const gss_buffer_t value);
#define GSS_KRB5_CCACHE_NAME_OID_LENGTH 11
#define GSS_KRB5_CCACHE_NAME_OID "\x2a\x86\x48\x86\xf7\x12\x01\x02\x02\x05\x03"
struct krb5_gss_ccache_name_req {
const char *name;
const char **out_name;
};
OM_uint32
gss_krb5int_ccache_name(OM_uint32 *minor_status, const gss_OID, const gss_OID,
const gss_buffer_t);
#define GSS_KRB5_INQ_SSPI_SESSION_KEY_OID_LENGTH 11
#define GSS_KRB5_INQ_SSPI_SESSION_KEY_OID "\x2a\x86\x48\x86\xf7\x12\x01\x02\x02\x05\x05"
OM_uint32
gss_krb5int_inq_session_key(OM_uint32 *, const gss_ctx_id_t, const gss_OID, gss_buffer_set_t *);
#define GSS_KRB5_SET_ALLOWABLE_ENCTYPES_OID_LENGTH 11
#define GSS_KRB5_SET_ALLOWABLE_ENCTYPES_OID "\x2a\x86\x48\x86\xf7\x12\x01\x02\x02\x05\x04"
struct krb5_gss_set_allowable_enctypes_req {
OM_uint32 num_ktypes;
krb5_enctype *ktypes;
};
OM_uint32
gss_krb5int_set_allowable_enctypes(OM_uint32 *minor_status,
gss_cred_id_t *cred,
const gss_OID desired_oid,
const gss_buffer_t value);
#define GSS_KRB5_EXPORT_LUCID_SEC_CONTEXT_OID_LENGTH 11
#define GSS_KRB5_EXPORT_LUCID_SEC_CONTEXT_OID "\x2a\x86\x48\x86\xf7\x12\x01\x02\x02\x05\x06"
OM_uint32
gss_krb5int_export_lucid_sec_context(OM_uint32 *minor_status,
const gss_ctx_id_t context_handle,
const gss_OID desired_object,
gss_buffer_set_t *data_set);
#define GSS_KRB5_FREE_LUCID_SEC_CONTEXT_OID_LENGTH 11
#define GSS_KRB5_FREE_LUCID_SEC_CONTEXT_OID "\x2a\x86\x48\x86\xf7\x12\x01\x02\x02\x05\x07"
OM_uint32
gss_krb5int_free_lucid_sec_context(OM_uint32 *, const gss_OID,
const gss_OID, gss_buffer_t);
extern k5_mutex_t kg_kdc_flag_mutex;
krb5_error_code krb5_gss_init_context (krb5_context *ctxp);
#define GSS_KRB5_USE_KDC_CONTEXT_OID_LENGTH 11
#define GSS_KRB5_USE_KDC_CONTEXT_OID "\x2a\x86\x48\x86\xf7\x12\x01\x02\x02\x05\x08"
OM_uint32 krb5int_gss_use_kdc_context(OM_uint32 *, const gss_OID,
const gss_OID, gss_buffer_t);
krb5_error_code krb5_gss_use_kdc_context(void);
#define GSS_KRB5_REGISTER_ACCEPTOR_IDENTITY_OID_LENGTH 11
#define GSS_KRB5_REGISTER_ACCEPTOR_IDENTITY_OID "\x2a\x86\x48\x86\xf7\x12\x01\x02\x02\x05\x09"
OM_uint32
gss_krb5int_register_acceptor_identity(OM_uint32 *, const gss_OID, const gss_OID, gss_buffer_t);
#define GSS_KRB5_EXTRACT_AUTHZ_DATA_FROM_SEC_CONTEXT_OID_LENGTH 11
#define GSS_KRB5_EXTRACT_AUTHZ_DATA_FROM_SEC_CONTEXT_OID "\x2a\x86\x48\x86\xf7\x12\x01\x02\x02\x05\x0a"
OM_uint32
gss_krb5int_extract_authz_data_from_sec_context(OM_uint32 *minor_status,
const gss_ctx_id_t context_handle,
const gss_OID desired_object,
gss_buffer_set_t *ad_data);
#define GSS_KRB5_SET_CRED_RCACHE_OID_LENGTH 11
#define GSS_KRB5_SET_CRED_RCACHE_OID "\x2a\x86\x48\x86\xf7\x12\x01\x02\x02\x05\x0b"
OM_uint32
gss_krb5int_set_cred_rcache(OM_uint32 *, gss_cred_id_t *, const gss_OID, const gss_buffer_t);
#define GSS_KRB5_EXTRACT_AUTHTIME_FROM_SEC_CONTEXT_OID_LENGTH 11
#define GSS_KRB5_EXTRACT_AUTHTIME_FROM_SEC_CONTEXT_OID "\x2a\x86\x48\x86\xf7\x12\x01\x02\x02\x05\x0c"
OM_uint32
gss_krb5int_extract_authtime_from_sec_context(OM_uint32 *,
const gss_ctx_id_t,
const gss_OID,
gss_buffer_set_t *);
#define GSS_KRB5_IMPORT_CRED_OID_LENGTH 11
#define GSS_KRB5_IMPORT_CRED_OID "\x2a\x86\x48\x86\xf7\x12\x01\x02\x02\x05\x0d"
struct krb5_gss_import_cred_req {
krb5_ccache id;
krb5_principal keytab_principal;
krb5_keytab keytab;
};
OM_uint32
gss_krb5int_import_cred(OM_uint32 *minor_status,
gss_cred_id_t *cred,
const gss_OID desired_oid,
const gss_buffer_t value);
#ifdef _GSS_STATIC_LINK
int gss_krb5int_lib_init(void);
void gss_krb5int_lib_fini(void);
#endif /* _GSS_STATIC_LINK */
OM_uint32 gss_krb5int_initialize_library(void);
void gss_krb5int_cleanup_library(void);
/* For error message handling. */
/* Returns a shared string, not a private copy! */
extern char *
krb5_gss_get_error_message(OM_uint32 minor_code);
extern void
krb5_gss_save_error_string(OM_uint32 minor_code, char *msg);
extern void
krb5_gss_save_error_message(OM_uint32 minor_code, const char *format, ...)
#if !defined(__cplusplus) && (__GNUC__ > 2)
__attribute__((__format__(__printf__, 2, 3)))
#endif
;
extern void
krb5_gss_save_error_info(OM_uint32 minor_code, krb5_context ctx);
#define get_error_message krb5_gss_get_error_message
#define save_error_string krb5_gss_save_error_string
#define save_error_message krb5_gss_save_error_message
#ifdef KRB5_KERNEL
/* Error messages aren't needed in the kernel, so reduce dependencies. */
#define save_error_info(x,y)
#else
#define save_error_info krb5_gss_save_error_info
#endif
extern void krb5_gss_delete_error_info(void *p);
/* Prefix concatenated with Kerberos encryption type */
#define GSS_KRB5_SESSION_KEY_ENCTYPE_OID_LENGTH 10
#define GSS_KRB5_SESSION_KEY_ENCTYPE_OID "\x2a\x86\x48\x86\xf7\x12\x01\x02\x02\x04"
/* IAKERB */
OM_uint32 KRB5_CALLCONV
iakerb_gss_init_sec_context(OM_uint32 *minor_status,
gss_cred_id_t claimant_cred_handle,
gss_ctx_id_t *context_handle,
gss_name_t target_name,
gss_OID mech_type,
OM_uint32 req_flags,
OM_uint32 time_req,
gss_channel_bindings_t input_chan_bindings,
gss_buffer_t input_token,
gss_OID *actual_mech_type,
gss_buffer_t output_token,
OM_uint32 *ret_flags,
OM_uint32 *time_rec);
OM_uint32 KRB5_CALLCONV
iakerb_gss_accept_sec_context(OM_uint32 *minor_status,
gss_ctx_id_t *context_handler,
gss_cred_id_t verifier_cred_handle,
gss_buffer_t input_token,
gss_channel_bindings_t input_chan_bindings,
gss_name_t *src_name,
gss_OID *mech_type,
gss_buffer_t output_token,
OM_uint32 *ret_flags,
OM_uint32 *time_rec,
gss_cred_id_t *delegated_cred_handle);
OM_uint32 KRB5_CALLCONV
iakerb_gss_delete_sec_context(OM_uint32 *minor_status,
gss_ctx_id_t *context_handle,
gss_buffer_t output_token);
krb5_error_code
iakerb_make_finished(krb5_context context,
krb5_key key,
const krb5_data *conv,
krb5_data **finished);
krb5_error_code
iakerb_verify_finished(krb5_context context,
krb5_key key,
const krb5_data *conv,
const krb5_data *finished);
/*
* Transfer contents of a krb5_data to a gss_buffer and invalidate the source
* On unix, this is a simple pointer copy
* On windows, memory is reallocated and copied.
*/
/*
 * Move the contents of *input_k5data into *output_buffer, invalidating the
 * source.  On _WIN32 (or with DEBUG_GSSALLOC) the bytes are copied into a
 * buffer obtained from gssalloc_malloc() — presumably so the GSS buffer can
 * later be released by the matching gssalloc free routine (TODO confirm) —
 * and the krb5 data is freed here; returns ENOMEM if that allocation fails
 * (note output_buffer->length is already set even on failure, while ->value
 * is NULL).  On other platforms the data pointer is transferred directly
 * with no copy.  In every case *input_k5data is reset to empty_data(), so
 * the caller must not free its contents again.
 */
static inline krb5_error_code
data_to_gss(krb5_data *input_k5data, gss_buffer_t output_buffer)
{
krb5_error_code code = 0;
output_buffer->length = input_k5data->length;
#if defined(_WIN32) || defined(DEBUG_GSSALLOC)
if (output_buffer->length > 0) {
output_buffer->value = gssalloc_malloc(output_buffer->length);
if (output_buffer->value)
memcpy(output_buffer->value, input_k5data->data, output_buffer->length);
else
code = ENOMEM;
} else {
/* Zero-length payloads carry no allocation at all. */
output_buffer->value = NULL;
}
free(input_k5data->data);
#else
/* Unix: simple ownership transfer of the underlying pointer. */
output_buffer->value = input_k5data->data;
#endif
*input_k5data = empty_data();
return code;
}
#define KRB5_GSS_EXTS_IAKERB_FINISHED 1
/* Credential store extensions */
#define KRB5_CS_CLI_KEYTAB_URN "client_keytab"
#define KRB5_CS_KEYTAB_URN "keytab"
#define KRB5_CS_CCACHE_URN "ccache"
#define KRB5_CS_RCACHE_URN "rcache"
OM_uint32
kg_value_from_cred_store(gss_const_key_value_set_t cred_store,
const char *type, const char **value);
OM_uint32 KRB5_CALLCONV
krb5_gss_acquire_cred_from(
OM_uint32 *, /* minor_status */
const gss_name_t, /* desired_name */
OM_uint32, /* time_req */
const gss_OID_set, /* desired_mechs */
gss_cred_usage_t, /* cred_usage */
gss_const_key_value_set_t, /* cred_store */
gss_cred_id_t *, /* output_cred_handle */
gss_OID_set *, /* actual_mechs */
OM_uint32 *); /* time_rec */
OM_uint32 KRB5_CALLCONV
krb5_gss_store_cred_into(
OM_uint32 *, /* minor_status */
gss_cred_id_t, /* input_cred_handle */
gss_cred_usage_t, /* input_usage */
const gss_OID, /* desired_mech */
OM_uint32, /* overwrite_cred */
OM_uint32, /* default_cred */
gss_const_key_value_set_t, /* cred_store */
gss_OID_set *, /* elements_stored */
gss_cred_usage_t *); /* cred_usage_stored */
OM_uint32 KRB5_CALLCONV
krb5_gss_export_cred(OM_uint32 *minor_status, gss_cred_id_t cred_handle,
gss_buffer_t token);
OM_uint32 KRB5_CALLCONV
krb5_gss_import_cred(OM_uint32 *minor_status, gss_buffer_t token,
gss_cred_id_t *cred_handle);
OM_uint32 KRB5_CALLCONV
iakerb_gss_process_context_token(OM_uint32 *minor_status,
const gss_ctx_id_t context_handle,
const gss_buffer_t token_buffer);
OM_uint32 KRB5_CALLCONV
iakerb_gss_context_time(OM_uint32 *minor_status, gss_ctx_id_t context_handle,
OM_uint32 *time_rec);
OM_uint32 KRB5_CALLCONV
iakerb_gss_inquire_context(OM_uint32 *minor_status,
gss_ctx_id_t context_handle, gss_name_t *src_name,
gss_name_t *targ_name, OM_uint32 *lifetime_rec,
gss_OID *mech_type, OM_uint32 *ctx_flags,
int *locally_initiated, int *opened);
OM_uint32 KRB5_CALLCONV
iakerb_gss_get_mic(OM_uint32 *minor_status, gss_ctx_id_t context_handle,
gss_qop_t qop_req, gss_buffer_t message_buffer,
gss_buffer_t message_token);
OM_uint32 KRB5_CALLCONV
iakerb_gss_get_mic_iov(OM_uint32 *minor_status, gss_ctx_id_t context_handle,
gss_qop_t qop_req, gss_iov_buffer_desc *iov,
int iov_count);
OM_uint32 KRB5_CALLCONV
iakerb_gss_get_mic_iov_length(OM_uint32 *minor_status,
gss_ctx_id_t context_handle, gss_qop_t qop_req,
gss_iov_buffer_desc *iov, int iov_count);
OM_uint32 KRB5_CALLCONV
iakerb_gss_verify_mic(OM_uint32 *minor_status, gss_ctx_id_t context_handle,
gss_buffer_t msg_buffer, gss_buffer_t token_buffer,
gss_qop_t *qop_state);
OM_uint32 KRB5_CALLCONV
iakerb_gss_verify_mic_iov(OM_uint32 *minor_status, gss_ctx_id_t context_handle,
gss_qop_t *qop_state, gss_iov_buffer_desc *iov,
int iov_count);
OM_uint32 KRB5_CALLCONV
iakerb_gss_wrap(OM_uint32 *minor_status, gss_ctx_id_t context_handle,
int conf_req_flag, gss_qop_t qop_req,
gss_buffer_t input_message_buffer, int *conf_state,
gss_buffer_t output_message_buffer);
OM_uint32 KRB5_CALLCONV
iakerb_gss_wrap_iov(OM_uint32 *minor_status, gss_ctx_id_t context_handle,
int conf_req_flag, gss_qop_t qop_req, int *conf_state,
gss_iov_buffer_desc *iov, int iov_count);
OM_uint32 KRB5_CALLCONV
iakerb_gss_wrap_iov_length(OM_uint32 *minor_status,
gss_ctx_id_t context_handle, int conf_req_flag,
gss_qop_t qop_req, int *conf_state,
gss_iov_buffer_desc *iov, int iov_count);
OM_uint32 KRB5_CALLCONV
iakerb_gss_unwrap(OM_uint32 *minor_status, gss_ctx_id_t context_handle,
gss_buffer_t input_message_buffer,
gss_buffer_t output_message_buffer, int *conf_state,
gss_qop_t *qop_state);
OM_uint32 KRB5_CALLCONV
iakerb_gss_unwrap_iov(OM_uint32 *minor_status, gss_ctx_id_t context_handle,
int *conf_state, gss_qop_t *qop_state,
gss_iov_buffer_desc *iov, int iov_count);
OM_uint32 KRB5_CALLCONV
iakerb_gss_wrap_size_limit(OM_uint32 *minor_status,
gss_ctx_id_t context_handle, int conf_req_flag,
gss_qop_t qop_req, OM_uint32 req_output_size,
OM_uint32 *max_input_size);
#ifndef LEAN_CLIENT
OM_uint32 KRB5_CALLCONV
iakerb_gss_export_sec_context(OM_uint32 *minor_status,
gss_ctx_id_t *context_handle,
gss_buffer_t interprocess_token);
OM_uint32 KRB5_CALLCONV
iakerb_gss_import_sec_context(OM_uint32 *minor_status,
const gss_buffer_t interprocess_token,
gss_ctx_id_t *context_handle);
#endif /* LEAN_CLIENT */
OM_uint32 KRB5_CALLCONV
iakerb_gss_inquire_sec_context_by_oid(OM_uint32 *minor_status,
const gss_ctx_id_t context_handle,
const gss_OID desired_object,
gss_buffer_set_t *data_set);
OM_uint32 KRB5_CALLCONV
iakerb_gss_set_sec_context_option(OM_uint32 *minor_status,
gss_ctx_id_t *context_handle,
const gss_OID desired_object,
const gss_buffer_t value);
OM_uint32 KRB5_CALLCONV
iakerb_gss_pseudo_random(OM_uint32 *minor_status, gss_ctx_id_t context_handle,
int prf_key, const gss_buffer_t prf_in,
ssize_t desired_output_len, gss_buffer_t prf_out);
/* Magic string to identify exported krb5 GSS credentials. Increment this if
* the format changes. */
#define CRED_EXPORT_MAGIC "K5C1"
#endif /* _GSSAPIP_KRB5_H_ */
|
1537_0
|
crossvul
|
h
|
CWE-119
|
Improper Restriction of Operations within the Bounds of a Memory Buffer - Certain languages allow direct addressing of memory locations and do not automatically ensure that these locations are valid for the memory buffer that is being referenced.
|
c
|
/*
Copyright 1999-2015 ImageMagick Studio LLC, a non-profit organization
dedicated to making software imaging solutions freely available.
You may not use this file except in compliance with the License.
obtain a copy of the License at
http://www.imagemagick.org/script/license.php
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
MagickCore pixel accessor methods.
*/
#ifndef _MAGICKCORE_PIXEL_ACCESSOR_H
#define _MAGICKCORE_PIXEL_ACCESSOR_H
#include "MagickCore/cache.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/color.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/gem.h"
#include "MagickCore/image.h"
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
#undef index
/* Sample for aPixelChannel at the given pixel position, located through the
image's channel map.  No trait check is performed. */
static inline Quantum GetPixela(const Image *restrict image,
const Quantum *restrict pixel)
{
return(pixel[image->channel_map[aPixelChannel].offset]);
}
/* Alpha sample at the given pixel position; yields OpaqueAlpha when the
image has no defined alpha channel (UndefinedPixelTrait). */
static inline Quantum GetPixelAlpha(const Image *restrict image,
const Quantum *restrict pixel)
{
if (image->channel_map[AlphaPixelChannel].traits == UndefinedPixelTrait)
return(OpaqueAlpha);
return(pixel[image->channel_map[AlphaPixelChannel].offset]);
}
/* Trait flags recorded in the channel map for the alpha channel. */
static inline PixelTrait GetPixelAlphaTraits(const Image *restrict image)
{
return(image->channel_map[AlphaPixelChannel].traits);
}
/* Sample for bPixelChannel at the given pixel position.  No trait check. */
static inline Quantum GetPixelb(const Image *restrict image,
const Quantum *restrict pixel)
{
return(pixel[image->channel_map[bPixelChannel].offset]);
}
/* Black (K) sample at the given pixel position; 0 when the image defines no
black channel (UndefinedPixelTrait). */
static inline Quantum GetPixelBlack(const Image *restrict image,
const Quantum *restrict pixel)
{
if (image->channel_map[BlackPixelChannel].traits == UndefinedPixelTrait)
return((Quantum) 0);
return(pixel[image->channel_map[BlackPixelChannel].offset]);
}
/* Trait flags recorded in the channel map for the black channel. */
static inline PixelTrait GetPixelBlackTraits(const Image *restrict image)
{
return(image->channel_map[BlackPixelChannel].traits);
}
/* Blue sample at the given pixel position.  No trait check. */
static inline Quantum GetPixelBlue(const Image *restrict image,
const Quantum *restrict pixel)
{
return(pixel[image->channel_map[BluePixelChannel].offset]);
}
/* Trait flags recorded in the channel map for the blue channel. */
static inline PixelTrait GetPixelBlueTraits(const Image *restrict image)
{
return(image->channel_map[BluePixelChannel].traits);
}
/* Cb (chroma) sample at the given pixel position.  No trait check. */
static inline Quantum GetPixelCb(const Image *restrict image,
const Quantum *restrict pixel)
{
return(pixel[image->channel_map[CbPixelChannel].offset]);
}
/* Trait flags recorded in the channel map for the Cb channel. */
static inline PixelTrait GetPixelCbTraits(const Image *restrict image)
{
return(image->channel_map[CbPixelChannel].traits);
}
/* Generic accessor: sample for an arbitrary channel at the given pixel
position; 0 when that channel is undefined for this image. */
static inline Quantum GetPixelChannel(const Image *restrict image,
const PixelChannel channel,const Quantum *restrict pixel)
{
if (image->channel_map[channel].traits == UndefinedPixelTrait)
return((Quantum) 0);
return(pixel[image->channel_map[channel].offset]);
}
/* Reverse lookup: which PixelChannel is stored at a given channel-map
offset.  No bounds check on offset. */
static inline PixelChannel GetPixelChannelChannel(const Image *restrict image,
const ssize_t offset)
{
return(image->channel_map[offset].channel);
}
/* Storage offset (index within a pixel) of the given channel. */
static inline ssize_t GetPixelChannelOffset(const Image *restrict image,
const PixelChannel channel)
{
return(image->channel_map[channel].offset);
}
/* Trait flags recorded in the channel map for the given channel. */
static inline PixelTrait GetPixelChannelTraits(const Image *restrict image,
const PixelChannel channel)
{
return(image->channel_map[channel].traits);
}
/* Number of channels stored per pixel in this image. */
static inline size_t GetPixelChannels(const Image *restrict image)
{
return(image->number_channels);
}
/* Cr (chroma) sample at the given pixel position.  No trait check. */
static inline Quantum GetPixelCr(const Image *restrict image,
const Quantum *restrict pixel)
{
return(pixel[image->channel_map[CrPixelChannel].offset]);
}
/* Trait flags recorded in the channel map for the Cr channel. */
static inline PixelTrait GetPixelCrTraits(const Image *restrict image)
{
return(image->channel_map[CrPixelChannel].traits);
}
/* Cyan sample at the given pixel position.  No trait check. */
static inline Quantum GetPixelCyan(const Image *restrict image,
const Quantum *restrict pixel)
{
return(pixel[image->channel_map[CyanPixelChannel].offset]);
}
/* Trait flags recorded in the channel map for the cyan channel. */
static inline PixelTrait GetPixelCyanTraits(const Image *restrict image)
{
return(image->channel_map[CyanPixelChannel].traits);
}
/* Gray sample at the given pixel position.  No trait check. */
static inline Quantum GetPixelGray(const Image *restrict image,
const Quantum *restrict pixel)
{
return(pixel[image->channel_map[GrayPixelChannel].offset]);
}
/* Trait flags recorded in the channel map for the gray channel. */
static inline PixelTrait GetPixelGrayTraits(const Image *restrict image)
{
return(image->channel_map[GrayPixelChannel].traits);
}
/* Green sample at the given pixel position.  No trait check. */
static inline Quantum GetPixelGreen(const Image *restrict image,
const Quantum *restrict pixel)
{
return(pixel[image->channel_map[GreenPixelChannel].offset]);
}
static inline PixelTrait GetPixelGreenTraits(const Image *restrict image)
{
return(image->channel_map[GreenPixelChannel].traits);
}
static inline Quantum GetPixelIndex(const Image *restrict image,
const Quantum *restrict pixel)
{
if (image->channel_map[IndexPixelChannel].traits == UndefinedPixelTrait)
return((Quantum) 0);
return(pixel[image->channel_map[IndexPixelChannel].offset]);
}
static inline PixelTrait GetPixelIndexTraits(const Image *restrict image)
{
return(image->channel_map[IndexPixelChannel].traits);
}
static inline MagickRealType GetPixelInfoChannel(
const PixelInfo *restrict pixel_info,const PixelChannel channel)
{
switch (channel)
{
case RedPixelChannel: return(pixel_info->red);
case GreenPixelChannel: return(pixel_info->green);
case BluePixelChannel: return(pixel_info->blue);
case BlackPixelChannel: return(pixel_info->black);
case AlphaPixelChannel: return(pixel_info->alpha);
case IndexPixelChannel: return(pixel_info->index);
default: return((MagickRealType) 0.0);
}
}
static inline MagickRealType GetPixelInfoLuma(const PixelInfo *restrict pixel)
{
MagickRealType
blue,
green,
red;
if (pixel->colorspace == GRAYColorspace)
return(pixel->red);
if (pixel->colorspace == sRGBColorspace)
return(0.212656f*pixel->red+0.715158f*pixel->green+0.072186f*pixel->blue);
red=EncodePixelGamma(pixel->red);
green=EncodePixelGamma(pixel->green);
blue=EncodePixelGamma(pixel->blue);
return(0.212656f*red+0.715158f*green+0.072186f*blue);
}
static inline MagickRealType GetPixelInfoLuminance(
const PixelInfo *restrict pixel)
{
MagickRealType
blue,
green,
red;
if (pixel->colorspace == GRAYColorspace)
return(pixel->red);
if (pixel->colorspace != sRGBColorspace)
return(0.212656f*pixel->red+0.715158f*pixel->green+0.072186f*pixel->blue);
red=DecodePixelGamma(pixel->red);
green=DecodePixelGamma(pixel->green);
blue=DecodePixelGamma(pixel->blue);
return(0.212656f*red+0.715158f*green+0.072186f*blue);
}
static inline Quantum GetPixelL(const Image *restrict image,
const Quantum *restrict pixel)
{
return(pixel[image->channel_map[LPixelChannel].offset]);
}
static inline MagickRealType GetPixelLuma(const Image *restrict image,
const Quantum *restrict pixel)
{
if (image->colorspace == GRAYColorspace)
return((MagickRealType) pixel[image->channel_map[GrayPixelChannel].offset]);
return(0.212656f*pixel[image->channel_map[RedPixelChannel].offset]+
0.715158f*pixel[image->channel_map[GreenPixelChannel].offset]+
0.072186f*pixel[image->channel_map[BluePixelChannel].offset]); /* Rec709 */
}
/*
  GetPixelLuminance() returns the linear-light luminance of a pixel using the
  Rec.709 coefficients (0.212656 R + 0.715158 G + 0.072186 B).

  Grayscale images return the gray channel directly.  Non-sRGB images are
  assumed to already hold linear values and are combined as-is; sRGB pixels
  are first decoded (gamma expanded) to linear light before weighting.
  NOTE(review): the non-sRGB branch presumes the colorspace is linear RGB --
  confirm against callers before relying on it for other colorspaces.
*/
static inline MagickRealType GetPixelLuminance(const Image *restrict image,
const Quantum *restrict pixel)
{
MagickRealType
blue,
green,
red;
if (image->colorspace == GRAYColorspace)
return((MagickRealType) pixel[image->channel_map[GrayPixelChannel].offset]);
if (image->colorspace != sRGBColorspace)
return(0.212656f*pixel[image->channel_map[RedPixelChannel].offset]+
0.715158f*pixel[image->channel_map[GreenPixelChannel].offset]+
0.072186f*pixel[image->channel_map[BluePixelChannel].offset]);
/* sRGB: decode each channel to linear light before applying the weights. */
red=DecodePixelGamma((MagickRealType)
pixel[image->channel_map[RedPixelChannel].offset]);
green=DecodePixelGamma((MagickRealType)
pixel[image->channel_map[GreenPixelChannel].offset]);
blue=DecodePixelGamma((MagickRealType)
pixel[image->channel_map[BluePixelChannel].offset]);
return(0.212656f*red+0.715158f*green+0.072186f*blue); /* Rec709 */
}
static inline Quantum GetPixelMagenta(const Image *restrict image,
const Quantum *restrict pixel)
{
return(pixel[image->channel_map[MagentaPixelChannel].offset]);
}
static inline PixelTrait GetPixelMagentaTraits(const Image *restrict image)
{
return(image->channel_map[MagentaPixelChannel].traits);
}
static inline Quantum GetPixelReadMask(const Image *restrict image,
const Quantum *restrict pixel)
{
if (image->channel_map[ReadMaskPixelChannel].traits == UndefinedPixelTrait)
return((Quantum) QuantumRange);
return(pixel[image->channel_map[ReadMaskPixelChannel].offset]);
}
static inline Quantum GetPixelWriteMask(const Image *restrict image,
const Quantum *restrict pixel)
{
if (image->channel_map[WriteMaskPixelChannel].traits == UndefinedPixelTrait)
return((Quantum) QuantumRange);
return(pixel[image->channel_map[WriteMaskPixelChannel].offset]);
}
static inline PixelTrait GetPixelReadMaskTraits(const Image *restrict image)
{
return(image->channel_map[ReadMaskPixelChannel].traits);
}
static inline size_t GetPixelMetaChannels(const Image *restrict image)
{
return(image->number_meta_channels);
}
static inline size_t GetPixelMetacontentExtent(const Image *restrict image)
{
return(image->metacontent_extent);
}
static inline Quantum GetPixelOpacity(const Image *restrict image,
const Quantum *restrict pixel)
{
if (image->channel_map[AlphaPixelChannel].traits != BlendPixelTrait)
return(QuantumRange-OpaqueAlpha);
return(QuantumRange-pixel[image->channel_map[AlphaPixelChannel].offset]);
}
static inline Quantum GetPixelRed(const Image *restrict image,
const Quantum *restrict pixel)
{
return(pixel[image->channel_map[RedPixelChannel].offset]);
}
static inline PixelTrait GetPixelRedTraits(const Image *restrict image)
{
return(image->channel_map[RedPixelChannel].traits);
}
/*
  GetPixelInfoPixel() unpacks a packed Quantum pixel into a PixelInfo
  structure, copying colorspace and fuzz from the image.  Channels that are
  undefined for this image (black, alpha, index) are given neutral defaults
  (0 for black/index, OpaqueAlpha for alpha) rather than reading garbage.
*/
static inline void GetPixelInfoPixel(const Image *restrict image,
const Quantum *restrict pixel,PixelInfo *restrict pixel_info)
{
pixel_info->colorspace=image->colorspace;
pixel_info->fuzz=image->fuzz;
pixel_info->red=(MagickRealType)
pixel[image->channel_map[RedPixelChannel].offset];
pixel_info->green=(MagickRealType)
pixel[image->channel_map[GreenPixelChannel].offset];
pixel_info->blue=(MagickRealType)
pixel[image->channel_map[BluePixelChannel].offset];
/* Black (K) channel only exists for CMYK-style images. */
pixel_info->black=0.0f;
if (image->channel_map[BlackPixelChannel].traits != UndefinedPixelTrait)
pixel_info->black=(MagickRealType)
pixel[image->channel_map[BlackPixelChannel].offset];
/* Default to fully opaque; record BlendPixelTrait only if alpha exists. */
pixel_info->alpha=(MagickRealType) OpaqueAlpha;
pixel_info->alpha_trait=UndefinedPixelTrait;
if (image->channel_map[AlphaPixelChannel].traits != UndefinedPixelTrait)
{
pixel_info->alpha=(MagickRealType)
pixel[image->channel_map[AlphaPixelChannel].offset];
pixel_info->alpha_trait=BlendPixelTrait;
}
/* Colormap index channel is optional as well. */
pixel_info->index=0.0f;
if (image->channel_map[IndexPixelChannel].traits != UndefinedPixelTrait)
pixel_info->index=(MagickRealType)
pixel[image->channel_map[IndexPixelChannel].offset];
}
static inline PixelTrait GetPixelTraits(const Image *restrict image,
const PixelChannel channel)
{
return(image->channel_map[channel].traits);
}
static inline Quantum GetPixelY(const Image *restrict image,
const Quantum *restrict pixel)
{
return(pixel[image->channel_map[YPixelChannel].offset]);
}
static inline PixelTrait GetPixelYTraits(const Image *restrict image)
{
return(image->channel_map[YPixelChannel].traits);
}
static inline Quantum GetPixelYellow(const Image *restrict image,
const Quantum *restrict pixel)
{
return(pixel[image->channel_map[YellowPixelChannel].offset]);
}
static inline PixelTrait GetPixelYellowTraits(const Image *restrict image)
{
return(image->channel_map[YellowPixelChannel].traits);
}
/*
  AbsolutePixelValue() returns the magnitude of x (branching form of the
  usual ternary absolute value).
*/
static inline MagickRealType AbsolutePixelValue(const MagickRealType x)
{
  if (x < 0.0f)
    return(-x);
  return(x);
}
/*
  IsPixelAtDepth() returns MagickTrue if the pixel value is exactly
  representable at the reduced depth described by `range': the value is
  scaled down into [0, range], scaled back up to [0, QuantumRange], and
  compared with the original.  Any difference means precision would be
  lost at that depth.
*/
static inline MagickBooleanType IsPixelAtDepth(const Quantum pixel,
const QuantumAny range)
{
Quantum
quantum;
#if !defined(MAGICKCORE_HDRI_SUPPORT)
/* Integer Quantum build: +0.5 applies round-to-nearest on the final cast. */
quantum=(Quantum) (((MagickRealType) QuantumRange*((QuantumAny)
(((MagickRealType) range*pixel)/QuantumRange+0.5)))/range+0.5);
#else
/* HDRI (float Quantum) build: no final rounding step. */
quantum=(Quantum) (((MagickRealType) QuantumRange*((QuantumAny)
(((MagickRealType) range*pixel)/QuantumRange+0.5)))/range);
#endif
return(pixel == quantum ? MagickTrue : MagickFalse);
}
/*
  IsPixelEquivalent() returns MagickTrue when the packed pixel p matches the
  PixelInfo q on all three RGB channels to within MagickEpsilon.
*/
static inline MagickBooleanType IsPixelEquivalent(const Image *restrict image,
  const Quantum *restrict p,const PixelInfo *restrict q)
{
  MagickRealType
    blue,
    green,
    red;

  red=(MagickRealType) p[image->channel_map[RedPixelChannel].offset];
  green=(MagickRealType) p[image->channel_map[GreenPixelChannel].offset];
  blue=(MagickRealType) p[image->channel_map[BluePixelChannel].offset];
  /* Reject on the first channel that differs beyond the tolerance. */
  if (AbsolutePixelValue(red-q->red) >= MagickEpsilon)
    return(MagickFalse);
  if (AbsolutePixelValue(green-q->green) >= MagickEpsilon)
    return(MagickFalse);
  if (AbsolutePixelValue(blue-q->blue) >= MagickEpsilon)
    return(MagickFalse);
  return(MagickTrue);
}
/*
  IsPixelGray() returns MagickTrue when the pixel's red, green and blue
  channels are all equal to within MagickEpsilon.
*/
static inline MagickBooleanType IsPixelGray(const Image *restrict image,
  const Quantum *restrict pixel)
{
  MagickRealType
    blue,
    green,
    red;

  red=(MagickRealType) pixel[image->channel_map[RedPixelChannel].offset];
  green=(MagickRealType) pixel[image->channel_map[GreenPixelChannel].offset];
  blue=(MagickRealType) pixel[image->channel_map[BluePixelChannel].offset];
  /* Not gray if any adjacent channel pair differs beyond the tolerance. */
  if ((AbsolutePixelValue(red-green) >= MagickEpsilon) ||
      (AbsolutePixelValue(green-blue) >= MagickEpsilon))
    return(MagickFalse);
  return(MagickTrue);
}
/*
  IsPixelInfoEquivalent() returns MagickTrue when two PixelInfo colors are
  equal to within MagickEpsilon.  Alpha is compared first: a defined alpha
  on one side must be effectively opaque if the other side has none, and if
  both pixels are fully transparent the colors are considered equal without
  comparing RGB.  The black channel is compared only for CMYK.
*/
static inline MagickBooleanType IsPixelInfoEquivalent(
const PixelInfo *restrict p,const PixelInfo *restrict q)
{
/* p has alpha but q does not: p must be opaque to match. */
if ((p->alpha_trait != UndefinedPixelTrait) &&
(q->alpha_trait == UndefinedPixelTrait) &&
(AbsolutePixelValue(p->alpha-OpaqueAlpha) >= MagickEpsilon))
return(MagickFalse);
/* Symmetric case: q has alpha but p does not. */
if ((q->alpha_trait != UndefinedPixelTrait) &&
(p->alpha_trait == UndefinedPixelTrait) &&
(AbsolutePixelValue(q->alpha-OpaqueAlpha)) >= MagickEpsilon)
return(MagickFalse);
if ((p->alpha_trait != UndefinedPixelTrait) &&
(q->alpha_trait != UndefinedPixelTrait))
{
if (AbsolutePixelValue(p->alpha-q->alpha) >= MagickEpsilon)
return(MagickFalse);
/* Both fully transparent: color channels are irrelevant. */
if (AbsolutePixelValue(p->alpha-TransparentAlpha) < MagickEpsilon)
return(MagickTrue);
}
if (AbsolutePixelValue(p->red-q->red) >= MagickEpsilon)
return(MagickFalse);
if (AbsolutePixelValue(p->green-q->green) >= MagickEpsilon)
return(MagickFalse);
if (AbsolutePixelValue(p->blue-q->blue) >= MagickEpsilon)
return(MagickFalse);
/* Black (K) only participates for CMYK colors. */
if ((p->colorspace == CMYKColorspace) &&
(AbsolutePixelValue(p->black-q->black) >= MagickEpsilon))
return(MagickFalse);
return(MagickTrue);
}
/*
  IsPixelMonochrome() returns MagickTrue if the pixel is bilevel, i.e. its
  intensity is (within MagickEpsilon) either 0 or QuantumRange and all three
  RGB channels are equal.
*/
static inline MagickBooleanType IsPixelMonochrome(const Image *restrict image,
  const Quantum *restrict pixel)
{
  MagickRealType
    blue,
    green,
    red;

  red=(MagickRealType) pixel[image->channel_map[RedPixelChannel].offset];
  /*
    Bug fix: the guard must combine with && -- the pixel fails only when it
    is far from BOTH 0 and QuantumRange.  The previous || rejected nearly
    every pixel, since no value can be simultaneously near 0 and near
    QuantumRange.
  */
  if ((AbsolutePixelValue(red) >= MagickEpsilon) &&
      (AbsolutePixelValue(red-QuantumRange) >= MagickEpsilon))
    return(MagickFalse);
  green=(MagickRealType) pixel[image->channel_map[GreenPixelChannel].offset];
  blue=(MagickRealType) pixel[image->channel_map[BluePixelChannel].offset];
  if ((AbsolutePixelValue(red-green) < MagickEpsilon) &&
      (AbsolutePixelValue(green-blue) < MagickEpsilon))
    return(MagickTrue);
  return(MagickFalse);
}
/*
  IsPixelInfoGray() returns MagickTrue when a GRAY or RGB PixelInfo has all
  three color channels equal to within MagickEpsilon.
*/
static inline MagickBooleanType IsPixelInfoGray(
  const PixelInfo *restrict pixel_info)
{
  if ((pixel_info->colorspace != GRAYColorspace) &&
      (pixel_info->colorspace != RGBColorspace))
    return(MagickFalse);
  /* Not gray if any adjacent channel pair differs beyond the tolerance. */
  if ((AbsolutePixelValue(pixel_info->red-pixel_info->green) >= MagickEpsilon) ||
      (AbsolutePixelValue(pixel_info->green-pixel_info->blue) >= MagickEpsilon))
    return(MagickFalse);
  return(MagickTrue);
}
/*
  IsPixelInfoMonochrome() returns MagickTrue if a GRAY or RGB PixelInfo is
  bilevel: its intensity is (within MagickEpsilon) either 0 or QuantumRange
  and all three RGB channels are equal.
*/
static inline MagickBooleanType IsPixelInfoMonochrome(
  const PixelInfo *restrict pixel_info)
{
  if ((pixel_info->colorspace != GRAYColorspace) &&
      (pixel_info->colorspace != RGBColorspace))
    return(MagickFalse);
  /*
    Bug fix: combine with && (fail only when far from BOTH extremes); the
    previous || rejected nearly every pixel, since no value is near both 0
    and QuantumRange at once.
  */
  if ((AbsolutePixelValue(pixel_info->red) >= MagickEpsilon) &&
      (AbsolutePixelValue(pixel_info->red-QuantumRange) >= MagickEpsilon))
    return(MagickFalse);
  if ((AbsolutePixelValue(pixel_info->red-pixel_info->green) < MagickEpsilon) &&
      (AbsolutePixelValue(pixel_info->green-pixel_info->blue) < MagickEpsilon))
    return(MagickTrue);
  return(MagickFalse);
}
static inline void SetPixela(const Image *restrict image,
const Quantum a,Quantum *restrict pixel)
{
if (image->channel_map[aPixelChannel].traits != UndefinedPixelTrait)
pixel[image->channel_map[aPixelChannel].offset]=a;
}
static inline void SetPixelAlpha(const Image *restrict image,
const Quantum alpha,Quantum *restrict pixel)
{
if (image->channel_map[AlphaPixelChannel].traits != UndefinedPixelTrait)
pixel[image->channel_map[AlphaPixelChannel].offset]=alpha;
}
static inline void SetPixelAlphaTraits(Image *image,const PixelTrait traits)
{
image->channel_map[AlphaPixelChannel].traits=traits;
}
static inline void SetPixelb(const Image *restrict image,
const Quantum b,Quantum *restrict pixel)
{
if (image->channel_map[bPixelChannel].traits != UndefinedPixelTrait)
pixel[image->channel_map[bPixelChannel].offset]=b;
}
/*
  SetPixelBackgoundColor() fills one packed pixel with the image's
  background color.  All channels are zeroed first so optional/meta channels
  get a defined value; black and alpha are written only when the image has
  those channels, and a background without an alpha trait maps to opaque.
  NOTE(review): the misspelled name ("Backgound") is the established public
  identifier -- keep it so existing callers continue to link.
*/
static inline void SetPixelBackgoundColor(const Image *restrict image,
Quantum *restrict pixel)
{
register ssize_t
i;
/* Zero every channel (including meta channels) before assignment. */
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
pixel[i]=0;
pixel[image->channel_map[RedPixelChannel].offset]=
ClampToQuantum(image->background_color.red);
pixel[image->channel_map[GreenPixelChannel].offset]=
ClampToQuantum(image->background_color.green);
pixel[image->channel_map[BluePixelChannel].offset]=
ClampToQuantum(image->background_color.blue);
if (image->channel_map[BlackPixelChannel].traits != UndefinedPixelTrait)
pixel[image->channel_map[BlackPixelChannel].offset]=
ClampToQuantum(image->background_color.black);
if (image->channel_map[AlphaPixelChannel].traits != UndefinedPixelTrait)
pixel[image->channel_map[AlphaPixelChannel].offset]=
image->background_color.alpha_trait == UndefinedPixelTrait ? OpaqueAlpha :
ClampToQuantum(image->background_color.alpha);
}
static inline void SetPixelBlack(const Image *restrict image,
const Quantum black,Quantum *restrict pixel)
{
if (image->channel_map[BlackPixelChannel].traits != UndefinedPixelTrait)
pixel[image->channel_map[BlackPixelChannel].offset]=black;
}
static inline void SetPixelBlackTraits(Image *image,const PixelTrait traits)
{
image->channel_map[BlackPixelChannel].traits=traits;
}
static inline void SetPixelBlue(const Image *restrict image,const Quantum blue,
Quantum *restrict pixel)
{
pixel[image->channel_map[BluePixelChannel].offset]=blue;
}
static inline void SetPixelBlueTraits(Image *image,const PixelTrait traits)
{
image->channel_map[BluePixelChannel].traits=traits;
}
static inline void SetPixelCb(const Image *restrict image,const Quantum cb,
Quantum *restrict pixel)
{
pixel[image->channel_map[CbPixelChannel].offset]=cb;
}
static inline void SetPixelCbTraits(Image *image,const PixelTrait traits)
{
image->channel_map[CbPixelChannel].traits=traits;
}
static inline void SetPixelChannel(const Image *restrict image,
const PixelChannel channel,const Quantum quantum,Quantum *restrict pixel)
{
if (image->channel_map[channel].traits != UndefinedPixelTrait)
pixel[image->channel_map[channel].offset]=quantum;
}
static inline void SetPixelChannelAttributes(const Image *restrict image,
const PixelChannel channel,const PixelTrait traits,const ssize_t offset)
{
image->channel_map[offset].channel=channel;
image->channel_map[channel].offset=offset;
image->channel_map[channel].traits=traits;
}
static inline void SetPixelChannelChannel(const Image *restrict image,
const PixelChannel channel,const ssize_t offset)
{
image->channel_map[offset].channel=channel;
image->channel_map[channel].offset=offset;
}
static inline void SetPixelChannels(Image *image,const size_t number_channels)
{
image->number_channels=number_channels;
}
static inline void SetPixelChannelTraits(Image *image,
const PixelChannel channel,const PixelTrait traits)
{
image->channel_map[channel].traits=traits;
}
static inline void SetPixelCr(const Image *restrict image,const Quantum cr,
Quantum *restrict pixel)
{
pixel[image->channel_map[CrPixelChannel].offset]=cr;
}
static inline void SetPixelCrTraits(Image *image,const PixelTrait traits)
{
image->channel_map[CrPixelChannel].traits=traits;
}
static inline void SetPixelCyan(const Image *restrict image,const Quantum cyan,
Quantum *restrict pixel)
{
pixel[image->channel_map[CyanPixelChannel].offset]=cyan;
}
static inline void SetPixelGray(const Image *restrict image,const Quantum gray,
Quantum *restrict pixel)
{
pixel[image->channel_map[GrayPixelChannel].offset]=gray;
}
static inline void SetPixelGrayTraits(Image *image,const PixelTrait traits)
{
image->channel_map[GrayPixelChannel].traits=traits;
}
static inline void SetPixelGreen(const Image *restrict image,
const Quantum green,Quantum *restrict pixel)
{
pixel[image->channel_map[GreenPixelChannel].offset]=green;
}
static inline void SetPixelGreenTraits(Image *image,const PixelTrait traits)
{
image->channel_map[GreenPixelChannel].traits=traits;
}
static inline void SetPixelIndex(const Image *restrict image,
const Quantum index,Quantum *restrict pixel)
{
if (image->channel_map[IndexPixelChannel].traits != UndefinedPixelTrait)
pixel[image->channel_map[IndexPixelChannel].offset]=index;
}
static inline void SetPixelIndexTraits(Image *image,const PixelTrait traits)
{
image->channel_map[IndexPixelChannel].traits=traits;
}
/*
  SetPixelViaPixelInfo() packs a PixelInfo color into a Quantum pixel.
  RGB is always written; black and alpha are written only when the image
  defines those channels, and a PixelInfo without an alpha trait is stored
  as fully opaque.  Unlike SetPixelBackgoundColor(), channels that are not
  assigned here are left untouched.
*/
static inline void SetPixelViaPixelInfo(const Image *restrict image,
const PixelInfo *restrict pixel_info,Quantum *restrict pixel)
{
pixel[image->channel_map[RedPixelChannel].offset]=
ClampToQuantum(pixel_info->red);
pixel[image->channel_map[GreenPixelChannel].offset]=
ClampToQuantum(pixel_info->green);
pixel[image->channel_map[BluePixelChannel].offset]=
ClampToQuantum(pixel_info->blue);
if (image->channel_map[BlackPixelChannel].traits != UndefinedPixelTrait)
pixel[image->channel_map[BlackPixelChannel].offset]=
ClampToQuantum(pixel_info->black);
if (image->channel_map[AlphaPixelChannel].traits != UndefinedPixelTrait)
pixel[image->channel_map[AlphaPixelChannel].offset]=
pixel_info->alpha_trait == UndefinedPixelTrait ? OpaqueAlpha :
ClampToQuantum(pixel_info->alpha);
}
static inline void SetPixelL(const Image *restrict image,const Quantum L,
Quantum *restrict pixel)
{
if (image->channel_map[LPixelChannel].traits != UndefinedPixelTrait)
pixel[image->channel_map[LPixelChannel].offset]=L;
}
static inline void SetPixelMagenta(const Image *restrict image,
const Quantum magenta,Quantum *restrict pixel)
{
pixel[image->channel_map[MagentaPixelChannel].offset]=magenta;
}
static inline void SetPixelMagentaTraits(Image *image,const PixelTrait traits)
{
image->channel_map[MagentaPixelChannel].traits=traits;
}
static inline void SetPixelReadMask(const Image *restrict image,
const Quantum mask,Quantum *restrict pixel)
{
if (image->channel_map[ReadMaskPixelChannel].traits != UndefinedPixelTrait)
pixel[image->channel_map[ReadMaskPixelChannel].offset]=mask;
}
static inline void SetPixelWriteMask(const Image *restrict image,
const Quantum mask,Quantum *restrict pixel)
{
if (image->channel_map[WriteMaskPixelChannel].traits != UndefinedPixelTrait)
pixel[image->channel_map[WriteMaskPixelChannel].offset]=mask;
}
static inline void SetPixelMetacontentExtent(Image *image,const size_t extent)
{
image->metacontent_extent=extent;
}
static inline void SetPixelOpacity(const Image *restrict image,
const Quantum alpha,Quantum *restrict pixel)
{
if (image->channel_map[AlphaPixelChannel].traits != UndefinedPixelTrait)
pixel[image->channel_map[AlphaPixelChannel].offset]=QuantumRange-alpha;
}
static inline void SetPixelRed(const Image *restrict image,const Quantum red,
Quantum *restrict pixel)
{
pixel[image->channel_map[RedPixelChannel].offset]=red;
}
static inline void SetPixelRedTraits(Image *image,const PixelTrait traits)
{
image->channel_map[RedPixelChannel].traits=traits;
}
static inline void SetPixelYellow(const Image *restrict image,
const Quantum yellow,Quantum *restrict pixel)
{
pixel[image->channel_map[YellowPixelChannel].offset]=yellow;
}
static inline void SetPixelYellowTraits(Image *image,const PixelTrait traits)
{
image->channel_map[YellowPixelChannel].traits=traits;
}
static inline void SetPixelY(const Image *restrict image,const Quantum y,
Quantum *restrict pixel)
{
pixel[image->channel_map[YPixelChannel].offset]=y;
}
static inline void SetPixelYTraits(Image *image,const PixelTrait traits)
{
image->channel_map[YPixelChannel].traits=traits;
}
#if defined(__cplusplus) || defined(c_plusplus)
}
#endif
#endif
/*
Copyright 1999-2015 ImageMagick Studio LLC, a non-profit organization
dedicated to making software imaging solutions freely available.
You may not use this file except in compliance with the License.  You may
obtain a copy of the License at
http://www.imagemagick.org/script/license.php
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
MagickCore pixel accessor methods.
*/
#ifndef _MAGICKCORE_PIXEL_ACCESSOR_H
#define _MAGICKCORE_PIXEL_ACCESSOR_H
#include "MagickCore/cache.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/color.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/gem.h"
#include "MagickCore/image.h"
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
#undef index
/*
  ClampPixel() converts a real pixel value to Quantum.  In a non-HDRI build
  the cast alone suffices (Quantum is an integer type and callers are
  expected to stay in range); in an HDRI build the value is explicitly
  clamped to [0, QuantumRange] before the cast.
*/
static inline Quantum ClampPixel(const MagickRealType value)
{
#if !defined(MAGICKCORE_HDRI_SUPPORT)
return((Quantum) value);
#else
if (value < 0.0)
return((Quantum) 0.0);
if (value >= (MagickRealType) QuantumRange)
return((Quantum) QuantumRange);
return((Quantum) value);
#endif
}
static inline Quantum GetPixela(const Image *restrict image,
const Quantum *restrict pixel)
{
return(pixel[image->channel_map[aPixelChannel].offset]);
}
static inline Quantum GetPixelAlpha(const Image *restrict image,
const Quantum *restrict pixel)
{
if (image->channel_map[AlphaPixelChannel].traits == UndefinedPixelTrait)
return(OpaqueAlpha);
return(pixel[image->channel_map[AlphaPixelChannel].offset]);
}
static inline PixelTrait GetPixelAlphaTraits(const Image *restrict image)
{
return(image->channel_map[AlphaPixelChannel].traits);
}
static inline Quantum GetPixelb(const Image *restrict image,
const Quantum *restrict pixel)
{
return(pixel[image->channel_map[bPixelChannel].offset]);
}
static inline Quantum GetPixelBlack(const Image *restrict image,
const Quantum *restrict pixel)
{
if (image->channel_map[BlackPixelChannel].traits == UndefinedPixelTrait)
return((Quantum) 0);
return(pixel[image->channel_map[BlackPixelChannel].offset]);
}
static inline PixelTrait GetPixelBlackTraits(const Image *restrict image)
{
return(image->channel_map[BlackPixelChannel].traits);
}
static inline Quantum GetPixelBlue(const Image *restrict image,
const Quantum *restrict pixel)
{
return(pixel[image->channel_map[BluePixelChannel].offset]);
}
static inline PixelTrait GetPixelBlueTraits(const Image *restrict image)
{
return(image->channel_map[BluePixelChannel].traits);
}
static inline Quantum GetPixelCb(const Image *restrict image,
const Quantum *restrict pixel)
{
return(pixel[image->channel_map[CbPixelChannel].offset]);
}
static inline PixelTrait GetPixelCbTraits(const Image *restrict image)
{
return(image->channel_map[CbPixelChannel].traits);
}
static inline Quantum GetPixelChannel(const Image *restrict image,
const PixelChannel channel,const Quantum *restrict pixel)
{
if (image->channel_map[channel].traits == UndefinedPixelTrait)
return((Quantum) 0);
return(pixel[image->channel_map[channel].offset]);
}
static inline PixelChannel GetPixelChannelChannel(const Image *restrict image,
const ssize_t offset)
{
return(image->channel_map[offset].channel);
}
static inline ssize_t GetPixelChannelOffset(const Image *restrict image,
const PixelChannel channel)
{
return(image->channel_map[channel].offset);
}
static inline PixelTrait GetPixelChannelTraits(const Image *restrict image,
const PixelChannel channel)
{
return(image->channel_map[channel].traits);
}
static inline size_t GetPixelChannels(const Image *restrict image)
{
return(image->number_channels);
}
static inline Quantum GetPixelCr(const Image *restrict image,
const Quantum *restrict pixel)
{
return(pixel[image->channel_map[CrPixelChannel].offset]);
}
static inline PixelTrait GetPixelCrTraits(const Image *restrict image)
{
return(image->channel_map[CrPixelChannel].traits);
}
static inline Quantum GetPixelCyan(const Image *restrict image,
const Quantum *restrict pixel)
{
return(pixel[image->channel_map[CyanPixelChannel].offset]);
}
static inline PixelTrait GetPixelCyanTraits(const Image *restrict image)
{
return(image->channel_map[CyanPixelChannel].traits);
}
static inline Quantum GetPixelGray(const Image *restrict image,
const Quantum *restrict pixel)
{
return(pixel[image->channel_map[GrayPixelChannel].offset]);
}
static inline PixelTrait GetPixelGrayTraits(const Image *restrict image)
{
return(image->channel_map[GrayPixelChannel].traits);
}
static inline Quantum GetPixelGreen(const Image *restrict image,
const Quantum *restrict pixel)
{
return(pixel[image->channel_map[GreenPixelChannel].offset]);
}
static inline PixelTrait GetPixelGreenTraits(const Image *restrict image)
{
return(image->channel_map[GreenPixelChannel].traits);
}
static inline Quantum GetPixelIndex(const Image *restrict image,
const Quantum *restrict pixel)
{
if (image->channel_map[IndexPixelChannel].traits == UndefinedPixelTrait)
return((Quantum) 0);
return(pixel[image->channel_map[IndexPixelChannel].offset]);
}
static inline PixelTrait GetPixelIndexTraits(const Image *restrict image)
{
return(image->channel_map[IndexPixelChannel].traits);
}
static inline MagickRealType GetPixelInfoChannel(
const PixelInfo *restrict pixel_info,const PixelChannel channel)
{
switch (channel)
{
case RedPixelChannel: return(pixel_info->red);
case GreenPixelChannel: return(pixel_info->green);
case BluePixelChannel: return(pixel_info->blue);
case BlackPixelChannel: return(pixel_info->black);
case AlphaPixelChannel: return(pixel_info->alpha);
case IndexPixelChannel: return(pixel_info->index);
default: return((MagickRealType) 0.0);
}
}
static inline MagickRealType GetPixelInfoLuma(const PixelInfo *restrict pixel)
{
MagickRealType
blue,
green,
red;
if (pixel->colorspace == GRAYColorspace)
return(pixel->red);
if (pixel->colorspace == sRGBColorspace)
return(0.212656f*pixel->red+0.715158f*pixel->green+0.072186f*pixel->blue);
red=EncodePixelGamma(pixel->red);
green=EncodePixelGamma(pixel->green);
blue=EncodePixelGamma(pixel->blue);
return(0.212656f*red+0.715158f*green+0.072186f*blue);
}
static inline MagickRealType GetPixelInfoLuminance(
const PixelInfo *restrict pixel)
{
MagickRealType
blue,
green,
red;
if (pixel->colorspace == GRAYColorspace)
return(pixel->red);
if (pixel->colorspace != sRGBColorspace)
return(0.212656f*pixel->red+0.715158f*pixel->green+0.072186f*pixel->blue);
red=DecodePixelGamma(pixel->red);
green=DecodePixelGamma(pixel->green);
blue=DecodePixelGamma(pixel->blue);
return(0.212656f*red+0.715158f*green+0.072186f*blue);
}
static inline Quantum GetPixelL(const Image *restrict image,
const Quantum *restrict pixel)
{
return(pixel[image->channel_map[LPixelChannel].offset]);
}
static inline MagickRealType GetPixelLuma(const Image *restrict image,
const Quantum *restrict pixel)
{
if (image->colorspace == GRAYColorspace)
return((MagickRealType) pixel[image->channel_map[GrayPixelChannel].offset]);
return(0.212656f*pixel[image->channel_map[RedPixelChannel].offset]+
0.715158f*pixel[image->channel_map[GreenPixelChannel].offset]+
0.072186f*pixel[image->channel_map[BluePixelChannel].offset]); /* Rec709 */
}
static inline MagickRealType GetPixelLuminance(const Image *restrict image,
const Quantum *restrict pixel)
{
MagickRealType
blue,
green,
red;
if (image->colorspace == GRAYColorspace)
return((MagickRealType) pixel[image->channel_map[GrayPixelChannel].offset]);
if (image->colorspace != sRGBColorspace)
return(0.212656f*pixel[image->channel_map[RedPixelChannel].offset]+
0.715158f*pixel[image->channel_map[GreenPixelChannel].offset]+
0.072186f*pixel[image->channel_map[BluePixelChannel].offset]);
red=DecodePixelGamma((MagickRealType)
pixel[image->channel_map[RedPixelChannel].offset]);
green=DecodePixelGamma((MagickRealType)
pixel[image->channel_map[GreenPixelChannel].offset]);
blue=DecodePixelGamma((MagickRealType)
pixel[image->channel_map[BluePixelChannel].offset]);
return(0.212656f*red+0.715158f*green+0.072186f*blue); /* Rec709 */
}
static inline Quantum GetPixelMagenta(const Image *restrict image,
const Quantum *restrict pixel)
{
return(pixel[image->channel_map[MagentaPixelChannel].offset]);
}
static inline PixelTrait GetPixelMagentaTraits(const Image *restrict image)
{
return(image->channel_map[MagentaPixelChannel].traits);
}
static inline Quantum GetPixelReadMask(const Image *restrict image,
const Quantum *restrict pixel)
{
if (image->channel_map[ReadMaskPixelChannel].traits == UndefinedPixelTrait)
return((Quantum) QuantumRange);
return(pixel[image->channel_map[ReadMaskPixelChannel].offset]);
}
static inline Quantum GetPixelWriteMask(const Image *restrict image,
const Quantum *restrict pixel)
{
if (image->channel_map[WriteMaskPixelChannel].traits == UndefinedPixelTrait)
return((Quantum) QuantumRange);
return(pixel[image->channel_map[WriteMaskPixelChannel].offset]);
}
static inline PixelTrait GetPixelReadMaskTraits(const Image *restrict image)
{
return(image->channel_map[ReadMaskPixelChannel].traits);
}
static inline size_t GetPixelMetaChannels(const Image *restrict image)
{
return(image->number_meta_channels);
}
static inline size_t GetPixelMetacontentExtent(const Image *restrict image)
{
return(image->metacontent_extent);
}
static inline Quantum GetPixelOpacity(const Image *restrict image,
const Quantum *restrict pixel)
{
if (image->channel_map[AlphaPixelChannel].traits != BlendPixelTrait)
return(QuantumRange-OpaqueAlpha);
return(QuantumRange-pixel[image->channel_map[AlphaPixelChannel].offset]);
}
static inline Quantum GetPixelRed(const Image *restrict image,
const Quantum *restrict pixel)
{
return(pixel[image->channel_map[RedPixelChannel].offset]);
}
static inline PixelTrait GetPixelRedTraits(const Image *restrict image)
{
return(image->channel_map[RedPixelChannel].traits);
}
static inline void GetPixelInfoPixel(const Image *restrict image,
const Quantum *restrict pixel,PixelInfo *restrict pixel_info)
{
pixel_info->colorspace=image->colorspace;
pixel_info->fuzz=image->fuzz;
pixel_info->red=(MagickRealType)
pixel[image->channel_map[RedPixelChannel].offset];
pixel_info->green=(MagickRealType)
pixel[image->channel_map[GreenPixelChannel].offset];
pixel_info->blue=(MagickRealType)
pixel[image->channel_map[BluePixelChannel].offset];
pixel_info->black=0.0f;
if (image->channel_map[BlackPixelChannel].traits != UndefinedPixelTrait)
pixel_info->black=(MagickRealType)
pixel[image->channel_map[BlackPixelChannel].offset];
pixel_info->alpha=(MagickRealType) OpaqueAlpha;
pixel_info->alpha_trait=UndefinedPixelTrait;
if (image->channel_map[AlphaPixelChannel].traits != UndefinedPixelTrait)
{
pixel_info->alpha=(MagickRealType)
pixel[image->channel_map[AlphaPixelChannel].offset];
pixel_info->alpha_trait=BlendPixelTrait;
}
pixel_info->index=0.0f;
if (image->channel_map[IndexPixelChannel].traits != UndefinedPixelTrait)
pixel_info->index=(MagickRealType)
pixel[image->channel_map[IndexPixelChannel].offset];
}
static inline PixelTrait GetPixelTraits(const Image *restrict image,
const PixelChannel channel)
{
return(image->channel_map[channel].traits);
}
static inline Quantum GetPixelY(const Image *restrict image,
const Quantum *restrict pixel)
{
return(pixel[image->channel_map[YPixelChannel].offset]);
}
static inline PixelTrait GetPixelYTraits(const Image *restrict image)
{
return(image->channel_map[YPixelChannel].traits);
}
static inline Quantum GetPixelYellow(const Image *restrict image,
const Quantum *restrict pixel)
{
return(pixel[image->channel_map[YellowPixelChannel].offset]);
}
static inline PixelTrait GetPixelYellowTraits(const Image *restrict image)
{
return(image->channel_map[YellowPixelChannel].traits);
}
static inline MagickRealType AbsolutePixelValue(const MagickRealType x)
{
return(x < 0.0f ? -x : x);
}
static inline MagickBooleanType IsPixelAtDepth(const Quantum pixel,
const QuantumAny range)
{
Quantum
quantum;
#if !defined(MAGICKCORE_HDRI_SUPPORT)
quantum=(Quantum) (((MagickRealType) QuantumRange*((QuantumAny)
(((MagickRealType) range*pixel)/QuantumRange+0.5)))/range+0.5);
#else
quantum=(Quantum) (((MagickRealType) QuantumRange*((QuantumAny)
(((MagickRealType) range*pixel)/QuantumRange+0.5)))/range);
#endif
return(pixel == quantum ? MagickTrue : MagickFalse);
}
static inline MagickBooleanType IsPixelEquivalent(const Image *restrict image,
const Quantum *restrict p,const PixelInfo *restrict q)
{
MagickRealType
blue,
green,
red;
red=(MagickRealType) p[image->channel_map[RedPixelChannel].offset];
green=(MagickRealType) p[image->channel_map[GreenPixelChannel].offset];
blue=(MagickRealType) p[image->channel_map[BluePixelChannel].offset];
if ((AbsolutePixelValue(red-q->red) < MagickEpsilon) &&
(AbsolutePixelValue(green-q->green) < MagickEpsilon) &&
(AbsolutePixelValue(blue-q->blue) < MagickEpsilon))
return(MagickTrue);
return(MagickFalse);
}
static inline MagickBooleanType IsPixelGray(const Image *restrict image,
const Quantum *restrict pixel)
{
MagickRealType
blue,
green,
red;
red=(MagickRealType) pixel[image->channel_map[RedPixelChannel].offset];
green=(MagickRealType) pixel[image->channel_map[GreenPixelChannel].offset];
blue=(MagickRealType) pixel[image->channel_map[BluePixelChannel].offset];
if ((AbsolutePixelValue(red-green) < MagickEpsilon) &&
(AbsolutePixelValue(green-blue) < MagickEpsilon))
return(MagickTrue);
return(MagickFalse);
}
static inline MagickBooleanType IsPixelInfoEquivalent(
const PixelInfo *restrict p,const PixelInfo *restrict q)
{
if ((p->alpha_trait != UndefinedPixelTrait) &&
(q->alpha_trait == UndefinedPixelTrait) &&
(AbsolutePixelValue(p->alpha-OpaqueAlpha) >= MagickEpsilon))
return(MagickFalse);
if ((q->alpha_trait != UndefinedPixelTrait) &&
(p->alpha_trait == UndefinedPixelTrait) &&
(AbsolutePixelValue(q->alpha-OpaqueAlpha)) >= MagickEpsilon)
return(MagickFalse);
if ((p->alpha_trait != UndefinedPixelTrait) &&
(q->alpha_trait != UndefinedPixelTrait))
{
if (AbsolutePixelValue(p->alpha-q->alpha) >= MagickEpsilon)
return(MagickFalse);
if (AbsolutePixelValue(p->alpha-TransparentAlpha) < MagickEpsilon)
return(MagickTrue);
}
if (AbsolutePixelValue(p->red-q->red) >= MagickEpsilon)
return(MagickFalse);
if (AbsolutePixelValue(p->green-q->green) >= MagickEpsilon)
return(MagickFalse);
if (AbsolutePixelValue(p->blue-q->blue) >= MagickEpsilon)
return(MagickFalse);
if ((p->colorspace == CMYKColorspace) &&
(AbsolutePixelValue(p->black-q->black) >= MagickEpsilon))
return(MagickFalse);
return(MagickTrue);
}
static inline MagickBooleanType IsPixelMonochrome(const Image *restrict image,
const Quantum *restrict pixel)
{
MagickRealType
blue,
green,
red;
red=(MagickRealType) pixel[image->channel_map[RedPixelChannel].offset];
if ((AbsolutePixelValue(red) >= MagickEpsilon) ||
(AbsolutePixelValue(red-QuantumRange) >= MagickEpsilon))
return(MagickFalse);
green=(MagickRealType) pixel[image->channel_map[GreenPixelChannel].offset];
blue=(MagickRealType) pixel[image->channel_map[BluePixelChannel].offset];
if ((AbsolutePixelValue(red-green) < MagickEpsilon) &&
(AbsolutePixelValue(green-blue) < MagickEpsilon))
return(MagickTrue);
return(MagickFalse);
}
static inline MagickBooleanType IsPixelInfoGray(
const PixelInfo *restrict pixel_info)
{
if ((pixel_info->colorspace != GRAYColorspace) &&
(pixel_info->colorspace != RGBColorspace))
return(MagickFalse);
if ((AbsolutePixelValue(pixel_info->red-pixel_info->green) < MagickEpsilon) &&
(AbsolutePixelValue(pixel_info->green-pixel_info->blue) < MagickEpsilon))
return(MagickTrue);
return(MagickFalse);
}
static inline MagickBooleanType IsPixelInfoMonochrome(
const PixelInfo *restrict pixel_info)
{
if ((pixel_info->colorspace != GRAYColorspace) &&
(pixel_info->colorspace != RGBColorspace))
return(MagickFalse);
if ((AbsolutePixelValue(pixel_info->red) >= MagickEpsilon) ||
(AbsolutePixelValue(pixel_info->red-QuantumRange) >= MagickEpsilon))
return(MagickFalse);
if ((AbsolutePixelValue(pixel_info->red-pixel_info->green) < MagickEpsilon) &&
(AbsolutePixelValue(pixel_info->green-pixel_info->blue) < MagickEpsilon))
return(MagickTrue);
return(MagickFalse);
}
static inline void SetPixela(const Image *restrict image,
const Quantum a,Quantum *restrict pixel)
{
if (image->channel_map[aPixelChannel].traits != UndefinedPixelTrait)
pixel[image->channel_map[aPixelChannel].offset]=a;
}
static inline void SetPixelAlpha(const Image *restrict image,
const Quantum alpha,Quantum *restrict pixel)
{
if (image->channel_map[AlphaPixelChannel].traits != UndefinedPixelTrait)
pixel[image->channel_map[AlphaPixelChannel].offset]=alpha;
}
static inline void SetPixelAlphaTraits(Image *image,const PixelTrait traits)
{
image->channel_map[AlphaPixelChannel].traits=traits;
}
static inline void SetPixelb(const Image *restrict image,
const Quantum b,Quantum *restrict pixel)
{
if (image->channel_map[bPixelChannel].traits != UndefinedPixelTrait)
pixel[image->channel_map[bPixelChannel].offset]=b;
}
static inline void SetPixelBackgoundColor(const Image *restrict image,
Quantum *restrict pixel)
{
register ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
pixel[i]=0;
pixel[image->channel_map[RedPixelChannel].offset]=
ClampToQuantum(image->background_color.red);
pixel[image->channel_map[GreenPixelChannel].offset]=
ClampToQuantum(image->background_color.green);
pixel[image->channel_map[BluePixelChannel].offset]=
ClampToQuantum(image->background_color.blue);
if (image->channel_map[BlackPixelChannel].traits != UndefinedPixelTrait)
pixel[image->channel_map[BlackPixelChannel].offset]=
ClampToQuantum(image->background_color.black);
if (image->channel_map[AlphaPixelChannel].traits != UndefinedPixelTrait)
pixel[image->channel_map[AlphaPixelChannel].offset]=
image->background_color.alpha_trait == UndefinedPixelTrait ? OpaqueAlpha :
ClampToQuantum(image->background_color.alpha);
}
static inline void SetPixelBlack(const Image *restrict image,
const Quantum black,Quantum *restrict pixel)
{
if (image->channel_map[BlackPixelChannel].traits != UndefinedPixelTrait)
pixel[image->channel_map[BlackPixelChannel].offset]=black;
}
static inline void SetPixelBlackTraits(Image *image,const PixelTrait traits)
{
image->channel_map[BlackPixelChannel].traits=traits;
}
static inline void SetPixelBlue(const Image *restrict image,const Quantum blue,
Quantum *restrict pixel)
{
pixel[image->channel_map[BluePixelChannel].offset]=blue;
}
static inline void SetPixelBlueTraits(Image *image,const PixelTrait traits)
{
image->channel_map[BluePixelChannel].traits=traits;
}
static inline void SetPixelCb(const Image *restrict image,const Quantum cb,
Quantum *restrict pixel)
{
pixel[image->channel_map[CbPixelChannel].offset]=cb;
}
static inline void SetPixelCbTraits(Image *image,const PixelTrait traits)
{
image->channel_map[CbPixelChannel].traits=traits;
}
static inline void SetPixelChannel(const Image *restrict image,
const PixelChannel channel,const Quantum quantum,Quantum *restrict pixel)
{
if (image->channel_map[channel].traits != UndefinedPixelTrait)
pixel[image->channel_map[channel].offset]=quantum;
}
static inline void SetPixelChannelAttributes(const Image *restrict image,
const PixelChannel channel,const PixelTrait traits,const ssize_t offset)
{
image->channel_map[offset].channel=channel;
image->channel_map[channel].offset=offset;
image->channel_map[channel].traits=traits;
}
static inline void SetPixelChannelChannel(const Image *restrict image,
const PixelChannel channel,const ssize_t offset)
{
image->channel_map[offset].channel=channel;
image->channel_map[channel].offset=offset;
}
static inline void SetPixelChannels(Image *image,const size_t number_channels)
{
image->number_channels=number_channels;
}
static inline void SetPixelChannelTraits(Image *image,
const PixelChannel channel,const PixelTrait traits)
{
image->channel_map[channel].traits=traits;
}
static inline void SetPixelCr(const Image *restrict image,const Quantum cr,
Quantum *restrict pixel)
{
pixel[image->channel_map[CrPixelChannel].offset]=cr;
}
static inline void SetPixelCrTraits(Image *image,const PixelTrait traits)
{
image->channel_map[CrPixelChannel].traits=traits;
}
static inline void SetPixelCyan(const Image *restrict image,const Quantum cyan,
Quantum *restrict pixel)
{
pixel[image->channel_map[CyanPixelChannel].offset]=cyan;
}
static inline void SetPixelGray(const Image *restrict image,const Quantum gray,
Quantum *restrict pixel)
{
pixel[image->channel_map[GrayPixelChannel].offset]=gray;
}
static inline void SetPixelGrayTraits(Image *image,const PixelTrait traits)
{
image->channel_map[GrayPixelChannel].traits=traits;
}
static inline void SetPixelGreen(const Image *restrict image,
const Quantum green,Quantum *restrict pixel)
{
pixel[image->channel_map[GreenPixelChannel].offset]=green;
}
static inline void SetPixelGreenTraits(Image *image,const PixelTrait traits)
{
image->channel_map[GreenPixelChannel].traits=traits;
}
static inline void SetPixelIndex(const Image *restrict image,
const Quantum index,Quantum *restrict pixel)
{
if (image->channel_map[IndexPixelChannel].traits != UndefinedPixelTrait)
pixel[image->channel_map[IndexPixelChannel].offset]=index;
}
static inline void SetPixelIndexTraits(Image *image,const PixelTrait traits)
{
image->channel_map[IndexPixelChannel].traits=traits;
}
static inline void SetPixelViaPixelInfo(const Image *restrict image,
const PixelInfo *restrict pixel_info,Quantum *restrict pixel)
{
pixel[image->channel_map[RedPixelChannel].offset]=
ClampToQuantum(pixel_info->red);
pixel[image->channel_map[GreenPixelChannel].offset]=
ClampToQuantum(pixel_info->green);
pixel[image->channel_map[BluePixelChannel].offset]=
ClampToQuantum(pixel_info->blue);
if (image->channel_map[BlackPixelChannel].traits != UndefinedPixelTrait)
pixel[image->channel_map[BlackPixelChannel].offset]=
ClampToQuantum(pixel_info->black);
if (image->channel_map[AlphaPixelChannel].traits != UndefinedPixelTrait)
pixel[image->channel_map[AlphaPixelChannel].offset]=
pixel_info->alpha_trait == UndefinedPixelTrait ? OpaqueAlpha :
ClampToQuantum(pixel_info->alpha);
}
static inline void SetPixelL(const Image *restrict image,const Quantum L,
Quantum *restrict pixel)
{
if (image->channel_map[LPixelChannel].traits != UndefinedPixelTrait)
pixel[image->channel_map[LPixelChannel].offset]=L;
}
static inline void SetPixelMagenta(const Image *restrict image,
const Quantum magenta,Quantum *restrict pixel)
{
pixel[image->channel_map[MagentaPixelChannel].offset]=magenta;
}
static inline void SetPixelMagentaTraits(Image *image,const PixelTrait traits)
{
image->channel_map[MagentaPixelChannel].traits=traits;
}
static inline void SetPixelReadMask(const Image *restrict image,
const Quantum mask,Quantum *restrict pixel)
{
if (image->channel_map[ReadMaskPixelChannel].traits != UndefinedPixelTrait)
pixel[image->channel_map[ReadMaskPixelChannel].offset]=mask;
}
static inline void SetPixelWriteMask(const Image *restrict image,
const Quantum mask,Quantum *restrict pixel)
{
if (image->channel_map[WriteMaskPixelChannel].traits != UndefinedPixelTrait)
pixel[image->channel_map[WriteMaskPixelChannel].offset]=mask;
}
static inline void SetPixelMetacontentExtent(Image *image,const size_t extent)
{
image->metacontent_extent=extent;
}
static inline void SetPixelOpacity(const Image *restrict image,
const Quantum alpha,Quantum *restrict pixel)
{
if (image->channel_map[AlphaPixelChannel].traits != UndefinedPixelTrait)
pixel[image->channel_map[AlphaPixelChannel].offset]=QuantumRange-alpha;
}
static inline void SetPixelRed(const Image *restrict image,const Quantum red,
Quantum *restrict pixel)
{
pixel[image->channel_map[RedPixelChannel].offset]=red;
}
static inline void SetPixelRedTraits(Image *image,const PixelTrait traits)
{
image->channel_map[RedPixelChannel].traits=traits;
}
static inline void SetPixelYellow(const Image *restrict image,
const Quantum yellow,Quantum *restrict pixel)
{
pixel[image->channel_map[YellowPixelChannel].offset]=yellow;
}
static inline void SetPixelYellowTraits(Image *image,const PixelTrait traits)
{
image->channel_map[YellowPixelChannel].traits=traits;
}
static inline void SetPixelY(const Image *restrict image,const Quantum y,
Quantum *restrict pixel)
{
pixel[image->channel_map[YPixelChannel].offset]=y;
}
static inline void SetPixelYTraits(Image *image,const PixelTrait traits)
{
image->channel_map[YPixelChannel].traits=traits;
}
#if defined(__cplusplus) || defined(c_plusplus)
}
#endif
#endif
|
1848_1
|
crossvul
|
h
|
CWE-119
|
Improper Restriction of Operations within the Bounds of a Memory Buffer - Certain languages allow direct addressing of memory locations and do not automatically ensure that these locations are valid for the memory buffer that is being referenced.
|
c
|
/*-------------------------------------------------------------------------
*
* datetime.h
* Definitions for date/time support code.
* The support code is shared with other date data types,
* including abstime, reltime, date, and time.
*
*
* Portions Copyright (c) 1996-2014, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* src/include/utils/datetime.h
*
*-------------------------------------------------------------------------
*/
#ifndef DATETIME_H
#define DATETIME_H
#include "nodes/nodes.h"
#include "utils/timestamp.h"
/* this struct is declared in utils/tzparser.h: */
struct tzEntry;
/* ----------------------------------------------------------------
* time types + support macros
*
* String definitions for standard time quantities.
*
* These strings are the defaults used to form output time strings.
* Other alternative forms are hardcoded into token tables in datetime.c.
* ----------------------------------------------------------------
*/
#define DAGO "ago"
#define DCURRENT "current"
#define EPOCH "epoch"
#define INVALID "invalid"
#define EARLY "-infinity"
#define LATE "infinity"
#define NOW "now"
#define TODAY "today"
#define TOMORROW "tomorrow"
#define YESTERDAY "yesterday"
#define ZULU "zulu"
#define DMICROSEC "usecond"
#define DMILLISEC "msecond"
#define DSECOND "second"
#define DMINUTE "minute"
#define DHOUR "hour"
#define DDAY "day"
#define DWEEK "week"
#define DMONTH "month"
#define DQUARTER "quarter"
#define DYEAR "year"
#define DDECADE "decade"
#define DCENTURY "century"
#define DMILLENNIUM "millennium"
#define DA_D "ad"
#define DB_C "bc"
#define DTIMEZONE "timezone"
/*
* Fundamental time field definitions for parsing.
*
* Meridian: am, pm, or 24-hour style.
* Millennium: ad, bc
*/
#define AM 0
#define PM 1
#define HR24 2
#define AD 0
#define BC 1
/*
* Fields for time decoding.
*
* Can't have more of these than there are bits in an unsigned int
* since these are turned into bit masks during parsing and decoding.
*
* Furthermore, the values for YEAR, MONTH, DAY, HOUR, MINUTE, SECOND
* must be in the range 0..14 so that the associated bitmasks can fit
* into the left half of an INTERVAL's typmod value. Since those bits
* are stored in typmods, you can't change them without initdb!
*/
#define RESERV 0
#define MONTH 1
#define YEAR 2
#define DAY 3
#define JULIAN 4
#define TZ 5
#define DTZ 6
#define DTZMOD 7
#define IGNORE_DTF 8
#define AMPM 9
#define HOUR 10
#define MINUTE 11
#define SECOND 12
#define MILLISECOND 13
#define MICROSECOND 14
#define DOY 15
#define DOW 16
#define UNITS 17
#define ADBC 18
/* these are only for relative dates */
#define AGO 19
#define ABS_BEFORE 20
#define ABS_AFTER 21
/* generic fields to help with parsing */
#define ISODATE 22
#define ISOTIME 23
/* these are only for parsing intervals */
#define WEEK 24
#define DECADE 25
#define CENTURY 26
#define MILLENNIUM 27
/* reserved for unrecognized string values */
#define UNKNOWN_FIELD 31
/*
* Token field definitions for time parsing and decoding.
* These need to fit into the datetkn table type.
* At the moment, that means keep them within [-127,127].
* These are also used for bit masks in DecodeDateDelta()
* so actually restrict them to within [0,31] for now.
* - thomas 97/06/19
* Not all of these fields are used for masks in DecodeDateDelta
* so allow some larger than 31. - thomas 1997-11-17
*/
#define DTK_NUMBER 0
#define DTK_STRING 1
#define DTK_DATE 2
#define DTK_TIME 3
#define DTK_TZ 4
#define DTK_AGO 5
#define DTK_SPECIAL 6
#define DTK_INVALID 7
#define DTK_CURRENT 8
#define DTK_EARLY 9
#define DTK_LATE 10
#define DTK_EPOCH 11
#define DTK_NOW 12
#define DTK_YESTERDAY 13
#define DTK_TODAY 14
#define DTK_TOMORROW 15
#define DTK_ZULU 16
#define DTK_DELTA 17
#define DTK_SECOND 18
#define DTK_MINUTE 19
#define DTK_HOUR 20
#define DTK_DAY 21
#define DTK_WEEK 22
#define DTK_MONTH 23
#define DTK_QUARTER 24
#define DTK_YEAR 25
#define DTK_DECADE 26
#define DTK_CENTURY 27
#define DTK_MILLENNIUM 28
#define DTK_MILLISEC 29
#define DTK_MICROSEC 30
#define DTK_JULIAN 31
#define DTK_DOW 32
#define DTK_DOY 33
#define DTK_TZ_HOUR 34
#define DTK_TZ_MINUTE 35
#define DTK_ISOYEAR 36
#define DTK_ISODOW 37
/*
* Bit mask definitions for time parsing.
*/
#define DTK_M(t) (0x01 << (t))
/* Convenience: a second, plus any fractional component */
#define DTK_ALL_SECS_M (DTK_M(SECOND) | DTK_M(MILLISECOND) | DTK_M(MICROSECOND))
#define DTK_DATE_M (DTK_M(YEAR) | DTK_M(MONTH) | DTK_M(DAY))
#define DTK_TIME_M (DTK_M(HOUR) | DTK_M(MINUTE) | DTK_ALL_SECS_M)
#define MAXDATELEN 63 /* maximum possible length of an input date
* string (not counting tr. null) */
#define MAXDATEFIELDS 25 /* maximum possible number of fields in a date
* string */
#define TOKMAXLEN 10 /* only this many chars are stored in
* datetktbl */
/* keep this struct small; it gets used a lot */
typedef struct
{
char token[TOKMAXLEN];
char type;
char value; /* this may be unsigned, alas */
} datetkn;
/* one of its uses is in tables of time zone abbreviations */
typedef struct TimeZoneAbbrevTable
{
int numabbrevs;
datetkn abbrevs[1]; /* VARIABLE LENGTH ARRAY */
} TimeZoneAbbrevTable;
/* FMODULO()
* Macro to replace modf(), which is broken on some platforms.
* t = input and remainder
* q = integer part
* u = divisor
*/
#define FMODULO(t,q,u) \
do { \
(q) = (((t) < 0) ? ceil((t) / (u)) : floor((t) / (u))); \
if ((q) != 0) (t) -= rint((q) * (u)); \
} while(0)
/* TMODULO()
* Like FMODULO(), but work on the timestamp datatype (either int64 or float8).
* We assume that int64 follows the C99 semantics for division (negative
* quotients truncate towards zero).
*/
#ifdef HAVE_INT64_TIMESTAMP
#define TMODULO(t,q,u) \
do { \
(q) = ((t) / (u)); \
if ((q) != 0) (t) -= ((q) * (u)); \
} while(0)
#else
#define TMODULO(t,q,u) \
do { \
(q) = (((t) < 0) ? ceil((t) / (u)) : floor((t) / (u))); \
if ((q) != 0) (t) -= rint((q) * (u)); \
} while(0)
#endif
/*
* Date/time validation
* Include check for leap year.
*/
extern const char *const months[]; /* months (3-char abbreviations) */
extern const char *const days[]; /* days (full names) */
extern const int day_tab[2][13];
#define isleap(y) (((y) % 4) == 0 && (((y) % 100) != 0 || ((y) % 400) == 0))
/*
* Datetime input parsing routines (ParseDateTime, DecodeDateTime, etc)
* return zero or a positive value on success. On failure, they return
* one of these negative code values. DateTimeParseError may be used to
* produce a correct ereport.
*/
#define DTERR_BAD_FORMAT (-1)
#define DTERR_FIELD_OVERFLOW (-2)
#define DTERR_MD_FIELD_OVERFLOW (-3) /* triggers hint about DateStyle */
#define DTERR_INTERVAL_OVERFLOW (-4)
#define DTERR_TZDISP_OVERFLOW (-5)
extern void GetCurrentDateTime(struct pg_tm * tm);
extern void GetCurrentTimeUsec(struct pg_tm * tm, fsec_t *fsec, int *tzp);
extern void j2date(int jd, int *year, int *month, int *day);
extern int date2j(int year, int month, int day);
extern int ParseDateTime(const char *timestr, char *workbuf, size_t buflen,
char **field, int *ftype,
int maxfields, int *numfields);
extern int DecodeDateTime(char **field, int *ftype,
int nf, int *dtype,
struct pg_tm * tm, fsec_t *fsec, int *tzp);
extern int DecodeTimeOnly(char **field, int *ftype,
int nf, int *dtype,
struct pg_tm * tm, fsec_t *fsec, int *tzp);
extern int DecodeInterval(char **field, int *ftype, int nf, int range,
int *dtype, struct pg_tm * tm, fsec_t *fsec);
extern int DecodeISO8601Interval(char *str,
int *dtype, struct pg_tm * tm, fsec_t *fsec);
extern void DateTimeParseError(int dterr, const char *str,
const char *datatype) __attribute__((noreturn));
extern int DetermineTimeZoneOffset(struct pg_tm * tm, pg_tz *tzp);
extern void EncodeDateOnly(struct pg_tm * tm, int style, char *str);
extern void EncodeTimeOnly(struct pg_tm * tm, fsec_t fsec, bool print_tz, int tz, int style, char *str);
extern void EncodeDateTime(struct pg_tm * tm, fsec_t fsec, bool print_tz, int tz, const char *tzn, int style, char *str);
extern void EncodeInterval(struct pg_tm * tm, fsec_t fsec, int style, char *str);
extern int ValidateDate(int fmask, bool isjulian, bool is2digits, bool bc,
struct pg_tm * tm);
extern int DecodeSpecial(int field, char *lowtoken, int *val);
extern int DecodeUnits(int field, char *lowtoken, int *val);
extern int j2day(int jd);
extern Node *TemporalTransform(int32 max_precis, Node *node);
extern bool CheckDateTokenTables(void);
extern void ConvertTimeZoneAbbrevs(TimeZoneAbbrevTable *tbl,
struct tzEntry *abbrevs, int n);
extern void InstallTimeZoneAbbrevs(TimeZoneAbbrevTable *tbl);
extern Datum pg_timezone_abbrevs(PG_FUNCTION_ARGS);
extern Datum pg_timezone_names(PG_FUNCTION_ARGS);
#endif /* DATETIME_H */
|
/*-------------------------------------------------------------------------
*
* datetime.h
* Definitions for date/time support code.
* The support code is shared with other date data types,
* including abstime, reltime, date, and time.
*
*
* Portions Copyright (c) 1996-2014, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* src/include/utils/datetime.h
*
*-------------------------------------------------------------------------
*/
#ifndef DATETIME_H
#define DATETIME_H
#include "nodes/nodes.h"
#include "utils/timestamp.h"
/* this struct is declared in utils/tzparser.h: */
struct tzEntry;
/* ----------------------------------------------------------------
* time types + support macros
*
* String definitions for standard time quantities.
*
* These strings are the defaults used to form output time strings.
* Other alternative forms are hardcoded into token tables in datetime.c.
* ----------------------------------------------------------------
*/
#define DAGO "ago"
#define DCURRENT "current"
#define EPOCH "epoch"
#define INVALID "invalid"
#define EARLY "-infinity"
#define LATE "infinity"
#define NOW "now"
#define TODAY "today"
#define TOMORROW "tomorrow"
#define YESTERDAY "yesterday"
#define ZULU "zulu"
#define DMICROSEC "usecond"
#define DMILLISEC "msecond"
#define DSECOND "second"
#define DMINUTE "minute"
#define DHOUR "hour"
#define DDAY "day"
#define DWEEK "week"
#define DMONTH "month"
#define DQUARTER "quarter"
#define DYEAR "year"
#define DDECADE "decade"
#define DCENTURY "century"
#define DMILLENNIUM "millennium"
#define DA_D "ad"
#define DB_C "bc"
#define DTIMEZONE "timezone"
/*
* Fundamental time field definitions for parsing.
*
* Meridian: am, pm, or 24-hour style.
* Millennium: ad, bc
*/
#define AM 0
#define PM 1
#define HR24 2
#define AD 0
#define BC 1
/*
* Fields for time decoding.
*
* Can't have more of these than there are bits in an unsigned int
* since these are turned into bit masks during parsing and decoding.
*
* Furthermore, the values for YEAR, MONTH, DAY, HOUR, MINUTE, SECOND
* must be in the range 0..14 so that the associated bitmasks can fit
* into the left half of an INTERVAL's typmod value. Since those bits
* are stored in typmods, you can't change them without initdb!
*/
#define RESERV 0
#define MONTH 1
#define YEAR 2
#define DAY 3
#define JULIAN 4
#define TZ 5
#define DTZ 6
#define DTZMOD 7
#define IGNORE_DTF 8
#define AMPM 9
#define HOUR 10
#define MINUTE 11
#define SECOND 12
#define MILLISECOND 13
#define MICROSECOND 14
#define DOY 15
#define DOW 16
#define UNITS 17
#define ADBC 18
/* these are only for relative dates */
#define AGO 19
#define ABS_BEFORE 20
#define ABS_AFTER 21
/* generic fields to help with parsing */
#define ISODATE 22
#define ISOTIME 23
/* these are only for parsing intervals */
#define WEEK 24
#define DECADE 25
#define CENTURY 26
#define MILLENNIUM 27
/* reserved for unrecognized string values */
#define UNKNOWN_FIELD 31
/*
* Token field definitions for time parsing and decoding.
* These need to fit into the datetkn table type.
* At the moment, that means keep them within [-127,127].
* These are also used for bit masks in DecodeDateDelta()
* so actually restrict them to within [0,31] for now.
* - thomas 97/06/19
* Not all of these fields are used for masks in DecodeDateDelta
* so allow some larger than 31. - thomas 1997-11-17
*/
#define DTK_NUMBER 0
#define DTK_STRING 1
#define DTK_DATE 2
#define DTK_TIME 3
#define DTK_TZ 4
#define DTK_AGO 5
#define DTK_SPECIAL 6
#define DTK_INVALID 7
#define DTK_CURRENT 8
#define DTK_EARLY 9
#define DTK_LATE 10
#define DTK_EPOCH 11
#define DTK_NOW 12
#define DTK_YESTERDAY 13
#define DTK_TODAY 14
#define DTK_TOMORROW 15
#define DTK_ZULU 16
#define DTK_DELTA 17
#define DTK_SECOND 18
#define DTK_MINUTE 19
#define DTK_HOUR 20
#define DTK_DAY 21
#define DTK_WEEK 22
#define DTK_MONTH 23
#define DTK_QUARTER 24
#define DTK_YEAR 25
#define DTK_DECADE 26
#define DTK_CENTURY 27
#define DTK_MILLENNIUM 28
#define DTK_MILLISEC 29
#define DTK_MICROSEC 30
#define DTK_JULIAN 31
#define DTK_DOW 32
#define DTK_DOY 33
#define DTK_TZ_HOUR 34
#define DTK_TZ_MINUTE 35
#define DTK_ISOYEAR 36
#define DTK_ISODOW 37
/*
* Bit mask definitions for time parsing.
*/
#define DTK_M(t) (0x01 << (t))
/* Convenience: a second, plus any fractional component */
#define DTK_ALL_SECS_M (DTK_M(SECOND) | DTK_M(MILLISECOND) | DTK_M(MICROSECOND))
#define DTK_DATE_M (DTK_M(YEAR) | DTK_M(MONTH) | DTK_M(DAY))
#define DTK_TIME_M (DTK_M(HOUR) | DTK_M(MINUTE) | DTK_ALL_SECS_M)
/*
* Working buffer size for input and output of interval, timestamp, etc.
* Inputs that need more working space will be rejected early. Longer outputs
* will overrun buffers, so this must suffice for all possible output. As of
* this writing, interval_out() needs the most space at ~90 bytes.
*/
#define MAXDATELEN 128
/* maximum possible number of fields in a date string */
#define MAXDATEFIELDS 25
/* only this many chars are stored in datetktbl */
#define TOKMAXLEN 10
/* keep this struct small; it gets used a lot */
typedef struct
{
char token[TOKMAXLEN];
char type;
char value; /* this may be unsigned, alas */
} datetkn;
/* one of its uses is in tables of time zone abbreviations */
typedef struct TimeZoneAbbrevTable
{
int numabbrevs;
datetkn abbrevs[1]; /* VARIABLE LENGTH ARRAY */
} TimeZoneAbbrevTable;
/* FMODULO()
* Macro to replace modf(), which is broken on some platforms.
* t = input and remainder
* q = integer part
* u = divisor
*/
#define FMODULO(t,q,u) \
do { \
(q) = (((t) < 0) ? ceil((t) / (u)) : floor((t) / (u))); \
if ((q) != 0) (t) -= rint((q) * (u)); \
} while(0)
/* TMODULO()
* Like FMODULO(), but work on the timestamp datatype (either int64 or float8).
* We assume that int64 follows the C99 semantics for division (negative
* quotients truncate towards zero).
*/
#ifdef HAVE_INT64_TIMESTAMP
#define TMODULO(t,q,u) \
do { \
(q) = ((t) / (u)); \
if ((q) != 0) (t) -= ((q) * (u)); \
} while(0)
#else
#define TMODULO(t,q,u) \
do { \
(q) = (((t) < 0) ? ceil((t) / (u)) : floor((t) / (u))); \
if ((q) != 0) (t) -= rint((q) * (u)); \
} while(0)
#endif
/*
* Date/time validation
* Include check for leap year.
*/
extern const char *const months[]; /* months (3-char abbreviations) */
extern const char *const days[]; /* days (full names) */
extern const int day_tab[2][13];
#define isleap(y) (((y) % 4) == 0 && (((y) % 100) != 0 || ((y) % 400) == 0))
/*
* Datetime input parsing routines (ParseDateTime, DecodeDateTime, etc)
* return zero or a positive value on success. On failure, they return
* one of these negative code values. DateTimeParseError may be used to
* produce a correct ereport.
*/
#define DTERR_BAD_FORMAT (-1)
#define DTERR_FIELD_OVERFLOW (-2)
#define DTERR_MD_FIELD_OVERFLOW (-3) /* triggers hint about DateStyle */
#define DTERR_INTERVAL_OVERFLOW (-4)
#define DTERR_TZDISP_OVERFLOW (-5)
extern void GetCurrentDateTime(struct pg_tm * tm);
extern void GetCurrentTimeUsec(struct pg_tm * tm, fsec_t *fsec, int *tzp);
extern void j2date(int jd, int *year, int *month, int *day);
extern int date2j(int year, int month, int day);
extern int ParseDateTime(const char *timestr, char *workbuf, size_t buflen,
char **field, int *ftype,
int maxfields, int *numfields);
extern int DecodeDateTime(char **field, int *ftype,
int nf, int *dtype,
struct pg_tm * tm, fsec_t *fsec, int *tzp);
extern int DecodeTimeOnly(char **field, int *ftype,
int nf, int *dtype,
struct pg_tm * tm, fsec_t *fsec, int *tzp);
extern int DecodeInterval(char **field, int *ftype, int nf, int range,
int *dtype, struct pg_tm * tm, fsec_t *fsec);
extern int DecodeISO8601Interval(char *str,
int *dtype, struct pg_tm * tm, fsec_t *fsec);
extern void DateTimeParseError(int dterr, const char *str,
const char *datatype) __attribute__((noreturn));
extern int DetermineTimeZoneOffset(struct pg_tm * tm, pg_tz *tzp);
extern void EncodeDateOnly(struct pg_tm * tm, int style, char *str);
extern void EncodeTimeOnly(struct pg_tm * tm, fsec_t fsec, bool print_tz, int tz, int style, char *str);
extern void EncodeDateTime(struct pg_tm * tm, fsec_t fsec, bool print_tz, int tz, const char *tzn, int style, char *str);
extern void EncodeInterval(struct pg_tm * tm, fsec_t fsec, int style, char *str);
extern int ValidateDate(int fmask, bool isjulian, bool is2digits, bool bc,
struct pg_tm * tm);
extern int DecodeSpecial(int field, char *lowtoken, int *val);
extern int DecodeUnits(int field, char *lowtoken, int *val);
extern int j2day(int jd);
extern Node *TemporalTransform(int32 max_precis, Node *node);
extern bool CheckDateTokenTables(void);
extern void ConvertTimeZoneAbbrevs(TimeZoneAbbrevTable *tbl,
struct tzEntry *abbrevs, int n);
extern void InstallTimeZoneAbbrevs(TimeZoneAbbrevTable *tbl);
extern Datum pg_timezone_abbrevs(PG_FUNCTION_ARGS);
extern Datum pg_timezone_names(PG_FUNCTION_ARGS);
#endif /* DATETIME_H */
|
2019_0
|
crossvul
|
h
|
CWE-119
|
Improper Restriction of Operations within the Bounds of a Memory Buffer - Certain languages allow direct addressing of memory locations and do not automatically ensure that these locations are valid for the memory buffer that is being referenced.
|
c
|
/* src/interfaces/ecpg/pgtypeslib/dt.h */
#ifndef DT_H
#define DT_H
#include <pgtypes_timestamp.h>
#define MAXTZLEN 10
#ifdef HAVE_INT64_TIMESTAMP
typedef int32 fsec_t;
#else
typedef double fsec_t;
/* round off to MAX_TIMESTAMP_PRECISION decimal places */
/* note: this is also used for rounding off intervals */
#define TS_PREC_INV 1000000.0
#define TSROUND(j) (rint(((double) (j)) * TS_PREC_INV) / TS_PREC_INV)
#endif
#define USE_POSTGRES_DATES 0
#define USE_ISO_DATES 1
#define USE_SQL_DATES 2
#define USE_GERMAN_DATES 3
#define INTSTYLE_POSTGRES 0
#define INTSTYLE_POSTGRES_VERBOSE 1
#define INTSTYLE_SQL_STANDARD 2
#define INTSTYLE_ISO_8601 3
#define INTERVAL_FULL_RANGE (0x7FFF)
#define INTERVAL_MASK(b) (1 << (b))
#define MAX_INTERVAL_PRECISION 6
#define DTERR_BAD_FORMAT (-1)
#define DTERR_FIELD_OVERFLOW (-2)
#define DTERR_MD_FIELD_OVERFLOW (-3) /* triggers hint about DateStyle */
#define DTERR_INTERVAL_OVERFLOW (-4)
#define DTERR_TZDISP_OVERFLOW (-5)
#define DAGO "ago"
#define EPOCH "epoch"
#define INVALID "invalid"
#define EARLY "-infinity"
#define LATE "infinity"
#define NOW "now"
#define TODAY "today"
#define TOMORROW "tomorrow"
#define YESTERDAY "yesterday"
#define ZULU "zulu"
#define DMICROSEC "usecond"
#define DMILLISEC "msecond"
#define DSECOND "second"
#define DMINUTE "minute"
#define DHOUR "hour"
#define DDAY "day"
#define DWEEK "week"
#define DMONTH "month"
#define DQUARTER "quarter"
#define DYEAR "year"
#define DDECADE "decade"
#define DCENTURY "century"
#define DMILLENNIUM "millennium"
#define DA_D "ad"
#define DB_C "bc"
#define DTIMEZONE "timezone"
#define DCURRENT "current"
/*
* Fundamental time field definitions for parsing.
*
* Meridian: am, pm, or 24-hour style.
* Millennium: ad, bc
*/
#define AM 0
#define PM 1
#define HR24 2
#define AD 0
#define BC 1
/*
* Fields for time decoding.
*
* Can't have more of these than there are bits in an unsigned int
* since these are turned into bit masks during parsing and decoding.
*
* Furthermore, the values for YEAR, MONTH, DAY, HOUR, MINUTE, SECOND
* must be in the range 0..14 so that the associated bitmasks can fit
* into the left half of an INTERVAL's typmod value.
*
* Copy&pasted these values from src/include/utils/datetime.h
* 2008-11-20, changing a number of their values.
*/
#define RESERV 0
#define MONTH 1
#define YEAR 2
#define DAY 3
#define JULIAN 4
#define TZ 5
#define DTZ 6
#define DTZMOD 7
#define IGNORE_DTF 8
#define AMPM 9
#define HOUR 10
#define MINUTE 11
#define SECOND 12
#define MILLISECOND 13
#define MICROSECOND 14
#define DOY 15
#define DOW 16
#define UNITS 17
#define ADBC 18
/* these are only for relative dates */
#define AGO 19
#define ABS_BEFORE 20
#define ABS_AFTER 21
/* generic fields to help with parsing */
#define ISODATE 22
#define ISOTIME 23
/* reserved for unrecognized string values */
#define UNKNOWN_FIELD 31
/*
* Token field definitions for time parsing and decoding.
* These need to fit into the datetkn table type.
* At the moment, that means keep them within [-127,127].
* These are also used for bit masks in DecodeDateDelta()
* so actually restrict them to within [0,31] for now.
* - thomas 97/06/19
* Not all of these fields are used for masks in DecodeDateDelta
* so allow some larger than 31. - thomas 1997-11-17
*/
#define DTK_NUMBER 0
#define DTK_STRING 1
#define DTK_DATE 2
#define DTK_TIME 3
#define DTK_TZ 4
#define DTK_AGO 5
#define DTK_SPECIAL 6
#define DTK_INVALID 7
#define DTK_CURRENT 8
#define DTK_EARLY 9
#define DTK_LATE 10
#define DTK_EPOCH 11
#define DTK_NOW 12
#define DTK_YESTERDAY 13
#define DTK_TODAY 14
#define DTK_TOMORROW 15
#define DTK_ZULU 16
#define DTK_DELTA 17
#define DTK_SECOND 18
#define DTK_MINUTE 19
#define DTK_HOUR 20
#define DTK_DAY 21
#define DTK_WEEK 22
#define DTK_MONTH 23
#define DTK_QUARTER 24
#define DTK_YEAR 25
#define DTK_DECADE 26
#define DTK_CENTURY 27
#define DTK_MILLENNIUM 28
#define DTK_MILLISEC 29
#define DTK_MICROSEC 30
#define DTK_JULIAN 31
#define DTK_DOW 32
#define DTK_DOY 33
#define DTK_TZ_HOUR 34
#define DTK_TZ_MINUTE 35
#define DTK_ISOYEAR 36
#define DTK_ISODOW 37
/*
* Bit mask definitions for time parsing.
*/
/* Copy&pasted these values from src/include/utils/datetime.h */
#define DTK_M(t) (0x01 << (t))
#define DTK_ALL_SECS_M (DTK_M(SECOND) | DTK_M(MILLISECOND) | DTK_M(MICROSECOND))
#define DTK_DATE_M (DTK_M(YEAR) | DTK_M(MONTH) | DTK_M(DAY))
#define DTK_TIME_M (DTK_M(HOUR) | DTK_M(MINUTE) | DTK_M(SECOND))
#define MAXDATELEN 63 /* maximum possible length of an input date
* string (not counting tr. null) */
#define MAXDATEFIELDS 25 /* maximum possible number of fields in a date
* string */
#define TOKMAXLEN 10 /* only this many chars are stored in
* datetktbl */
/* keep this struct small; it gets used a lot */
typedef struct
{
#if defined(_AIX)
char *token;
#else
char token[TOKMAXLEN];
#endif /* _AIX */
char type;
char value; /* this may be unsigned, alas */
} datetkn;
/* FMODULO()
* Macro to replace modf(), which is broken on some platforms.
* t = input and remainder
* q = integer part
* u = divisor
*/
#define FMODULO(t,q,u) \
do { \
(q) = (((t) < 0) ? ceil((t) / (u)): floor((t) / (u))); \
if ((q) != 0) (t) -= rint((q) * (u)); \
} while(0)
/* TMODULO()
* Like FMODULO(), but work on the timestamp datatype (either int64 or float8).
* We assume that int64 follows the C99 semantics for division (negative
* quotients truncate towards zero).
*/
#ifdef HAVE_INT64_TIMESTAMP
#define TMODULO(t,q,u) \
do { \
(q) = ((t) / (u)); \
if ((q) != 0) (t) -= ((q) * (u)); \
} while(0)
#else
#define TMODULO(t,q,u) \
do { \
(q) = (((t) < 0) ? ceil((t) / (u)): floor((t) / (u))); \
if ((q) != 0) (t) -= rint((q) * (u)); \
} while(0)
#endif
/* in both timestamp.h and ecpg/dt.h */
#define DAYS_PER_YEAR 365.25 /* assumes leap year every four years */
#define MONTHS_PER_YEAR 12
/*
* DAYS_PER_MONTH is very imprecise. The more accurate value is
* 365.2425/12 = 30.436875, or '30 days 10:29:06'. Right now we only
* return an integral number of days, but someday perhaps we should
* also return a 'time' value to be used as well. ISO 8601 suggests
* 30 days.
*/
#define DAYS_PER_MONTH 30 /* assumes exactly 30 days per month */
#define HOURS_PER_DAY 24 /* assume no daylight savings time changes */
/*
* This doesn't adjust for uneven daylight savings time intervals or leap
* seconds, and it crudely estimates leap years. A more accurate value
* for days per years is 365.2422.
*/
#define SECS_PER_YEAR (36525 * 864) /* avoid floating-point computation */
#define SECS_PER_DAY 86400
#define SECS_PER_HOUR 3600
#define SECS_PER_MINUTE 60
#define MINS_PER_HOUR 60
#ifdef HAVE_INT64_TIMESTAMP
#define USECS_PER_DAY INT64CONST(86400000000)
#define USECS_PER_HOUR INT64CONST(3600000000)
#define USECS_PER_MINUTE INT64CONST(60000000)
#define USECS_PER_SEC INT64CONST(1000000)
#endif
/*
* Date/time validation
* Include check for leap year.
*/
#define isleap(y) (((y) % 4) == 0 && (((y) % 100) != 0 || ((y) % 400) == 0))
/* Julian date support for date2j() and j2date()
*
* IS_VALID_JULIAN checks the minimum date exactly, but is a bit sloppy
* about the maximum, since it's far enough out to not be especially
* interesting.
*/
#define JULIAN_MINYEAR (-4713)
#define JULIAN_MINMONTH (11)
#define JULIAN_MINDAY (24)
#define JULIAN_MAXYEAR (5874898)
#define IS_VALID_JULIAN(y,m,d) ((((y) > JULIAN_MINYEAR) \
|| (((y) == JULIAN_MINYEAR) && (((m) > JULIAN_MINMONTH) \
|| (((m) == JULIAN_MINMONTH) && ((d) >= JULIAN_MINDAY))))) \
&& ((y) < JULIAN_MAXYEAR))
#define UTIME_MINYEAR (1901)
#define UTIME_MINMONTH (12)
#define UTIME_MINDAY (14)
#define UTIME_MAXYEAR (2038)
#define UTIME_MAXMONTH (01)
#define UTIME_MAXDAY (18)
#define IS_VALID_UTIME(y,m,d) ((((y) > UTIME_MINYEAR) \
|| (((y) == UTIME_MINYEAR) && (((m) > UTIME_MINMONTH) \
|| (((m) == UTIME_MINMONTH) && ((d) >= UTIME_MINDAY))))) \
&& (((y) < UTIME_MAXYEAR) \
|| (((y) == UTIME_MAXYEAR) && (((m) < UTIME_MAXMONTH) \
|| (((m) == UTIME_MAXMONTH) && ((d) <= UTIME_MAXDAY))))))
#ifdef HAVE_INT64_TIMESTAMP
#define DT_NOBEGIN (-INT64CONST(0x7fffffffffffffff) - 1)
#define DT_NOEND (INT64CONST(0x7fffffffffffffff))
#else
#ifdef HUGE_VAL
#define DT_NOBEGIN (-HUGE_VAL)
#define DT_NOEND (HUGE_VAL)
#else
#define DT_NOBEGIN (-DBL_MAX)
#define DT_NOEND (DBL_MAX)
#endif
#endif /* HAVE_INT64_TIMESTAMP */
#define TIMESTAMP_NOBEGIN(j) do {(j) = DT_NOBEGIN;} while (0)
#define TIMESTAMP_NOEND(j) do {(j) = DT_NOEND;} while (0)
#define TIMESTAMP_IS_NOBEGIN(j) ((j) == DT_NOBEGIN)
#define TIMESTAMP_IS_NOEND(j) ((j) == DT_NOEND)
#define TIMESTAMP_NOT_FINITE(j) (TIMESTAMP_IS_NOBEGIN(j) || TIMESTAMP_IS_NOEND(j))
int DecodeInterval(char **, int *, int, int *, struct tm *, fsec_t *);
int DecodeTime(char *, int *, struct tm *, fsec_t *);
int EncodeDateTime(struct tm * tm, fsec_t fsec, bool print_tz, int tz, const char *tzn, int style, char *str, bool EuroDates);
int EncodeInterval(struct tm * tm, fsec_t fsec, int style, char *str);
int tm2timestamp(struct tm *, fsec_t, int *, timestamp *);
int DecodeUnits(int field, char *lowtoken, int *val);
bool CheckDateTokenTables(void);
int EncodeDateOnly(struct tm * tm, int style, char *str, bool EuroDates);
int GetEpochTime(struct tm *);
int ParseDateTime(char *, char *, char **, int *, int *, char **);
int DecodeDateTime(char **, int *, int, int *, struct tm *, fsec_t *, bool);
void j2date(int, int *, int *, int *);
void GetCurrentDateTime(struct tm *);
int date2j(int, int, int);
void TrimTrailingZeros(char *);
void dt2time(double, int *, int *, int *, fsec_t *);
int PGTYPEStimestamp_defmt_scan(char **str, char *fmt, timestamp * d,
int *year, int *month, int *day,
int *hour, int *minute, int *second,
int *tz);
extern char *pgtypes_date_weekdays_short[];
extern char *pgtypes_date_months[];
extern char *months[];
extern char *days[];
extern int day_tab[2][13];
#endif /* DT_H */
|
/* src/interfaces/ecpg/pgtypeslib/dt.h */
#ifndef DT_H
#define DT_H
#include <pgtypes_timestamp.h>
#define MAXTZLEN 10
#ifdef HAVE_INT64_TIMESTAMP
typedef int32 fsec_t;
#else
typedef double fsec_t;
/* round off to MAX_TIMESTAMP_PRECISION decimal places */
/* note: this is also used for rounding off intervals */
#define TS_PREC_INV 1000000.0
#define TSROUND(j) (rint(((double) (j)) * TS_PREC_INV) / TS_PREC_INV)
#endif
#define USE_POSTGRES_DATES 0
#define USE_ISO_DATES 1
#define USE_SQL_DATES 2
#define USE_GERMAN_DATES 3
#define INTSTYLE_POSTGRES 0
#define INTSTYLE_POSTGRES_VERBOSE 1
#define INTSTYLE_SQL_STANDARD 2
#define INTSTYLE_ISO_8601 3
#define INTERVAL_FULL_RANGE (0x7FFF)
#define INTERVAL_MASK(b) (1 << (b))
#define MAX_INTERVAL_PRECISION 6
#define DTERR_BAD_FORMAT (-1)
#define DTERR_FIELD_OVERFLOW (-2)
#define DTERR_MD_FIELD_OVERFLOW (-3) /* triggers hint about DateStyle */
#define DTERR_INTERVAL_OVERFLOW (-4)
#define DTERR_TZDISP_OVERFLOW (-5)
#define DAGO "ago"
#define EPOCH "epoch"
#define INVALID "invalid"
#define EARLY "-infinity"
#define LATE "infinity"
#define NOW "now"
#define TODAY "today"
#define TOMORROW "tomorrow"
#define YESTERDAY "yesterday"
#define ZULU "zulu"
#define DMICROSEC "usecond"
#define DMILLISEC "msecond"
#define DSECOND "second"
#define DMINUTE "minute"
#define DHOUR "hour"
#define DDAY "day"
#define DWEEK "week"
#define DMONTH "month"
#define DQUARTER "quarter"
#define DYEAR "year"
#define DDECADE "decade"
#define DCENTURY "century"
#define DMILLENNIUM "millennium"
#define DA_D "ad"
#define DB_C "bc"
#define DTIMEZONE "timezone"
#define DCURRENT "current"
/*
* Fundamental time field definitions for parsing.
*
* Meridian: am, pm, or 24-hour style.
* Millennium: ad, bc
*/
#define AM 0
#define PM 1
#define HR24 2
#define AD 0
#define BC 1
/*
* Fields for time decoding.
*
* Can't have more of these than there are bits in an unsigned int
* since these are turned into bit masks during parsing and decoding.
*
* Furthermore, the values for YEAR, MONTH, DAY, HOUR, MINUTE, SECOND
* must be in the range 0..14 so that the associated bitmasks can fit
* into the left half of an INTERVAL's typmod value.
*
* Copy&pasted these values from src/include/utils/datetime.h
* 2008-11-20, changing a number of their values.
*/
#define RESERV 0
#define MONTH 1
#define YEAR 2
#define DAY 3
#define JULIAN 4
#define TZ 5
#define DTZ 6
#define DTZMOD 7
#define IGNORE_DTF 8
#define AMPM 9
#define HOUR 10
#define MINUTE 11
#define SECOND 12
#define MILLISECOND 13
#define MICROSECOND 14
#define DOY 15
#define DOW 16
#define UNITS 17
#define ADBC 18
/* these are only for relative dates */
#define AGO 19
#define ABS_BEFORE 20
#define ABS_AFTER 21
/* generic fields to help with parsing */
#define ISODATE 22
#define ISOTIME 23
/* reserved for unrecognized string values */
#define UNKNOWN_FIELD 31
/*
* Token field definitions for time parsing and decoding.
* These need to fit into the datetkn table type.
* At the moment, that means keep them within [-127,127].
* These are also used for bit masks in DecodeDateDelta()
* so actually restrict them to within [0,31] for now.
* - thomas 97/06/19
* Not all of these fields are used for masks in DecodeDateDelta
* so allow some larger than 31. - thomas 1997-11-17
*/
#define DTK_NUMBER 0
#define DTK_STRING 1
#define DTK_DATE 2
#define DTK_TIME 3
#define DTK_TZ 4
#define DTK_AGO 5
#define DTK_SPECIAL 6
#define DTK_INVALID 7
#define DTK_CURRENT 8
#define DTK_EARLY 9
#define DTK_LATE 10
#define DTK_EPOCH 11
#define DTK_NOW 12
#define DTK_YESTERDAY 13
#define DTK_TODAY 14
#define DTK_TOMORROW 15
#define DTK_ZULU 16
#define DTK_DELTA 17
#define DTK_SECOND 18
#define DTK_MINUTE 19
#define DTK_HOUR 20
#define DTK_DAY 21
#define DTK_WEEK 22
#define DTK_MONTH 23
#define DTK_QUARTER 24
#define DTK_YEAR 25
#define DTK_DECADE 26
#define DTK_CENTURY 27
#define DTK_MILLENNIUM 28
#define DTK_MILLISEC 29
#define DTK_MICROSEC 30
#define DTK_JULIAN 31
#define DTK_DOW 32
#define DTK_DOY 33
#define DTK_TZ_HOUR 34
#define DTK_TZ_MINUTE 35
#define DTK_ISOYEAR 36
#define DTK_ISODOW 37
/*
* Bit mask definitions for time parsing.
*/
/* Copy&pasted these values from src/include/utils/datetime.h */
#define DTK_M(t) (0x01 << (t))
#define DTK_ALL_SECS_M (DTK_M(SECOND) | DTK_M(MILLISECOND) | DTK_M(MICROSECOND))
#define DTK_DATE_M (DTK_M(YEAR) | DTK_M(MONTH) | DTK_M(DAY))
#define DTK_TIME_M (DTK_M(HOUR) | DTK_M(MINUTE) | DTK_M(SECOND))
/*
* Working buffer size for input and output of interval, timestamp, etc.
* Inputs that need more working space will be rejected early. Longer outputs
* will overrun buffers, so this must suffice for all possible output. As of
* this writing, PGTYPESinterval_to_asc() needs the most space at ~90 bytes.
*/
#define MAXDATELEN 128
/* maximum possible number of fields in a date string */
#define MAXDATEFIELDS 25
/* only this many chars are stored in datetktbl */
#define TOKMAXLEN 10
/* keep this struct small; it gets used a lot */
typedef struct
{
#if defined(_AIX)
char *token;
#else
char token[TOKMAXLEN];
#endif /* _AIX */
char type;
char value; /* this may be unsigned, alas */
} datetkn;
/* FMODULO()
* Macro to replace modf(), which is broken on some platforms.
* t = input and remainder
* q = integer part
* u = divisor
*/
#define FMODULO(t,q,u) \
do { \
(q) = (((t) < 0) ? ceil((t) / (u)): floor((t) / (u))); \
if ((q) != 0) (t) -= rint((q) * (u)); \
} while(0)
/* TMODULO()
* Like FMODULO(), but work on the timestamp datatype (either int64 or float8).
* We assume that int64 follows the C99 semantics for division (negative
* quotients truncate towards zero).
*/
#ifdef HAVE_INT64_TIMESTAMP
#define TMODULO(t,q,u) \
do { \
(q) = ((t) / (u)); \
if ((q) != 0) (t) -= ((q) * (u)); \
} while(0)
#else
#define TMODULO(t,q,u) \
do { \
(q) = (((t) < 0) ? ceil((t) / (u)): floor((t) / (u))); \
if ((q) != 0) (t) -= rint((q) * (u)); \
} while(0)
#endif
/* in both timestamp.h and ecpg/dt.h */
#define DAYS_PER_YEAR 365.25 /* assumes leap year every four years */
#define MONTHS_PER_YEAR 12
/*
* DAYS_PER_MONTH is very imprecise. The more accurate value is
* 365.2425/12 = 30.436875, or '30 days 10:29:06'. Right now we only
* return an integral number of days, but someday perhaps we should
* also return a 'time' value to be used as well. ISO 8601 suggests
* 30 days.
*/
#define DAYS_PER_MONTH 30 /* assumes exactly 30 days per month */
#define HOURS_PER_DAY 24 /* assume no daylight savings time changes */
/*
* This doesn't adjust for uneven daylight savings time intervals or leap
* seconds, and it crudely estimates leap years. A more accurate value
* for days per years is 365.2422.
*/
#define SECS_PER_YEAR (36525 * 864) /* avoid floating-point computation */
#define SECS_PER_DAY 86400
#define SECS_PER_HOUR 3600
#define SECS_PER_MINUTE 60
#define MINS_PER_HOUR 60
#ifdef HAVE_INT64_TIMESTAMP
#define USECS_PER_DAY INT64CONST(86400000000)
#define USECS_PER_HOUR INT64CONST(3600000000)
#define USECS_PER_MINUTE INT64CONST(60000000)
#define USECS_PER_SEC INT64CONST(1000000)
#endif
/*
* Date/time validation
* Include check for leap year.
*/
#define isleap(y) (((y) % 4) == 0 && (((y) % 100) != 0 || ((y) % 400) == 0))
/* Julian date support for date2j() and j2date()
*
* IS_VALID_JULIAN checks the minimum date exactly, but is a bit sloppy
* about the maximum, since it's far enough out to not be especially
* interesting.
*/
#define JULIAN_MINYEAR (-4713)
#define JULIAN_MINMONTH (11)
#define JULIAN_MINDAY (24)
#define JULIAN_MAXYEAR (5874898)
#define IS_VALID_JULIAN(y,m,d) ((((y) > JULIAN_MINYEAR) \
|| (((y) == JULIAN_MINYEAR) && (((m) > JULIAN_MINMONTH) \
|| (((m) == JULIAN_MINMONTH) && ((d) >= JULIAN_MINDAY))))) \
&& ((y) < JULIAN_MAXYEAR))
#define UTIME_MINYEAR (1901)
#define UTIME_MINMONTH (12)
#define UTIME_MINDAY (14)
#define UTIME_MAXYEAR (2038)
#define UTIME_MAXMONTH (01)
#define UTIME_MAXDAY (18)
#define IS_VALID_UTIME(y,m,d) ((((y) > UTIME_MINYEAR) \
|| (((y) == UTIME_MINYEAR) && (((m) > UTIME_MINMONTH) \
|| (((m) == UTIME_MINMONTH) && ((d) >= UTIME_MINDAY))))) \
&& (((y) < UTIME_MAXYEAR) \
|| (((y) == UTIME_MAXYEAR) && (((m) < UTIME_MAXMONTH) \
|| (((m) == UTIME_MAXMONTH) && ((d) <= UTIME_MAXDAY))))))
#ifdef HAVE_INT64_TIMESTAMP
#define DT_NOBEGIN (-INT64CONST(0x7fffffffffffffff) - 1)
#define DT_NOEND (INT64CONST(0x7fffffffffffffff))
#else
#ifdef HUGE_VAL
#define DT_NOBEGIN (-HUGE_VAL)
#define DT_NOEND (HUGE_VAL)
#else
#define DT_NOBEGIN (-DBL_MAX)
#define DT_NOEND (DBL_MAX)
#endif
#endif /* HAVE_INT64_TIMESTAMP */
#define TIMESTAMP_NOBEGIN(j) do {(j) = DT_NOBEGIN;} while (0)
#define TIMESTAMP_NOEND(j) do {(j) = DT_NOEND;} while (0)
#define TIMESTAMP_IS_NOBEGIN(j) ((j) == DT_NOBEGIN)
#define TIMESTAMP_IS_NOEND(j) ((j) == DT_NOEND)
#define TIMESTAMP_NOT_FINITE(j) (TIMESTAMP_IS_NOBEGIN(j) || TIMESTAMP_IS_NOEND(j))
int DecodeInterval(char **, int *, int, int *, struct tm *, fsec_t *);
int DecodeTime(char *, int *, struct tm *, fsec_t *);
int EncodeDateTime(struct tm * tm, fsec_t fsec, bool print_tz, int tz, const char *tzn, int style, char *str, bool EuroDates);
int EncodeInterval(struct tm * tm, fsec_t fsec, int style, char *str);
int tm2timestamp(struct tm *, fsec_t, int *, timestamp *);
int DecodeUnits(int field, char *lowtoken, int *val);
bool CheckDateTokenTables(void);
int EncodeDateOnly(struct tm * tm, int style, char *str, bool EuroDates);
int GetEpochTime(struct tm *);
int ParseDateTime(char *, char *, char **, int *, int *, char **);
int DecodeDateTime(char **, int *, int, int *, struct tm *, fsec_t *, bool);
void j2date(int, int *, int *, int *);
void GetCurrentDateTime(struct tm *);
int date2j(int, int, int);
void TrimTrailingZeros(char *);
void dt2time(double, int *, int *, int *, fsec_t *);
int PGTYPEStimestamp_defmt_scan(char **str, char *fmt, timestamp * d,
int *year, int *month, int *day,
int *hour, int *minute, int *second,
int *tz);
extern char *pgtypes_date_weekdays_short[];
extern char *pgtypes_date_months[];
extern char *months[];
extern char *days[];
extern int day_tab[2][13];
#endif /* DT_H */
|
2019_2
|
crossvul
|
h
|
CWE-119
|
Improper Restriction of Operations within the Bounds of a Memory Buffer - Certain languages allow direct addressing of memory locations and do not automatically ensure that these locations are valid for the memory buffer that is being referenced.
|
c
|
/*
* INET An implementation of the TCP/IP protocol suite for the LINUX
* operating system. INET is implemented using the BSD Socket
* interface as the means of communication with the user level.
*
* Definitions for the Interfaces handler.
*
* Version: @(#)dev.h 1.0.10 08/12/93
*
* Authors: Ross Biro
* Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
* Corey Minyard <wf-rch!minyard@relay.EU.net>
* Donald J. Becker, <becker@cesdis.gsfc.nasa.gov>
* Alan Cox, <alan@lxorguk.ukuu.org.uk>
* Bjorn Ekwall. <bj0rn@blox.se>
* Pekka Riikonen <priikone@poseidon.pspt.fi>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* Moved to /usr/include/linux for NET3
*/
#ifndef _LINUX_NETDEVICE_H
#define _LINUX_NETDEVICE_H
#include <linux/if.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>
#include <linux/if_link.h>
#ifdef __KERNEL__
#include <linux/pm_qos_params.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <asm/atomic.h>
#include <asm/cache.h>
#include <asm/byteorder.h>
#include <linux/device.h>
#include <linux/percpu.h>
#include <linux/rculist.h>
#include <linux/dmaengine.h>
#include <linux/workqueue.h>
#include <linux/ethtool.h>
#include <net/net_namespace.h>
#include <net/dsa.h>
#ifdef CONFIG_DCB
#include <net/dcbnl.h>
#endif
struct vlan_group;
struct netpoll_info;
/* 802.11 specific */
struct wireless_dev;
/* source back-compat hooks */
#define SET_ETHTOOL_OPS(netdev,ops) \
( (netdev)->ethtool_ops = (ops) )
#define HAVE_ALLOC_NETDEV /* feature macro: alloc_xxxdev
functions are available. */
#define HAVE_FREE_NETDEV /* free_netdev() */
#define HAVE_NETDEV_PRIV /* netdev_priv() */
/* Backlog congestion levels */
#define NET_RX_SUCCESS 0 /* keep 'em coming, baby */
#define NET_RX_DROP 1 /* packet dropped */
/*
* Transmit return codes: transmit return codes originate from three different
* namespaces:
*
* - qdisc return codes
* - driver transmit return codes
* - errno values
*
* Drivers are allowed to return any one of those in their hard_start_xmit()
* function. Real network devices commonly used with qdiscs should only return
* the driver transmit return codes though - when qdiscs are used, the actual
* transmission happens asynchronously, so the value is not propagated to
* higher layers. Virtual network devices transmit synchronously, in this case
* the driver transmit return codes are consumed by dev_queue_xmit(), all
* others are propagated to higher layers.
*/
/* qdisc ->enqueue() return codes. */
#define NET_XMIT_SUCCESS 0x00
#define NET_XMIT_DROP 0x01 /* skb dropped */
#define NET_XMIT_CN 0x02 /* congestion notification */
#define NET_XMIT_POLICED 0x03 /* skb is shot by police */
#define NET_XMIT_MASK 0x0f /* qdisc flags in net/sch_generic.h */
/* NET_XMIT_CN is special. It does not guarantee that this packet is lost. It
* indicates that the device will soon be dropping packets, or already drops
* some packets of the same priority; prompting us to send less aggressively. */
#define net_xmit_eval(e) ((e) == NET_XMIT_CN ? 0 : (e))
#define net_xmit_errno(e) ((e) != NET_XMIT_CN ? -ENOBUFS : 0)
/* Driver transmit return codes */
#define NETDEV_TX_MASK 0xf0
enum netdev_tx {
__NETDEV_TX_MIN = INT_MIN, /* make sure enum is signed */
NETDEV_TX_OK = 0x00, /* driver took care of packet */
NETDEV_TX_BUSY = 0x10, /* driver tx path was busy*/
NETDEV_TX_LOCKED = 0x20, /* driver tx lock was already taken */
};
typedef enum netdev_tx netdev_tx_t;
/*
* Current order: NETDEV_TX_MASK > NET_XMIT_MASK >= 0 is significant;
* hard_start_xmit() return < NET_XMIT_MASK means skb was consumed.
*/
static inline bool dev_xmit_complete(int rc)
{
/*
* Positive cases with an skb consumed by a driver:
* - successful transmission (rc == NETDEV_TX_OK)
* - error while transmitting (rc < 0)
* - error while queueing to a different device (rc & NET_XMIT_MASK)
*/
if (likely(rc < NET_XMIT_MASK))
return true;
return false;
}
#endif
#define MAX_ADDR_LEN 32 /* Largest hardware address length */
#ifdef __KERNEL__
/*
* Compute the worst case header length according to the protocols
* used.
*/
#if defined(CONFIG_WLAN) || defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
# if defined(CONFIG_MAC80211_MESH)
# define LL_MAX_HEADER 128
# else
# define LL_MAX_HEADER 96
# endif
#elif defined(CONFIG_TR) || defined(CONFIG_TR_MODULE)
# define LL_MAX_HEADER 48
#else
# define LL_MAX_HEADER 32
#endif
#if !defined(CONFIG_NET_IPIP) && !defined(CONFIG_NET_IPIP_MODULE) && \
!defined(CONFIG_NET_IPGRE) && !defined(CONFIG_NET_IPGRE_MODULE) && \
!defined(CONFIG_IPV6_SIT) && !defined(CONFIG_IPV6_SIT_MODULE) && \
!defined(CONFIG_IPV6_TUNNEL) && !defined(CONFIG_IPV6_TUNNEL_MODULE)
#define MAX_HEADER LL_MAX_HEADER
#else
#define MAX_HEADER (LL_MAX_HEADER + 48)
#endif
/*
* Old network device statistics. Fields are native words
* (unsigned long) so they can be read and written atomically.
* Each field is padded to 64 bits for compatibility with
* rtnl_link_stats64.
*/
#if BITS_PER_LONG == 64
#define NET_DEVICE_STATS_DEFINE(name) unsigned long name
#elif defined(__LITTLE_ENDIAN)
#define NET_DEVICE_STATS_DEFINE(name) unsigned long name, pad_ ## name
#else
#define NET_DEVICE_STATS_DEFINE(name) unsigned long pad_ ## name, name
#endif
struct net_device_stats {
NET_DEVICE_STATS_DEFINE(rx_packets);
NET_DEVICE_STATS_DEFINE(tx_packets);
NET_DEVICE_STATS_DEFINE(rx_bytes);
NET_DEVICE_STATS_DEFINE(tx_bytes);
NET_DEVICE_STATS_DEFINE(rx_errors);
NET_DEVICE_STATS_DEFINE(tx_errors);
NET_DEVICE_STATS_DEFINE(rx_dropped);
NET_DEVICE_STATS_DEFINE(tx_dropped);
NET_DEVICE_STATS_DEFINE(multicast);
NET_DEVICE_STATS_DEFINE(collisions);
NET_DEVICE_STATS_DEFINE(rx_length_errors);
NET_DEVICE_STATS_DEFINE(rx_over_errors);
NET_DEVICE_STATS_DEFINE(rx_crc_errors);
NET_DEVICE_STATS_DEFINE(rx_frame_errors);
NET_DEVICE_STATS_DEFINE(rx_fifo_errors);
NET_DEVICE_STATS_DEFINE(rx_missed_errors);
NET_DEVICE_STATS_DEFINE(tx_aborted_errors);
NET_DEVICE_STATS_DEFINE(tx_carrier_errors);
NET_DEVICE_STATS_DEFINE(tx_fifo_errors);
NET_DEVICE_STATS_DEFINE(tx_heartbeat_errors);
NET_DEVICE_STATS_DEFINE(tx_window_errors);
NET_DEVICE_STATS_DEFINE(rx_compressed);
NET_DEVICE_STATS_DEFINE(tx_compressed);
};
#endif /* __KERNEL__ */
/* Media selection options. */
enum {
IF_PORT_UNKNOWN = 0,
IF_PORT_10BASE2,
IF_PORT_10BASET,
IF_PORT_AUI,
IF_PORT_100BASET,
IF_PORT_100BASETX,
IF_PORT_100BASEFX
};
#ifdef __KERNEL__
#include <linux/cache.h>
#include <linux/skbuff.h>
struct neighbour;
struct neigh_parms;
struct sk_buff;
struct netdev_hw_addr {
struct list_head list;
unsigned char addr[MAX_ADDR_LEN];
unsigned char type;
#define NETDEV_HW_ADDR_T_LAN 1
#define NETDEV_HW_ADDR_T_SAN 2
#define NETDEV_HW_ADDR_T_SLAVE 3
#define NETDEV_HW_ADDR_T_UNICAST 4
#define NETDEV_HW_ADDR_T_MULTICAST 5
int refcount;
bool synced;
bool global_use;
struct rcu_head rcu_head;
};
struct netdev_hw_addr_list {
struct list_head list;
int count;
};
#define netdev_hw_addr_list_count(l) ((l)->count)
#define netdev_hw_addr_list_empty(l) (netdev_hw_addr_list_count(l) == 0)
#define netdev_hw_addr_list_for_each(ha, l) \
list_for_each_entry(ha, &(l)->list, list)
#define netdev_uc_count(dev) netdev_hw_addr_list_count(&(dev)->uc)
#define netdev_uc_empty(dev) netdev_hw_addr_list_empty(&(dev)->uc)
#define netdev_for_each_uc_addr(ha, dev) \
netdev_hw_addr_list_for_each(ha, &(dev)->uc)
#define netdev_mc_count(dev) netdev_hw_addr_list_count(&(dev)->mc)
#define netdev_mc_empty(dev) netdev_hw_addr_list_empty(&(dev)->mc)
#define netdev_for_each_mc_addr(ha, dev) \
netdev_hw_addr_list_for_each(ha, &(dev)->mc)
struct hh_cache {
struct hh_cache *hh_next; /* Next entry */
atomic_t hh_refcnt; /* number of users */
/*
* We want hh_output, hh_len, hh_lock and hh_data be a in a separate
* cache line on SMP.
* They are mostly read, but hh_refcnt may be changed quite frequently,
* incurring cache line ping pongs.
*/
__be16 hh_type ____cacheline_aligned_in_smp;
/* protocol identifier, f.e ETH_P_IP
* NOTE: For VLANs, this will be the
* encapuslated type. --BLG
*/
u16 hh_len; /* length of header */
int (*hh_output)(struct sk_buff *skb);
seqlock_t hh_lock;
/* cached hardware header; allow for machine alignment needs. */
#define HH_DATA_MOD 16
#define HH_DATA_OFF(__len) \
(HH_DATA_MOD - (((__len - 1) & (HH_DATA_MOD - 1)) + 1))
#define HH_DATA_ALIGN(__len) \
(((__len)+(HH_DATA_MOD-1))&~(HH_DATA_MOD - 1))
unsigned long hh_data[HH_DATA_ALIGN(LL_MAX_HEADER) / sizeof(long)];
};
/* Reserve HH_DATA_MOD byte aligned hard_header_len, but at least that much.
* Alternative is:
* dev->hard_header_len ? (dev->hard_header_len +
* (HH_DATA_MOD - 1)) & ~(HH_DATA_MOD - 1) : 0
*
* We could use other alignment values, but we must maintain the
* relationship HH alignment <= LL alignment.
*
* LL_ALLOCATED_SPACE also takes into account the tailroom the device
* may need.
*/
#define LL_RESERVED_SPACE(dev) \
((((dev)->hard_header_len+(dev)->needed_headroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
#define LL_RESERVED_SPACE_EXTRA(dev,extra) \
((((dev)->hard_header_len+(dev)->needed_headroom+(extra))&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
#define LL_ALLOCATED_SPACE(dev) \
((((dev)->hard_header_len+(dev)->needed_headroom+(dev)->needed_tailroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
struct header_ops {
int (*create) (struct sk_buff *skb, struct net_device *dev,
unsigned short type, const void *daddr,
const void *saddr, unsigned len);
int (*parse)(const struct sk_buff *skb, unsigned char *haddr);
int (*rebuild)(struct sk_buff *skb);
#define HAVE_HEADER_CACHE
int (*cache)(const struct neighbour *neigh, struct hh_cache *hh);
void (*cache_update)(struct hh_cache *hh,
const struct net_device *dev,
const unsigned char *haddr);
};
/* These flag bits are private to the generic network queueing
* layer, they may not be explicitly referenced by any other
* code.
*/
enum netdev_state_t {
__LINK_STATE_START,
__LINK_STATE_PRESENT,
__LINK_STATE_NOCARRIER,
__LINK_STATE_LINKWATCH_PENDING,
__LINK_STATE_DORMANT,
};
/*
* This structure holds at boot time configured netdevice settings. They
* are then used in the device probing.
*/
struct netdev_boot_setup {
char name[IFNAMSIZ];
struct ifmap map;
};
#define NETDEV_BOOT_SETUP_MAX 8
extern int __init netdev_boot_setup(char *str);
/*
* Structure for NAPI scheduling similar to tasklet but with weighting
*/
struct napi_struct {
/* The poll_list must only be managed by the entity which
* changes the state of the NAPI_STATE_SCHED bit. This means
* whoever atomically sets that bit can add this napi_struct
* to the per-cpu poll_list, and whoever clears that bit
* can remove from the list right before clearing the bit.
*/
struct list_head poll_list;
unsigned long state;
int weight;
int (*poll)(struct napi_struct *, int);
#ifdef CONFIG_NETPOLL
spinlock_t poll_lock;
int poll_owner;
#endif
unsigned int gro_count;
struct net_device *dev;
struct list_head dev_list;
struct sk_buff *gro_list;
struct sk_buff *skb;
};
enum {
NAPI_STATE_SCHED, /* Poll is scheduled */
NAPI_STATE_DISABLE, /* Disable pending */
NAPI_STATE_NPSVC, /* Netpoll - don't dequeue from poll_list */
};
enum gro_result {
GRO_MERGED,
GRO_MERGED_FREE,
GRO_HELD,
GRO_NORMAL,
GRO_DROP,
};
typedef enum gro_result gro_result_t;
typedef struct sk_buff *rx_handler_func_t(struct sk_buff *skb);
extern void __napi_schedule(struct napi_struct *n);
static inline int napi_disable_pending(struct napi_struct *n)
{
return test_bit(NAPI_STATE_DISABLE, &n->state);
}
/**
* napi_schedule_prep - check if napi can be scheduled
* @n: napi context
*
* Test if NAPI routine is already running, and if not mark
* it as running. This is used as a condition variable
* insure only one NAPI poll instance runs. We also make
* sure there is no pending NAPI disable.
*/
static inline int napi_schedule_prep(struct napi_struct *n)
{
return !napi_disable_pending(n) &&
!test_and_set_bit(NAPI_STATE_SCHED, &n->state);
}
/**
* napi_schedule - schedule NAPI poll
* @n: napi context
*
* Schedule NAPI poll routine to be called if it is not already
* running.
*/
static inline void napi_schedule(struct napi_struct *n)
{
if (napi_schedule_prep(n))
__napi_schedule(n);
}
/* Try to reschedule poll. Called by dev->poll() after napi_complete(). */
static inline int napi_reschedule(struct napi_struct *napi)
{
if (napi_schedule_prep(napi)) {
__napi_schedule(napi);
return 1;
}
return 0;
}
/**
* napi_complete - NAPI processing complete
* @n: napi context
*
* Mark NAPI processing as complete.
*/
extern void __napi_complete(struct napi_struct *n);
extern void napi_complete(struct napi_struct *n);
/**
* napi_disable - prevent NAPI from scheduling
* @n: napi context
*
* Stop NAPI from being scheduled on this context.
* Waits till any outstanding processing completes.
*/
static inline void napi_disable(struct napi_struct *n)
{
set_bit(NAPI_STATE_DISABLE, &n->state);
while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))
msleep(1);
clear_bit(NAPI_STATE_DISABLE, &n->state);
}
/**
* napi_enable - enable NAPI scheduling
* @n: napi context
*
* Resume NAPI from being scheduled on this context.
* Must be paired with napi_disable.
*/
static inline void napi_enable(struct napi_struct *n)
{
BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
smp_mb__before_clear_bit();
clear_bit(NAPI_STATE_SCHED, &n->state);
}
#ifdef CONFIG_SMP
/**
 * napi_synchronize - wait until NAPI is not running
 * @n: napi context
 *
 * Wait until NAPI is done being scheduled on this context.
 * Waits till any outstanding processing completes but
 * does not disable future activations.
 */
static inline void napi_synchronize(const struct napi_struct *n)
{
	/* Unlike napi_disable(), only observe SCHED -- never take it. */
	while (test_bit(NAPI_STATE_SCHED, &n->state))
		msleep(1);
}
#else
/* On UP a scheduled NAPI poll cannot be running concurrently with us. */
# define napi_synchronize(n) barrier()
#endif
/* Per-TX-queue state bits held in struct netdev_queue.state. */
enum netdev_queue_state_t {
	__QUEUE_STATE_XOFF,	/* set while the queue is stopped */
	__QUEUE_STATE_FROZEN,	/* see netif_tx_queue_frozen() */
};
/* One hardware transmit queue of a net_device. */
struct netdev_queue {
/*
 * read mostly part
 */
	struct net_device	*dev;
	struct Qdisc	*qdisc;
	unsigned long	state;		/* enum netdev_queue_state_t bits */
	struct Qdisc	*qdisc_sleeping;
/*
 * write mostly part
 */
	spinlock_t	_xmit_lock ____cacheline_aligned_in_smp;
	int		xmit_lock_owner;
	/*
	 * please use this field instead of dev->trans_start
	 */
	unsigned long	trans_start;
	unsigned long	tx_bytes;
	unsigned long	tx_packets;
	unsigned long	tx_dropped;
} ____cacheline_aligned_in_smp;
#ifdef CONFIG_RPS
/*
 * This structure holds an RPS map which can be of variable length. The
 * map is an array of CPUs.
 */
struct rps_map {
	unsigned int len;	/* number of valid entries in cpus[] */
	struct rcu_head rcu;
	u16 cpus[0];		/* variable-length tail of CPU ids */
};
/* Bytes needed for a map holding _num CPU entries.  The argument is
 * parenthesized so that expression arguments expand with the intended
 * precedence.
 */
#define RPS_MAP_SIZE(_num) (sizeof(struct rps_map) + ((_num) * sizeof(u16)))
/*
 * The rps_dev_flow structure contains the mapping of a flow to a CPU and the
 * tail pointer for that CPU's input queue at the time of last enqueue.
 */
struct rps_dev_flow {
	u16 cpu;		/* CPU this flow was last steered to */
	u16 fill;
	unsigned int last_qtail;
};
/*
 * The rps_dev_flow_table structure contains a table of flow mappings.
 */
struct rps_dev_flow_table {
	unsigned int mask;	/* table size - 1, used to index flows[] */
	struct rcu_head rcu;
	struct work_struct free_work;
	struct rps_dev_flow flows[0];	/* variable-length tail */
};
/* Bytes needed for a table of _num flows.  The argument is parenthesized
 * so that expression arguments expand with the intended precedence.
 */
#define RPS_DEV_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_dev_flow_table) + \
    ((_num) * sizeof(struct rps_dev_flow)))
/*
 * The rps_sock_flow_table contains mappings of flows to the last CPU
 * on which they were processed by the application (set in recvmsg).
 */
struct rps_sock_flow_table {
	unsigned int mask;	/* table size - 1, used to index ents[] */
	u16 ents[0];		/* per-flow last CPU, RPS_NO_CPU when unset */
};
/* Bytes needed for a table of _num entries.  The argument is parenthesized
 * so that expression arguments expand with the intended precedence.
 */
#define RPS_SOCK_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_sock_flow_table) + \
    ((_num) * sizeof(u16)))
/* Sentinel: no CPU recorded for this flow. */
#define RPS_NO_CPU 0xffff
/* Record the CPU currently processing flow @hash in @table. */
static inline void rps_record_sock_flow(struct rps_sock_flow_table *table,
					u32 hash)
{
	if (table && hash) {
		unsigned int cpu, index = hash & table->mask;
		/* We only give a hint, preemption can change cpu under us */
		cpu = raw_smp_processor_id();
		/* Avoid dirtying the cache line when nothing changed. */
		if (table->ents[index] != cpu)
			table->ents[index] = cpu;
	}
}
/* Forget the CPU recorded for flow @hash in @table. */
static inline void rps_reset_sock_flow(struct rps_sock_flow_table *table,
				       u32 hash)
{
	if (!table || !hash)
		return;
	table->ents[hash & table->mask] = RPS_NO_CPU;
}
extern struct rps_sock_flow_table *rps_sock_flow_table;
/* This structure contains an instance of an RX queue. */
struct netdev_rx_queue {
	struct rps_map *rps_map;		/* CPUs eligible for this queue */
	struct rps_dev_flow_table *rps_flow_table;
	struct kobject kobj;			/* sysfs representation */
	struct netdev_rx_queue *first;		/* first queue of the device's array */
	atomic_t count;				/* queues in the array (on first) */
} ____cacheline_aligned_in_smp;
#endif /* CONFIG_RPS */
/*
* This structure defines the management hooks for network devices.
* The following hooks can be defined; unless noted otherwise, they are
* optional and can be filled with a null pointer.
*
 * int (*ndo_init)(struct net_device *dev);
 *     This function is called once when network device is registered.
 *     The network device can use this for any late stage initialization
 *     or semantic validation. It can fail with an error code which will
 *     be propagated back to register_netdev
*
* void (*ndo_uninit)(struct net_device *dev);
* This function is called when device is unregistered or when registration
* fails. It is not called if init fails.
*
 * int (*ndo_open)(struct net_device *dev);
 *     This function is called when network device transitions to the up
 *     state.
 *
 * int (*ndo_stop)(struct net_device *dev);
 *     This function is called when network device transitions to the down
 *     state.
*
* netdev_tx_t (*ndo_start_xmit)(struct sk_buff *skb,
* struct net_device *dev);
 *	Called when a packet needs to be transmitted.
 *	Must return NETDEV_TX_OK, NETDEV_TX_BUSY.
 *	(can also return NETDEV_TX_LOCKED iff NETIF_F_LLTX)
 *	Required; can not be NULL.
*
 * u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb);
 *	Called to decide which queue to use when device supports multiple
 *	transmit queues.
*
 * void (*ndo_change_rx_flags)(struct net_device *dev, int flags);
 *	This function is called to allow device receiver to make
 *	changes to configuration when multicast or promiscuous is enabled.
*
 * void (*ndo_set_rx_mode)(struct net_device *dev);
 *	This function is called when device changes address list filtering.
*
* void (*ndo_set_multicast_list)(struct net_device *dev);
* This function is called when the multicast address list changes.
*
* int (*ndo_set_mac_address)(struct net_device *dev, void *addr);
* This function is called when the Media Access Control address
* needs to be changed. If this interface is not defined, the
* mac address can not be changed.
*
* int (*ndo_validate_addr)(struct net_device *dev);
* Test if Media Access Control address is valid for the device.
*
* int (*ndo_do_ioctl)(struct net_device *dev, struct ifreq *ifr, int cmd);
* Called when a user request an ioctl which can't be handled by
* the generic interface code. If not defined ioctl's return
* not supported error code.
*
* int (*ndo_set_config)(struct net_device *dev, struct ifmap *map);
* Used to set network devices bus interface parameters. This interface
* is retained for legacy reason, new devices should use the bus
* interface (PCI) for low level management.
*
* int (*ndo_change_mtu)(struct net_device *dev, int new_mtu);
* Called when a user wants to change the Maximum Transfer Unit
* of a device. If not defined, any request to change MTU will
* will return an error.
*
* void (*ndo_tx_timeout)(struct net_device *dev);
* Callback uses when the transmitter has not made any progress
* for dev->watchdog ticks.
*
* struct rtnl_link_stats64* (*ndo_get_stats64)(struct net_device *dev);
* struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);
* Called when a user wants to get the network device usage
* statistics. Drivers must do one of the following:
 *	1. Define @ndo_get_stats64 to update a rtnl_link_stats64 structure
 *	   (which should normally be dev->stats64) and return a pointer to
 *	   it. The structure must not be changed asynchronously.
* 2. Define @ndo_get_stats to update a net_device_stats structure
* (which should normally be dev->stats) and return a pointer to
* it. The structure may be changed asynchronously only if each
* field is written atomically.
* 3. Update dev->stats asynchronously and atomically, and define
* neither operation.
*
 * void (*ndo_vlan_rx_register)(struct net_device *dev, struct vlan_group *grp);
 *	If device supports VLAN receive acceleration
 *	(ie. dev->features & NETIF_F_HW_VLAN_RX), then this function is called
* when vlan groups for the device changes. Note: grp is NULL
* if no vlan's groups are being used.
*
* void (*ndo_vlan_rx_add_vid)(struct net_device *dev, unsigned short vid);
* If device support VLAN filtering (dev->features & NETIF_F_HW_VLAN_FILTER)
* this function is called when a VLAN id is registered.
*
* void (*ndo_vlan_rx_kill_vid)(struct net_device *dev, unsigned short vid);
* If device support VLAN filtering (dev->features & NETIF_F_HW_VLAN_FILTER)
* this function is called when a VLAN id is unregistered.
*
* void (*ndo_poll_controller)(struct net_device *dev);
*
* SR-IOV management functions.
* int (*ndo_set_vf_mac)(struct net_device *dev, int vf, u8* mac);
* int (*ndo_set_vf_vlan)(struct net_device *dev, int vf, u16 vlan, u8 qos);
* int (*ndo_set_vf_tx_rate)(struct net_device *dev, int vf, int rate);
* int (*ndo_get_vf_config)(struct net_device *dev,
* int vf, struct ifla_vf_info *ivf);
* int (*ndo_set_vf_port)(struct net_device *dev, int vf,
* struct nlattr *port[]);
* int (*ndo_get_vf_port)(struct net_device *dev, int vf, struct sk_buff *skb);
*/
#define HAVE_NET_DEVICE_OPS
/* Management hooks for network devices.  Per-operation semantics are
 * documented in the large comment block preceding this structure.
 */
struct net_device_ops {
	int			(*ndo_init)(struct net_device *dev);
	void			(*ndo_uninit)(struct net_device *dev);
	int			(*ndo_open)(struct net_device *dev);
	int			(*ndo_stop)(struct net_device *dev);
	netdev_tx_t		(*ndo_start_xmit) (struct sk_buff *skb,
						   struct net_device *dev);
	u16			(*ndo_select_queue)(struct net_device *dev,
						    struct sk_buff *skb);
	void			(*ndo_change_rx_flags)(struct net_device *dev,
						       int flags);
	void			(*ndo_set_rx_mode)(struct net_device *dev);
	void			(*ndo_set_multicast_list)(struct net_device *dev);
	int			(*ndo_set_mac_address)(struct net_device *dev,
						       void *addr);
	int			(*ndo_validate_addr)(struct net_device *dev);
	int			(*ndo_do_ioctl)(struct net_device *dev,
						struct ifreq *ifr, int cmd);
	int			(*ndo_set_config)(struct net_device *dev,
						  struct ifmap *map);
	int			(*ndo_change_mtu)(struct net_device *dev,
						  int new_mtu);
	int			(*ndo_neigh_setup)(struct net_device *dev,
						   struct neigh_parms *);
	void			(*ndo_tx_timeout) (struct net_device *dev);
	struct rtnl_link_stats64* (*ndo_get_stats64)(struct net_device *dev);
	struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);
	void			(*ndo_vlan_rx_register)(struct net_device *dev,
							struct vlan_group *grp);
	void			(*ndo_vlan_rx_add_vid)(struct net_device *dev,
						       unsigned short vid);
	void			(*ndo_vlan_rx_kill_vid)(struct net_device *dev,
							unsigned short vid);
#ifdef CONFIG_NET_POLL_CONTROLLER
	void			(*ndo_poll_controller)(struct net_device *dev);
	int			(*ndo_netpoll_setup)(struct net_device *dev,
						     struct netpoll_info *info);
	void			(*ndo_netpoll_cleanup)(struct net_device *dev);
#endif
	/* SR-IOV virtual function management hooks */
	int			(*ndo_set_vf_mac)(struct net_device *dev,
						  int queue, u8 *mac);
	int			(*ndo_set_vf_vlan)(struct net_device *dev,
						   int queue, u16 vlan, u8 qos);
	int			(*ndo_set_vf_tx_rate)(struct net_device *dev,
						      int vf, int rate);
	int			(*ndo_get_vf_config)(struct net_device *dev,
						     int vf,
						     struct ifla_vf_info *ivf);
	int			(*ndo_set_vf_port)(struct net_device *dev,
						   int vf,
						   struct nlattr *port[]);
	int			(*ndo_get_vf_port)(struct net_device *dev,
						   int vf, struct sk_buff *skb);
#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
	/* FCoE offload hooks */
	int			(*ndo_fcoe_enable)(struct net_device *dev);
	int			(*ndo_fcoe_disable)(struct net_device *dev);
	int			(*ndo_fcoe_ddp_setup)(struct net_device *dev,
						      u16 xid,
						      struct scatterlist *sgl,
						      unsigned int sgc);
	int			(*ndo_fcoe_ddp_done)(struct net_device *dev,
						     u16 xid);
#define NETDEV_FCOE_WWNN 0
#define NETDEV_FCOE_WWPN 1
	int			(*ndo_fcoe_get_wwn)(struct net_device *dev,
						    u64 *wwn, int type);
#endif
};
/*
* The DEVICE structure.
* Actually, this whole structure is a big mistake. It mixes I/O
* data with strictly "high-level" data, and it has to know about
* almost every data structure used in the INET module.
*
* FIXME: cleanup struct net_device such that network protocol info
* moves out.
*/
struct net_device {
	/*
	 * This is the first field of the "visible" part of this structure
	 * (i.e. as seen by users in the "Space.c" file).  It is the name
	 * of the interface.
	 */
	char			name[IFNAMSIZ];
	struct pm_qos_request_list	*pm_qos_req;
	/* device name hash chain */
	struct hlist_node	name_hlist;
	/* snmp alias */
	char			*ifalias;
	/*
	 *	I/O specific fields
	 *	FIXME: Merge these and struct ifmap into one
	 */
	unsigned long		mem_end;	/* shared mem end	*/
	unsigned long		mem_start;	/* shared mem start	*/
	unsigned long		base_addr;	/* device I/O address	*/
	unsigned int		irq;		/* device IRQ number	*/
	/*
	 *	Some hardware also needs these fields, but they are not
	 *	part of the usual set specified in Space.c.
	 */
	unsigned char		if_port;	/* Selectable AUI, TP,..*/
	unsigned char		dma;		/* DMA channel		*/
	/* device state bits; tested by e.g. netif_running() */
	unsigned long		state;
	struct list_head	dev_list;
	struct list_head	napi_list;
	struct list_head	unreg_list;
	/* Net device features */
	unsigned long		features;
#define NETIF_F_SG		1	/* Scatter/gather IO. */
#define NETIF_F_IP_CSUM		2	/* Can checksum TCP/UDP over IPv4. */
#define NETIF_F_NO_CSUM		4	/* Does not require checksum. F.e. loopack. */
#define NETIF_F_HW_CSUM		8	/* Can checksum all the packets. */
#define NETIF_F_IPV6_CSUM	16	/* Can checksum TCP/UDP over IPV6 */
#define NETIF_F_HIGHDMA		32	/* Can DMA to high memory. */
#define NETIF_F_FRAGLIST	64	/* Scatter/gather IO. */
#define NETIF_F_HW_VLAN_TX	128	/* Transmit VLAN hw acceleration */
#define NETIF_F_HW_VLAN_RX	256	/* Receive VLAN hw acceleration */
#define NETIF_F_HW_VLAN_FILTER	512	/* Receive filtering on VLAN */
#define NETIF_F_VLAN_CHALLENGED	1024	/* Device cannot handle VLAN packets */
#define NETIF_F_GSO		2048	/* Enable software GSO. */
#define NETIF_F_LLTX		4096	/* LockLess TX - deprecated. Please */
					/* do not use LLTX in new drivers */
#define NETIF_F_NETNS_LOCAL	8192	/* Does not change network namespaces */
#define NETIF_F_GRO		16384	/* Generic receive offload */
#define NETIF_F_LRO		32768	/* large receive offload */
/* the GSO_MASK reserves bits 16 through 23 */
#define NETIF_F_FCOE_CRC	(1 << 24) /* FCoE CRC32 */
#define NETIF_F_SCTP_CSUM	(1 << 25) /* SCTP checksum offload */
#define NETIF_F_FCOE_MTU	(1 << 26) /* Supports max FCoE MTU, 2158 bytes*/
#define NETIF_F_NTUPLE		(1 << 27) /* N-tuple filters supported */
#define NETIF_F_RXHASH		(1 << 28) /* Receive hashing offload */
	/* Segmentation offload features */
#define NETIF_F_GSO_SHIFT	16
#define NETIF_F_GSO_MASK	0x00ff0000
#define NETIF_F_TSO		(SKB_GSO_TCPV4 << NETIF_F_GSO_SHIFT)
#define NETIF_F_UFO		(SKB_GSO_UDP << NETIF_F_GSO_SHIFT)
#define NETIF_F_GSO_ROBUST	(SKB_GSO_DODGY << NETIF_F_GSO_SHIFT)
#define NETIF_F_TSO_ECN		(SKB_GSO_TCP_ECN << NETIF_F_GSO_SHIFT)
#define NETIF_F_TSO6		(SKB_GSO_TCPV6 << NETIF_F_GSO_SHIFT)
#define NETIF_F_FSO		(SKB_GSO_FCOE << NETIF_F_GSO_SHIFT)
	/* List of features with software fallbacks. */
#define NETIF_F_GSO_SOFTWARE	(NETIF_F_TSO | NETIF_F_TSO_ECN | \
				 NETIF_F_TSO6 | NETIF_F_UFO)
#define NETIF_F_GEN_CSUM	(NETIF_F_NO_CSUM | NETIF_F_HW_CSUM)
#define NETIF_F_V4_CSUM		(NETIF_F_GEN_CSUM | NETIF_F_IP_CSUM)
#define NETIF_F_V6_CSUM		(NETIF_F_GEN_CSUM | NETIF_F_IPV6_CSUM)
#define NETIF_F_ALL_CSUM	(NETIF_F_V4_CSUM | NETIF_F_V6_CSUM)
	/*
	 * If one device supports one of these features, then enable them
	 * for all in netdev_increment_features.
	 */
#define NETIF_F_ONE_FOR_ALL	(NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ROBUST | \
				 NETIF_F_SG | NETIF_F_HIGHDMA | \
				 NETIF_F_FRAGLIST)
	/* Interface index. Unique device identifier	*/
	int			ifindex;
	int			iflink;
	/* Either 64-bit or legacy stats; see the @ndo_get_stats* comment. */
	union {
		struct rtnl_link_stats64 stats64;
		struct net_device_stats stats;
	};
#ifdef CONFIG_WIRELESS_EXT
	/* List of functions to handle Wireless Extensions (instead of ioctl).
	 * See <net/iw_handler.h> for details. Jean II */
	const struct iw_handler_def *	wireless_handlers;
	/* Instance data managed by the core of Wireless Extensions. */
	struct iw_public_data *	wireless_data;
#endif
	/* Management operations */
	const struct net_device_ops *netdev_ops;
	const struct ethtool_ops *ethtool_ops;
	/* Hardware header description */
	const struct header_ops *header_ops;
	unsigned int		flags;	/* interface flags (a la BSD)	*/
	unsigned short		gflags;
	unsigned short		priv_flags; /* Like 'flags' but invisible to userspace. */
	unsigned short		padded;	/* How much padding added by alloc_netdev() */
	unsigned char		operstate; /* RFC2863 operstate */
	unsigned char		link_mode; /* mapping policy to operstate */
	unsigned int		mtu;	/* interface MTU value		*/
	unsigned short		type;	/* interface hardware type	*/
	unsigned short		hard_header_len;	/* hardware hdr length	*/
	/* extra head- and tailroom the hardware may need, but not in all cases
	 * can this be guaranteed, especially tailroom. Some cases also use
	 * LL_MAX_HEADER instead to allocate the skb.
	 */
	unsigned short		needed_headroom;
	unsigned short		needed_tailroom;
	struct net_device	*master; /* Pointer to master device of a group,
					  * which this device is member of.
					  */
	/* Interface address info. */
	unsigned char		perm_addr[MAX_ADDR_LEN]; /* permanent hw address */
	unsigned char		addr_len;	/* hardware address length	*/
	unsigned short		dev_id;		/* for shared network cards */
	spinlock_t		addr_list_lock;
	struct netdev_hw_addr_list	uc;	/* Unicast mac addresses */
	struct netdev_hw_addr_list	mc;	/* Multicast mac addresses */
	int			uc_promisc;
	unsigned int		promiscuity;
	unsigned int		allmulti;
	/* Protocol specific pointers */
#ifdef CONFIG_NET_DSA
	void			*dsa_ptr;	/* dsa specific data */
#endif
	void			*atalk_ptr;	/* AppleTalk link	*/
	void			*ip_ptr;	/* IPv4 specific data	*/
	void			*dn_ptr;	/* DECnet specific data */
	void			*ip6_ptr;	/* IPv6 specific data */
	void			*ec_ptr;	/* Econet specific data	*/
	void			*ax25_ptr;	/* AX.25 specific data */
	struct wireless_dev	*ieee80211_ptr;	/* IEEE 802.11 specific data,
						   assign before registering */
/*
 * Cache line mostly used on receive path (including eth_type_trans())
 */
	unsigned long		last_rx;	/* Time of last Rx	*/
	/* Interface address info used in eth_type_trans() */
	unsigned char		*dev_addr;	/* hw address, (before bcast
						   because most packets are
						   unicast) */
	struct netdev_hw_addr_list	dev_addrs; /* list of device
						      hw addresses */
	unsigned char		broadcast[MAX_ADDR_LEN];	/* hw bcast add	*/
#ifdef CONFIG_RPS
	struct kset		*queues_kset;
	struct netdev_rx_queue	*_rx;
	/* Number of RX queues allocated at alloc_netdev_mq() time  */
	unsigned int		num_rx_queues;
#endif
	struct netdev_queue	rx_queue;
	rx_handler_func_t	*rx_handler;
	void			*rx_handler_data;
	struct netdev_queue	*_tx ____cacheline_aligned_in_smp;
	/* Number of TX queues allocated at alloc_netdev_mq() time  */
	unsigned int		num_tx_queues;
	/* Number of TX queues currently active in device  */
	unsigned int		real_num_tx_queues;
	/* root qdisc from userspace point of view */
	struct Qdisc		*qdisc;
	unsigned long		tx_queue_len;	/* Max frames per queue allowed */
	spinlock_t		tx_global_lock;
	/*
	 * One part is mostly used on xmit path (device)
	 */
	/* These may be needed for future network-power-down code. */
	/*
	 * trans_start here is expensive for high speed devices on SMP,
	 * please use netdev_queue->trans_start instead.
	 */
	unsigned long		trans_start;	/* Time (in jiffies) of last Tx	*/
	int			watchdog_timeo; /* used by dev_watchdog() */
	struct timer_list	watchdog_timer;
	/* Number of references to this device */
	atomic_t		refcnt ____cacheline_aligned_in_smp;
	/* delayed register/unregister */
	struct list_head	todo_list;
	/* device index hash chain */
	struct hlist_node	index_hlist;
	struct list_head	link_watch_list;
	/* register/unregister state machine */
	enum { NETREG_UNINITIALIZED=0,
	       NETREG_REGISTERED,	/* completed register_netdevice */
	       NETREG_UNREGISTERING,	/* called unregister_netdevice */
	       NETREG_UNREGISTERED,	/* completed unregister todo */
	       NETREG_RELEASED,		/* called free_netdev */
	       NETREG_DUMMY,		/* dummy device for NAPI poll */
	} reg_state:16;
	enum {
		RTNL_LINK_INITIALIZED,
		RTNL_LINK_INITIALIZING,
	} rtnl_link_state:16;
	/* Called from unregister, can be used to call free_netdev */
	void (*destructor)(struct net_device *dev);
#ifdef CONFIG_NETPOLL
	struct netpoll_info	*npinfo;
#endif
#ifdef CONFIG_NET_NS
	/* Network namespace this network device is inside */
	struct net		*nd_net;
#endif
	/* mid-layer private */
	void			*ml_priv;
	/* GARP */
	struct garp_port	*garp_port;
	/* class/net/name entry */
	struct device		dev;
	/* space for optional device, statistics, and wireless sysfs groups */
	const struct attribute_group *sysfs_groups[4];
	/* rtnetlink link ops */
	const struct rtnl_link_ops *rtnl_link_ops;
	/* VLAN feature mask */
	unsigned long vlan_features;
	/* for setting kernel sock attribute on TCP connection setup */
#define GSO_MAX_SIZE		65536
	unsigned int		gso_max_size;
#ifdef CONFIG_DCB
	/* Data Center Bridging netlink ops */
	const struct dcbnl_rtnl_ops *dcbnl_ops;
#endif
#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
	/* max exchange id for FCoE LRO by ddp */
	unsigned int		fcoe_ddp_xid;
#endif
	/* n-tuple filter list attached to this device */
	struct ethtool_rx_ntuple_list ethtool_ntuple_list;
};
/* Map an embedded struct device back to its containing net_device. */
#define to_net_dev(d) container_of(d, struct net_device, dev)
/* Alignment (bytes) of the private area returned by netdev_priv(). */
#define NETDEV_ALIGN 32
/* Return the index'th TX queue of @dev.  No bounds checking. */
static inline
struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev,
					 unsigned int index)
{
	return &dev->_tx[index];
}
/* Invoke @f(dev, queue, arg) on every allocated TX queue of @dev. */
static inline void netdev_for_each_tx_queue(struct net_device *dev,
					    void (*f)(struct net_device *,
						      struct netdev_queue *,
						      void *),
					    void *arg)
{
	unsigned int qidx;

	for (qidx = 0; qidx < dev->num_tx_queues; qidx++)
		f(dev, &dev->_tx[qidx], arg);
}
/*
 * Net namespace inlines
 */
/* Return the network namespace @dev currently lives in. */
static inline
struct net *dev_net(const struct net_device *dev)
{
	return read_pnet(&dev->nd_net);
}
/* Move @dev into namespace @net, releasing the old namespace reference
 * and taking one on the new.  No-op without CONFIG_NET_NS.
 */
static inline
void dev_net_set(struct net_device *dev, struct net *net)
{
#ifdef CONFIG_NET_NS
	release_net(dev->nd_net);
	dev->nd_net = hold_net(net);
#endif
}
/* True when @dev is a DSA master whose switch uses DSA tags. */
static inline bool netdev_uses_dsa_tags(struct net_device *dev)
{
#ifdef CONFIG_NET_DSA_TAG_DSA
	return dev->dsa_ptr ? dsa_uses_dsa_tags(dev->dsa_ptr) : 0;
#else
	return 0;
#endif
}
#ifndef CONFIG_NET_NS
/* Without namespaces, binding an skb to a device is a plain store. */
static inline void skb_set_dev(struct sk_buff *skb, struct net_device *dev)
{
	skb->dev = dev;
}
#else /* CONFIG_NET_NS */
void skb_set_dev(struct sk_buff *skb, struct net_device *dev);
#endif
/* True when @dev is a DSA master whose switch uses trailer tags. */
static inline bool netdev_uses_trailer_tags(struct net_device *dev)
{
#ifdef CONFIG_NET_DSA_TAG_TRAILER
	return dev->dsa_ptr ? dsa_uses_trailer_tags(dev->dsa_ptr) : 0;
#else
	return 0;
#endif
}
/**
 * netdev_priv - access network device private data
 * @dev: network device
 *
 * Get network device private data
 */
static inline void *netdev_priv(const struct net_device *dev)
{
	/* The private area is laid out directly after the net_device,
	 * rounded up to NETDEV_ALIGN.
	 */
	return (char *)dev + ALIGN(sizeof(struct net_device), NETDEV_ALIGN);
}
/* Set the sysfs physical device reference for the network logical device
 * if set prior to registration will cause a symlink during initialization.
 */
#define SET_NETDEV_DEV(net, pdev) ((net)->dev.parent = (pdev))
/* Set the sysfs device type for the network logical device to allow
 * fine grained identification of different network device types. For
 * example Ethernet, Wireless LAN, Bluetooth, WiMAX etc.
 */
#define SET_NETDEV_DEVTYPE(net, devtype) ((net)->dev.type = (devtype))
/**
* netif_napi_add - initialize a napi context
* @dev: network device
* @napi: napi context
* @poll: polling function
* @weight: default weight
*
* netif_napi_add() must be used to initialize a napi context prior to calling
* *any* of the other napi related functions.
*/
void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
int (*poll)(struct napi_struct *, int), int weight);
/**
* netif_napi_del - remove a napi context
* @napi: napi context
*
* netif_napi_del() removes a napi context from the network device napi list
*/
void netif_napi_del(struct napi_struct *napi);
/* Per-skb GRO state, stored in skb->cb (see NAPI_GRO_CB below). */
struct napi_gro_cb {
	/* Virtual address of skb_shinfo(skb)->frags[0].page + offset. */
	void *frag0;
	/* Length of frag0. */
	unsigned int frag0_len;
	/* This indicates where we are processing relative to skb->data. */
	int data_offset;
	/* This is non-zero if the packet may be of the same flow. */
	int same_flow;
	/* This is non-zero if the packet cannot be merged with the new skb. */
	int flush;
	/* Number of segments aggregated. */
	int count;
	/* Free the skb? */
	int free;
};
#define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb)
/* Protocol handler, registered with dev_add_pack()/dev_remove_pack(). */
struct packet_type {
	__be16			type;	/* This is really htons(ether_type). */
	struct net_device	*dev;	/* NULL is wildcarded here	     */
	int			(*func) (struct sk_buff *,
					 struct net_device *,
					 struct packet_type *,
					 struct net_device *);
	struct sk_buff		*(*gso_segment)(struct sk_buff *skb,
						int features);
	int			(*gso_send_check)(struct sk_buff *skb);
	struct sk_buff		**(*gro_receive)(struct sk_buff **head,
					       struct sk_buff *skb);
	int			(*gro_complete)(struct sk_buff *skb);
	void			*af_packet_priv;
	struct list_head	list;
};
#include <linux/interrupt.h>
#include <linux/notifier.h>
extern rwlock_t dev_base_lock; /* Device list lock */
#define for_each_netdev(net, d) \
list_for_each_entry(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_reverse(net, d) \
list_for_each_entry_reverse(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_rcu(net, d) \
list_for_each_entry_rcu(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_safe(net, d, n) \
list_for_each_entry_safe(d, n, &(net)->dev_base_head, dev_list)
#define for_each_netdev_continue(net, d) \
list_for_each_entry_continue(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_continue_rcu(net, d) \
list_for_each_entry_continue_rcu(d, &(net)->dev_base_head, dev_list)
#define net_device_entry(lh) list_entry(lh, struct net_device, dev_list)
/* Next device in @dev's namespace list, or NULL at the end. */
static inline struct net_device *next_net_device(struct net_device *dev)
{
	struct net *net = dev_net(dev);
	struct list_head *lh = dev->dev_list.next;

	if (lh == &net->dev_base_head)
		return NULL;
	return net_device_entry(lh);
}
/* RCU-protected variant of next_net_device(). */
static inline struct net_device *next_net_device_rcu(struct net_device *dev)
{
	struct net *net = dev_net(dev);
	struct list_head *lh = rcu_dereference(dev->dev_list.next);

	if (lh == &net->dev_base_head)
		return NULL;
	return net_device_entry(lh);
}
/* First device registered in @net, or NULL when the list is empty. */
static inline struct net_device *first_net_device(struct net *net)
{
	if (list_empty(&net->dev_base_head))
		return NULL;
	return net_device_entry(net->dev_base_head.next);
}
extern int netdev_boot_setup_check(struct net_device *dev);
extern unsigned long netdev_boot_base(const char *prefix, int unit);
extern struct net_device *dev_getbyhwaddr(struct net *net, unsigned short type, char *hwaddr);
extern struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type);
extern struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type);
extern void dev_add_pack(struct packet_type *pt);
extern void dev_remove_pack(struct packet_type *pt);
extern void __dev_remove_pack(struct packet_type *pt);
extern struct net_device *dev_get_by_flags_rcu(struct net *net, unsigned short flags,
unsigned short mask);
extern struct net_device *dev_get_by_name(struct net *net, const char *name);
extern struct net_device *dev_get_by_name_rcu(struct net *net, const char *name);
extern struct net_device *__dev_get_by_name(struct net *net, const char *name);
extern int dev_alloc_name(struct net_device *dev, const char *name);
extern int dev_open(struct net_device *dev);
extern int dev_close(struct net_device *dev);
extern void dev_disable_lro(struct net_device *dev);
extern int dev_queue_xmit(struct sk_buff *skb);
extern int register_netdevice(struct net_device *dev);
extern void unregister_netdevice_queue(struct net_device *dev,
struct list_head *head);
extern void unregister_netdevice_many(struct list_head *head);
static inline void unregister_netdevice(struct net_device *dev)
{
unregister_netdevice_queue(dev, NULL);
}
extern void free_netdev(struct net_device *dev);
extern void synchronize_net(void);
extern int register_netdevice_notifier(struct notifier_block *nb);
extern int unregister_netdevice_notifier(struct notifier_block *nb);
extern int init_dummy_netdev(struct net_device *dev);
extern void netdev_resync_ops(struct net_device *dev);
extern int call_netdevice_notifiers(unsigned long val, struct net_device *dev);
extern struct net_device *dev_get_by_index(struct net *net, int ifindex);
extern struct net_device *__dev_get_by_index(struct net *net, int ifindex);
extern struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
extern int dev_restart(struct net_device *dev);
#ifdef CONFIG_NETPOLL_TRAP
extern int netpoll_trap(void);
#endif
extern int skb_gro_receive(struct sk_buff **head,
struct sk_buff *skb);
extern void skb_gro_reset_offset(struct sk_buff *skb);
/* Current GRO parse offset, relative to skb->data. */
static inline unsigned int skb_gro_offset(const struct sk_buff *skb)
{
	return NAPI_GRO_CB(skb)->data_offset;
}
/* Bytes remaining past the current GRO parse offset. */
static inline unsigned int skb_gro_len(const struct sk_buff *skb)
{
	return skb->len - NAPI_GRO_CB(skb)->data_offset;
}
/* Advance the GRO parse offset; skb->data itself is untouched. */
static inline void skb_gro_pull(struct sk_buff *skb, unsigned int len)
{
	NAPI_GRO_CB(skb)->data_offset += len;
}
/* Fast header access via the mapped first fragment (frag0).
 * Only valid when skb_gro_header_hard() says the bytes are there.
 */
static inline void *skb_gro_header_fast(struct sk_buff *skb,
					unsigned int offset)
{
	return NAPI_GRO_CB(skb)->frag0 + offset;
}
/* Non-zero when hlen bytes are NOT available in frag0 and the slow
 * path (skb_gro_header_slow) must be taken.
 */
static inline int skb_gro_header_hard(struct sk_buff *skb, unsigned int hlen)
{
	return NAPI_GRO_CB(skb)->frag0_len < hlen;
}
/* Slow path: give up on frag0 and pull hlen bytes into the linear area.
 * Returns NULL when the pull fails.
 */
static inline void *skb_gro_header_slow(struct sk_buff *skb, unsigned int hlen,
					unsigned int offset)
{
	NAPI_GRO_CB(skb)->frag0 = NULL;
	NAPI_GRO_CB(skb)->frag0_len = 0;
	return pskb_may_pull(skb, hlen) ? skb->data + offset : NULL;
}
/* MAC header location: frag0 when mapped, else the skb's mac header. */
static inline void *skb_gro_mac_header(struct sk_buff *skb)
{
	return NAPI_GRO_CB(skb)->frag0 ?: skb_mac_header(skb);
}
/* Network header location, derived the same way as the mac header. */
static inline void *skb_gro_network_header(struct sk_buff *skb)
{
	return (NAPI_GRO_CB(skb)->frag0 ?: skb->data) +
	       skb_network_offset(skb);
}
static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
unsigned short type,
const void *daddr, const void *saddr,
unsigned len)
{
if (!dev->header_ops || !dev->header_ops->create)
return 0;
return dev->header_ops->create(skb, dev, type, daddr, saddr, len);
}
static inline int dev_parse_header(const struct sk_buff *skb,
unsigned char *haddr)
{
const struct net_device *dev = skb->dev;
if (!dev->header_ops || !dev->header_ops->parse)
return 0;
return dev->header_ops->parse(skb, haddr);
}
/* Per-family handler used by SIOCGIFCONF to fill the user buffer. */
typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr, int len);
extern int register_gifconf(unsigned int family, gifconf_func_t * gifconf);
/* Unregister by installing a NULL handler for the family. */
static inline int unregister_gifconf(unsigned int family)
{
	return register_gifconf(family, NULL);
}
/*
 * Incoming packets are placed on per-cpu queues
 */
struct softnet_data {
	struct Qdisc		*output_queue;
	struct Qdisc		**output_queue_tailp;
	struct list_head	poll_list;	/* NAPI instances to poll */
	struct sk_buff		*completion_queue;
	struct sk_buff_head	process_queue;
	/* stats */
	unsigned int		processed;
	unsigned int		time_squeeze;
	unsigned int		cpu_collision;
	unsigned int		received_rps;
#ifdef CONFIG_RPS
	struct softnet_data	*rps_ipi_list;
	/* Elements below can be accessed between CPUs for RPS */
	struct call_single_data	csd ____cacheline_aligned_in_smp;
	struct softnet_data	*rps_ipi_next;
	unsigned int		cpu;
	unsigned int		input_queue_head;
	unsigned int		input_queue_tail;
#endif
	unsigned		dropped;
	struct sk_buff_head	input_pkt_queue;
	struct napi_struct	backlog;	/* per-cpu backlog NAPI context */
};
/* Bump the RPS input-queue head counter; no-op without CONFIG_RPS. */
static inline void input_queue_head_incr(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	sd->input_queue_head++;
#endif
}
/* Bump the RPS tail counter and report the new value through @qtail;
 * no-op without CONFIG_RPS (@qtail is then left untouched).
 */
static inline void input_queue_tail_incr_save(struct softnet_data *sd,
					      unsigned int *qtail)
{
#ifdef CONFIG_RPS
	*qtail = ++sd->input_queue_tail;
#endif
}
DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
#define HAVE_NETIF_QUEUE
extern void __netif_schedule(struct Qdisc *q);
/* Schedule the queue's qdisc for transmission unless it is stopped. */
static inline void netif_schedule_queue(struct netdev_queue *txq)
{
	if (test_bit(__QUEUE_STATE_XOFF, &txq->state))
		return;
	__netif_schedule(txq->qdisc);
}
/* Schedule every TX queue of @dev that is not stopped. */
static inline void netif_tx_schedule_all(struct net_device *dev)
{
	unsigned int qidx;

	for (qidx = 0; qidx < dev->num_tx_queues; qidx++)
		netif_schedule_queue(netdev_get_tx_queue(dev, qidx));
}
/* Clear XOFF on a single TX queue, allowing transmit on it. */
static inline void netif_tx_start_queue(struct netdev_queue *dev_queue)
{
	clear_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
}
/**
 * netif_start_queue - allow transmit
 * @dev: network device
 *
 * Allow upper layers to call the device hard_start_xmit routine.
 */
static inline void netif_start_queue(struct net_device *dev)
{
	netif_tx_start_queue(netdev_get_tx_queue(dev, 0));
}
/* Allow transmit on every TX queue of @dev. */
static inline void netif_tx_start_all_queues(struct net_device *dev)
{
	unsigned int qidx;

	for (qidx = 0; qidx < dev->num_tx_queues; qidx++)
		netif_tx_start_queue(netdev_get_tx_queue(dev, qidx));
}
/* Restart a single TX queue and schedule its qdisc. */
static inline void netif_tx_wake_queue(struct netdev_queue *dev_queue)
{
#ifdef CONFIG_NETPOLL_TRAP
	/* While netpoll is trapping, just unblock the queue without
	 * scheduling the qdisc.
	 */
	if (netpoll_trap()) {
		netif_tx_start_queue(dev_queue);
		return;
	}
#endif
	/* Schedule only when this caller is the one clearing XOFF. */
	if (test_and_clear_bit(__QUEUE_STATE_XOFF, &dev_queue->state))
		__netif_schedule(dev_queue->qdisc);
}
/**
 * netif_wake_queue - restart transmit
 * @dev: network device
 *
 * Allow upper layers to call the device hard_start_xmit routine.
 * Used for flow control when transmit resources are available.
 */
static inline void netif_wake_queue(struct net_device *dev)
{
	netif_tx_wake_queue(netdev_get_tx_queue(dev, 0));
}
/* Wake every TX queue of @dev. */
static inline void netif_tx_wake_all_queues(struct net_device *dev)
{
	unsigned int qidx;

	for (qidx = 0; qidx < dev->num_tx_queues; qidx++)
		netif_tx_wake_queue(netdev_get_tx_queue(dev, qidx));
}
/* Set XOFF on a single TX queue, blocking further transmit on it. */
static inline void netif_tx_stop_queue(struct netdev_queue *dev_queue)
{
	set_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
}
/**
 * netif_stop_queue - stop transmitted packets
 * @dev: network device
 *
 * Stop upper layers calling the device hard_start_xmit routine.
 * Used for flow control when transmit resources are unavailable.
 */
static inline void netif_stop_queue(struct net_device *dev)
{
	netif_tx_stop_queue(netdev_get_tx_queue(dev, 0));
}
/* Stop every TX queue of @dev. */
static inline void netif_tx_stop_all_queues(struct net_device *dev)
{
	unsigned int qidx;

	for (qidx = 0; qidx < dev->num_tx_queues; qidx++)
		netif_tx_stop_queue(netdev_get_tx_queue(dev, qidx));
}
/* Nonzero when the queue's XOFF (stopped) flag is set. */
static inline int netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
{
	return test_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
}
/**
 * netif_queue_stopped - test if transmit queue is flowblocked
 * @dev: network device
 *
 * Test if transmit queue on device is currently unable to send.
 * Inspects tx queue 0 only.
 */
static inline int netif_queue_stopped(const struct net_device *dev)
{
	const struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);

	return netif_tx_queue_stopped(txq);
}
/* Nonzero while the queue is frozen (see netif_tx_lock()). */
static inline int netif_tx_queue_frozen(const struct netdev_queue *dev_queue)
{
	return test_bit(__QUEUE_STATE_FROZEN, &dev_queue->state);
}

/**
 * netif_running - test if up
 * @dev: network device
 *
 * Test if the device has been brought up (__LINK_STATE_START set).
 */
static inline int netif_running(const struct net_device *dev)
{
	return test_bit(__LINK_STATE_START, &dev->state);
}
/*
* Routines to manage the subqueues on a device. We only need start
* stop, and a check if it's stopped. All other device management is
* done at the overall netdevice level.
* Also test the device if we're multiqueue.
*/
/**
 * netif_start_subqueue - allow sending packets on subqueue
 * @dev: network device
 * @queue_index: sub queue index
 *
 * Start individual transmit queue of a device with multiple transmit queues.
 */
static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index)
{
	netif_tx_start_queue(netdev_get_tx_queue(dev, queue_index));
}
/**
 * netif_stop_subqueue - stop sending packets on subqueue
 * @dev: network device
 * @queue_index: sub queue index
 *
 * Stop individual transmit queue of a device with multiple transmit queues.
 */
static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
#ifdef CONFIG_NETPOLL_TRAP
	/* While netpoll is trapping, the stop request is ignored so the
	 * queue stays usable for netpoll's own transmits.
	 */
	if (netpoll_trap())
		return;
#endif
	netif_tx_stop_queue(txq);
}
/**
 * netif_subqueue_stopped - test status of subqueue
 * @dev: network device
 * @queue_index: sub queue index
 *
 * Check individual transmit queue of a device with multiple transmit queues.
 */
static inline int __netif_subqueue_stopped(const struct net_device *dev,
					   u16 queue_index)
{
	return netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue_index));
}
/* Variant taking an skb: tests the subqueue the skb is mapped to. */
static inline int netif_subqueue_stopped(const struct net_device *dev,
					 struct sk_buff *skb)
{
	return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb));
}
/**
 * netif_wake_subqueue - allow sending packets on subqueue
 * @dev: network device
 * @queue_index: sub queue index
 *
 * Resume individual transmit queue of a device with multiple transmit queues.
 */
static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
#ifdef CONFIG_NETPOLL_TRAP
	/* While netpoll is trapping, the wake request is a no-op. */
	if (netpoll_trap())
		return;
#endif
	/* Reschedule the qdisc only on a 1 -> 0 transition of XOFF. */
	if (test_and_clear_bit(__QUEUE_STATE_XOFF, &txq->state))
		__netif_schedule(txq->qdisc);
}
/**
 * netif_is_multiqueue - test if device has multiple transmit queues
 * @dev: network device
 *
 * Returns nonzero when the device was set up with more than one
 * transmit queue.
 */
static inline int netif_is_multiqueue(const struct net_device *dev)
{
	return dev->num_tx_queues > 1;
}
/* Use this variant when it is known for sure that it
* is executing from hardware interrupt context or with hardware interrupts
* disabled.
*/
extern void dev_kfree_skb_irq(struct sk_buff *skb);
/* Use this variant in places where it could be invoked
* from either hardware interrupt or other context, with hardware interrupts
* either disabled or enabled.
*/
extern void dev_kfree_skb_any(struct sk_buff *skb);
#define HAVE_NETIF_RX 1
extern int netif_rx(struct sk_buff *skb);
extern int netif_rx_ni(struct sk_buff *skb);
#define HAVE_NETIF_RECEIVE_SKB 1
extern int netif_receive_skb(struct sk_buff *skb);
extern gro_result_t dev_gro_receive(struct napi_struct *napi,
struct sk_buff *skb);
extern gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb);
extern gro_result_t napi_gro_receive(struct napi_struct *napi,
struct sk_buff *skb);
extern void napi_reuse_skb(struct napi_struct *napi,
struct sk_buff *skb);
extern struct sk_buff * napi_get_frags(struct napi_struct *napi);
extern gro_result_t napi_frags_finish(struct napi_struct *napi,
struct sk_buff *skb,
gro_result_t ret);
extern struct sk_buff * napi_frags_skb(struct napi_struct *napi);
extern gro_result_t napi_gro_frags(struct napi_struct *napi);
/* Free the skb currently held on the napi context (if any) and clear
 * the pointer.
 */
static inline void napi_free_frags(struct napi_struct *napi)
{
	kfree_skb(napi->skb);
	napi->skb = NULL;
}
extern int netdev_rx_handler_register(struct net_device *dev,
rx_handler_func_t *rx_handler,
void *rx_handler_data);
extern void netdev_rx_handler_unregister(struct net_device *dev);
extern void netif_nit_deliver(struct sk_buff *skb);
extern int dev_valid_name(const char *name);
extern int dev_ioctl(struct net *net, unsigned int cmd, void __user *);
extern int dev_ethtool(struct net *net, struct ifreq *);
extern unsigned dev_get_flags(const struct net_device *);
extern int __dev_change_flags(struct net_device *, unsigned int flags);
extern int dev_change_flags(struct net_device *, unsigned);
extern void __dev_notify_flags(struct net_device *, unsigned int old_flags);
extern int dev_change_name(struct net_device *, const char *);
extern int dev_set_alias(struct net_device *, const char *, size_t);
extern int dev_change_net_namespace(struct net_device *,
struct net *, const char *);
extern int dev_set_mtu(struct net_device *, int);
extern int dev_set_mac_address(struct net_device *,
struct sockaddr *);
extern int dev_hard_start_xmit(struct sk_buff *skb,
struct net_device *dev,
struct netdev_queue *txq);
extern int dev_forward_skb(struct net_device *dev,
struct sk_buff *skb);
extern int netdev_budget;
/* Called by rtnetlink.c:rtnl_unlock() */
extern void netdev_run_todo(void);
/**
 * dev_put - release reference to device
 * @dev: network device
 *
 * Release reference to device to allow it to be freed.  Note: nothing
 * is torn down here when the count reaches zero; presumably the final
 * free happens elsewhere — not visible in this header.
 */
static inline void dev_put(struct net_device *dev)
{
	atomic_dec(&dev->refcnt);
}

/**
 * dev_hold - get reference to device
 * @dev: network device
 *
 * Hold reference to device to keep it from being freed.
 */
static inline void dev_hold(struct net_device *dev)
{
	atomic_inc(&dev->refcnt);
}
/* Carrier loss detection, dial on demand. The functions netif_carrier_on
* and _off may be called from IRQ context, but it is caller
* who is responsible for serialization of these calls.
*
* The name carrier is inappropriate, these functions should really be
* called netif_lowerlayer_*() because they represent the state of any
* kind of lower layer not just hardware media.
*/
extern void linkwatch_fire_event(struct net_device *dev);
extern void linkwatch_forget_dev(struct net_device *dev);
/**
 * netif_carrier_ok - test if carrier present
 * @dev: network device
 *
 * Check if carrier is present on device.  The state bit is stored
 * inverted: NOCARRIER clear means the carrier is OK.
 */
static inline int netif_carrier_ok(const struct net_device *dev)
{
	return !test_bit(__LINK_STATE_NOCARRIER, &dev->state);
}
extern unsigned long dev_trans_start(struct net_device *dev);
extern void __netdev_watchdog_up(struct net_device *dev);
extern void netif_carrier_on(struct net_device *dev);
extern void netif_carrier_off(struct net_device *dev);
extern void netif_notify_peers(struct net_device *dev);
/**
 * netif_dormant_on - mark device as dormant.
 * @dev: network device
 *
 * Mark device as dormant (as per RFC2863).
 *
 * The dormant state indicates that the relevant interface is not
 * actually in a condition to pass packets (i.e., it is not 'up') but is
 * in a "pending" state, waiting for some external event.  For "on-
 * demand" interfaces, this new state identifies the situation where the
 * interface is waiting for events to place it in the up state.
 *
 * Fires a linkwatch event only on a 0 -> 1 transition of the bit.
 */
static inline void netif_dormant_on(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_DORMANT, &dev->state))
		linkwatch_fire_event(dev);
}

/**
 * netif_dormant_off - set device as not dormant.
 * @dev: network device
 *
 * Device is not in dormant state.  Fires a linkwatch event only on a
 * 1 -> 0 transition of the bit.
 */
static inline void netif_dormant_off(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_DORMANT, &dev->state))
		linkwatch_fire_event(dev);
}

/**
 * netif_dormant - test if device is dormant
 * @dev: network device
 *
 * Check if the device is in the dormant state (as per RFC2863).
 */
static inline int netif_dormant(const struct net_device *dev)
{
	return test_bit(__LINK_STATE_DORMANT, &dev->state);
}

/**
 * netif_oper_up - test if device is operational
 * @dev: network device
 *
 * Check if the operational state is up; IF_OPER_UNKNOWN is treated as
 * up for backward compatibility.
 */
static inline int netif_oper_up(const struct net_device *dev)
{
	return (dev->operstate == IF_OPER_UP ||
		dev->operstate == IF_OPER_UNKNOWN /* backward compat */);
}

/**
 * netif_device_present - is device available or removed
 * @dev: network device
 *
 * Check if device has not been removed from system.
 */
static inline int netif_device_present(struct net_device *dev)
{
	return test_bit(__LINK_STATE_PRESENT, &dev->state);
}
extern void netif_device_detach(struct net_device *dev);
extern void netif_device_attach(struct net_device *dev);
/*
* Network interface message level settings
*/
#define HAVE_NETIF_MSG 1
/* Bits for a driver's msg_enable mask; each bit enables one class of
 * driver diagnostic messages (tested via the netif_msg_*() helpers).
 */
enum {
	NETIF_MSG_DRV		= 0x0001,
	NETIF_MSG_PROBE		= 0x0002,
	NETIF_MSG_LINK		= 0x0004,
	NETIF_MSG_TIMER		= 0x0008,
	NETIF_MSG_IFDOWN	= 0x0010,
	NETIF_MSG_IFUP		= 0x0020,
	NETIF_MSG_RX_ERR	= 0x0040,
	NETIF_MSG_TX_ERR	= 0x0080,
	NETIF_MSG_TX_QUEUED	= 0x0100,
	NETIF_MSG_INTR		= 0x0200,
	NETIF_MSG_TX_DONE	= 0x0400,
	NETIF_MSG_RX_STATUS	= 0x0800,
	NETIF_MSG_PKTDATA	= 0x1000,
	NETIF_MSG_HW		= 0x2000,
	NETIF_MSG_WOL		= 0x4000,
};
#define netif_msg_drv(p) ((p)->msg_enable & NETIF_MSG_DRV)
#define netif_msg_probe(p) ((p)->msg_enable & NETIF_MSG_PROBE)
#define netif_msg_link(p) ((p)->msg_enable & NETIF_MSG_LINK)
#define netif_msg_timer(p) ((p)->msg_enable & NETIF_MSG_TIMER)
#define netif_msg_ifdown(p) ((p)->msg_enable & NETIF_MSG_IFDOWN)
#define netif_msg_ifup(p) ((p)->msg_enable & NETIF_MSG_IFUP)
#define netif_msg_rx_err(p) ((p)->msg_enable & NETIF_MSG_RX_ERR)
#define netif_msg_tx_err(p) ((p)->msg_enable & NETIF_MSG_TX_ERR)
#define netif_msg_tx_queued(p) ((p)->msg_enable & NETIF_MSG_TX_QUEUED)
#define netif_msg_intr(p) ((p)->msg_enable & NETIF_MSG_INTR)
#define netif_msg_tx_done(p) ((p)->msg_enable & NETIF_MSG_TX_DONE)
#define netif_msg_rx_status(p) ((p)->msg_enable & NETIF_MSG_RX_STATUS)
#define netif_msg_pktdata(p) ((p)->msg_enable & NETIF_MSG_PKTDATA)
#define netif_msg_hw(p) ((p)->msg_enable & NETIF_MSG_HW)
#define netif_msg_wol(p) ((p)->msg_enable & NETIF_MSG_WOL)
/* Turn a driver "debug" module parameter into a msg_enable mask:
 * out-of-range values select the driver default, 0 silences all
 * messages, and N > 0 enables the lowest N message bits.
 */
static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
{
	if (debug_value < 0 || debug_value >= (sizeof(u32) * 8))
		return default_msg_enable_bits;	/* out of range: use default */
	if (!debug_value)
		return 0;			/* explicitly silent */
	return (1 << debug_value) - 1;		/* enable the low N bits */
}
/* Low-level per-queue xmit lock helpers.  Each netdev_queue has its own
 * _xmit_lock; xmit_lock_owner records the CPU holding it (-1 when
 * unlocked).
 */
static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
{
	spin_lock(&txq->_xmit_lock);
	txq->xmit_lock_owner = cpu;
}

/* As __netif_tx_lock, but also disables bottom halves. */
static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
{
	spin_lock_bh(&txq->_xmit_lock);
	txq->xmit_lock_owner = smp_processor_id();
}

/* Try to take the queue's xmit lock; returns nonzero on success. */
static inline int __netif_tx_trylock(struct netdev_queue *txq)
{
	int ok = spin_trylock(&txq->_xmit_lock);
	if (likely(ok))
		txq->xmit_lock_owner = smp_processor_id();
	return ok;
}

/* Release the queue's xmit lock; the owner is cleared before unlocking. */
static inline void __netif_tx_unlock(struct netdev_queue *txq)
{
	txq->xmit_lock_owner = -1;
	spin_unlock(&txq->_xmit_lock);
}

/* As __netif_tx_unlock, but also re-enables bottom halves. */
static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
{
	txq->xmit_lock_owner = -1;
	spin_unlock_bh(&txq->_xmit_lock);
}

/* Record the time of the last transmit, but only while the xmit lock is
 * held (owner != -1).
 */
static inline void txq_trans_update(struct netdev_queue *txq)
{
	if (txq->xmit_lock_owner != -1)
		txq->trans_start = jiffies;
}
/**
 * netif_tx_lock - grab network device transmit lock
 * @dev: network device
 *
 * Get network device transmit lock: take the global tx lock, then mark
 * every tx queue frozen (briefly taking each queue's _xmit_lock to
 * synchronize with in-flight transmits).
 */
static inline void netif_tx_lock(struct net_device *dev)
{
	unsigned int i;
	int cpu;

	spin_lock(&dev->tx_global_lock);
	cpu = smp_processor_id();
	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		/* We are the only thread of execution doing a
		 * freeze, but we have to grab the _xmit_lock in
		 * order to synchronize with threads which are in
		 * the ->hard_start_xmit() handler and already
		 * checked the frozen bit.
		 */
		__netif_tx_lock(txq, cpu);
		set_bit(__QUEUE_STATE_FROZEN, &txq->state);
		__netif_tx_unlock(txq);
	}
}

/* As netif_tx_lock, with bottom halves disabled first. */
static inline void netif_tx_lock_bh(struct net_device *dev)
{
	local_bh_disable();
	netif_tx_lock(dev);
}

/* Counterpart of netif_tx_lock: unfreeze every queue, reschedule each
 * one so frozen traffic resumes, then drop the global lock.
 */
static inline void netif_tx_unlock(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		/* No need to grab the _xmit_lock here. If the
		 * queue is not stopped for another reason, we
		 * force a schedule.
		 */
		clear_bit(__QUEUE_STATE_FROZEN, &txq->state);
		netif_schedule_queue(txq);
	}
	spin_unlock(&dev->tx_global_lock);
}

/* Counterpart of netif_tx_lock_bh. */
static inline void netif_tx_unlock_bh(struct net_device *dev)
{
	netif_tx_unlock(dev);
	local_bh_enable();
}
/* Take/release the per-queue xmit lock around driver transmit, unless
 * the driver declared NETIF_F_LLTX (lockless tx: the driver does its
 * own transmit locking).
 */
#define HARD_TX_LOCK(dev, txq, cpu) {			\
	if ((dev->features & NETIF_F_LLTX) == 0) {	\
		__netif_tx_lock(txq, cpu);		\
	}						\
}
#define HARD_TX_UNLOCK(dev, txq) {			\
	if ((dev->features & NETIF_F_LLTX) == 0) {	\
		__netif_tx_unlock(txq);			\
	}						\
}
/* Stop all tx queues, taking each queue's xmit lock so that a transmit
 * in progress on another CPU has finished before the queue is marked
 * stopped.
 */
static inline void netif_tx_disable(struct net_device *dev)
{
	unsigned int i;
	int cpu;

	local_bh_disable();
	cpu = smp_processor_id();
	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		__netif_tx_lock(txq, cpu);
		netif_tx_stop_queue(txq);
		__netif_tx_unlock(txq);
	}
	local_bh_enable();
}
/* Serialize updates to the device's address lists (addr_list_lock). */
static inline void netif_addr_lock(struct net_device *dev)
{
	spin_lock(&dev->addr_list_lock);
}

/* As netif_addr_lock, with bottom halves disabled. */
static inline void netif_addr_lock_bh(struct net_device *dev)
{
	spin_lock_bh(&dev->addr_list_lock);
}

static inline void netif_addr_unlock(struct net_device *dev)
{
	spin_unlock(&dev->addr_list_lock);
}

static inline void netif_addr_unlock_bh(struct net_device *dev)
{
	spin_unlock_bh(&dev->addr_list_lock);
}
/*
* dev_addrs walker. Should be used only for read access. Call with
* rcu_read_lock held.
*/
#define for_each_dev_addr(dev, ha) \
list_for_each_entry_rcu(ha, &dev->dev_addrs.list, list)
/* These functions live elsewhere (drivers/net/net_init.c, but related) */
extern void ether_setup(struct net_device *dev);
/* Support for loadable net-drivers */
extern struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
void (*setup)(struct net_device *),
unsigned int queue_count);
#define alloc_netdev(sizeof_priv, name, setup) \
alloc_netdev_mq(sizeof_priv, name, setup, 1)
extern int register_netdev(struct net_device *dev);
extern void unregister_netdev(struct net_device *dev);
/* General hardware address lists handling functions */
extern int __hw_addr_add_multiple(struct netdev_hw_addr_list *to_list,
struct netdev_hw_addr_list *from_list,
int addr_len, unsigned char addr_type);
extern void __hw_addr_del_multiple(struct netdev_hw_addr_list *to_list,
struct netdev_hw_addr_list *from_list,
int addr_len, unsigned char addr_type);
extern int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
struct netdev_hw_addr_list *from_list,
int addr_len);
extern void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
struct netdev_hw_addr_list *from_list,
int addr_len);
extern void __hw_addr_flush(struct netdev_hw_addr_list *list);
extern void __hw_addr_init(struct netdev_hw_addr_list *list);
/* Functions used for device addresses handling */
extern int dev_addr_add(struct net_device *dev, unsigned char *addr,
unsigned char addr_type);
extern int dev_addr_del(struct net_device *dev, unsigned char *addr,
unsigned char addr_type);
extern int dev_addr_add_multiple(struct net_device *to_dev,
struct net_device *from_dev,
unsigned char addr_type);
extern int dev_addr_del_multiple(struct net_device *to_dev,
struct net_device *from_dev,
unsigned char addr_type);
extern void dev_addr_flush(struct net_device *dev);
extern int dev_addr_init(struct net_device *dev);
/* Functions used for unicast addresses handling */
extern int dev_uc_add(struct net_device *dev, unsigned char *addr);
extern int dev_uc_del(struct net_device *dev, unsigned char *addr);
extern int dev_uc_sync(struct net_device *to, struct net_device *from);
extern void dev_uc_unsync(struct net_device *to, struct net_device *from);
extern void dev_uc_flush(struct net_device *dev);
extern void dev_uc_init(struct net_device *dev);
/* Functions used for multicast addresses handling */
extern int dev_mc_add(struct net_device *dev, unsigned char *addr);
extern int dev_mc_add_global(struct net_device *dev, unsigned char *addr);
extern int dev_mc_del(struct net_device *dev, unsigned char *addr);
extern int dev_mc_del_global(struct net_device *dev, unsigned char *addr);
extern int dev_mc_sync(struct net_device *to, struct net_device *from);
extern void dev_mc_unsync(struct net_device *to, struct net_device *from);
extern void dev_mc_flush(struct net_device *dev);
extern void dev_mc_init(struct net_device *dev);
/* Functions used for secondary unicast and multicast support */
extern void dev_set_rx_mode(struct net_device *dev);
extern void __dev_set_rx_mode(struct net_device *dev);
extern int dev_set_promiscuity(struct net_device *dev, int inc);
extern int dev_set_allmulti(struct net_device *dev, int inc);
extern void netdev_state_change(struct net_device *dev);
extern int netdev_bonding_change(struct net_device *dev,
unsigned long event);
extern void netdev_features_change(struct net_device *dev);
/* Load a device via the kmod */
extern void dev_load(struct net *net, const char *name);
extern void dev_mcast_init(void);
extern const struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev);
extern void dev_txq_stats_fold(const struct net_device *dev, struct net_device_stats *stats);
extern int netdev_max_backlog;
extern int netdev_tstamp_prequeue;
extern int weight_p;
extern int netdev_set_master(struct net_device *dev, struct net_device *master);
extern int skb_checksum_help(struct sk_buff *skb);
extern struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features);
#ifdef CONFIG_BUG
extern void netdev_rx_csum_fault(struct net_device *dev);
#else
static inline void netdev_rx_csum_fault(struct net_device *dev)
{
}
#endif
/* rx skb timestamps */
extern void net_enable_timestamp(void);
extern void net_disable_timestamp(void);
#ifdef CONFIG_PROC_FS
extern void *dev_seq_start(struct seq_file *seq, loff_t *pos);
extern void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos);
extern void dev_seq_stop(struct seq_file *seq, void *v);
#endif
extern int netdev_class_create_file(struct class_attribute *class_attr);
extern void netdev_class_remove_file(struct class_attribute *class_attr);
extern char *netdev_drivername(const struct net_device *dev, char *buffer, int len);
extern void linkwatch_run_queue(void);
unsigned long netdev_increment_features(unsigned long all, unsigned long one,
unsigned long mask);
unsigned long netdev_fix_features(unsigned long features, const char *name);
void netif_stacked_transfer_operstate(const struct net_device *rootdev,
struct net_device *dev);
/* True when every GSO bit required by @gso_type is set in @features. */
static inline int net_gso_ok(int features, int gso_type)
{
	int feature = gso_type << NETIF_F_GSO_SHIFT;

	return !(~features & feature);
}
/* True when the given feature bits cover the skb's GSO type and, if the
 * skb carries a frag list, NETIF_F_FRAGLIST as well.
 */
static inline int skb_gso_ok(struct sk_buff *skb, int features)
{
	return net_gso_ok(features, skb_shinfo(skb)->gso_type) &&
	       (!skb_has_frags(skb) || (features & NETIF_F_FRAGLIST));
}

/* A GSO skb needs software segmentation if the device cannot segment it
 * itself, or if its checksum is not CHECKSUM_PARTIAL.
 */
static inline int netif_needs_gso(struct net_device *dev, struct sk_buff *skb)
{
	return skb_is_gso(skb) &&
	       (!skb_gso_ok(skb, dev->features) ||
		unlikely(skb->ip_summed != CHECKSUM_PARTIAL));
}
/* Set the upper bound on the size of GSO packets handed to the device. */
static inline void netif_set_gso_max_size(struct net_device *dev,
					  unsigned int size)
{
	dev->gso_max_size = size;
}
extern int __skb_bond_should_drop(struct sk_buff *skb,
struct net_device *master);
/* Defer to __skb_bond_should_drop when a bonding master is attached;
 * without a master nothing is dropped.
 */
static inline int skb_bond_should_drop(struct sk_buff *skb,
				       struct net_device *master)
{
	return master ? __skb_bond_should_drop(skb, master) : 0;
}
extern struct pernet_operations __net_initdata loopback_net_ops;
/* Ethtool convenience wrappers: each checks that the driver supplies
 * the corresponding ethtool_ops hook before calling it.
 */
static inline int dev_ethtool_get_settings(struct net_device *dev,
					   struct ethtool_cmd *cmd)
{
	if (!dev->ethtool_ops || !dev->ethtool_ops->get_settings)
		return -EOPNOTSUPP;
	return dev->ethtool_ops->get_settings(dev, cmd);
}

/* Returns 0 when the driver has no get_rx_csum hook. */
static inline u32 dev_ethtool_get_rx_csum(struct net_device *dev)
{
	if (!dev->ethtool_ops || !dev->ethtool_ops->get_rx_csum)
		return 0;
	return dev->ethtool_ops->get_rx_csum(dev);
}

/* Returns 0 when the driver has no get_flags hook. */
static inline u32 dev_ethtool_get_flags(struct net_device *dev)
{
	if (!dev->ethtool_ops || !dev->ethtool_ops->get_flags)
		return 0;
	return dev->ethtool_ops->get_flags(dev);
}
/* Logging, debugging and troubleshooting/diagnostic helpers. */
/* netdev_printk helpers, similar to dev_printk */
/* Name to print in messages: the interface name once registered,
 * otherwise a fixed placeholder string.
 */
static inline const char *netdev_name(const struct net_device *dev)
{
	return dev->reg_state == NETREG_REGISTERED ?
		dev->name : "(unregistered net_device)";
}
#define netdev_printk(level, netdev, format, args...) \
dev_printk(level, (netdev)->dev.parent, \
"%s: " format, \
netdev_name(netdev), ##args)
#define netdev_emerg(dev, format, args...) \
netdev_printk(KERN_EMERG, dev, format, ##args)
#define netdev_alert(dev, format, args...) \
netdev_printk(KERN_ALERT, dev, format, ##args)
#define netdev_crit(dev, format, args...) \
netdev_printk(KERN_CRIT, dev, format, ##args)
#define netdev_err(dev, format, args...) \
netdev_printk(KERN_ERR, dev, format, ##args)
#define netdev_warn(dev, format, args...) \
netdev_printk(KERN_WARNING, dev, format, ##args)
#define netdev_notice(dev, format, args...) \
netdev_printk(KERN_NOTICE, dev, format, ##args)
#define netdev_info(dev, format, args...) \
netdev_printk(KERN_INFO, dev, format, ##args)
#if defined(DEBUG)
#define netdev_dbg(__dev, format, args...) \
netdev_printk(KERN_DEBUG, __dev, format, ##args)
#elif defined(CONFIG_DYNAMIC_DEBUG)
#define netdev_dbg(__dev, format, args...) \
do { \
dynamic_dev_dbg((__dev)->dev.parent, "%s: " format, \
netdev_name(__dev), ##args); \
} while (0)
#else
#define netdev_dbg(__dev, format, args...) \
({ \
if (0) \
netdev_printk(KERN_DEBUG, __dev, format, ##args); \
0; \
})
#endif
#if defined(VERBOSE_DEBUG)
#define netdev_vdbg netdev_dbg
#else
#define netdev_vdbg(dev, format, args...) \
({ \
if (0) \
netdev_printk(KERN_DEBUG, dev, format, ##args); \
0; \
})
#endif
/*
* netdev_WARN() acts like dev_printk(), but with the key difference
* of using a WARN/WARN_ON to get the message out, including the
* file/line information and a backtrace.
*/
#define netdev_WARN(dev, format, args...) \
WARN(1, "netdevice: %s\n" format, netdev_name(dev), ##args);
/* netif printk helpers, similar to netdev_printk */
#define netif_printk(priv, type, level, dev, fmt, args...) \
do { \
if (netif_msg_##type(priv)) \
netdev_printk(level, (dev), fmt, ##args); \
} while (0)
#define netif_emerg(priv, type, dev, fmt, args...) \
netif_printk(priv, type, KERN_EMERG, dev, fmt, ##args)
#define netif_alert(priv, type, dev, fmt, args...) \
netif_printk(priv, type, KERN_ALERT, dev, fmt, ##args)
#define netif_crit(priv, type, dev, fmt, args...) \
netif_printk(priv, type, KERN_CRIT, dev, fmt, ##args)
#define netif_err(priv, type, dev, fmt, args...) \
netif_printk(priv, type, KERN_ERR, dev, fmt, ##args)
#define netif_warn(priv, type, dev, fmt, args...) \
netif_printk(priv, type, KERN_WARNING, dev, fmt, ##args)
#define netif_notice(priv, type, dev, fmt, args...) \
netif_printk(priv, type, KERN_NOTICE, dev, fmt, ##args)
#define netif_info(priv, type, dev, fmt, args...) \
netif_printk(priv, type, KERN_INFO, (dev), fmt, ##args)
#if defined(DEBUG)
#define netif_dbg(priv, type, dev, format, args...) \
netif_printk(priv, type, KERN_DEBUG, dev, format, ##args)
#elif defined(CONFIG_DYNAMIC_DEBUG)
#define netif_dbg(priv, type, netdev, format, args...) \
do { \
if (netif_msg_##type(priv)) \
dynamic_dev_dbg((netdev)->dev.parent, \
"%s: " format, \
netdev_name(netdev), ##args); \
} while (0)
#else
#define netif_dbg(priv, type, dev, format, args...) \
({ \
if (0) \
netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \
0; \
})
#endif
#if defined(VERBOSE_DEBUG)
#define netif_vdbg netdev_dbg
#else
#define netif_vdbg(priv, type, dev, format, args...) \
({ \
if (0) \
netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \
0; \
})
#endif
#endif /* __KERNEL__ */
#endif /* _LINUX_NETDEVICE_H */
|
/*
* INET An implementation of the TCP/IP protocol suite for the LINUX
* operating system. INET is implemented using the BSD Socket
* interface as the means of communication with the user level.
*
* Definitions for the Interfaces handler.
*
* Version: @(#)dev.h 1.0.10 08/12/93
*
* Authors: Ross Biro
* Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
* Corey Minyard <wf-rch!minyard@relay.EU.net>
* Donald J. Becker, <becker@cesdis.gsfc.nasa.gov>
* Alan Cox, <alan@lxorguk.ukuu.org.uk>
* Bjorn Ekwall. <bj0rn@blox.se>
* Pekka Riikonen <priikone@poseidon.pspt.fi>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* Moved to /usr/include/linux for NET3
*/
#ifndef _LINUX_NETDEVICE_H
#define _LINUX_NETDEVICE_H
#include <linux/if.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>
#include <linux/if_link.h>
#ifdef __KERNEL__
#include <linux/pm_qos_params.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <asm/atomic.h>
#include <asm/cache.h>
#include <asm/byteorder.h>
#include <linux/device.h>
#include <linux/percpu.h>
#include <linux/rculist.h>
#include <linux/dmaengine.h>
#include <linux/workqueue.h>
#include <linux/ethtool.h>
#include <net/net_namespace.h>
#include <net/dsa.h>
#ifdef CONFIG_DCB
#include <net/dcbnl.h>
#endif
struct vlan_group;
struct netpoll_info;
/* 802.11 specific */
struct wireless_dev;
/* source back-compat hooks */
#define SET_ETHTOOL_OPS(netdev,ops) \
( (netdev)->ethtool_ops = (ops) )
#define HAVE_ALLOC_NETDEV /* feature macro: alloc_xxxdev
functions are available. */
#define HAVE_FREE_NETDEV /* free_netdev() */
#define HAVE_NETDEV_PRIV /* netdev_priv() */
/* Backlog congestion levels */
#define NET_RX_SUCCESS 0 /* keep 'em coming, baby */
#define NET_RX_DROP 1 /* packet dropped */
/*
* Transmit return codes: transmit return codes originate from three different
* namespaces:
*
* - qdisc return codes
* - driver transmit return codes
* - errno values
*
* Drivers are allowed to return any one of those in their hard_start_xmit()
* function. Real network devices commonly used with qdiscs should only return
* the driver transmit return codes though - when qdiscs are used, the actual
* transmission happens asynchronously, so the value is not propagated to
* higher layers. Virtual network devices transmit synchronously, in this case
* the driver transmit return codes are consumed by dev_queue_xmit(), all
* others are propagated to higher layers.
*/
/* qdisc ->enqueue() return codes. */
#define NET_XMIT_SUCCESS 0x00
#define NET_XMIT_DROP 0x01 /* skb dropped */
#define NET_XMIT_CN 0x02 /* congestion notification */
#define NET_XMIT_POLICED 0x03 /* skb is shot by police */
#define NET_XMIT_MASK 0x0f /* qdisc flags in net/sch_generic.h */
/* NET_XMIT_CN is special. It does not guarantee that this packet is lost. It
* indicates that the device will soon be dropping packets, or already drops
* some packets of the same priority; prompting us to send less aggressively. */
#define net_xmit_eval(e) ((e) == NET_XMIT_CN ? 0 : (e))
#define net_xmit_errno(e) ((e) != NET_XMIT_CN ? -ENOBUFS : 0)
/* Driver transmit return codes */
#define NETDEV_TX_MASK 0xf0
enum netdev_tx {
__NETDEV_TX_MIN = INT_MIN, /* make sure enum is signed */
NETDEV_TX_OK = 0x00, /* driver took care of packet */
NETDEV_TX_BUSY = 0x10, /* driver tx path was busy*/
NETDEV_TX_LOCKED = 0x20, /* driver tx lock was already taken */
};
typedef enum netdev_tx netdev_tx_t;
/*
* Current order: NETDEV_TX_MASK > NET_XMIT_MASK >= 0 is significant;
* hard_start_xmit() return < NET_XMIT_MASK means skb was consumed.
*/
static inline bool dev_xmit_complete(int rc)
{
	/*
	 * Positive cases with an skb consumed by a driver:
	 * - successful transmission (rc == NETDEV_TX_OK)
	 * - error while transmitting (rc < 0)
	 * - error while queueing to a different device (rc & NET_XMIT_MASK)
	 */
	return likely(rc < NET_XMIT_MASK);
}
#endif
#define MAX_ADDR_LEN 32 /* Largest hardware address length */
#ifdef __KERNEL__
/*
* Compute the worst case header length according to the protocols
* used.
*/
#if defined(CONFIG_WLAN) || defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
# if defined(CONFIG_MAC80211_MESH)
# define LL_MAX_HEADER 128
# else
# define LL_MAX_HEADER 96
# endif
#elif defined(CONFIG_TR) || defined(CONFIG_TR_MODULE)
# define LL_MAX_HEADER 48
#else
# define LL_MAX_HEADER 32
#endif
#if !defined(CONFIG_NET_IPIP) && !defined(CONFIG_NET_IPIP_MODULE) && \
!defined(CONFIG_NET_IPGRE) && !defined(CONFIG_NET_IPGRE_MODULE) && \
!defined(CONFIG_IPV6_SIT) && !defined(CONFIG_IPV6_SIT_MODULE) && \
!defined(CONFIG_IPV6_TUNNEL) && !defined(CONFIG_IPV6_TUNNEL_MODULE)
#define MAX_HEADER LL_MAX_HEADER
#else
#define MAX_HEADER (LL_MAX_HEADER + 48)
#endif
/*
* Old network device statistics. Fields are native words
* (unsigned long) so they can be read and written atomically.
* Each field is padded to 64 bits for compatibility with
* rtnl_link_stats64.
*/
/* Emit one stats counter padded to 64 bits so the struct layout matches
 * struct rtnl_link_stats64: on 32-bit kernels a pad word goes after
 * (little-endian) or before (big-endian) the counter.
 */
#if BITS_PER_LONG == 64
#define NET_DEVICE_STATS_DEFINE(name)	unsigned long name
#elif defined(__LITTLE_ENDIAN)
#define NET_DEVICE_STATS_DEFINE(name)	unsigned long name, pad_ ## name
#else
#define NET_DEVICE_STATS_DEFINE(name)	unsigned long pad_ ## name, name
#endif

/* Legacy per-device statistics; fields are native words so they can be
 * read and written atomically (see padding note above).
 */
struct net_device_stats {
	NET_DEVICE_STATS_DEFINE(rx_packets);
	NET_DEVICE_STATS_DEFINE(tx_packets);
	NET_DEVICE_STATS_DEFINE(rx_bytes);
	NET_DEVICE_STATS_DEFINE(tx_bytes);
	NET_DEVICE_STATS_DEFINE(rx_errors);
	NET_DEVICE_STATS_DEFINE(tx_errors);
	NET_DEVICE_STATS_DEFINE(rx_dropped);
	NET_DEVICE_STATS_DEFINE(tx_dropped);
	NET_DEVICE_STATS_DEFINE(multicast);
	NET_DEVICE_STATS_DEFINE(collisions);
	NET_DEVICE_STATS_DEFINE(rx_length_errors);
	NET_DEVICE_STATS_DEFINE(rx_over_errors);
	NET_DEVICE_STATS_DEFINE(rx_crc_errors);
	NET_DEVICE_STATS_DEFINE(rx_frame_errors);
	NET_DEVICE_STATS_DEFINE(rx_fifo_errors);
	NET_DEVICE_STATS_DEFINE(rx_missed_errors);
	NET_DEVICE_STATS_DEFINE(tx_aborted_errors);
	NET_DEVICE_STATS_DEFINE(tx_carrier_errors);
	NET_DEVICE_STATS_DEFINE(tx_fifo_errors);
	NET_DEVICE_STATS_DEFINE(tx_heartbeat_errors);
	NET_DEVICE_STATS_DEFINE(tx_window_errors);
	NET_DEVICE_STATS_DEFINE(rx_compressed);
	NET_DEVICE_STATS_DEFINE(tx_compressed);
};
#endif /* __KERNEL__ */
/* Media selection options. */
enum {
IF_PORT_UNKNOWN = 0,
IF_PORT_10BASE2,
IF_PORT_10BASET,
IF_PORT_AUI,
IF_PORT_100BASET,
IF_PORT_100BASETX,
IF_PORT_100BASEFX
};
#ifdef __KERNEL__
#include <linux/cache.h>
#include <linux/skbuff.h>
struct neighbour;
struct neigh_parms;
struct sk_buff;
/* One hardware address on a device address list. */
struct netdev_hw_addr {
	struct list_head	list;
	unsigned char		addr[MAX_ADDR_LEN];
	unsigned char		type;	/* one of NETDEV_HW_ADDR_T_* */
#define NETDEV_HW_ADDR_T_LAN		1
#define NETDEV_HW_ADDR_T_SAN		2
#define NETDEV_HW_ADDR_T_SLAVE		3
#define NETDEV_HW_ADDR_T_UNICAST	4
#define NETDEV_HW_ADDR_T_MULTICAST	5
	int			refcount;	/* number of list users */
	bool			synced;		/* presumably set by address-sync helpers — confirm against dev_uc_sync */
	bool			global_use;
	struct rcu_head		rcu_head;	/* deferred free under RCU */
};

/* List head plus element count, so size/emptiness checks are O(1). */
struct netdev_hw_addr_list {
	struct list_head list;
	int count;
};
#define netdev_hw_addr_list_count(l) ((l)->count)
#define netdev_hw_addr_list_empty(l) (netdev_hw_addr_list_count(l) == 0)
#define netdev_hw_addr_list_for_each(ha, l) \
list_for_each_entry(ha, &(l)->list, list)
#define netdev_uc_count(dev) netdev_hw_addr_list_count(&(dev)->uc)
#define netdev_uc_empty(dev) netdev_hw_addr_list_empty(&(dev)->uc)
#define netdev_for_each_uc_addr(ha, dev) \
netdev_hw_addr_list_for_each(ha, &(dev)->uc)
#define netdev_mc_count(dev) netdev_hw_addr_list_count(&(dev)->mc)
#define netdev_mc_empty(dev) netdev_hw_addr_list_empty(&(dev)->mc)
#define netdev_for_each_mc_addr(ha, dev) \
netdev_hw_addr_list_for_each(ha, &(dev)->mc)
/* Cached, pre-built hardware header. */
struct hh_cache {
	struct hh_cache *hh_next;	/* Next entry */
	atomic_t	hh_refcnt;	/* number of users */
	/*
	 * We want hh_output, hh_len, hh_lock and hh_data to be in a separate
	 * cache line on SMP.
	 * They are mostly read, but hh_refcnt may be changed quite frequently,
	 * incurring cache line ping pongs.
	 */
	__be16		hh_type ____cacheline_aligned_in_smp;
					/* protocol identifier, f.e ETH_P_IP
					 * NOTE:  For VLANs, this will be the
					 * encapsulated type. --BLG
					 */
	u16		hh_len;		/* length of header */
	int		(*hh_output)(struct sk_buff *skb);
	seqlock_t	hh_lock;

	/* cached hardware header; allow for machine alignment needs. */
#define HH_DATA_MOD	16
#define HH_DATA_OFF(__len) \
	(HH_DATA_MOD - (((__len - 1) & (HH_DATA_MOD - 1)) + 1))
#define HH_DATA_ALIGN(__len) \
	(((__len)+(HH_DATA_MOD-1))&~(HH_DATA_MOD - 1))
	unsigned long	hh_data[HH_DATA_ALIGN(LL_MAX_HEADER) / sizeof(long)];
};
/* Reserve HH_DATA_MOD byte aligned hard_header_len, but at least that much.
 * Alternative is:
 *   dev->hard_header_len ? (dev->hard_header_len +
 *                           (HH_DATA_MOD - 1)) & ~(HH_DATA_MOD - 1) : 0
 *
 * We could use other alignment values, but we must maintain the
 * relationship HH alignment <= LL alignment.
 *
 * LL_ALLOCATED_SPACE also takes into account the tailroom the device
 * may need.
 */
/* Headroom to reserve for the link-layer header alone. */
#define LL_RESERVED_SPACE(dev) \
	((((dev)->hard_header_len+(dev)->needed_headroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
/* As above, with @extra additional bytes of headroom requested by the caller. */
#define LL_RESERVED_SPACE_EXTRA(dev,extra) \
	((((dev)->hard_header_len+(dev)->needed_headroom+(extra))&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
/* Headroom plus the device's needed tailroom. */
#define LL_ALLOCATED_SPACE(dev) \
	((((dev)->hard_header_len+(dev)->needed_headroom+(dev)->needed_tailroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD)
/* Link-layer header operations supplied by the device type
 * (e.g. Ethernet). All hooks are optional unless a caller checks
 * for them explicitly (see dev_hard_header/dev_parse_header below).
 */
struct header_ops {
	/* Build the hardware header in front of @skb's data. */
	int	(*create) (struct sk_buff *skb, struct net_device *dev,
			   unsigned short type, const void *daddr,
			   const void *saddr, unsigned len);
	/* Extract the source hardware address of @skb into @haddr. */
	int	(*parse)(const struct sk_buff *skb, unsigned char *haddr);
	/* Rebuild the header after neighbour resolution. */
	int	(*rebuild)(struct sk_buff *skb);
#define HAVE_HEADER_CACHE
	/* Populate/refresh a cached header (see struct hh_cache). */
	int	(*cache)(const struct neighbour *neigh, struct hh_cache *hh);
	void	(*cache_update)(struct hh_cache *hh,
				const struct net_device *dev,
				const unsigned char *haddr);
};
/* These flag bits are private to the generic network queueing
 * layer, they may not be explicitly referenced by any other
 * code.
 * They index bits in net_device.state.
 */
enum netdev_state_t {
	__LINK_STATE_START,
	__LINK_STATE_PRESENT,
	__LINK_STATE_NOCARRIER,
	__LINK_STATE_LINKWATCH_PENDING,
	__LINK_STATE_DORMANT,
};
/*
 * This structure holds at boot time configured netdevice settings. They
 * are then used in the device probing.
 */
struct netdev_boot_setup {
	char name[IFNAMSIZ];	/* interface name the settings apply to */
	struct ifmap map;	/* I/O parameters (irq, base_addr, ...) */
};
/* Maximum number of boot-time "netdev=" entries that are remembered. */
#define NETDEV_BOOT_SETUP_MAX 8
extern int __init netdev_boot_setup(char *str);
/*
 * Structure for NAPI scheduling similar to tasklet but with weighting
 */
struct napi_struct {
	/* The poll_list must only be managed by the entity which
	 * changes the state of the NAPI_STATE_SCHED bit. This means
	 * whoever atomically sets that bit can add this napi_struct
	 * to the per-cpu poll_list, and whoever clears that bit
	 * can remove from the list right before clearing the bit.
	 */
	struct list_head	poll_list;
	unsigned long		state;		/* NAPI_STATE_* bits */
	int			weight;		/* budget per poll invocation */
	int			(*poll)(struct napi_struct *, int);
#ifdef CONFIG_NETPOLL
	spinlock_t		poll_lock;
	int			poll_owner;
#endif
	unsigned int		gro_count;	/* packets held on gro_list */
	struct net_device	*dev;
	struct list_head	dev_list;	/* link on dev->napi_list */
	struct sk_buff		*gro_list;	/* packets pending GRO merge */
	struct sk_buff		*skb;
};
/* Bit numbers for napi_struct.state. */
enum {
	NAPI_STATE_SCHED,	/* Poll is scheduled */
	NAPI_STATE_DISABLE,	/* Disable pending */
	NAPI_STATE_NPSVC,	/* Netpoll - don't dequeue from poll_list */
};
/* Outcome of handing a packet to the GRO engine. */
enum gro_result {
	GRO_MERGED,		/* merged into an existing flow */
	GRO_MERGED_FREE,	/* merged; caller's skb may be freed */
	GRO_HELD,		/* held on gro_list awaiting more segments */
	GRO_NORMAL,		/* not merged; pass up the stack normally */
	GRO_DROP,		/* drop the packet */
};
typedef enum gro_result gro_result_t;
typedef struct sk_buff *rx_handler_func_t(struct sk_buff *skb);
extern void __napi_schedule(struct napi_struct *n);
/* Report whether a napi_disable() is currently pending on @n. */
static inline int napi_disable_pending(struct napi_struct *n)
{
	int pending;

	pending = test_bit(NAPI_STATE_DISABLE, &n->state);
	return pending;
}
/**
 *	napi_schedule_prep - check if napi can be scheduled
 *	@n: napi context
 *
 * Atomically claim the NAPI_STATE_SCHED bit unless it is already
 * held or a disable is pending. Acts as the condition variable that
 * guarantees a single poll instance runs at a time.
 */
static inline int napi_schedule_prep(struct napi_struct *n)
{
	if (napi_disable_pending(n))
		return 0;
	/* Claim succeeds only if SCHED was previously clear. */
	return !test_and_set_bit(NAPI_STATE_SCHED, &n->state);
}
/**
 *	napi_schedule - schedule NAPI poll
 *	@n: napi context
 *
 * Arrange for @n's poll routine to be invoked, unless it is
 * already scheduled or disabled.
 */
static inline void napi_schedule(struct napi_struct *n)
{
	if (!napi_schedule_prep(n))
		return;
	__napi_schedule(n);
}
/* Try to reschedule poll. Called by dev->poll() after napi_complete().
 * Returns 1 if the poll was re-armed, 0 if it was already scheduled
 * or a disable is pending.
 */
static inline int napi_reschedule(struct napi_struct *napi)
{
	if (!napi_schedule_prep(napi))
		return 0;
	__napi_schedule(napi);
	return 1;
}
/**
* napi_complete - NAPI processing complete
* @n: napi context
*
* Mark NAPI processing as complete.
*/
extern void __napi_complete(struct napi_struct *n);
extern void napi_complete(struct napi_struct *n);
/**
 *	napi_disable - prevent NAPI from scheduling
 *	@n: napi context
 *
 * Stop NAPI from being scheduled on this context.
 * Waits till any outstanding processing completes.
 * Raises NAPI_STATE_DISABLE first so napi_schedule_prep() refuses new
 * polls, then spins until it can acquire the SCHED bit itself.
 */
static inline void napi_disable(struct napi_struct *n)
{
	set_bit(NAPI_STATE_DISABLE, &n->state);
	for (;;) {
		if (!test_and_set_bit(NAPI_STATE_SCHED, &n->state))
			break;
		msleep(1);	/* a poll is still running; wait for it */
	}
	clear_bit(NAPI_STATE_DISABLE, &n->state);
}
/**
 *	napi_enable - enable NAPI scheduling
 *	@n: napi context
 *
 *	Resume NAPI from being scheduled on this context.
 *	Must be paired with napi_disable.
 */
static inline void napi_enable(struct napi_struct *n)
{
	/* Caller must still hold the SCHED bit left set by napi_disable(). */
	BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
	/* Order prior stores before releasing SCHED to other CPUs. */
	smp_mb__before_clear_bit();
	clear_bit(NAPI_STATE_SCHED, &n->state);
}
#ifdef CONFIG_SMP
/**
 *	napi_synchronize - wait until NAPI is not running
 *	@n: napi context
 *
 *	Wait until NAPI is done being scheduled on this context.
 *	Waits till any outstanding processing completes but
 *	does not disable future activations.
 */
static inline void napi_synchronize(const struct napi_struct *n)
{
	/* Busy-wait (sleeping) until the SCHED bit is released. */
	while (test_bit(NAPI_STATE_SCHED, &n->state))
		msleep(1);
}
#else
/* On UP a poll cannot run concurrently; a compiler barrier suffices. */
# define napi_synchronize(n)	barrier()
#endif
/* Bit numbers for netdev_queue.state. */
enum netdev_queue_state_t {
	__QUEUE_STATE_XOFF,	/* queue stopped, no transmits allowed */
	__QUEUE_STATE_FROZEN,	/* frozen by the qdisc layer */
};
/* Per-device transmit queue. Layout is split so that the fields read
 * on every xmit stay on a different cache line from the ones written.
 */
struct netdev_queue {
/*
 * read mostly part
 */
	struct net_device	*dev;
	struct Qdisc		*qdisc;
	unsigned long		state;		/* __QUEUE_STATE_* bits */
	struct Qdisc		*qdisc_sleeping;
/*
 * write mostly part
 */
	spinlock_t		_xmit_lock ____cacheline_aligned_in_smp;
	int			xmit_lock_owner;
	/*
	 * please use this field instead of dev->trans_start
	 */
	unsigned long		trans_start;
	unsigned long		tx_bytes;
	unsigned long		tx_packets;
	unsigned long		tx_dropped;
} ____cacheline_aligned_in_smp;
#ifdef CONFIG_RPS
/*
 * This structure holds an RPS map which can be of variable length. The
 * map is an array of CPUs.
 */
struct rps_map {
	unsigned int len;	/* number of entries in cpus[] */
	struct rcu_head rcu;	/* deferred free under RCU */
	u16 cpus[0];		/* flexible array of CPU ids */
};
#define RPS_MAP_SIZE(_num) (sizeof(struct rps_map) + (_num * sizeof(u16)))
/*
 * The rps_dev_flow structure contains the mapping of a flow to a CPU and the
 * tail pointer for that CPU's input queue at the time of last enqueue.
 */
struct rps_dev_flow {
	u16 cpu;		/* CPU the flow was last steered to */
	u16 fill;		/* padding */
	unsigned int last_qtail;
};
/*
 * The rps_dev_flow_table structure contains a table of flow mappings.
 */
struct rps_dev_flow_table {
	unsigned int mask;	/* table size - 1; hash is masked with this */
	struct rcu_head rcu;
	struct work_struct free_work;
	struct rps_dev_flow flows[0];	/* flexible array, mask + 1 entries */
};
#define RPS_DEV_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_dev_flow_table) + \
	(_num * sizeof(struct rps_dev_flow)))
/*
 * The rps_sock_flow_table contains mappings of flows to the last CPU
 * on which they were processed by the application (set in recvmsg).
 */
struct rps_sock_flow_table {
	unsigned int mask;	/* table size - 1; hash is masked with this */
	u16 ents[0];		/* flexible array of CPU ids */
};
#define RPS_SOCK_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_sock_flow_table) + \
	(_num * sizeof(u16)))
/* Sentinel entry meaning "no CPU recorded for this flow". */
#define RPS_NO_CPU 0xffff
/* Record in @table that the flow identified by @hash was last processed
 * on the current CPU. A hash of 0 (or a NULL table) is ignored.
 */
static inline void rps_record_sock_flow(struct rps_sock_flow_table *table,
					u32 hash)
{
	unsigned int cpu, index;

	if (!table || !hash)
		return;
	index = hash & table->mask;
	/* We only give a hint, preemption can change cpu under us */
	cpu = raw_smp_processor_id();
	if (table->ents[index] != cpu)
		table->ents[index] = cpu;
}
/* Forget any CPU recorded for the flow identified by @hash. */
static inline void rps_reset_sock_flow(struct rps_sock_flow_table *table,
				       u32 hash)
{
	if (!table || !hash)
		return;
	table->ents[hash & table->mask] = RPS_NO_CPU;
}
extern struct rps_sock_flow_table *rps_sock_flow_table;
/* This structure contains an instance of an RX queue. */
struct netdev_rx_queue {
	struct rps_map *rps_map;		/* CPUs eligible for RPS steering */
	struct rps_dev_flow_table *rps_flow_table;
	struct kobject kobj;			/* sysfs representation */
	struct netdev_rx_queue *first;		/* first queue in the device's array */
	atomic_t count;				/* queues sharing the allocation */
} ____cacheline_aligned_in_smp;
#endif /* CONFIG_RPS */
/*
* This structure defines the management hooks for network devices.
* The following hooks can be defined; unless noted otherwise, they are
* optional and can be filled with a null pointer.
*
* int (*ndo_init)(struct net_device *dev);
* This function is called once when network device is registered.
 * The network device can use this for any late stage initialization
 * or semantic validation. It can fail with an error code which will
 * be propagated back to register_netdev
*
* void (*ndo_uninit)(struct net_device *dev);
* This function is called when device is unregistered or when registration
* fails. It is not called if init fails.
*
* int (*ndo_open)(struct net_device *dev);
 * This function is called when network device transitions to the up
* state.
*
* int (*ndo_stop)(struct net_device *dev);
 * This function is called when network device transitions to the down
* state.
*
* netdev_tx_t (*ndo_start_xmit)(struct sk_buff *skb,
* struct net_device *dev);
* Called when a packet needs to be transmitted.
* Must return NETDEV_TX_OK , NETDEV_TX_BUSY.
* (can also return NETDEV_TX_LOCKED iff NETIF_F_LLTX)
* Required can not be NULL.
*
* u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb);
 * Called to decide which queue to use when the device supports
 * multiple transmit queues.
*
* void (*ndo_change_rx_flags)(struct net_device *dev, int flags);
* This function is called to allow device receiver to make
 * changes to configuration when multicast or promiscuous is enabled.
*
* void (*ndo_set_rx_mode)(struct net_device *dev);
 * This function is called when the device changes address list filtering.
*
* void (*ndo_set_multicast_list)(struct net_device *dev);
* This function is called when the multicast address list changes.
*
* int (*ndo_set_mac_address)(struct net_device *dev, void *addr);
* This function is called when the Media Access Control address
* needs to be changed. If this interface is not defined, the
* mac address can not be changed.
*
* int (*ndo_validate_addr)(struct net_device *dev);
* Test if Media Access Control address is valid for the device.
*
* int (*ndo_do_ioctl)(struct net_device *dev, struct ifreq *ifr, int cmd);
* Called when a user request an ioctl which can't be handled by
* the generic interface code. If not defined ioctl's return
* not supported error code.
*
* int (*ndo_set_config)(struct net_device *dev, struct ifmap *map);
* Used to set network devices bus interface parameters. This interface
* is retained for legacy reason, new devices should use the bus
* interface (PCI) for low level management.
*
* int (*ndo_change_mtu)(struct net_device *dev, int new_mtu);
* Called when a user wants to change the Maximum Transfer Unit
* of a device. If not defined, any request to change MTU will
* will return an error.
*
* void (*ndo_tx_timeout)(struct net_device *dev);
* Callback uses when the transmitter has not made any progress
* for dev->watchdog ticks.
*
* struct rtnl_link_stats64* (*ndo_get_stats64)(struct net_device *dev);
* struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);
* Called when a user wants to get the network device usage
* statistics. Drivers must do one of the following:
* 1. Define @ndo_get_stats64 to update a rtnl_link_stats64 structure
 * (which should normally be dev->stats64) and return a pointer to
* it. The structure must not be changed asynchronously.
* 2. Define @ndo_get_stats to update a net_device_stats structure
* (which should normally be dev->stats) and return a pointer to
* it. The structure may be changed asynchronously only if each
* field is written atomically.
* 3. Update dev->stats asynchronously and atomically, and define
* neither operation.
*
* void (*ndo_vlan_rx_register)(struct net_device *dev, struct vlan_group *grp);
 * If device support VLAN receive acceleration
* (ie. dev->features & NETIF_F_HW_VLAN_RX), then this function is called
* when vlan groups for the device changes. Note: grp is NULL
* if no vlan's groups are being used.
*
* void (*ndo_vlan_rx_add_vid)(struct net_device *dev, unsigned short vid);
* If device support VLAN filtering (dev->features & NETIF_F_HW_VLAN_FILTER)
* this function is called when a VLAN id is registered.
*
* void (*ndo_vlan_rx_kill_vid)(struct net_device *dev, unsigned short vid);
* If device support VLAN filtering (dev->features & NETIF_F_HW_VLAN_FILTER)
* this function is called when a VLAN id is unregistered.
*
* void (*ndo_poll_controller)(struct net_device *dev);
*
* SR-IOV management functions.
* int (*ndo_set_vf_mac)(struct net_device *dev, int vf, u8* mac);
* int (*ndo_set_vf_vlan)(struct net_device *dev, int vf, u16 vlan, u8 qos);
* int (*ndo_set_vf_tx_rate)(struct net_device *dev, int vf, int rate);
* int (*ndo_get_vf_config)(struct net_device *dev,
* int vf, struct ifla_vf_info *ivf);
* int (*ndo_set_vf_port)(struct net_device *dev, int vf,
* struct nlattr *port[]);
* int (*ndo_get_vf_port)(struct net_device *dev, int vf, struct sk_buff *skb);
*/
#define HAVE_NET_DEVICE_OPS
/* Management hooks for network devices; see the kernel-doc block above
 * for per-hook semantics. Unless noted there, every hook is optional
 * and may be left NULL.
 */
struct net_device_ops {
	int			(*ndo_init)(struct net_device *dev);
	void			(*ndo_uninit)(struct net_device *dev);
	int			(*ndo_open)(struct net_device *dev);
	int			(*ndo_stop)(struct net_device *dev);
	netdev_tx_t		(*ndo_start_xmit) (struct sk_buff *skb,
						   struct net_device *dev);
	u16			(*ndo_select_queue)(struct net_device *dev,
						    struct sk_buff *skb);
	void			(*ndo_change_rx_flags)(struct net_device *dev,
						       int flags);
	void			(*ndo_set_rx_mode)(struct net_device *dev);
	void			(*ndo_set_multicast_list)(struct net_device *dev);
	int			(*ndo_set_mac_address)(struct net_device *dev,
						       void *addr);
	int			(*ndo_validate_addr)(struct net_device *dev);
	int			(*ndo_do_ioctl)(struct net_device *dev,
						struct ifreq *ifr, int cmd);
	int			(*ndo_set_config)(struct net_device *dev,
						  struct ifmap *map);
	int			(*ndo_change_mtu)(struct net_device *dev,
						  int new_mtu);
	int			(*ndo_neigh_setup)(struct net_device *dev,
						   struct neigh_parms *);
	void			(*ndo_tx_timeout) (struct net_device *dev);
	struct rtnl_link_stats64* (*ndo_get_stats64)(struct net_device *dev);
	struct net_device_stats* (*ndo_get_stats)(struct net_device *dev);
	void			(*ndo_vlan_rx_register)(struct net_device *dev,
							struct vlan_group *grp);
	void			(*ndo_vlan_rx_add_vid)(struct net_device *dev,
						       unsigned short vid);
	void			(*ndo_vlan_rx_kill_vid)(struct net_device *dev,
							unsigned short vid);
#ifdef CONFIG_NET_POLL_CONTROLLER
	void			(*ndo_poll_controller)(struct net_device *dev);
	int			(*ndo_netpoll_setup)(struct net_device *dev,
						     struct netpoll_info *info);
	void			(*ndo_netpoll_cleanup)(struct net_device *dev);
#endif
	/* SR-IOV virtual function management. */
	int			(*ndo_set_vf_mac)(struct net_device *dev,
						  int queue, u8 *mac);
	int			(*ndo_set_vf_vlan)(struct net_device *dev,
						   int queue, u16 vlan, u8 qos);
	int			(*ndo_set_vf_tx_rate)(struct net_device *dev,
						      int vf, int rate);
	int			(*ndo_get_vf_config)(struct net_device *dev,
						     int vf,
						     struct ifla_vf_info *ivf);
	int			(*ndo_set_vf_port)(struct net_device *dev,
						   int vf,
						   struct nlattr *port[]);
	int			(*ndo_get_vf_port)(struct net_device *dev,
						   int vf, struct sk_buff *skb);
#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
	/* FCoE offload hooks. */
	int			(*ndo_fcoe_enable)(struct net_device *dev);
	int			(*ndo_fcoe_disable)(struct net_device *dev);
	int			(*ndo_fcoe_ddp_setup)(struct net_device *dev,
						      u16 xid,
						      struct scatterlist *sgl,
						      unsigned int sgc);
	int			(*ndo_fcoe_ddp_done)(struct net_device *dev,
						     u16 xid);
#define NETDEV_FCOE_WWNN 0
#define NETDEV_FCOE_WWPN 1
	int			(*ndo_fcoe_get_wwn)(struct net_device *dev,
						    u64 *wwn, int type);
#endif
};
/*
* The DEVICE structure.
* Actually, this whole structure is a big mistake. It mixes I/O
* data with strictly "high-level" data, and it has to know about
* almost every data structure used in the INET module.
*
* FIXME: cleanup struct net_device such that network protocol info
* moves out.
*/
struct net_device {
	/*
	 * This is the first field of the "visible" part of this structure
	 * (i.e. as seen by users in the "Space.c" file). It is the name
	 * of the interface.
	 */
	char			name[IFNAMSIZ];
	struct pm_qos_request_list *pm_qos_req;
	/* device name hash chain */
	struct hlist_node	name_hlist;
	/* snmp alias */
	char			*ifalias;
	/*
	 *	I/O specific fields
	 *	FIXME: Merge these and struct ifmap into one
	 */
	unsigned long		mem_end;	/* shared mem end	*/
	unsigned long		mem_start;	/* shared mem start	*/
	unsigned long		base_addr;	/* device I/O address	*/
	unsigned int		irq;		/* device IRQ number	*/
	/*
	 *	Some hardware also needs these fields, but they are not
	 *	part of the usual set specified in Space.c.
	 */
	unsigned char		if_port;	/* Selectable AUI, TP,..*/
	unsigned char		dma;		/* DMA channel		*/
	unsigned long		state;		/* __LINK_STATE_* bits	*/
	struct list_head	dev_list;
	struct list_head	napi_list;
	struct list_head	unreg_list;
	/* Net device features */
	unsigned long		features;
#define NETIF_F_SG		1	/* Scatter/gather IO. */
#define NETIF_F_IP_CSUM		2	/* Can checksum TCP/UDP over IPv4. */
#define NETIF_F_NO_CSUM		4	/* Does not require checksum. F.e. loopack. */
#define NETIF_F_HW_CSUM		8	/* Can checksum all the packets. */
#define NETIF_F_IPV6_CSUM	16	/* Can checksum TCP/UDP over IPV6 */
#define NETIF_F_HIGHDMA		32	/* Can DMA to high memory. */
#define NETIF_F_FRAGLIST	64	/* Scatter/gather IO. */
#define NETIF_F_HW_VLAN_TX	128	/* Transmit VLAN hw acceleration */
#define NETIF_F_HW_VLAN_RX	256	/* Receive VLAN hw acceleration */
#define NETIF_F_HW_VLAN_FILTER	512	/* Receive filtering on VLAN */
#define NETIF_F_VLAN_CHALLENGED	1024	/* Device cannot handle VLAN packets */
#define NETIF_F_GSO		2048	/* Enable software GSO. */
#define NETIF_F_LLTX		4096	/* LockLess TX - deprecated. Please */
					/* do not use LLTX in new drivers */
#define NETIF_F_NETNS_LOCAL	8192	/* Does not change network namespaces */
#define NETIF_F_GRO		16384	/* Generic receive offload */
#define NETIF_F_LRO		32768	/* large receive offload */
/* the GSO_MASK reserves bits 16 through 23 */
#define NETIF_F_FCOE_CRC	(1 << 24) /* FCoE CRC32 */
#define NETIF_F_SCTP_CSUM	(1 << 25) /* SCTP checksum offload */
#define NETIF_F_FCOE_MTU	(1 << 26) /* Supports max FCoE MTU, 2158 bytes*/
#define NETIF_F_NTUPLE		(1 << 27) /* N-tuple filters supported */
#define NETIF_F_RXHASH		(1 << 28) /* Receive hashing offload */
	/* Segmentation offload features */
#define NETIF_F_GSO_SHIFT	16
#define NETIF_F_GSO_MASK	0x00ff0000
#define NETIF_F_TSO		(SKB_GSO_TCPV4 << NETIF_F_GSO_SHIFT)
#define NETIF_F_UFO		(SKB_GSO_UDP << NETIF_F_GSO_SHIFT)
#define NETIF_F_GSO_ROBUST	(SKB_GSO_DODGY << NETIF_F_GSO_SHIFT)
#define NETIF_F_TSO_ECN		(SKB_GSO_TCP_ECN << NETIF_F_GSO_SHIFT)
#define NETIF_F_TSO6		(SKB_GSO_TCPV6 << NETIF_F_GSO_SHIFT)
#define NETIF_F_FSO		(SKB_GSO_FCOE << NETIF_F_GSO_SHIFT)
	/* List of features with software fallbacks. */
#define NETIF_F_GSO_SOFTWARE	(NETIF_F_TSO | NETIF_F_TSO_ECN | \
				 NETIF_F_TSO6 | NETIF_F_UFO)
#define NETIF_F_GEN_CSUM	(NETIF_F_NO_CSUM | NETIF_F_HW_CSUM)
#define NETIF_F_V4_CSUM		(NETIF_F_GEN_CSUM | NETIF_F_IP_CSUM)
#define NETIF_F_V6_CSUM		(NETIF_F_GEN_CSUM | NETIF_F_IPV6_CSUM)
#define NETIF_F_ALL_CSUM	(NETIF_F_V4_CSUM | NETIF_F_V6_CSUM)
	/*
	 * If one device supports one of these features, then enable them
	 * for all in netdev_increment_features.
	 */
#define NETIF_F_ONE_FOR_ALL	(NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ROBUST | \
				 NETIF_F_SG | NETIF_F_HIGHDMA |		\
				 NETIF_F_FRAGLIST)
	/* Interface index. Unique device identifier	*/
	int			ifindex;
	int			iflink;
	/* Either stats64 or stats is used, depending on which of
	 * ndo_get_stats64/ndo_get_stats the driver implements.
	 */
	union {
		struct rtnl_link_stats64 stats64;
		struct net_device_stats stats;
	};
#ifdef CONFIG_WIRELESS_EXT
	/* List of functions to handle Wireless Extensions (instead of ioctl).
	 * See <net/iw_handler.h> for details. Jean II */
	const struct iw_handler_def *	wireless_handlers;
	/* Instance data managed by the core of Wireless Extensions. */
	struct iw_public_data *	wireless_data;
#endif
	/* Management operations */
	const struct net_device_ops *netdev_ops;
	const struct ethtool_ops *ethtool_ops;
	/* Hardware header description */
	const struct header_ops *header_ops;
	unsigned int		flags;	/* interface flags (a la BSD)	*/
	unsigned short		gflags;
	unsigned short          priv_flags; /* Like 'flags' but invisible to userspace. */
	unsigned short		padded;	/* How much padding added by alloc_netdev() */
	unsigned char		operstate; /* RFC2863 operstate */
	unsigned char		link_mode; /* mapping policy to operstate */
	unsigned int		mtu;	/* interface MTU value		*/
	unsigned short		type;	/* interface hardware type	*/
	unsigned short		hard_header_len;	/* hardware hdr length	*/
	/* extra head- and tailroom the hardware may need, but not in all cases
	 * can this be guaranteed, especially tailroom. Some cases also use
	 * LL_MAX_HEADER instead to allocate the skb.
	 */
	unsigned short		needed_headroom;
	unsigned short		needed_tailroom;
	struct net_device	*master; /* Pointer to master device of a group,
					  * which this device is member of.
					  */
	/* Interface address info. */
	unsigned char		perm_addr[MAX_ADDR_LEN]; /* permanent hw address */
	unsigned char		addr_len;	/* hardware address length	*/
	unsigned short          dev_id;		/* for shared network cards */
	spinlock_t		addr_list_lock;	/* protects uc/mc lists below */
	struct netdev_hw_addr_list	uc;	/* Unicast mac addresses */
	struct netdev_hw_addr_list	mc;	/* Multicast mac addresses */
	int			uc_promisc;
	unsigned int		promiscuity;
	unsigned int		allmulti;
	/* Protocol specific pointers */
#ifdef CONFIG_NET_DSA
	void			*dsa_ptr;	/* dsa specific data */
#endif
	void 			*atalk_ptr;	/* AppleTalk link 	*/
	void			*ip_ptr;	/* IPv4 specific data	*/
	void                    *dn_ptr;        /* DECnet specific data */
	void                    *ip6_ptr;       /* IPv6 specific data */
	void			*ec_ptr;	/* Econet specific data	*/
	void			*ax25_ptr;	/* AX.25 specific data */
	struct wireless_dev	*ieee80211_ptr;	/* IEEE 802.11 specific data,
						   assign before registering */
/*
 * Cache line mostly used on receive path (including eth_type_trans())
 */
	unsigned long		last_rx;	/* Time of last Rx	*/
	/* Interface address info used in eth_type_trans() */
	unsigned char		*dev_addr;	/* hw address, (before bcast
						   because most packets are
						   unicast) */
	struct netdev_hw_addr_list	dev_addrs; /* list of device
						      hw addresses */
	unsigned char		broadcast[MAX_ADDR_LEN];	/* hw bcast add	*/
#ifdef CONFIG_RPS
	struct kset		*queues_kset;
	struct netdev_rx_queue	*_rx;
	/* Number of RX queues allocated at alloc_netdev_mq() time  */
	unsigned int		num_rx_queues;
#endif
	struct netdev_queue	rx_queue;
	rx_handler_func_t	*rx_handler;	/* e.g. bridge/macvlan hook */
	void			*rx_handler_data;
	struct netdev_queue	*_tx ____cacheline_aligned_in_smp;
	/* Number of TX queues allocated at alloc_netdev_mq() time  */
	unsigned int		num_tx_queues;
	/* Number of TX queues currently active in device */
	unsigned int		real_num_tx_queues;
	/* root qdisc from userspace point of view */
	struct Qdisc		*qdisc;
	unsigned long		tx_queue_len;	/* Max frames per queue allowed */
	spinlock_t		tx_global_lock;
/*
 * One part is mostly used on xmit path (device)
 */
	/* These may be needed for future network-power-down code. */
	/*
	 * trans_start here is expensive for high speed devices on SMP,
	 * please use netdev_queue->trans_start instead.
	 */
	unsigned long		trans_start;	/* Time (in jiffies) of last Tx	*/
	int			watchdog_timeo; /* used by dev_watchdog() */
	struct timer_list	watchdog_timer;
	/* Number of references to this device */
	atomic_t		refcnt ____cacheline_aligned_in_smp;
	/* delayed register/unregister */
	struct list_head	todo_list;
	/* device index hash chain */
	struct hlist_node	index_hlist;
	struct list_head	link_watch_list;
	/* register/unregister state machine */
	enum { NETREG_UNINITIALIZED=0,
	       NETREG_REGISTERED,	/* completed register_netdevice */
	       NETREG_UNREGISTERING,	/* called unregister_netdevice */
	       NETREG_UNREGISTERED,	/* completed unregister todo */
	       NETREG_RELEASED,		/* called free_netdev */
	       NETREG_DUMMY,		/* dummy device for NAPI poll */
	} reg_state:16;
	enum {
		RTNL_LINK_INITIALIZED,
		RTNL_LINK_INITIALIZING,
	} rtnl_link_state:16;
	/* Called from unregister, can be used to call free_netdev */
	void (*destructor)(struct net_device *dev);
#ifdef CONFIG_NETPOLL
	struct netpoll_info	*npinfo;
#endif
#ifdef CONFIG_NET_NS
	/* Network namespace this network device is inside */
	struct net		*nd_net;
#endif
	/* mid-layer private */
	void			*ml_priv;
	/* GARP */
	struct garp_port	*garp_port;
	/* class/net/name entry */
	struct device		dev;
	/* space for optional device, statistics, and wireless sysfs groups */
	const struct attribute_group *sysfs_groups[4];
	/* rtnetlink link ops */
	const struct rtnl_link_ops *rtnl_link_ops;
	/* VLAN feature mask */
	unsigned long vlan_features;
	/* for setting kernel sock attribute on TCP connection setup */
#define GSO_MAX_SIZE		65536
	unsigned int		gso_max_size;
#ifdef CONFIG_DCB
	/* Data Center Bridging netlink ops */
	const struct dcbnl_rtnl_ops *dcbnl_ops;
#endif
#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
	/* max exchange id for FCoE LRO by ddp */
	unsigned int		fcoe_ddp_xid;
#endif
	/* n-tuple filter list attached to this device */
	struct ethtool_rx_ntuple_list ethtool_ntuple_list;
};
/* Map a struct device embedded in a net_device back to the net_device. */
#define	to_net_dev(d) container_of(d, struct net_device, dev)
/* Alignment (bytes) of the private area returned by netdev_priv(). */
#define	NETDEV_ALIGN		32
static inline
struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev,
unsigned int index)
{
return &dev->_tx[index];
}
/* Invoke @f(dev, queue, arg) on every allocated transmit queue of @dev,
 * in ascending queue order.
 */
static inline void netdev_for_each_tx_queue(struct net_device *dev,
					    void (*f)(struct net_device *,
						      struct netdev_queue *,
						      void *),
					    void *arg)
{
	unsigned int i = 0;

	while (i < dev->num_tx_queues) {
		f(dev, &dev->_tx[i], arg);
		i++;
	}
}
/*
* Net namespace inlines
*/
/* Return the network namespace @dev belongs to. */
static inline
struct net *dev_net(const struct net_device *dev)
{
	struct net *net;

	net = read_pnet(&dev->nd_net);
	return net;
}
/* Move @dev into network namespace @net, dropping the reference on the
 * old namespace and taking one on the new. A no-op when namespaces are
 * compiled out.
 */
static inline
void dev_net_set(struct net_device *dev, struct net *net)
{
#ifdef CONFIG_NET_NS
	release_net(dev->nd_net);
	dev->nd_net = hold_net(net);
#endif
}
/* Return true if @dev is a DSA master whose switch tags frames with the
 * DSA header format. Always false when DSA tagging is compiled out.
 */
static inline bool netdev_uses_dsa_tags(struct net_device *dev)
{
#ifdef CONFIG_NET_DSA_TAG_DSA
	if (dev->dsa_ptr != NULL)
		return dsa_uses_dsa_tags(dev->dsa_ptr);
#endif
	return 0;
}
/* Attach @skb to @dev. Without namespaces this is a plain assignment;
 * with CONFIG_NET_NS the out-of-line version also handles namespace
 * bookkeeping.
 */
#ifndef CONFIG_NET_NS
static inline void skb_set_dev(struct sk_buff *skb, struct net_device *dev)
{
	skb->dev = dev;
}
#else /* CONFIG_NET_NS */
void skb_set_dev(struct sk_buff *skb, struct net_device *dev);
#endif
/* Return true if @dev is a DSA master whose switch tags frames with a
 * trailer. Always false when trailer tagging is compiled out.
 */
static inline bool netdev_uses_trailer_tags(struct net_device *dev)
{
#ifdef CONFIG_NET_DSA_TAG_TRAILER
	if (dev->dsa_ptr != NULL)
		return dsa_uses_trailer_tags(dev->dsa_ptr);
#endif
	return 0;
}
/**
 *	netdev_priv - access network device private data
 *	@dev: network device
 *
 * Return the driver-private area that alloc_netdev() placed directly
 * after the net_device structure, at NETDEV_ALIGN alignment.
 */
static inline void *netdev_priv(const struct net_device *dev)
{
	char *base = (char *)dev;

	return base + ALIGN(sizeof(struct net_device), NETDEV_ALIGN);
}
/* Set the sysfs physical device reference for the network logical device
 * if set prior to registration will cause a symlink during initialization.
 */
#define SET_NETDEV_DEV(net, pdev)	((net)->dev.parent = (pdev))
/* Set the sysfs device type for the network logical device to allow
 * fine grained identification of different network device types. For
 * example Ethernet, Wireless LAN, Bluetooth, WiMAX etc.
 */
#define SET_NETDEV_DEVTYPE(net, devtype)	((net)->dev.type = (devtype))
/**
* netif_napi_add - initialize a napi context
* @dev: network device
* @napi: napi context
* @poll: polling function
* @weight: default weight
*
* netif_napi_add() must be used to initialize a napi context prior to calling
* *any* of the other napi related functions.
*/
void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
int (*poll)(struct napi_struct *, int), int weight);
/**
* netif_napi_del - remove a napi context
* @napi: napi context
*
* netif_napi_del() removes a napi context from the network device napi list
*/
void netif_napi_del(struct napi_struct *napi);
/* GRO engine per-packet state, stored in skb->cb while the packet is
 * on a gro_list. Access it via the NAPI_GRO_CB() macro below.
 */
struct napi_gro_cb {
	/* Virtual address of skb_shinfo(skb)->frags[0].page + offset. */
	void *frag0;
	/* Length of frag0. */
	unsigned int frag0_len;
	/* This indicates where we are processing relative to skb->data. */
	int data_offset;
	/* This is non-zero if the packet may be of the same flow. */
	int same_flow;
	/* This is non-zero if the packet cannot be merged with the new skb. */
	int flush;
	/* Number of segments aggregated. */
	int count;
	/* Free the skb? */
	int free;
};
#define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb)
/* A registered protocol handler; packets of the matching ethertype
 * received on @dev (or any device when @dev is NULL) are delivered
 * to @func. Registered via dev_add_pack().
 */
struct packet_type {
	__be16			type;	/* This is really htons(ether_type). */
	struct net_device	*dev;	/* NULL is wildcarded here	     */
	int			(*func) (struct sk_buff *,
					 struct net_device *,
					 struct packet_type *,
					 struct net_device *);
	/* GSO/GRO hooks for this protocol (all optional). */
	struct sk_buff		*(*gso_segment)(struct sk_buff *skb,
						int features);
	int			(*gso_send_check)(struct sk_buff *skb);
	struct sk_buff		**(*gro_receive)(struct sk_buff **head,
					       struct sk_buff *skb);
	int			(*gro_complete)(struct sk_buff *skb);
	void			*af_packet_priv;
	struct list_head	list;	/* link on the ptype hash/list */
};
#include <linux/interrupt.h>
#include <linux/notifier.h>
extern rwlock_t				dev_base_lock;		/* Device list lock */
/* Iterators over the per-namespace device list. Callers must hold
 * dev_base_lock, the RTNL, or (for the _rcu variants) rcu_read_lock().
 */
#define for_each_netdev(net, d)		\
		list_for_each_entry(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_reverse(net, d)	\
		list_for_each_entry_reverse(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_rcu(net, d)		\
		list_for_each_entry_rcu(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_safe(net, d, n)	\
		list_for_each_entry_safe(d, n, &(net)->dev_base_head, dev_list)
#define for_each_netdev_continue(net, d)		\
		list_for_each_entry_continue(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_continue_rcu(net, d)		\
	list_for_each_entry_continue_rcu(d, &(net)->dev_base_head, dev_list)
#define net_device_entry(lh)	list_entry(lh, struct net_device, dev_list)
/* Return the device after @dev on its namespace's list, or NULL at the
 * end. Caller must hold dev_base_lock or the RTNL.
 */
static inline struct net_device *next_net_device(struct net_device *dev)
{
	struct net *net = dev_net(dev);
	struct list_head *next = dev->dev_list.next;

	if (next == &net->dev_base_head)
		return NULL;
	return net_device_entry(next);
}
/* RCU variant of next_net_device(); caller must be in an RCU read-side
 * critical section.
 */
static inline struct net_device *next_net_device_rcu(struct net_device *dev)
{
	struct net *net = dev_net(dev);
	struct list_head *next = rcu_dereference(dev->dev_list.next);

	if (next == &net->dev_base_head)
		return NULL;
	return net_device_entry(next);
}
/* Return the first device in namespace @net, or NULL if it has none. */
static inline struct net_device *first_net_device(struct net *net)
{
	if (list_empty(&net->dev_base_head))
		return NULL;
	return net_device_entry(net->dev_base_head.next);
}
extern int netdev_boot_setup_check(struct net_device *dev);
extern unsigned long netdev_boot_base(const char *prefix, int unit);
extern struct net_device *dev_getbyhwaddr(struct net *net, unsigned short type, char *hwaddr);
extern struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type);
extern struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type);
extern void dev_add_pack(struct packet_type *pt);
extern void dev_remove_pack(struct packet_type *pt);
extern void __dev_remove_pack(struct packet_type *pt);
extern struct net_device *dev_get_by_flags_rcu(struct net *net, unsigned short flags,
unsigned short mask);
extern struct net_device *dev_get_by_name(struct net *net, const char *name);
extern struct net_device *dev_get_by_name_rcu(struct net *net, const char *name);
extern struct net_device *__dev_get_by_name(struct net *net, const char *name);
extern int dev_alloc_name(struct net_device *dev, const char *name);
extern int dev_open(struct net_device *dev);
extern int dev_close(struct net_device *dev);
extern void dev_disable_lro(struct net_device *dev);
extern int dev_queue_xmit(struct sk_buff *skb);
extern int register_netdevice(struct net_device *dev);
extern void unregister_netdevice_queue(struct net_device *dev,
struct list_head *head);
extern void unregister_netdevice_many(struct list_head *head);
/* Unregister @dev immediately (not batched onto a caller-supplied list).
 * Caller must hold the RTNL; see unregister_netdevice_queue().
 */
static inline void unregister_netdevice(struct net_device *dev)
{
	unregister_netdevice_queue(dev, NULL);
}
extern void free_netdev(struct net_device *dev);
extern void synchronize_net(void);
extern int register_netdevice_notifier(struct notifier_block *nb);
extern int unregister_netdevice_notifier(struct notifier_block *nb);
extern int init_dummy_netdev(struct net_device *dev);
extern void netdev_resync_ops(struct net_device *dev);
extern int call_netdevice_notifiers(unsigned long val, struct net_device *dev);
extern struct net_device *dev_get_by_index(struct net *net, int ifindex);
extern struct net_device *__dev_get_by_index(struct net *net, int ifindex);
extern struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
extern int dev_restart(struct net_device *dev);
#ifdef CONFIG_NETPOLL_TRAP
extern int netpoll_trap(void);
#endif
extern int skb_gro_receive(struct sk_buff **head,
struct sk_buff *skb);
extern void skb_gro_reset_offset(struct sk_buff *skb);
/* skb_gro_offset - current parse offset into the packet during GRO */
static inline unsigned int skb_gro_offset(const struct sk_buff *skb)
{
	return NAPI_GRO_CB(skb)->data_offset;
}

/* skb_gro_len - bytes remaining past the current GRO parse offset */
static inline unsigned int skb_gro_len(const struct sk_buff *skb)
{
	return skb->len - NAPI_GRO_CB(skb)->data_offset;
}

/* skb_gro_pull - advance the GRO parse offset by @len bytes */
static inline void skb_gro_pull(struct sk_buff *skb, unsigned int len)
{
	NAPI_GRO_CB(skb)->data_offset += len;
}

/*
 * skb_gro_header_fast - header pointer via the frag0 fast path.
 * Only valid when skb_gro_header_hard() confirms the needed length is
 * available in frag0; callers must check first.
 */
static inline void *skb_gro_header_fast(struct sk_buff *skb,
					unsigned int offset)
{
	return NAPI_GRO_CB(skb)->frag0 + offset;
}

/* skb_gro_header_hard - nonzero if @hlen bytes are NOT available in frag0 */
static inline int skb_gro_header_hard(struct sk_buff *skb, unsigned int hlen)
{
	return NAPI_GRO_CB(skb)->frag0_len < hlen;
}

/*
 * skb_gro_header_slow - slow path: disable the frag0 shortcut and fall
 * back to linearizing @hlen bytes with pskb_may_pull().  Returns a
 * pointer @offset bytes into skb->data, or NULL if the pull fails.
 */
static inline void *skb_gro_header_slow(struct sk_buff *skb, unsigned int hlen,
					unsigned int offset)
{
	NAPI_GRO_CB(skb)->frag0 = NULL;
	NAPI_GRO_CB(skb)->frag0_len = 0;
	return pskb_may_pull(skb, hlen) ? skb->data + offset : NULL;
}

/* skb_gro_mac_header - MAC header: frag0 if set, else skb_mac_header() */
static inline void *skb_gro_mac_header(struct sk_buff *skb)
{
	return NAPI_GRO_CB(skb)->frag0 ?: skb_mac_header(skb);
}

/* skb_gro_network_header - network header, honouring the frag0 fast path */
static inline void *skb_gro_network_header(struct sk_buff *skb)
{
	return (NAPI_GRO_CB(skb)->frag0 ?: skb->data) +
	       skb_network_offset(skb);
}
/*
 * dev_hard_header - build the link-layer header for an outgoing frame.
 * Delegates to dev->header_ops->create(); returns 0 (nothing to do)
 * when the device has no header_ops or no create hook.
 */
static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
				  unsigned short type,
				  const void *daddr, const void *saddr,
				  unsigned len)
{
	if (!dev->header_ops || !dev->header_ops->create)
		return 0;

	return dev->header_ops->create(skb, dev, type, daddr, saddr, len);
}

/*
 * dev_parse_header - extract the source hardware address of @skb into
 * @haddr via dev->header_ops->parse().  Returns 0 when the device
 * cannot parse headers; otherwise whatever ->parse() returns
 * (presumably the address length — confirm against implementations).
 */
static inline int dev_parse_header(const struct sk_buff *skb,
				   unsigned char *haddr)
{
	const struct net_device *dev = skb->dev;

	if (!dev->header_ops || !dev->header_ops->parse)
		return 0;
	return dev->header_ops->parse(skb, haddr);
}
typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr, int len);
extern int register_gifconf(unsigned int family, gifconf_func_t * gifconf);
/*
 * unregister_gifconf - drop the SIOCGIFCONF handler for @family by
 * registering a NULL callback in its slot.
 */
static inline int unregister_gifconf(unsigned int family)
{
	return register_gifconf(family, NULL);
}
/*
 * Incoming packets are placed on per-cpu queues.
 *
 * struct softnet_data is the per-CPU bookkeeping for the networking
 * softirq: queues of pending work (output qdiscs, completion skbs,
 * input packets) plus counters.  The CONFIG_RPS section holds fields
 * accessed across CPUs for receive packet steering; csd is
 * cacheline-aligned because it is accessed between CPUs (see the
 * in-struct comment).
 */
struct softnet_data {
	struct Qdisc		*output_queue;
	struct Qdisc		**output_queue_tailp;
	struct list_head	poll_list;
	struct sk_buff		*completion_queue;
	struct sk_buff_head	process_queue;

	/* stats */
	unsigned int		processed;
	unsigned int		time_squeeze;
	unsigned int		cpu_collision;
	unsigned int		received_rps;
#ifdef CONFIG_RPS
	struct softnet_data	*rps_ipi_list;

	/* Elements below can be accessed between CPUs for RPS */
	struct call_single_data	csd ____cacheline_aligned_in_smp;
	struct softnet_data	*rps_ipi_next;
	unsigned int		cpu;
	unsigned int		input_queue_head;
	unsigned int		input_queue_tail;
#endif
	unsigned		dropped;	/* NOTE(review): presumably input drops — confirm at use sites */
	struct sk_buff_head	input_pkt_queue;
	struct napi_struct	backlog;
};
/*
 * input_queue_head_incr / input_queue_tail_incr_save - advance the RPS
 * input-queue head/tail counters.  Both are no-ops unless CONFIG_RPS is
 * set; the tail variant also stores the post-increment tail via @qtail.
 */
static inline void input_queue_head_incr(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
	sd->input_queue_head++;
#endif
}

static inline void input_queue_tail_incr_save(struct softnet_data *sd,
					      unsigned int *qtail)
{
#ifdef CONFIG_RPS
	*qtail = ++sd->input_queue_tail;
#endif
}
DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
#define HAVE_NETIF_QUEUE
extern void __netif_schedule(struct Qdisc *q);
/*
 * netif_schedule_queue - ask the TX softirq machinery to run @txq's
 * qdisc, unless the queue is currently stopped (XOFF bit set).
 */
static inline void netif_schedule_queue(struct netdev_queue *txq)
{
	if (!test_bit(__QUEUE_STATE_XOFF, &txq->state))
		__netif_schedule(txq->qdisc);
}

/* netif_tx_schedule_all - netif_schedule_queue() on every TX queue */
static inline void netif_tx_schedule_all(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++)
		netif_schedule_queue(netdev_get_tx_queue(dev, i));
}

/* netif_tx_start_queue - clear the XOFF (stopped) bit on one TX queue */
static inline void netif_tx_start_queue(struct netdev_queue *dev_queue)
{
	clear_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
}

/**
 * netif_start_queue - allow transmit
 * @dev: network device
 *
 * Allow upper layers to call the device hard_start_xmit routine.
 * Operates on TX queue 0 only; multiqueue devices use the per-queue
 * or _all_queues variants.
 */
static inline void netif_start_queue(struct net_device *dev)
{
	netif_tx_start_queue(netdev_get_tx_queue(dev, 0));
}
/*
 * netif_tx_start_all_queues - mark every transmit queue of @dev as
 * started (clears each queue's XOFF bit via netif_tx_start_queue()).
 */
static inline void netif_tx_start_all_queues(struct net_device *dev)
{
	unsigned int qidx = 0;

	while (qidx < dev->num_tx_queues) {
		netif_tx_start_queue(netdev_get_tx_queue(dev, qidx));
		qidx++;
	}
}
/*
 * netif_tx_wake_queue - restart one TX queue: clear XOFF and, if the
 * bit was actually set, schedule the queue's qdisc.  Under
 * CONFIG_NETPOLL_TRAP, while netpoll has trapped the device the queue
 * is only started, not scheduled.
 */
static inline void netif_tx_wake_queue(struct netdev_queue *dev_queue)
{
#ifdef CONFIG_NETPOLL_TRAP
	if (netpoll_trap()) {
		netif_tx_start_queue(dev_queue);
		return;
	}
#endif
	if (test_and_clear_bit(__QUEUE_STATE_XOFF, &dev_queue->state))
		__netif_schedule(dev_queue->qdisc);
}

/**
 * netif_wake_queue - restart transmit
 * @dev: network device
 *
 * Allow upper layers to call the device hard_start_xmit routine.
 * Used for flow control when transmit resources are available.
 * Operates on TX queue 0 only.
 */
static inline void netif_wake_queue(struct net_device *dev)
{
	netif_tx_wake_queue(netdev_get_tx_queue(dev, 0));
}

/* netif_tx_wake_all_queues - netif_tx_wake_queue() on every TX queue */
static inline void netif_tx_wake_all_queues(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		netif_tx_wake_queue(txq);
	}
}
/* netif_tx_stop_queue - set the XOFF (stopped) bit on one TX queue */
static inline void netif_tx_stop_queue(struct netdev_queue *dev_queue)
{
	set_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
}

/**
 * netif_stop_queue - stop transmitted packets
 * @dev: network device
 *
 * Stop upper layers calling the device hard_start_xmit routine.
 * Used for flow control when transmit resources are unavailable.
 * Operates on TX queue 0 only.
 */
static inline void netif_stop_queue(struct net_device *dev)
{
	netif_tx_stop_queue(netdev_get_tx_queue(dev, 0));
}

/* netif_tx_stop_all_queues - netif_tx_stop_queue() on every TX queue */
static inline void netif_tx_stop_all_queues(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		netif_tx_stop_queue(txq);
	}
}

/* netif_tx_queue_stopped - nonzero if the queue's XOFF bit is set */
static inline int netif_tx_queue_stopped(const struct netdev_queue *dev_queue)
{
	return test_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
}
/**
 * netif_queue_stopped - test if transmit queue is flowblocked
 * @dev: network device
 *
 * Test if transmit queue on device is currently unable to send.
 * Checks TX queue 0 only.
 */
static inline int netif_queue_stopped(const struct net_device *dev)
{
	return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0));
}

/*
 * netif_tx_queue_frozen - nonzero while the queue's FROZEN bit is set
 * (set by netif_tx_lock(), cleared by netif_tx_unlock()).
 */
static inline int netif_tx_queue_frozen(const struct netdev_queue *dev_queue)
{
	return test_bit(__QUEUE_STATE_FROZEN, &dev_queue->state);
}

/**
 * netif_running - test if up
 * @dev: network device
 *
 * Test if the device has been brought up (__LINK_STATE_START set).
 */
static inline int netif_running(const struct net_device *dev)
{
	return test_bit(__LINK_STATE_START, &dev->state);
}
/*
 * Routines to manage the subqueues on a device.  We only need start,
 * stop, and a check if it's stopped.  All other device management is
 * done at the overall netdevice level.
 * Also test the device if we're multiqueue.
 */

/**
 * netif_start_subqueue - allow sending packets on subqueue
 * @dev: network device
 * @queue_index: sub queue index
 *
 * Start individual transmit queue of a device with multiple transmit queues.
 */
static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);

	netif_tx_start_queue(txq);
}

/**
 * netif_stop_subqueue - stop sending packets on subqueue
 * @dev: network device
 * @queue_index: sub queue index
 *
 * Stop individual transmit queue of a device with multiple transmit queues.
 * No-op while netpoll has trapped the device (CONFIG_NETPOLL_TRAP).
 */
static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
#ifdef CONFIG_NETPOLL_TRAP
	if (netpoll_trap())
		return;
#endif
	netif_tx_stop_queue(txq);
}

/**
 * __netif_subqueue_stopped - test status of subqueue
 * @dev: network device
 * @queue_index: sub queue index
 *
 * Check individual transmit queue of a device with multiple transmit queues.
 */
static inline int __netif_subqueue_stopped(const struct net_device *dev,
					   u16 queue_index)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);

	return netif_tx_queue_stopped(txq);
}

/* netif_subqueue_stopped - as above, taking the queue index from @skb */
static inline int netif_subqueue_stopped(const struct net_device *dev,
					 struct sk_buff *skb)
{
	return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb));
}
/**
 * netif_wake_subqueue - allow sending packets on subqueue
 * @dev: network device
 * @queue_index: sub queue index
 *
 * Resume individual transmit queue of a device with multiple transmit
 * queues: clears XOFF and reschedules the qdisc if the bit was set.
 * No-op while netpoll has trapped the device (CONFIG_NETPOLL_TRAP).
 */
static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
#ifdef CONFIG_NETPOLL_TRAP
	if (netpoll_trap())
		return;
#endif
	if (test_and_clear_bit(__QUEUE_STATE_XOFF, &txq->state))
		__netif_schedule(txq->qdisc);
}

/**
 * netif_is_multiqueue - test if device has multiple transmit queues
 * @dev: network device
 *
 * Check if device has multiple transmit queues
 */
static inline int netif_is_multiqueue(const struct net_device *dev)
{
	return (dev->num_tx_queues > 1);
}
/* Use this variant when it is known for sure that it
* is executing from hardware interrupt context or with hardware interrupts
* disabled.
*/
extern void dev_kfree_skb_irq(struct sk_buff *skb);
/* Use this variant in places where it could be invoked
* from either hardware interrupt or other context, with hardware interrupts
* either disabled or enabled.
*/
extern void dev_kfree_skb_any(struct sk_buff *skb);
#define HAVE_NETIF_RX 1
extern int netif_rx(struct sk_buff *skb);
extern int netif_rx_ni(struct sk_buff *skb);
#define HAVE_NETIF_RECEIVE_SKB 1
extern int netif_receive_skb(struct sk_buff *skb);
extern gro_result_t dev_gro_receive(struct napi_struct *napi,
struct sk_buff *skb);
extern gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb);
extern gro_result_t napi_gro_receive(struct napi_struct *napi,
struct sk_buff *skb);
extern void napi_reuse_skb(struct napi_struct *napi,
struct sk_buff *skb);
extern struct sk_buff * napi_get_frags(struct napi_struct *napi);
extern gro_result_t napi_frags_finish(struct napi_struct *napi,
struct sk_buff *skb,
gro_result_t ret);
extern struct sk_buff * napi_frags_skb(struct napi_struct *napi);
extern gro_result_t napi_gro_frags(struct napi_struct *napi);
/*
 * napi_free_frags - discard the skb this NAPI instance is accumulating
 * fragments into (napi->skb) and clear the pointer.
 */
static inline void napi_free_frags(struct napi_struct *napi)
{
	kfree_skb(napi->skb);
	napi->skb = NULL;
}
extern int netdev_rx_handler_register(struct net_device *dev,
rx_handler_func_t *rx_handler,
void *rx_handler_data);
extern void netdev_rx_handler_unregister(struct net_device *dev);
extern void netif_nit_deliver(struct sk_buff *skb);
extern int dev_valid_name(const char *name);
extern int dev_ioctl(struct net *net, unsigned int cmd, void __user *);
extern int dev_ethtool(struct net *net, struct ifreq *);
extern unsigned dev_get_flags(const struct net_device *);
extern int __dev_change_flags(struct net_device *, unsigned int flags);
extern int dev_change_flags(struct net_device *, unsigned);
extern void __dev_notify_flags(struct net_device *, unsigned int old_flags);
extern int dev_change_name(struct net_device *, const char *);
extern int dev_set_alias(struct net_device *, const char *, size_t);
extern int dev_change_net_namespace(struct net_device *,
struct net *, const char *);
extern int dev_set_mtu(struct net_device *, int);
extern int dev_set_mac_address(struct net_device *,
struct sockaddr *);
extern int dev_hard_start_xmit(struct sk_buff *skb,
struct net_device *dev,
struct netdev_queue *txq);
extern int dev_forward_skb(struct net_device *dev,
struct sk_buff *skb);
extern int netdev_budget;
/* Called by rtnetlink.c:rtnl_unlock() */
extern void netdev_run_todo(void);
/**
 * dev_put - release reference to device
 * @dev: network device
 *
 * Release reference to device to allow it to be freed.
 * Plain atomic decrement of dev->refcnt; the caller must hold a
 * reference previously obtained with dev_hold() or a lookup helper.
 */
static inline void dev_put(struct net_device *dev)
{
	atomic_dec(&dev->refcnt);
}

/**
 * dev_hold - get reference to device
 * @dev: network device
 *
 * Hold reference to device to keep it from being freed
 * (atomic increment of dev->refcnt).
 */
static inline void dev_hold(struct net_device *dev)
{
	atomic_inc(&dev->refcnt);
}
/* Carrier loss detection, dial on demand. The functions netif_carrier_on
* and _off may be called from IRQ context, but it is caller
* who is responsible for serialization of these calls.
*
* The name carrier is inappropriate, these functions should really be
* called netif_lowerlayer_*() because they represent the state of any
* kind of lower layer not just hardware media.
*/
extern void linkwatch_fire_event(struct net_device *dev);
extern void linkwatch_forget_dev(struct net_device *dev);
/**
 * netif_carrier_ok - test if carrier present
 * @dev: network device
 *
 * Check if carrier is present on device, i.e. __LINK_STATE_NOCARRIER
 * is not set.
 */
static inline int netif_carrier_ok(const struct net_device *dev)
{
	return !test_bit(__LINK_STATE_NOCARRIER, &dev->state);
}
extern unsigned long dev_trans_start(struct net_device *dev);
extern void __netdev_watchdog_up(struct net_device *dev);
extern void netif_carrier_on(struct net_device *dev);
extern void netif_carrier_off(struct net_device *dev);
extern void netif_notify_peers(struct net_device *dev);
/**
 * netif_dormant_on - mark device as dormant.
 * @dev: network device
 *
 * Mark device as dormant (as per RFC2863).
 *
 * The dormant state indicates that the relevant interface is not
 * actually in a condition to pass packets (i.e., it is not 'up') but is
 * in a "pending" state, waiting for some external event.  For "on-
 * demand" interfaces, this new state identifies the situation where the
 * interface is waiting for events to place it in the up state.
 *
 * Fires a linkwatch event only when the bit actually changes.
 */
static inline void netif_dormant_on(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_DORMANT, &dev->state))
		linkwatch_fire_event(dev);
}

/**
 * netif_dormant_off - set device as not dormant.
 * @dev: network device
 *
 * Device is not in dormant state.  Fires a linkwatch event only when
 * the bit actually changes.
 */
static inline void netif_dormant_off(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_DORMANT, &dev->state))
		linkwatch_fire_event(dev);
}

/**
 * netif_dormant - test if device is dormant
 * @dev: network device
 *
 * Check if the device is in the RFC2863 dormant state
 * (__LINK_STATE_DORMANT set).
 */
static inline int netif_dormant(const struct net_device *dev)
{
	return test_bit(__LINK_STATE_DORMANT, &dev->state);
}

/**
 * netif_oper_up - test if device is operational
 * @dev: network device
 *
 * Check if carrier is operational.  IF_OPER_UNKNOWN is accepted for
 * backward compatibility (see inline comment).
 */
static inline int netif_oper_up(const struct net_device *dev)
{
	return (dev->operstate == IF_OPER_UP ||
		dev->operstate == IF_OPER_UNKNOWN /* backward compat */);
}

/**
 * netif_device_present - is device available or removed
 * @dev: network device
 *
 * Check if device has not been removed from system.
 */
static inline int netif_device_present(struct net_device *dev)
{
	return test_bit(__LINK_STATE_PRESENT, &dev->state);
}
extern void netif_device_detach(struct net_device *dev);
extern void netif_device_attach(struct net_device *dev);
/*
* Network interface message level settings
*/
#define HAVE_NETIF_MSG 1
/*
 * Per-category message-level bits for dev->msg_enable; tested via the
 * netif_msg_*() predicate macros defined below.
 */
enum {
	NETIF_MSG_DRV		= 0x0001,
	NETIF_MSG_PROBE		= 0x0002,
	NETIF_MSG_LINK		= 0x0004,
	NETIF_MSG_TIMER		= 0x0008,
	NETIF_MSG_IFDOWN	= 0x0010,
	NETIF_MSG_IFUP		= 0x0020,
	NETIF_MSG_RX_ERR	= 0x0040,
	NETIF_MSG_TX_ERR	= 0x0080,
	NETIF_MSG_TX_QUEUED	= 0x0100,
	NETIF_MSG_INTR		= 0x0200,
	NETIF_MSG_TX_DONE	= 0x0400,
	NETIF_MSG_RX_STATUS	= 0x0800,
	NETIF_MSG_PKTDATA	= 0x1000,
	NETIF_MSG_HW		= 0x2000,
	NETIF_MSG_WOL		= 0x4000,
};
#define netif_msg_drv(p) ((p)->msg_enable & NETIF_MSG_DRV)
#define netif_msg_probe(p) ((p)->msg_enable & NETIF_MSG_PROBE)
#define netif_msg_link(p) ((p)->msg_enable & NETIF_MSG_LINK)
#define netif_msg_timer(p) ((p)->msg_enable & NETIF_MSG_TIMER)
#define netif_msg_ifdown(p) ((p)->msg_enable & NETIF_MSG_IFDOWN)
#define netif_msg_ifup(p) ((p)->msg_enable & NETIF_MSG_IFUP)
#define netif_msg_rx_err(p) ((p)->msg_enable & NETIF_MSG_RX_ERR)
#define netif_msg_tx_err(p) ((p)->msg_enable & NETIF_MSG_TX_ERR)
#define netif_msg_tx_queued(p) ((p)->msg_enable & NETIF_MSG_TX_QUEUED)
#define netif_msg_intr(p) ((p)->msg_enable & NETIF_MSG_INTR)
#define netif_msg_tx_done(p) ((p)->msg_enable & NETIF_MSG_TX_DONE)
#define netif_msg_rx_status(p) ((p)->msg_enable & NETIF_MSG_RX_STATUS)
#define netif_msg_pktdata(p) ((p)->msg_enable & NETIF_MSG_PKTDATA)
#define netif_msg_hw(p) ((p)->msg_enable & NETIF_MSG_HW)
#define netif_msg_wol(p) ((p)->msg_enable & NETIF_MSG_WOL)
/**
 * netif_msg_init - compute an initial msg_enable bitmap
 * @debug_value: number of low bits to set; out-of-range selects default
 * @default_msg_enable_bits: value used when @debug_value is out of range
 *
 * Returns @default_msg_enable_bits when @debug_value is negative or
 * too large for a u32 shift, 0 when @debug_value is 0 (silence),
 * otherwise a mask with the low @debug_value bits set.
 */
static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
{
	/* use default */
	if (debug_value < 0 || debug_value >= (sizeof(u32) * 8))
		return default_msg_enable_bits;
	if (debug_value == 0)	/* no output */
		return 0;
	/* set low N bits */
	return (1 << debug_value) - 1;
}
/*
 * __netif_tx_lock and friends: per-queue _xmit_lock helpers.  Each
 * locker records the owning CPU in xmit_lock_owner (-1 when unlocked);
 * txq_trans_update() uses that field to detect whether the lock is held.
 */
static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
{
	spin_lock(&txq->_xmit_lock);
	txq->xmit_lock_owner = cpu;
}

/* As __netif_tx_lock(), but with bottom halves disabled */
static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
{
	spin_lock_bh(&txq->_xmit_lock);
	txq->xmit_lock_owner = smp_processor_id();
}

/* Try-lock variant; returns nonzero on success */
static inline int __netif_tx_trylock(struct netdev_queue *txq)
{
	int ok = spin_trylock(&txq->_xmit_lock);
	if (likely(ok))
		txq->xmit_lock_owner = smp_processor_id();
	return ok;
}

/* Owner is reset to -1 BEFORE the lock is dropped */
static inline void __netif_tx_unlock(struct netdev_queue *txq)
{
	txq->xmit_lock_owner = -1;
	spin_unlock(&txq->_xmit_lock);
}

static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
{
	txq->xmit_lock_owner = -1;
	spin_unlock_bh(&txq->_xmit_lock);
}

/*
 * txq_trans_update - refresh the queue's trans_start timestamp, but
 * only while the xmit lock is held (owner != -1).
 */
static inline void txq_trans_update(struct netdev_queue *txq)
{
	if (txq->xmit_lock_owner != -1)
		txq->trans_start = jiffies;
}
/**
 * netif_tx_lock - grab network device transmit lock
 * @dev: network device
 *
 * Get network device transmit lock.  Takes tx_global_lock, then marks
 * every TX queue FROZEN, cycling each queue's _xmit_lock to synchronize
 * with in-flight transmitters (see the in-loop comment).
 */
static inline void netif_tx_lock(struct net_device *dev)
{
	unsigned int i;
	int cpu;

	spin_lock(&dev->tx_global_lock);
	cpu = smp_processor_id();
	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		/* We are the only thread of execution doing a
		 * freeze, but we have to grab the _xmit_lock in
		 * order to synchronize with threads which are in
		 * the ->hard_start_xmit() handler and already
		 * checked the frozen bit.
		 */
		__netif_tx_lock(txq, cpu);
		set_bit(__QUEUE_STATE_FROZEN, &txq->state);
		__netif_tx_unlock(txq);
	}
}

/* netif_tx_lock() with bottom halves disabled for the duration */
static inline void netif_tx_lock_bh(struct net_device *dev)
{
	local_bh_disable();
	netif_tx_lock(dev);
}

/*
 * netif_tx_unlock - release netif_tx_lock(): unfreeze and reschedule
 * every queue, then drop tx_global_lock.
 */
static inline void netif_tx_unlock(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		/* No need to grab the _xmit_lock here.  If the
		 * queue is not stopped for another reason, we
		 * force a schedule.
		 */
		clear_bit(__QUEUE_STATE_FROZEN, &txq->state);
		netif_schedule_queue(txq);
	}
	spin_unlock(&dev->tx_global_lock);
}

static inline void netif_tx_unlock_bh(struct net_device *dev)
{
	netif_tx_unlock(dev);
	local_bh_enable();
}
/*
 * HARD_TX_LOCK/HARD_TX_UNLOCK - take/release the per-queue xmit lock,
 * unless the driver declares itself lockless (NETIF_F_LLTX).
 */
#define HARD_TX_LOCK(dev, txq, cpu) {			\
	if ((dev->features & NETIF_F_LLTX) == 0) {	\
		__netif_tx_lock(txq, cpu);		\
	}						\
}

#define HARD_TX_UNLOCK(dev, txq) {			\
	if ((dev->features & NETIF_F_LLTX) == 0) {	\
		__netif_tx_unlock(txq);			\
	}						\
}
/*
 * netif_tx_disable - stop every TX queue, holding each queue's xmit
 * lock (with BHs off) so the stop cannot race ->hard_start_xmit().
 */
static inline void netif_tx_disable(struct net_device *dev)
{
	unsigned int i;
	int cpu;

	local_bh_disable();
	cpu = smp_processor_id();
	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		__netif_tx_lock(txq, cpu);
		netif_tx_stop_queue(txq);
		__netif_tx_unlock(txq);
	}
	local_bh_enable();
}
/* Guard dev->addr_list_lock around hardware-address-list manipulation */
static inline void netif_addr_lock(struct net_device *dev)
{
	spin_lock(&dev->addr_list_lock);
}

static inline void netif_addr_lock_bh(struct net_device *dev)
{
	spin_lock_bh(&dev->addr_list_lock);
}

static inline void netif_addr_unlock(struct net_device *dev)
{
	spin_unlock(&dev->addr_list_lock);
}

static inline void netif_addr_unlock_bh(struct net_device *dev)
{
	spin_unlock_bh(&dev->addr_list_lock);
}
/*
* dev_addrs walker. Should be used only for read access. Call with
* rcu_read_lock held.
*/
#define for_each_dev_addr(dev, ha) \
list_for_each_entry_rcu(ha, &dev->dev_addrs.list, list)
/* These functions live elsewhere (drivers/net/net_init.c, but related) */
extern void ether_setup(struct net_device *dev);
/* Support for loadable net-drivers */
extern struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
void (*setup)(struct net_device *),
unsigned int queue_count);
#define alloc_netdev(sizeof_priv, name, setup) \
alloc_netdev_mq(sizeof_priv, name, setup, 1)
extern int register_netdev(struct net_device *dev);
extern void unregister_netdev(struct net_device *dev);
/* General hardware address lists handling functions */
extern int __hw_addr_add_multiple(struct netdev_hw_addr_list *to_list,
struct netdev_hw_addr_list *from_list,
int addr_len, unsigned char addr_type);
extern void __hw_addr_del_multiple(struct netdev_hw_addr_list *to_list,
struct netdev_hw_addr_list *from_list,
int addr_len, unsigned char addr_type);
extern int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
struct netdev_hw_addr_list *from_list,
int addr_len);
extern void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
struct netdev_hw_addr_list *from_list,
int addr_len);
extern void __hw_addr_flush(struct netdev_hw_addr_list *list);
extern void __hw_addr_init(struct netdev_hw_addr_list *list);
/* Functions used for device addresses handling */
extern int dev_addr_add(struct net_device *dev, unsigned char *addr,
unsigned char addr_type);
extern int dev_addr_del(struct net_device *dev, unsigned char *addr,
unsigned char addr_type);
extern int dev_addr_add_multiple(struct net_device *to_dev,
struct net_device *from_dev,
unsigned char addr_type);
extern int dev_addr_del_multiple(struct net_device *to_dev,
struct net_device *from_dev,
unsigned char addr_type);
extern void dev_addr_flush(struct net_device *dev);
extern int dev_addr_init(struct net_device *dev);
/* Functions used for unicast addresses handling */
extern int dev_uc_add(struct net_device *dev, unsigned char *addr);
extern int dev_uc_del(struct net_device *dev, unsigned char *addr);
extern int dev_uc_sync(struct net_device *to, struct net_device *from);
extern void dev_uc_unsync(struct net_device *to, struct net_device *from);
extern void dev_uc_flush(struct net_device *dev);
extern void dev_uc_init(struct net_device *dev);
/* Functions used for multicast addresses handling */
extern int dev_mc_add(struct net_device *dev, unsigned char *addr);
extern int dev_mc_add_global(struct net_device *dev, unsigned char *addr);
extern int dev_mc_del(struct net_device *dev, unsigned char *addr);
extern int dev_mc_del_global(struct net_device *dev, unsigned char *addr);
extern int dev_mc_sync(struct net_device *to, struct net_device *from);
extern void dev_mc_unsync(struct net_device *to, struct net_device *from);
extern void dev_mc_flush(struct net_device *dev);
extern void dev_mc_init(struct net_device *dev);
/* Functions used for secondary unicast and multicast support */
extern void dev_set_rx_mode(struct net_device *dev);
extern void __dev_set_rx_mode(struct net_device *dev);
extern int dev_set_promiscuity(struct net_device *dev, int inc);
extern int dev_set_allmulti(struct net_device *dev, int inc);
extern void netdev_state_change(struct net_device *dev);
extern int netdev_bonding_change(struct net_device *dev,
unsigned long event);
extern void netdev_features_change(struct net_device *dev);
/* Load a device via the kmod */
extern void dev_load(struct net *net, const char *name);
extern void dev_mcast_init(void);
extern const struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev);
extern void dev_txq_stats_fold(const struct net_device *dev, struct net_device_stats *stats);
extern int netdev_max_backlog;
extern int netdev_tstamp_prequeue;
extern int weight_p;
extern int netdev_set_master(struct net_device *dev, struct net_device *master);
extern int skb_checksum_help(struct sk_buff *skb);
extern struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features);
#ifdef CONFIG_BUG
extern void netdev_rx_csum_fault(struct net_device *dev);
#else
static inline void netdev_rx_csum_fault(struct net_device *dev)
{
}
#endif
/* rx skb timestamps */
extern void net_enable_timestamp(void);
extern void net_disable_timestamp(void);
#ifdef CONFIG_PROC_FS
extern void *dev_seq_start(struct seq_file *seq, loff_t *pos);
extern void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos);
extern void dev_seq_stop(struct seq_file *seq, void *v);
#endif
extern int netdev_class_create_file(struct class_attribute *class_attr);
extern void netdev_class_remove_file(struct class_attribute *class_attr);
extern char *netdev_drivername(const struct net_device *dev, char *buffer, int len);
extern void linkwatch_run_queue(void);
unsigned long netdev_increment_features(unsigned long all, unsigned long one,
unsigned long mask);
unsigned long netdev_fix_features(unsigned long features, const char *name);
void netif_stacked_transfer_operstate(const struct net_device *rootdev,
struct net_device *dev);
/*
 * net_gso_ok - true if @features advertises the GSO @gso_type
 * (the type is shifted into the NETIF_F_GSO feature-bit region).
 */
static inline int net_gso_ok(int features, int gso_type)
{
	int feature = gso_type << NETIF_F_GSO_SHIFT;
	return (features & feature) == feature;
}

/*
 * skb_gso_ok - true if @features can segment @skb: the gso_type is
 * supported and, when the skb carries a frag list, NETIF_F_FRAGLIST too.
 */
static inline int skb_gso_ok(struct sk_buff *skb, int features)
{
	return net_gso_ok(features, skb_shinfo(skb)->gso_type) &&
	       (!skb_has_frags(skb) || (features & NETIF_F_FRAGLIST));
}

/*
 * netif_needs_gso - true when @skb is GSO but @dev cannot take it as
 * is (unsupported gso_type, or checksum not CHECKSUM_PARTIAL), so it
 * must be segmented in software before transmission.
 */
static inline int netif_needs_gso(struct net_device *dev, struct sk_buff *skb)
{
	return skb_is_gso(skb) &&
	       (!skb_gso_ok(skb, dev->features) ||
		unlikely(skb->ip_summed != CHECKSUM_PARTIAL));
}

/* netif_set_gso_max_size - set the device's maximum GSO aggregate size */
static inline void netif_set_gso_max_size(struct net_device *dev,
					  unsigned int size)
{
	dev->gso_max_size = size;
}
extern int __skb_bond_should_drop(struct sk_buff *skb,
struct net_device *master);
/*
 * skb_bond_should_drop - bonding RX hook: defer to
 * __skb_bond_should_drop() when the receiving device has a @master,
 * otherwise never drop (returns 0).
 */
static inline int skb_bond_should_drop(struct sk_buff *skb,
				       struct net_device *master)
{
	if (master)
		return __skb_bond_should_drop(skb, master);
	return 0;
}
extern struct pernet_operations __net_initdata loopback_net_ops;
/*
 * dev_ethtool_get_settings - fetch link settings via the driver's
 * ethtool_ops; -EOPNOTSUPP when the hook is absent.
 */
static inline int dev_ethtool_get_settings(struct net_device *dev,
					   struct ethtool_cmd *cmd)
{
	if (!dev->ethtool_ops || !dev->ethtool_ops->get_settings)
		return -EOPNOTSUPP;
	return dev->ethtool_ops->get_settings(dev, cmd);
}

/* RX checksum offload state; 0 (off) when the hook is absent */
static inline u32 dev_ethtool_get_rx_csum(struct net_device *dev)
{
	if (!dev->ethtool_ops || !dev->ethtool_ops->get_rx_csum)
		return 0;
	return dev->ethtool_ops->get_rx_csum(dev);
}

/* ethtool device flags; 0 when the hook is absent */
static inline u32 dev_ethtool_get_flags(struct net_device *dev)
{
	if (!dev->ethtool_ops || !dev->ethtool_ops->get_flags)
		return 0;
	return dev->ethtool_ops->get_flags(dev);
}
/* Logging, debugging and troubleshooting/diagnostic helpers. */
/* netdev_printk helpers, similar to dev_printk */
/*
 * netdev_name - printable name for @dev, or a fixed placeholder string
 * until the device reaches the NETREG_REGISTERED state.
 */
static inline const char *netdev_name(const struct net_device *dev)
{
	if (dev->reg_state == NETREG_REGISTERED)
		return dev->name;
	return "(unregistered net_device)";
}
extern int netdev_printk(const char *level, const struct net_device *dev,
const char *format, ...)
__attribute__ ((format (printf, 3, 4)));
extern int netdev_emerg(const struct net_device *dev, const char *format, ...)
__attribute__ ((format (printf, 2, 3)));
extern int netdev_alert(const struct net_device *dev, const char *format, ...)
__attribute__ ((format (printf, 2, 3)));
extern int netdev_crit(const struct net_device *dev, const char *format, ...)
__attribute__ ((format (printf, 2, 3)));
extern int netdev_err(const struct net_device *dev, const char *format, ...)
__attribute__ ((format (printf, 2, 3)));
extern int netdev_warn(const struct net_device *dev, const char *format, ...)
__attribute__ ((format (printf, 2, 3)));
extern int netdev_notice(const struct net_device *dev, const char *format, ...)
__attribute__ ((format (printf, 2, 3)));
extern int netdev_info(const struct net_device *dev, const char *format, ...)
__attribute__ ((format (printf, 2, 3)));
/*
 * netdev_dbg()/netdev_vdbg(): debug-level printk variants.  When
 * DEBUG / CONFIG_DYNAMIC_DEBUG / VERBOSE_DEBUG are not defined they
 * compile to a no-op expression whose dead "if (0)" call still
 * type-checks the arguments.
 */
#if defined(DEBUG)
#define netdev_dbg(__dev, format, args...)			\
	netdev_printk(KERN_DEBUG, __dev, format, ##args)
#elif defined(CONFIG_DYNAMIC_DEBUG)
#define netdev_dbg(__dev, format, args...)			\
do {								\
	dynamic_dev_dbg((__dev)->dev.parent, "%s: " format,	\
			netdev_name(__dev), ##args);		\
} while (0)
#else
#define netdev_dbg(__dev, format, args...)			\
({								\
	if (0)							\
		netdev_printk(KERN_DEBUG, __dev, format, ##args); \
	0;							\
})
#endif

#if defined(VERBOSE_DEBUG)
#define netdev_vdbg	netdev_dbg
#else
#define netdev_vdbg(dev, format, args...)			\
({								\
	if (0)							\
		netdev_printk(KERN_DEBUG, dev, format, ##args);	\
	0;							\
})
#endif
/*
 * netdev_WARN() acts like dev_printk(), but with the key difference
 * of using a WARN/WARN_ON to get the message out, including the
 * file/line information and a backtrace.  Note the expansion ends
 * with a semicolon, so it is a statement, not an expression.
 */
#define netdev_WARN(dev, format, args...)			\
	WARN(1, "netdevice: %s\n" format, netdev_name(dev), ##args);
/*
 * netif printk helpers, similar to netdev_printk: each first checks the
 * corresponding NETIF_MSG_* bit in priv->msg_enable (via the
 * netif_msg_<type>() predicate) before printing at the given level.
 */
#define netif_printk(priv, type, level, dev, fmt, args...)	\
do {								\
	if (netif_msg_##type(priv))				\
		netdev_printk(level, (dev), fmt, ##args);	\
} while (0)

#define netif_emerg(priv, type, dev, fmt, args...)		\
	netif_printk(priv, type, KERN_EMERG, dev, fmt, ##args)
#define netif_alert(priv, type, dev, fmt, args...)		\
	netif_printk(priv, type, KERN_ALERT, dev, fmt, ##args)
#define netif_crit(priv, type, dev, fmt, args...)		\
	netif_printk(priv, type, KERN_CRIT, dev, fmt, ##args)
#define netif_err(priv, type, dev, fmt, args...)		\
	netif_printk(priv, type, KERN_ERR, dev, fmt, ##args)
#define netif_warn(priv, type, dev, fmt, args...)		\
	netif_printk(priv, type, KERN_WARNING, dev, fmt, ##args)
#define netif_notice(priv, type, dev, fmt, args...)		\
	netif_printk(priv, type, KERN_NOTICE, dev, fmt, ##args)
#define netif_info(priv, type, dev, fmt, args...)		\
	netif_printk(priv, type, KERN_INFO, (dev), fmt, ##args)

/* Debug variants: compiled out (type-checked no-op) unless enabled */
#if defined(DEBUG)
#define netif_dbg(priv, type, dev, format, args...)		\
	netif_printk(priv, type, KERN_DEBUG, dev, format, ##args)
#elif defined(CONFIG_DYNAMIC_DEBUG)
#define netif_dbg(priv, type, netdev, format, args...)		\
do {								\
	if (netif_msg_##type(priv))				\
		dynamic_dev_dbg((netdev)->dev.parent,		\
				"%s: " format,			\
				netdev_name(netdev), ##args);	\
} while (0)
#else
#define netif_dbg(priv, type, dev, format, args...)		\
({								\
	if (0)							\
		netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \
	0;							\
})
#endif

#if defined(VERBOSE_DEBUG)
#define netif_vdbg	netdev_dbg
#else
#define netif_vdbg(priv, type, dev, format, args...)		\
({								\
	if (0)							\
		netif_printk(priv, type, KERN_DEBUG, dev, format, ##args); \
	0;							\
})
#endif
#endif /* __KERNEL__ */
#endif /* _LINUX_NETDEVICE_H */
|
2143_0
|
crossvul
|
h
|
CWE-119
|
Improper Restriction of Operations within the Bounds of a Memory Buffer - Certain languages allow direct addressing of memory locations and do not automatically ensure that these locations are valid for the memory buffer that is being referenced.
|
c
|
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_ISOLATE_H_
#define V8_ISOLATE_H_
#include "../include/v8-debug.h"
#include "allocation.h"
#include "apiutils.h"
#include "atomicops.h"
#include "builtins.h"
#include "contexts.h"
#include "execution.h"
#include "frames.h"
#include "date.h"
#include "global-handles.h"
#include "handles.h"
#include "hashmap.h"
#include "heap.h"
#include "optimizing-compiler-thread.h"
#include "regexp-stack.h"
#include "runtime-profiler.h"
#include "runtime.h"
#include "zone.h"
namespace v8 {
namespace internal {
class Bootstrapper;
class CodeGenerator;
class CodeRange;
class CompilationCache;
class ContextSlotCache;
class ContextSwitcher;
class Counters;
class CpuFeatures;
class CpuProfiler;
class DeoptimizerData;
class Deserializer;
class EmptyStatement;
class ExternalReferenceTable;
class Factory;
class FunctionInfoListener;
class HandleScopeImplementer;
class HeapProfiler;
class InlineRuntimeFunctionsTable;
class NoAllocationStringAllocator;
class InnerPointerToCodeCache;
class PreallocatedMemoryThread;
class RegExpStack;
class SaveContext;
class UnicodeCache;
class StringInputBuffer;
class StringTracker;
class StubCache;
class ThreadManager;
class ThreadState;
class ThreadVisitor; // Defined in v8threads.h
class VMState;
// 'void function pointer', used to roundtrip the
// ExternalReference::ExternalReferenceRedirector since we can not include
// assembler.h, where it is defined, here.
typedef void* ExternalReferenceRedirectorPointer();
#ifdef ENABLE_DEBUGGER_SUPPORT
class Debug;
class Debugger;
class DebuggerAgent;
#endif
#if !defined(__arm__) && defined(V8_TARGET_ARCH_ARM) || \
!defined(__mips__) && defined(V8_TARGET_ARCH_MIPS)
class Redirection;
class Simulator;
#endif
// Static indirection table for handles to constants.  If a frame element or
// a Result represents a constant, its data contains an index into this table
// of handles to the actual constants.
typedef ZoneList<Handle<Object> > ZoneObjectList;
// Propagates a scheduled exception out of runtime code: if |isolate| has a
// scheduled exception, promote it to a pending exception and return the
// resulting Failure from the enclosing function.  For use in functions whose
// return type is compatible with Failure*.
#define RETURN_IF_SCHEDULED_EXCEPTION(isolate) \
do { \
Isolate* __isolate__ = (isolate); \
if (__isolate__->has_scheduled_exception()) { \
return __isolate__->PromoteScheduledException(); \
} \
} while (false)
// If |call| produced an empty handle (signalling failure), return |value|
// from the enclosing function.  An empty handle implies that a pending
// exception has already been set on |isolate| (asserted in debug builds).
#define RETURN_IF_EMPTY_HANDLE_VALUE(isolate, call, value) \
do { \
if ((call).is_null()) { \
ASSERT((isolate)->has_pending_exception()); \
return (value); \
} \
} while (false)
// Asserts that |call| is evaluated with no pending exception before or
// after, and that it does not produce an empty handle.
#define CHECK_NOT_EMPTY_HANDLE(isolate, call) \
do { \
ASSERT(!(isolate)->has_pending_exception()); \
CHECK(!(call).is_null()); \
CHECK(!(isolate)->has_pending_exception()); \
} while (false)
// Shorthand for the common case: return Failure::Exception() when |call|
// yields an empty handle.
#define RETURN_IF_EMPTY_HANDLE(isolate, call) \
RETURN_IF_EMPTY_HANDLE_VALUE(isolate, call, Failure::Exception())
// X-macro enumerating the per-isolate addresses exposed via
// Isolate::AddressId / Isolate::get_address_from_id().  C is applied as
// C(CamelName, hacker_name) to each entry.
#define FOR_EACH_ISOLATE_ADDRESS_NAME(C) \
C(Handler, handler) \
C(CEntryFP, c_entry_fp) \
C(Context, context) \
C(PendingException, pending_exception) \
C(ExternalCaughtException, external_caught_exception) \
C(JSEntrySP, js_entry_sp)
// Platform-independent, reliable thread identifier.
//
// A small copyable value type wrapping an int id.  Ids are allocated
// per OS thread (see AllocateThreadId/GetCurrentThreadId, defined out of
// line); kInvalidId (-1) marks an id that refers to no thread.
class ThreadId {
public:
// Creates an invalid ThreadId.
ThreadId() : id_(kInvalidId) {}
// Returns ThreadId for current thread.
static ThreadId Current() { return ThreadId(GetCurrentThreadId()); }
// Returns invalid ThreadId (guaranteed not to be equal to any thread).
static ThreadId Invalid() { return ThreadId(kInvalidId); }
// Compares ThreadIds for equality.
INLINE(bool Equals(const ThreadId& other) const) {
return id_ == other.id_;
}
// Checks whether this ThreadId refers to any thread.
INLINE(bool IsValid() const) {
return id_ != kInvalidId;
}
// Converts ThreadId to an integer representation
// (required for public API: V8::V8::GetCurrentThreadId).
int ToInteger() const { return id_; }
// Converts ThreadId to an integer representation
// (required for public API: V8::V8::TerminateExecution).
// Note: |id| is trusted; no validation is performed here.
static ThreadId FromInteger(int id) { return ThreadId(id); }
private:
static const int kInvalidId = -1;
explicit ThreadId(int id) : id_(id) {}
// Allocates a fresh id for a thread that has none yet (defined in .cc).
static int AllocateThreadId();
// Returns the calling thread's id, allocating one on first use.
static int GetCurrentThreadId();
int id_;
// Monotonically increasing source of new ids; atomic because threads
// allocate ids concurrently.
static Atomic32 highest_thread_id_;
friend class Isolate;
};
// Per-thread, per-isolate execution state: the "top" of the VM state for
// one thread inside one isolate (current context, pending/scheduled
// exceptions, entry frame pointers, try/catch chain).  Archived and
// restored when threads enter/leave an isolate.
class ThreadLocalTop BASE_EMBEDDED {
public:
// Does early low-level initialization that does not depend on the
// isolate being present.
ThreadLocalTop();
// Initialize the thread data.
void Initialize();
// Get the top C++ try catch handler or NULL if none are registered.
//
// This method is not guaranteed to return an address that can be
// used for comparison with addresses into the JS stack. If such an
// address is needed, use try_catch_handler_address.
v8::TryCatch* TryCatchHandler();
// Get the address of the top C++ try catch handler or NULL if
// none are registered.
//
// This method always returns an address that can be compared to
// pointers into the JavaScript stack. When running on actual
// hardware, try_catch_handler_address and TryCatchHandler return
// the same pointer. When running on a simulator with a separate JS
// stack, try_catch_handler_address returns a JS stack address that
// corresponds to the place on the JS stack where the C++ handler
// would have been if the stack were not separate.
inline Address try_catch_handler_address() {
return try_catch_handler_address_;
}
// Set the address of the top C++ try catch handler.
inline void set_try_catch_handler_address(Address address) {
try_catch_handler_address_ = address;
}
// Asserts that all exception/handler state has been cleared before the
// thread's resources are released.
void Free() {
ASSERT(!has_pending_message_);
ASSERT(!external_caught_exception_);
ASSERT(try_catch_handler_address_ == NULL);
}
// The isolate this state belongs to.
Isolate* isolate_;
// The context where the current execution method is created and for variable
// lookups.
Context* context_;
// Id of the thread owning this state.
ThreadId thread_id_;
// The exception currently being propagated (hole value when none).
MaybeObject* pending_exception_;
// Pending error-message state captured when the exception was thrown.
bool has_pending_message_;
Object* pending_message_obj_;
Script* pending_message_script_;
int pending_message_start_pos_;
int pending_message_end_pos_;
// Use a separate value for scheduled exceptions to preserve the
// invariants that hold about pending_exception. We may want to
// unify them later.
MaybeObject* scheduled_exception_;
// True when the pending exception has been caught by an external
// (C++ TryCatch) handler.
bool external_caught_exception_;
// Innermost SaveContext scope (linked list through save_context_).
SaveContext* save_context_;
// The TryCatch handler that caught the current pending exception.
v8::TryCatch* catcher_;
// Stack.
Address c_entry_fp_; // the frame pointer of the top c entry frame
Address handler_; // try-blocks are chained through the stack
#ifdef USE_SIMULATOR
#if defined(V8_TARGET_ARCH_ARM) || defined(V8_TARGET_ARCH_MIPS)
Simulator* simulator_;
#endif
#endif // USE_SIMULATOR
Address js_entry_sp_; // the stack pointer of the bottom JS entry frame
Address external_callback_; // the external callback we're currently in
StateTag current_vm_state_;
// Generated code scratch locations.
int32_t formal_count_;
// Call back function to report unsafe JS accesses.
v8::FailedAccessCheckCallback failed_access_check_callback_;
// Head of the list of live LookupResults.
LookupResult* top_lookup_result_;
// Whether out of memory exceptions should be ignored.
bool ignore_out_of_memory_;
private:
void InitializeInternal();
Address try_catch_handler_address_;
};
#ifdef ENABLE_DEBUGGER_SUPPORT
// Debugger-related per-isolate globals, contributed to ISOLATE_INIT_LIST
// only when debugger support is compiled in; V(type, name, initial_value).
#define ISOLATE_DEBUGGER_INIT_LIST(V) \
V(v8::Debug::EventCallback, debug_event_callback, NULL) \
V(DebuggerAgent*, debugger_agent_instance, NULL)
#else
#define ISOLATE_DEBUGGER_INIT_LIST(V)
#endif
#ifdef DEBUG
// Debug-only per-isolate arrays; V(type, name, length).
#define ISOLATE_INIT_DEBUG_ARRAY_LIST(V) \
V(CommentStatistic, paged_space_comments_statistics, \
CommentStatistic::kMaxComments + 1)
#else
#define ISOLATE_INIT_DEBUG_ARRAY_LIST(V)
#endif
// X-macro listing per-isolate fixed-size array members; each entry is
// V(element_type, name, length).  Expanded below to declare the fields and
// their accessors.
#define ISOLATE_INIT_ARRAY_LIST(V) \
/* Scratch tables for regexp and string search (see StringSearchBase). */ \
V(int32_t, jsregexp_static_offsets_vector, kJSRegexpStaticOffsetsVectorSize) \
V(int, bad_char_shift_table, kUC16AlphabetSize) \
V(int, good_suffix_shift_table, (kBMMaxShift + 1)) \
V(int, suffix_table, (kBMMaxShift + 1)) \
V(uint32_t, private_random_seed, 2) \
ISOLATE_INIT_DEBUG_ARRAY_LIST(V)
// Cache of heap objects used while printing with a StringStream; allocated
// from preallocated storage so it works when the heap is in a bad state.
typedef List<HeapObject*, PreallocatedStorageAllocationPolicy> DebugObjectCache;
// X-macro listing per-isolate scalar/pointer members; each entry is
// V(type, name, initial_value).  Expanded below to declare the fields and
// their name()/set_name() accessors.
#define ISOLATE_INIT_LIST(V) \
/* SerializerDeserializer state. */ \
V(int, serialize_partial_snapshot_cache_length, 0) \
V(int, serialize_partial_snapshot_cache_capacity, 0) \
V(Object**, serialize_partial_snapshot_cache, NULL) \
/* Assembler state. */ \
/* A previously allocated buffer of kMinimalBufferSize bytes, or NULL. */ \
V(byte*, assembler_spare_buffer, NULL) \
V(FatalErrorCallback, exception_behavior, NULL) \
V(AllowCodeGenerationFromStringsCallback, allow_code_gen_callback, NULL) \
V(v8::Debug::MessageHandler, message_handler, NULL) \
/* To distinguish the function templates, so that we can find them in the */ \
/* function cache of the native context. */ \
V(int, next_serial_number, 0) \
V(ExternalReferenceRedirectorPointer*, external_reference_redirector, NULL) \
V(bool, always_allow_natives_syntax, false) \
/* Part of the state of liveedit. */ \
V(FunctionInfoListener*, active_function_info_listener, NULL) \
/* State for Relocatable. */ \
V(Relocatable*, relocatable_top, NULL) \
/* State for CodeEntry in profile-generator. */ \
V(CodeGenerator*, current_code_generator, NULL) \
V(bool, jump_target_compiling_deferred_code, false) \
V(DebugObjectCache*, string_stream_debug_object_cache, NULL) \
V(Object*, string_stream_current_security_token, NULL) \
/* TODO(isolates): Release this on destruction? */ \
V(int*, irregexp_interpreter_backtrack_stack_cache, NULL) \
/* Serializer state. */ \
V(ExternalReferenceTable*, external_reference_table, NULL) \
/* AstNode state. */ \
V(int, ast_node_id, 0) \
V(unsigned, ast_node_count, 0) \
/* SafeStackFrameIterator activations count. */ \
V(int, safe_stack_iterator_counter, 0) \
V(uint64_t, enabled_cpu_features, 0) \
V(CpuProfiler*, cpu_profiler, NULL) \
V(HeapProfiler*, heap_profiler, NULL) \
ISOLATE_DEBUGGER_INIT_LIST(V)
class Isolate {
// These forward declarations are required to make the friend declarations in
// PerIsolateThreadData work on some older versions of gcc.
class ThreadDataTable;
class EntryStackItem;
public:
~Isolate();
// A thread has a PerIsolateThreadData instance for each isolate that it has
// entered. That instance is allocated when the isolate is initially entered
// and reused on subsequent entries.
//
// Instances are linked into the process-wide ThreadDataTable (next_/prev_)
// and looked up by the (isolate, thread_id) pair; see Matches().
class PerIsolateThreadData {
public:
PerIsolateThreadData(Isolate* isolate, ThreadId thread_id)
: isolate_(isolate),
thread_id_(thread_id),
stack_limit_(0),
thread_state_(NULL),
#if !defined(__arm__) && defined(V8_TARGET_ARCH_ARM) || \
!defined(__mips__) && defined(V8_TARGET_ARCH_MIPS)
simulator_(NULL),
#endif
next_(NULL),
prev_(NULL) { }
// Identity of this (isolate, thread) pairing.
Isolate* isolate() const { return isolate_; }
ThreadId thread_id() const { return thread_id_; }
// Stack limit used for stack-overflow checks while this thread runs in
// this isolate.
void set_stack_limit(uintptr_t value) { stack_limit_ = value; }
uintptr_t stack_limit() const { return stack_limit_; }
// Archived thread state (set while the thread is not active in the
// isolate); NULL otherwise.
ThreadState* thread_state() const { return thread_state_; }
void set_thread_state(ThreadState* value) { thread_state_ = value; }
#if !defined(__arm__) && defined(V8_TARGET_ARCH_ARM) || \
!defined(__mips__) && defined(V8_TARGET_ARCH_MIPS)
// Per-thread simulator instance when running generated code under a
// simulator (cross-compiled ARM/MIPS builds on other hosts).
Simulator* simulator() const { return simulator_; }
void set_simulator(Simulator* simulator) {
simulator_ = simulator;
}
#endif
// True if this entry is for the given (isolate, thread) pair; used as the
// ThreadDataTable lookup predicate.
bool Matches(Isolate* isolate, ThreadId thread_id) const {
return isolate_ == isolate && thread_id_.Equals(thread_id);
}
private:
Isolate* isolate_;
ThreadId thread_id_;
uintptr_t stack_limit_;
ThreadState* thread_state_;
#if !defined(__arm__) && defined(V8_TARGET_ARCH_ARM) || \
!defined(__mips__) && defined(V8_TARGET_ARCH_MIPS)
Simulator* simulator_;
#endif
// Doubly-linked-list hooks used by ThreadDataTable.
PerIsolateThreadData* next_;
PerIsolateThreadData* prev_;
friend class Isolate;
friend class ThreadDataTable;
friend class EntryStackItem;
DISALLOW_COPY_AND_ASSIGN(PerIsolateThreadData);
};
enum AddressId {
#define DECLARE_ENUM(CamelName, hacker_name) k##CamelName##Address,
FOR_EACH_ISOLATE_ADDRESS_NAME(DECLARE_ENUM)
#undef DECLARE_ENUM
kIsolateAddressCount
};
// Returns the PerIsolateThreadData for the current thread (or NULL if one is
// not currently set).
static PerIsolateThreadData* CurrentPerIsolateThreadData() {
return reinterpret_cast<PerIsolateThreadData*>(
Thread::GetThreadLocal(per_isolate_thread_data_key_));
}
// Returns the isolate inside which the current thread is running.
INLINE(static Isolate* Current()) {
Isolate* isolate = reinterpret_cast<Isolate*>(
Thread::GetExistingThreadLocal(isolate_key_));
ASSERT(isolate != NULL);
return isolate;
}
INLINE(static Isolate* UncheckedCurrent()) {
return reinterpret_cast<Isolate*>(Thread::GetThreadLocal(isolate_key_));
}
// Usually called by Init(), but can be called early e.g. to allow
// testing components that require logging but not the whole
// isolate.
//
// Safe to call more than once.
void InitializeLoggingAndCounters();
bool Init(Deserializer* des);
bool IsInitialized() { return state_ == INITIALIZED; }
// True if at least one thread Enter'ed this isolate.
bool IsInUse() { return entry_stack_ != NULL; }
// Destroys the non-default isolates.
// Sets default isolate into "has_been_disposed" state rather then destroying,
// for legacy API reasons.
void TearDown();
bool IsDefaultIsolate() const { return this == default_isolate_; }
// Ensures that process-wide resources and the default isolate have been
// allocated. It is only necessary to call this method in rare cases, for
// example if you are using V8 from within the body of a static initializer.
// Safe to call multiple times.
static void EnsureDefaultIsolate();
// Find the PerThread for this particular (isolate, thread) combination
// If one does not yet exist, return null.
PerIsolateThreadData* FindPerThreadDataForThisThread();
#ifdef ENABLE_DEBUGGER_SUPPORT
// Get the debugger from the default isolate. Preinitializes the
// default isolate if needed.
static Debugger* GetDefaultIsolateDebugger();
#endif
// Get the stack guard from the default isolate. Preinitializes the
// default isolate if needed.
static StackGuard* GetDefaultIsolateStackGuard();
// Returns the key used to store the pointer to the current isolate.
// Used internally for V8 threads that do not execute JavaScript but still
// are part of the domain of an isolate (like the context switcher).
static Thread::LocalStorageKey isolate_key() {
return isolate_key_;
}
// Returns the key used to store process-wide thread IDs.
static Thread::LocalStorageKey thread_id_key() {
return thread_id_key_;
}
static Thread::LocalStorageKey per_isolate_thread_data_key();
// If a client attempts to create a Locker without specifying an isolate,
// we assume that the client is using legacy behavior. Set up the current
// thread to be inside the implicit isolate (or fail a check if we have
// switched to non-legacy behavior).
static void EnterDefaultIsolate();
// Mutex for serializing access to break control structures.
Mutex* break_access() { return break_access_; }
// Mutex for serializing access to debugger.
Mutex* debugger_access() { return debugger_access_; }
Address get_address_from_id(AddressId id);
// Access to top context (where the current function object was created).
Context* context() { return thread_local_top_.context_; }
void set_context(Context* context) {
ASSERT(context == NULL || context->IsContext());
thread_local_top_.context_ = context;
}
Context** context_address() { return &thread_local_top_.context_; }
SaveContext* save_context() {return thread_local_top_.save_context_; }
void set_save_context(SaveContext* save) {
thread_local_top_.save_context_ = save;
}
// Access to the map of "new Object()".
Map* empty_object_map() {
return context()->native_context()->object_function()->map();
}
// Access to current thread id.
ThreadId thread_id() { return thread_local_top_.thread_id_; }
void set_thread_id(ThreadId id) { thread_local_top_.thread_id_ = id; }
// Interface to pending exception.
MaybeObject* pending_exception() {
ASSERT(has_pending_exception());
return thread_local_top_.pending_exception_;
}
bool external_caught_exception() {
return thread_local_top_.external_caught_exception_;
}
void set_external_caught_exception(bool value) {
thread_local_top_.external_caught_exception_ = value;
}
void set_pending_exception(MaybeObject* exception) {
thread_local_top_.pending_exception_ = exception;
}
void clear_pending_exception() {
thread_local_top_.pending_exception_ = heap_.the_hole_value();
}
MaybeObject** pending_exception_address() {
return &thread_local_top_.pending_exception_;
}
bool has_pending_exception() {
return !thread_local_top_.pending_exception_->IsTheHole();
}
void clear_pending_message() {
thread_local_top_.has_pending_message_ = false;
thread_local_top_.pending_message_obj_ = heap_.the_hole_value();
thread_local_top_.pending_message_script_ = NULL;
}
v8::TryCatch* try_catch_handler() {
return thread_local_top_.TryCatchHandler();
}
Address try_catch_handler_address() {
return thread_local_top_.try_catch_handler_address();
}
bool* external_caught_exception_address() {
return &thread_local_top_.external_caught_exception_;
}
v8::TryCatch* catcher() {
return thread_local_top_.catcher_;
}
void set_catcher(v8::TryCatch* catcher) {
thread_local_top_.catcher_ = catcher;
}
MaybeObject** scheduled_exception_address() {
return &thread_local_top_.scheduled_exception_;
}
Address pending_message_obj_address() {
return reinterpret_cast<Address>(&thread_local_top_.pending_message_obj_);
}
Address has_pending_message_address() {
return reinterpret_cast<Address>(&thread_local_top_.has_pending_message_);
}
Address pending_message_script_address() {
return reinterpret_cast<Address>(
&thread_local_top_.pending_message_script_);
}
MaybeObject* scheduled_exception() {
ASSERT(has_scheduled_exception());
return thread_local_top_.scheduled_exception_;
}
bool has_scheduled_exception() {
return thread_local_top_.scheduled_exception_ != heap_.the_hole_value();
}
void clear_scheduled_exception() {
thread_local_top_.scheduled_exception_ = heap_.the_hole_value();
}
bool IsExternallyCaught();
bool is_catchable_by_javascript(MaybeObject* exception) {
return (exception != Failure::OutOfMemoryException()) &&
(exception != heap()->termination_exception());
}
// Serializer.
void PushToPartialSnapshotCache(Object* obj);
// JS execution stack (see frames.h).
static Address c_entry_fp(ThreadLocalTop* thread) {
return thread->c_entry_fp_;
}
static Address handler(ThreadLocalTop* thread) { return thread->handler_; }
inline Address* c_entry_fp_address() {
return &thread_local_top_.c_entry_fp_;
}
inline Address* handler_address() { return &thread_local_top_.handler_; }
// Bottom JS entry (see StackTracer::Trace in log.cc).
static Address js_entry_sp(ThreadLocalTop* thread) {
return thread->js_entry_sp_;
}
inline Address* js_entry_sp_address() {
return &thread_local_top_.js_entry_sp_;
}
// Generated code scratch locations.
void* formal_count_address() { return &thread_local_top_.formal_count_; }
// Returns the global object of the current context. It could be
// a builtin object, or a JS global object.
Handle<GlobalObject> global_object() {
return Handle<GlobalObject>(context()->global_object());
}
// Returns the global proxy object of the current context.
Object* global_proxy() {
return context()->global_proxy();
}
Handle<JSBuiltinsObject> js_builtins_object() {
return Handle<JSBuiltinsObject>(thread_local_top_.context_->builtins());
}
static int ArchiveSpacePerThread() { return sizeof(ThreadLocalTop); }
void FreeThreadResources() { thread_local_top_.Free(); }
// This method is called by the api after operations that may throw
// exceptions. If an exception was thrown and not handled by an external
// handler the exception is scheduled to be rethrown when we return to running
// JavaScript code. If an exception is scheduled true is returned.
bool OptionalRescheduleException(bool is_bottom_call);
// RAII scope that saves the isolate's pending exception and catcher on
// entry and restores them on exit, so code inside the scope may throw and
// clear exceptions without clobbering the outer exception state.
class ExceptionScope {
public:
explicit ExceptionScope(Isolate* isolate) :
// Scope currently can only be used for regular exceptions, not
// failures like OOM or termination exception.
isolate_(isolate),
pending_exception_(isolate_->pending_exception()->ToObjectUnchecked()),
catcher_(isolate_->catcher())
{ }
// Restores the saved catcher and pending exception.
~ExceptionScope() {
isolate_->set_catcher(catcher_);
isolate_->set_pending_exception(*pending_exception_);
}
private:
Isolate* isolate_;
// Saved pending exception, held in a handle so it survives GC while the
// scope is active.
Handle<Object> pending_exception_;
v8::TryCatch* catcher_;
};
void SetCaptureStackTraceForUncaughtExceptions(
bool capture,
int frame_limit,
StackTrace::StackTraceOptions options);
// Tells whether the current context has experienced an out of memory
// exception.
bool is_out_of_memory();
bool ignore_out_of_memory() {
return thread_local_top_.ignore_out_of_memory_;
}
void set_ignore_out_of_memory(bool value) {
thread_local_top_.ignore_out_of_memory_ = value;
}
void PrintCurrentStackTrace(FILE* out);
void PrintStackTrace(FILE* out, char* thread_data);
void PrintStack(StringStream* accumulator);
void PrintStack();
Handle<String> StackTraceString();
NO_INLINE(void PushStackTraceAndDie(unsigned int magic,
Object* object,
Map* map,
unsigned int magic2));
Handle<JSArray> CaptureCurrentStackTrace(
int frame_limit,
StackTrace::StackTraceOptions options);
void CaptureAndSetCurrentStackTraceFor(Handle<JSObject> error_object);
// Returns if the top context may access the given global object. If
// the result is false, the pending exception is guaranteed to be
// set.
bool MayNamedAccess(JSObject* receiver,
Object* key,
v8::AccessType type);
bool MayIndexedAccess(JSObject* receiver,
uint32_t index,
v8::AccessType type);
void SetFailedAccessCheckCallback(v8::FailedAccessCheckCallback callback);
void ReportFailedAccessCheck(JSObject* receiver, v8::AccessType type);
// Exception throwing support. The caller should use the result
// of Throw() as its return value.
Failure* Throw(Object* exception, MessageLocation* location = NULL);
// Re-throw an exception. This involves no error reporting since
// error reporting was handled when the exception was thrown
// originally.
Failure* ReThrow(MaybeObject* exception);
void ScheduleThrow(Object* exception);
void ReportPendingMessages();
Failure* ThrowIllegalOperation();
// Promote a scheduled exception to pending. Asserts has_scheduled_exception.
Failure* PromoteScheduledException();
void DoThrow(Object* exception, MessageLocation* location);
// Checks if exception should be reported and finds out if it's
// caught externally.
bool ShouldReportException(bool* can_be_caught_externally,
bool catchable_by_javascript);
// Attempts to compute the current source location, storing the
// result in the target out parameter.
void ComputeLocation(MessageLocation* target);
// Override command line flag.
void TraceException(bool flag);
// Out of resource exception helpers.
Failure* StackOverflow();
Failure* TerminateExecution();
// Administration
void Iterate(ObjectVisitor* v);
void Iterate(ObjectVisitor* v, ThreadLocalTop* t);
char* Iterate(ObjectVisitor* v, char* t);
void IterateThread(ThreadVisitor* v);
void IterateThread(ThreadVisitor* v, char* t);
// Returns the current native and global context.
Handle<Context> native_context();
Handle<Context> global_context();
// Returns the native context of the calling JavaScript code. That
// is, the native context of the top-most JavaScript frame.
Handle<Context> GetCallingNativeContext();
void RegisterTryCatchHandler(v8::TryCatch* that);
void UnregisterTryCatchHandler(v8::TryCatch* that);
char* ArchiveThread(char* to);
char* RestoreThread(char* from);
static const char* const kStackOverflowMessage;
static const int kUC16AlphabetSize = 256; // See StringSearchBase.
static const int kBMMaxShift = 250; // See StringSearchBase.
// Accessors.
#define GLOBAL_ACCESSOR(type, name, initialvalue) \
inline type name() const { \
ASSERT(OFFSET_OF(Isolate, name##_) == name##_debug_offset_); \
return name##_; \
} \
inline void set_##name(type value) { \
ASSERT(OFFSET_OF(Isolate, name##_) == name##_debug_offset_); \
name##_ = value; \
}
ISOLATE_INIT_LIST(GLOBAL_ACCESSOR)
#undef GLOBAL_ACCESSOR
#define GLOBAL_ARRAY_ACCESSOR(type, name, length) \
inline type* name() { \
ASSERT(OFFSET_OF(Isolate, name##_) == name##_debug_offset_); \
return &(name##_)[0]; \
}
ISOLATE_INIT_ARRAY_LIST(GLOBAL_ARRAY_ACCESSOR)
#undef GLOBAL_ARRAY_ACCESSOR
#define NATIVE_CONTEXT_FIELD_ACCESSOR(index, type, name) \
Handle<type> name() { \
return Handle<type>(context()->native_context()->name()); \
}
NATIVE_CONTEXT_FIELDS(NATIVE_CONTEXT_FIELD_ACCESSOR)
#undef NATIVE_CONTEXT_FIELD_ACCESSOR
Bootstrapper* bootstrapper() { return bootstrapper_; }
Counters* counters() {
// Call InitializeLoggingAndCounters() if logging is needed before
// the isolate is fully initialized.
ASSERT(counters_ != NULL);
return counters_;
}
CodeRange* code_range() { return code_range_; }
RuntimeProfiler* runtime_profiler() { return runtime_profiler_; }
CompilationCache* compilation_cache() { return compilation_cache_; }
Logger* logger() {
// Call InitializeLoggingAndCounters() if logging is needed before
// the isolate is fully initialized.
ASSERT(logger_ != NULL);
return logger_;
}
StackGuard* stack_guard() { return &stack_guard_; }
Heap* heap() { return &heap_; }
StatsTable* stats_table();
StubCache* stub_cache() { return stub_cache_; }
DeoptimizerData* deoptimizer_data() { return deoptimizer_data_; }
ThreadLocalTop* thread_local_top() { return &thread_local_top_; }
TranscendentalCache* transcendental_cache() const {
return transcendental_cache_;
}
MemoryAllocator* memory_allocator() {
return memory_allocator_;
}
KeyedLookupCache* keyed_lookup_cache() {
return keyed_lookup_cache_;
}
ContextSlotCache* context_slot_cache() {
return context_slot_cache_;
}
DescriptorLookupCache* descriptor_lookup_cache() {
return descriptor_lookup_cache_;
}
v8::ImplementationUtilities::HandleScopeData* handle_scope_data() {
return &handle_scope_data_;
}
HandleScopeImplementer* handle_scope_implementer() {
ASSERT(handle_scope_implementer_);
return handle_scope_implementer_;
}
Zone* runtime_zone() { return &runtime_zone_; }
UnicodeCache* unicode_cache() {
return unicode_cache_;
}
InnerPointerToCodeCache* inner_pointer_to_code_cache() {
return inner_pointer_to_code_cache_;
}
StringInputBuffer* write_input_buffer() { return write_input_buffer_; }
GlobalHandles* global_handles() { return global_handles_; }
ThreadManager* thread_manager() { return thread_manager_; }
ContextSwitcher* context_switcher() { return context_switcher_; }
void set_context_switcher(ContextSwitcher* switcher) {
context_switcher_ = switcher;
}
StringTracker* string_tracker() { return string_tracker_; }
unibrow::Mapping<unibrow::Ecma262UnCanonicalize>* jsregexp_uncanonicalize() {
return &jsregexp_uncanonicalize_;
}
unibrow::Mapping<unibrow::CanonicalizationRange>* jsregexp_canonrange() {
return &jsregexp_canonrange_;
}
StringInputBuffer* objects_string_compare_buffer_a() {
return &objects_string_compare_buffer_a_;
}
StringInputBuffer* objects_string_compare_buffer_b() {
return &objects_string_compare_buffer_b_;
}
StaticResource<StringInputBuffer>* objects_string_input_buffer() {
return &objects_string_input_buffer_;
}
RuntimeState* runtime_state() { return &runtime_state_; }
void set_fp_stubs_generated(bool value) {
fp_stubs_generated_ = value;
}
bool fp_stubs_generated() { return fp_stubs_generated_; }
StaticResource<SafeStringInputBuffer>* compiler_safe_string_input_buffer() {
return &compiler_safe_string_input_buffer_;
}
Builtins* builtins() { return &builtins_; }
void NotifyExtensionInstalled() {
has_installed_extensions_ = true;
}
bool has_installed_extensions() { return has_installed_extensions_; }
unibrow::Mapping<unibrow::Ecma262Canonicalize>*
regexp_macro_assembler_canonicalize() {
return ®exp_macro_assembler_canonicalize_;
}
RegExpStack* regexp_stack() { return regexp_stack_; }
unibrow::Mapping<unibrow::Ecma262Canonicalize>*
interp_canonicalize_mapping() {
return &interp_canonicalize_mapping_;
}
void* PreallocatedStorageNew(size_t size);
void PreallocatedStorageDelete(void* p);
void PreallocatedStorageInit(size_t size);
#ifdef ENABLE_DEBUGGER_SUPPORT
Debugger* debugger() {
if (!NoBarrier_Load(&debugger_initialized_)) InitializeDebugger();
return debugger_;
}
Debug* debug() {
if (!NoBarrier_Load(&debugger_initialized_)) InitializeDebugger();
return debug_;
}
#endif
inline bool IsDebuggerActive();
inline bool DebuggerHasBreakPoints();
#ifdef DEBUG
HistogramInfo* heap_histograms() { return heap_histograms_; }
JSObject::SpillInformation* js_spill_information() {
return &js_spill_information_;
}
int* code_kind_statistics() { return code_kind_statistics_; }
#endif
#if defined(V8_TARGET_ARCH_ARM) && !defined(__arm__) || \
defined(V8_TARGET_ARCH_MIPS) && !defined(__mips__)
bool simulator_initialized() { return simulator_initialized_; }
void set_simulator_initialized(bool initialized) {
simulator_initialized_ = initialized;
}
HashMap* simulator_i_cache() { return simulator_i_cache_; }
void set_simulator_i_cache(HashMap* hash_map) {
simulator_i_cache_ = hash_map;
}
Redirection* simulator_redirection() {
return simulator_redirection_;
}
void set_simulator_redirection(Redirection* redirection) {
simulator_redirection_ = redirection;
}
#endif
Factory* factory() { return reinterpret_cast<Factory*>(this); }
static const int kJSRegexpStaticOffsetsVectorSize = 128;
Address external_callback() {
return thread_local_top_.external_callback_;
}
void set_external_callback(Address callback) {
thread_local_top_.external_callback_ = callback;
}
StateTag current_vm_state() {
return thread_local_top_.current_vm_state_;
}
void SetCurrentVMState(StateTag state) {
if (RuntimeProfiler::IsEnabled()) {
// Make sure thread local top is initialized.
ASSERT(thread_local_top_.isolate_ == this);
StateTag current_state = thread_local_top_.current_vm_state_;
if (current_state != JS && state == JS) {
// Non-JS -> JS transition.
RuntimeProfiler::IsolateEnteredJS(this);
} else if (current_state == JS && state != JS) {
// JS -> non-JS transition.
ASSERT(RuntimeProfiler::IsSomeIsolateInJS());
RuntimeProfiler::IsolateExitedJS(this);
} else {
// Other types of state transitions are not interesting to the
// runtime profiler, because they don't affect whether we're
// in JS or not.
ASSERT((current_state == JS) == (state == JS));
}
}
thread_local_top_.current_vm_state_ = state;
}
void SetData(void* data) { embedder_data_ = data; }
void* GetData() { return embedder_data_; }
LookupResult* top_lookup_result() {
return thread_local_top_.top_lookup_result_;
}
void SetTopLookupResult(LookupResult* top) {
thread_local_top_.top_lookup_result_ = top;
}
bool context_exit_happened() {
return context_exit_happened_;
}
void set_context_exit_happened(bool context_exit_happened) {
context_exit_happened_ = context_exit_happened;
}
double time_millis_since_init() {
return OS::TimeCurrentMillis() - time_millis_at_init_;
}
DateCache* date_cache() {
return date_cache_;
}
void set_date_cache(DateCache* date_cache) {
if (date_cache != date_cache_) {
delete date_cache_;
}
date_cache_ = date_cache;
}
void IterateDeferredHandles(ObjectVisitor* visitor);
void LinkDeferredHandles(DeferredHandles* deferred_handles);
void UnlinkDeferredHandles(DeferredHandles* deferred_handles);
OptimizingCompilerThread* optimizing_compiler_thread() {
return &optimizing_compiler_thread_;
}
private:
Isolate();
friend struct GlobalState;
friend struct InitializeGlobalState;
enum State {
UNINITIALIZED, // Some components may not have been allocated.
INITIALIZED // All components are fully initialized.
};
// These fields are accessed through the API, offsets must be kept in sync
// with v8::internal::Internals (in include/v8.h) constants. This is also
// verified in Isolate::Init() using runtime checks.
State state_; // Will be padded to kApiPointerSize.
void* embedder_data_;
Heap heap_;
// The per-process lock should be acquired before the ThreadDataTable is
// modified.
//
// Process-wide registry of PerIsolateThreadData entries, keyed by the
// (isolate, thread_id) pair and stored as a linked list (list_).
class ThreadDataTable {
public:
ThreadDataTable();
~ThreadDataTable();
// Returns the entry for (isolate, thread_id), or NULL if none exists.
PerIsolateThreadData* Lookup(Isolate* isolate, ThreadId thread_id);
// Inserts a new entry at the head of the list.
void Insert(PerIsolateThreadData* data);
// Removes (and deletes) the entry for (isolate, thread_id).
void Remove(Isolate* isolate, ThreadId thread_id);
void Remove(PerIsolateThreadData* data);
// Removes all entries belonging to |isolate| (isolate teardown).
void RemoveAllThreads(Isolate* isolate);
private:
// Head of the doubly-linked list of entries.
PerIsolateThreadData* list_;
};
// These items form a stack synchronously with threads Enter'ing and Exit'ing
// the Isolate. The top of the stack points to a thread which is currently
// running the Isolate. When the stack is empty, the Isolate is considered
// not entered by any thread and can be Disposed.
// If the same thread enters the Isolate more then once, the entry_count_
// is incremented rather then a new item pushed to the stack.
class EntryStackItem {
public:
EntryStackItem(PerIsolateThreadData* previous_thread_data,
Isolate* previous_isolate,
EntryStackItem* previous_item)
: entry_count(1),
previous_thread_data(previous_thread_data),
previous_isolate(previous_isolate),
previous_item(previous_item) { }
// Number of nested Enter() calls by the same thread for this item.
int entry_count;
// State to restore when this item is popped by Exit().
PerIsolateThreadData* previous_thread_data;
Isolate* previous_isolate;
// Next-older entry on the stack (NULL at the bottom).
EntryStackItem* previous_item;
private:
DISALLOW_COPY_AND_ASSIGN(EntryStackItem);
};
// This mutex protects highest_thread_id_, thread_data_table_ and
// default_isolate_.
static Mutex* process_wide_mutex_;
static Thread::LocalStorageKey per_isolate_thread_data_key_;
static Thread::LocalStorageKey isolate_key_;
static Thread::LocalStorageKey thread_id_key_;
static Isolate* default_isolate_;
static ThreadDataTable* thread_data_table_;
void Deinit();
static void SetIsolateThreadLocals(Isolate* isolate,
PerIsolateThreadData* data);
// Allocate and insert PerIsolateThreadData into the ThreadDataTable
// (regardless of whether such data already exists).
PerIsolateThreadData* AllocatePerIsolateThreadData(ThreadId thread_id);
// Find the PerThread for this particular (isolate, thread) combination.
// If one does not yet exist, allocate a new one.
PerIsolateThreadData* FindOrAllocatePerThreadDataForThisThread();
// PreInits and returns a default isolate. Needed when a new thread tries
// to create a Locker for the first time (the lock itself is in the isolate).
static Isolate* GetDefaultIsolateForLocking();
// Initializes the current thread to run this Isolate.
// Not thread-safe. Multiple threads should not Enter/Exit the same isolate
// at the same time, this should be prevented using external locking.
void Enter();
// Exits the current thread. The previosuly entered Isolate is restored
// for the thread.
// Not thread-safe. Multiple threads should not Enter/Exit the same isolate
// at the same time, this should be prevented using external locking.
void Exit();
void PreallocatedMemoryThreadStart();
void PreallocatedMemoryThreadStop();
void InitializeThreadLocal();
void PrintStackTrace(FILE* out, ThreadLocalTop* thread);
void MarkCompactPrologue(bool is_compacting,
ThreadLocalTop* archived_thread_data);
void MarkCompactEpilogue(bool is_compacting,
ThreadLocalTop* archived_thread_data);
void FillCache();
void PropagatePendingExceptionToExternalTryCatch();
void InitializeDebugger();
// Traverse prototype chain to find out whether the object is derived from
// the Error object.
bool IsErrorObject(Handle<Object> obj);
EntryStackItem* entry_stack_;
int stack_trace_nesting_level_;
StringStream* incomplete_message_;
// The preallocated memory thread singleton.
PreallocatedMemoryThread* preallocated_memory_thread_;
Address isolate_addresses_[kIsolateAddressCount + 1]; // NOLINT
NoAllocationStringAllocator* preallocated_message_space_;
Bootstrapper* bootstrapper_;
RuntimeProfiler* runtime_profiler_;
CompilationCache* compilation_cache_;
Counters* counters_;
CodeRange* code_range_;
Mutex* break_access_;
Atomic32 debugger_initialized_;
Mutex* debugger_access_;
Logger* logger_;
StackGuard stack_guard_;
StatsTable* stats_table_;
StubCache* stub_cache_;
DeoptimizerData* deoptimizer_data_;
ThreadLocalTop thread_local_top_;
bool capture_stack_trace_for_uncaught_exceptions_;
int stack_trace_for_uncaught_exceptions_frame_limit_;
StackTrace::StackTraceOptions stack_trace_for_uncaught_exceptions_options_;
TranscendentalCache* transcendental_cache_;
MemoryAllocator* memory_allocator_;
KeyedLookupCache* keyed_lookup_cache_;
ContextSlotCache* context_slot_cache_;
DescriptorLookupCache* descriptor_lookup_cache_;
v8::ImplementationUtilities::HandleScopeData handle_scope_data_;
HandleScopeImplementer* handle_scope_implementer_;
UnicodeCache* unicode_cache_;
Zone runtime_zone_;
PreallocatedStorage in_use_list_;
PreallocatedStorage free_list_;
bool preallocated_storage_preallocated_;
InnerPointerToCodeCache* inner_pointer_to_code_cache_;
StringInputBuffer* write_input_buffer_;
GlobalHandles* global_handles_;
ContextSwitcher* context_switcher_;
ThreadManager* thread_manager_;
RuntimeState runtime_state_;
bool fp_stubs_generated_;
StaticResource<SafeStringInputBuffer> compiler_safe_string_input_buffer_;
Builtins builtins_;
bool has_installed_extensions_;
StringTracker* string_tracker_;
unibrow::Mapping<unibrow::Ecma262UnCanonicalize> jsregexp_uncanonicalize_;
unibrow::Mapping<unibrow::CanonicalizationRange> jsregexp_canonrange_;
StringInputBuffer objects_string_compare_buffer_a_;
StringInputBuffer objects_string_compare_buffer_b_;
StaticResource<StringInputBuffer> objects_string_input_buffer_;
unibrow::Mapping<unibrow::Ecma262Canonicalize>
regexp_macro_assembler_canonicalize_;
RegExpStack* regexp_stack_;
DateCache* date_cache_;
unibrow::Mapping<unibrow::Ecma262Canonicalize> interp_canonicalize_mapping_;
// The garbage collector should be a little more aggressive when it knows
// that a context was recently exited.
bool context_exit_happened_;
// Time stamp at initialization.
double time_millis_at_init_;
#if defined(V8_TARGET_ARCH_ARM) && !defined(__arm__) || \
defined(V8_TARGET_ARCH_MIPS) && !defined(__mips__)
bool simulator_initialized_;
HashMap* simulator_i_cache_;
Redirection* simulator_redirection_;
#endif
#ifdef DEBUG
// A static array of histogram info for each type.
HistogramInfo heap_histograms_[LAST_TYPE + 1];
JSObject::SpillInformation js_spill_information_;
int code_kind_statistics_[Code::NUMBER_OF_KINDS];
#endif
#ifdef ENABLE_DEBUGGER_SUPPORT
Debugger* debugger_;
Debug* debug_;
#endif
#define GLOBAL_BACKING_STORE(type, name, initialvalue) \
type name##_;
ISOLATE_INIT_LIST(GLOBAL_BACKING_STORE)
#undef GLOBAL_BACKING_STORE
#define GLOBAL_ARRAY_BACKING_STORE(type, name, length) \
type name##_[length];
ISOLATE_INIT_ARRAY_LIST(GLOBAL_ARRAY_BACKING_STORE)
#undef GLOBAL_ARRAY_BACKING_STORE
#ifdef DEBUG
// This class is huge and has a number of fields controlled by
// preprocessor defines. Make sure the offsets of these fields agree
// between compilation units.
#define ISOLATE_FIELD_OFFSET(type, name, ignored) \
static const intptr_t name##_debug_offset_;
ISOLATE_INIT_LIST(ISOLATE_FIELD_OFFSET)
ISOLATE_INIT_ARRAY_LIST(ISOLATE_FIELD_OFFSET)
#undef ISOLATE_FIELD_OFFSET
#endif
DeferredHandles* deferred_handles_head_;
OptimizingCompilerThread optimizing_compiler_thread_;
friend class ExecutionAccess;
friend class HandleScopeImplementer;
friend class IsolateInitializer;
friend class OptimizingCompilerThread;
friend class ThreadManager;
friend class Simulator;
friend class StackGuard;
friend class ThreadId;
friend class TestMemoryAllocatorScope;
friend class v8::Isolate;
friend class v8::Locker;
friend class v8::Unlocker;
DISALLOW_COPY_AND_ASSIGN(Isolate);
};
// If the GCC version is 4.1.x or 4.2.x an additional field is added to the
// class as a work around for a bug in the generated code found with these
// versions of GCC. See V8 issue 122 for details.
//
// Scope object that snapshots the isolate's context on construction
// (constructor is defined out-of-line) and, on destruction, reinstalls the
// saved context and pops this object off the isolate's save-context chain.
class SaveContext BASE_EMBEDDED {
public:
inline explicit SaveContext(Isolate* isolate);
~SaveContext() {
// A null handle means no context was entered when this scope was created;
// restore that state explicitly.
if (context_.is_null()) {
Isolate* isolate = Isolate::Current();
isolate->set_context(NULL);
isolate->set_save_context(prev_);
} else {
Isolate* isolate = context_->GetIsolate();
isolate->set_context(*context_);
isolate->set_save_context(prev_);
}
}
Handle<Context> context() { return context_; }
SaveContext* prev() { return prev_; }
// Returns true if this save context is below a given JavaScript frame.
bool IsBelowFrame(JavaScriptFrame* frame) {
return (c_entry_fp_ == 0) || (c_entry_fp_ > frame->sp());
}
private:
Handle<Context> context_;
#if __GNUC_VERSION__ >= 40100 && __GNUC_VERSION__ < 40300
Handle<Context> dummy_;
#endif
SaveContext* prev_;
Address c_entry_fp_; // C entry frame pointer at construction time.
};
// Debug-only scope that asserts the isolate's current context is the same
// at destruction as it was at construction. Compiles to a no-op in release
// builds.
class AssertNoContextChange BASE_EMBEDDED {
#ifdef DEBUG
public:
AssertNoContextChange() :
scope_(Isolate::Current()),
context_(Isolate::Current()->context(), Isolate::Current()) {
}
~AssertNoContextChange() {
ASSERT(Isolate::Current()->context() == *context_);
}
private:
// Handle scope keeping context_ alive for the lifetime of this object.
HandleScope scope_;
Handle<Context> context_;
#else
public:
AssertNoContextChange() { }
#endif
};
// Scoped lock around the isolate's break_access_ mutex (see
// Isolate::break_access()): acquired on construction, released on
// destruction. Static Lock/Unlock/TryLock variants allow manual control.
class ExecutionAccess BASE_EMBEDDED {
public:
explicit ExecutionAccess(Isolate* isolate) : isolate_(isolate) {
Lock(isolate);
}
~ExecutionAccess() { Unlock(isolate_); }
static void Lock(Isolate* isolate) { isolate->break_access_->Lock(); }
static void Unlock(Isolate* isolate) { isolate->break_access_->Unlock(); }
static bool TryLock(Isolate* isolate) {
return isolate->break_access_->TryLock();
}
private:
Isolate* isolate_;
};
// Support for checking for stack-overflows in C++ code.
class StackLimitCheck BASE_EMBEDDED {
public:
explicit StackLimitCheck(Isolate* isolate) : isolate_(isolate) { }
// Uses the address of this stack-allocated object as a proxy for the
// current C++ stack pointer and compares it against the stack guard's
// C limit.
bool HasOverflowed() const {
StackGuard* stack_guard = isolate_->stack_guard();
// Stack has overflowed in C++ code only if stack pointer exceeds the C++
// stack guard and the limits are not set to interrupt values.
// TODO(214): Stack overflows are ignored if a interrupt is pending. This
// code should probably always use the initial C++ limit.
return (reinterpret_cast<uintptr_t>(this) < stack_guard->climit()) &&
stack_guard->IsStackOverflow();
}
private:
Isolate* isolate_;
};
// Support for temporarily postponing interrupts. When the outermost
// postpone scope is left the interrupts will be re-enabled and any
// interrupts that occurred while in the scope will be taken into
// account.
class PostponeInterruptsScope BASE_EMBEDDED {
public:
explicit PostponeInterruptsScope(Isolate* isolate)
: stack_guard_(isolate->stack_guard()) {
// Nesting counter allows these scopes to be stacked; interrupts are
// only re-enabled when the outermost scope is destroyed.
stack_guard_->thread_local_.postpone_interrupts_nesting_++;
stack_guard_->DisableInterrupts();
}
~PostponeInterruptsScope() {
if (--stack_guard_->thread_local_.postpone_interrupts_nesting_ == 0) {
stack_guard_->EnableInterrupts();
}
}
private:
StackGuard* stack_guard_;
};
// Temporary macros for accessing current isolate and its subobjects.
// They provide better readability, especially when used a lot in the code.
#define HEAP (v8::internal::Isolate::Current()->heap())
#define FACTORY (v8::internal::Isolate::Current()->factory())
#define ISOLATE (v8::internal::Isolate::Current())
#define LOGGER (v8::internal::Isolate::Current()->logger())
// Tells whether the native context is marked with out of memory.
inline bool Context::has_out_of_memory() {
return native_context()->out_of_memory()->IsTrue();
}
// Mark the native context with out of memory by storing the heap's
// canonical true value in its out_of_memory slot.
inline void Context::mark_out_of_memory() {
native_context()->set_out_of_memory(HEAP->true_value());
}
} } // namespace v8::internal
#endif // V8_ISOLATE_H_
// NOTE(review): stray '|' removed — concatenation artifact; a second copy of this header follows below.
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_ISOLATE_H_
#define V8_ISOLATE_H_
#include "../include/v8-debug.h"
#include "allocation.h"
#include "apiutils.h"
#include "atomicops.h"
#include "builtins.h"
#include "contexts.h"
#include "execution.h"
#include "frames.h"
#include "date.h"
#include "global-handles.h"
#include "handles.h"
#include "hashmap.h"
#include "heap.h"
#include "optimizing-compiler-thread.h"
#include "regexp-stack.h"
#include "runtime-profiler.h"
#include "runtime.h"
#include "zone.h"
namespace v8 {
namespace internal {
// Forward declarations for types referenced by pointer below.
class Bootstrapper;
class CodeGenerator;
class CodeRange;
class CompilationCache;
class ContextSlotCache;
class ContextSwitcher;
class Counters;
class CpuFeatures;
class CpuProfiler;
class DeoptimizerData;
class Deserializer;
class EmptyStatement;
class ExternalReferenceTable;
class Factory;
class FunctionInfoListener;
class HandleScopeImplementer;
class HeapProfiler;
class InlineRuntimeFunctionsTable;
class NoAllocationStringAllocator;
class InnerPointerToCodeCache;
class PreallocatedMemoryThread;
class RegExpStack;
class SaveContext;
class UnicodeCache;
class StringInputBuffer;
class StringTracker;
class StubCache;
class ThreadManager;
class ThreadState;
class ThreadVisitor; // Defined in v8threads.h
class VMState;
// 'void function pointer', used to roundtrip the
// ExternalReference::ExternalReferenceRedirector since we can not include
// assembler.h, where it is defined, here.
typedef void* ExternalReferenceRedirectorPointer();
#ifdef ENABLE_DEBUGGER_SUPPORT
class Debug;
class Debugger;
class DebuggerAgent;
#endif
#if !defined(__arm__) && defined(V8_TARGET_ARCH_ARM) || \
!defined(__mips__) && defined(V8_TARGET_ARCH_MIPS)
class Redirection;
class Simulator;
#endif
// Static indirection table for handles to constants. If a frame element
// or a Result represents a constant, the data contains an index into this
// table of handles to the actual constants.
typedef ZoneList<Handle<Object> > ZoneObjectList;
// Returns early (promoting the scheduled exception to a pending one) if the
// isolate has a scheduled exception.
#define RETURN_IF_SCHEDULED_EXCEPTION(isolate) \
do { \
Isolate* __isolate__ = (isolate); \
if (__isolate__->has_scheduled_exception()) { \
return __isolate__->PromoteScheduledException(); \
} \
} while (false)
// Returns `value` if `call` produced an empty handle, which by convention
// means an exception is already pending on the isolate.
#define RETURN_IF_EMPTY_HANDLE_VALUE(isolate, call, value) \
do { \
if ((call).is_null()) { \
ASSERT((isolate)->has_pending_exception()); \
return (value); \
} \
} while (false)
// Hard-checks that `call` neither returns an empty handle nor leaves a
// pending exception behind.
#define CHECK_NOT_EMPTY_HANDLE(isolate, call) \
do { \
ASSERT(!(isolate)->has_pending_exception()); \
CHECK(!(call).is_null()); \
CHECK(!(isolate)->has_pending_exception()); \
} while (false)
// Common case of RETURN_IF_EMPTY_HANDLE_VALUE returning Failure::Exception().
#define RETURN_IF_EMPTY_HANDLE(isolate, call) \
RETURN_IF_EMPTY_HANDLE_VALUE(isolate, call, Failure::Exception())
// X-macro enumerating the isolate-internal addresses exposed through
// Isolate::get_address_from_id (see the AddressId enum).
#define FOR_EACH_ISOLATE_ADDRESS_NAME(C) \
C(Handler, handler) \
C(CEntryFP, c_entry_fp) \
C(Context, context) \
C(PendingException, pending_exception) \
C(ExternalCaughtException, external_caught_exception) \
C(JSEntrySP, js_entry_sp)
// Platform-independent, reliable thread identifier. Wraps a small integer
// allocated per OS thread (see AllocateThreadId); kInvalidId (-1) denotes
// "no thread".
class ThreadId {
public:
// Creates an invalid ThreadId.
ThreadId() : id_(kInvalidId) {}
// Returns ThreadId for current thread.
static ThreadId Current() { return ThreadId(GetCurrentThreadId()); }
// Returns invalid ThreadId (guaranteed not to be equal to any thread).
static ThreadId Invalid() { return ThreadId(kInvalidId); }
// Compares ThreadIds for equality.
INLINE(bool Equals(const ThreadId& other) const) {
return id_ == other.id_;
}
// Checks whether this ThreadId refers to any thread.
INLINE(bool IsValid() const) {
return id_ != kInvalidId;
}
// Converts ThreadId to an integer representation
// (required for public API: V8::V8::GetCurrentThreadId).
int ToInteger() const { return id_; }
// Converts ThreadId to an integer representation
// (required for public API: V8::V8::TerminateExecution).
static ThreadId FromInteger(int id) { return ThreadId(id); }
private:
static const int kInvalidId = -1;
explicit ThreadId(int id) : id_(id) {}
static int AllocateThreadId();
static int GetCurrentThreadId();
int id_;
// Monotonic counter used when allocating fresh ids.
static Atomic32 highest_thread_id_;
friend class Isolate;
};
// Per-thread portion of the isolate's state: current context, pending and
// scheduled exceptions, the try-catch handler chain, and entry-frame
// bookkeeping. Archived/restored when threads enter and exit the isolate.
class ThreadLocalTop BASE_EMBEDDED {
public:
// Does early low-level initialization that does not depend on the
// isolate being present.
ThreadLocalTop();
// Initialize the thread data.
void Initialize();
// Get the top C++ try catch handler or NULL if none are registered.
//
// This method is not guaranteed to return an address that can be
// used for comparison with addresses into the JS stack. If such an
// address is needed, use try_catch_handler_address.
v8::TryCatch* TryCatchHandler();
// Get the address of the top C++ try catch handler or NULL if
// none are registered.
//
// This method always returns an address that can be compared to
// pointers into the JavaScript stack. When running on actual
// hardware, try_catch_handler_address and TryCatchHandler return
// the same pointer. When running on a simulator with a separate JS
// stack, try_catch_handler_address returns a JS stack address that
// corresponds to the place on the JS stack where the C++ handler
// would have been if the stack were not separate.
inline Address try_catch_handler_address() {
return try_catch_handler_address_;
}
// Set the address of the top C++ try catch handler.
inline void set_try_catch_handler_address(Address address) {
try_catch_handler_address_ = address;
}
// Debug-asserts that all exception state has been cleared before the
// thread's resources are released.
void Free() {
ASSERT(!has_pending_message_);
ASSERT(!external_caught_exception_);
ASSERT(try_catch_handler_address_ == NULL);
}
Isolate* isolate_;
// The context where the current execution method is created and for variable
// lookups.
Context* context_;
ThreadId thread_id_;
MaybeObject* pending_exception_;
// Pending message to report alongside the pending exception.
bool has_pending_message_;
Object* pending_message_obj_;
Script* pending_message_script_;
int pending_message_start_pos_;
int pending_message_end_pos_;
// Use a separate value for scheduled exceptions to preserve the
// invariants that hold about pending_exception. We may want to
// unify them later.
MaybeObject* scheduled_exception_;
bool external_caught_exception_;
SaveContext* save_context_;
v8::TryCatch* catcher_;
// Stack.
Address c_entry_fp_; // the frame pointer of the top c entry frame
Address handler_; // try-blocks are chained through the stack
#ifdef USE_SIMULATOR
#if defined(V8_TARGET_ARCH_ARM) || defined(V8_TARGET_ARCH_MIPS)
Simulator* simulator_;
#endif
#endif // USE_SIMULATOR
Address js_entry_sp_; // the stack pointer of the bottom JS entry frame
Address external_callback_; // the external callback we're currently in
StateTag current_vm_state_;
// Generated code scratch locations.
int32_t formal_count_;
// Call back function to report unsafe JS accesses.
v8::FailedAccessCheckCallback failed_access_check_callback_;
// Head of the list of live LookupResults.
LookupResult* top_lookup_result_;
// Whether out of memory exceptions should be ignored.
bool ignore_out_of_memory_;
private:
void InitializeInternal();
Address try_catch_handler_address_;
};
// Debugger-only entries appended to ISOLATE_INIT_LIST below.
#ifdef ENABLE_DEBUGGER_SUPPORT
#define ISOLATE_DEBUGGER_INIT_LIST(V) \
V(v8::Debug::EventCallback, debug_event_callback, NULL) \
V(DebuggerAgent*, debugger_agent_instance, NULL)
#else
#define ISOLATE_DEBUGGER_INIT_LIST(V)
#endif
// Debug-only array entries appended to ISOLATE_INIT_ARRAY_LIST below.
#ifdef DEBUG
#define ISOLATE_INIT_DEBUG_ARRAY_LIST(V) \
V(CommentStatistic, paged_space_comments_statistics, \
CommentStatistic::kMaxComments + 1)
#else
#define ISOLATE_INIT_DEBUG_ARRAY_LIST(V)
#endif
// X-macro list of per-isolate global arrays: V(type, name, length).
// Expanded into backing storage and accessors by the Isolate class.
#define ISOLATE_INIT_ARRAY_LIST(V) \
/* SerializerDeserializer state. */ \
V(int32_t, jsregexp_static_offsets_vector, kJSRegexpStaticOffsetsVectorSize) \
V(int, bad_char_shift_table, kUC16AlphabetSize) \
V(int, good_suffix_shift_table, (kBMMaxShift + 1)) \
V(int, suffix_table, (kBMMaxShift + 1)) \
V(uint32_t, private_random_seed, 2) \
ISOLATE_INIT_DEBUG_ARRAY_LIST(V)
typedef List<HeapObject*, PreallocatedStorageAllocationPolicy> DebugObjectCache;
// X-macro list of per-isolate global scalars: V(type, name, initial_value).
// Expanded into backing storage and accessors by the Isolate class.
#define ISOLATE_INIT_LIST(V) \
/* SerializerDeserializer state. */ \
V(int, serialize_partial_snapshot_cache_length, 0) \
V(int, serialize_partial_snapshot_cache_capacity, 0) \
V(Object**, serialize_partial_snapshot_cache, NULL) \
/* Assembler state. */ \
/* A previously allocated buffer of kMinimalBufferSize bytes, or NULL. */ \
V(byte*, assembler_spare_buffer, NULL) \
V(FatalErrorCallback, exception_behavior, NULL) \
V(AllowCodeGenerationFromStringsCallback, allow_code_gen_callback, NULL) \
V(v8::Debug::MessageHandler, message_handler, NULL) \
/* To distinguish the function templates, so that we can find them in the */ \
/* function cache of the native context. */ \
V(int, next_serial_number, 0) \
V(ExternalReferenceRedirectorPointer*, external_reference_redirector, NULL) \
V(bool, always_allow_natives_syntax, false) \
/* Part of the state of liveedit. */ \
V(FunctionInfoListener*, active_function_info_listener, NULL) \
/* State for Relocatable. */ \
V(Relocatable*, relocatable_top, NULL) \
/* State for CodeEntry in profile-generator. */ \
V(CodeGenerator*, current_code_generator, NULL) \
V(bool, jump_target_compiling_deferred_code, false) \
V(DebugObjectCache*, string_stream_debug_object_cache, NULL) \
V(Object*, string_stream_current_security_token, NULL) \
/* TODO(isolates): Release this on destruction? */ \
V(int*, irregexp_interpreter_backtrack_stack_cache, NULL) \
/* Serializer state. */ \
V(ExternalReferenceTable*, external_reference_table, NULL) \
/* AstNode state. */ \
V(int, ast_node_id, 0) \
V(unsigned, ast_node_count, 0) \
/* SafeStackFrameIterator activations count. */ \
V(int, safe_stack_iterator_counter, 0) \
V(uint64_t, enabled_cpu_features, 0) \
V(CpuProfiler*, cpu_profiler, NULL) \
V(HeapProfiler*, heap_profiler, NULL) \
ISOLATE_DEBUGGER_INIT_LIST(V)
class Isolate {
// These forward declarations are required to make the friend declarations in
// PerIsolateThreadData work on some older versions of gcc.
class ThreadDataTable;
class EntryStackItem;
public:
~Isolate();
// A thread has a PerIsolateThreadData instance for each isolate that it has
// entered. That instance is allocated when the isolate is initially entered
// and reused on subsequent entries.
class PerIsolateThreadData {
public:
PerIsolateThreadData(Isolate* isolate, ThreadId thread_id)
: isolate_(isolate),
thread_id_(thread_id),
stack_limit_(0),
thread_state_(NULL),
#if !defined(__arm__) && defined(V8_TARGET_ARCH_ARM) || \
!defined(__mips__) && defined(V8_TARGET_ARCH_MIPS)
simulator_(NULL),
#endif
next_(NULL),
prev_(NULL) { }
Isolate* isolate() const { return isolate_; }
ThreadId thread_id() const { return thread_id_; }
// Stack limit recorded for this thread in this isolate.
void set_stack_limit(uintptr_t value) { stack_limit_ = value; }
uintptr_t stack_limit() const { return stack_limit_; }
ThreadState* thread_state() const { return thread_state_; }
void set_thread_state(ThreadState* value) { thread_state_ = value; }
#if !defined(__arm__) && defined(V8_TARGET_ARCH_ARM) || \
!defined(__mips__) && defined(V8_TARGET_ARCH_MIPS)
// Simulator for this thread (cross-architecture builds only).
Simulator* simulator() const { return simulator_; }
void set_simulator(Simulator* simulator) {
simulator_ = simulator;
}
#endif
// True if this entry belongs to the given (isolate, thread) pair; used by
// ThreadDataTable lookups.
bool Matches(Isolate* isolate, ThreadId thread_id) const {
return isolate_ == isolate && thread_id_.Equals(thread_id);
}
private:
Isolate* isolate_;
ThreadId thread_id_;
uintptr_t stack_limit_;
ThreadState* thread_state_;
#if !defined(__arm__) && defined(V8_TARGET_ARCH_ARM) || \
!defined(__mips__) && defined(V8_TARGET_ARCH_MIPS)
Simulator* simulator_;
#endif
// Intrusive doubly-linked list hooks for ThreadDataTable.
PerIsolateThreadData* next_;
PerIsolateThreadData* prev_;
friend class Isolate;
friend class ThreadDataTable;
friend class EntryStackItem;
DISALLOW_COPY_AND_ASSIGN(PerIsolateThreadData);
};
enum AddressId {
#define DECLARE_ENUM(CamelName, hacker_name) k##CamelName##Address,
FOR_EACH_ISOLATE_ADDRESS_NAME(DECLARE_ENUM)
#undef DECLARE_ENUM
kIsolateAddressCount
};
// Returns the PerIsolateThreadData for the current thread (or NULL if one is
// not currently set).
static PerIsolateThreadData* CurrentPerIsolateThreadData() {
return reinterpret_cast<PerIsolateThreadData*>(
Thread::GetThreadLocal(per_isolate_thread_data_key_));
}
// Returns the isolate inside which the current thread is running.
// Asserts (debug builds) that a thread-local isolate has been set.
INLINE(static Isolate* Current()) {
Isolate* isolate = reinterpret_cast<Isolate*>(
Thread::GetExistingThreadLocal(isolate_key_));
ASSERT(isolate != NULL);
return isolate;
}
// Like Current(), but may return NULL when no isolate has been entered.
INLINE(static Isolate* UncheckedCurrent()) {
return reinterpret_cast<Isolate*>(Thread::GetThreadLocal(isolate_key_));
}
// Usually called by Init(), but can be called early e.g. to allow
// testing components that require logging but not the whole
// isolate.
//
// Safe to call more than once.
void InitializeLoggingAndCounters();
bool Init(Deserializer* des);
bool IsInitialized() { return state_ == INITIALIZED; }
// True if at least one thread Enter'ed this isolate.
bool IsInUse() { return entry_stack_ != NULL; }
// Destroys the non-default isolates.
// Sets default isolate into "has_been_disposed" state rather then destroying,
// for legacy API reasons.
void TearDown();
bool IsDefaultIsolate() const { return this == default_isolate_; }
// Ensures that process-wide resources and the default isolate have been
// allocated. It is only necessary to call this method in rare cases, for
// example if you are using V8 from within the body of a static initializer.
// Safe to call multiple times.
static void EnsureDefaultIsolate();
// Find the PerThread for this particular (isolate, thread) combination
// If one does not yet exist, return null.
PerIsolateThreadData* FindPerThreadDataForThisThread();
#ifdef ENABLE_DEBUGGER_SUPPORT
// Get the debugger from the default isolate. Preinitializes the
// default isolate if needed.
static Debugger* GetDefaultIsolateDebugger();
#endif
// Get the stack guard from the default isolate. Preinitializes the
// default isolate if needed.
static StackGuard* GetDefaultIsolateStackGuard();
// Returns the key used to store the pointer to the current isolate.
// Used internally for V8 threads that do not execute JavaScript but still
// are part of the domain of an isolate (like the context switcher).
static Thread::LocalStorageKey isolate_key() {
return isolate_key_;
}
// Returns the key used to store process-wide thread IDs.
static Thread::LocalStorageKey thread_id_key() {
return thread_id_key_;
}
static Thread::LocalStorageKey per_isolate_thread_data_key();
// If a client attempts to create a Locker without specifying an isolate,
// we assume that the client is using legacy behavior. Set up the current
// thread to be inside the implicit isolate (or fail a check if we have
// switched to non-legacy behavior).
static void EnterDefaultIsolate();
// Mutex for serializing access to break control structures.
Mutex* break_access() { return break_access_; }
// Mutex for serializing access to debugger.
Mutex* debugger_access() { return debugger_access_; }
Address get_address_from_id(AddressId id);
// Access to top context (where the current function object was created).
Context* context() { return thread_local_top_.context_; }
void set_context(Context* context) {
ASSERT(context == NULL || context->IsContext());
thread_local_top_.context_ = context;
}
Context** context_address() { return &thread_local_top_.context_; }
SaveContext* save_context() {return thread_local_top_.save_context_; }
void set_save_context(SaveContext* save) {
thread_local_top_.save_context_ = save;
}
// Access to the map of "new Object()".
Map* empty_object_map() {
return context()->native_context()->object_function()->map();
}
// Access to current thread id.
ThreadId thread_id() { return thread_local_top_.thread_id_; }
void set_thread_id(ThreadId id) { thread_local_top_.thread_id_ = id; }
// Interface to pending exception.
MaybeObject* pending_exception() {
ASSERT(has_pending_exception());
return thread_local_top_.pending_exception_;
}
bool external_caught_exception() {
return thread_local_top_.external_caught_exception_;
}
void set_external_caught_exception(bool value) {
thread_local_top_.external_caught_exception_ = value;
}
void set_pending_exception(MaybeObject* exception) {
thread_local_top_.pending_exception_ = exception;
}
void clear_pending_exception() {
thread_local_top_.pending_exception_ = heap_.the_hole_value();
}
MaybeObject** pending_exception_address() {
return &thread_local_top_.pending_exception_;
}
bool has_pending_exception() {
return !thread_local_top_.pending_exception_->IsTheHole();
}
void clear_pending_message() {
thread_local_top_.has_pending_message_ = false;
thread_local_top_.pending_message_obj_ = heap_.the_hole_value();
thread_local_top_.pending_message_script_ = NULL;
}
v8::TryCatch* try_catch_handler() {
return thread_local_top_.TryCatchHandler();
}
Address try_catch_handler_address() {
return thread_local_top_.try_catch_handler_address();
}
bool* external_caught_exception_address() {
return &thread_local_top_.external_caught_exception_;
}
v8::TryCatch* catcher() {
return thread_local_top_.catcher_;
}
void set_catcher(v8::TryCatch* catcher) {
thread_local_top_.catcher_ = catcher;
}
MaybeObject** scheduled_exception_address() {
return &thread_local_top_.scheduled_exception_;
}
Address pending_message_obj_address() {
return reinterpret_cast<Address>(&thread_local_top_.pending_message_obj_);
}
Address has_pending_message_address() {
return reinterpret_cast<Address>(&thread_local_top_.has_pending_message_);
}
Address pending_message_script_address() {
return reinterpret_cast<Address>(
&thread_local_top_.pending_message_script_);
}
MaybeObject* scheduled_exception() {
ASSERT(has_scheduled_exception());
return thread_local_top_.scheduled_exception_;
}
bool has_scheduled_exception() {
return thread_local_top_.scheduled_exception_ != heap_.the_hole_value();
}
void clear_scheduled_exception() {
thread_local_top_.scheduled_exception_ = heap_.the_hole_value();
}
bool IsExternallyCaught();
bool is_catchable_by_javascript(MaybeObject* exception) {
return (exception != Failure::OutOfMemoryException()) &&
(exception != heap()->termination_exception());
}
// Serializer.
void PushToPartialSnapshotCache(Object* obj);
// JS execution stack (see frames.h).
static Address c_entry_fp(ThreadLocalTop* thread) {
return thread->c_entry_fp_;
}
static Address handler(ThreadLocalTop* thread) { return thread->handler_; }
inline Address* c_entry_fp_address() {
return &thread_local_top_.c_entry_fp_;
}
inline Address* handler_address() { return &thread_local_top_.handler_; }
// Bottom JS entry (see StackTracer::Trace in log.cc).
static Address js_entry_sp(ThreadLocalTop* thread) {
return thread->js_entry_sp_;
}
inline Address* js_entry_sp_address() {
return &thread_local_top_.js_entry_sp_;
}
// Generated code scratch locations.
void* formal_count_address() { return &thread_local_top_.formal_count_; }
// Returns the global object of the current context. It could be
// a builtin object, or a JS global object.
Handle<GlobalObject> global_object() {
return Handle<GlobalObject>(context()->global_object());
}
// Returns the global proxy object of the current context.
Object* global_proxy() {
return context()->global_proxy();
}
Handle<JSBuiltinsObject> js_builtins_object() {
return Handle<JSBuiltinsObject>(thread_local_top_.context_->builtins());
}
static int ArchiveSpacePerThread() { return sizeof(ThreadLocalTop); }
void FreeThreadResources() { thread_local_top_.Free(); }
// This method is called by the api after operations that may throw
// exceptions. If an exception was thrown and not handled by an external
// handler the exception is scheduled to be rethrown when we return to running
// JavaScript code. If an exception is scheduled true is returned.
bool OptionalRescheduleException(bool is_bottom_call);
// Scope that saves the isolate's pending exception and catcher on
// construction and restores both on destruction.
class ExceptionScope {
public:
explicit ExceptionScope(Isolate* isolate) :
// Scope currently can only be used for regular exceptions, not
// failures like OOM or termination exception.
isolate_(isolate),
pending_exception_(isolate_->pending_exception()->ToObjectUnchecked()),
catcher_(isolate_->catcher())
{ }
~ExceptionScope() {
isolate_->set_catcher(catcher_);
isolate_->set_pending_exception(*pending_exception_);
}
private:
Isolate* isolate_;
Handle<Object> pending_exception_;
v8::TryCatch* catcher_;
};
void SetCaptureStackTraceForUncaughtExceptions(
bool capture,
int frame_limit,
StackTrace::StackTraceOptions options);
// Tells whether the current context has experienced an out of memory
// exception.
bool is_out_of_memory();
bool ignore_out_of_memory() {
return thread_local_top_.ignore_out_of_memory_;
}
void set_ignore_out_of_memory(bool value) {
thread_local_top_.ignore_out_of_memory_ = value;
}
void PrintCurrentStackTrace(FILE* out);
void PrintStackTrace(FILE* out, char* thread_data);
void PrintStack(StringStream* accumulator);
void PrintStack();
Handle<String> StackTraceString();
NO_INLINE(void PushStackTraceAndDie(unsigned int magic,
Object* object,
Map* map,
unsigned int magic2));
Handle<JSArray> CaptureCurrentStackTrace(
int frame_limit,
StackTrace::StackTraceOptions options);
void CaptureAndSetCurrentStackTraceFor(Handle<JSObject> error_object);
// Returns if the top context may access the given global object. If
// the result is false, the pending exception is guaranteed to be
// set.
bool MayNamedAccess(JSObject* receiver,
Object* key,
v8::AccessType type);
bool MayIndexedAccess(JSObject* receiver,
uint32_t index,
v8::AccessType type);
void SetFailedAccessCheckCallback(v8::FailedAccessCheckCallback callback);
void ReportFailedAccessCheck(JSObject* receiver, v8::AccessType type);
// Exception throwing support. The caller should use the result
// of Throw() as its return value.
Failure* Throw(Object* exception, MessageLocation* location = NULL);
// Re-throw an exception. This involves no error reporting since
// error reporting was handled when the exception was thrown
// originally.
Failure* ReThrow(MaybeObject* exception);
void ScheduleThrow(Object* exception);
void ReportPendingMessages();
Failure* ThrowIllegalOperation();
// Promote a scheduled exception to pending. Asserts has_scheduled_exception.
Failure* PromoteScheduledException();
void DoThrow(Object* exception, MessageLocation* location);
// Checks if exception should be reported and finds out if it's
// caught externally.
bool ShouldReportException(bool* can_be_caught_externally,
bool catchable_by_javascript);
// Attempts to compute the current source location, storing the
// result in the target out parameter.
void ComputeLocation(MessageLocation* target);
// Override command line flag.
void TraceException(bool flag);
// Out of resource exception helpers.
Failure* StackOverflow();
Failure* TerminateExecution();
// Administration
void Iterate(ObjectVisitor* v);
void Iterate(ObjectVisitor* v, ThreadLocalTop* t);
char* Iterate(ObjectVisitor* v, char* t);
void IterateThread(ThreadVisitor* v);
void IterateThread(ThreadVisitor* v, char* t);
// Returns the current native and global context.
Handle<Context> native_context();
Handle<Context> global_context();
// Returns the native context of the calling JavaScript code. That
// is, the native context of the top-most JavaScript frame.
Handle<Context> GetCallingNativeContext();
void RegisterTryCatchHandler(v8::TryCatch* that);
void UnregisterTryCatchHandler(v8::TryCatch* that);
char* ArchiveThread(char* to);
char* RestoreThread(char* from);
static const char* const kStackOverflowMessage;
static const int kUC16AlphabetSize = 256; // See StringSearchBase.
static const int kBMMaxShift = 250; // See StringSearchBase.
// Accessors.
#define GLOBAL_ACCESSOR(type, name, initialvalue) \
inline type name() const { \
ASSERT(OFFSET_OF(Isolate, name##_) == name##_debug_offset_); \
return name##_; \
} \
inline void set_##name(type value) { \
ASSERT(OFFSET_OF(Isolate, name##_) == name##_debug_offset_); \
name##_ = value; \
}
ISOLATE_INIT_LIST(GLOBAL_ACCESSOR)
#undef GLOBAL_ACCESSOR
#define GLOBAL_ARRAY_ACCESSOR(type, name, length) \
inline type* name() { \
ASSERT(OFFSET_OF(Isolate, name##_) == name##_debug_offset_); \
return &(name##_)[0]; \
}
ISOLATE_INIT_ARRAY_LIST(GLOBAL_ARRAY_ACCESSOR)
#undef GLOBAL_ARRAY_ACCESSOR
#define NATIVE_CONTEXT_FIELD_ACCESSOR(index, type, name) \
Handle<type> name() { \
return Handle<type>(context()->native_context()->name()); \
}
NATIVE_CONTEXT_FIELDS(NATIVE_CONTEXT_FIELD_ACCESSOR)
#undef NATIVE_CONTEXT_FIELD_ACCESSOR
Bootstrapper* bootstrapper() { return bootstrapper_; }
Counters* counters() {
// Call InitializeLoggingAndCounters() if logging is needed before
// the isolate is fully initialized.
ASSERT(counters_ != NULL);
return counters_;
}
CodeRange* code_range() { return code_range_; }
RuntimeProfiler* runtime_profiler() { return runtime_profiler_; }
CompilationCache* compilation_cache() { return compilation_cache_; }
Logger* logger() {
// Call InitializeLoggingAndCounters() if logging is needed before
// the isolate is fully initialized.
ASSERT(logger_ != NULL);
return logger_;
}
StackGuard* stack_guard() { return &stack_guard_; }
Heap* heap() { return &heap_; }
StatsTable* stats_table();
StubCache* stub_cache() { return stub_cache_; }
DeoptimizerData* deoptimizer_data() { return deoptimizer_data_; }
ThreadLocalTop* thread_local_top() { return &thread_local_top_; }
TranscendentalCache* transcendental_cache() const {
return transcendental_cache_;
}
MemoryAllocator* memory_allocator() {
return memory_allocator_;
}
KeyedLookupCache* keyed_lookup_cache() {
return keyed_lookup_cache_;
}
ContextSlotCache* context_slot_cache() {
return context_slot_cache_;
}
DescriptorLookupCache* descriptor_lookup_cache() {
return descriptor_lookup_cache_;
}
v8::ImplementationUtilities::HandleScopeData* handle_scope_data() {
return &handle_scope_data_;
}
HandleScopeImplementer* handle_scope_implementer() {
ASSERT(handle_scope_implementer_);
return handle_scope_implementer_;
}
Zone* runtime_zone() { return &runtime_zone_; }
UnicodeCache* unicode_cache() {
return unicode_cache_;
}
InnerPointerToCodeCache* inner_pointer_to_code_cache() {
return inner_pointer_to_code_cache_;
}
StringInputBuffer* write_input_buffer() { return write_input_buffer_; }
GlobalHandles* global_handles() { return global_handles_; }
ThreadManager* thread_manager() { return thread_manager_; }
ContextSwitcher* context_switcher() { return context_switcher_; }
void set_context_switcher(ContextSwitcher* switcher) {
context_switcher_ = switcher;
}
StringTracker* string_tracker() { return string_tracker_; }
unibrow::Mapping<unibrow::Ecma262UnCanonicalize>* jsregexp_uncanonicalize() {
return &jsregexp_uncanonicalize_;
}
unibrow::Mapping<unibrow::CanonicalizationRange>* jsregexp_canonrange() {
return &jsregexp_canonrange_;
}
StringInputBuffer* objects_string_compare_buffer_a() {
return &objects_string_compare_buffer_a_;
}
StringInputBuffer* objects_string_compare_buffer_b() {
return &objects_string_compare_buffer_b_;
}
StaticResource<StringInputBuffer>* objects_string_input_buffer() {
return &objects_string_input_buffer_;
}
RuntimeState* runtime_state() { return &runtime_state_; }
void set_fp_stubs_generated(bool value) {
fp_stubs_generated_ = value;
}
bool fp_stubs_generated() { return fp_stubs_generated_; }
StaticResource<SafeStringInputBuffer>* compiler_safe_string_input_buffer() {
return &compiler_safe_string_input_buffer_;
}
Builtins* builtins() { return &builtins_; }
void NotifyExtensionInstalled() {
has_installed_extensions_ = true;
}
bool has_installed_extensions() { return has_installed_extensions_; }
unibrow::Mapping<unibrow::Ecma262Canonicalize>*
regexp_macro_assembler_canonicalize() {
return ®exp_macro_assembler_canonicalize_;
}
RegExpStack* regexp_stack() { return regexp_stack_; }
unibrow::Mapping<unibrow::Ecma262Canonicalize>*
interp_canonicalize_mapping() {
return &interp_canonicalize_mapping_;
}
void* PreallocatedStorageNew(size_t size);
void PreallocatedStorageDelete(void* p);
void PreallocatedStorageInit(size_t size);
#ifdef ENABLE_DEBUGGER_SUPPORT
Debugger* debugger() {
if (!NoBarrier_Load(&debugger_initialized_)) InitializeDebugger();
return debugger_;
}
Debug* debug() {
if (!NoBarrier_Load(&debugger_initialized_)) InitializeDebugger();
return debug_;
}
#endif
inline bool IsDebuggerActive();
inline bool DebuggerHasBreakPoints();
#ifdef DEBUG
HistogramInfo* heap_histograms() { return heap_histograms_; }
JSObject::SpillInformation* js_spill_information() {
return &js_spill_information_;
}
int* code_kind_statistics() { return code_kind_statistics_; }
#endif
#if defined(V8_TARGET_ARCH_ARM) && !defined(__arm__) || \
defined(V8_TARGET_ARCH_MIPS) && !defined(__mips__)
bool simulator_initialized() { return simulator_initialized_; }
void set_simulator_initialized(bool initialized) {
simulator_initialized_ = initialized;
}
HashMap* simulator_i_cache() { return simulator_i_cache_; }
void set_simulator_i_cache(HashMap* hash_map) {
simulator_i_cache_ = hash_map;
}
Redirection* simulator_redirection() {
return simulator_redirection_;
}
void set_simulator_redirection(Redirection* redirection) {
simulator_redirection_ = redirection;
}
#endif
Factory* factory() { return reinterpret_cast<Factory*>(this); }
static const int kJSRegexpStaticOffsetsVectorSize = 128;
Address external_callback() {
return thread_local_top_.external_callback_;
}
void set_external_callback(Address callback) {
thread_local_top_.external_callback_ = callback;
}
StateTag current_vm_state() {
return thread_local_top_.current_vm_state_;
}
// Records the VM state tag for this thread and, when the runtime profiler
// is enabled, notifies it about transitions into and out of JS execution
// (only JS <-> non-JS edges matter to the profiler).
void SetCurrentVMState(StateTag state) {
  if (RuntimeProfiler::IsEnabled()) {
    // Make sure thread local top is initialized.
    ASSERT(thread_local_top_.isolate_ == this);
    StateTag current_state = thread_local_top_.current_vm_state_;
    if (current_state != JS && state == JS) {
      // Non-JS -> JS transition.
      RuntimeProfiler::IsolateEnteredJS(this);
    } else if (current_state == JS && state != JS) {
      // JS -> non-JS transition.
      ASSERT(RuntimeProfiler::IsSomeIsolateInJS());
      RuntimeProfiler::IsolateExitedJS(this);
    } else {
      // Other types of state transitions are not interesting to the
      // runtime profiler, because they don't affect whether we're
      // in JS or not.
      ASSERT((current_state == JS) == (state == JS));
    }
  }
  // Store the new state regardless of whether the profiler was notified.
  thread_local_top_.current_vm_state_ = state;
}
void SetData(void* data) { embedder_data_ = data; }
void* GetData() { return embedder_data_; }
LookupResult* top_lookup_result() {
return thread_local_top_.top_lookup_result_;
}
void SetTopLookupResult(LookupResult* top) {
thread_local_top_.top_lookup_result_ = top;
}
bool context_exit_happened() {
return context_exit_happened_;
}
void set_context_exit_happened(bool context_exit_happened) {
context_exit_happened_ = context_exit_happened;
}
double time_millis_since_init() {
return OS::TimeCurrentMillis() - time_millis_at_init_;
}
DateCache* date_cache() {
return date_cache_;
}
// Replaces the isolate's date cache, deleting the previous one unless the
// caller passed the cache that is already installed (self-assignment safe).
void set_date_cache(DateCache* date_cache) {
  if (date_cache != date_cache_) {
    delete date_cache_;
  }
  date_cache_ = date_cache;
}
void IterateDeferredHandles(ObjectVisitor* visitor);
void LinkDeferredHandles(DeferredHandles* deferred_handles);
void UnlinkDeferredHandles(DeferredHandles* deferred_handles);
OptimizingCompilerThread* optimizing_compiler_thread() {
return &optimizing_compiler_thread_;
}
private:
Isolate();
friend struct GlobalState;
friend struct InitializeGlobalState;
enum State {
UNINITIALIZED, // Some components may not have been allocated.
INITIALIZED // All components are fully initialized.
};
// These fields are accessed through the API, offsets must be kept in sync
// with v8::internal::Internals (in include/v8.h) constants. This is also
// verified in Isolate::Init() using runtime checks.
State state_; // Will be padded to kApiPointerSize.
void* embedder_data_;
Heap heap_;
// The per-process lock should be acquired before the ThreadDataTable is
// modified.
// Process-wide table mapping (isolate, thread id) pairs to their
// PerIsolateThreadData.  Per the comment above, callers must hold the
// per-process lock before modifying it.  Backed by a singly linked list.
class ThreadDataTable {
 public:
  ThreadDataTable();
  ~ThreadDataTable();
  // Finds the entry for the given isolate/thread pair, or NULL.
  PerIsolateThreadData* Lookup(Isolate* isolate, ThreadId thread_id);
  void Insert(PerIsolateThreadData* data);
  void Remove(Isolate* isolate, ThreadId thread_id);
  void Remove(PerIsolateThreadData* data);
  // Drops every entry belonging to the given isolate.
  void RemoveAllThreads(Isolate* isolate);
 private:
  PerIsolateThreadData* list_;  // Head of the linked list of entries.
};
// These items form a stack synchronously with threads Enter'ing and Exit'ing
// the Isolate. The top of the stack points to a thread which is currently
// running the Isolate. When the stack is empty, the Isolate is considered
// not entered by any thread and can be Disposed.
// If the same thread enters the Isolate more then once, the entry_count_
// is incremented rather then a new item pushed to the stack.
// One frame of the Enter/Exit stack described above: remembers the thread
// data and isolate that were current before this Enter so Exit can restore
// them.  entry_count is bumped instead of pushing a new item when the same
// thread re-enters.
class EntryStackItem {
 public:
  EntryStackItem(PerIsolateThreadData* previous_thread_data,
                 Isolate* previous_isolate,
                 EntryStackItem* previous_item)
    : entry_count(1),
      previous_thread_data(previous_thread_data),
      previous_isolate(previous_isolate),
      previous_item(previous_item) { }
  int entry_count;                            // Re-entry count for this thread.
  PerIsolateThreadData* previous_thread_data; // Thread data before Enter.
  Isolate* previous_isolate;                  // Isolate before Enter.
  EntryStackItem* previous_item;              // Next frame down the stack.
 private:
  DISALLOW_COPY_AND_ASSIGN(EntryStackItem);
};
// This mutex protects highest_thread_id_, thread_data_table_ and
// default_isolate_.
static Mutex* process_wide_mutex_;
static Thread::LocalStorageKey per_isolate_thread_data_key_;
static Thread::LocalStorageKey isolate_key_;
static Thread::LocalStorageKey thread_id_key_;
static Isolate* default_isolate_;
static ThreadDataTable* thread_data_table_;
void Deinit();
static void SetIsolateThreadLocals(Isolate* isolate,
PerIsolateThreadData* data);
// Allocate and insert PerIsolateThreadData into the ThreadDataTable
// (regardless of whether such data already exists).
PerIsolateThreadData* AllocatePerIsolateThreadData(ThreadId thread_id);
// Find the PerThread for this particular (isolate, thread) combination.
// If one does not yet exist, allocate a new one.
PerIsolateThreadData* FindOrAllocatePerThreadDataForThisThread();
// PreInits and returns a default isolate. Needed when a new thread tries
// to create a Locker for the first time (the lock itself is in the isolate).
static Isolate* GetDefaultIsolateForLocking();
// Initializes the current thread to run this Isolate.
// Not thread-safe. Multiple threads should not Enter/Exit the same isolate
// at the same time, this should be prevented using external locking.
void Enter();
// Exits the current thread. The previosuly entered Isolate is restored
// for the thread.
// Not thread-safe. Multiple threads should not Enter/Exit the same isolate
// at the same time, this should be prevented using external locking.
void Exit();
void PreallocatedMemoryThreadStart();
void PreallocatedMemoryThreadStop();
void InitializeThreadLocal();
void PrintStackTrace(FILE* out, ThreadLocalTop* thread);
void MarkCompactPrologue(bool is_compacting,
ThreadLocalTop* archived_thread_data);
void MarkCompactEpilogue(bool is_compacting,
ThreadLocalTop* archived_thread_data);
void FillCache();
void PropagatePendingExceptionToExternalTryCatch();
void InitializeDebugger();
// Traverse prototype chain to find out whether the object is derived from
// the Error object.
bool IsErrorObject(Handle<Object> obj);
EntryStackItem* entry_stack_;
int stack_trace_nesting_level_;
StringStream* incomplete_message_;
// The preallocated memory thread singleton.
PreallocatedMemoryThread* preallocated_memory_thread_;
Address isolate_addresses_[kIsolateAddressCount + 1]; // NOLINT
NoAllocationStringAllocator* preallocated_message_space_;
Bootstrapper* bootstrapper_;
RuntimeProfiler* runtime_profiler_;
CompilationCache* compilation_cache_;
Counters* counters_;
CodeRange* code_range_;
Mutex* break_access_;
Atomic32 debugger_initialized_;
Mutex* debugger_access_;
Logger* logger_;
StackGuard stack_guard_;
StatsTable* stats_table_;
StubCache* stub_cache_;
DeoptimizerData* deoptimizer_data_;
ThreadLocalTop thread_local_top_;
bool capture_stack_trace_for_uncaught_exceptions_;
int stack_trace_for_uncaught_exceptions_frame_limit_;
StackTrace::StackTraceOptions stack_trace_for_uncaught_exceptions_options_;
TranscendentalCache* transcendental_cache_;
MemoryAllocator* memory_allocator_;
KeyedLookupCache* keyed_lookup_cache_;
ContextSlotCache* context_slot_cache_;
DescriptorLookupCache* descriptor_lookup_cache_;
v8::ImplementationUtilities::HandleScopeData handle_scope_data_;
HandleScopeImplementer* handle_scope_implementer_;
UnicodeCache* unicode_cache_;
Zone runtime_zone_;
PreallocatedStorage in_use_list_;
PreallocatedStorage free_list_;
bool preallocated_storage_preallocated_;
InnerPointerToCodeCache* inner_pointer_to_code_cache_;
StringInputBuffer* write_input_buffer_;
GlobalHandles* global_handles_;
ContextSwitcher* context_switcher_;
ThreadManager* thread_manager_;
RuntimeState runtime_state_;
bool fp_stubs_generated_;
StaticResource<SafeStringInputBuffer> compiler_safe_string_input_buffer_;
Builtins builtins_;
bool has_installed_extensions_;
StringTracker* string_tracker_;
unibrow::Mapping<unibrow::Ecma262UnCanonicalize> jsregexp_uncanonicalize_;
unibrow::Mapping<unibrow::CanonicalizationRange> jsregexp_canonrange_;
StringInputBuffer objects_string_compare_buffer_a_;
StringInputBuffer objects_string_compare_buffer_b_;
StaticResource<StringInputBuffer> objects_string_input_buffer_;
unibrow::Mapping<unibrow::Ecma262Canonicalize>
regexp_macro_assembler_canonicalize_;
RegExpStack* regexp_stack_;
DateCache* date_cache_;
unibrow::Mapping<unibrow::Ecma262Canonicalize> interp_canonicalize_mapping_;
// The garbage collector should be a little more aggressive when it knows
// that a context was recently exited.
bool context_exit_happened_;
// Time stamp at initialization.
double time_millis_at_init_;
#if defined(V8_TARGET_ARCH_ARM) && !defined(__arm__) || \
defined(V8_TARGET_ARCH_MIPS) && !defined(__mips__)
bool simulator_initialized_;
HashMap* simulator_i_cache_;
Redirection* simulator_redirection_;
#endif
#ifdef DEBUG
// A static array of histogram info for each type.
HistogramInfo heap_histograms_[LAST_TYPE + 1];
JSObject::SpillInformation js_spill_information_;
int code_kind_statistics_[Code::NUMBER_OF_KINDS];
#endif
#ifdef ENABLE_DEBUGGER_SUPPORT
Debugger* debugger_;
Debug* debug_;
#endif
#define GLOBAL_BACKING_STORE(type, name, initialvalue) \
type name##_;
ISOLATE_INIT_LIST(GLOBAL_BACKING_STORE)
#undef GLOBAL_BACKING_STORE
#define GLOBAL_ARRAY_BACKING_STORE(type, name, length) \
type name##_[length];
ISOLATE_INIT_ARRAY_LIST(GLOBAL_ARRAY_BACKING_STORE)
#undef GLOBAL_ARRAY_BACKING_STORE
#ifdef DEBUG
// This class is huge and has a number of fields controlled by
// preprocessor defines. Make sure the offsets of these fields agree
// between compilation units.
#define ISOLATE_FIELD_OFFSET(type, name, ignored) \
static const intptr_t name##_debug_offset_;
ISOLATE_INIT_LIST(ISOLATE_FIELD_OFFSET)
ISOLATE_INIT_ARRAY_LIST(ISOLATE_FIELD_OFFSET)
#undef ISOLATE_FIELD_OFFSET
#endif
DeferredHandles* deferred_handles_head_;
OptimizingCompilerThread optimizing_compiler_thread_;
friend class ExecutionAccess;
friend class HandleScopeImplementer;
friend class IsolateInitializer;
friend class OptimizingCompilerThread;
friend class ThreadManager;
friend class Simulator;
friend class StackGuard;
friend class ThreadId;
friend class TestMemoryAllocatorScope;
friend class v8::Isolate;
friend class v8::Locker;
friend class v8::Unlocker;
DISALLOW_COPY_AND_ASSIGN(Isolate);
};
// If the GCC version is 4.1.x or 4.2.x an additional field is added to the
// class as a work around for a bug in the generated code found with these
// versions of GCC. See V8 issue 122 for details.
// Stack-allocated scope that saves the isolate's current context on
// construction (constructor defined elsewhere) and restores it — together
// with the save-context chain — on destruction.
class SaveContext BASE_EMBEDDED {
 public:
  inline explicit SaveContext(Isolate* isolate);
  ~SaveContext() {
    if (context_.is_null()) {
      // No context was active when the scope was entered.
      Isolate* isolate = Isolate::Current();
      isolate->set_context(NULL);
      isolate->set_save_context(prev_);
    } else {
      // Restore the saved context on the isolate it belongs to.
      Isolate* isolate = context_->GetIsolate();
      isolate->set_context(*context_);
      isolate->set_save_context(prev_);
    }
  }
  Handle<Context> context() { return context_; }
  SaveContext* prev() { return prev_; }
  // Returns true if this save context is below a given JavaScript frame.
  bool IsBelowFrame(JavaScriptFrame* frame) {
    return (c_entry_fp_ == 0) || (c_entry_fp_ > frame->sp());
  }
 private:
  Handle<Context> context_;  // Context saved at construction; may be null.
#if __GNUC_VERSION__ >= 40100 && __GNUC_VERSION__ < 40300
  // Work around a code generation bug in GCC 4.1.x/4.2.x (see the comment
  // above this class; V8 issue 122).
  Handle<Context> dummy_;
#endif
  SaveContext* prev_;     // Next save-context down the chain.
  Address c_entry_fp_;    // C entry frame pointer at construction time.
};
// Debug-only scope asserting that the isolate's current context is the
// same at destruction as it was at construction.  Compiles to an empty
// class in release builds.
class AssertNoContextChange BASE_EMBEDDED {
#ifdef DEBUG
 public:
  AssertNoContextChange() :
    scope_(Isolate::Current()),
    context_(Isolate::Current()->context(), Isolate::Current()) {
  }
  ~AssertNoContextChange() {
    ASSERT(Isolate::Current()->context() == *context_);
  }
 private:
  HandleScope scope_;        // Keeps context_ alive for the scope's lifetime.
  Handle<Context> context_;  // Context captured at construction.
#else
 public:
  AssertNoContextChange() { }
#endif
};
// RAII guard around the isolate's break_access_ mutex: locks it on
// construction and unlocks on destruction.  The static Lock/Unlock/TryLock
// helpers expose the same mutex for non-scoped use.
class ExecutionAccess BASE_EMBEDDED {
 public:
  explicit ExecutionAccess(Isolate* isolate) : isolate_(isolate) {
    Lock(isolate);
  }
  ~ExecutionAccess() { Unlock(isolate_); }
  static void Lock(Isolate* isolate) { isolate->break_access_->Lock(); }
  static void Unlock(Isolate* isolate) { isolate->break_access_->Unlock(); }
  static bool TryLock(Isolate* isolate) {
    return isolate->break_access_->TryLock();
  }
 private:
  Isolate* isolate_;  // Remembered so the destructor can unlock.
};
// Support for checking for stack-overflows in C++ code.
// Support for checking for stack-overflows in C++ code.  The address of
// this stack-allocated object is used as an approximation of the current
// stack pointer and compared against the stack guard's real C++ limit.
class StackLimitCheck BASE_EMBEDDED {
 public:
  explicit StackLimitCheck(Isolate* isolate) : isolate_(isolate) { }
  // Returns true if the current stack position is past the guard limit
  // (assumes a downward-growing stack).
  inline bool HasOverflowed() const {
    StackGuard* stack_guard = isolate_->stack_guard();
    return reinterpret_cast<uintptr_t>(this) < stack_guard->real_climit();
  }
 private:
  Isolate* isolate_;
};
// Support for temporarily postponing interrupts. When the outermost
// postpone scope is left the interrupts will be re-enabled and any
// interrupts that occurred while in the scope will be taken into
// account.
// RAII scope that disables stack-guard interrupts while alive.  Nesting is
// counted; interrupts are re-enabled only when the outermost scope exits
// (postponed interrupts are then taken into account, per the comment above).
class PostponeInterruptsScope BASE_EMBEDDED {
 public:
  explicit PostponeInterruptsScope(Isolate* isolate)
      : stack_guard_(isolate->stack_guard()) {
    stack_guard_->thread_local_.postpone_interrupts_nesting_++;
    stack_guard_->DisableInterrupts();
  }
  ~PostponeInterruptsScope() {
    // Only the outermost scope re-enables interrupts.
    if (--stack_guard_->thread_local_.postpone_interrupts_nesting_ == 0) {
      stack_guard_->EnableInterrupts();
    }
  }
 private:
  StackGuard* stack_guard_;
};
// Temporary macros for accessing current isolate and its subobjects.
// They provide better readability, especially when used a lot in the code.
#define HEAP (v8::internal::Isolate::Current()->heap())
#define FACTORY (v8::internal::Isolate::Current()->factory())
#define ISOLATE (v8::internal::Isolate::Current())
#define LOGGER (v8::internal::Isolate::Current()->logger())
// Tells whether the native context is marked with out of memory.
// Tells whether the native context has been marked with the out-of-memory
// flag (see Context::mark_out_of_memory).
inline bool Context::has_out_of_memory() {
  return native_context()->out_of_memory()->IsTrue();
}
// Mark the native context with out of memory.
// Marks the native context as having experienced an out-of-memory
// condition by setting its out_of_memory slot to the true value.
inline void Context::mark_out_of_memory() {
  native_context()->set_out_of_memory(HEAP->true_value());
}
} } // namespace v8::internal
#endif // V8_ISOLATE_H_
|
2245_0
|
crossvul
|
h
|
CWE-119
|
Improper Restriction of Operations within the Bounds of a Memory Buffer - Certain languages allow direct addressing of memory locations and do not automatically ensure that these locations are valid for the memory buffer that is being referenced.
|
c
|
/* xdelta3 - delta compression tools and library
* Copyright (C) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008,
* 2009, 2010, 2011, 2012, 2013 Joshua P. MacDonald
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
/* This is all the extra stuff you need for convenience to users in a
* command line application. It contains these major components:
*
* 1. VCDIFF tools 2. external compression support (this is
* POSIX-specific). 3. a general read/write loop that handles all of
* the Xdelta decode/encode/VCDIFF-print functions 4. command-line
* interpreter 5. an Xdelta application header which stores default
* filename, external compression settings 6. output/error printing
* 7. basic file support and OS interface
*/
/* TODO list: 1. do exact gzip-like filename, stdout handling. make a
* .vcdiff extension, refuse to encode to stdout without -cf, etc.
* 2. Allow the user to add a comment string to the app header without
* disturbing the default behavior.
*/
/* On error handling and printing:
*
* The xdelta library sets stream->msg to indicate what condition
* caused an internal failure, but many failures originate here and
* are printed here. The return convention is 0 for success, as
* throughout Xdelta code, but special attention is required here for
* the operating system calls with different error handling. See the
* main_file_* routines. All errors in this file have a message
* printed at the time of occurance. Since some of these calls occur
* within calls to the library, the error may end up being printed
* again with a more general error message.
*/
/*********************************************************************/
#ifndef XD3_POSIX
#define XD3_POSIX 0
#endif
#ifndef XD3_STDIO
#define XD3_STDIO 0
#endif
#ifndef XD3_WIN32
#define XD3_WIN32 0
#endif
#ifndef NOT_MAIN
#define NOT_MAIN 0
#endif
/* Combines xd3_strerror() and strerror() */
const char* xd3_mainerror(int err_num);
#include "xdelta3-internal.h"
/* Wrapper around vsnprintf_func with varargs.  On a formatting error
 * (negative return) the buffer size n is returned instead, so callers
 * always see a non-negative count. */
int
xsnprintf_func (char *str, int n, const char *fmt, ...)
{
  va_list args;
  int written;

  va_start (args, fmt);
  written = vsnprintf_func (str, n, fmt, args);
  va_end (args);

  return (written < 0) ? n : written;
}
/* If none are set, default to posix. */
#if (XD3_POSIX + XD3_STDIO + XD3_WIN32) == 0
#undef XD3_POSIX
#define XD3_POSIX 1
#endif
/* Handle externally-compressed inputs. */
#ifndef EXTERNAL_COMPRESSION
#define EXTERNAL_COMPRESSION 1
#endif
#define PRINTHDR_SPECIAL -4378291
/* The number of soft-config variables. */
#define XD3_SOFTCFG_VARCNT 7
/* this is used as in XPR(NT XD3_LIB_ERRMSG (stream, ret)) to print an
* error message from the library. */
#define XD3_LIB_ERRMSG(stream, ret) "%s: %s\n", \
xd3_errstring (stream), xd3_mainerror (ret)
#if XD3_POSIX
#include <unistd.h> /* close, read, write... */
#include <sys/types.h>
#include <fcntl.h>
#endif
#ifndef _WIN32
#include <unistd.h> /* lots */
#include <sys/time.h> /* gettimeofday() */
#include <sys/stat.h> /* stat() and fstat() */
#else
#if defined(_MSC_VER)
#define strtoll _strtoi64
#endif
#include <sys/types.h>
#include <sys/stat.h>
#ifndef WIFEXITED
# define WIFEXITED(stat) (((*((int *) &(stat))) & 0xff) == 0)
#endif
#ifndef WEXITSTATUS
# define WEXITSTATUS(stat) (((*((int *) &(stat))) >> 8) & 0xff)
#endif
#ifndef S_ISREG
//# ifdef S_IFREG
//# define S_ISREG(m) (((m) & S_IFMT) == S_IFREG)
//# else
# define S_ISREG(m) 1
//# endif
#endif /* !S_ISREG */
// For standard input/output handles
static STARTUPINFO winStartupInfo;
#endif
/**********************************************************************
ENUMS and TYPES
*********************************************************************/
/* These flags (mainly pertaining to main_read() operations) are set
* in the main_file->flags variable. All are related to with external
* decompression support.
*
* RD_FIRST causes the external decompression check when the input is
* first read.
*
* RD_NONEXTERNAL disables external decompression for reading a
* compressed input, in the case of Xdelta inputs. Note: Xdelta is
* supported as an external compression type, which makes is the
* reason for this flag. An example to justify this is: to create a
* delta between two files that are VCDIFF-compressed. Two external
* Xdelta decoders are run to supply decompressed source and target
* inputs to the Xdelta encoder. */
typedef enum
{
RD_FIRST = (1 << 0),
RD_NONEXTERNAL = (1 << 1),
RD_DECOMPSET = (1 << 2),
RD_MAININPUT = (1 << 3),
} xd3_read_flags;
/* Main commands. For example, CMD_PRINTHDR is the "xdelta printhdr"
* command. */
typedef enum
{
CMD_NONE = 0,
CMD_PRINTHDR,
CMD_PRINTHDRS,
CMD_PRINTDELTA,
CMD_RECODE,
CMD_MERGE_ARG,
CMD_MERGE,
#if XD3_ENCODER
CMD_ENCODE,
#endif
CMD_DECODE,
CMD_TEST,
CMD_CONFIG,
} xd3_cmd;
#if XD3_ENCODER
#define CMD_DEFAULT CMD_ENCODE
#define IS_ENCODE(cmd) (cmd == CMD_ENCODE)
#else
#define CMD_DEFAULT CMD_DECODE
#define IS_ENCODE(cmd) (0)
#endif
typedef struct _main_merge main_merge;
typedef struct _main_merge_list main_merge_list;
/* Various strings and magic values used to detect and call external
* compression. See below for examples. */
struct _main_extcomp
{
const char *recomp_cmdname;
const char *recomp_options;
const char *decomp_cmdname;
const char *decomp_options;
const char *ident;
const char *magic;
usize_t magic_size;
int flags;
};
/* Merge state: */
struct _main_merge_list
{
main_merge_list *next;
main_merge_list *prev;
};
struct _main_merge
{
const char *filename;
main_merge_list link;
};
XD3_MAKELIST(main_merge_list,main_merge,link);
/* TODO: really need to put options in a struct so that internal
* callers can easily reset state. */
#define DEFAULT_VERBOSE 0
/* Program options: various command line flags and options. */
static int option_stdout = 0;
static int option_force = 0;
static int option_verbose = DEFAULT_VERBOSE;
static int option_quiet = 0;
static int option_use_appheader = 1;
static uint8_t* option_appheader = NULL;
static int option_use_secondary = 0;
static const char* option_secondary = NULL;
static int option_use_checksum = 1;
static int option_use_altcodetable = 0;
static const char* option_smatch_config = NULL;
static int option_no_compress = 0;
/* Command-line option state.  These are file-scope so that
 * reset_defaults() can restore them between in-process runs. */
static int option_no_output = 0; /* do not write output */
static const char *option_source_filename = NULL;
static int option_level = XD3_DEFAULT_LEVEL;
static usize_t option_iopt_size = XD3_DEFAULT_IOPT_SIZE;
static usize_t option_winsize = XD3_DEFAULT_WINSIZE;
/* Note: option_srcwinsz is restricted from [16Kb, 4Gb], because
 * addresses in the large hash checksum are 32 bits. The flag is read
 * as xoff_t, so that 4Gb != 0. */
static xoff_t option_srcwinsz = XD3_DEFAULT_SRCWINSZ;
static usize_t option_sprevsz = XD3_DEFAULT_SPREVSZ;
/* These variables are supressed to avoid their use w/o support. main() warns
 * appropriately when external compression is not enabled. */
#if EXTERNAL_COMPRESSION
static int num_subprocs = 0;
static int option_force2 = 0;
static int option_decompress_inputs = 1;
static int option_recompress_outputs = 1;
#endif
/* This is for comparing "printdelta" output without attention to
 * copy-instruction modes. */
#if VCDIFF_TOOLS
static int option_print_cpymode = 1; /* Note: see reset_defaults(). */
#endif
/* Static variables */
IF_DEBUG(static int main_mallocs = 0;)  /* balance counter for main_malloc/main_free */
static char* program_name = NULL;
static uint8_t* appheader_used = NULL;
static uint8_t* main_bdata = NULL;      /* shared input buffer */
static usize_t main_bsize = 0;          /* size of main_bdata */
/* Hacks for VCDIFF tools, recode command. */
static int allow_fake_source = 0;
/* recode_stream is used by both recode/merge for reading vcdiff inputs */
static xd3_stream *recode_stream = NULL;
/* merge_stream is used by merge commands for storing the source encoding */
static xd3_stream *merge_stream = NULL;
/* This array of compressor types is compiled even if EXTERNAL_COMPRESSION is
 * false just so the program knows the mapping of IDENT->NAME.
 * Fields: recompress cmd/args, decompress cmd/args, ident letter,
 * magic bytes, number of magic bytes compared, flags.
 * NOTE(review): the xz magic string is 6 bytes but only 2 are compared
 * here -- presumably intentional to limit the lookahead; confirm. */
static main_extcomp extcomp_types[] =
{
  { "bzip2", "-c", "bzip2", "-dc", "B", "BZh", 3, 0 },
  { "gzip", "-c", "gzip", "-dc", "G", "\037\213", 2, 0 },
  { "compress", "-c", "uncompress", "-c", "Z", "\037\235", 2, 0 },
  /* Xz is lzma with a magic number http://tukaani.org/xz/format.html */
  { "xz", "-c", "xz", "-dc", "Y", "\xfd\x37\x7a\x58\x5a\x00", 2, 0 },
};
/* Forward declarations for routines defined later in this unit. */
static int main_input (xd3_cmd cmd, main_file *ifile,
                       main_file *ofile, main_file *sfile);
static void main_get_appheader (xd3_stream *stream, main_file *ifile,
				main_file *output, main_file *sfile);
static int main_getblk_func (xd3_stream *stream,
			     xd3_source *source,
			     xoff_t blkno);
static void main_free (void *ptr);
static void* main_malloc (size_t size);
static int main_file_stat (main_file *xfile, xoff_t *size);
static int main_file_seek (main_file *xfile, xoff_t pos);
static int main_read_primary_input (main_file *file,
				    uint8_t *buf,
				    size_t size,
				    size_t *nread);
static const char* main_format_bcnt (xoff_t r, shortbuf *buf);
static int main_help (void);
#if XD3_ENCODER
static int xd3_merge_input_output (xd3_stream *stream,
				   xd3_whole_state *source);
#endif
/* The code in xdelta3-blk.h is essentially part of this unit, see
 * comments there. */
#include "xdelta3-blkcache.h"
/* Optional hook: when non-NULL, formatted messages are delivered here
 * instead of being written to stderr. */
void (*xprintf_message_func)(const char*msg) = NULL;

/* printf-style message output.  Formats into a fixed 1000-byte buffer
 * and either forwards the string to xprintf_message_func or writes it
 * to stderr.  Long messages are truncated. */
void
xprintf (const char *fmt, ...)
{
  char buf[1000];
  va_list a;
  int size;

  va_start (a, fmt);
  size = vsnprintf_func (buf, 1000, fmt, a);
  va_end (a);

  /* FIX: vsnprintf returns a negative value on encoding errors and the
   * would-have-been length on truncation.  Previously only size < 0 was
   * clamped, so a truncated message (size >= 1000) caused fwrite below
   * to read past the end of buf.  Clamp both cases. */
  if (size < 0 || size >= (int) sizeof (buf))
    {
      size = sizeof(buf) - 1;
      buf[size] = 0;
    }

  if (xprintf_message_func != NULL) {
    xprintf_message_func(buf);
  } else {
    size_t ignore = fwrite(buf, 1, (size_t) size, stderr);
    (void) ignore;
  }
}
/* Prints the version banner and license notice.  Returns EXIT_SUCCESS.
 * The $Format$ comment is a keyword-expansion template used by the
 * release tooling; keep it in sync with the literal line below. */
static int
main_version (void)
{
  /* $Format: " XPR(NTR \"Xdelta version $Xdelta3Version$, Copyright (C) 2007, 2008, 2009, 2010, 2011, 2012, 2013, Joshua MacDonald\\n\");" $ */
  XPR(NTR "Xdelta version 3.0.8, Copyright (C) 2007, 2008, 2009, 2010, 2011, 2012, 2013 Joshua MacDonald\n");
  XPR(NTR "Xdelta comes with ABSOLUTELY NO WARRANTY.\n");
  XPR(NTR "This is free software, and you are welcome to redistribute it\n");
  XPR(NTR "under certain conditions; see \"COPYING\" for details.\n");
  return EXIT_SUCCESS;
}
/* Prints the compile-time configuration: the version banner followed by
 * every feature macro and the sizes of the basic integer types.
 * Implements the "config" command; returns EXIT_SUCCESS. */
static int
main_config (void)
{
  main_version ();

  XPR(NTR "EXTERNAL_COMPRESSION=%d\n", EXTERNAL_COMPRESSION);
  XPR(NTR "GENERIC_ENCODE_TABLES=%d\n", GENERIC_ENCODE_TABLES);
  XPR(NTR "GENERIC_ENCODE_TABLES_COMPUTE=%d\n", GENERIC_ENCODE_TABLES_COMPUTE);
  XPR(NTR "REGRESSION_TEST=%d\n", REGRESSION_TEST);
  XPR(NTR "SECONDARY_DJW=%d\n", SECONDARY_DJW);
  XPR(NTR "SECONDARY_FGK=%d\n", SECONDARY_FGK);
  XPR(NTR "SECONDARY_LZMA=%d\n", SECONDARY_LZMA);
  XPR(NTR "UNALIGNED_OK=%d\n", UNALIGNED_OK);
  XPR(NTR "VCDIFF_TOOLS=%d\n", VCDIFF_TOOLS);
  XPR(NTR "XD3_ALLOCSIZE=%d\n", XD3_ALLOCSIZE);
  XPR(NTR "XD3_DEBUG=%d\n", XD3_DEBUG);
  XPR(NTR "XD3_ENCODER=%d\n", XD3_ENCODER);
  XPR(NTR "XD3_POSIX=%d\n", XD3_POSIX);
  XPR(NTR "XD3_STDIO=%d\n", XD3_STDIO);
  XPR(NTR "XD3_WIN32=%d\n", XD3_WIN32);
  XPR(NTR "XD3_USE_LARGEFILE64=%d\n", XD3_USE_LARGEFILE64);
  XPR(NTR "XD3_DEFAULT_LEVEL=%d\n", XD3_DEFAULT_LEVEL);
  XPR(NTR "XD3_DEFAULT_IOPT_SIZE=%d\n", XD3_DEFAULT_IOPT_SIZE);
  XPR(NTR "XD3_DEFAULT_SPREVSZ=%d\n", XD3_DEFAULT_SPREVSZ);
  XPR(NTR "XD3_DEFAULT_SRCWINSZ=%d\n", XD3_DEFAULT_SRCWINSZ);
  XPR(NTR "XD3_DEFAULT_WINSIZE=%d\n", XD3_DEFAULT_WINSIZE);
  XPR(NTR "XD3_HARDMAXWINSIZE=%d\n", XD3_HARDMAXWINSIZE);
  XPR(NTR "sizeof(void*)=%d\n", (int)sizeof(void*));
  XPR(NTR "sizeof(int)=%d\n", (int)sizeof(int));
  XPR(NTR "sizeof(size_t)=%d\n", (int)sizeof(size_t));
  XPR(NTR "sizeof(uint32_t)=%d\n", (int)sizeof(uint32_t));
  XPR(NTR "sizeof(uint64_t)=%d\n", (int)sizeof(uint64_t));
  XPR(NTR "sizeof(usize_t)=%d\n", (int)sizeof(usize_t));
  XPR(NTR "sizeof(xoff_t)=%d\n", (int)sizeof(xoff_t));

  return EXIT_SUCCESS;
}
/* Restores every file-scope option and state variable to its
 * compiled-in default.  Called between in-process invocations (e.g.,
 * per -m merge argument and in the regression tests) so that one run's
 * settings do not leak into the next.
 * FIX: a duplicated "option_smatch_config = NULL;" assignment was
 * removed (it appeared twice in the original). */
static void
reset_defaults(void)
{
  option_stdout = 0;
  option_force = 0;
  option_verbose = DEFAULT_VERBOSE;
  option_quiet = 0;
  option_appheader = NULL;
  option_use_secondary = 0;
  option_secondary = NULL;
  option_use_altcodetable = 0;
  option_smatch_config = NULL;
  option_no_compress = 0;
  option_no_output = 0;
  option_source_filename = NULL;

  program_name = NULL;
  appheader_used = NULL;
  main_bdata = NULL;
  main_bsize = 0;
  allow_fake_source = 0;
  main_lru_reset();
  option_use_appheader = 1;
  option_use_checksum = 1;
#if EXTERNAL_COMPRESSION
  option_force2 = 0;
  option_decompress_inputs = 1;
  option_recompress_outputs = 1;
  num_subprocs = 0;
#endif
#if VCDIFF_TOOLS
  option_print_cpymode = 1;
#endif
  option_level = XD3_DEFAULT_LEVEL;
  option_iopt_size = XD3_DEFAULT_IOPT_SIZE;
  option_winsize = XD3_DEFAULT_WINSIZE;
  option_srcwinsz = XD3_DEFAULT_SRCWINSZ;
  option_sprevsz = XD3_DEFAULT_SPREVSZ;
}
/* Thin wrapper over malloc() that reports allocation failure via the
 * message stream.  Returns the new block or NULL. */
static void*
main_malloc1 (size_t size)
{
  void* mem = malloc (size);
  if (mem == NULL)
    {
      XPR(NT "malloc: %s\n", xd3_mainerror (ENOMEM));
    }
  return mem;
}
/* Allocates a large I/O buffer.  On Windows this uses VirtualAlloc
 * (page-granular, zero-filled); elsewhere it is plain malloc via
 * main_malloc1.  Pair with main_buffree(), not main_free(). */
void* main_bufalloc (size_t size) {
#if XD3_WIN32
  return VirtualAlloc(NULL, size, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
#else
  return main_malloc1(size);
#endif
}
/* Tracked allocation: like main_malloc1, but bumps the debug balance
 * counter on success so main_free() can verify pairing. */
static void*
main_malloc (size_t size)
{
  void *mem = main_malloc1 (size);
  if (mem != NULL)
    {
      IF_DEBUG (main_mallocs += 1);
    }
  return mem;
}
/* xd3_stream allocation callback (xd3_config.alloc signature).
 * OPAQUE is unused.  Allocates ITEMS * SIZE bytes.
 * FIX: guard the multiplication against size_t overflow (unsigned
 * wrap is well-defined, so the post-hoc division check is exact);
 * previously an overflowed product could yield a too-small block. */
static void*
main_alloc (void *opaque,
	    size_t items,
	    usize_t size)
{
  size_t total = items * (size_t) size;
  if (size != 0 && total / size != items)
    {
      return NULL;
    }
  return main_malloc1 (total);
}
/* xd3_stream free callback (xd3_config.freef signature).  OPAQUE is
 * unused; simply releases PTR. */
static void
main_free1 (void *opaque, void *ptr)
{
  free (ptr);
}
/* Releases a block obtained from main_malloc(), updating the debug
 * balance counter and asserting it never goes negative.  NULL is a
 * no-op. */
static void
main_free (void *ptr)
{
  if (ptr == NULL)
    {
      return;
    }
  IF_DEBUG (main_mallocs -= 1);
  main_free1 (NULL, ptr);
  IF_DEBUG (XD3_ASSERT(main_mallocs >= 0));
}
/* Frees a buffer obtained from main_bufalloc(): VirtualFree on
 * Windows, plain free elsewhere. */
void main_buffree (void *ptr) {
#if XD3_WIN32
  VirtualFree(ptr, 0, MEM_RELEASE);
#else
  main_free1(NULL, ptr);
#endif
}
/* Returns the current system error code, guaranteed nonzero.  This
 * ensures that (ret = errno) always indicates failure, in case errno
 * was accidentally not set.  If this prints there's a bug somewhere. */
static int
get_errno (void)
{
#ifndef _WIN32
  if (errno == 0)
    {
      XPR(NT "you found a bug: expected errno != 0\n");
      errno = XD3_INTERNAL;
    }
  return errno;
#else
  /* On Windows the thread-local last-error value plays the errno
   * role; substitute XD3_INTERNAL when no error was recorded. */
  DWORD err_num = GetLastError();
  if (err_num == NO_ERROR)
    {
      err_num = XD3_INTERNAL;
    }
  return err_num;
#endif
}
/* Maps an error code to a human-readable string: the xdelta error
 * table first, then the system's message.  On Windows the result may
 * live in a static buffer, which makes this non-reentrant there. */
const char*
xd3_mainerror(int err_num) {
#ifndef _WIN32
  const char* x = xd3_strerror (err_num);
  if (x != NULL)
    {
      return x;
    }
  return strerror(err_num);
#else
  static char err_buf[256];
  const char* x = xd3_strerror (err_num);
  if (x != NULL)
    {
      return x;
    }
  memset (err_buf, 0, 256);
  FormatMessage (FORMAT_MESSAGE_FROM_SYSTEM |
		 FORMAT_MESSAGE_IGNORE_INSERTS,
		 NULL, err_num,
		 MAKELANGID (LANG_NEUTRAL, SUBLANG_DEFAULT),
		 err_buf, 256, NULL);
  /* Drop the trailing newline that FormatMessage appends. */
  if (err_buf[0] != 0 && err_buf[strlen(err_buf) - 1] == '\n')
    {
      err_buf[strlen(err_buf) - 1] = 0;
    }
  return err_buf;
#endif
}
/* Returns a wall-clock timestamp in milliseconds.  Only used for
 * rate/elapsed reporting, so the absolute origin (epoch on POSIX,
 * local time on Windows) does not matter. */
static long
get_millisecs_now (void)
{
#ifndef _WIN32
  struct timeval tv;
  gettimeofday (& tv, NULL);
  return (tv.tv_sec) * 1000L + (tv.tv_usec) / 1000;
#else
  SYSTEMTIME st;
  FILETIME ft;
  __int64 *pi = (__int64*)&ft;  /* FILETIME counts 100ns units */
  GetLocalTime(&st);
  SystemTimeToFileTime(&st, &ft);
  return (long)((*pi) / 10000);
#endif
}
/* Returns the milliseconds elapsed since the previous call (or since
 * program start on the first call).  Keeps its own static timestamp,
 * so it is not reentrant.  Always >= 1 millisec, right? */
static long
get_millisecs_since (void)
{
  static long previous = 0;
  long current = get_millisecs_now ();
  long elapsed = current - previous;
  previous = current;
  return elapsed;
}
/* Formats the byte count R into BUF using binary units (B .. EiB),
 * picking a precision so at most three significant digits print.
 * Returns buf->buf. */
static const char*
main_format_bcnt (xoff_t r, shortbuf *buf)
{
  static const char* fmts[] = { "B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB" };
  usize_t i;
  for (i = 0; i < SIZEOF_ARRAY(fmts) - 1; i += 1)
    {
      xoff_t new_r;

      /* Values 0..999 print directly in the current unit. */
      if (r == 0)
	{
	  short_sprintf (*buf, "0 %s", fmts[i]);
	  return buf->buf;
	}

      if (r >= 1 && r < 10)
	{
	  short_sprintf (*buf, "%.2f %s", (double) r, fmts[i]);
	  return buf->buf;
	}

      if (r >= 10 && r < 100)
	{
	  short_sprintf (*buf, "%.1f %s", (double) r, fmts[i]);
	  return buf->buf;
	}

      if (r >= 100 && r < 1000)
	{
	  short_sprintf (*buf, "%"Q"u %s", r, fmts[i]);
	  return buf->buf;
	}

      /* Otherwise scale down; prefer fractional digits in the next
       * unit while the scaled value stays under 100. */
      new_r = r / 1024;

      if (new_r < 10)
	{
	  short_sprintf (*buf, "%.2f %s", (double) r / 1024.0, fmts[i + 1]);
	  return buf->buf;
	}

      if (new_r < 100)
	{
	  short_sprintf (*buf, "%.1f %s", (double) r / 1024.0, fmts[i + 1]);
	  return buf->buf;
	}

      r = new_r;
    }
  /* Unreachable: an xoff_t cannot exceed the EiB range above. */
  XD3_ASSERT (0);
  return "";
}
/* Formats BYTES transferred over MILLIS milliseconds as a "<count>/s"
 * string in BUF; returns buf->buf.
 * NOTE(review): the static lbuf makes this non-reentrant, and the
 * division assumes MILLIS != 0 -- callers appear to pass elapsed
 * times of at least 1ms; confirm. */
static char*
main_format_rate (xoff_t bytes, long millis, shortbuf *buf)
{
  xoff_t r = (xoff_t)(1.0 * bytes / (1.0 * millis / 1000.0));
  static shortbuf lbuf;

  main_format_bcnt (r, &lbuf);
  short_sprintf (*buf, "%s/s", lbuf.buf);
  return buf->buf;
}
/* Formats an elapsed time MILLIS into BUF, choosing milliseconds,
 * fractional seconds, or whole seconds by magnitude.  Returns
 * buf->buf.
 * FIX: MILLIS is a signed long, so the conversions now use %ld
 * (previously %lu, a format/argument type mismatch). */
static char*
main_format_millis (long millis, shortbuf *buf)
{
  if (millis < 1000)
    {
      short_sprintf (*buf, "%ld ms", millis);
    }
  else if (millis < 10000)
    {
      short_sprintf (*buf, "%.1f sec", millis / 1000.0);
    }
  else
    {
      short_sprintf (*buf, "%ld sec", millis / 1000L);
    }
  return buf->buf;
}
/* A safe version of strtol for xoff_t.  Parses S into *XO, rejecting
 * negative values and trailing non-numeric characters; WHICH is the
 * option letter used in error messages.  Returns 0 or EXIT_FAILURE.
 * NOTE(review): out-of-range input saturates inside strtol/strtoll
 * without an ERANGE check -- see the comment below; confirm whether
 * explicit overflow rejection is needed. */
static int
main_strtoxoff (const char* s, xoff_t *xo, char which)
{
  char *e;
  xoff_t x;

  XD3_ASSERT(s && *s != 0);

  {
    /* Should check LONG_MIN, LONG_MAX, LLONG_MIN, LLONG_MAX? */
#if SIZEOF_XOFF_T == 4
    long xx = strtol (s, &e, 0);
#else
    long long xx = strtoll (s, &e, 0);
#endif

    if (xx < 0)
      {
	XPR(NT "-%c: negative integer: %s\n", which, s);
	return EXIT_FAILURE;
      }

    x = xx;
  }

  /* Reject trailing garbage after the number. */
  if (*e != 0)
    {
      XPR(NT "-%c: invalid integer: %s\n", which, s);
      return EXIT_FAILURE;
    }

  (*xo) = x;
  return 0;
}
/* Parses ARG as a non-negative xoff_t and range-checks it against
 * [LOW, HIGH] (HIGH == 0 means unbounded above).  WHICH is the option
 * letter for error messages.  On success stores the value in *XO and
 * returns 0; otherwise returns nonzero. */
static int
main_atoux (const char* arg, xoff_t *xo, xoff_t low,
	    xoff_t high, char which)
{
  xoff_t parsed;
  int ret = main_strtoxoff (arg, & parsed, which);

  if (ret != 0)
    {
      return ret;
    }

  if (parsed < low)
    {
      XPR(NT "-%c: minimum value: %"Q"u\n", which, low);
      return EXIT_FAILURE;
    }

  if (high != 0 && parsed > high)
    {
      XPR(NT "-%c: maximum value: %"Q"u\n", which, high);
      return EXIT_FAILURE;
    }

  (*xo) = parsed;
  return 0;
}
/* usize_t variant of main_atoux: same parsing and range check, then
 * narrows the result into *UO.  Returns 0 on success. */
static int
main_atou (const char* arg, usize_t *uo, usize_t low,
	   usize_t high, char which)
{
  xoff_t wide;
  int ret = main_atoux (arg, &wide, low, high, which);

  if (ret == 0)
    {
      *uo = (usize_t)wide;
    }
  return ret;
}
/******************************************************************
 FILE BASICS
 ******************************************************************/

/* With all the variation in file system-call semantics, arguments,
 * return values and error-handling for the POSIX and STDIO file APIs,
 * the insides of these functions make me sick, which is why these
 * wrappers exist. */

/* Open-mode helpers shared by all three backends (xfile->mode is
 * XO_READ or XO_WRITE). */
#define XOPEN_OPNAME (xfile->mode == XO_READ ? "read" : "write")
#define XOPEN_STDIO (xfile->mode == XO_READ ? "rb" : "wb")
#define XOPEN_POSIX (xfile->mode == XO_READ ? \
		     O_RDONLY : O_WRONLY | O_CREAT | O_TRUNC)
#define XOPEN_MODE (xfile->mode == XO_READ ? 0 : 0666)

/* Uniform error reporting for file operations (silent with -q). */
#define XF_ERROR(op, name, ret) \
  do { if (!option_quiet) { XPR(NT "file %s failed: %s: %s: %s\n", (op), \
       XOPEN_OPNAME, (name), xd3_mainerror (ret)); } } while (0)

/* Per-backend accessors: raw fd and stdin/stdout binding. */
#if XD3_STDIO
#define XFNO(f) fileno(f->file)
#define XSTDOUT_XF(f) { (f)->file = stdout; (f)->filename = "/dev/stdout"; }
#define XSTDIN_XF(f) { (f)->file = stdin; (f)->filename = "/dev/stdin"; }

#elif XD3_POSIX
#define XFNO(f) f->file
#define XSTDOUT_XF(f) \
  { (f)->file = STDOUT_FILENO; (f)->filename = "/dev/stdout"; }
#define XSTDIN_XF(f) \
  { (f)->file = STDIN_FILENO; (f)->filename = "/dev/stdin"; }

#elif XD3_WIN32
#define XFNO(f) -1
#define XSTDOUT_XF(f) { \
  (f)->file = GetStdHandle(STD_OUTPUT_HANDLE); \
  (f)->filename = "(stdout)"; \
  }
#define XSTDIN_XF(f) { \
  (f)->file = GetStdHandle(STD_INPUT_HANDLE); \
  (f)->filename = "(stdin)"; \
  }
#endif
/* Initializes XFILE to the closed state.  Must be called before any
 * other main_file_* operation on the struct. */
void
main_file_init (main_file *xfile)
{
  memset (xfile, 0, sizeof (*xfile));

#if XD3_POSIX
  xfile->file = -1;  /* fd 0 would be a valid descriptor (stdin) */
#endif
#if XD3_WIN32
  xfile->file = INVALID_HANDLE_VALUE;
#endif
}
/* Returns nonzero when XFILE currently holds an open handle, using
 * the backend-specific "closed" sentinel set by main_file_init/close. */
int
main_file_isopen (main_file *xfile)
{
#if XD3_STDIO
  return xfile->file != NULL;

#elif XD3_POSIX
  return xfile->file != -1;

#elif XD3_WIN32
  return xfile->file != INVALID_HANDLE_VALUE;
#endif
}
/* Closes XFILE if open and resets it to the closed sentinel.
 * Returns 0 on success or if already closed; otherwise reports the
 * error and returns a system error code. */
int
main_file_close (main_file *xfile)
{
  int ret = 0;

  if (! main_file_isopen (xfile))
    {
      return 0;
    }

#if XD3_STDIO
  ret = fclose (xfile->file);
  xfile->file = NULL;

#elif XD3_POSIX
  ret = close (xfile->file);
  xfile->file = -1;

#elif XD3_WIN32
  if (!CloseHandle(xfile->file)) {
    ret = get_errno ();
  }
  xfile->file = INVALID_HANDLE_VALUE;
#endif

  if (ret != 0) { XF_ERROR ("close", xfile->filename, ret = get_errno ()); }

  return ret;
}
/* Releases everything XFILE owns: closes the handle if still open and
 * frees the snprintf scratch buffer and the copied filename.  Pointers
 * are nulled so repeated cleanup is harmless. */
void
main_file_cleanup (main_file *xfile)
{
  XD3_ASSERT (xfile != NULL);

  if (main_file_isopen (xfile))
    {
      main_file_close (xfile);
    }

  if (xfile->snprintf_buf != NULL)
    {
      main_free (xfile->snprintf_buf);
      xfile->snprintf_buf = NULL;
    }

  if (xfile->filename_copy != NULL)
    {
      main_free (xfile->filename_copy);
      xfile->filename_copy = NULL;
    }
}
/* Opens NAME on XFILE for reading or writing according to MODE
 * (XO_READ / XO_WRITE).  Write mode creates/truncates; on Windows
 * creation of an existing file fails unless option_force is set.
 * Returns 0 or a system/xdelta error code. */
int
main_file_open (main_file *xfile, const char* name, int mode)
{
  int ret = 0;

  xfile->mode = mode;

  XD3_ASSERT (name != NULL);
  XD3_ASSERT (! main_file_isopen (xfile));
  if (name[0] == 0)
    {
      XPR(NT "invalid file name: empty string\n");
      return XD3_INVALID;
    }

#if XD3_STDIO
  xfile->file = fopen (name, XOPEN_STDIO);

  ret = (xfile->file == NULL) ? get_errno () : 0;

#elif XD3_POSIX
  /* TODO: Should retry this call if interrupted, similar to read/write */
  if ((ret = open (name, XOPEN_POSIX, XOPEN_MODE)) < 0)
    {
      ret = get_errno ();
    }
  else
    {
      xfile->file = ret;
      ret = 0;
    }

#elif XD3_WIN32
  xfile->file = CreateFile(name,
			   (mode == XO_READ) ? GENERIC_READ : GENERIC_WRITE,
			   FILE_SHARE_READ,
			   NULL,
			   (mode == XO_READ) ?
			   OPEN_EXISTING :
			   (option_force ? CREATE_ALWAYS : CREATE_NEW),
			   FILE_ATTRIBUTE_NORMAL,
			   NULL);
  if (xfile->file == INVALID_HANDLE_VALUE)
    {
      ret = get_errno ();
    }
#endif
  if (ret) { XF_ERROR ("open", name, ret); }
  else     { xfile->realname = name; xfile->nread = 0; }
  return ret;
}
/* Stores the size of the open file XFILE in *SIZE.  Returns 0 on
 * success; a nonzero error for non-regular files (pipes, devices,
 * for which the size is meaningless) or on system-call failure.
 * FIX: the pre-_WIN32_WINNT-0x0500 branch was missing the semicolon
 * after "return get_errno ()", a syntax error whenever that branch
 * was compiled. */
int
main_file_stat (main_file *xfile, xoff_t *size)
{
  int ret = 0;
#if XD3_WIN32
  if (GetFileType(xfile->file) != FILE_TYPE_DISK)
    {
      return -1;
    }
# if (_WIN32_WINNT >= 0x0500)
  {
    LARGE_INTEGER li;
    if (GetFileSizeEx(xfile->file, &li) == 0)
      {
	return get_errno ();
      }
    *size = li.QuadPart;
  }
# else
  {
    DWORD filesize = GetFileSize(xfile->file, NULL);
    if (filesize == INVALID_FILE_SIZE)
      {
	return get_errno ();
      }
    *size = filesize;
  }
# endif
#else
  struct stat sbuf;
  if (fstat (XFNO (xfile), & sbuf) < 0)
    {
      ret = get_errno ();
      return ret;
    }

  /* st_size is only meaningful for regular files. */
  if (! S_ISREG (sbuf.st_mode))
    {
      return ESPIPE;
    }
  (*size) = sbuf.st_size;
#endif
  return ret;
}
/* Returns nonzero when xfile->filename names an existing regular
 * file (directories, pipes, etc. do not count). */
int
main_file_exists (main_file *xfile)
{
  struct stat sbuf;

  if (stat (xfile->filename, & sbuf) != 0)
    {
      return 0;
    }
  return S_ISREG (sbuf.st_mode) != 0;
}
#if (XD3_POSIX || EXTERNAL_COMPRESSION)
/* POSIX-generic code takes a function pointer to read() or write().
 * This calls the function repeatedly until the buffer is full or EOF.
 * The NREAD parameter is not set for write, NULL is passed.  Return
 * is signed, < 0 indicate errors, otherwise byte count. */
typedef int (xd3_posix_func) (int fd, uint8_t *buf, usize_t size);

static int
xd3_posix_io (int fd, uint8_t *buf, size_t size,
	      xd3_posix_func *func, size_t *nread)
{
  int ret;
  size_t nproc = 0;

  while (nproc < size)
    {
      /* Cap each call at 1 GiB so the count fits the callee's
       * usize_t size parameter. */
      size_t tryread = min(size - nproc, 1U << 30);
      ssize_t result = (*func) (fd, buf + nproc, tryread);

      if (result < 0)
	{
	  ret = get_errno ();
	  /* Retry transient conditions; fail on real errors. */
	  if (ret != EAGAIN && ret != EINTR)
	    {
	      return ret;
	    }
	  continue;
	}

      /* In read mode (nread != NULL) a zero result means EOF. */
      if (nread != NULL && result == 0) { break; }

      nproc += result;
    }
  if (nread != NULL) { (*nread) = nproc; }
  return 0;
}
#endif
#if XD3_WIN32
/* Win32 analogue of xd3_posix_io: loops ReadFile/WriteFile until SIZE
 * bytes are transferred or, when reading (IS_READ), EOF/broken pipe
 * ends the loop early.  *NREAD receives the count for reads. */
static int
xd3_win32_io (HANDLE file, uint8_t *buf, size_t size,
	      int is_read, size_t *nread)
{
  int ret = 0;
  size_t nproc = 0;

  while (nproc < size)
    {
      DWORD nproc2 = 0;  /* hmm */
      DWORD nremain = size - nproc;
      if ((is_read ?
	   ReadFile (file, buf + nproc, nremain, &nproc2, NULL) :
	   WriteFile (file, buf + nproc, nremain, &nproc2, NULL)) == 0)
	{
	  ret = get_errno();
	  if (ret != ERROR_HANDLE_EOF && ret != ERROR_BROKEN_PIPE)
	    {
	      return ret;
	    }
	  /* By falling through here, we'll break this loop in the
	   * read case in case of eof or broken pipe. */
	}
      nproc += nproc2;

      if (nread != NULL && nproc2 == 0) { break; }
    }
  if (nread != NULL) { (*nread) = nproc; }
  return 0;
}
#endif
/* POSIX is unbuffered, while STDIO is buffered.  main_file_read()
 * should always be called on blocks.  Reads up to SIZE bytes into
 * BUF and stores the actual count in *NREAD (0 at EOF); MSG prefixes
 * any error report.  Updates ifile->nread.  Returns 0 or an error. */
int
main_file_read (main_file *ifile,
		uint8_t *buf,
		size_t size,
		size_t *nread,
		const char *msg)
{
  int ret = 0;

#if XD3_STDIO
  size_t result;

  result = fread (buf, 1, size, ifile->file);

  /* A short fread is only an error when ferror says so; otherwise
   * it is EOF. */
  if (result < size && ferror (ifile->file))
    {
      ret = get_errno ();
    }
  else
    {
      *nread = result;
    }

#elif XD3_POSIX
  ret = xd3_posix_io (ifile->file, buf, size, (xd3_posix_func*) &read, nread);
#elif XD3_WIN32
  ret = xd3_win32_io (ifile->file, buf, size, 1 /* is_read */, nread);
#endif

  if (ret)
    {
      XPR(NT "%s: %s: %s\n", msg, ifile->filename, xd3_mainerror (ret));
    }
  else
    {
      if (option_verbose > 4) { XPR(NT "read %s: %zu bytes\n",
				    ifile->filename, (*nread)); }
      ifile->nread += (*nread);
    }

  return ret;
}
/* Writes exactly SIZE bytes from BUF to OFILE; MSG prefixes any error
 * report.  Updates ofile->nwrite on success.  Returns 0 or an error. */
int
main_file_write (main_file *ofile, uint8_t *buf, usize_t size, const char *msg)
{
  int ret = 0;

#if XD3_STDIO
  usize_t result;

  result = fwrite (buf, 1, size, ofile->file);

  /* Any short write is an error for output. */
  if (result != size) { ret = get_errno (); }

#elif XD3_POSIX
  ret = xd3_posix_io (ofile->file, buf, size, (xd3_posix_func*) &write, NULL);

#elif XD3_WIN32
  ret = xd3_win32_io (ofile->file, buf, size, 0, NULL);

#endif

  if (ret)
    {
      XPR(NT "%s: %s: %s\n", msg, ofile->filename, xd3_mainerror (ret));
    }
  else
    {
      if (option_verbose > 5) { XPR(NT "write %s: %u bytes\n",
				    ofile->filename, size); }
      ofile->nwrite += size;
    }

  return ret;
}
/* Repositions XFILE to absolute offset POS.  Returns 0 or a system
 * error code.  Used for random access into the source file. */
static int
main_file_seek (main_file *xfile, xoff_t pos)
{
  int ret = 0;

#if XD3_STDIO
  if (fseek (xfile->file, pos, SEEK_SET) != 0) { ret = get_errno (); }

#elif XD3_POSIX
  if ((xoff_t) lseek (xfile->file, pos, SEEK_SET) != pos)
    { ret = get_errno (); }

#elif XD3_WIN32
# if (_WIN32_WINNT >= 0x0500)
  LARGE_INTEGER move, out;
  move.QuadPart = pos;
  if (SetFilePointerEx(xfile->file, move, &out, FILE_BEGIN) == 0)
    {
      ret = get_errno ();
    }
# else
  /* Legacy API: note the LONG cast limits POS to 2^31-1 here. */
  if (SetFilePointer(xfile->file, (LONG)pos, NULL, FILE_BEGIN) ==
      INVALID_SET_FILE_POINTER)
    {
      ret = get_errno ();
    }
# endif
#endif

  return ret;
}
/* This function simply writes the stream output buffer, if there is
 * any, for encode, decode and recode commands.  (The VCDIFF tools use
 * main_print_func()).  Honors -J (option_no_output). */
static int
main_write_output (xd3_stream* stream, main_file *ofile)
{
  int ret;

  if (option_no_output)
    {
      return 0;
    }

  if (stream->avail_out > 0 &&
      (ret = main_file_write (ofile, stream->next_out,
			      stream->avail_out, "write failed")))
    {
      return ret;
    }

  return 0;
}
/* Translates the secondary-compression option (option_secondary) into
 * flags on CONFIG.  Recognized values: "fgk", "lzma", "djw" optionally
 * followed by a level digit (e.g. "djw9"), and "none"; with no value
 * the default is DJW when compiled in.  Returns 0 or XD3_INVALID.
 * FIXES: (1) an invalid djw level and (2) an unrecognized compressor
 * name are now rejected even under -q -- previously the error returns
 * sat inside !option_quiet tests, so quiet mode silently accepted bad
 * input; (3) "none" no longer requires SECONDARY_DJW to be compiled. */
static int
main_set_secondary_flags (xd3_config *config)
{
  int ret;
  if (!option_use_secondary)
    {
      return 0;
    }

  /* The default secondary compressor is DJW, if it's compiled. */
  if (option_secondary == NULL)
    {
      if (SECONDARY_DJW)
	{
	  config->flags |= XD3_SEC_DJW;
	}
      return 0;
    }

  if (strcmp (option_secondary, "fgk") == 0 && SECONDARY_FGK)
    {
      config->flags |= XD3_SEC_FGK;
    }
  else if (strcmp (option_secondary, "lzma") == 0 && SECONDARY_LZMA)
    {
      config->flags |= XD3_SEC_LZMA;
    }
  else if (strncmp (option_secondary, "djw", 3) == 0 && SECONDARY_DJW)
    {
      usize_t level = XD3_DEFAULT_SECONDARY_LEVEL;

      config->flags |= XD3_SEC_DJW;

      if (strlen (option_secondary) > 3 &&
	  (ret = main_atou (option_secondary + 3,
			    &level,
			    0, 9, 'S')) != 0)
	{
	  return XD3_INVALID;
	}

      /* XD3_SEC_NOXXXX flags disable secondary compression on
       * a per-section basis.  For djw, ngroups=1 indicates
       * minimum work, ngroups=0 uses default settings, which
       * is > 1 groups by default. */
      if (level < 1) { config->flags |= XD3_SEC_NODATA; }
      if (level < 7) { config->sec_data.ngroups = 1; }
      else { config->sec_data.ngroups = 0; }

      if (level < 3) { config->flags |= XD3_SEC_NOINST; }
      if (level < 8) { config->sec_inst.ngroups = 1; }
      else { config->sec_inst.ngroups = 0; }

      if (level < 5) { config->flags |= XD3_SEC_NOADDR; }
      if (level < 9) { config->sec_addr.ngroups = 1; }
      else { config->sec_addr.ngroups = 0; }
    }
  else if (strcmp (option_secondary, "none") == 0)
    {
      /* No secondary */
    }
  else
    {
      if (!option_quiet)
	{
	  XPR(NT "unrecognized secondary compressor type: %s\n",
	      option_secondary);
	}
      return XD3_INVALID;
    }

  return 0;
}
/******************************************************************
 VCDIFF TOOLS
 *****************************************************************/

#if VCDIFF_TOOLS
#include "xdelta3-merge.h"

/* The following macros let VCDIFF print using main_file_write(),
 * for example:
 *
 *   VC(UT "trying to be portable: %d\n", x)VE;
 *
 * The pieces expand to: snprintf into xfile->snprintf_buf, report an
 * overflow when the formatted length reaches SNPRINTF_BUFSIZE, then
 * write the bytes with main_file_write(); any failure executes
 * "return ret" in the ENCLOSING function. */
#define SNPRINTF_BUFSIZE 1024
#define VC do { if (((ret = xsnprintf_func
#define UT (char*)xfile->snprintf_buf, SNPRINTF_BUFSIZE,
#define VE ) >= SNPRINTF_BUFSIZE \
  && (ret = main_print_overflow(ret)) != 0) \
  || (ret = main_file_write(xfile, xfile->snprintf_buf, \
			    (usize_t)ret, "print")) != 0) \
  { return ret; } } while (0)
/* Error helper for the VC/VE macros: reports that a formatted line of
 * X bytes exceeded the print buffer; always returns XD3_INTERNAL. */
static int
main_print_overflow (int x)
{
  XPR(NT "internal print buffer overflow: %d bytes\n", x);
  return XD3_INTERNAL;
}
/* This function prints a single VCDIFF window: decodes each
 * instruction in turn, printing offset, code, and the one or two
 * half-instructions with their sizes and copy addresses, then runs
 * consistency checks on the section pointers. */
static int
main_print_window (xd3_stream* stream, main_file *xfile)
{
  int ret;
  usize_t size = 0;  /* target bytes accounted for so far */

  VC(UT " Offset Code Type1 Size1 @Addr1 + Type2 Size2 @Addr2\n")VE;

  while (stream->inst_sect.buf < stream->inst_sect.buf_max)
    {
      usize_t code = stream->inst_sect.buf[0];
      const uint8_t *addr_before = stream->addr_sect.buf;
      const uint8_t *inst_before = stream->inst_sect.buf;
      usize_t addr_bytes;
      usize_t inst_bytes;
      usize_t size_before = size;

      if ((ret = xd3_decode_instruction (stream)))
	{
	  XPR(NT "instruction decode error at %"Q"u: %s\n",
	      stream->dec_winstart + size, stream->msg);
	  return ret;
	}

      /* Bytes consumed by this instruction in each section. */
      addr_bytes = (usize_t)(stream->addr_sect.buf - addr_before);
      inst_bytes = (usize_t)(stream->inst_sect.buf - inst_before);

      VC(UT " %06"Q"u %03u %s %6u", stream->dec_winstart + size,
	 option_print_cpymode ? code : 0,
	 xd3_rtype_to_string ((xd3_rtype) stream->dec_current1.type,
			      option_print_cpymode),
	 stream->dec_current1.size)VE;

      if (stream->dec_current1.type != XD3_NOOP)
	{
	  if (stream->dec_current1.type >= XD3_CPY)
	    {
	      /* T@ = copy within the target window, S@ = source copy. */
	      if (stream->dec_current1.addr >= stream->dec_cpylen)
		{
		  VC(UT " T@%-6u",
		     stream->dec_current1.addr - stream->dec_cpylen)VE;
		}
	      else
		{
		  VC(UT " S@%-6"Q"u",
		     stream->dec_cpyoff + stream->dec_current1.addr)VE;
		}
	    }
	  else
	    {
	      VC(UT " ")VE;
	    }

	  size += stream->dec_current1.size;
	}

      if (stream->dec_current2.type != XD3_NOOP)
	{
	  VC(UT " %s %6u",
	     xd3_rtype_to_string ((xd3_rtype) stream->dec_current2.type,
				  option_print_cpymode),
	     stream->dec_current2.size)VE;

	  if (stream->dec_current2.type >= XD3_CPY)
	    {
	      if (stream->dec_current2.addr >= stream->dec_cpylen)
		{
		  VC(UT " T@%-6u",
		     stream->dec_current2.addr - stream->dec_cpylen)VE;
		}
	      else
		{
		  VC(UT " S@%-6"Q"u",
		     stream->dec_cpyoff + stream->dec_current2.addr)VE;
		}
	    }

	  size += stream->dec_current2.size;
	}

      VC(UT "\n")VE;

      /* Flag copies whose encoding took at least as many bytes as the
       * data they produce. */
      if (option_verbose &&
	  addr_bytes + inst_bytes >= (size - size_before) &&
	  (stream->dec_current1.type >= XD3_CPY ||
	   stream->dec_current2.type >= XD3_CPY))
	{
	  VC(UT " %06"Q"u (inefficiency) %u encoded as %u bytes\n",
	     stream->dec_winstart + size_before,
	     size - size_before,
	     addr_bytes + inst_bytes)VE;
	}
    }

  if (stream->dec_tgtlen != size && (stream->flags & XD3_SKIP_WINDOW) == 0)
    {
      XPR(NT "target window size inconsistency");
      return XD3_INTERNAL;
    }

  if (stream->dec_position != stream->dec_maxpos)
    {
      XPR(NT "target window position inconsistency");
      return XD3_INTERNAL;
    }

  if (stream->addr_sect.buf != stream->addr_sect.buf_max)
    {
      XPR(NT "address section inconsistency");
      return XD3_INTERNAL;
    }

  return 0;
}
/* Prints the filename and external-compression command recovered from
 * an application header; TYPE is "output" or "source".  RET is used by
 * the VC/VE macros. */
static int
main_print_vcdiff_file (main_file *xfile, main_file *file, const char *type)
{
  int ret;  /* Used by above macros */

  if (file->filename)
    {
      VC(UT "XDELTA filename (%s): %s\n", type,
	 file->filename)VE;
    }

  if (file->compressor)
    {
      VC(UT "XDELTA ext comp (%s): %s\n", type,
	 file->compressor->recomp_cmdname)VE;
    }

  return 0;
}
/* This function prints a VCDIFF input, mainly for debugging purposes.
 * Called once per window; on the first window it also prints the file
 * header (indicators, secondary compressor, application header).
 * Returns 0, PRINTHDR_SPECIAL (header-only mode finished), or an
 * error. */
static int
main_print_func (xd3_stream* stream, main_file *xfile)
{
  int ret;

  if (option_no_output)
    {
      return 0;
    }

  /* Lazily allocate the buffer the VC/UT/VE macros format into. */
  if (xfile->snprintf_buf == NULL)
    {
      if ((xfile->snprintf_buf =
	   (uint8_t*)main_malloc(SNPRINTF_BUFSIZE)) == NULL)
	{
	  return ENOMEM;
	}
    }

  if (stream->dec_winstart == 0)
    {
      /* First window: print the VCDIFF header fields. */
      VC(UT "VCDIFF version: 0\n")VE;

      VC(UT "VCDIFF header size: %d\n",
	 stream->dec_hdrsize)VE;

      VC(UT "VCDIFF header indicator: ")VE;
      if ((stream->dec_hdr_ind & VCD_SECONDARY) != 0)
	VC(UT "VCD_SECONDARY ")VE;
      if ((stream->dec_hdr_ind & VCD_CODETABLE) != 0)
	VC(UT "VCD_CODETABLE ")VE;
      if ((stream->dec_hdr_ind & VCD_APPHEADER) != 0)
	VC(UT "VCD_APPHEADER ")VE;
      if (stream->dec_hdr_ind == 0)
	VC(UT "none")VE;
      VC(UT "\n")VE;

      IF_SEC(VC(UT "VCDIFF secondary compressor: %s\n",
		stream->sec_type ? stream->sec_type->name : "none")VE);
      IF_NSEC(VC(UT "VCDIFF secondary compressor: unsupported\n")VE);

      if (stream->dec_hdr_ind & VCD_APPHEADER)
	{
	  uint8_t *apphead;
	  usize_t appheadsz;
	  ret = xd3_get_appheader (stream, & apphead, & appheadsz);

	  if (ret == 0 && appheadsz > 0)
	    {
	      int sq = option_quiet;
	      main_file i, o, s;
	      XD3_ASSERT (apphead != NULL);
	      VC(UT "VCDIFF application header: ")VE;
	      if ((ret = main_file_write (xfile, apphead,
					  appheadsz, "print")) != 0)
		{ return ret; }
	      VC(UT "\n")VE;

	      main_file_init (& i);
	      main_file_init (& o);
	      main_file_init (& s);
	      /* Temporarily silence warnings while re-parsing the
	       * application header into o/s. */
	      option_quiet = 1;
	      main_get_appheader (stream, &i, & o, & s);
	      option_quiet = sq;
	      if ((ret = main_print_vcdiff_file (xfile, & o, "output")))
		{ return ret; }
	      if ((ret = main_print_vcdiff_file (xfile, & s, "source")))
		{ return ret; }
	      main_file_cleanup (& i);
	      main_file_cleanup (& o);
	      main_file_cleanup (& s);
	    }
	}
    }
  else
    {
      VC(UT "\n")VE;
    }

  /* Per-window fields. */
  VC(UT "VCDIFF window number: %"Q"u\n", stream->current_window)VE;
  VC(UT "VCDIFF window indicator: ")VE;
  if ((stream->dec_win_ind & VCD_SOURCE) != 0) VC(UT "VCD_SOURCE ")VE;
  if ((stream->dec_win_ind & VCD_TARGET) != 0) VC(UT "VCD_TARGET ")VE;
  if ((stream->dec_win_ind & VCD_ADLER32) != 0) VC(UT "VCD_ADLER32 ")VE;
  if (stream->dec_win_ind == 0) VC(UT "none")VE;
  VC(UT "\n")VE;

  if ((stream->dec_win_ind & VCD_ADLER32) != 0)
    {
      VC(UT "VCDIFF adler32 checksum: %08X\n",
	 (usize_t)stream->dec_adler32)VE;
    }

  if (stream->dec_del_ind != 0)
    {
      VC(UT "VCDIFF delta indicator: ")VE;
      if ((stream->dec_del_ind & VCD_DATACOMP) != 0) VC(UT "VCD_DATACOMP ")VE;
      if ((stream->dec_del_ind & VCD_INSTCOMP) != 0) VC(UT "VCD_INSTCOMP ")VE;
      if ((stream->dec_del_ind & VCD_ADDRCOMP) != 0) VC(UT "VCD_ADDRCOMP ")VE;
      if (stream->dec_del_ind == 0) VC(UT "none")VE;
      VC(UT "\n")VE;
    }

  if (stream->dec_winstart != 0)
    {
      VC(UT "VCDIFF window at offset: %"Q"u\n", stream->dec_winstart)VE;
    }

  if (SRCORTGT (stream->dec_win_ind))
    {
      VC(UT "VCDIFF copy window length: %u\n",
	 (usize_t)stream->dec_cpylen)VE;
      VC(UT "VCDIFF copy window offset: %"Q"u\n",
	 stream->dec_cpyoff)VE;
    }

  VC(UT "VCDIFF delta encoding length: %u\n",
     (usize_t)stream->dec_enclen)VE;
  VC(UT "VCDIFF target window length: %u\n",
     (usize_t)stream->dec_tgtlen)VE;

  VC(UT "VCDIFF data section length: %u\n",
     (usize_t)stream->data_sect.size)VE;
  VC(UT "VCDIFF inst section length: %u\n",
     (usize_t)stream->inst_sect.size)VE;
  VC(UT "VCDIFF addr section length: %u\n",
     (usize_t)stream->addr_sect.size)VE;

  ret = 0;
  if ((stream->flags & XD3_JUST_HDR) != 0)
    {
      /* Print a header -- finished! */
      ret = PRINTHDR_SPECIAL;
    }
  else if ((stream->flags & XD3_SKIP_WINDOW) == 0)
    {
      ret = main_print_window (stream, xfile);
    }

  return ret;
}
/* Copies one decoded section (data/inst/addr) from INPUT into the
 * recode encoder's OUTPUT page, allocating the page via the decoder's
 * allocator first.  Returns 0 or an allocation error. */
static int
main_recode_copy (xd3_stream* stream,
		  xd3_output* output,
		  xd3_desect* input)
{
  int ret;

  XD3_ASSERT(output != NULL);
  XD3_ASSERT(output->next_page == NULL);

  if ((ret = xd3_decode_allocate (recode_stream,
				  input->size,
				  &output->base,
				  &output->avail)))
    {
      XPR(NT XD3_LIB_ERRMSG (stream, ret));
      return ret;
    }

  memcpy (output->base,
	  /* Note: decoder advances buf, so get base of buffer with
	   * buf_max - size */
	  input->buf_max - input->size,
	  input->size);

  output->next = input->size;
  return 0;
}
/* Re-encode one window: transfers the decoder's section buffers into
 * recode_stream, carries over the source span, adler32 and application
 * header as configured, then drives the encoder until the window has
 * been written to OFILE. */
static int
main_recode_func (xd3_stream* stream, main_file *ofile)
{
  int ret;
  xd3_source decode_source;

  XD3_ASSERT(stream->dec_state == DEC_FINISH);
  XD3_ASSERT(recode_stream->enc_state == ENC_INIT ||
	     recode_stream->enc_state == ENC_INPUT);

  /* Copy partial decoder output to partial encoder inputs. */
  if ((ret = main_recode_copy (recode_stream,
			       DATA_HEAD(recode_stream),
			       &stream->data_sect)) ||
      (ret = main_recode_copy (recode_stream,
			       INST_HEAD(recode_stream),
			       &stream->inst_sect)) ||
      (ret = main_recode_copy (recode_stream,
			       ADDR_HEAD(recode_stream),
			       &stream->addr_sect)))
    {
      return ret;
    }

  /* This jumps to xd3_emit_hdr() */
  recode_stream->enc_state = ENC_FLUSH;
  recode_stream->avail_in = stream->dec_tgtlen;

  if (SRCORTGT (stream->dec_win_ind))
    {
      /* Re-encode against the same copy window the decoder used. */
      recode_stream->src = & decode_source;
      decode_source.srclen = stream->dec_cpylen;
      decode_source.srcbase = stream->dec_cpyoff;
    }

  if (option_use_checksum &&
      (stream->dec_win_ind & VCD_ADLER32) != 0)
    {
      /* Carry the original window checksum through the recode. */
      recode_stream->flags |= XD3_ADLER32_RECODE;
      recode_stream->recode_adler32 = stream->dec_adler32;
    }

  if (option_use_appheader != 0 &&
      option_appheader != NULL)
    {
      /* -A given with a value: override the application header. */
      xd3_set_appheader (recode_stream, option_appheader,
			 (usize_t) strlen ((char*) option_appheader));
    }
  else if (option_use_appheader != 0 &&
	   option_appheader == NULL)
    {
      /* Otherwise preserve the decoded input's application header. */
      if (stream->dec_appheader != NULL)
	{
	  xd3_set_appheader (recode_stream,
			     stream->dec_appheader, stream->dec_appheadsz);
	}
    }

  /* Output loop */
  for (;;)
    {
      switch((ret = xd3_encode_input (recode_stream)))
	{
	case XD3_INPUT: {
	  /* finished recoding one window */
	  stream->total_out = recode_stream->total_out;
	  return 0;
	}
	case XD3_OUTPUT: {
	  /* main_file_write below */
	  break;
	}
	case XD3_GOTHEADER:
	case XD3_WINSTART:
	case XD3_WINFINISH: {
	  /* ignore */
	  continue;
	}
	case XD3_GETSRCBLK:
	case 0: {
	  /* Neither should occur here: input is fully buffered. */
	  return XD3_INTERNAL;
	}
	default:
	  return ret;
	}

      if ((ret = main_write_output (recode_stream, ofile)))
	{
	  return ret;
	}

      xd3_consume_output (recode_stream);
    }
}
#endif /* VCDIFF_TOOLS */
/*******************************************************************
VCDIFF merging
******************************************************************/
#if VCDIFF_TOOLS
/* Allocates and configures the global recode_stream used by the
 * recode and merge commands.  Modifies static state.  Returns 0,
 * ENOMEM, or an xdelta error code.
 * FIX: xd3_init_config() zeroes the entire config, so the custom
 * allocator hooks (alloc/freef) are now installed AFTER it --
 * previously they were assigned first and silently wiped, leaving the
 * stream on the default allocator. */
static int
main_init_recode_stream (void)
{
  int ret;
  int stream_flags = XD3_ADLER32_NOVER | XD3_SKIP_EMIT;
  int recode_flags;
  xd3_config recode_config;

  XD3_ASSERT (recode_stream == NULL);

  if ((recode_stream = (xd3_stream*) main_malloc(sizeof(xd3_stream))) == NULL)
    {
      return ENOMEM;
    }

  recode_flags = (stream_flags & XD3_SEC_TYPE);

  xd3_init_config(&recode_config, recode_flags);
  recode_config.alloc = main_alloc;
  recode_config.freef = main_free1;

  if ((ret = main_set_secondary_flags (&recode_config)) ||
      (ret = xd3_config_stream (recode_stream, &recode_config)) ||
      (ret = xd3_encode_init_partial (recode_stream)) ||
      (ret = xd3_whole_state_init (recode_stream)))
    {
      XPR(NT XD3_LIB_ERRMSG (recode_stream, ret));
      xd3_free_stream (recode_stream);
      recode_stream = NULL;
      return ret;
    }

  return 0;
}
/* This processes the sequence of -m arguments.  The final input
 * is processed as part of the ordinary main_input() loop.  Each merge
 * file is decoded and folded left-to-right into merge_input; the
 * accumulated result ends up in the global merge_stream. */
static int
main_merge_arguments (main_merge_list* merges)
{
  int ret = 0;
  int count = 0;
  main_merge *merge = NULL;
  xd3_stream merge_input;

  if (main_merge_list_empty (merges))
    {
      return 0;
    }

  if ((ret = xd3_config_stream (& merge_input, NULL)) ||
      (ret = xd3_whole_state_init (& merge_input)))
    {
      XPR(NT XD3_LIB_ERRMSG (& merge_input, ret));
      return ret;
    }

  merge = main_merge_list_front (merges);
  while (!main_merge_list_end (merges, merge))
    {
      main_file mfile;
      main_file_init (& mfile);
      mfile.filename = merge->filename;
      mfile.flags = RD_NONEXTERNAL;

      if ((ret = main_file_open (& mfile, merge->filename, XO_READ)))
	{
	  goto error;
	}

      /* Decode this delta; the result lands in
       * recode_stream->whole_target. */
      ret = main_input (CMD_MERGE_ARG, & mfile, NULL, NULL);

      if (ret == 0)
	{
	  if (count++ == 0)
	    {
	      /* The first merge source is the next merge input. */
	      xd3_swap_whole_state (& recode_stream->whole_target,
				    & merge_input.whole_target);
	    }
	  else
	    {
	      /* Merge the recode_stream with merge_input. */
	      ret = xd3_merge_input_output (recode_stream,
					    & merge_input.whole_target);

	      /* Save the next merge source in merge_input. */
	      xd3_swap_whole_state (& recode_stream->whole_target,
				    & merge_input.whole_target);
	    }
	}

      main_file_cleanup (& mfile);

      /* main_input() allocates these globals per run; release them
       * before the next iteration (even when ret != 0). */
      if (recode_stream != NULL)
	{
	  xd3_free_stream (recode_stream);
	  main_free (recode_stream);
	  recode_stream = NULL;
	}

      if (main_bdata != NULL)
	{
	  main_buffree (main_bdata);
	  main_bdata = NULL;
	  main_bsize = 0;
	}

      if (ret != 0)
	{
	  goto error;
	}

      merge = main_merge_list_next (merge);
    }

  XD3_ASSERT (merge_stream == NULL);

  if ((merge_stream = (xd3_stream*) main_malloc (sizeof(xd3_stream))) == NULL)
    {
      ret = ENOMEM;
      goto error;
    }

  if ((ret = xd3_config_stream (merge_stream, NULL)) ||
      (ret = xd3_whole_state_init (merge_stream)))
    {
      XPR(NT XD3_LIB_ERRMSG (& merge_input, ret));
      goto error;
    }

  /* Hand the accumulated result to the global merge_stream. */
  xd3_swap_whole_state (& merge_stream->whole_target,
			& merge_input.whole_target);

  ret = 0;
 error:
  xd3_free_stream (& merge_input);
  return ret;
}
/* This processes each window of the final merge input.  This routine
 * does not output; it buffers the decoded window into the stream's
 * whole-file state.  NO_WRITE is unused. */
static int
main_merge_func (xd3_stream* stream, main_file *no_write)
{
  return xd3_whole_append_window (stream);
}
/* This is called after all windows have been read, as a final step in
 * main_input().  This is only called for the final merge step.
 *
 * Re-encodes the merged whole-file instruction list held in
 * `stream->whole_target` through `recode_stream`, window by window,
 * writing the resulting delta to `ofile`.  Window boundaries and
 * offsets must match the original input windows exactly (asserted
 * below) so that target copies stay in range and checksums carry
 * over.  Returns 0 on success or an xdelta/errno-style error. */
static int
main_merge_output (xd3_stream *stream, main_file *ofile)
{
  int ret;
  usize_t inst_pos = 0;      /* Index into whole_target.inst[]. */
  xoff_t output_pos = 0;     /* Total target bytes re-encoded so far. */
  xd3_source recode_source;  /* Synthetic source window for far copies. */
  usize_t window_num = 0;    /* Index into whole_target.wininfo[]. */
  int at_least_once = 0;

  /* merge_stream is set if there were arguments. this stream's input
   * needs to be applied to the merge_stream source. */
  if ((merge_stream != NULL) &&
      (ret = xd3_merge_input_output (stream,
                                     & merge_stream->whole_target)))
    {
      XPR(NT XD3_LIB_ERRMSG (stream, ret));
      return ret;
    }

  /* Propagate a user-supplied application header, if any. */
  if (option_use_appheader != 0 &&
      option_appheader != NULL)
    {
      xd3_set_appheader (recode_stream, option_appheader,
                         (usize_t) strlen ((char*) option_appheader));
    }

  /* Enter the ENC_INPUT state and bypass the next_in == NULL test
   * and (leftover) input buffering logic. */
  XD3_ASSERT(recode_stream->enc_state == ENC_INIT);
  recode_stream->enc_state = ENC_INPUT;
  recode_stream->next_in = main_bdata;
  recode_stream->flags |= XD3_FLUSH;

  /* This encodes the entire target. */
  while (inst_pos < stream->whole_target.instlen || !at_least_once)
    {
      xoff_t window_start = output_pos;
      int window_srcset = 0;      /* Any copy-from-source in this window? */
      xoff_t window_srcmin = 0;   /* Min/max source addresses referenced, */
      xoff_t window_srcmax = 0;   /* used to size the synthetic source.   */
      usize_t window_pos = 0;
      usize_t window_size;

      /* at_least_once ensures that we encode at least one window,
       * which handles the 0-byte case. */
      at_least_once = 1;

      XD3_ASSERT (recode_stream->enc_state == ENC_INPUT);

      if ((ret = xd3_encode_input (recode_stream)) != XD3_WINSTART)
        {
          XPR(NT "invalid merge state: %s\n", xd3_mainerror (ret));
          return XD3_INVALID;
        }

      /* Window sizes must match from the input to the output, so that
       * target copies are in-range (and so that checksums carry
       * over). */
      XD3_ASSERT (window_num < stream->whole_target.wininfolen);
      window_size = stream->whole_target.wininfo[window_num].length;

      /* Output position should also match. */
      if (output_pos != stream->whole_target.wininfo[window_num].offset)
        {
          XPR(NT "internal merge error: offset mismatch\n");
          return XD3_INVALID;
        }

      /* Carry the original window's adler32 into the recoded output. */
      if (option_use_checksum &&
          (stream->dec_win_ind & VCD_ADLER32) != 0)
        {
          recode_stream->flags |= XD3_ADLER32_RECODE;
          recode_stream->recode_adler32 =
            stream->whole_target.wininfo[window_num].adler32;
        }

      window_num++;

      /* Grow the shared input buffer if this window is larger than any
       * seen so far. */
      if (main_bsize < window_size)
        {
          main_buffree (main_bdata);
          main_bdata = NULL;
          main_bsize = 0;
          if ((main_bdata = (uint8_t*)
               main_bufalloc (window_size)) == NULL)
            {
              return ENOMEM;
            }
          main_bsize = window_size;
        }

      /* This encodes a single target window.  Instructions larger than
       * the remaining window space are split (`take`) and the tail is
       * carried into the next window (see the modification below). */
      while (window_pos < window_size &&
             inst_pos < stream->whole_target.instlen)
        {
          xd3_winst *inst = &stream->whole_target.inst[inst_pos];
          usize_t take = min(inst->size, window_size - window_pos);
          xoff_t addr;

          switch (inst->type)
            {
            case XD3_RUN:
              if ((ret = xd3_emit_run (recode_stream, window_pos, take,
                                       &stream->whole_target.adds[inst->addr])))
                {
                  return ret;
                }
              break;

            case XD3_ADD:
              /* Adds are implicit, put them into the input buffer. */
              memcpy (main_bdata + window_pos,
                      stream->whole_target.adds + inst->addr, take);
              break;

            default: /* XD3_COPY + copy mode */
              if (inst->mode != 0)
                {
                  /* Copy from source: track the address range so the
                   * synthetic source window below covers it. */
                  if (window_srcset) {
                    window_srcmin = min(window_srcmin, inst->addr);
                    window_srcmax = max(window_srcmax, inst->addr + take);
                  } else {
                    window_srcset = 1;
                    window_srcmin = inst->addr;
                    window_srcmax = inst->addr + take;
                  }
                  addr = inst->addr;
                }
              else
                {
                  /* Copy from within the target window itself;
                   * addresses are window-relative. */
                  XD3_ASSERT (inst->addr >= window_start);
                  addr = inst->addr - window_start;
                }
              IF_DEBUG2 (XPR(NTR "[merge copy] winpos %u take %u addr %"Q"u mode %u\n",
                             window_pos, take, addr, inst->mode));
              if ((ret = xd3_found_match (recode_stream, window_pos, take,
                                          addr, inst->mode != 0)))
                {
                  return ret;
                }
              break;
            }

          window_pos += take;
          output_pos += take;

          if (take == inst->size)
            {
              inst_pos += 1;
            }
          else
            {
              /* Modify the instruction for the next pass. */
              if (inst->type != XD3_RUN)
                {
                  inst->addr += take;
                }
              inst->size -= take;
            }
        }

      xd3_avail_input (recode_stream, main_bdata, window_pos);
      recode_stream->enc_state = ENC_INSTR;

      if (window_srcset) {
        /* Describe the source span this window's copies referenced. */
        recode_stream->srcwin_decided = 1;
        recode_stream->src = &recode_source;
        recode_source.srclen = (usize_t)(window_srcmax - window_srcmin);
        recode_source.srcbase = window_srcmin;
        recode_stream->taroff = recode_source.srclen;

        XD3_ASSERT (recode_source.srclen != 0);
      } else {
        recode_stream->srcwin_decided = 0;
        recode_stream->src = NULL;
        recode_stream->taroff = 0;
      }

      /* Drain the encoder for this window, writing produced output. */
      for (;;)
        {
          switch ((ret = xd3_encode_input (recode_stream)))
            {
            case XD3_INPUT: {
              goto done_window;
            }
            case XD3_OUTPUT: {
              /* main_file_write below */
              break;
            }
            case XD3_GOTHEADER:
            case XD3_WINSTART:
            case XD3_WINFINISH: {
              /* ignore */
              continue;
            }
            case XD3_GETSRCBLK:
            case 0: {
              /* Neither state is expected here: the synthetic source is
               * never block-fetched, and 0 (done) cannot precede
               * XD3_INPUT. */
              return XD3_INTERNAL;
            }
            default:
              return ret;
            }

          if ((ret = main_write_output(recode_stream, ofile)))
            {
              return ret;
            }

          xd3_consume_output (recode_stream);
        }
    done_window:
      (void) 0;
    }

  return 0;
}
#endif
/*******************************************************************
Input decompression, output recompression
******************************************************************/
#if EXTERNAL_COMPRESSION
/* This is tricky POSIX-specific code with lots of fork(), pipe(),
* dup(), waitpid(), and exec() business. Most of this code
* originated in PRCS1, which did automatic package-file
* decompression. It works with both XD3_POSIX and XD3_STDIO file
* disciplines.
*
* To automatically detect compressed inputs requires a child process
* to reconstruct the input stream, which was advanced in order to
* detect compression, because it may not be seekable. In other
* words, the main program reads part of the input stream, and if it
* detects a compressed input it then forks a pipe copier process,
* which copies the first-read block out of the main-program's memory,
* then streams the remaining compressed input into the
* input-decompression pipe.
*/
#include <signal.h>
#include <unistd.h>
#include <sys/stat.h>
#include <sys/wait.h>
/* Remember which pipe FD is which. */
#define PIPE_READ_FD 0
#define PIPE_WRITE_FD 1
#define MAX_SUBPROCS 4 /* max(source + copier + output,
source + copier + input + copier). */
static pid_t ext_subprocs[MAX_SUBPROCS];
/* write()-style helper operating on a raw fd rather than a main_file;
 * used by the pipe-copier subprocess.  Errors are intentionally not
 * printed here so the caller can silently ignore trailing garbage --
 * see main_pipe_copier().  Returns 0 or an errno-style value from
 * xd3_posix_io(). */
static int
main_pipe_write (int outfd, uint8_t *exist_buf, usize_t remain)
{
  return xd3_posix_io (outfd, exist_buf, remain,
                       (xd3_posix_func*) &write, NULL);
}
/* A simple error-reporting waitpid interface: waits for `pid` and
 * returns 0 when it terminated acceptably, otherwise an errno-style
 * code.  Death by SIGPIPE is treated as success because the parent
 * may legitimately close the pipe early (e.g. when xdelta does not
 * need the entire source file). */
static int
main_waitpid_check(pid_t pid)
{
  int status;
  int ret;

  if (waitpid (pid, & status, 0) < 0)
    {
      ret = get_errno ();
      XPR(NT "external compression [pid %d] wait: %s\n",
          pid, xd3_mainerror (ret));
      return ret;
    }

  if (! WIFEXITED (status))
    {
      /* SIGPIPE is delivered to the child whenever it writes after
       * this process closes the pipe; considered normal. */
      if (WIFSIGNALED (status) && WTERMSIG (status) == SIGPIPE)
        {
          if (option_verbose)
            {
              XPR(NT "external compression sigpipe\n");
            }
          return 0;
        }

      /* Any other signal (or a stop) is an error. */
      XPR(NT "external compression [pid %d] signal %d\n", pid,
          WIFSIGNALED (status) ? WTERMSIG (status) : WSTOPSIG (status));
      return ECHILD;
    }

  if (WEXITSTATUS (status) != 0)
    {
      if (option_verbose > 1)
        {
          /* Presumably, the error was printed by the subprocess. */
          XPR(NT "external compression [pid %d] exit %d\n",
              pid, WEXITSTATUS (status));
        }
      return ECHILD;
    }

  return 0;
}
/* Wait for any existing child processes to check for abnormal exit. */
static int
main_external_compression_finish (void)
{
int i;
int ret;
for (i = 0; i < num_subprocs; i += 1)
{
if (! ext_subprocs[i]) { continue; }
if ((ret = main_waitpid_check (ext_subprocs[i])))
{
return ret;
}
ext_subprocs[i] = 0;
}
return 0;
}
/* Forcibly terminate (SIGTERM) any external-compression subprocess
 * that is still registered, clearing each slot as it goes.  Used on
 * the error/abort path; normal completion goes through
 * main_external_compression_finish(). */
static void
main_external_compression_cleanup (void)
{
  int i;

  for (i = 0; i < num_subprocs; i++)
    {
      if (ext_subprocs[i] != 0)
        {
          kill (ext_subprocs[i], SIGTERM);
          ext_subprocs[i] = 0;
        }
    }
}
/* This runs as a forked process of main_input_decompress_setup() to
 * copy input to the decompression process.  First, the available
 * input is copied out of the existing buffer (`pipe_buf` already
 * holds `nread` bytes consumed by the parent while sniffing for a
 * compression magic number), then the buffer is reused to continue
 * reading from the compressed input file `ifile`, streaming
 * everything into `outfd` (the write end of the decompressor's input
 * pipe).  Returns 0 on success or an errno-style value. */
static int
main_pipe_copier (uint8_t *pipe_buf,
                  usize_t pipe_bufsize,
                  size_t nread,
                  main_file *ifile,
                  int outfd)
{
  int ret;
  xoff_t skipped = 0;

  /* Prevent SIGPIPE signals, allow EPIPE return values instead.  This
   * is safe to comment-out, except that the -F flag will not work
   * properly (the parent would need to treat WTERMSIG(status) ==
   * SIGPIPE). */
  struct sigaction sa;
  /* BUG FIX: sa_mask and sa_flags were previously left uninitialized,
   * which is undefined behavior when the struct is passed to
   * sigaction(); initialize every field POSIX consults. */
  sigemptyset (&sa.sa_mask);
  sa.sa_handler = SIG_IGN;
  sa.sa_flags = 0;
  sigaction (SIGPIPE, &sa, NULL);

  for (;;)
    {
      /* force_drain will be set when option_force and EPIPE cause us
       * to skip data.  This is reset each time through the loop, so
       * the break condition below works. */
      int force_drain = 0;

      if (nread > 0 && (ret = main_pipe_write (outfd, pipe_buf, nread)))
        {
          if (ret == EPIPE)
            {
              /* This causes the loop to continue reading until nread
               * == 0. */
              skipped += nread;
              force_drain = 1;
            }
          else
            {
              XPR(NT "pipe write failed: %s\n", xd3_mainerror (ret));
              return ret;
            }
        }

      /* A short read means EOF -- unless we are draining after EPIPE. */
      if (nread < pipe_bufsize && !force_drain)
        {
          break;
        }

      /* NOTE(review): other call sites in this file test
       * main_file_read() for any nonzero return; the `< 0` test here
       * would miss a positive errno -- confirm against
       * main_file_read's contract before changing. */
      if ((ret = main_file_read (ifile, pipe_buf, pipe_bufsize,
                                 & nread, "pipe read failed")) < 0)
        {
          return ret;
        }
    }

  if (option_verbose && skipped != 0)
    {
      XPR(NT "skipping %"Q"u bytes in %s\n",
          skipped, ifile->filename);
    }

  return 0;
}
/* This function is called after we have read some amount of data from
 * the input file and detected a compressed input.  Here we start a
 * decompression subprocess by forking twice.  The first process runs
 * the decompression command, the second process copies data to the
 * input of the first.
 *
 * On success, `ifile` is re-pointed at the read end of the
 * decompressor's output pipe and the first `input_bufsize` bytes are
 * read into `input_buf` (count stored via `nread`).  `pipe_buf` holds
 * the `pipe_avail` already-read bytes that the copier child must
 * replay first.  Returns 0 or an errno-style value. */
static int
main_input_decompress_setup (const main_extcomp *decomp,
                             main_file *ifile,
                             uint8_t *input_buf,
                             usize_t input_bufsize,
                             uint8_t *pipe_buf,
                             usize_t pipe_bufsize,
                             usize_t pipe_avail,
                             size_t *nread)
{
  /* The two pipes: input and output file descriptors. */
  int outpipefd[2], inpipefd[2];
  int input_fd = -1;  /* The resulting input_fd (output of decompression). */
  pid_t decomp_id, copier_id;  /* The two subprocs. */
  int ret;

  /* -1 sentinels keep the cleanup path's close() calls harmless
   * (close(-1) simply fails with EBADF). */
  outpipefd[0] = outpipefd[1] = -1;
  inpipefd[0] = inpipefd[1] = -1;

  if (pipe (outpipefd) || pipe (inpipefd))
    {
      XPR(NT "pipe failed: %s\n", xd3_mainerror (ret = get_errno ()));
      goto pipe_cleanup;
    }

  if ((decomp_id = fork ()) < 0)
    {
      XPR(NT "fork failed: %s\n", xd3_mainerror (ret = get_errno ()));
      goto pipe_cleanup;
    }

  /* The first child runs the decompression process: */
  if (decomp_id == 0)
    {
      if (option_verbose > 2)
        {
          XPR(NT "external decompression pid %d\n", getpid ());
        }

      /* Setup pipes: write to the outpipe, read from the inpipe. */
      if (dup2 (outpipefd[PIPE_WRITE_FD], STDOUT_FILENO) < 0 ||
          dup2 (inpipefd[PIPE_READ_FD], STDIN_FILENO) < 0 ||
          close (outpipefd[PIPE_READ_FD]) ||
          close (outpipefd[PIPE_WRITE_FD]) ||
          close (inpipefd[PIPE_READ_FD]) ||
          close (inpipefd[PIPE_WRITE_FD]) ||
          execlp (decomp->decomp_cmdname, decomp->decomp_cmdname,
                  decomp->decomp_options,
                  option_force2 ? "-f" : NULL,
                  NULL))
        {
          XPR(NT "child process %s failed to execute: %s\n",
              decomp->decomp_cmdname, xd3_mainerror (get_errno ()));
        }

      /* 127 is the conventional "exec failed" status. */
      _exit (127);
    }

  /* Register the child so finish/cleanup can reap or kill it. */
  XD3_ASSERT(num_subprocs < MAX_SUBPROCS);
  ext_subprocs[num_subprocs++] = decomp_id;

  if ((copier_id = fork ()) < 0)
    {
      XPR(NT "fork failed: %s\n", xd3_mainerror (ret = get_errno ()));
      goto pipe_cleanup;
    }

  /* The second child runs the copier process: */
  if (copier_id == 0)
    {
      int exitval = 0;

      if (option_verbose > 2)
        {
          XPR(NT "child pipe-copier pid %d\n", getpid ());
        }

      if (close (inpipefd[PIPE_READ_FD]) ||
          close (outpipefd[PIPE_READ_FD]) ||
          close (outpipefd[PIPE_WRITE_FD]) ||
          main_pipe_copier (pipe_buf, pipe_bufsize, pipe_avail,
                            ifile, inpipefd[PIPE_WRITE_FD]) ||
          close (inpipefd[PIPE_WRITE_FD]))
        {
          XPR(NT "child copier process failed: %s\n",
              xd3_mainerror (get_errno ()));
          exitval = 1;
        }

      _exit (exitval);
    }

  XD3_ASSERT(num_subprocs < MAX_SUBPROCS);
  ext_subprocs[num_subprocs++] = copier_id;

  /* The parent closes both pipes after duplicating the output of
   * compression. */
  input_fd = dup (outpipefd[PIPE_READ_FD]);

  if (input_fd < 0 ||
      main_file_close (ifile) ||
      close (outpipefd[PIPE_READ_FD]) ||
      close (outpipefd[PIPE_WRITE_FD]) ||
      close (inpipefd[PIPE_READ_FD]) ||
      close (inpipefd[PIPE_WRITE_FD]))
    {
      XPR(NT "dup/close failed: %s\n", xd3_mainerror (ret = get_errno ()));
      goto pipe_cleanup;
    }

#if XD3_STDIO
  /* Note: fdopen() acquires the fd, closes it when finished. */
  if ((ifile->file = fdopen (input_fd, "r")) == NULL)
    {
      XPR(NT "fdopen failed: %s\n", xd3_mainerror (ret = get_errno ()));
      goto pipe_cleanup;
    }

#elif XD3_POSIX
  ifile->file = input_fd;
#endif

  ifile->compressor = decomp;

  /* Now the input file is decompressed. */
  return main_file_read (ifile, input_buf, input_bufsize,
                         nread, "input decompression failed");

 pipe_cleanup:
  /* Best-effort teardown; any fd still at its -1 sentinel makes the
   * corresponding close() a harmless no-op failure. */
  close (input_fd);
  close (outpipefd[PIPE_READ_FD]);
  close (outpipefd[PIPE_WRITE_FD]);
  close (inpipefd[PIPE_READ_FD]);
  close (inpipefd[PIPE_WRITE_FD]);
  return ret;
}
/* This routine is called when the first buffer of input data is read
 * by the main program (unless input decompression is disabled by
 * command-line option).  If it recognizes the magic number of a known
 * input type it invokes decompression.
 *
 * Skips decompression if the decompression type or the file type is
 * RD_NONEXTERNAL.
 *
 * Behaves exactly like main_file_read, otherwise.
 *
 * This function uses a separate buffer to read the first small block
 * of input.  If a compressed input is detected, the separate buffer
 * is passed to the pipe copier.  This avoids using the same size
 * buffer in both cases. */
static int
main_secondary_decompress_check (main_file *file,
                                 uint8_t *input_buf,
                                 size_t input_size,
                                 size_t *nread)
{
  int ret;
  usize_t i;
  usize_t try_read = min (input_size, XD3_ALLOCSIZE);
  size_t check_nread = 0;
  uint8_t check_buf[XD3_ALLOCSIZE]; /* TODO: stack limit */
  const main_extcomp *decompressor = NULL;

  /* Read the small sniffing block first. */
  if ((ret = main_file_read (file, check_buf,
                             try_read,
                             & check_nread, "input read failed")))
    {
      return ret;
    }

  if (file->flags & RD_DECOMPSET)
    {
      /* This allows the application header to override the magic
       * number, for whatever reason. */
      decompressor = file->compressor;
    }
  else
    {
      /* Otherwise match the sniffed bytes against each known magic. */
      for (i = 0; i < SIZEOF_ARRAY (extcomp_types); i += 1)
        {
          const main_extcomp *decomp = & extcomp_types[i];

          /* NOTE(review): strict `>` means an input exactly
           * magic_size bytes long never matches -- presumably
           * intentional (no payload to decompress); confirm. */
          if (check_nread > decomp->magic_size)
            {
              /* The following expr checks if we are trying to read a
               * VCDIFF input, in which case do not treat it as
               * "secondary" decompression. */
              int skip_this_type = (decomp->flags & RD_NONEXTERNAL) &&
                                   (file->flags & RD_NONEXTERNAL);

              if (skip_this_type)
                {
                  continue;
                }

              if (memcmp (check_buf, decomp->magic, decomp->magic_size) == 0)
                {
                  decompressor = decomp;
                  break;
                }
            }
        }
    }

  if (decompressor != NULL)
    {
      if (! option_quiet)
        {
          XPR(NT "externally compressed input: %s %s%s < %s\n",
              decompressor->decomp_cmdname,
              decompressor->decomp_options,
              (option_force2 ? " -f" : ""),
              file->filename);
          if (file->flags & RD_MAININPUT)
            {
              XPR(NT
  "WARNING: the encoder is automatically decompressing the input file;\n");
              XPR(NT
  "WARNING: the decoder will automatically recompress the output file;\n");
              XPR(NT
  "WARNING: this may result in different compressed data and checksums\n");
              XPR(NT
  "WARNING: despite being identical data; if this is an issue, use -D\n");
              XPR(NT
  "WARNING: to avoid decompression and/or use -R to avoid recompression\n");
              XPR(NT
  "WARNING: and/or manually decompress the input file; if you know the\n");
              XPR(NT
  "WARNING: compression settings that will produce identical output\n");
              XPR(NT
  "WARNING: you may set those flags using the environment (e.g., GZIP=-9)\n");
            }
        }

      /* The decompressed size is unknowable up front. */
      file->size_known = 0;
      return main_input_decompress_setup (decompressor, file,
                                          input_buf, input_size,
                                          check_buf, XD3_ALLOCSIZE,
                                          check_nread, nread);
    }

  /* Now read the rest of the input block. */
  (*nread) = 0;
  if (check_nread == try_read)
    {
      ret = main_file_read (file,
                            input_buf + try_read,
                            input_size - try_read,
                            nread,
                            "input read failed");
    }

  /* Prepend the sniffed bytes so the caller sees one contiguous read. */
  memcpy (input_buf, check_buf, check_nread);

  (*nread) += check_nread;

  return 0;
}
/* Initiate re-compression of the output stream.  This is easier than
 * input decompression because we know beforehand that the stream will
 * be compressed, whereas the input has already been read when we
 * decide it should be decompressed.  Thus, it only requires one
 * subprocess and one pipe.
 *
 * On success, `ofile` is re-pointed at the write end of a pipe whose
 * read end feeds `ofile->compressor`'s recompression command, which
 * in turn writes to the original output fd.  Returns 0 or an
 * errno-style value. */
static int
main_recompress_output (main_file *ofile)
{
  pid_t recomp_id;  /* One subproc. */
  int pipefd[2];    /* One pipe. */
  int output_fd = -1;
  int ret;
  const main_extcomp *recomp = ofile->compressor;

  /* -1 sentinels keep the cleanup path's close() calls harmless. */
  pipefd[0] = pipefd[1] = -1;

  if (pipe (pipefd))
    {
      XPR(NT "pipe failed: %s\n", xd3_mainerror (ret = get_errno ()));
      goto pipe_cleanup;
    }

  if ((recomp_id = fork ()) < 0)
    {
      XPR(NT "fork failed: %s\n", xd3_mainerror (ret = get_errno ()));
      goto pipe_cleanup;
    }

  /* The child runs the recompression process: */
  if (recomp_id == 0)
    {
      if (option_verbose > 2)
        {
          XPR(NT "external recompression pid %d\n", getpid ());
        }

      /* Setup pipes: write to the output file, read from the pipe. */
      if (dup2 (XFNO (ofile), STDOUT_FILENO) < 0 ||
          dup2 (pipefd[PIPE_READ_FD], STDIN_FILENO) < 0 ||
          close (pipefd[PIPE_READ_FD]) ||
          close (pipefd[PIPE_WRITE_FD]) ||
          execlp (recomp->recomp_cmdname, recomp->recomp_cmdname,
                  recomp->recomp_options,
                  option_force2 ? "-f" : NULL,
                  NULL))
        {
          XPR(NT "child process %s failed to execute: %s\n",
              recomp->recomp_cmdname, xd3_mainerror (get_errno ()));
        }

      /* 127 is the conventional "exec failed" status. */
      _exit (127);
    }

  /* Register the child so finish/cleanup can reap or kill it. */
  XD3_ASSERT(num_subprocs < MAX_SUBPROCS);
  ext_subprocs[num_subprocs++] = recomp_id;

  /* The parent closes both pipes after duplicating the output-fd for
   * writing to the compression pipe. */
  output_fd = dup (pipefd[PIPE_WRITE_FD]);

  if (output_fd < 0 ||
      main_file_close (ofile) ||
      close (pipefd[PIPE_READ_FD]) ||
      close (pipefd[PIPE_WRITE_FD]))
    {
      XPR(NT "close failed: %s\n", xd3_mainerror (ret = get_errno ()));
      goto pipe_cleanup;
    }

#if XD3_STDIO
  /* Note: fdopen() acquires the fd, closes it when finished. */
  if ((ofile->file = fdopen (output_fd, "w")) == NULL)
    {
      XPR(NT "fdopen failed: %s\n", xd3_mainerror (ret = get_errno ()));
      goto pipe_cleanup;
    }

#elif XD3_POSIX
  ofile->file = output_fd;
#endif

  /* Now the output file will be compressed. */
  return 0;

 pipe_cleanup:
  close (output_fd);
  close (pipefd[PIPE_READ_FD]);
  close (pipefd[PIPE_WRITE_FD]);
  return ret;
}
#endif /* EXTERNAL_COMPRESSION */
/* Identify the compressor that was used based on its ident string,
* which is passed in the application header. */
static const main_extcomp*
main_ident_compressor (const char *ident)
{
usize_t i;
for (i = 0; i < SIZEOF_ARRAY (extcomp_types); i += 1)
{
if (strcmp (extcomp_types[i].ident, ident) == 0)
{
return & extcomp_types[i];
}
}
return NULL;
}
/* Return the main_extcomp record to use for this identifier, if
 * possible.  Returns NULL (with a warning unless -q) when the ident
 * is unrecognized or when external-compression support was not
 * compiled in. */
static const main_extcomp*
main_get_compressor (const char *ident)
{
  const main_extcomp *ext = main_ident_compressor (ident);

  if (ext == NULL)
    {
      if (! option_quiet)
        {
          XPR(NT "warning: cannot recompress output: "
              "unrecognized external compression ID: %s\n", ident);
        }
      return NULL;
    }

  if (! EXTERNAL_COMPRESSION)
    {
      if (! option_quiet)
        {
          XPR(NT "warning: external support not compiled: "
              "original input was compressed: %s\n", ext->recomp_cmdname);
        }
      return NULL;
    }

  return ext;
}
/*********************************************************************
APPLICATION HEADER
*******************************************************************/
#if XD3_ENCODER
/* Reduce a path to the form recorded in the application header:
 * NULL becomes "", the standard streams become "-", and any other
 * path is stripped to its final component. */
static const char*
main_apphead_string (const char* x)
{
  const char *slash;

  if (x == NULL)
    {
      return "";
    }

  if (strcmp (x, "/dev/stdin") == 0 ||
      strcmp (x, "/dev/stdout") == 0 ||
      strcmp (x, "/dev/stderr") == 0)
    {
      return "-";
    }

  /* TODO: this is not portable ('/' separator assumed). */
  slash = strrchr (x, '/');
  return (slash == NULL) ? x : slash + 1;
}
/* Set the VCDIFF application header on `stream` from the input and
 * source file names/compressors, storing the chosen header in the
 * global `appheader_used`.  Format is "input/icomp" or
 * "input/icomp/source/scomp".  Returns 0 or ENOMEM. */
static int
main_set_appheader (xd3_stream *stream, main_file *input, main_file *sfile)
{
  /* The user may disable the application header.  Once the appheader
   * is set, this disables setting it again. */
  if (appheader_used || ! option_use_appheader) { return 0; }

  /* The user may specify the application header, otherwise format the
     default header. */
  if (option_appheader)
    {
      appheader_used = option_appheader;
    }
  else
    {
      const char *iname;
      const char *icomp;
      const char *sname;
      const char *scomp;
      usize_t len;

      iname = main_apphead_string (input->filename);
      icomp = (input->compressor == NULL) ? "" : input->compressor->ident;
      /* +2 = one '/' separator plus the terminating NUL. */
      len = (usize_t) strlen (iname) + (usize_t) strlen (icomp) + 2;

      if (sfile->filename != NULL)
        {
          sname = main_apphead_string (sfile->filename);
          scomp = (sfile->compressor == NULL) ? "" : sfile->compressor->ident;
          /* +2 more covers the two additional '/' separators. */
          len += (usize_t) strlen (sname) + (usize_t) strlen (scomp) + 2;
        }
      else
        {
          sname = scomp = "";
        }

      if ((appheader_used = (uint8_t*) main_malloc (len)) == NULL)
        {
          return ENOMEM;
        }

      if (sfile->filename == NULL)
        {
          snprintf_func ((char*)appheader_used, len, "%s/%s", iname, icomp);
        }
      else
        {
          snprintf_func ((char*)appheader_used, len, "%s/%s/%s/%s",
                         iname, icomp, sname, scomp);
        }
    }

  xd3_set_appheader (stream, appheader_used,
                     (usize_t) strlen ((char*)appheader_used));

  return 0;
}
#endif
/* Apply one filename/compressor pair parsed from the application
 * header to `file` (the output when `output` != 0, else the source).
 * `parsed[0]` is the recorded filename, `parsed[1]` the compressor
 * ident.  `other` (the main input) supplies a directory prefix for
 * the defaulted name.  `type` is only used in the log message. */
static void
main_get_appheader_params (main_file *file, char **parsed,
                           int output, const char *type,
                           main_file *other)
{
  /* Set the filename if it was not specified.  If output, option_stdout (-c)
   * overrides. */
  if (file->filename == NULL &&
      ! (output && option_stdout) &&
      strcmp (parsed[0], "-") != 0)
    {
      file->filename = parsed[0];

      if (other->filename != NULL) {
        /* Take directory from the other file, if it has one. */
        /* TODO: This results in nonsense names like /dev/foo.tar.gz
         * and probably the filename-default logic interferes with
         * multi-file operation and the standard file extension?
         * Possibly the name header is bad, should be off by default.
         * Possibly we just want to remember external/compression
         * settings. */
        const char *last_slash = strrchr(other->filename, '/');

        if (last_slash != NULL) {
          usize_t dlen = (usize_t) (last_slash - other->filename);

          XD3_ASSERT(file->filename_copy == NULL);
          /* dlen + '/' + basename + NUL fits in dlen + 2 + strlen. */
          file->filename_copy =
            (char*) main_malloc(dlen + 2 + (usize_t) strlen(file->filename));

          /* NOTE(review): main_malloc's result is not checked for NULL
           * before strncpy here, unlike main_set_appheader -- confirm
           * whether main_malloc can return NULL on this path. */
          strncpy(file->filename_copy, other->filename, dlen);
          file->filename_copy[dlen] = '/';
          strcpy(file->filename_copy + dlen + 1, parsed[0]);

          file->filename = file->filename_copy;
        }
      }

      if (! option_quiet)
        {
          XPR(NT "using default %s filename: %s\n", type, file->filename);
        }
    }

  /* Set the compressor, initiate de/recompression later. */
  if (file->compressor == NULL && *parsed[1] != 0)
    {
      file->flags |= RD_DECOMPSET;
      file->compressor = main_get_compressor (parsed[1]);
    }
}
/* Parse the received application header into up to four '/'-separated
 * fields (output name, output compressor, source name, source
 * compressor) and apply them to `output` and `sfile`.  Clears
 * option_use_appheader afterward so the header is processed at most
 * once.  `ifile` supplies the directory default (see
 * main_get_appheader_params). */
static void
main_get_appheader (xd3_stream *stream, main_file *ifile,
                    main_file *output, main_file *sfile)
{
  uint8_t *apphead;
  usize_t appheadsz;
  int ret;

  /* The user may disable the application header.  Once the appheader
   * is set, this disables setting it again. */
  if (! option_use_appheader) { return; }

  ret = xd3_get_appheader (stream, & apphead, & appheadsz);

  /* Ignore failure, it only means we haven't received a header yet. */
  if (ret != 0) { return; }

  if (appheadsz > 0)
    {
      /* NOTE(review): the strchr-based parse below assumes `apphead`
       * is NUL-terminated within the buffer -- confirm that
       * xd3_get_appheader guarantees termination. */
      char *start = (char*)apphead;
      char *slash;
      int place = 0;
      char *parsed[4];

      memset (parsed, 0, sizeof (parsed));

      /* Split in place on '/' into at most 4 fields. */
      while ((slash = strchr (start, '/')) != NULL)
        {
          *slash = 0;
          parsed[place++] = start;
          start = slash + 1;
        }

      parsed[place++] = start;

      /* First take the output parameters. */
      if (place == 2 || place == 4)
        {
          main_get_appheader_params (output, parsed, 1, "output", ifile);
        }

      /* Then take the source parameters. */
      if (place == 4)
        {
          main_get_appheader_params (sfile, parsed+2, 0, "source", ifile);
        }
    }

  option_use_appheader = 0;
  return;
}
/*********************************************************************
 Main I/O routines
 **********************************************************************/

/* Read from a primary input (source or target) like main_file_read,
 * except that on the very first read the EXTERNAL_COMPRESSION code
 * may sniff the buffer for a known compression magic number and, if
 * found, transparently set up external decompression. */
static int
main_read_primary_input (main_file *file,
                         uint8_t *buf,
                         size_t size,
                         size_t *nread)
{
#if EXTERNAL_COMPRESSION
  if (option_decompress_inputs && (file->flags & RD_FIRST) != 0)
    {
      /* Only sniff once per file. */
      file->flags &= ~RD_FIRST;

      return main_secondary_decompress_check (file, buf, size, nread);
    }
#endif

  return main_file_read (file, buf, size, nread, "input read failed");
}
/* Open the main output file, defaulting to standard output when no
 * name was given, refusing to overwrite an existing file unless -f,
 * and initiating external recompression when the application header
 * requested it.  This function is expected to print any error
 * messages.  Returns 0, EEXIST, or an open/recompress error. */
static int
main_open_output (xd3_stream *stream, main_file *ofile)
{
  int ret;

  if (option_no_output)
    {
      return 0;
    }

  if (ofile->filename == NULL)
    {
      /* No name given: write to standard output. */
      XSTDOUT_XF (ofile);

      if (option_verbose > 1)
        {
          XPR(NT "using standard output: %s\n", ofile->filename);
        }
    }
  else
    {
      /* Stat the file to check for overwrite. */
      if (option_force == 0 && main_file_exists (ofile))
        {
          if (!option_quiet)
            {
              XPR(NT "to overwrite output file specify -f: %s\n",
                  ofile->filename);
            }
          return EEXIST;
        }

      if ((ret = main_file_open (ofile, ofile->filename, XO_WRITE)) != 0)
        {
          return ret;
        }

      if (option_verbose > 1)
        {
          XPR(NT "output %s\n", ofile->filename);
        }
    }

#if EXTERNAL_COMPRESSION
  /* Do output recompression. */
  if (ofile->compressor != NULL && option_recompress_outputs == 1)
    {
      if (! option_quiet)
        {
          XPR(NT "externally compressed output: %s %s%s > %s\n",
              ofile->compressor->recomp_cmdname,
              ofile->compressor->recomp_options,
              (option_force2 ? " -f" : ""),
              ofile->filename);
        }

      if ((ret = main_recompress_output (ofile)) != 0)
        {
          return ret;
        }
    }
#endif

  return 0;
}
/* Choose the window size for `ifile`: the configured option_winsize,
 * clamped down to the file's size when it can be stat'd, and never
 * below XD3_ALLOCSIZE. */
static usize_t
main_get_winsize (main_file *ifile) {
  xoff_t file_size = 0;
  usize_t winsize = option_winsize;
  static shortbuf iszbuf;

  /* When the input size is known, a larger window buys nothing. */
  if (main_file_stat (ifile, &file_size) == 0)
    {
      winsize = (usize_t) min(file_size, (xoff_t) winsize);
    }

  winsize = max(winsize, XD3_ALLOCSIZE);

  if (option_verbose > 1)
    {
      XPR(NT "input %s window size %s\n",
          ifile->filename,
          main_format_bcnt (winsize, &iszbuf));
    }

  return winsize;
}
/*********************************************************************
Main routines
********************************************************************/
/* This is a generic input function. It calls the xd3_encode_input or
* xd3_decode_input functions and makes calls to the various input
* handling routines above, which coordinate external decompression.
*/
static int
main_input (xd3_cmd cmd,
main_file *ifile,
main_file *ofile,
main_file *sfile)
{
int ret;
xd3_stream stream;
size_t nread = 0;
usize_t winsize;
int stream_flags = 0;
xd3_config config;
xd3_source source;
xoff_t last_total_in = 0;
xoff_t last_total_out = 0;
long start_time;
int stdout_only = 0;
int (*input_func) (xd3_stream*);
int (*output_func) (xd3_stream*, main_file *);
memset (& stream, 0, sizeof (stream));
memset (& source, 0, sizeof (source));
memset (& config, 0, sizeof (config));
config.alloc = main_alloc;
config.freef = main_free1;
config.iopt_size = option_iopt_size;
config.sprevsz = option_sprevsz;
do_src_fifo = 0;
start_time = get_millisecs_now ();
if (option_use_checksum) { stream_flags |= XD3_ADLER32; }
/* main_input setup. */
switch ((int) cmd)
{
#if VCDIFF_TOOLS
if (1) { case CMD_PRINTHDR: stream_flags |= XD3_JUST_HDR; }
else if (1) { case CMD_PRINTHDRS: stream_flags |= XD3_SKIP_WINDOW; }
else { case CMD_PRINTDELTA: stream_flags |= XD3_SKIP_EMIT; }
ifile->flags |= RD_NONEXTERNAL;
input_func = xd3_decode_input;
output_func = main_print_func;
stream_flags |= XD3_ADLER32_NOVER;
stdout_only = 1;
break;
case CMD_RECODE:
case CMD_MERGE:
case CMD_MERGE_ARG:
/* No source will be read */
stream_flags |= XD3_ADLER32_NOVER | XD3_SKIP_EMIT;
ifile->flags |= RD_NONEXTERNAL;
input_func = xd3_decode_input;
if ((ret = main_init_recode_stream ()))
{
return EXIT_FAILURE;
}
if (cmd == CMD_RECODE) { output_func = main_recode_func; }
else { output_func = main_merge_func; }
break;
#endif /* VCDIFF_TOOLS */
#if XD3_ENCODER
case CMD_ENCODE:
do_src_fifo = 1;
input_func = xd3_encode_input;
output_func = main_write_output;
if (option_no_compress) { stream_flags |= XD3_NOCOMPRESS; }
if (option_use_altcodetable) { stream_flags |= XD3_ALT_CODE_TABLE; }
if (option_smatch_config)
{
const char *s = option_smatch_config;
char *e;
int values[XD3_SOFTCFG_VARCNT];
int got;
config.smatch_cfg = XD3_SMATCH_SOFT;
for (got = 0; got < XD3_SOFTCFG_VARCNT; got += 1, s = e + 1)
{
values[got] = strtol (s, &e, 10);
if ((values[got] < 0) ||
(e == s) ||
(got < XD3_SOFTCFG_VARCNT-1 && *e == 0) ||
(got == XD3_SOFTCFG_VARCNT-1 && *e != 0))
{
XPR(NT "invalid string match specifier (-C) %d: %s\n",
got, s);
return EXIT_FAILURE;
}
}
config.smatcher_soft.large_look = values[0];
config.smatcher_soft.large_step = values[1];
config.smatcher_soft.small_look = values[2];
config.smatcher_soft.small_chain = values[3];
config.smatcher_soft.small_lchain = values[4];
config.smatcher_soft.max_lazy = values[5];
config.smatcher_soft.long_enough = values[6];
}
else
{
if (option_verbose > 2)
{
XPR(NT "compression level: %d\n", option_level);
}
if (option_level == 0)
{
stream_flags |= XD3_NOCOMPRESS;
config.smatch_cfg = XD3_SMATCH_FASTEST;
}
else if (option_level == 1)
{ config.smatch_cfg = XD3_SMATCH_FASTEST; }
else if (option_level == 2)
{ config.smatch_cfg = XD3_SMATCH_FASTER; }
else if (option_level <= 5)
{ config.smatch_cfg = XD3_SMATCH_FAST; }
else if (option_level == 6)
{ config.smatch_cfg = XD3_SMATCH_DEFAULT; }
else
{ config.smatch_cfg = XD3_SMATCH_SLOW; }
}
break;
#endif
case CMD_DECODE:
if (option_use_checksum == 0) { stream_flags |= XD3_ADLER32_NOVER; }
ifile->flags |= RD_NONEXTERNAL;
input_func = xd3_decode_input;
output_func = main_write_output;
break;
default:
XPR(NT "internal error\n");
return EXIT_FAILURE;
}
main_bsize = winsize = main_get_winsize (ifile);
if ((main_bdata = (uint8_t*) main_bufalloc (winsize)) == NULL)
{
return EXIT_FAILURE;
}
config.winsize = winsize;
config.getblk = main_getblk_func;
config.flags = stream_flags;
if ((ret = main_set_secondary_flags (&config)) ||
(ret = xd3_config_stream (& stream, & config)))
{
XPR(NT XD3_LIB_ERRMSG (& stream, ret));
return EXIT_FAILURE;
}
#if VCDIFF_TOOLS
if ((cmd == CMD_MERGE || cmd == CMD_MERGE_ARG) &&
(ret = xd3_whole_state_init (& stream)))
{
XPR(NT XD3_LIB_ERRMSG (& stream, ret));
return EXIT_FAILURE;
}
#endif
if (cmd != CMD_DECODE)
{
/* When not decoding, set source now. The decoder delays this
* step until XD3_GOTHEADER. */
if (sfile && sfile->filename != NULL)
{
if ((ret = main_set_source (& stream, cmd, sfile, & source)))
{
return EXIT_FAILURE;
}
XD3_ASSERT(stream.src != NULL);
}
}
if (cmd == CMD_PRINTHDR ||
cmd == CMD_PRINTHDRS ||
cmd == CMD_PRINTDELTA ||
cmd == CMD_RECODE)
{
if (sfile->filename == NULL)
{
allow_fake_source = 1;
sfile->filename = "<placeholder>";
main_set_source (& stream, cmd, sfile, & source);
}
}
/* This times each window. */
get_millisecs_since ();
/* Main input loop. */
do
{
xoff_t input_offset;
xoff_t input_remain;
usize_t try_read;
input_offset = ifile->nread;
input_remain = XOFF_T_MAX - input_offset;
try_read = (usize_t) min ((xoff_t) config.winsize, input_remain);
if ((ret = main_read_primary_input (ifile, main_bdata,
try_read, & nread)))
{
return EXIT_FAILURE;
}
/* If we've reached EOF tell the stream to flush. */
if (nread < try_read)
{
stream.flags |= XD3_FLUSH;
}
#if XD3_ENCODER
/* After the first main_read_primary_input completes, we know
* all the information needed to encode the application
* header. */
if (cmd == CMD_ENCODE &&
(ret = main_set_appheader (& stream, ifile, sfile)))
{
return EXIT_FAILURE;
}
#endif
xd3_avail_input (& stream, main_bdata, nread);
/* If we read zero bytes after encoding at least one window... */
if (nread == 0 && stream.current_window > 0) {
break;
}
again:
ret = input_func (& stream);
switch (ret)
{
case XD3_INPUT:
continue;
case XD3_GOTHEADER:
{
XD3_ASSERT (stream.current_window == 0);
/* Need to process the appheader as soon as possible. It may
* contain a suggested default filename/decompression routine for
* the ofile, and it may contain default/decompression routine for
* the sources. */
if (cmd == CMD_DECODE)
{
/* May need to set the sfile->filename if none was given. */
main_get_appheader (& stream, ifile, ofile, sfile);
/* Now open the source file. */
if ((sfile->filename != NULL) &&
(ret = main_set_source (& stream, cmd, sfile, & source)))
{
return EXIT_FAILURE;
}
}
}
/* FALLTHROUGH */
case XD3_WINSTART:
{
/* e.g., set or unset XD3_SKIP_WINDOW. */
goto again;
}
case XD3_OUTPUT:
{
/* Defer opening the output file until the stream produces its
* first output for both encoder and decoder, this way we
* delay long enough for the decoder to receive the
* application header. (Or longer if there are skipped
* windows, but I can't think of any reason not to delay
* open.) */
if (ofile != NULL &&
! main_file_isopen (ofile) &&
(ret = main_open_output (& stream, ofile)) != 0)
{
return EXIT_FAILURE;
}
if ((ret = output_func (& stream, ofile)) &&
(ret != PRINTHDR_SPECIAL))
{
return EXIT_FAILURE;
}
if (ret == PRINTHDR_SPECIAL)
{
xd3_abort_stream (& stream);
ret = EXIT_SUCCESS;
goto done;
}
ret = 0;
xd3_consume_output (& stream);
goto again;
}
case XD3_WINFINISH:
{
if (IS_ENCODE (cmd) || cmd == CMD_DECODE || cmd == CMD_RECODE)
{
if (! option_quiet && IS_ENCODE (cmd) &&
main_file_isopen (sfile))
{
/* Warn when no source copies are found */
if (option_verbose && ! xd3_encoder_used_source (& stream))
{
XPR(NT "warning: input window %"Q"u..%"Q"u has "
"no source copies\n",
stream.current_window * winsize,
(stream.current_window+1) * winsize);
XD3_ASSERT (stream.src != NULL);
}
/* Limited i-buffer size affects source copies
* when the sourcewin is decided early. */
if (option_verbose > 1 &&
stream.srcwin_decided_early &&
stream.i_slots_used > stream.iopt_size)
{
XPR(NT "warning: input position %"Q"u overflowed "
"instruction buffer, needed %u (vs. %u), "
"consider changing -I\n",
stream.current_window * winsize,
stream.i_slots_used, stream.iopt_size);
}
}
if (option_verbose)
{
shortbuf rrateavg, wrateavg, tm;
shortbuf rdb, wdb;
shortbuf trdb, twdb;
shortbuf srcpos;
long millis = get_millisecs_since ();
usize_t this_read = (usize_t)(stream.total_in -
last_total_in);
usize_t this_write = (usize_t)(stream.total_out -
last_total_out);
last_total_in = stream.total_in;
last_total_out = stream.total_out;
if (option_verbose > 1)
{
XPR(NT "%"Q"u: in %s (%s): out %s (%s): "
"total in %s: out %s: %s: srcpos %s\n",
stream.current_window,
main_format_bcnt (this_read, &rdb),
main_format_rate (this_read, millis, &rrateavg),
main_format_bcnt (this_write, &wdb),
main_format_rate (this_write, millis, &wrateavg),
main_format_bcnt (stream.total_in, &trdb),
main_format_bcnt (stream.total_out, &twdb),
main_format_millis (millis, &tm),
main_format_bcnt (sfile->source_position, &srcpos));
}
else
{
XPR(NT "%"Q"u: in %s: out %s: total in %s: "
"out %s: %s\n",
stream.current_window,
main_format_bcnt (this_read, &rdb),
main_format_bcnt (this_write, &wdb),
main_format_bcnt (stream.total_in, &trdb),
main_format_bcnt (stream.total_out, &twdb),
main_format_millis (millis, &tm));
}
}
}
goto again;
}
default:
/* input_func() error */
XPR(NT XD3_LIB_ERRMSG (& stream, ret));
if (! option_quiet && ret == XD3_INVALID_INPUT)
{
XPR(NT "normally this indicates that the source file is incorrect\n");
XPR(NT "please verify the source file with sha1sum or equivalent\n");
}
return EXIT_FAILURE;
}
}
while (nread == config.winsize);
done:
/* Close the inputs. (ifile must be open, sfile may be open) */
main_file_close (ifile);
if (sfile != NULL)
{
main_file_close (sfile);
}
#if VCDIFF_TOOLS
if (cmd == CMD_MERGE &&
(ret = main_merge_output (& stream, ofile)))
{
return EXIT_FAILURE;
}
if (cmd == CMD_MERGE_ARG)
{
xd3_swap_whole_state (& stream.whole_target,
& recode_stream->whole_target);
}
#endif /* VCDIFF_TOOLS */
/* If output file is not open yet because of delayed-open, it means
* we never encountered a window in the delta, but it could have had
* a VCDIFF header? TODO: solve this elsewhere. For now, it prints
* "nothing to output" below, but the check doesn't happen in case
* of option_no_output. */
if (! option_no_output && ofile != NULL)
{
if (!stdout_only && ! main_file_isopen (ofile))
{
XPR(NT "nothing to output: %s\n", ifile->filename);
return EXIT_FAILURE;
}
/* Have to close the output before calling
* main_external_compression_finish, or else it hangs. */
if (main_file_close (ofile) != 0)
{
return EXIT_FAILURE;
}
}
#if EXTERNAL_COMPRESSION
if ((ret = main_external_compression_finish ()))
{
XPR(NT "external compression commands failed\n");
return EXIT_FAILURE;
}
#endif
if ((ret = xd3_close_stream (& stream)))
{
XPR(NT XD3_LIB_ERRMSG (& stream, ret));
return EXIT_FAILURE;
}
#if XD3_ENCODER
if (option_verbose > 1 && cmd == CMD_ENCODE)
{
XPR(NT "scanner configuration: %s\n", stream.smatcher.name);
XPR(NT "target hash table size: %u\n", stream.small_hash.size);
if (sfile != NULL && sfile->filename != NULL)
{
XPR(NT "source hash table size: %u\n", stream.large_hash.size);
}
}
if (option_verbose > 2 && cmd == CMD_ENCODE)
{
XPR(NT "source copies: %"Q"u (%"Q"u bytes)\n",
stream.n_scpy, stream.l_scpy);
XPR(NT "target copies: %"Q"u (%"Q"u bytes)\n",
stream.n_tcpy, stream.l_tcpy);
XPR(NT "adds: %"Q"u (%"Q"u bytes)\n", stream.n_add, stream.l_add);
XPR(NT "runs: %"Q"u (%"Q"u bytes)\n", stream.n_run, stream.l_run);
}
#endif
xd3_free_stream (& stream);
if (option_verbose)
{
shortbuf tm;
long end_time = get_millisecs_now ();
xoff_t nwrite = ofile != NULL ? ofile->nwrite : 0;
XPR(NT "finished in %s; input %"Q"u output %"Q"u bytes (%0.2f%%)\n",
main_format_millis (end_time - start_time, &tm),
ifile->nread, nwrite, 100.0 * nwrite / ifile->nread);
}
return EXIT_SUCCESS;
}
/* free memory before exit, reset single-use variables. */
static void
main_cleanup (void)
{
if (appheader_used != NULL &&
appheader_used != option_appheader)
{
main_free (appheader_used);
appheader_used = NULL;
}
main_buffree (main_bdata);
main_bdata = NULL;
main_bsize = 0;
main_lru_cleanup();
if (recode_stream != NULL)
{
xd3_free_stream (recode_stream);
main_free (recode_stream);
recode_stream = NULL;
}
if (merge_stream != NULL)
{
xd3_free_stream (merge_stream);
main_free (merge_stream);
merge_stream = NULL;
}
XD3_ASSERT (main_mallocs == 0);
}
/* Splice arguments taken from the XDELTA environment variable in front
 * of the real command-line arguments (after argv[0]).  On return,
 * (*argc_out, *argv_out) describe the combined argument vector.  The
 * two heap blocks allocated here are handed back through argv_free
 * (the new argv array) and env_free (a writable copy of the variable)
 * so the caller can release them at exit; both are set to NULL when
 * XDELTA is unset. */
static void
setup_environment (int argc,
                   char **argv,
                   int *argc_out,
                   char ***argv_out,
                   char ***argv_free,
                   char **env_free)
{
  int n, i, i0;
  char *p, *v = getenv("XDELTA");

  /* Fast path: no environment variable, use the original argv. */
  if (v == NULL) {
    (*argc_out) = argc;
    (*argv_out) = argv;
    (*argv_free) = NULL;
    (*env_free) = NULL;
    return;
  }

  /* Take a writable copy; the tokens below are split in place.
   * NOTE(review): the main_malloc results here are not checked for
   * NULL before use -- confirm whether main_malloc aborts on OOM. */
  (*env_free) = (char*) main_malloc((usize_t) strlen(v) + 1);
  strcpy(*env_free, v);

  /* Space needed for extra args, at least # of spaces */
  n = argc + 1;
  for (p = *env_free; *p != 0; ) {
    if (*p++ == ' ') {
      n++;
    }
  }

  /* Allocate the combined vector: slot 0 for the program name, the
   * environment tokens, the original args, and a NULL terminator. */
  (*argv_free) = (char**) main_malloc(sizeof(char*) * (n + 1));
  (*argv_out) = (*argv_free);
  (*argv_out)[0] = argv[0];
  (*argv_out)[n] = NULL;

  /* Tokenize the copy on spaces, NUL-terminating each token in place.
   * NOTE(review): a leading space in XDELTA yields an empty first
   * token (empty-string argument). */
  i = 1;
  for (p = *env_free; *p != 0; ) {
    (*argv_out)[i++] = p;
    while (*p != ' ' && *p != 0) {
      p++;
    }
    while (*p == ' ') {
      *p++ = 0;
    }
  }

  /* Append the original arguments after the environment tokens. */
  for (i0 = 1; i0 < argc; i0++) {
    (*argv_out)[i++] = argv[i0];
  }

  /* Counting spaces is an upper bound, argv stays NULL terminated. */
  (*argc_out) = i;
  while (i <= n) {
    (*argv_out)[i++] = NULL;
  }
}
/* Program entry point: parses the command line (plus the XDELTA
 * environment variable), opens the input file, then dispatches to
 * main_input() / main_config() / xd3_selftest() according to the
 * selected command.  Returns EXIT_SUCCESS or EXIT_FAILURE.  When built
 * as a python/SWIG module the symbol is xd3_main_cmdline() instead of
 * main(), and all state is reset so it can be called repeatedly. */
#if PYTHON_MODULE || SWIG_MODULE || NOT_MAIN
int xd3_main_cmdline (int argc, char **argv)
#else
int main (int argc, char **argv)
#endif
{
  /* getopt-style flag string: ':' means the flag takes a mandatory
   * argument, '::' an optional one. */
  static const char *flags =
    "0123456789cdefhnqvDFJNORTVs:m:B:C:E:I:L:O:M:P:W:A::S::";
  xd3_cmd cmd;
  main_file ifile;   /* primary (delta/target) input */
  main_file ofile;   /* output file */
  main_file sfile;   /* source file (-s) */
  main_merge_list merge_order;
  main_merge *merge;
  int my_optind;
  const char *my_optarg;
  const char *my_optstr;
  const char *sfilename;
  int env_argc;
  char **env_argv;
  char **free_argv;  /* malloc() in setup_environment() */
  char *free_value;  /* malloc() in setup_environment() */
  int ret;

#ifdef _WIN32
  GetStartupInfo(&winStartupInfo);
  setvbuf(stderr, NULL, _IONBF, 0);  /* Do not buffer stderr */
#endif

  main_file_init (& ifile);
  main_file_init (& ofile);
  main_file_init (& sfile);
  main_merge_list_init (& merge_order);

  /* Reset all option globals: required for module re-entry. */
  reset_defaults();

  free_argv = NULL;
  free_value = NULL;
  setup_environment(argc, argv, &env_argc, &env_argv,
                    &free_argv, &free_value);
  cmd = CMD_NONE;
  sfilename = NULL;
  my_optind = 1;
  argv = env_argv;
  argc = env_argc;
  program_name = env_argv[0];

  /* Top of the hand-rolled option loop: examine argv[my_optind]. */
 takearg:
  my_optarg = NULL;
  my_optstr = argv[my_optind];

  /* This doesn't use getopt() because it makes trouble for -P & python which
   * reenter main() and thus care about freeing all memory.  I never had much
   * trust for getopt anyway, it's too opaque.  This implements a fairly
   * standard non-long-option getopt with support for named operations (e.g.,
   * "xdelta3 [encode|decode|printhdr...] < in > out"). */
  if (my_optstr)
    {
      if (*my_optstr == '-') { my_optstr += 1; }
      else if (cmd == CMD_NONE) { goto nonflag; }
      else { my_optstr = NULL; }
    }
  while (my_optstr)
    {
      const char *s;
      my_optarg = NULL;
      if ((ret = *my_optstr++) == 0) { my_optind += 1; goto takearg; }

      /* Option handling: first check for one ':' following the option in
       * flags, then check for two.  The syntax allows:
       *
       * 1. -Afoo               defines optarg="foo"
       * 2. -A foo              defines optarg="foo"
       * 3. -A ""               defines optarg="" (allows empty-string)
       * 4. -A [EOA or -moreargs] error (mandatory case)
       * 5. -A [EOA -moreargs]  defines optarg=NULL (optional case)
       * 6. -A=foo              defines optarg="foo"
       * 7. -A=                 defines optarg="" (mandatory case)
       * 8. -A=                 defines optarg=NULL (optional case)
       *
       * See tests in test_command_line_arguments().
       */
      s = strchr (flags, ret);
      if (s && s[1] && s[1] == ':')
        {
          /* option != 0 when the argument is optional ("::"). */
          int option = s[2] && s[2] == ':';

          /* Case 1, set optarg to the remaining characters. */
          my_optarg = my_optstr;
          my_optstr = "";

          /* Case 2-5 */
          if (*my_optarg == 0)
            {
              /* Condition 4-5 */
              int have_arg = (my_optind < (argc - 1) &&
                              *argv[my_optind+1] != '-');

              if (! have_arg)
                {
                  if (! option)
                    {
                      /* Case 4 */
                      XPR(NT "-%c: requires an argument\n", ret);
                      ret = EXIT_FAILURE;
                      goto cleanup;
                    }
                  /* Case 5. */
                  my_optarg = NULL;
                }
              else
                {
                  /* Case 2-3. */
                  my_optarg = argv[++my_optind];
                }
            }
          /* Case 6-8. */
          else if (*my_optarg == '=')
            {
              /* Remove the = in all cases. */
              my_optarg += 1;

              if (option && *my_optarg == 0)
                {
                  /* Case 8. */
                  my_optarg = NULL;
                }
            }
        }

      switch (ret)
        {
        /* case: if no '-' was found, maybe check for a command name. */
        nonflag:
          if (strcmp (my_optstr, "decode") == 0) { cmd = CMD_DECODE; }
          else if (strcmp (my_optstr, "encode") == 0)
            {
#if XD3_ENCODER
              cmd = CMD_ENCODE;
#else
              XPR(NT "encoder support not compiled\n");
              return EXIT_FAILURE;
#endif
            }
          else if (strcmp (my_optstr, "config") == 0) { cmd = CMD_CONFIG; }
#if REGRESSION_TEST
          else if (strcmp (my_optstr, "test") == 0) { cmd = CMD_TEST; }
#endif
#if VCDIFF_TOOLS
          else if (strcmp (my_optstr, "printhdr") == 0) { cmd = CMD_PRINTHDR; }
          else if (strcmp (my_optstr, "printhdrs") == 0)
            { cmd = CMD_PRINTHDRS; }
          else if (strcmp (my_optstr, "printdelta") == 0)
            { cmd = CMD_PRINTDELTA; }
          else if (strcmp (my_optstr, "recode") == 0) { cmd = CMD_RECODE; }
          else if (strcmp (my_optstr, "merge") == 0) { cmd = CMD_MERGE; }
#endif

          /* If no option was found and still no command, let the default
           * command be encode.  The remaining args are treated as
           * filenames. */
          if (cmd == CMD_NONE)
            {
              cmd = CMD_DEFAULT;
              my_optstr = NULL;
              break;
            }
          else
            {
              /* But if we find a command name, continue the getopt loop. */
              my_optind += 1;
              goto takearg;
            }

          /* gzip-like options */
        case '0': case '1': case '2': case '3': case '4':
        case '5': case '6': case '7': case '8': case '9':
          option_level = ret - '0';
          break;
        case 'f': option_force = 1; break;
        case 'F':
#if EXTERNAL_COMPRESSION
          option_force2 = 1;
          /* NOTE(review): there is no break here when
           * EXTERNAL_COMPRESSION is enabled, so -F falls through to
           * the 'v' case and also increments verbosity -- confirm
           * whether this fall-through is intentional. */
#else
          XPR(NT "warning: -F option ignored, "
              "external compression support was not compiled\n");
          break;
#endif
        case 'v': option_verbose += 1; option_quiet = 0; break;
        case 'q': option_quiet = 1; option_verbose = 0; break;
        case 'c': option_stdout = 1; break;
        case 'd':
          if (cmd == CMD_NONE) { cmd = CMD_DECODE; }
          else { ret = main_help (); goto exit; }
          break;
        case 'e':
#if XD3_ENCODER
          if (cmd == CMD_NONE) { cmd = CMD_ENCODE; }
          else { ret = main_help (); goto exit; }
          break;
#else
          XPR(NT "encoder support not compiled\n");
          return EXIT_FAILURE;
#endif
        case 'n': option_use_checksum = 0; break;
        case 'N': option_no_compress = 1; break;
        case 'T': option_use_altcodetable = 1; break;
        case 'C': option_smatch_config = my_optarg; break;
        case 'J': option_no_output = 1; break;
        case 'S': if (my_optarg == NULL)
            {
              /* -S with no argument: disable secondary compression. */
              option_use_secondary = 1;
              option_secondary = "none";
            }
          else
            {
              option_use_secondary = 1;
              option_secondary = my_optarg;
            }
          break;
        case 'A': if (my_optarg == NULL) { option_use_appheader = 0; }
          else { option_appheader = (uint8_t*) my_optarg; } break;
        case 'B': {
          /* Source window size: parsed as xoff_t so 4GB != 0. */
          xoff_t bsize;
          if ((ret = main_atoux (my_optarg, & bsize,
                                 XD3_MINSRCWINSZ, XD3_MAXSRCWINSZ, 'B')))
            {
              goto exit;
            }
          option_srcwinsz = bsize;
          break;
        }
        case 'I':
          if ((ret = main_atou (my_optarg, & option_iopt_size, 0,
                                0, 'I')))
            {
              goto exit;
            }
          break;
        case 'P':
          if ((ret = main_atou (my_optarg, & option_sprevsz, 0,
                                0, 'P')))
            {
              goto exit;
            }
          break;
        case 'W':
          if ((ret = main_atou (my_optarg, & option_winsize, XD3_ALLOCSIZE,
                                XD3_HARDMAXWINSIZE, 'W')))
            {
              goto exit;
            }
          break;
        case 'D':
#if EXTERNAL_COMPRESSION == 0
          if (option_verbose > 0)
            {
              XPR(NT "warning: -D option ignored, "
                  "external compression support was not compiled\n");
            }
#else
          option_decompress_inputs = 0;
#endif
          break;
        case 'R':
#if EXTERNAL_COMPRESSION == 0
          if (option_verbose > 0)
            {
              XPR(NT "warning: -R option ignored, "
                  "external compression support was not compiled\n");
            }
#else
          option_recompress_outputs = 0;
#endif
          break;
        case 's':
          if (sfilename != NULL)
            {
              XPR(NT "specify only one source file\n");
              goto cleanup;
            }

          sfilename = my_optarg;
          break;
        case 'm':
          /* Each -m argument is queued for the merge command. */
          if ((merge = (main_merge*)
               main_malloc (sizeof (main_merge))) == NULL)
            {
              goto cleanup;
            }
          main_merge_list_push_back (& merge_order, merge);
          merge->filename = my_optarg;
          break;
        case 'V':
          ret = main_version (); goto exit;
        default:
          ret = main_help (); goto exit;
        }
    }

  option_source_filename = sfilename;

  /* In case there were no arguments, set the default command. */
  if (cmd == CMD_NONE) { cmd = CMD_DEFAULT; }

  argc -= my_optind;
  argv += my_optind;

  /* There may be up to two more arguments. */
  if (argc > 2)
    {
      XPR(NT "too many filenames: %s ...\n", argv[2]);
      goto cleanup;
    }

  ifile.flags = RD_FIRST | RD_MAININPUT;
  sfile.flags = RD_FIRST;
  sfile.filename = option_source_filename;

  /* The infile takes the next argument, if there is one.  But if not, infile
   * is set to stdin. */
  if (argc > 0)
    {
      ifile.filename = argv[0];

      if ((ret = main_file_open (& ifile, ifile.filename, XO_READ)))
        {
          goto cleanup;
        }
    }
  else
    {
      XSTDIN_XF (& ifile);
    }

  /* The ofile takes the following argument, if there is one.  But if not, it
   * is left NULL until the application header is processed.  It will be set
   * in main_open_output. */
  if (argc > 1)
    {
      /* Check for conflicting arguments. */
      if (option_stdout && ! option_quiet)
        {
          XPR(NT "warning: -c option overrides output filename: %s\n",
              argv[1]);
        }

      if (! option_stdout) { ofile.filename = argv[1]; }
    }

#if VCDIFF_TOOLS
  if (cmd == CMD_MERGE &&
      (ret = main_merge_arguments (&merge_order)))
    {
      goto cleanup;
    }
#endif /* VCDIFF_TOOLS */

  /* Dispatch on the selected command. */
  switch (cmd)
    {
    case CMD_PRINTHDR:
    case CMD_PRINTHDRS:
    case CMD_PRINTDELTA:
#if XD3_ENCODER
    case CMD_ENCODE:
    case CMD_RECODE:
    case CMD_MERGE:
#endif
    case CMD_DECODE:
      ret = main_input (cmd, & ifile, & ofile, & sfile);
      break;

#if REGRESSION_TEST
    case CMD_TEST:
      main_config ();
      ret = xd3_selftest ();
      break;
#endif

    case CMD_CONFIG:
      ret = main_config ();
      break;

    default:
      ret = main_help ();
      break;
    }

  /* Shared exit path: "goto cleanup" forces EXIT_FAILURE, "goto exit"
   * preserves ret; the if(0) keeps the labels out of normal flow. */
  if (0)
    {
    cleanup:
      ret = EXIT_FAILURE;
    exit:
      (void)0;
    }

#if EXTERNAL_COMPRESSION
  main_external_compression_cleanup ();
#endif

  main_file_cleanup (& ifile);
  main_file_cleanup (& ofile);
  main_file_cleanup (& sfile);

  /* Release the queued -m merge arguments. */
  while (! main_merge_list_empty (& merge_order))
    {
      merge = main_merge_list_pop_front (& merge_order);
      main_free (merge);
    }

  main_free (free_argv);
  main_free (free_value);

  main_cleanup ();

  fflush (stdout);
  fflush (stderr);
  return ret;
}
/* Print the usage/help text to stderr via XPR.  Always returns
 * EXIT_FAILURE so callers can "ret = main_help(); goto exit;". */
static int
main_help (void)
{
  main_version();

  /* Note: update wiki when command-line features change */
  XPR(NTR "usage: xdelta3 [command/options] [input [output]]\n");
  XPR(NTR "make patch:\n");
  XPR(NTR "\n");
  XPR(NTR " xdelta3.exe -e -s old_file new_file delta_file\n");
  XPR(NTR "\n");
  XPR(NTR "apply patch:\n");
  XPR(NTR "\n");
  XPR(NTR " xdelta3.exe -d -s old_file delta_file decoded_new_file\n");
  XPR(NTR "\n");
  XPR(NTR "special command names:\n");
  XPR(NTR " config prints xdelta3 configuration\n");
  XPR(NTR " decode decompress the input\n");
  XPR(NTR " encode compress the input%s\n",
      XD3_ENCODER ? "" : " [Not compiled]");
#if REGRESSION_TEST
  XPR(NTR " test run the builtin tests\n");
#endif
#if VCDIFF_TOOLS
  XPR(NTR "special commands for VCDIFF inputs:\n");
  XPR(NTR " printdelta print information about the entire delta\n");
  XPR(NTR " printhdr print information about the first window\n");
  XPR(NTR " printhdrs print information about all windows\n");
  XPR(NTR " recode encode with new application/secondary settings\n");
  XPR(NTR " merge merge VCDIFF inputs (see below)\n");
#endif
  XPR(NTR "merge patches:\n");
  XPR(NTR "\n");
  XPR(NTR " xdelta3 merge -m 1.vcdiff -m 2.vcdiff 3.vcdiff merged.vcdiff\n");
  XPR(NTR "\n");
  XPR(NTR "standard options:\n");
  XPR(NTR " -0 .. -9 compression level\n");
  XPR(NTR " -c use stdout\n");
  XPR(NTR " -d decompress\n");
  XPR(NTR " -e compress%s\n",
      XD3_ENCODER ? "" : " [Not compiled]");
  XPR(NTR " -f force (overwrite, ignore trailing garbage)\n");
#if EXTERNAL_COMPRESSION
  XPR(NTR " -F force the external-compression subprocess\n");
#endif
  XPR(NTR " -h show help\n");
  XPR(NTR " -q be quiet\n");
  XPR(NTR " -v be verbose (max 2)\n");
  XPR(NTR " -V show version\n");

  XPR(NTR "memory options:\n");
  XPR(NTR " -B bytes source window size\n");
  XPR(NTR " -W bytes input window size\n");
  XPR(NTR " -P size compression duplicates window\n");
  XPR(NTR " -I size instruction buffer size (0 = unlimited)\n");

  XPR(NTR "compression options:\n");
  XPR(NTR " -s source source file to copy from (if any)\n");
  XPR(NTR " -S [djw|fgk] enable/disable secondary compression\n");
  XPR(NTR " -N disable small string-matching compression\n");
  XPR(NTR " -D disable external decompression (encode/decode)\n");
  XPR(NTR " -R disable external recompression (decode)\n");
  XPR(NTR " -n disable checksum (encode/decode)\n");
  XPR(NTR " -C soft config (encode, undocumented)\n");
  XPR(NTR " -A [apphead] disable/provide application header (encode)\n");
  XPR(NTR " -J disable output (check/compute only)\n");
  XPR(NTR " -T use alternate code table (test)\n");
  XPR(NTR " -m arguments for \"merge\"\n");

  XPR(NTR "the XDELTA environment variable may contain extra args:\n");
  XPR(NTR " XDELTA=\"-s source-x.y.tar.gz\" \\\n");
  XPR(NTR " tar --use-compress-program=xdelta3 \\\n");
  XPR(NTR " -cf target-x.z.tar.gz.vcdiff target-x.y\n");
  return EXIT_FAILURE;
}
/* ------------------------------------------------------------------ */
/* xdelta3 - delta compression tools and library
* Copyright (C) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008,
* 2009, 2010, 2011, 2012, 2013 Joshua P. MacDonald
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
/* This is all the extra stuff you need for convenience to users in a
* command line application. It contains these major components:
*
* 1. VCDIFF tools 2. external compression support (this is
* POSIX-specific). 3. a general read/write loop that handles all of
* the Xdelta decode/encode/VCDIFF-print functions 4. command-line
* interpreter 5. an Xdelta application header which stores default
* filename, external compression settings 6. output/error printing
* 7. basic file support and OS interface
*/
/* TODO list: 1. do exact gzip-like filename, stdout handling. make a
* .vcdiff extension, refuse to encode to stdout without -cf, etc.
* 2. Allow the user to add a comment string to the app header without
* disturbing the default behavior.
*/
/* On error handling and printing:
*
* The xdelta library sets stream->msg to indicate what condition
* caused an internal failure, but many failures originate here and
* are printed here. The return convention is 0 for success, as
* throughout Xdelta code, but special attention is required here for
* the operating system calls with different error handling. See the
* main_file_* routines. All errors in this file have a message
 * printed at the time of occurrence.  Since some of these calls occur
* within calls to the library, the error may end up being printed
* again with a more general error message.
*/
/*********************************************************************/
#ifndef XD3_POSIX
#define XD3_POSIX 0
#endif
#ifndef XD3_STDIO
#define XD3_STDIO 0
#endif
#ifndef XD3_WIN32
#define XD3_WIN32 0
#endif
#ifndef NOT_MAIN
#define NOT_MAIN 0
#endif
/* Combines xd3_strerror() and strerror() */
const char* xd3_mainerror(int err_num);
#include "xdelta3-internal.h"
/* Wrapper around vsnprintf_func(): formats FMT into STR, which has
 * capacity N.  A negative return from the underlying formatter (the
 * pre-C99 overflow convention) is mapped to N, so callers always see
 * a non-negative count. */
int
xsnprintf_func (char *str, int n, const char *fmt, ...)
{
  int len;
  va_list args;

  va_start (args, fmt);
  len = vsnprintf_func (str, n, fmt, args);
  va_end (args);

  return (len < 0) ? n : len;
}
/* If none are set, default to posix. */
#if (XD3_POSIX + XD3_STDIO + XD3_WIN32) == 0
#undef XD3_POSIX
#define XD3_POSIX 1
#endif
/* Handle externally-compressed inputs. */
#ifndef EXTERNAL_COMPRESSION
#define EXTERNAL_COMPRESSION 1
#endif
#define PRINTHDR_SPECIAL -4378291
/* The number of soft-config variables. */
#define XD3_SOFTCFG_VARCNT 7
/* this is used as in XPR(NT XD3_LIB_ERRMSG (stream, ret)) to print an
* error message from the library. */
#define XD3_LIB_ERRMSG(stream, ret) "%s: %s\n", \
xd3_errstring (stream), xd3_mainerror (ret)
#if XD3_POSIX
#include <unistd.h> /* close, read, write... */
#include <sys/types.h>
#include <fcntl.h>
#endif
#ifndef _WIN32
#include <unistd.h> /* lots */
#include <sys/time.h> /* gettimeofday() */
#include <sys/stat.h> /* stat() and fstat() */
#else
#if defined(_MSC_VER)
#define strtoll _strtoi64
#endif
#include <sys/types.h>
#include <sys/stat.h>
#ifndef WIFEXITED
# define WIFEXITED(stat) (((*((int *) &(stat))) & 0xff) == 0)
#endif
#ifndef WEXITSTATUS
# define WEXITSTATUS(stat) (((*((int *) &(stat))) >> 8) & 0xff)
#endif
#ifndef S_ISREG
//# ifdef S_IFREG
//# define S_ISREG(m) (((m) & S_IFMT) == S_IFREG)
//# else
# define S_ISREG(m) 1
//# endif
#endif /* !S_ISREG */
// For standard input/output handles
static STARTUPINFO winStartupInfo;
#endif
/**********************************************************************
ENUMS and TYPES
*********************************************************************/
/* These flags (mainly pertaining to main_read() operations) are set
* in the main_file->flags variable. All are related to with external
* decompression support.
*
* RD_FIRST causes the external decompression check when the input is
* first read.
*
* RD_NONEXTERNAL disables external decompression for reading a
* compressed input, in the case of Xdelta inputs. Note: Xdelta is
* supported as an external compression type, which makes is the
* reason for this flag. An example to justify this is: to create a
* delta between two files that are VCDIFF-compressed. Two external
* Xdelta decoders are run to supply decompressed source and target
* inputs to the Xdelta encoder. */
/* Per-file flags stored in main_file->flags; see the comment block
 * above for RD_FIRST and RD_NONEXTERNAL. */
typedef enum
{
  RD_FIRST = (1 << 0),       /* run the external-decompression check on first read */
  RD_NONEXTERNAL = (1 << 1), /* disable external decompression (Xdelta inputs) */
  RD_DECOMPSET = (1 << 2),   /* decompression already decided for this file
                              * (presumably set once detection runs -- confirm
                              * in main_read_primary_input) */
  RD_MAININPUT = (1 << 3),   /* this is the primary (delta) input; set on ifile
                              * in main() */
} xd3_read_flags;
/* Main commands. For example, CMD_PRINTHDR is the "xdelta printhdr"
* command. */
/* Main commands.  For example, CMD_PRINTHDR is the "xdelta printhdr"
 * command. */
typedef enum
{
  CMD_NONE = 0,    /* no command parsed yet; replaced by CMD_DEFAULT */
  CMD_PRINTHDR,    /* print information about the first window */
  CMD_PRINTHDRS,   /* print information about all windows */
  CMD_PRINTDELTA,  /* print information about the entire delta */
  CMD_RECODE,      /* re-encode with new application/secondary settings */
  CMD_MERGE_ARG,   /* internal: one merge input; never parsed from the
                    * command line (command-name parsing only sets
                    * CMD_MERGE) */
  CMD_MERGE,       /* merge VCDIFF inputs (-m arguments) */
#if XD3_ENCODER
  CMD_ENCODE,      /* create a delta (requires the encoder) */
#endif
  CMD_DECODE,      /* apply a delta */
  CMD_TEST,        /* run the builtin regression tests */
  CMD_CONFIG,      /* print compile-time configuration */
} xd3_cmd;
#if XD3_ENCODER
#define CMD_DEFAULT CMD_ENCODE
#define IS_ENCODE(cmd) (cmd == CMD_ENCODE)
#else
#define CMD_DEFAULT CMD_DECODE
#define IS_ENCODE(cmd) (0)
#endif
typedef struct _main_merge main_merge;
typedef struct _main_merge_list main_merge_list;
/* Various strings and magic values used to detect and call external
 * compression.  See below for examples. */
struct _main_extcomp
{
  const char *recomp_cmdname;  /* program invoked to recompress output */
  const char *recomp_options;  /* its command-line options */
  const char *decomp_cmdname;  /* program invoked to decompress input */
  const char *decomp_options;  /* its command-line options */
  const char *ident;           /* one-letter identifier (presumably recorded
                                * in the application header -- confirm in
                                * main_get/set_appheader) */
  const char *magic;           /* leading magic bytes of the format */
  usize_t magic_size;          /* number of magic bytes to compare */
  int flags;                   /* reserved; 0 in every current entry */
};
/* Merge state: */
/* Intrusive doubly-linked-list links for the queued -m arguments. */
struct _main_merge_list
{
  main_merge_list *next;
  main_merge_list *prev;
};

/* One -m argument: a VCDIFF input participating in a merge. */
struct _main_merge
{
  const char *filename;   /* delta file named by -m */
  main_merge_list link;   /* position within main()'s merge_order list */
};

XD3_MAKELIST(main_merge_list,main_merge,link);
/* TODO: really need to put options in a struct so that internal
* callers can easily reset state. */
#define DEFAULT_VERBOSE 0
/* Program options: various command line flags and options. */
static int option_stdout = 0;                 /* -c: write output to stdout */
static int option_force = 0;                  /* -f: overwrite, ignore trailing garbage */
static int option_verbose = DEFAULT_VERBOSE;  /* -v: increment per occurrence */
static int option_quiet = 0;                  /* -q: suppress warnings */
static int option_use_appheader = 1;          /* -A with no arg clears this */
static uint8_t* option_appheader = NULL;      /* -A <apphead>: caller-supplied header */
static int option_use_secondary = 0;          /* -S: secondary compression selected */
static const char* option_secondary = NULL;   /* -S arg; "none" disables */
static int option_use_checksum = 1;           /* -n clears this */
static int option_use_altcodetable = 0;       /* -T: alternate code table (test) */
static const char* option_smatch_config = NULL; /* -C: soft matcher config */
static int option_no_compress = 0;            /* -N: disable small string-matching */
static int option_no_output = 0;              /* -J: do not write output */
static const char *option_source_filename = NULL; /* -s: source file */

static int option_level = XD3_DEFAULT_LEVEL;          /* -0 .. -9 */
static usize_t option_iopt_size = XD3_DEFAULT_IOPT_SIZE; /* -I */
static usize_t option_winsize = XD3_DEFAULT_WINSIZE;     /* -W */

/* Note: option_srcwinsz is restricted from [16Kb, 4Gb], because
 * addresses in the large hash checksum are 32 bits.  The flag is read
 * as xoff_t, so that 4Gb != 0. */
static xoff_t option_srcwinsz = XD3_DEFAULT_SRCWINSZ;    /* -B */
static usize_t option_sprevsz = XD3_DEFAULT_SPREVSZ;     /* -P */

/* These variables are supressed to avoid their use w/o support.  main() warns
 * appropriately when external compression is not enabled. */
#if EXTERNAL_COMPRESSION
static int num_subprocs = 0;               /* count of spawned subprocesses */
static int option_force2 = 0;              /* -F: force the subprocess */
static int option_decompress_inputs = 1;   /* -D clears this */
static int option_recompress_outputs = 1;  /* -R clears this */
#endif

/* This is for comparing "printdelta" output without attention to
 * copy-instruction modes. */
#if VCDIFF_TOOLS
static int option_print_cpymode = 1; /* Note: see reset_defaults(). */
#endif

/* Static variables */
IF_DEBUG(static int main_mallocs = 0;)  /* debug: outstanding main_malloc count */
static char* program_name = NULL;       /* argv[0], set in main() */
static uint8_t* appheader_used = NULL;  /* app header actually used; freed in main_cleanup */
static uint8_t* main_bdata = NULL;      /* primary input buffer */
static usize_t main_bsize = 0;          /* size of main_bdata */

/* Hacks for VCDIFF tools, recode command. */
static int allow_fake_source = 0;

/* recode_stream is used by both recode/merge for reading vcdiff inputs */
static xd3_stream *recode_stream = NULL;

/* merge_stream is used by merge commands for storing the source encoding */
static xd3_stream *merge_stream = NULL;
/* This array of compressor types is compiled even if EXTERNAL_COMPRESSION is
* false just so the program knows the mapping of IDENT->NAME. */
static main_extcomp extcomp_types[] =
{
{ "bzip2", "-c", "bzip2", "-dc", "B", "BZh", 3, 0 },
{ "gzip", "-c", "gzip", "-dc", "G", "\037\213", 2, 0 },
{ "compress", "-c", "uncompress", "-c", "Z", "\037\235", 2, 0 },
/* Xz is lzma with a magic number http://tukaani.org/xz/format.html */
{ "xz", "-c", "xz", "-dc", "Y", "\xfd\x37\x7a\x58\x5a\x00", 2, 0 },
};
static int main_input (xd3_cmd cmd, main_file *ifile,
main_file *ofile, main_file *sfile);
static void main_get_appheader (xd3_stream *stream, main_file *ifile,
main_file *output, main_file *sfile);
static int main_getblk_func (xd3_stream *stream,
xd3_source *source,
xoff_t blkno);
static void main_free (void *ptr);
static void* main_malloc (size_t size);
static int main_file_stat (main_file *xfile, xoff_t *size);
static int main_file_seek (main_file *xfile, xoff_t pos);
static int main_read_primary_input (main_file *file,
uint8_t *buf,
size_t size,
size_t *nread);
static const char* main_format_bcnt (xoff_t r, shortbuf *buf);
static int main_help (void);
#if XD3_ENCODER
static int xd3_merge_input_output (xd3_stream *stream,
xd3_whole_state *source);
#endif
/* The code in xdelta3-blk.h is essentially part of this unit, see
* comments there. */
#include "xdelta3-blkcache.h"
/* Optional hook: when non-NULL, formatted messages are delivered to
 * this callback instead of being written to stderr. */
void (*xprintf_message_func)(const char*msg) = NULL;

/* Format a message and deliver it to xprintf_message_func, or write it
 * to stderr.  Messages longer than the internal buffer are truncated. */
void
xprintf (const char *fmt, ...)
{
  char buf[1000];
  va_list a;
  int size;

  va_start (a, fmt);
  size = vsnprintf_func (buf, sizeof(buf), fmt, a);
  va_end (a);

  /* C99 vsnprintf returns the would-be length when the output is
   * truncated -- which may exceed the buffer -- and a negative value
   * on error.  Clamp both cases so the fwrite below never reads past
   * the buffer (the previous code passed the unclamped size, an
   * out-of-bounds read on truncation). */
  if (size < 0 || size >= (int) sizeof(buf))
    {
      size = sizeof(buf) - 1;
      buf[size] = 0;
    }

  if (xprintf_message_func != NULL)
    {
      xprintf_message_func (buf);
    }
  else
    {
      size_t ignore = fwrite (buf, 1, size, stderr);
      (void) ignore;
    }
}
/* Print the version and license banner to stderr via XPR.  Returns
 * EXIT_SUCCESS so it can be used directly as the -V handler. */
static int
main_version (void)
{
  /* $Format: " XPR(NTR \"Xdelta version $Xdelta3Version$, Copyright (C) 2007, 2008, 2009, 2010, 2011, 2012, 2013, Joshua MacDonald\\n\");" $ */
  XPR(NTR "Xdelta version 3.0.8, Copyright (C) 2007, 2008, 2009, 2010, 2011, 2012, 2013 Joshua MacDonald\n");
  XPR(NTR "Xdelta comes with ABSOLUTELY NO WARRANTY.\n");
  XPR(NTR "This is free software, and you are welcome to redistribute it\n");
  XPR(NTR "under certain conditions; see \"COPYING\" for details.\n");
  return EXIT_SUCCESS;
}
/* Print the compile-time configuration (the "xdelta3 config" command):
 * feature switches, default sizes, and primitive-type sizes.  Returns
 * EXIT_SUCCESS. */
static int
main_config (void)
{
  main_version ();

  XPR(NTR "EXTERNAL_COMPRESSION=%d\n", EXTERNAL_COMPRESSION);
  XPR(NTR "GENERIC_ENCODE_TABLES=%d\n", GENERIC_ENCODE_TABLES);
  XPR(NTR "GENERIC_ENCODE_TABLES_COMPUTE=%d\n", GENERIC_ENCODE_TABLES_COMPUTE);
  XPR(NTR "REGRESSION_TEST=%d\n", REGRESSION_TEST);
  XPR(NTR "SECONDARY_DJW=%d\n", SECONDARY_DJW);
  XPR(NTR "SECONDARY_FGK=%d\n", SECONDARY_FGK);
  XPR(NTR "SECONDARY_LZMA=%d\n", SECONDARY_LZMA);
  XPR(NTR "UNALIGNED_OK=%d\n", UNALIGNED_OK);
  XPR(NTR "VCDIFF_TOOLS=%d\n", VCDIFF_TOOLS);
  XPR(NTR "XD3_ALLOCSIZE=%d\n", XD3_ALLOCSIZE);
  XPR(NTR "XD3_DEBUG=%d\n", XD3_DEBUG);
  XPR(NTR "XD3_ENCODER=%d\n", XD3_ENCODER);
  XPR(NTR "XD3_POSIX=%d\n", XD3_POSIX);
  XPR(NTR "XD3_STDIO=%d\n", XD3_STDIO);
  XPR(NTR "XD3_WIN32=%d\n", XD3_WIN32);
  XPR(NTR "XD3_USE_LARGEFILE64=%d\n", XD3_USE_LARGEFILE64);
  XPR(NTR "XD3_DEFAULT_LEVEL=%d\n", XD3_DEFAULT_LEVEL);
  XPR(NTR "XD3_DEFAULT_IOPT_SIZE=%d\n", XD3_DEFAULT_IOPT_SIZE);
  XPR(NTR "XD3_DEFAULT_SPREVSZ=%d\n", XD3_DEFAULT_SPREVSZ);
  XPR(NTR "XD3_DEFAULT_SRCWINSZ=%d\n", XD3_DEFAULT_SRCWINSZ);
  XPR(NTR "XD3_DEFAULT_WINSIZE=%d\n", XD3_DEFAULT_WINSIZE);
  XPR(NTR "XD3_HARDMAXWINSIZE=%d\n", XD3_HARDMAXWINSIZE);
  XPR(NTR "sizeof(void*)=%d\n", (int)sizeof(void*));
  XPR(NTR "sizeof(int)=%d\n", (int)sizeof(int));
  XPR(NTR "sizeof(size_t)=%d\n", (int)sizeof(size_t));
  XPR(NTR "sizeof(uint32_t)=%d\n", (int)sizeof(uint32_t));
  XPR(NTR "sizeof(uint64_t)=%d\n", (int)sizeof(uint64_t));
  XPR(NTR "sizeof(usize_t)=%d\n", (int)sizeof(usize_t));
  XPR(NTR "sizeof(xoff_t)=%d\n", (int)sizeof(xoff_t));

  return EXIT_SUCCESS;
}
/* Reset every command-line option and single-use global to its default
 * value.  Called at the top of main() so that module re-entry
 * (python/SWIG) starts from a clean state. */
static void
reset_defaults(void)
{
  option_stdout = 0;
  option_force = 0;
  option_verbose = DEFAULT_VERBOSE;
  option_quiet = 0;
  option_appheader = NULL;
  option_use_secondary = 0;
  option_secondary = NULL;
  option_use_altcodetable = 0;
  /* FIX: option_smatch_config was assigned NULL twice in this
   * function; the redundant second assignment has been removed. */
  option_smatch_config = NULL;
  option_no_compress = 0;
  option_no_output = 0;
  option_source_filename = NULL;
  program_name = NULL;
  appheader_used = NULL;
  main_bdata = NULL;
  main_bsize = 0;
  allow_fake_source = 0;
  main_lru_reset();
  option_use_appheader = 1;
  option_use_checksum = 1;
#if EXTERNAL_COMPRESSION
  option_force2 = 0;
  option_decompress_inputs = 1;
  option_recompress_outputs = 1;
  num_subprocs = 0;
#endif
#if VCDIFF_TOOLS
  option_print_cpymode = 1;
#endif
  option_level = XD3_DEFAULT_LEVEL;
  option_iopt_size = XD3_DEFAULT_IOPT_SIZE;
  option_winsize = XD3_DEFAULT_WINSIZE;
  option_srcwinsz = XD3_DEFAULT_SRCWINSZ;
  option_sprevsz = XD3_DEFAULT_SPREVSZ;
}
/* Allocate SIZE bytes with malloc().  Prints a diagnostic (but does
 * not abort) on failure.  Returns the new block or NULL. */
static void*
main_malloc1 (size_t size)
{
  void *block = malloc (size);

  if (block == NULL)
    {
      XPR(NT "malloc: %s\n", xd3_mainerror (ENOMEM));
    }
  return block;
}
/* Allocate a (typically large) I/O buffer of SIZE bytes.  On Win32
 * this uses VirtualAlloc for page-aligned, committed memory; elsewhere
 * it falls back to main_malloc1().  Free with main_buffree(). */
void* main_bufalloc (size_t size) {
#if XD3_WIN32
  return VirtualAlloc(NULL, size, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
#else
  return main_malloc1(size);
#endif
}
/* Tracked variant of main_malloc1(): in debug builds, successful
 * allocations increment the outstanding-allocation counter so
 * main_free() can verify balance. */
static void*
main_malloc (size_t size)
{
  void *block = main_malloc1 (size);

  if (block != NULL)
    {
      IF_DEBUG (main_mallocs += 1);
    }
  return block;
}
/* Allocator callback handed to the xdelta3 library (matches its
 * alloc signature; OPAQUE is unused).
 * BUGFIX: guard the items*size multiplication against size_t overflow
 * before allocating, instead of silently allocating a wrapped-around
 * (too small) block. */
static void*
main_alloc (void *opaque,
	    size_t items,
	    usize_t size)
{
  if (size != 0 && items > ((size_t) -1) / size)
    {
      return NULL;
    }
  return main_malloc1 (items * size);
}
/* Free callback handed to the xdelta3 library; OPAQUE is unused. */
static void
main_free1 (void *opaque, void *ptr)
{
  (void) opaque;
  free (ptr);
}
/* Release a block obtained from main_malloc().  NULL is a no-op.  In
 * debug builds the outstanding-allocation counter is decremented and
 * checked for underflow. */
static void
main_free (void *ptr)
{
  if (ptr == NULL)
    {
      return;
    }
  IF_DEBUG (main_mallocs -= 1);
  main_free1 (NULL, ptr);
  IF_DEBUG (XD3_ASSERT(main_mallocs >= 0));
}
/* Release a buffer obtained from main_bufalloc(), using the matching
 * platform mechanism (VirtualFree on Win32, free elsewhere). */
void main_buffree (void *ptr) {
#if XD3_WIN32
  VirtualFree(ptr, 0, MEM_RELEASE);
#else
  main_free1(NULL, ptr);
#endif
}
/* This ensures that (ret = errno) always indicates failure, in case errno was
 * accidentally not set. If this prints there's a bug somewhere. */
static int
get_errno (void)
{
#ifndef _WIN32
  if (errno == 0)
    {
      XPR(NT "you found a bug: expected errno != 0\n");
      errno = XD3_INTERNAL;
    }
  return errno;
#else
  DWORD err_num = GetLastError();
  /* Same guarantee on Win32: never report "no error" as an error. */
  return (err_num == NO_ERROR) ? XD3_INTERNAL : err_num;
#endif
}
/* Translate an error code into a printable message.  Library codes
 * are resolved by xd3_strerror(); anything it does not recognize
 * falls back to the system error text (strerror / FormatMessage). */
const char*
xd3_mainerror(int err_num) {
#ifndef _WIN32
  const char* x = xd3_strerror (err_num);
  if (x != NULL)
    {
      return x;
    }
  return strerror(err_num);
#else
  /* NOTE: the formatted message lives in a static buffer, so this
   * path is not reentrant. */
  static char err_buf[256];
  const char* x = xd3_strerror (err_num);
  if (x != NULL)
    {
      return x;
    }
  memset (err_buf, 0, 256);
  FormatMessage (FORMAT_MESSAGE_FROM_SYSTEM |
		 FORMAT_MESSAGE_IGNORE_INSERTS,
		 NULL, err_num,
		 MAKELANGID (LANG_NEUTRAL, SUBLANG_DEFAULT),
		 err_buf, 256, NULL);
  /* Strip the trailing newline FormatMessage appends, if any. */
  if (err_buf[0] != 0 && err_buf[strlen(err_buf) - 1] == '\n')
    {
      err_buf[strlen(err_buf) - 1] = 0;
    }
  return err_buf;
#endif
}
/* Current wall-clock time in milliseconds.  The epoch differs by
 * platform, so the value is only meaningful for computing deltas. */
static long
get_millisecs_now (void)
{
#ifndef _WIN32
  struct timeval tv;
  gettimeofday (& tv, NULL);
  return (tv.tv_sec) * 1000L + (tv.tv_usec) / 1000;
#else
  SYSTEMTIME st;
  FILETIME ft;
  __int64 *pi = (__int64*)&ft;
  GetLocalTime(&st);
  SystemTimeToFileTime(&st, &ft);
  /* FILETIME counts 100ns intervals; divide down to milliseconds. */
  return (long)((*pi) / 10000);
#endif
}
/* Milliseconds elapsed since the previous call; the first call
 * returns the absolute clock value (static state starts at 0).
 * Always >= 1 millisec, right? */
static long
get_millisecs_since (void)
{
  static long last = 0;
  long current = get_millisecs_now ();
  long elapsed = current - last;

  last = current;
  return elapsed;
}
/* Format byte count R into BUF using binary (1024-based) units,
 * picking the unit so the printed mantissa keeps 2-3 significant
 * digits.  Returns buf->buf. */
static const char*
main_format_bcnt (xoff_t r, shortbuf *buf)
{
  static const char* fmts[] = { "B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB" };
  usize_t i;

  for (i = 0; i < SIZEOF_ARRAY(fmts) - 1; i += 1)
    {
      xoff_t new_r;

      if (r == 0)
	{
	  short_sprintf (*buf, "0 %s", fmts[i]);
	  return buf->buf;
	}

      if (r >= 1 && r < 10)
	{
	  short_sprintf (*buf, "%.2f %s", (double) r, fmts[i]);
	  return buf->buf;
	}

      if (r >= 10 && r < 100)
	{
	  short_sprintf (*buf, "%.1f %s", (double) r, fmts[i]);
	  return buf->buf;
	}

      if (r >= 100 && r < 1000)
	{
	  short_sprintf (*buf, "%"Q"u %s", r, fmts[i]);
	  return buf->buf;
	}

      new_r = r / 1024;

      /* Peek at the next-larger unit: if it would print with few
       * digits, emit a fractional value in that unit right away. */
      if (new_r < 10)
	{
	  short_sprintf (*buf, "%.2f %s", (double) r / 1024.0, fmts[i + 1]);
	  return buf->buf;
	}

      if (new_r < 100)
	{
	  short_sprintf (*buf, "%.1f %s", (double) r / 1024.0, fmts[i + 1]);
	  return buf->buf;
	}

      r = new_r;
    }
  /* Unreachable: every xoff_t value is caught by a case above before
   * the unit table is exhausted. */
  XD3_ASSERT (0);
  return "";
}
/* Format a transfer rate (BYTES over MILLIS milliseconds) into BUF as
 * a scaled "<count>/s" string.  Returns buf->buf.
 * NOTE: uses a static scratch shortbuf, so this is not reentrant. */
static char*
main_format_rate (xoff_t bytes, long millis, shortbuf *buf)
{
  xoff_t r = (xoff_t)(1.0 * bytes / (1.0 * millis / 1000.0));
  static shortbuf lbuf;

  main_format_bcnt (r, &lbuf);
  short_sprintf (*buf, "%s/s", lbuf.buf);
  return buf->buf;
}
/* Format a duration MILLIS into BUF: "<n> ms" below one second,
 * "<n.n> sec" below ten seconds, whole seconds above.  Returns
 * buf->buf.
 * BUGFIX: millis is a signed long, so the conversions use %ld; the
 * previous %lu was a format/argument type mismatch. */
static char*
main_format_millis (long millis, shortbuf *buf)
{
  if (millis < 1000)
    {
      short_sprintf (*buf, "%ld ms", millis);
    }
  else if (millis < 10000)
    {
      short_sprintf (*buf, "%.1f sec", millis / 1000.0);
    }
  else
    {
      short_sprintf (*buf, "%ld sec", millis / 1000L);
    }
  return buf->buf;
}
/* A safe version of strtol for xoff_t.  Parses S (base auto-detected)
 * into *XO.  WHICH is the option letter used in error messages.
 * Rejects negative values, trailing garbage, and (BUGFIX) values that
 * overflow the conversion: errno/ERANGE is now checked, where the old
 * code silently saturated at LONG_MAX/LLONG_MAX.
 * Returns 0 on success, EXIT_FAILURE on any parse error. */
static int
main_strtoxoff (const char* s, xoff_t *xo, char which)
{
  char *e;
  xoff_t x;

  XD3_ASSERT(s && *s != 0);

  {
#if SIZEOF_XOFF_T == 4
    long xx;
#else
    long long xx;
#endif

    errno = 0;
#if SIZEOF_XOFF_T == 4
    xx = strtol (s, &e, 0);
#else
    xx = strtoll (s, &e, 0);
#endif

    if (errno == ERANGE)
      {
	XPR(NT "-%c: integer out of range: %s\n", which, s);
	return EXIT_FAILURE;
      }

    if (xx < 0)
      {
	XPR(NT "-%c: negative integer: %s\n", which, s);
	return EXIT_FAILURE;
      }

    x = xx;
  }

  if (*e != 0)
    {
      XPR(NT "-%c: invalid integer: %s\n", which, s);
      return EXIT_FAILURE;
    }

  (*xo) = x;
  return 0;
}
/* Parse ARG into *XO and range-check it: the value must be >= LOW,
 * and <= HIGH unless HIGH is 0 (no upper bound).  WHICH is the option
 * letter for error messages.  Returns 0 or EXIT_FAILURE. */
static int
main_atoux (const char* arg, xoff_t *xo, xoff_t low,
	    xoff_t high, char which)
{
  xoff_t value;
  int ret = main_strtoxoff (arg, &value, which);

  if (ret != 0)
    {
      return ret;
    }

  if (value < low)
    {
      XPR(NT "-%c: minimum value: %"Q"u\n", which, low);
      return EXIT_FAILURE;
    }

  if (high != 0 && value > high)
    {
      XPR(NT "-%c: maximum value: %"Q"u\n", which, high);
      return EXIT_FAILURE;
    }

  (*xo) = value;
  return 0;
}
/* usize_t wrapper around main_atoux(): same parsing and range checks,
 * result narrowed into *UO.  Returns 0 or EXIT_FAILURE. */
static int
main_atou (const char* arg, usize_t *uo, usize_t low,
	   usize_t high, char which)
{
  xoff_t wide;
  int ret = main_atoux (arg, &wide, low, high, which);

  if (ret == 0)
    {
      *uo = (usize_t) wide;
    }
  return ret;
}
/******************************************************************
 FILE BASICS
 ******************************************************************/

/* With all the variation in file system-call semantics, arguments,
 * return values and error-handling for the POSIX and STDIO file APIs,
 * the insides of these functions make me sick, which is why these
 * wrappers exist. */

/* Open-mode helpers: map a main_file's XO_READ/XO_WRITE mode onto the
 * arguments of each underlying file API. */
#define XOPEN_OPNAME (xfile->mode == XO_READ ? "read" : "write")
#define XOPEN_STDIO (xfile->mode == XO_READ ? "rb" : "wb")
#define XOPEN_POSIX (xfile->mode == XO_READ ? \
		     O_RDONLY : O_WRONLY | O_CREAT | O_TRUNC)
#define XOPEN_MODE (xfile->mode == XO_READ ? 0 : 0666)

/* Uniform error report for a failed file operation (silent with -q). */
#define XF_ERROR(op, name, ret) \
  do { if (!option_quiet) { XPR(NT "file %s failed: %s: %s: %s\n", (op), \
				XOPEN_OPNAME, (name), xd3_mainerror (ret)); } } while (0)

/* Per-discipline accessors: the descriptor number of a main_file, and
 * macros binding a main_file to the process's standard input/output. */
#if XD3_STDIO
#define XFNO(f) fileno(f->file)
#define XSTDOUT_XF(f) { (f)->file = stdout; (f)->filename = "/dev/stdout"; }
#define XSTDIN_XF(f) { (f)->file = stdin; (f)->filename = "/dev/stdin"; }

#elif XD3_POSIX
#define XFNO(f) f->file
#define XSTDOUT_XF(f) \
  { (f)->file = STDOUT_FILENO; (f)->filename = "/dev/stdout"; }
#define XSTDIN_XF(f) \
  { (f)->file = STDIN_FILENO; (f)->filename = "/dev/stdin"; }

#elif XD3_WIN32
#define XFNO(f) -1
#define XSTDOUT_XF(f) { \
  (f)->file = GetStdHandle(STD_OUTPUT_HANDLE); \
  (f)->filename = "(stdout)"; \
  }
#define XSTDIN_XF(f) { \
  (f)->file = GetStdHandle(STD_INPUT_HANDLE); \
  (f)->filename = "(stdin)"; \
  }
#endif
/* Zero a main_file and set the platform-specific "not open" sentinel
 * on the file handle (STDIO relies on the memset's NULL). */
void
main_file_init (main_file *xfile)
{
  memset (xfile, 0, sizeof (*xfile));

#if XD3_POSIX
  xfile->file = -1;
#endif
#if XD3_WIN32
  xfile->file = INVALID_HANDLE_VALUE;
#endif
}
/* Return nonzero iff XFILE currently holds an open handle (i.e. its
 * handle differs from the per-platform sentinel). */
int
main_file_isopen (main_file *xfile)
{
#if XD3_STDIO
  return xfile->file != NULL;

#elif XD3_POSIX
  return xfile->file != -1;

#elif XD3_WIN32
  return xfile->file != INVALID_HANDLE_VALUE;
#endif
}
/* Close XFILE if it is open and reset its handle to the "not open"
 * sentinel.  Closing an already-closed file is a no-op.  Returns 0 on
 * success, otherwise an errno-style code (also reported via
 * XF_ERROR). */
int
main_file_close (main_file *xfile)
{
  int ret = 0;

  if (! main_file_isopen (xfile))
    {
      return 0;
    }

#if XD3_STDIO
  ret = fclose (xfile->file);
  xfile->file = NULL;

#elif XD3_POSIX
  ret = close (xfile->file);
  xfile->file = -1;

#elif XD3_WIN32
  if (!CloseHandle(xfile->file)) {
    ret = get_errno ();
  }
  xfile->file = INVALID_HANDLE_VALUE;
#endif

  if (ret != 0) { XF_ERROR ("close", xfile->filename, ret = get_errno ()); }
  return ret;
}
/* Release everything a main_file owns: the open handle (if any) plus
 * the lazily-allocated print buffer and filename copy. */
void
main_file_cleanup (main_file *xfile)
{
  XD3_ASSERT (xfile != NULL);

  if (main_file_isopen (xfile))
    {
      main_file_close (xfile);
    }

  if (xfile->snprintf_buf != NULL)
    {
      main_free(xfile->snprintf_buf);
      xfile->snprintf_buf = NULL;
    }

  if (xfile->filename_copy != NULL)
    {
      main_free(xfile->filename_copy);
      xfile->filename_copy = NULL;
    }
}
/* Open NAME on XFILE for reading or writing (MODE is XO_READ or
 * XO_WRITE) via the compiled-in file discipline.  Write mode creates
 * or truncates; on Win32, overwriting an existing file requires
 * option_force (-f), otherwise CREATE_NEW fails.  On success records
 * the real name and resets the read counter.  Returns 0 or an error
 * code (reported via XF_ERROR). */
int
main_file_open (main_file *xfile, const char* name, int mode)
{
  int ret = 0;

  xfile->mode = mode;

  XD3_ASSERT (name != NULL);
  XD3_ASSERT (! main_file_isopen (xfile));
  if (name[0] == 0)
    {
      XPR(NT "invalid file name: empty string\n");
      return XD3_INVALID;
    }

#if XD3_STDIO
  xfile->file = fopen (name, XOPEN_STDIO);
  ret = (xfile->file == NULL) ? get_errno () : 0;

#elif XD3_POSIX
  /* TODO: Should retry this call if interrupted, similar to read/write */
  if ((ret = open (name, XOPEN_POSIX, XOPEN_MODE)) < 0)
    {
      ret = get_errno ();
    }
  else
    {
      xfile->file = ret;
      ret = 0;
    }

#elif XD3_WIN32
  xfile->file = CreateFile(name,
			   (mode == XO_READ) ? GENERIC_READ : GENERIC_WRITE,
			   FILE_SHARE_READ,
			   NULL,
			   (mode == XO_READ) ?
			   OPEN_EXISTING :
			   (option_force ? CREATE_ALWAYS : CREATE_NEW),
			   FILE_ATTRIBUTE_NORMAL,
			   NULL);
  if (xfile->file == INVALID_HANDLE_VALUE)
    {
      ret = get_errno ();
    }
#endif
  if (ret) { XF_ERROR ("open", name, ret); }
  else     { xfile->realname = name; xfile->nread = 0; }
  return ret;
}
/* Store the size of the open XFILE in *SIZE.  Fails (nonzero return)
 * for non-regular files: pipes and the like return -1/ESPIPE so the
 * caller falls back to streaming.
 * BUGFIX: the pre-0x0500 Win32 branch was missing the semicolon after
 * "return get_errno ()", a compile error in that configuration. */
int
main_file_stat (main_file *xfile, xoff_t *size)
{
  int ret = 0;
#if XD3_WIN32
  if (GetFileType(xfile->file) != FILE_TYPE_DISK)
    {
      return -1;
    }
# if (_WIN32_WINNT >= 0x0500)
  {
    LARGE_INTEGER li;
    if (GetFileSizeEx(xfile->file, &li) == 0)
      {
	return get_errno ();
      }
    *size = li.QuadPart;
  }
# else
  {
    /* Legacy API: 32-bit size only. */
    DWORD filesize = GetFileSize(xfile->file, NULL);
    if (filesize == INVALID_FILE_SIZE)
      {
	return get_errno ();
      }
    *size = filesize;
  }
# endif
#else
  struct stat sbuf;
  if (fstat (XFNO (xfile), & sbuf) < 0)
    {
      ret = get_errno ();
      return ret;
    }

  if (! S_ISREG (sbuf.st_mode))
    {
      return ESPIPE;
    }
  (*size) = sbuf.st_size;
#endif
  return ret;
}
/* Return nonzero iff xfile->filename names an existing regular file. */
int
main_file_exists (main_file *xfile)
{
  struct stat sbuf;

  if (stat (xfile->filename, & sbuf) != 0)
    {
      return 0;
    }
  return S_ISREG (sbuf.st_mode);
}
#if (XD3_POSIX || EXTERNAL_COMPRESSION)
/* POSIX-generic code takes a function pointer to read() or write().
 * This calls the function repeatedly until the buffer is full or EOF.
 * The NREAD parameter is not set for write, NULL is passed. Return
 * is signed, < 0 indicate errors, otherwise byte count. */
typedef int (xd3_posix_func) (int fd, uint8_t *buf, usize_t size);

static int
xd3_posix_io (int fd, uint8_t *buf, size_t size,
	      xd3_posix_func *func, size_t *nread)
{
  int ret;
  size_t nproc = 0;

  while (nproc < size)
    {
      /* Cap each syscall at 1 GiB so the request fits the callback's
       * usize_t size argument. */
      size_t tryread = min(size - nproc, 1U << 30);
      ssize_t result = (*func) (fd, buf + nproc, tryread);

      if (result < 0)
	{
	  ret = get_errno ();
	  /* Retry transient EAGAIN/EINTR; any other errno is fatal. */
	  if (ret != EAGAIN && ret != EINTR)
	    {
	      return ret;
	    }
	  continue;
	}

      /* A zero-byte result means EOF, but only when reading
       * (nread != NULL); a short write just loops again. */
      if (nread != NULL && result == 0) { break; }

      nproc += result;
    }
  if (nread != NULL) { (*nread) = nproc; }
  return 0;
}
#endif
#if XD3_WIN32
/* Win32 analogue of xd3_posix_io(): loop ReadFile/WriteFile until SIZE
 * bytes are transferred or, for reads, EOF / broken pipe ends the
 * stream.  *NREAD receives the byte count when non-NULL. */
static int
xd3_win32_io (HANDLE file, uint8_t *buf, size_t size,
	      int is_read, size_t *nread)
{
  int ret = 0;
  size_t nproc = 0;

  while (nproc < size)
    {
      DWORD nproc2 = 0;  /* hmm */
      DWORD nremain = size - nproc;

      if ((is_read ?
	   ReadFile (file, buf + nproc, nremain, &nproc2, NULL) :
	   WriteFile (file, buf + nproc, nremain, &nproc2, NULL)) == 0)
	{
	  ret = get_errno();
	  if (ret != ERROR_HANDLE_EOF && ret != ERROR_BROKEN_PIPE)
	    {
	      return ret;
	    }
	  /* By falling through here, we'll break this loop in the
	   * read case in case of eof or broken pipe. */
	}
      nproc += nproc2;

      if (nread != NULL && nproc2 == 0) { break; }
    }
  if (nread != NULL) { (*nread) = nproc; }
  return 0;
}
#endif
/* POSIX is unbuffered, while STDIO is buffered. main_file_read()
 * should always be called on blocks. */
/* Read up to SIZE bytes from IFILE into BUF, storing the actual count
 * in *NREAD (0 at EOF).  On failure prints MSG with the filename; on
 * success accounts the bytes in ifile->nread.  Returns 0 or an error
 * code. */
int
main_file_read (main_file *ifile,
		uint8_t *buf,
		size_t size,
		size_t *nread,
		const char *msg)
{
  int ret = 0;

#if XD3_STDIO
  size_t result;

  result = fread (buf, 1, size, ifile->file);

  /* A short fread is only an error if the stream's error flag is set;
   * otherwise it is EOF. */
  if (result < size && ferror (ifile->file))
    {
      ret = get_errno ();
    }
  else
    {
      *nread = result;
    }

#elif XD3_POSIX
  ret = xd3_posix_io (ifile->file, buf, size, (xd3_posix_func*) &read, nread);

#elif XD3_WIN32
  ret = xd3_win32_io (ifile->file, buf, size, 1 /* is_read */, nread);
#endif

  if (ret)
    {
      XPR(NT "%s: %s: %s\n", msg, ifile->filename, xd3_mainerror (ret));
    }
  else
    {
      if (option_verbose > 4) { XPR(NT "read %s: %zu bytes\n",
				    ifile->filename, (*nread)); }
      ifile->nread += (*nread);
    }
  return ret;
}
/* Write SIZE bytes from BUF to OFILE.  On failure prints MSG with the
 * filename; on success accounts the bytes in ofile->nwrite.  Returns
 * 0 or an error code. */
int
main_file_write (main_file *ofile, uint8_t *buf, usize_t size, const char *msg)
{
  int ret = 0;

#if XD3_STDIO
  usize_t result;

  result = fwrite (buf, 1, size, ofile->file);

  if (result != size) { ret = get_errno (); }

#elif XD3_POSIX
  ret = xd3_posix_io (ofile->file, buf, size, (xd3_posix_func*) &write, NULL);

#elif XD3_WIN32
  ret = xd3_win32_io (ofile->file, buf, size, 0, NULL);
#endif

  if (ret)
    {
      XPR(NT "%s: %s: %s\n", msg, ofile->filename, xd3_mainerror (ret));
    }
  else
    {
      if (option_verbose > 5) { XPR(NT "write %s: %u bytes\n",
				    ofile->filename, size); }
      ofile->nwrite += size;
    }

  return ret;
}
/* Reposition XFILE to absolute offset POS.  Returns 0 or an error
 * code.  NOTE(review): the pre-0x0500 Win32 branch casts POS to LONG,
 * truncating offsets beyond 32 bits on that configuration. */
static int
main_file_seek (main_file *xfile, xoff_t pos)
{
  int ret = 0;

#if XD3_STDIO
  if (fseek (xfile->file, pos, SEEK_SET) != 0) { ret = get_errno (); }

#elif XD3_POSIX
  if ((xoff_t) lseek (xfile->file, pos, SEEK_SET) != pos)
    { ret = get_errno (); }

#elif XD3_WIN32
# if (_WIN32_WINNT >= 0x0500)
  LARGE_INTEGER move, out;
  move.QuadPart = pos;
  if (SetFilePointerEx(xfile->file, move, &out, FILE_BEGIN) == 0)
    {
      ret = get_errno ();
    }
# else
  if (SetFilePointer(xfile->file, (LONG)pos, NULL, FILE_BEGIN) ==
      INVALID_SET_FILE_POINTER)
    {
      ret = get_errno ();
    }
# endif
#endif

  return ret;
}
/* This function simply writes the stream output buffer, if there is
 * any, for encode, decode and recode commands.  (The VCDIFF tools use
 * main_print_func()). */
static int
main_write_output (xd3_stream* stream, main_file *ofile)
{
  int ret;

  /* Nothing to do when output is suppressed or the buffer is empty. */
  if (option_no_output || stream->avail_out == 0)
    {
      return 0;
    }

  if ((ret = main_file_write (ofile, stream->next_out,
			      stream->avail_out, "write failed")))
    {
      return ret;
    }

  return 0;
}
/* Translate the secondary-compressor choice (-S, option_secondary)
 * into xd3_config flags.  Recognized names: "fgk", "lzma", "djw" with
 * an optional level digit 0-9 (e.g. "djw9"), and "none"; with no name
 * given, DJW is the default when compiled in.  Returns 0 or
 * XD3_INVALID.
 * BUGFIX: errors are now returned even in quiet mode.  Previously the
 * "return XD3_INVALID" statements sat inside "if (!option_quiet)", so
 * -q caused unrecognized names and bad level digits to be silently
 * accepted, changing the output format without warning. */
static int
main_set_secondary_flags (xd3_config *config)
{
  int ret;
  if (!option_use_secondary)
    {
      return 0;
    }

  /* The default secondary compressor is DJW, if it's compiled. */
  if (option_secondary == NULL)
    {
      if (SECONDARY_DJW)
	{
	  config->flags |= XD3_SEC_DJW;
	}
      return 0;
    }

  if (strcmp (option_secondary, "fgk") == 0 && SECONDARY_FGK)
    {
      config->flags |= XD3_SEC_FGK;
    }
  else if (strcmp (option_secondary, "lzma") == 0 && SECONDARY_LZMA)
    {
      config->flags |= XD3_SEC_LZMA;
    }
  else if (strncmp (option_secondary, "djw", 3) == 0 && SECONDARY_DJW)
    {
      usize_t level = XD3_DEFAULT_SECONDARY_LEVEL;

      config->flags |= XD3_SEC_DJW;

      if (strlen (option_secondary) > 3 &&
	  (ret = main_atou (option_secondary + 3,
			    &level,
			    0, 9, 'S')) != 0)
	{
	  return XD3_INVALID;
	}

      /* XD3_SEC_NOXXXX flags disable secondary compression on
       * a per-section basis. For djw, ngroups=1 indicates
       * minimum work, ngroups=0 uses default settings, which
       * is > 1 groups by default. */
      if (level < 1) { config->flags |= XD3_SEC_NODATA; }
      if (level < 7) { config->sec_data.ngroups = 1; }
      else { config->sec_data.ngroups = 0; }

      if (level < 3) { config->flags |= XD3_SEC_NOINST; }
      if (level < 8) { config->sec_inst.ngroups = 1; }
      else { config->sec_inst.ngroups = 0; }

      if (level < 5) { config->flags |= XD3_SEC_NOADDR; }
      if (level < 9) { config->sec_addr.ngroups = 1; }
      else { config->sec_addr.ngroups = 0; }
    }
  else if (strcmp (option_secondary, "none") == 0 && SECONDARY_DJW)
    {
      /* No secondary.  NOTE(review): the SECONDARY_DJW guard here
       * looks accidental -- "none" arguably should be accepted
       * regardless of which compressors are compiled in; behavior
       * kept as-is apart from the error-return fixes. */
    }
  else
    {
      if (!option_quiet)
	{
	  XPR(NT "unrecognized secondary compressor type: %s\n",
	      option_secondary);
	}
      return XD3_INVALID;
    }

  return 0;
}
/******************************************************************
VCDIFF TOOLS
*****************************************************************/
#if VCDIFF_TOOLS
#include "xdelta3-merge.h"
/* The following macros let VCDIFF print using main_file_write(),
* for example:
*
* VC(UT "trying to be portable: %d\n", x)VE;
*/
/* Size of the per-file print buffer used by the VC/UT/VE macros. */
#define SNPRINTF_BUFSIZE 1024

/* VC(UT "fmt", args)VE expands to: format into xfile->snprintf_buf,
 * fail on overflow via main_print_overflow(), then emit the bytes
 * with main_file_write() -- returning from the enclosing function on
 * any error (so callers must have `int ret` and `xfile` in scope). */
#define VC do { if (((ret = xsnprintf_func
#define UT (char*)xfile->snprintf_buf, SNPRINTF_BUFSIZE,
#define VE ) >= SNPRINTF_BUFSIZE \
  && (ret = main_print_overflow(ret)) != 0) \
  || (ret = main_file_write(xfile, xfile->snprintf_buf, \
			    (usize_t)ret, "print")) != 0) \
  { return ret; } } while (0)

/* Report an oversized xsnprintf_func result (X bytes were needed) and
 * return the generic internal-error code. */
static int
main_print_overflow (int x)
{
  XPR(NT "internal print buffer overflow: %d bytes\n", x);
  return XD3_INTERNAL;
}
/* This function prints a single VCDIFF window. */
/* Decodes the window's instruction section one instruction at a time,
 * listing target offset, opcode, half-instruction types/sizes and
 * copy addresses (T@ = target-window-relative, S@ = source-absolute),
 * then cross-checks the section pointers for consistency.  Returns 0,
 * a decode error, or XD3_INTERNAL on inconsistency. */
static int
main_print_window (xd3_stream* stream, main_file *xfile)
{
  int ret;
  usize_t size = 0;

  VC(UT " Offset Code Type1 Size1 @Addr1 + Type2 Size2 @Addr2\n")VE;

  while (stream->inst_sect.buf < stream->inst_sect.buf_max)
    {
      usize_t code = stream->inst_sect.buf[0];
      const uint8_t *addr_before = stream->addr_sect.buf;
      const uint8_t *inst_before = stream->inst_sect.buf;
      usize_t addr_bytes;
      usize_t inst_bytes;
      usize_t size_before = size;

      if ((ret = xd3_decode_instruction (stream)))
	{
	  XPR(NT "instruction decode error at %"Q"u: %s\n",
	      stream->dec_winstart + size, stream->msg);
	  return ret;
	}
      /* Encoded bytes this instruction consumed from each section. */
      addr_bytes = (usize_t)(stream->addr_sect.buf - addr_before);
      inst_bytes = (usize_t)(stream->inst_sect.buf - inst_before);

      VC(UT " %06"Q"u %03u %s %6u", stream->dec_winstart + size,
	 option_print_cpymode ? code : 0,
	 xd3_rtype_to_string ((xd3_rtype) stream->dec_current1.type,
			      option_print_cpymode),
	 stream->dec_current1.size)VE;

      if (stream->dec_current1.type != XD3_NOOP)
	{
	  if (stream->dec_current1.type >= XD3_CPY)
	    {
	      /* Addresses >= dec_cpylen refer into the target window
	       * (T@); smaller ones are source offsets (S@). */
	      if (stream->dec_current1.addr >= stream->dec_cpylen)
		{
		  VC(UT " T@%-6u",
		     stream->dec_current1.addr - stream->dec_cpylen)VE;
		}
	      else
		{
		  VC(UT " S@%-6"Q"u",
		     stream->dec_cpyoff + stream->dec_current1.addr)VE;
		}
	    }
	  else
	    {
	      VC(UT " ")VE;
	    }

	  size += stream->dec_current1.size;
	}

      if (stream->dec_current2.type != XD3_NOOP)
	{
	  VC(UT " %s %6u",
	     xd3_rtype_to_string ((xd3_rtype) stream->dec_current2.type,
				  option_print_cpymode),
	     stream->dec_current2.size)VE;

	  if (stream->dec_current2.type >= XD3_CPY)
	    {
	      if (stream->dec_current2.addr >= stream->dec_cpylen)
		{
		  VC(UT " T@%-6u",
		     stream->dec_current2.addr - stream->dec_cpylen)VE;
		}
	      else
		{
		  VC(UT " S@%-6"Q"u",
		     stream->dec_cpyoff + stream->dec_current2.addr)VE;
		}
	    }

	  size += stream->dec_current2.size;
	}

      VC(UT "\n")VE;

      /* Point out copies whose encoding cost at least as many bytes
       * as the data they reproduce. */
      if (option_verbose &&
	  addr_bytes + inst_bytes >= (size - size_before) &&
	  (stream->dec_current1.type >= XD3_CPY ||
	   stream->dec_current2.type >= XD3_CPY))
	{
	  VC(UT " %06"Q"u (inefficiency) %u encoded as %u bytes\n",
	     stream->dec_winstart + size_before,
	     size - size_before,
	     addr_bytes + inst_bytes)VE;
	}
    }

  /* Post-conditions: decoded size and section pointers must line up
   * with the window header unless the window was skipped. */
  if (stream->dec_tgtlen != size && (stream->flags & XD3_SKIP_WINDOW) == 0)
    {
      XPR(NT "target window size inconsistency");
      return XD3_INTERNAL;
    }

  if (stream->dec_position != stream->dec_maxpos)
    {
      XPR(NT "target window position inconsistency");
      return XD3_INTERNAL;
    }

  if (stream->addr_sect.buf != stream->addr_sect.buf_max)
    {
      XPR(NT "address section inconsistency");
      return XD3_INTERNAL;
    }

  return 0;
}
/* Print the filename and external-compression command recorded for
 * one side of the application header; TYPE is the label ("output" or
 * "source").  Returns 0 or a write error (via the VC/VE macros). */
static int
main_print_vcdiff_file (main_file *xfile, main_file *file, const char *type)
{
  int ret;  /* Used by above macros */

  if (file->filename)
    {
      VC(UT "XDELTA filename (%s): %s\n", type,
	 file->filename)VE;
    }

  if (file->compressor)
    {
      VC(UT "XDELTA ext comp (%s): %s\n", type,
	 file->compressor->recomp_cmdname)VE;
    }
  return 0;
}
/* This function prints a VCDIFF input, mainly for debugging purposes. */
/* On the first window it emits the file header (version, indicators,
 * secondary compressor, application header); for every window it
 * emits the window header fields, then either stops after the header
 * (XD3_JUST_HDR -> PRINTHDR_SPECIAL) or prints the instruction
 * listing via main_print_window(). */
static int
main_print_func (xd3_stream* stream, main_file *xfile)
{
  int ret;

  if (option_no_output)
    {
      return 0;
    }

  /* Lazily allocate the buffer used by the VC/UT/VE print macros. */
  if (xfile->snprintf_buf == NULL)
    {
      if ((xfile->snprintf_buf =
	   (uint8_t*)main_malloc(SNPRINTF_BUFSIZE)) == NULL)
	{
	  return ENOMEM;
	}
    }

  if (stream->dec_winstart == 0)
    {
      /* First window: file-level header fields. */
      VC(UT "VCDIFF version: 0\n")VE;

      VC(UT "VCDIFF header size: %d\n",
	 stream->dec_hdrsize)VE;

      VC(UT "VCDIFF header indicator: ")VE;
      if ((stream->dec_hdr_ind & VCD_SECONDARY) != 0)
	VC(UT "VCD_SECONDARY ")VE;
      if ((stream->dec_hdr_ind & VCD_CODETABLE) != 0)
	VC(UT "VCD_CODETABLE ")VE;
      if ((stream->dec_hdr_ind & VCD_APPHEADER) != 0)
	VC(UT "VCD_APPHEADER ")VE;
      if (stream->dec_hdr_ind == 0)
	VC(UT "none")VE;
      VC(UT "\n")VE;

      IF_SEC(VC(UT "VCDIFF secondary compressor: %s\n",
		stream->sec_type ? stream->sec_type->name : "none")VE);
      IF_NSEC(VC(UT "VCDIFF secondary compressor: unsupported\n")VE);

      if (stream->dec_hdr_ind & VCD_APPHEADER)
	{
	  uint8_t *apphead;
	  usize_t appheadsz;
	  ret = xd3_get_appheader (stream, & apphead, & appheadsz);

	  if (ret == 0 && appheadsz > 0)
	    {
	      int sq = option_quiet;
	      main_file i, o, s;
	      XD3_ASSERT (apphead != NULL);
	      VC(UT "VCDIFF application header: ")VE;
	      if ((ret = main_file_write (xfile, apphead,
					  appheadsz, "print")) != 0)
		{ return ret; }
	      VC(UT "\n")VE;

	      main_file_init (& i);
	      main_file_init (& o);
	      main_file_init (& s);
	      /* Silence diagnostics while parsing the app header into
	       * the temporary input/output/source descriptors. */
	      option_quiet = 1;
	      main_get_appheader (stream, &i, & o, & s);
	      option_quiet = sq;
	      if ((ret = main_print_vcdiff_file (xfile, & o, "output")))
		{ return ret; }
	      if ((ret = main_print_vcdiff_file (xfile, & s, "source")))
		{ return ret; }
	      main_file_cleanup (& i);
	      main_file_cleanup (& o);
	      main_file_cleanup (& s);
	    }
	}
    }
  else
    {
      /* Blank line between successive windows. */
      VC(UT "\n")VE;
    }

  /* Per-window header fields. */
  VC(UT "VCDIFF window number: %"Q"u\n", stream->current_window)VE;
  VC(UT "VCDIFF window indicator: ")VE;
  if ((stream->dec_win_ind & VCD_SOURCE) != 0) VC(UT "VCD_SOURCE ")VE;
  if ((stream->dec_win_ind & VCD_TARGET) != 0) VC(UT "VCD_TARGET ")VE;
  if ((stream->dec_win_ind & VCD_ADLER32) != 0) VC(UT "VCD_ADLER32 ")VE;
  if (stream->dec_win_ind == 0) VC(UT "none")VE;
  VC(UT "\n")VE;

  if ((stream->dec_win_ind & VCD_ADLER32) != 0)
    {
      VC(UT "VCDIFF adler32 checksum: %08X\n",
	 (usize_t)stream->dec_adler32)VE;
    }

  if (stream->dec_del_ind != 0)
    {
      VC(UT "VCDIFF delta indicator: ")VE;
      if ((stream->dec_del_ind & VCD_DATACOMP) != 0) VC(UT "VCD_DATACOMP ")VE;
      if ((stream->dec_del_ind & VCD_INSTCOMP) != 0) VC(UT "VCD_INSTCOMP ")VE;
      if ((stream->dec_del_ind & VCD_ADDRCOMP) != 0) VC(UT "VCD_ADDRCOMP ")VE;
      if (stream->dec_del_ind == 0) VC(UT "none")VE;
      VC(UT "\n")VE;
    }

  if (stream->dec_winstart != 0)
    {
      VC(UT "VCDIFF window at offset: %"Q"u\n", stream->dec_winstart)VE;
    }

  if (SRCORTGT (stream->dec_win_ind))
    {
      VC(UT "VCDIFF copy window length: %u\n",
	 (usize_t)stream->dec_cpylen)VE;
      VC(UT "VCDIFF copy window offset: %"Q"u\n",
	 stream->dec_cpyoff)VE;
    }

  VC(UT "VCDIFF delta encoding length: %u\n",
     (usize_t)stream->dec_enclen)VE;
  VC(UT "VCDIFF target window length: %u\n",
     (usize_t)stream->dec_tgtlen)VE;

  VC(UT "VCDIFF data section length: %u\n",
     (usize_t)stream->data_sect.size)VE;
  VC(UT "VCDIFF inst section length: %u\n",
     (usize_t)stream->inst_sect.size)VE;
  VC(UT "VCDIFF addr section length: %u\n",
     (usize_t)stream->addr_sect.size)VE;

  ret = 0;
  if ((stream->flags & XD3_JUST_HDR) != 0)
    {
      /* Print a header -- finished! */
      ret = PRINTHDR_SPECIAL;
    }
  else if ((stream->flags & XD3_SKIP_WINDOW) == 0)
    {
      ret = main_print_window (stream, xfile);
    }

  return ret;
}
/* Copy one decoded section (data, inst, or addr) of the current
 * window from INPUT into a freshly-allocated OUTPUT buffer of the
 * global recode_stream.  Returns 0 or an allocation error. */
static int
main_recode_copy (xd3_stream* stream,
		  xd3_output* output,
		  xd3_desect* input)
{
  int ret;

  XD3_ASSERT(output != NULL);
  XD3_ASSERT(output->next_page == NULL);

  if ((ret = xd3_decode_allocate (recode_stream,
				  input->size,
				  &output->base,
				  &output->avail)))
    {
      XPR(NT XD3_LIB_ERRMSG (stream, ret));
      return ret;
    }

  memcpy (output->base,
	  /* Note: decoder advances buf, so get base of buffer with
	   * buf_max - size */
	  input->buf_max - input->size,
	  input->size);
  output->next = input->size;
  return 0;
}
// Re-encode one window
/* Feeds the just-decoded window (section buffers, target length,
 * optional source window, adler32, app header) back through the
 * recode_stream encoder and writes the re-encoded output to OFILE.
 * Returns 0 after the window is fully emitted, else an error code. */
static int
main_recode_func (xd3_stream* stream, main_file *ofile)
{
  int ret;
  xd3_source decode_source;

  XD3_ASSERT(stream->dec_state == DEC_FINISH);
  XD3_ASSERT(recode_stream->enc_state == ENC_INIT ||
	     recode_stream->enc_state == ENC_INPUT);

  // Copy partial decoder output to partial encoder inputs
  if ((ret = main_recode_copy (recode_stream,
			       DATA_HEAD(recode_stream),
			       &stream->data_sect)) ||
      (ret = main_recode_copy (recode_stream,
			       INST_HEAD(recode_stream),
			       &stream->inst_sect)) ||
      (ret = main_recode_copy (recode_stream,
			       ADDR_HEAD(recode_stream),
			       &stream->addr_sect)))
    {
      return ret;
    }

  // This jumps to xd3_emit_hdr()
  recode_stream->enc_state = ENC_FLUSH;
  recode_stream->avail_in = stream->dec_tgtlen;

  if (SRCORTGT (stream->dec_win_ind))
    {
      /* Carry the decoded copy-window geometry over to the encoder. */
      recode_stream->src = & decode_source;
      decode_source.srclen = stream->dec_cpylen;
      decode_source.srcbase = stream->dec_cpyoff;
    }

  if (option_use_checksum &&
      (stream->dec_win_ind & VCD_ADLER32) != 0)
    {
      /* Reuse the input window's checksum instead of recomputing. */
      recode_stream->flags |= XD3_ADLER32_RECODE;
      recode_stream->recode_adler32 = stream->dec_adler32;
    }

  if (option_use_appheader != 0 &&
      option_appheader != NULL)
    {
      /* A command-line app header overrides the one being decoded. */
      xd3_set_appheader (recode_stream, option_appheader,
			 (usize_t) strlen ((char*) option_appheader));
    }
  else if (option_use_appheader != 0 &&
	   option_appheader == NULL)
    {
      if (stream->dec_appheader != NULL)
	{
	  xd3_set_appheader (recode_stream,
			     stream->dec_appheader, stream->dec_appheadsz);
	}
    }

  // Output loop
  for (;;)
    {
      switch((ret = xd3_encode_input (recode_stream)))
	{
	case XD3_INPUT: {
	  /* finished recoding one window */
	  stream->total_out = recode_stream->total_out;
	  return 0;
	}
	case XD3_OUTPUT: {
	  /* main_file_write below */
	  break;
	}
	case XD3_GOTHEADER:
	case XD3_WINSTART:
	case XD3_WINFINISH: {
	  /* ignore */
	  continue;
	}
	case XD3_GETSRCBLK:
	case 0: {
	  /* Neither should occur with the source window preset. */
	  return XD3_INTERNAL;
	}
	default:
	  return ret;
	}

      if ((ret = main_write_output (recode_stream, ofile)))
	{
	  return ret;
	}

      xd3_consume_output (recode_stream);
    }
}
#endif /* VCDIFF_TOOLS */
/*******************************************************************
VCDIFF merging
******************************************************************/
#if VCDIFF_TOOLS
/* Modifies static state. */
/* Allocate and configure the global recode_stream used by the recode
 * and merge commands.  Returns 0 or an error code, leaving
 * recode_stream NULL on failure.
 * BUGFIX: xd3_init_config() is now called BEFORE installing the
 * custom alloc/freef callbacks -- it (re)initializes the config
 * structure, which previously discarded those assignments.  The error
 * path also frees the stream struct itself (main_free), matching the
 * xd3_free_stream/main_free pairing used in main_merge_arguments(),
 * instead of leaking it. */
static int
main_init_recode_stream (void)
{
  int ret;
  int stream_flags = XD3_ADLER32_NOVER | XD3_SKIP_EMIT;
  int recode_flags;
  xd3_config recode_config;

  XD3_ASSERT (recode_stream == NULL);

  if ((recode_stream = (xd3_stream*) main_malloc(sizeof(xd3_stream))) == NULL)
    {
      return ENOMEM;
    }

  recode_flags = (stream_flags & XD3_SEC_TYPE);

  xd3_init_config(&recode_config, recode_flags);

  recode_config.alloc = main_alloc;
  recode_config.freef = main_free1;

  if ((ret = main_set_secondary_flags (&recode_config)) ||
      (ret = xd3_config_stream (recode_stream, &recode_config)) ||
      (ret = xd3_encode_init_partial (recode_stream)) ||
      (ret = xd3_whole_state_init (recode_stream)))
    {
      XPR(NT XD3_LIB_ERRMSG (recode_stream, ret));
      xd3_free_stream (recode_stream);
      main_free (recode_stream);
      recode_stream = NULL;
      return ret;
    }

  return 0;
}
/* This processes the sequence of -m arguments. The final input
 * is processed as part of the ordinary main_input() loop. */
/* Each -m file is opened and decoded through main_input(CMD_MERGE_ARG);
 * successive deltas are folded together with xd3_merge_input_output(),
 * and the accumulated whole-file state is finally handed to the
 * global merge_stream for the last merge step.  Returns 0 or an error
 * code. */
static int
main_merge_arguments (main_merge_list* merges)
{
  int ret = 0;
  int count = 0;
  main_merge *merge = NULL;
  xd3_stream merge_input;

  if (main_merge_list_empty (merges))
    {
      return 0;
    }

  if ((ret = xd3_config_stream (& merge_input, NULL)) ||
      (ret = xd3_whole_state_init (& merge_input)))
    {
      XPR(NT XD3_LIB_ERRMSG (& merge_input, ret));
      return ret;
    }

  merge = main_merge_list_front (merges);
  while (!main_merge_list_end (merges, merge))
    {
      main_file mfile;
      main_file_init (& mfile);
      mfile.filename = merge->filename;
      mfile.flags = RD_NONEXTERNAL;

      if ((ret = main_file_open (& mfile, merge->filename, XO_READ)))
	{
	  goto error;
	}

      ret = main_input (CMD_MERGE_ARG, & mfile, NULL, NULL);

      if (ret == 0)
	{
	  if (count++ == 0)
	    {
	      /* The first merge source is the next merge input. */
	      xd3_swap_whole_state (& recode_stream->whole_target,
				    & merge_input.whole_target);
	    }
	  else
	    {
	      /* Merge the recode_stream with merge_input. */
	      ret = xd3_merge_input_output (recode_stream,
					    & merge_input.whole_target);

	      /* Save the next merge source in merge_input. */
	      xd3_swap_whole_state (& recode_stream->whole_target,
				    & merge_input.whole_target);
	    }
	}

      main_file_cleanup (& mfile);

      /* main_input() allocated these globals for the pass; release
       * them before the next -m argument. */
      if (recode_stream != NULL)
	{
	  xd3_free_stream (recode_stream);
	  main_free (recode_stream);
	  recode_stream = NULL;
	}

      if (main_bdata != NULL)
	{
	  main_buffree (main_bdata);
	  main_bdata = NULL;
	  main_bsize = 0;
	}

      if (ret != 0)
	{
	  goto error;
	}

      merge = main_merge_list_next (merge);
    }

  XD3_ASSERT (merge_stream == NULL);

  if ((merge_stream = (xd3_stream*) main_malloc (sizeof(xd3_stream))) == NULL)
    {
      ret = ENOMEM;
      goto error;
    }

  if ((ret = xd3_config_stream (merge_stream, NULL)) ||
      (ret = xd3_whole_state_init (merge_stream)))
    {
      XPR(NT XD3_LIB_ERRMSG (& merge_input, ret));
      goto error;
    }

  /* Hand the accumulated whole-file delta over to merge_stream. */
  xd3_swap_whole_state (& merge_stream->whole_target,
			& merge_input.whole_target);

  ret = 0;
 error:
  xd3_free_stream (& merge_input);
  return ret;
}
/* This processes each window of the final merge input.  This routine
 * does not output, it buffers the entire delta into memory
 * (NO_WRITE is unused). */
static int
main_merge_func (xd3_stream* stream, main_file *no_write)
{
  (void) no_write;
  return xd3_whole_append_window (stream);
}
/* This is called after all windows have been read, as a final step in
 * main_input(). This is only called for the final merge step. */
/* Re-encodes the merged whole-target representation window by window:
 * ADD data is staged in main_bdata, RUN/COPY instructions are
 * replayed into recode_stream, and the encoded result is written to
 * OFILE.  Window sizes, offsets and (optionally) adler32 checksums
 * are carried over from the input so target copies stay in range.
 * Returns 0 or an error code. */
static int
main_merge_output (xd3_stream *stream, main_file *ofile)
{
  int ret;
  usize_t inst_pos = 0;
  xoff_t output_pos = 0;
  xd3_source recode_source;
  usize_t window_num = 0;
  int at_least_once = 0;

  /* merge_stream is set if there were arguments. this stream's input
   * needs to be applied to the merge_stream source. */
  if ((merge_stream != NULL) &&
      (ret = xd3_merge_input_output (stream,
				     & merge_stream->whole_target)))
    {
      XPR(NT XD3_LIB_ERRMSG (stream, ret));
      return ret;
    }

  if (option_use_appheader != 0 &&
      option_appheader != NULL)
    {
      xd3_set_appheader (recode_stream, option_appheader,
			 (usize_t) strlen ((char*) option_appheader));
    }

  /* Enter the ENC_INPUT state and bypass the next_in == NULL test
   * and (leftover) input buffering logic. */
  XD3_ASSERT(recode_stream->enc_state == ENC_INIT);
  recode_stream->enc_state = ENC_INPUT;
  recode_stream->next_in = main_bdata;
  recode_stream->flags |= XD3_FLUSH;

  /* This encodes the entire target. */
  while (inst_pos < stream->whole_target.instlen || !at_least_once)
    {
      xoff_t window_start = output_pos;
      int window_srcset = 0;
      xoff_t window_srcmin = 0;
      xoff_t window_srcmax = 0;
      usize_t window_pos = 0;
      usize_t window_size;

      /* at_least_once ensures that we encode at least one window,
       * which handles the 0-byte case. */
      at_least_once = 1;

      XD3_ASSERT (recode_stream->enc_state == ENC_INPUT);

      if ((ret = xd3_encode_input (recode_stream)) != XD3_WINSTART)
	{
	  XPR(NT "invalid merge state: %s\n", xd3_mainerror (ret));
	  return XD3_INVALID;
	}

      /* Window sizes must match from the input to the output, so that
       * target copies are in-range (and so that checksums carry
       * over). */
      XD3_ASSERT (window_num < stream->whole_target.wininfolen);
      window_size = stream->whole_target.wininfo[window_num].length;

      /* Output position should also match. */
      if (output_pos != stream->whole_target.wininfo[window_num].offset)
	{
	  XPR(NT "internal merge error: offset mismatch\n");
	  return XD3_INVALID;
	}

      if (option_use_checksum &&
	  (stream->dec_win_ind & VCD_ADLER32) != 0)
	{
	  recode_stream->flags |= XD3_ADLER32_RECODE;
	  recode_stream->recode_adler32 =
	    stream->whole_target.wininfo[window_num].adler32;
	}

      window_num++;

      /* Grow the scratch buffer to hold one full window. */
      if (main_bsize < window_size)
	{
	  main_buffree (main_bdata);
	  main_bdata = NULL;
	  main_bsize = 0;
	  if ((main_bdata = (uint8_t*)
	       main_bufalloc (window_size)) == NULL)
	    {
	      return ENOMEM;
	    }
	  main_bsize = window_size;
	}

      /* This encodes a single target window. */
      while (window_pos < window_size &&
	     inst_pos < stream->whole_target.instlen)
	{
	  xd3_winst *inst = &stream->whole_target.inst[inst_pos];
	  usize_t take = min(inst->size, window_size - window_pos);
	  xoff_t addr;

	  switch (inst->type)
	    {
	    case XD3_RUN:
	      if ((ret = xd3_emit_run (recode_stream, window_pos, take,
				       &stream->whole_target.adds[inst->addr])))
		{
		  return ret;
		}
	      break;

	    case XD3_ADD:
	      /* Adds are implicit, put them into the input buffer. */
	      memcpy (main_bdata + window_pos,
		      stream->whole_target.adds + inst->addr, take);
	      break;

	    default: /* XD3_COPY + copy mode */
	      if (inst->mode != 0)
		{
		  /* Source copy: track the span of source addresses
		   * used, to size the source window below. */
		  if (window_srcset) {
		    window_srcmin = min(window_srcmin, inst->addr);
		    window_srcmax = max(window_srcmax, inst->addr + take);
		  } else {
		    window_srcset = 1;
		    window_srcmin = inst->addr;
		    window_srcmax = inst->addr + take;
		  }
		  addr = inst->addr;
		}
	      else
		{
		  /* Target copy: rewrite as window-relative. */
		  XD3_ASSERT (inst->addr >= window_start);
		  addr = inst->addr - window_start;
		}
	      IF_DEBUG2 (XPR(NTR "[merge copy] winpos %u take %u addr %"Q"u mode %u\n",
			     window_pos, take, addr, inst->mode));

	      if ((ret = xd3_found_match (recode_stream, window_pos, take,
					  addr, inst->mode != 0)))
		{
		  return ret;
		}
	      break;
	    }

	  window_pos += take;
	  output_pos += take;

	  /* Instructions larger than the window remainder are split
	   * across windows: adjust addr/size for the next pass. */
	  if (take == inst->size)
	    {
	      inst_pos += 1;
	    }
	  else
	    {
	      /* Modify the instruction for the next pass. */
	      if (inst->type != XD3_RUN)
		{
		  inst->addr += take;
		}
	      inst->size -= take;
	    }
	}

      xd3_avail_input (recode_stream, main_bdata, window_pos);
      recode_stream->enc_state = ENC_INSTR;

      if (window_srcset) {
	recode_stream->srcwin_decided = 1;
	recode_stream->src = &recode_source;
	recode_source.srclen = (usize_t)(window_srcmax - window_srcmin);
	recode_source.srcbase = window_srcmin;
	recode_stream->taroff = recode_source.srclen;

	XD3_ASSERT (recode_source.srclen != 0);
      } else {
	recode_stream->srcwin_decided = 0;
	recode_stream->src = NULL;
	recode_stream->taroff = 0;
      }

      /* Drain the encoder for this window. */
      for (;;)
	{
	  switch ((ret = xd3_encode_input (recode_stream)))
	    {
	    case XD3_INPUT: {
	      goto done_window;
	    }
	    case XD3_OUTPUT: {
	      /* main_file_write below */
	      break;
	    }
	    case XD3_GOTHEADER:
	    case XD3_WINSTART:
	    case XD3_WINFINISH: {
	      /* ignore */
	      continue;
	    }
	    case XD3_GETSRCBLK:
	    case 0: {
	      return XD3_INTERNAL;
	    }
	    default:
	      return ret;
	    }

	  if ((ret = main_write_output(recode_stream, ofile)))
	    {
	      return ret;
	    }

	  xd3_consume_output (recode_stream);
	}
    done_window:
      (void) 0;
    }

  return 0;
}
#endif
/*******************************************************************
Input decompression, output recompression
******************************************************************/
#if EXTERNAL_COMPRESSION
/* This is tricky POSIX-specific code with lots of fork(), pipe(),
* dup(), waitpid(), and exec() business. Most of this code
* originated in PRCS1, which did automatic package-file
* decompression. It works with both XD3_POSIX and XD3_STDIO file
* disciplines.
*
* To automatically detect compressed inputs requires a child process
* to reconstruct the input stream, which was advanced in order to
* detect compression, because it may not be seekable. In other
* words, the main program reads part of the input stream, and if it
* detects a compressed input it then forks a pipe copier process,
* which copies the first-read block out of the main-program's memory,
* then streams the remaining compressed input into the
* input-decompression pipe.
*/
#include <signal.h>
#include <unistd.h>
#include <sys/stat.h>
#include <sys/wait.h>
/* Remember which pipe FD is which. */
#define PIPE_READ_FD 0
#define PIPE_WRITE_FD 1
#define MAX_SUBPROCS 4 /* max(source + copier + output,
source + copier + input + copier). */
static pid_t ext_subprocs[MAX_SUBPROCS];
/* Like write(), applies to a fd instead of a main_file, for the pipe
* copier subprocess. Does not print an error, to facilitate ignoring
* trailing garbage, see main_pipe_copier(). */
/* Stream `remain` bytes from exist_buf to the raw descriptor outfd.
 * Unlike the main_file_* writers this deliberately prints no error
 * message, so the pipe copier can ignore trailing garbage (EPIPE). */
static int
main_pipe_write (int outfd, uint8_t *exist_buf, usize_t remain)
{
  return xd3_posix_io (outfd, exist_buf, remain,
                       (xd3_posix_func*) &write, NULL);
}
/* A simple error-reporting waitpid interface. */
/* Reap one subprocess and map its status to an errno-style result:
 * 0 for a clean zero exit (or an expected SIGPIPE death), ECHILD for
 * any other abnormal termination, or waitpid()'s own errno. */
static int
main_waitpid_check(pid_t pid)
{
  int status;
  int ret = 0;

  /* Block until the child terminates. */
  if (waitpid (pid, & status, 0) < 0)
    {
      ret = get_errno ();
      XPR(NT "external compression [pid %d] wait: %s\n",
          pid, xd3_mainerror (ret));
    }
  else if (! WIFEXITED (status))
    {
      // SIGPIPE will be delivered to the child process whenever it
      // writes data after this process closes the pipe, which
      // happens if xdelta does not require access to the entire
      // source file.  Considered normal.
      if (! WIFSIGNALED (status) || WTERMSIG (status) != SIGPIPE)
        {
          /* Killed by some other signal, or stopped: abnormal exit. */
          ret = ECHILD;
          XPR(NT "external compression [pid %d] signal %d\n", pid,
              WIFSIGNALED (status) ? WTERMSIG (status) : WSTOPSIG (status));
        }
      else if (option_verbose)
        {
          XPR(NT "external compression sigpipe\n");
        }
    }
  else if (WEXITSTATUS (status) != 0)
    {
      /* Clean exit but nonzero status. */
      ret = ECHILD;
      if (option_verbose > 1)
        {
          /* Presumably, the error was printed by the subprocess. */
          XPR(NT "external compression [pid %d] exit %d\n",
              pid, WEXITSTATUS (status));
        }
    }

  return ret;
}
/* Wait for any existing child processes to check for abnormal exit. */
/* Reap every recorded subprocess in order; the first abnormal exit
 * aborts with its error, leaving later entries unreaped. */
static int
main_external_compression_finish (void)
{
  int i;
  int ret;

  for (i = 0; i < num_subprocs; i++)
    {
      pid_t pid = ext_subprocs[i];

      if (pid == 0)
        {
          continue;
        }

      if ((ret = main_waitpid_check (pid)) != 0)
        {
          return ret;
        }

      ext_subprocs[i] = 0;
    }
  return 0;
}
/* Kills any outstanding compression process. */
/* Forcibly terminate (SIGTERM) any subprocess that has not yet been
 * reaped, clearing each table slot as it is handled. */
static void
main_external_compression_cleanup (void)
{
  int i;

  for (i = 0; i < num_subprocs; i++)
    {
      if (ext_subprocs[i] != 0)
        {
          kill (ext_subprocs[i], SIGTERM);
          ext_subprocs[i] = 0;
        }
    }
}
/* This runs as a forked process of main_input_decompress_setup() to
* copy input to the decompression process. First, the available
* input is copied out of the existing buffer, then the buffer is
* reused to continue reading from the compressed input file. */
/* Runs in a forked child: first flushes the already-read probe block
 * (pipe_buf/nread) into the decompressor's stdin, then keeps reading
 * the compressed input file and copying it down the pipe.
 *
 * Returns 0 on success or an errno-style error. */
static int
main_pipe_copier (uint8_t *pipe_buf,
                  usize_t pipe_bufsize,
                  size_t nread,
                  main_file *ifile,
                  int outfd)
{
  int ret;
  xoff_t skipped = 0;

  /* Prevent SIGPIPE signals, allow EPIPE return values instead.  This
   * is safe to comment-out, except that the -F flag will not work
   * properly (the parent would need to treat WTERMSIG(status) ==
   * SIGPIPE).
   *
   * BUG FIX: the sigaction struct was previously passed to sigaction()
   * with sa_mask and sa_flags left uninitialized, which is undefined
   * behavior; zero the whole struct and empty the mask first. */
  struct sigaction sa;
  memset (&sa, 0, sizeof (sa));
  sigemptyset (&sa.sa_mask);
  sa.sa_flags = 0;
  sa.sa_handler = SIG_IGN;
  sigaction (SIGPIPE, &sa, NULL);

  for (;;)
    {
      /* force_drain will be set when option_force and EPIPE cause us
       * to skip data.  This is reset each time through the loop, so
       * the break condition below works. */
      int force_drain = 0;

      if (nread > 0 && (ret = main_pipe_write (outfd, pipe_buf, nread)))
        {
          if (ret == EPIPE)
            {
              /* Reader went away early; keep reading until nread == 0
               * so we can report how much was skipped. */
              skipped += nread;
              force_drain = 1;
            }
          else
            {
              XPR(NT "pipe write failed: %s\n", xd3_mainerror (ret));
              return ret;
            }
        }

      /* A short read means EOF on the compressed input. */
      if (nread < pipe_bufsize && !force_drain)
        {
          break;
        }

      /* NOTE(review): main_file_read appears to return positive errno
       * values elsewhere, so this '< 0' test may never fire — confirm
       * against main_file_read's sign convention. */
      if ((ret = main_file_read (ifile, pipe_buf, pipe_bufsize,
                                 & nread, "pipe read failed")) < 0)
        {
          return ret;
        }
    }

  if (option_verbose && skipped != 0)
    {
      XPR(NT "skipping %"Q"u bytes in %s\n",
          skipped, ifile->filename);
    }
  return 0;
}
/* This function is called after we have read some amount of data from
* the input file and detected a compressed input. Here we start a
* decompression subprocess by forking twice. The first process runs
* the decompression command, the second process copies data to the
* input of the first. */
/* Forks twice: one child exec's the decompression command, the other
 * (main_pipe_copier) feeds it the probe block already read plus the
 * rest of the compressed input.  On success, ifile is re-pointed at
 * the read side of the decompressor's output and the first block of
 * decompressed data is read into input_buf (count in *nread).
 * Returns 0 or an errno-style error. */
static int
main_input_decompress_setup (const main_extcomp *decomp,
                             main_file *ifile,
                             uint8_t *input_buf,
                             usize_t input_bufsize,
                             uint8_t *pipe_buf,
                             usize_t pipe_bufsize,
                             usize_t pipe_avail,
                             size_t *nread)
{
  /* The two pipes: input and output file descriptors. */
  int outpipefd[2], inpipefd[2];
  int input_fd = -1;           /* The resulting input_fd (output of decompression). */
  pid_t decomp_id, copier_id;  /* The two subprocs. */
  int ret;

  outpipefd[0] = outpipefd[1] = -1;
  inpipefd[0] = inpipefd[1] = -1;

  if (pipe (outpipefd) || pipe (inpipefd))
    {
      XPR(NT "pipe failed: %s\n", xd3_mainerror (ret = get_errno ()));
      goto pipe_cleanup;
    }

  if ((decomp_id = fork ()) < 0)
    {
      XPR(NT "fork failed: %s\n", xd3_mainerror (ret = get_errno ()));
      goto pipe_cleanup;
    }

  /* The first child runs the decompression process: */
  if (decomp_id == 0)
    {
      if (option_verbose > 2)
        {
          XPR(NT "external decompression pid %d\n", getpid ());
        }

      /* Setup pipes: write to the outpipe, read from the inpipe, then
       * close all four original descriptors before exec. */
      if (dup2 (outpipefd[PIPE_WRITE_FD], STDOUT_FILENO) < 0 ||
          dup2 (inpipefd[PIPE_READ_FD], STDIN_FILENO) < 0 ||
          close (outpipefd[PIPE_READ_FD]) ||
          close (outpipefd[PIPE_WRITE_FD]) ||
          close (inpipefd[PIPE_READ_FD]) ||
          close (inpipefd[PIPE_WRITE_FD]) ||
          execlp (decomp->decomp_cmdname, decomp->decomp_cmdname,
                  decomp->decomp_options,
                  option_force2 ? "-f" : NULL,
                  NULL))
        {
          XPR(NT "child process %s failed to execute: %s\n",
              decomp->decomp_cmdname, xd3_mainerror (get_errno ()));
        }
      /* Only reached when exec fails. */
      _exit (127);
    }

  XD3_ASSERT(num_subprocs < MAX_SUBPROCS);
  ext_subprocs[num_subprocs++] = decomp_id;

  if ((copier_id = fork ()) < 0)
    {
      XPR(NT "fork failed: %s\n", xd3_mainerror (ret = get_errno ()));
      goto pipe_cleanup;
    }

  /* The second child runs the copier process: */
  if (copier_id == 0)
    {
      int exitval = 0;

      if (option_verbose > 2)
        {
          XPR(NT "child pipe-copier pid %d\n", getpid ());
        }

      /* Keep only the write side of the inpipe; everything else is
       * closed so EOF propagates correctly. */
      if (close (inpipefd[PIPE_READ_FD]) ||
          close (outpipefd[PIPE_READ_FD]) ||
          close (outpipefd[PIPE_WRITE_FD]) ||
          main_pipe_copier (pipe_buf, pipe_bufsize, pipe_avail,
                            ifile, inpipefd[PIPE_WRITE_FD]) ||
          close (inpipefd[PIPE_WRITE_FD]))
        {
          XPR(NT "child copier process failed: %s\n",
              xd3_mainerror (get_errno ()));
          exitval = 1;
        }

      _exit (exitval);
    }

  XD3_ASSERT(num_subprocs < MAX_SUBPROCS);
  ext_subprocs[num_subprocs++] = copier_id;

  /* The parent closes both pipes after duplicating the output of
   * compression. */
  input_fd = dup (outpipefd[PIPE_READ_FD]);

  if (input_fd < 0 ||
      main_file_close (ifile) ||
      close (outpipefd[PIPE_READ_FD]) ||
      close (outpipefd[PIPE_WRITE_FD]) ||
      close (inpipefd[PIPE_READ_FD]) ||
      close (inpipefd[PIPE_WRITE_FD]))
    {
      XPR(NT "dup/close failed: %s\n", xd3_mainerror (ret = get_errno ()));
      goto pipe_cleanup;
    }

#if XD3_STDIO
  /* Note: fdopen() acquires the fd, closes it when finished. */
  if ((ifile->file = fdopen (input_fd, "r")) == NULL)
    {
      XPR(NT "fdopen failed: %s\n", xd3_mainerror (ret = get_errno ()));
      goto pipe_cleanup;
    }

#elif XD3_POSIX
  ifile->file = input_fd;
#endif

  ifile->compressor = decomp;

  /* Now the input file is decompressed. */
  return main_file_read (ifile, input_buf, input_bufsize,
                         nread, "input decompression failed");

 pipe_cleanup:
  /* Best-effort teardown; some of these may already be closed or -1. */
  close (input_fd);
  close (outpipefd[PIPE_READ_FD]);
  close (outpipefd[PIPE_WRITE_FD]);
  close (inpipefd[PIPE_READ_FD]);
  close (inpipefd[PIPE_WRITE_FD]);
  return ret;
}
/* This routine is called when the first buffer of input data is read
* by the main program (unless input decompression is disabled by
* command-line option). If it recognizes the magic number of a known
* input type it invokes decompression.
*
* Skips decompression if the decompression type or the file type is
* RD_NONEXTERNAL.
*
* Behaves exactly like main_file_read, otherwise.
*
* This function uses a separate buffer to read the first small block
* of input. If a compressed input is detected, the separate buffer
* is passed to the pipe copier. This avoids using the same size
* buffer in both cases. */
/* Reads a small probe block; if it matches a known compression magic
 * (or the app header forced one via RD_DECOMPSET), hands off to
 * main_input_decompress_setup().  Otherwise behaves like
 * main_file_read() for the full window: the probe is copied into
 * input_buf and the remainder of the window is read after it. */
static int
main_secondary_decompress_check (main_file *file,
                                 uint8_t *input_buf,
                                 size_t input_size,
                                 size_t *nread)
{
  int ret;
  usize_t i;
  usize_t try_read = min (input_size, XD3_ALLOCSIZE);
  size_t check_nread = 0;
  uint8_t check_buf[XD3_ALLOCSIZE];  /* TODO: stack limit */
  const main_extcomp *decompressor = NULL;

  /* Probe into a separate buffer; on a compressed input this buffer
   * is handed to the pipe copier, so the window buffer stays clean. */
  if ((ret = main_file_read (file, check_buf,
                             try_read,
                             & check_nread, "input read failed")))
    {
      return ret;
    }

  if (file->flags & RD_DECOMPSET)
    {
      /* This allows the application header to override the magic
       * number, for whatever reason. */
      decompressor = file->compressor;
    }
  else
    {
      for (i = 0; i < SIZEOF_ARRAY (extcomp_types); i += 1)
        {
          const main_extcomp *decomp = & extcomp_types[i];

          /* NOTE(review): '>' (not '>=') means a probe of exactly
           * magic_size bytes never matches — presumably deliberate,
           * since such an input holds no payload; confirm. */
          if (check_nread > decomp->magic_size)
            {
              /* The following expr checks if we are trying to read a
               * VCDIFF input, in which case do not treat it as
               * "secondary" decompression. */
              int skip_this_type = (decomp->flags & RD_NONEXTERNAL) &&
                (file->flags & RD_NONEXTERNAL);

              if (skip_this_type)
                {
                  continue;
                }

              if (memcmp (check_buf, decomp->magic, decomp->magic_size) == 0)
                {
                  decompressor = decomp;
                  break;
                }
            }
        }
    }

  if (decompressor != NULL)
    {
      if (! option_quiet)
        {
          XPR(NT "externally compressed input: %s %s%s < %s\n",
              decompressor->decomp_cmdname,
              decompressor->decomp_options,
              (option_force2 ? " -f" : ""),
              file->filename);
          if (file->flags & RD_MAININPUT)
            {
              XPR(NT
  "WARNING: the encoder is automatically decompressing the input file;\n");
              XPR(NT
  "WARNING: the decoder will automatically recompress the output file;\n");
              XPR(NT
  "WARNING: this may result in different compressed data and checksums\n");
              XPR(NT
  "WARNING: despite being identical data; if this is an issue, use -D\n");
              XPR(NT
  "WARNING: to avoid decompression and/or use -R to avoid recompression\n");
              XPR(NT
  "WARNING: and/or manually decompress the input file; if you know the\n");
              XPR(NT
  "WARNING: compression settings that will produce identical output\n");
              XPR(NT
  "WARNING: you may set those flags using the environment (e.g., GZIP=-9)\n");
            }
        }

      /* A decompression pipe makes the size unknowable. */
      file->size_known = 0;
      return main_input_decompress_setup (decompressor, file,
                                          input_buf, input_size,
                                          check_buf, XD3_ALLOCSIZE,
                                          check_nread, nread);
    }

  /* Now read the rest of the input block. */
  (*nread) = 0;
  if (check_nread == try_read)
    {
      /* BUG FIX: this read's result was previously assigned to 'ret'
       * and then discarded, so a failed read silently truncated the
       * input.  Propagate the error to the caller instead. */
      if ((ret = main_file_read (file,
                                 input_buf + try_read,
                                 input_size - try_read,
                                 nread,
                                 "input read failed")))
        {
          return ret;
        }
    }

  /* Prepend the probe block to whatever was just read. */
  memcpy (input_buf, check_buf, check_nread);
  (*nread) += check_nread;
  return 0;
}
/* Initiate re-compression of the output stream. This is easier than
* input decompression because we know beforehand that the stream will
* be compressed, whereas the input has already been read when we
* decide it should be decompressed. Thus, it only requires one
* subprocess and one pipe. */
/* Forks a single child that exec's the recompression command with its
 * stdout pointed at the (already open) output file; the parent then
 * re-points ofile at the write side of the pipe so everything written
 * from here on is compressed.  Returns 0 or an errno-style error. */
static int
main_recompress_output (main_file *ofile)
{
  pid_t recomp_id;  /* One subproc. */
  int pipefd[2];    /* One pipe. */
  int output_fd = -1;
  int ret;
  const main_extcomp *recomp = ofile->compressor;

  pipefd[0] = pipefd[1] = -1;

  if (pipe (pipefd))
    {
      XPR(NT "pipe failed: %s\n", xd3_mainerror (ret = get_errno ()));
      goto pipe_cleanup;
    }

  if ((recomp_id = fork ()) < 0)
    {
      XPR(NT "fork failed: %s\n", xd3_mainerror (ret = get_errno ()));
      goto pipe_cleanup;
    }

  /* The child runs the recompression process: */
  if (recomp_id == 0)
    {
      if (option_verbose > 2)
        {
          XPR(NT "external recompression pid %d\n", getpid ());
        }

      /* Setup pipes: write to the output file, read from the pipe. */
      if (dup2 (XFNO (ofile), STDOUT_FILENO) < 0 ||
          dup2 (pipefd[PIPE_READ_FD], STDIN_FILENO) < 0 ||
          close (pipefd[PIPE_READ_FD]) ||
          close (pipefd[PIPE_WRITE_FD]) ||
          execlp (recomp->recomp_cmdname, recomp->recomp_cmdname,
                  recomp->recomp_options,
                  option_force2 ? "-f" : NULL,
                  NULL))
        {
          XPR(NT "child process %s failed to execute: %s\n",
              recomp->recomp_cmdname, xd3_mainerror (get_errno ()));
        }
      /* Only reached when exec fails. */
      _exit (127);
    }

  XD3_ASSERT(num_subprocs < MAX_SUBPROCS);
  ext_subprocs[num_subprocs++] = recomp_id;

  /* The parent closes both pipes after duplicating the output-fd for
   * writing to the compression pipe. */
  output_fd = dup (pipefd[PIPE_WRITE_FD]);

  if (output_fd < 0 ||
      main_file_close (ofile) ||
      close (pipefd[PIPE_READ_FD]) ||
      close (pipefd[PIPE_WRITE_FD]))
    {
      XPR(NT "close failed: %s\n", xd3_mainerror (ret = get_errno ()));
      goto pipe_cleanup;
    }

#if XD3_STDIO
  /* Note: fdopen() acquires the fd, closes it when finished. */
  if ((ofile->file = fdopen (output_fd, "w")) == NULL)
    {
      XPR(NT "fdopen failed: %s\n", xd3_mainerror (ret = get_errno ()));
      goto pipe_cleanup;
    }

#elif XD3_POSIX
  ofile->file = output_fd;
#endif

  /* Now the output file will be compressed. */
  return 0;

 pipe_cleanup:
  /* Best-effort teardown; some descriptors may already be -1. */
  close (output_fd);
  close (pipefd[PIPE_READ_FD]);
  close (pipefd[PIPE_WRITE_FD]);
  return ret;
}
#endif /* EXTERNAL_COMPRESSION */
/* Identify the compressor that was used based on its ident string,
* which is passed in the application header. */
static const main_extcomp*
main_ident_compressor (const char *ident)
{
usize_t i;
for (i = 0; i < SIZEOF_ARRAY (extcomp_types); i += 1)
{
if (strcmp (extcomp_types[i].ident, ident) == 0)
{
return & extcomp_types[i];
}
}
return NULL;
}
/* Return the main_extcomp record to use for this identifier, if possible. */
/* Resolve an ident string to a usable compressor record, warning (and
 * returning NULL) when the ident is unknown or when external
 * compression support was not compiled in. */
static const main_extcomp*
main_get_compressor (const char *ident)
{
  const main_extcomp *ext = main_ident_compressor (ident);

  if (ext == NULL)
    {
      if (! option_quiet)
        {
          XPR(NT "warning: cannot recompress output: "
              "unrecognized external compression ID: %s\n", ident);
        }
      return NULL;
    }

  if (! EXTERNAL_COMPRESSION)
    {
      if (! option_quiet)
        {
          XPR(NT "warning: external support not compiled: "
              "original input was compressed: %s\n", ext->recomp_cmdname);
        }
      return NULL;
    }

  return ext;
}
/*********************************************************************
APPLICATION HEADER
*******************************************************************/
#if XD3_ENCODER
/* Map a path to the short name recorded in the application header:
 * NULL becomes "", the standard-stream device paths become "-", and
 * anything else is reduced to its basename. */
static const char*
main_apphead_string (const char* x)
{
  const char *slash;

  if (x == NULL)
    {
      return "";
    }

  if (strcmp (x, "/dev/stdin") == 0 ||
      strcmp (x, "/dev/stdout") == 0 ||
      strcmp (x, "/dev/stderr") == 0)
    {
      return "-";
    }

  // TODO: this is not portable
  slash = strrchr (x, '/');
  return slash == NULL ? x : slash + 1;
}
/* Build (or adopt) the application header and attach it to the
 * stream.  Default format is "input/icomp" or
 * "input/icomp/source/scomp" using basenames and compressor idents.
 * Returns 0, or ENOMEM when the header buffer cannot be allocated. */
static int
main_set_appheader (xd3_stream *stream, main_file *input, main_file *sfile)
{
  /* The user may disable the application header.  Once the appheader
   * is set, this disables setting it again. */
  if (appheader_used || ! option_use_appheader) { return 0; }

  /* The user may specify the application header, otherwise format the
     default header. */
  if (option_appheader)
    {
      appheader_used = option_appheader;
    }
  else
    {
      const char *iname;
      const char *icomp;
      const char *sname;
      const char *scomp;
      usize_t len;

      iname = main_apphead_string (input->filename);
      icomp = (input->compressor == NULL) ? "" : input->compressor->ident;
      /* +2 per name/ident pair: one '/' separator plus room for the
       * terminating NUL (counted once per pair, which over-allocates
       * by at most two bytes in the four-part case). */
      len = (usize_t) strlen (iname) + (usize_t) strlen (icomp) + 2;

      if (sfile->filename != NULL)
        {
          sname = main_apphead_string (sfile->filename);
          scomp = (sfile->compressor == NULL) ? "" : sfile->compressor->ident;
          len += (usize_t) strlen (sname) + (usize_t) strlen (scomp) + 2;
        }
      else
        {
          sname = scomp = "";
        }

      if ((appheader_used = (uint8_t*) main_malloc (len)) == NULL)
        {
          return ENOMEM;
        }

      if (sfile->filename == NULL)
        {
          snprintf_func ((char*)appheader_used, len, "%s/%s", iname, icomp);
        }
      else
        {
          snprintf_func ((char*)appheader_used, len, "%s/%s/%s/%s",
                         iname, icomp, sname, scomp);
        }
    }

  xd3_set_appheader (stream, appheader_used,
                     (usize_t) strlen ((char*)appheader_used));

  return 0;
}
#endif
/* Apply one name/ident pair from the parsed application header to a
 * file: fill in a default filename (possibly rebased onto the other
 * file's directory) and record the compressor for later de/re-
 * compression.  parsed[0] is the name, parsed[1] the compressor id. */
static void
main_get_appheader_params (main_file *file, char **parsed,
                           int output, const char *type,
                           main_file *other)
{
  /* Set the filename if it was not specified.  If output, option_stdout (-c)
   * overrides. */
  if (file->filename == NULL &&
      ! (output && option_stdout) &&
      strcmp (parsed[0], "-") != 0)
    {
      file->filename = parsed[0];

      if (other->filename != NULL) {
        /* Take directory from the other file, if it has one. */
        /* TODO: This results in nonsense names like /dev/foo.tar.gz
         * and probably the filename-default logic interferes with
         * multi-file operation and the standard file extension?
         * Possibly the name header is bad, should be off by default.
         * Possibly we just want to remember external/compression
         * settings. */
        const char *last_slash = strrchr(other->filename, '/');

        if (last_slash != NULL) {
          usize_t dlen = (usize_t) (last_slash - other->filename);

          XD3_ASSERT(file->filename_copy == NULL);
          /* dlen + '/' + basename + NUL = dlen + 2 + strlen. */
          /* NOTE(review): main_malloc's result is not checked for NULL
           * here — presumably it reports/aborts on failure; confirm. */
          file->filename_copy =
            (char*) main_malloc(dlen + 2 + (usize_t) strlen(file->filename));

          strncpy(file->filename_copy, other->filename, dlen);
          file->filename_copy[dlen] = '/';
          strcpy(file->filename_copy + dlen + 1, parsed[0]);

          file->filename = file->filename_copy;
        }
      }

      if (! option_quiet)
        {
          XPR(NT "using default %s filename: %s\n", type, file->filename);
        }
    }

  /* Set the compressor, initiate de/recompression later. */
  if (file->compressor == NULL && *parsed[1] != 0)
    {
      file->flags |= RD_DECOMPSET;
      file->compressor = main_get_compressor (parsed[1]);
    }
}
/* Parse the received application header ("out/ocomp" or
 * "out/ocomp/src/scomp", '/'-separated, split in place) and apply the
 * pieces to the output and source files.  One-shot: clears
 * option_use_appheader so a later window cannot re-trigger it. */
static void
main_get_appheader (xd3_stream *stream, main_file *ifile,
                    main_file *output, main_file *sfile)
{
  uint8_t *apphead;
  usize_t appheadsz;
  int ret;

  /* The user may disable the application header.  Once the appheader
   * is set, this disables setting it again. */
  if (! option_use_appheader) { return; }

  ret = xd3_get_appheader (stream, & apphead, & appheadsz);

  /* Ignore failure, it only means we haven't received a header yet. */
  if (ret != 0) { return; }

  if (appheadsz > 0)
    {
      const int kMaxArgs = 4;
      char *start = (char*)apphead;
      char *slash;
      int place = 0;
      char *parsed[kMaxArgs];

      memset (parsed, 0, sizeof (parsed));

      /* Split in place on '/', at most kMaxArgs fields. */
      while ((slash = strchr (start, '/')) != NULL && place < (kMaxArgs-1))
        {
          *slash = 0;
          parsed[place++] = start;
          start = slash + 1;
        }

      parsed[place++] = start;

      /* First take the output parameters. */
      if (place == 2 || place == 4)
        {
          main_get_appheader_params (output, parsed, 1, "output", ifile);
        }

      /* Then take the source parameters. */
      if (place == 4)
        {
          main_get_appheader_params (sfile, parsed+2, 0, "source", ifile);
        }
    }

  option_use_appheader = 0;
  return;
}
/*********************************************************************
Main I/O routines
**********************************************************************/
/* This function acts like the above except it may also try to
* recognize a compressed input (source or target) when the first
* buffer of data is read. The EXTERNAL_COMPRESSION code is called to
* search for magic numbers. */
/* Read a window of primary input.  Identical to main_file_read()
 * except that the first read of a file may divert into the external
 * decompression check (which can replace the file with a pipe). */
static int
main_read_primary_input (main_file *file,
                         uint8_t *buf,
                         size_t size,
                         size_t *nread)
{
#if EXTERNAL_COMPRESSION
  if (option_decompress_inputs && (file->flags & RD_FIRST) != 0)
    {
      file->flags &= ~RD_FIRST;
      return main_secondary_decompress_check (file, buf, size, nread);
    }
#endif
  return main_file_read (file, buf, size, nread, "input read failed");
}
/* Open the main output file, sets a default file name, initiate
* recompression. This function is expected to fprint any error
* messages. */
/* Open the main output file (or stdout), refusing to overwrite an
 * existing file unless -f was given, and start the recompression
 * subprocess when the output is to be externally compressed.
 * Returns 0, EEXIST, or the open/recompress error. */
static int
main_open_output (xd3_stream *stream, main_file *ofile)
{
  int ret;

  if (option_no_output)
    {
      return 0;
    }

  if (ofile->filename == NULL)
    {
      /* No name given: bind the output to standard output. */
      XSTDOUT_XF (ofile);

      if (option_verbose > 1)
        {
          XPR(NT "using standard output: %s\n", ofile->filename);
        }
    }
  else
    {
      /* Stat the file to check for overwrite. */
      if (option_force == 0 && main_file_exists (ofile))
        {
          if (!option_quiet)
            {
              XPR(NT "to overwrite output file specify -f: %s\n",
                  ofile->filename);
            }
          return EEXIST;
        }

      if ((ret = main_file_open (ofile, ofile->filename, XO_WRITE)))
        {
          return ret;
        }

      if (option_verbose > 1) { XPR(NT "output %s\n", ofile->filename); }
    }

#if EXTERNAL_COMPRESSION
  /* Do output recompression. */
  if (ofile->compressor != NULL && option_recompress_outputs == 1)
    {
      if (! option_quiet)
        {
          XPR(NT "externally compressed output: %s %s%s > %s\n",
              ofile->compressor->recomp_cmdname,
              ofile->compressor->recomp_options,
              (option_force2 ? " -f" : ""),
              ofile->filename);
        }

      if ((ret = main_recompress_output (ofile)))
        {
          return ret;
        }
    }
#endif

  return 0;
}
/* Decide the input window size: the configured -B value, clamped down
 * to the input's size when stat() can determine it, but never smaller
 * than the allocation granularity. */
static usize_t
main_get_winsize (main_file *ifile)
{
  static shortbuf iszbuf;
  xoff_t file_size = 0;
  usize_t size = option_winsize;

  if (main_file_stat (ifile, &file_size) == 0)
    {
      size = (usize_t) min (file_size, (xoff_t) size);
    }

  size = max (size, XD3_ALLOCSIZE);

  if (option_verbose > 1)
    {
      XPR(NT "input %s window size %s\n",
          ifile->filename,
          main_format_bcnt (size, &iszbuf));
    }

  return size;
}
/*********************************************************************
Main routines
********************************************************************/
/* This is a generic input function. It calls the xd3_encode_input or
* xd3_decode_input functions and makes calls to the various input
* handling routines above, which coordinate external decompression.
*/
/* Generic driver for every command: configures an xd3_stream for the
 * chosen input/output functions, then pumps input windows through the
 * stream state machine (XD3_INPUT / XD3_OUTPUT / window events) until
 * the input is exhausted.  Returns EXIT_SUCCESS or EXIT_FAILURE. */
static int
main_input (xd3_cmd cmd,
            main_file *ifile,
            main_file *ofile,
            main_file *sfile)
{
  int ret;
  xd3_stream stream;
  size_t nread = 0;
  usize_t winsize;
  int stream_flags = 0;
  xd3_config config;
  xd3_source source;
  xoff_t last_total_in = 0;
  xoff_t last_total_out = 0;
  long start_time;
  int stdout_only = 0;
  int (*input_func) (xd3_stream*);
  int (*output_func) (xd3_stream*, main_file *);

  memset (& stream, 0, sizeof (stream));
  memset (& source, 0, sizeof (source));
  memset (& config, 0, sizeof (config));

  config.alloc = main_alloc;
  config.freef = main_free1;
  config.iopt_size = option_iopt_size;
  config.sprevsz = option_sprevsz;

  do_src_fifo = 0;

  start_time = get_millisecs_now ();

  if (option_use_checksum) { stream_flags |= XD3_ADLER32; }

  /* main_input setup: choose input/output functions and stream flags
   * per command.  The odd if/else-wrapped case labels below let the
   * three print commands share a common setup tail. */
  switch ((int) cmd)
    {
#if VCDIFF_TOOLS
           if (1) { case CMD_PRINTHDR:   stream_flags |= XD3_JUST_HDR; }
      else if (1) { case CMD_PRINTHDRS:  stream_flags |= XD3_SKIP_WINDOW; }
      else        { case CMD_PRINTDELTA: stream_flags |= XD3_SKIP_EMIT; }
      ifile->flags |= RD_NONEXTERNAL;
      input_func = xd3_decode_input;
      output_func = main_print_func;
      stream_flags |= XD3_ADLER32_NOVER;
      stdout_only = 1;
      break;

    case CMD_RECODE:
    case CMD_MERGE:
    case CMD_MERGE_ARG:
      /* No source will be read */
      stream_flags |= XD3_ADLER32_NOVER | XD3_SKIP_EMIT;
      ifile->flags |= RD_NONEXTERNAL;
      input_func = xd3_decode_input;

      if ((ret = main_init_recode_stream ()))
        {
          return EXIT_FAILURE;
        }

      if (cmd == CMD_RECODE) { output_func = main_recode_func; }
      else                   { output_func = main_merge_func; }
      break;
#endif /* VCDIFF_TOOLS */

#if XD3_ENCODER
    case CMD_ENCODE:
      do_src_fifo = 1;
      input_func = xd3_encode_input;
      output_func = main_write_output;

      if (option_no_compress)      { stream_flags |= XD3_NOCOMPRESS; }
      if (option_use_altcodetable) { stream_flags |= XD3_ALT_CODE_TABLE; }

      if (option_smatch_config)
        {
          /* Parse the -C soft-config string: exactly
           * XD3_SOFTCFG_VARCNT comma-free decimal fields. */
          const char *s = option_smatch_config;
          char *e;
          int values[XD3_SOFTCFG_VARCNT];
          int got;

          config.smatch_cfg = XD3_SMATCH_SOFT;

          for (got = 0; got < XD3_SOFTCFG_VARCNT; got += 1, s = e + 1)
            {
              values[got] = strtol (s, &e, 10);

              if ((values[got] < 0) ||
                  (e == s) ||
                  (got < XD3_SOFTCFG_VARCNT-1 && *e == 0) ||
                  (got == XD3_SOFTCFG_VARCNT-1 && *e != 0))
                {
                  XPR(NT "invalid string match specifier (-C) %d: %s\n",
                      got, s);
                  return EXIT_FAILURE;
                }
            }

          config.smatcher_soft.large_look    = values[0];
          config.smatcher_soft.large_step    = values[1];
          config.smatcher_soft.small_look    = values[2];
          config.smatcher_soft.small_chain   = values[3];
          config.smatcher_soft.small_lchain  = values[4];
          config.smatcher_soft.max_lazy      = values[5];
          config.smatcher_soft.long_enough   = values[6];
        }
      else
        {
          /* Map -0 .. -9 onto the built-in matcher presets. */
          if (option_verbose > 2)
            {
              XPR(NT "compression level: %d\n", option_level);
            }

          if (option_level == 0)
            {
              stream_flags |= XD3_NOCOMPRESS;
              config.smatch_cfg = XD3_SMATCH_FASTEST;
            }
          else if (option_level == 1)
            { config.smatch_cfg = XD3_SMATCH_FASTEST; }
          else if (option_level == 2)
            { config.smatch_cfg = XD3_SMATCH_FASTER; }
          else if (option_level <= 5)
            { config.smatch_cfg = XD3_SMATCH_FAST; }
          else if (option_level == 6)
            { config.smatch_cfg = XD3_SMATCH_DEFAULT; }
          else
            { config.smatch_cfg = XD3_SMATCH_SLOW; }
        }
      break;
#endif

    case CMD_DECODE:
      if (option_use_checksum == 0) { stream_flags |= XD3_ADLER32_NOVER; }
      ifile->flags |= RD_NONEXTERNAL;
      input_func = xd3_decode_input;
      output_func = main_write_output;
      break;

    default:
      XPR(NT "internal error\n");
      return EXIT_FAILURE;
    }

  /* Allocate the window buffer. */
  main_bsize = winsize = main_get_winsize (ifile);

  if ((main_bdata = (uint8_t*) main_bufalloc (winsize)) == NULL)
    {
      return EXIT_FAILURE;
    }

  config.winsize = winsize;
  config.getblk = main_getblk_func;
  config.flags = stream_flags;

  if ((ret = main_set_secondary_flags (&config)) ||
      (ret = xd3_config_stream (& stream, & config)))
    {
      XPR(NT XD3_LIB_ERRMSG (& stream, ret));
      return EXIT_FAILURE;
    }

#if VCDIFF_TOOLS
  if ((cmd == CMD_MERGE || cmd == CMD_MERGE_ARG) &&
      (ret = xd3_whole_state_init (& stream)))
    {
      XPR(NT XD3_LIB_ERRMSG (& stream, ret));
      return EXIT_FAILURE;
    }
#endif

  if (cmd != CMD_DECODE)
    {
      /* When not decoding, set source now.  The decoder delays this
       * step until XD3_GOTHEADER. */
      if (sfile && sfile->filename != NULL)
        {
          if ((ret = main_set_source (& stream, cmd, sfile, & source)))
            {
              return EXIT_FAILURE;
            }

          XD3_ASSERT(stream.src != NULL);
        }
    }

  if (cmd == CMD_PRINTHDR ||
      cmd == CMD_PRINTHDRS ||
      cmd == CMD_PRINTDELTA ||
      cmd == CMD_RECODE)
    {
      /* These commands decode without a real source; install a fake
       * one so copy instructions can be followed. */
      if (sfile->filename == NULL)
        {
          allow_fake_source = 1;
          sfile->filename = "<placeholder>";
          main_set_source (& stream, cmd, sfile, & source);
        }
    }

  /* This times each window. */
  get_millisecs_since ();

  /* Main input loop: read a window, then drive the stream state
   * machine until it asks for more input. */
  do
    {
      xoff_t input_offset;
      xoff_t input_remain;
      usize_t try_read;

      input_offset = ifile->nread;

      /* Clamp so the total read never overflows xoff_t. */
      input_remain = XOFF_T_MAX - input_offset;

      try_read = (usize_t) min ((xoff_t) config.winsize, input_remain);

      if ((ret = main_read_primary_input (ifile, main_bdata,
                                          try_read, & nread)))
        {
          return EXIT_FAILURE;
        }

      /* If we've reached EOF tell the stream to flush. */
      if (nread < try_read)
        {
          stream.flags |= XD3_FLUSH;
        }

#if XD3_ENCODER
      /* After the first main_read_primary_input completes, we know
       * all the information needed to encode the application
       * header. */
      if (cmd == CMD_ENCODE &&
          (ret = main_set_appheader (& stream, ifile, sfile)))
        {
          return EXIT_FAILURE;
        }
#endif
      xd3_avail_input (& stream, main_bdata, nread);

      /* If we read zero bytes after encoding at least one window... */
      if (nread == 0 && stream.current_window > 0) {
        break;
      }

    again:
      ret = input_func (& stream);

      switch (ret)
        {
        case XD3_INPUT:
          /* Stream wants the next window. */
          continue;

        case XD3_GOTHEADER:
          {
            XD3_ASSERT (stream.current_window == 0);

            /* Need to process the appheader as soon as possible.  It may
             * contain a suggested default filename/decompression routine for
             * the ofile, and it may contain default/decompression routine for
             * the sources. */
            if (cmd == CMD_DECODE)
              {
                /* May need to set the sfile->filename if none was given. */
                main_get_appheader (& stream, ifile, ofile, sfile);

                /* Now open the source file. */
                  if ((sfile->filename != NULL) &&
                      (ret = main_set_source (& stream, cmd, sfile, & source)))
                  {
                    return EXIT_FAILURE;
                  }
              }
          }
          /* FALLTHROUGH */
        case XD3_WINSTART:
          {
            /* e.g., set or unset XD3_SKIP_WINDOW. */
            goto again;
          }

        case XD3_OUTPUT:
          {
            /* Defer opening the output file until the stream produces its
             * first output for both encoder and decoder, this way we
             * delay long enough for the decoder to receive the
             * application header.  (Or longer if there are skipped
             * windows, but I can't think of any reason not to delay
             * open.) */
            if (ofile != NULL &&
                ! main_file_isopen (ofile) &&
                (ret = main_open_output (& stream, ofile)) != 0)
              {
                return EXIT_FAILURE;
              }

            if ((ret = output_func (& stream, ofile)) &&
                (ret != PRINTHDR_SPECIAL))
              {
                return EXIT_FAILURE;
              }

            if (ret == PRINTHDR_SPECIAL)
              {
                /* Header-only print commands stop after the header. */
                xd3_abort_stream (& stream);
                ret = EXIT_SUCCESS;
                goto done;
              }

            ret = 0;

            xd3_consume_output (& stream);
            goto again;
          }

        case XD3_WINFINISH:
          {
            /* End of one window: emit per-window diagnostics. */
            if (IS_ENCODE (cmd) || cmd == CMD_DECODE || cmd == CMD_RECODE)
              {
                if (! option_quiet && IS_ENCODE (cmd) &&
                    main_file_isopen (sfile))
                  {
                    /* Warn when no source copies are found */
                    if (option_verbose && ! xd3_encoder_used_source (& stream))
                      {
                        XPR(NT "warning: input window %"Q"u..%"Q"u has "
                            "no source copies\n",
                            stream.current_window * winsize,
                            (stream.current_window+1) * winsize);
                        XD3_ASSERT (stream.src != NULL);
                      }

                    /* Limited i-buffer size affects source copies
                     * when the sourcewin is decided early. */
                    if (option_verbose > 1 &&
                        stream.srcwin_decided_early &&
                        stream.i_slots_used > stream.iopt_size)
                      {
                        XPR(NT "warning: input position %"Q"u overflowed "
                            "instruction buffer, needed %u (vs. %u), "
                            "consider changing -I\n",
                            stream.current_window * winsize,
                            stream.i_slots_used, stream.iopt_size);
                      }
                  }

                if (option_verbose)
                  {
                    shortbuf rrateavg, wrateavg, tm;
                    shortbuf rdb, wdb;
                    shortbuf trdb, twdb;
                    shortbuf srcpos;
                    long millis = get_millisecs_since ();
                    usize_t this_read = (usize_t)(stream.total_in -
                                                  last_total_in);
                    usize_t this_write = (usize_t)(stream.total_out -
                                                   last_total_out);
                    last_total_in = stream.total_in;
                    last_total_out = stream.total_out;

                    if (option_verbose > 1)
                      {
                        XPR(NT "%"Q"u: in %s (%s): out %s (%s): "
                            "total in %s: out %s: %s: srcpos %s\n",
                            stream.current_window,
                            main_format_bcnt (this_read, &rdb),
                            main_format_rate (this_read, millis, &rrateavg),
                            main_format_bcnt (this_write, &wdb),
                            main_format_rate (this_write, millis, &wrateavg),
                            main_format_bcnt (stream.total_in, &trdb),
                            main_format_bcnt (stream.total_out, &twdb),
                            main_format_millis (millis, &tm),
                            main_format_bcnt (sfile->source_position, &srcpos));
                      }
                    else
                      {
                        XPR(NT "%"Q"u: in %s: out %s: total in %s: "
                            "out %s: %s\n",
                            stream.current_window,
                            main_format_bcnt (this_read, &rdb),
                            main_format_bcnt (this_write, &wdb),
                            main_format_bcnt (stream.total_in, &trdb),
                            main_format_bcnt (stream.total_out, &twdb),
                            main_format_millis (millis, &tm));
                      }
                  }
              }
            goto again;
          }

        default:
          /* input_func() error */
          XPR(NT XD3_LIB_ERRMSG (& stream, ret));
          if (! option_quiet && ret == XD3_INVALID_INPUT)
            {
              XPR(NT "normally this indicates that the source file is incorrect\n");
              XPR(NT "please verify the source file with sha1sum or equivalent\n");
            }
          return EXIT_FAILURE;
        }
    }
  while (nread == config.winsize);
done:
  /* Close the inputs. (ifile must be open, sfile may be open) */
  main_file_close (ifile);
  if (sfile != NULL)
    {
      main_file_close (sfile);
    }

#if VCDIFF_TOOLS
  if (cmd == CMD_MERGE &&
      (ret = main_merge_output (& stream, ofile)))
    {
      return EXIT_FAILURE;
    }

  if (cmd == CMD_MERGE_ARG)
    {
      /* Hand this merge result to the next merge argument. */
      xd3_swap_whole_state (& stream.whole_target,
                            & recode_stream->whole_target);
    }
#endif /* VCDIFF_TOOLS */

  /* If output file is not open yet because of delayed-open, it means
   * we never encountered a window in the delta, but it could have had
   * a VCDIFF header?  TODO: solve this elsewhere.  For now, it prints
   * "nothing to output" below, but the check doesn't happen in case
   * of option_no_output.  */
  if (! option_no_output && ofile != NULL)
    {
      if (!stdout_only && ! main_file_isopen (ofile))
        {
          XPR(NT "nothing to output: %s\n", ifile->filename);
          return EXIT_FAILURE;
        }

      /* Have to close the output before calling
       * main_external_compression_finish, or else it hangs. */
      if (main_file_close (ofile) != 0)
        {
          return EXIT_FAILURE;
        }
    }

#if EXTERNAL_COMPRESSION
  if ((ret = main_external_compression_finish ()))
    {
      XPR(NT "external compression commands failed\n");
      return EXIT_FAILURE;
    }
#endif

  if ((ret = xd3_close_stream (& stream)))
    {
      XPR(NT XD3_LIB_ERRMSG (& stream, ret));
      return EXIT_FAILURE;
    }

#if XD3_ENCODER
  if (option_verbose > 1 && cmd == CMD_ENCODE)
    {
      XPR(NT "scanner configuration: %s\n", stream.smatcher.name);
      XPR(NT "target hash table size: %u\n", stream.small_hash.size);
      if (sfile != NULL && sfile->filename != NULL)
        {
          XPR(NT "source hash table size: %u\n", stream.large_hash.size);
        }
    }

  if (option_verbose > 2 && cmd == CMD_ENCODE)
    {
      XPR(NT "source copies: %"Q"u (%"Q"u bytes)\n",
          stream.n_scpy, stream.l_scpy);
      XPR(NT "target copies: %"Q"u (%"Q"u bytes)\n",
          stream.n_tcpy, stream.l_tcpy);
      XPR(NT "adds: %"Q"u (%"Q"u bytes)\n", stream.n_add, stream.l_add);
      XPR(NT "runs: %"Q"u (%"Q"u bytes)\n", stream.n_run, stream.l_run);
    }
#endif

  xd3_free_stream (& stream);

  if (option_verbose)
    {
      shortbuf tm;
      long end_time = get_millisecs_now ();
      xoff_t nwrite = ofile != NULL ? ofile->nwrite : 0;

      XPR(NT "finished in %s; input %"Q"u output %"Q"u bytes (%0.2f%%)\n",
          main_format_millis (end_time - start_time, &tm),
          ifile->nread, nwrite, 100.0 * nwrite / ifile->nread);
    }

  return EXIT_SUCCESS;
}
/* free memory before exit, reset single-use variables. */
static void
main_cleanup (void)
{
if (appheader_used != NULL &&
appheader_used != option_appheader)
{
main_free (appheader_used);
appheader_used = NULL;
}
main_buffree (main_bdata);
main_bdata = NULL;
main_bsize = 0;
main_lru_cleanup();
if (recode_stream != NULL)
{
xd3_free_stream (recode_stream);
main_free (recode_stream);
recode_stream = NULL;
}
if (merge_stream != NULL)
{
xd3_free_stream (merge_stream);
main_free (merge_stream);
merge_stream = NULL;
}
XD3_ASSERT (main_mallocs == 0);
}
static void
setup_environment (int argc,
char **argv,
int *argc_out,
char ***argv_out,
char ***argv_free,
char **env_free)
{
int n, i, i0;
char *p, *v = getenv("XDELTA");
if (v == NULL) {
(*argc_out) = argc;
(*argv_out) = argv;
(*argv_free) = NULL;
(*env_free) = NULL;
return;
}
(*env_free) = (char*) main_malloc((usize_t) strlen(v) + 1);
strcpy(*env_free, v);
/* Space needed for extra args, at least # of spaces */
n = argc + 1;
for (p = *env_free; *p != 0; ) {
if (*p++ == ' ') {
n++;
}
}
(*argv_free) = (char**) main_malloc(sizeof(char*) * (n + 1));
(*argv_out) = (*argv_free);
(*argv_out)[0] = argv[0];
(*argv_out)[n] = NULL;
i = 1;
for (p = *env_free; *p != 0; ) {
(*argv_out)[i++] = p;
while (*p != ' ' && *p != 0) {
p++;
}
while (*p == ' ') {
*p++ = 0;
}
}
for (i0 = 1; i0 < argc; i0++) {
(*argv_out)[i++] = argv[i0];
}
/* Counting spaces is an upper bound, argv stays NULL terminated. */
(*argc_out) = i;
while (i <= n) {
(*argv_out)[i++] = NULL;
}
}
#if PYTHON_MODULE || SWIG_MODULE || NOT_MAIN
int xd3_main_cmdline (int argc, char **argv)
#else
int main (int argc, char **argv)
#endif
{
static const char *flags =
"0123456789cdefhnqvDFJNORTVs:m:B:C:E:I:L:O:M:P:W:A::S::";
xd3_cmd cmd;
main_file ifile;
main_file ofile;
main_file sfile;
main_merge_list merge_order;
main_merge *merge;
int my_optind;
const char *my_optarg;
const char *my_optstr;
const char *sfilename;
int env_argc;
char **env_argv;
char **free_argv; /* malloc() in setup_environment() */
char *free_value; /* malloc() in setup_environment() */
int ret;
#ifdef _WIN32
GetStartupInfo(&winStartupInfo);
setvbuf(stderr, NULL, _IONBF, 0); /* Do not buffer stderr */
#endif
main_file_init (& ifile);
main_file_init (& ofile);
main_file_init (& sfile);
main_merge_list_init (& merge_order);
reset_defaults();
free_argv = NULL;
free_value = NULL;
setup_environment(argc, argv, &env_argc, &env_argv,
&free_argv, &free_value);
cmd = CMD_NONE;
sfilename = NULL;
my_optind = 1;
argv = env_argv;
argc = env_argc;
program_name = env_argv[0];
takearg:
my_optarg = NULL;
my_optstr = argv[my_optind];
/* This doesn't use getopt() because it makes trouble for -P & python which
* reenter main() and thus care about freeing all memory. I never had much
* trust for getopt anyway, it's too opaque. This implements a fairly
* standard non-long-option getopt with support for named operations (e.g.,
* "xdelta3 [encode|decode|printhdr...] < in > out"). */
if (my_optstr)
{
if (*my_optstr == '-') { my_optstr += 1; }
else if (cmd == CMD_NONE) { goto nonflag; }
else { my_optstr = NULL; }
}
while (my_optstr)
{
const char *s;
my_optarg = NULL;
if ((ret = *my_optstr++) == 0) { my_optind += 1; goto takearg; }
/* Option handling: first check for one ':' following the option in
* flags, then check for two. The syntax allows:
*
* 1. -Afoo defines optarg="foo"
* 2. -A foo defines optarg="foo"
* 3. -A "" defines optarg="" (allows empty-string)
* 4. -A [EOA or -moreargs] error (mandatory case)
* 5. -A [EOA -moreargs] defines optarg=NULL (optional case)
* 6. -A=foo defines optarg="foo"
* 7. -A= defines optarg="" (mandatory case)
* 8. -A= defines optarg=NULL (optional case)
*
* See tests in test_command_line_arguments().
*/
s = strchr (flags, ret);
if (s && s[1] && s[1] == ':')
{
int option = s[2] && s[2] == ':';
/* Case 1, set optarg to the remaining characters. */
my_optarg = my_optstr;
my_optstr = "";
/* Case 2-5 */
if (*my_optarg == 0)
{
/* Condition 4-5 */
int have_arg = (my_optind < (argc - 1) &&
*argv[my_optind+1] != '-');
if (! have_arg)
{
if (! option)
{
/* Case 4 */
XPR(NT "-%c: requires an argument\n", ret);
ret = EXIT_FAILURE;
goto cleanup;
}
/* Case 5. */
my_optarg = NULL;
}
else
{
/* Case 2-3. */
my_optarg = argv[++my_optind];
}
}
/* Case 6-8. */
else if (*my_optarg == '=')
{
/* Remove the = in all cases. */
my_optarg += 1;
if (option && *my_optarg == 0)
{
/* Case 8. */
my_optarg = NULL;
}
}
}
switch (ret)
{
/* case: if no '-' was found, maybe check for a command name. */
nonflag:
if (strcmp (my_optstr, "decode") == 0) { cmd = CMD_DECODE; }
else if (strcmp (my_optstr, "encode") == 0)
{
#if XD3_ENCODER
cmd = CMD_ENCODE;
#else
XPR(NT "encoder support not compiled\n");
return EXIT_FAILURE;
#endif
}
else if (strcmp (my_optstr, "config") == 0) { cmd = CMD_CONFIG; }
#if REGRESSION_TEST
else if (strcmp (my_optstr, "test") == 0) { cmd = CMD_TEST; }
#endif
#if VCDIFF_TOOLS
else if (strcmp (my_optstr, "printhdr") == 0) { cmd = CMD_PRINTHDR; }
else if (strcmp (my_optstr, "printhdrs") == 0)
{ cmd = CMD_PRINTHDRS; }
else if (strcmp (my_optstr, "printdelta") == 0)
{ cmd = CMD_PRINTDELTA; }
else if (strcmp (my_optstr, "recode") == 0) { cmd = CMD_RECODE; }
else if (strcmp (my_optstr, "merge") == 0) { cmd = CMD_MERGE; }
#endif
/* If no option was found and still no command, let the default
* command be encode. The remaining args are treated as
* filenames. */
if (cmd == CMD_NONE)
{
cmd = CMD_DEFAULT;
my_optstr = NULL;
break;
}
else
{
/* But if we find a command name, continue the getopt loop. */
my_optind += 1;
goto takearg;
}
/* gzip-like options */
case '0': case '1': case '2': case '3': case '4':
case '5': case '6': case '7': case '8': case '9':
option_level = ret - '0';
break;
case 'f': option_force = 1; break;
case 'F':
#if EXTERNAL_COMPRESSION
option_force2 = 1;
#else
XPR(NT "warning: -F option ignored, "
"external compression support was not compiled\n");
break;
#endif
case 'v': option_verbose += 1; option_quiet = 0; break;
case 'q': option_quiet = 1; option_verbose = 0; break;
case 'c': option_stdout = 1; break;
case 'd':
if (cmd == CMD_NONE) { cmd = CMD_DECODE; }
else { ret = main_help (); goto exit; }
break;
case 'e':
#if XD3_ENCODER
if (cmd == CMD_NONE) { cmd = CMD_ENCODE; }
else { ret = main_help (); goto exit; }
break;
#else
XPR(NT "encoder support not compiled\n");
return EXIT_FAILURE;
#endif
case 'n': option_use_checksum = 0; break;
case 'N': option_no_compress = 1; break;
case 'T': option_use_altcodetable = 1; break;
case 'C': option_smatch_config = my_optarg; break;
case 'J': option_no_output = 1; break;
case 'S': if (my_optarg == NULL)
{
option_use_secondary = 1;
option_secondary = "none";
}
else
{
option_use_secondary = 1;
option_secondary = my_optarg;
}
break;
case 'A': if (my_optarg == NULL) { option_use_appheader = 0; }
else { option_appheader = (uint8_t*) my_optarg; } break;
case 'B': {
xoff_t bsize;
if ((ret = main_atoux (my_optarg, & bsize,
XD3_MINSRCWINSZ, XD3_MAXSRCWINSZ, 'B')))
{
goto exit;
}
option_srcwinsz = bsize;
break;
}
case 'I':
if ((ret = main_atou (my_optarg, & option_iopt_size, 0,
0, 'I')))
{
goto exit;
}
break;
case 'P':
if ((ret = main_atou (my_optarg, & option_sprevsz, 0,
0, 'P')))
{
goto exit;
}
break;
case 'W':
if ((ret = main_atou (my_optarg, & option_winsize, XD3_ALLOCSIZE,
XD3_HARDMAXWINSIZE, 'W')))
{
goto exit;
}
break;
case 'D':
#if EXTERNAL_COMPRESSION == 0
if (option_verbose > 0)
{
XPR(NT "warning: -D option ignored, "
"external compression support was not compiled\n");
}
#else
option_decompress_inputs = 0;
#endif
break;
case 'R':
#if EXTERNAL_COMPRESSION == 0
if (option_verbose > 0)
{
XPR(NT "warning: -R option ignored, "
"external compression support was not compiled\n");
}
#else
option_recompress_outputs = 0;
#endif
break;
case 's':
if (sfilename != NULL)
{
XPR(NT "specify only one source file\n");
goto cleanup;
}
sfilename = my_optarg;
break;
case 'm':
if ((merge = (main_merge*)
main_malloc (sizeof (main_merge))) == NULL)
{
goto cleanup;
}
main_merge_list_push_back (& merge_order, merge);
merge->filename = my_optarg;
break;
case 'V':
ret = main_version (); goto exit;
default:
ret = main_help (); goto exit;
}
}
option_source_filename = sfilename;
/* In case there were no arguments, set the default command. */
if (cmd == CMD_NONE) { cmd = CMD_DEFAULT; }
argc -= my_optind;
argv += my_optind;
/* There may be up to two more arguments. */
if (argc > 2)
{
XPR(NT "too many filenames: %s ...\n", argv[2]);
goto cleanup;
}
ifile.flags = RD_FIRST | RD_MAININPUT;
sfile.flags = RD_FIRST;
sfile.filename = option_source_filename;
/* The infile takes the next argument, if there is one. But if not, infile
* is set to stdin. */
if (argc > 0)
{
ifile.filename = argv[0];
if ((ret = main_file_open (& ifile, ifile.filename, XO_READ)))
{
goto cleanup;
}
}
else
{
XSTDIN_XF (& ifile);
}
/* The ofile takes the following argument, if there is one. But if not, it
* is left NULL until the application header is processed. It will be set
* in main_open_output. */
if (argc > 1)
{
/* Check for conflicting arguments. */
if (option_stdout && ! option_quiet)
{
XPR(NT "warning: -c option overrides output filename: %s\n",
argv[1]);
}
if (! option_stdout) { ofile.filename = argv[1]; }
}
#if VCDIFF_TOOLS
if (cmd == CMD_MERGE &&
(ret = main_merge_arguments (&merge_order)))
{
goto cleanup;
}
#endif /* VCDIFF_TOOLS */
switch (cmd)
{
case CMD_PRINTHDR:
case CMD_PRINTHDRS:
case CMD_PRINTDELTA:
#if XD3_ENCODER
case CMD_ENCODE:
case CMD_RECODE:
case CMD_MERGE:
#endif
case CMD_DECODE:
ret = main_input (cmd, & ifile, & ofile, & sfile);
break;
#if REGRESSION_TEST
case CMD_TEST:
main_config ();
ret = xd3_selftest ();
break;
#endif
case CMD_CONFIG:
ret = main_config ();
break;
default:
ret = main_help ();
break;
}
if (0)
{
cleanup:
ret = EXIT_FAILURE;
exit:
(void)0;
}
#if EXTERNAL_COMPRESSION
main_external_compression_cleanup ();
#endif
main_file_cleanup (& ifile);
main_file_cleanup (& ofile);
main_file_cleanup (& sfile);
while (! main_merge_list_empty (& merge_order))
{
merge = main_merge_list_pop_front (& merge_order);
main_free (merge);
}
main_free (free_argv);
main_free (free_value);
main_cleanup ();
fflush (stdout);
fflush (stderr);
return ret;
}
static int
main_help (void)
{
main_version();
/* Note: update wiki when command-line features change */
XPR(NTR "usage: xdelta3 [command/options] [input [output]]\n");
XPR(NTR "make patch:\n");
XPR(NTR "\n");
XPR(NTR " xdelta3.exe -e -s old_file new_file delta_file\n");
XPR(NTR "\n");
XPR(NTR "apply patch:\n");
XPR(NTR "\n");
XPR(NTR " xdelta3.exe -d -s old_file delta_file decoded_new_file\n");
XPR(NTR "\n");
XPR(NTR "special command names:\n");
XPR(NTR " config prints xdelta3 configuration\n");
XPR(NTR " decode decompress the input\n");
XPR(NTR " encode compress the input%s\n",
XD3_ENCODER ? "" : " [Not compiled]");
#if REGRESSION_TEST
XPR(NTR " test run the builtin tests\n");
#endif
#if VCDIFF_TOOLS
XPR(NTR "special commands for VCDIFF inputs:\n");
XPR(NTR " printdelta print information about the entire delta\n");
XPR(NTR " printhdr print information about the first window\n");
XPR(NTR " printhdrs print information about all windows\n");
XPR(NTR " recode encode with new application/secondary settings\n");
XPR(NTR " merge merge VCDIFF inputs (see below)\n");
#endif
XPR(NTR "merge patches:\n");
XPR(NTR "\n");
XPR(NTR " xdelta3 merge -m 1.vcdiff -m 2.vcdiff 3.vcdiff merged.vcdiff\n");
XPR(NTR "\n");
XPR(NTR "standard options:\n");
XPR(NTR " -0 .. -9 compression level\n");
XPR(NTR " -c use stdout\n");
XPR(NTR " -d decompress\n");
XPR(NTR " -e compress%s\n",
XD3_ENCODER ? "" : " [Not compiled]");
XPR(NTR " -f force (overwrite, ignore trailing garbage)\n");
#if EXTERNAL_COMPRESSION
XPR(NTR " -F force the external-compression subprocess\n");
#endif
XPR(NTR " -h show help\n");
XPR(NTR " -q be quiet\n");
XPR(NTR " -v be verbose (max 2)\n");
XPR(NTR " -V show version\n");
XPR(NTR "memory options:\n");
XPR(NTR " -B bytes source window size\n");
XPR(NTR " -W bytes input window size\n");
XPR(NTR " -P size compression duplicates window\n");
XPR(NTR " -I size instruction buffer size (0 = unlimited)\n");
XPR(NTR "compression options:\n");
XPR(NTR " -s source source file to copy from (if any)\n");
XPR(NTR " -S [djw|fgk] enable/disable secondary compression\n");
XPR(NTR " -N disable small string-matching compression\n");
XPR(NTR " -D disable external decompression (encode/decode)\n");
XPR(NTR " -R disable external recompression (decode)\n");
XPR(NTR " -n disable checksum (encode/decode)\n");
XPR(NTR " -C soft config (encode, undocumented)\n");
XPR(NTR " -A [apphead] disable/provide application header (encode)\n");
XPR(NTR " -J disable output (check/compute only)\n");
XPR(NTR " -T use alternate code table (test)\n");
XPR(NTR " -m arguments for \"merge\"\n");
XPR(NTR "the XDELTA environment variable may contain extra args:\n");
XPR(NTR " XDELTA=\"-s source-x.y.tar.gz\" \\\n");
XPR(NTR " tar --use-compress-program=xdelta3 \\\n");
XPR(NTR " -cf target-x.z.tar.gz.vcdiff target-x.y\n");
return EXIT_FAILURE;
}
|
2419_0
|
crossvul
|
h
|
CWE-119
|
Improper Restriction of Operations within the Bounds of a Memory Buffer - Certain languages allow direct addressing of memory locations and do not automatically ensure that these locations are valid for the memory buffer that is being referenced.
|
c
|
#include <asm/cache.h>
#include "entry.h"
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
/* read ar.itc in advance, and use it before leaving bank 0 */
#define ACCOUNT_GET_STAMP \
(pUStk) mov.m r20=ar.itc;
#define ACCOUNT_SYS_ENTER \
(pUStk) br.call.spnt rp=account_sys_enter \
;;
#else
#define ACCOUNT_GET_STAMP
#define ACCOUNT_SYS_ENTER
#endif
/*
* DO_SAVE_MIN switches to the kernel stacks (if necessary) and saves
* the minimum state necessary that allows us to turn psr.ic back
* on.
*
* Assumed state upon entry:
* psr.ic: off
* r31: contains saved predicates (pr)
*
* Upon exit, the state is as follows:
* psr.ic: off
* r2 = points to &pt_regs.r16
* r8 = contents of ar.ccv
* r9 = contents of ar.csd
* r10 = contents of ar.ssd
* r11 = FPSR_DEFAULT
* r12 = kernel sp (kernel virtual address)
* r13 = points to current task_struct (kernel virtual address)
* p15 = TRUE if psr.i is set in cr.ipsr
* predicate registers (other than p2, p3, and p15), b6, r3, r14, r15:
* preserved
*
* Note that psr.ic is NOT turned on by this macro. This is so that
* we can pass interruption state as arguments to a handler.
*/
#define DO_SAVE_MIN(COVER,SAVE_IFS,EXTRA) \
mov r16=IA64_KR(CURRENT); /* M */ \
mov r27=ar.rsc; /* M */ \
mov r20=r1; /* A */ \
mov r25=ar.unat; /* M */ \
mov r29=cr.ipsr; /* M */ \
mov r26=ar.pfs; /* I */ \
mov r28=cr.iip; /* M */ \
mov r21=ar.fpsr; /* M */ \
COVER; /* B;; (or nothing) */ \
;; \
adds r16=IA64_TASK_THREAD_ON_USTACK_OFFSET,r16; \
;; \
ld1 r17=[r16]; /* load current->thread.on_ustack flag */ \
st1 [r16]=r0; /* clear current->thread.on_ustack flag */ \
adds r1=-IA64_TASK_THREAD_ON_USTACK_OFFSET,r16 \
/* switch from user to kernel RBS: */ \
;; \
invala; /* M */ \
SAVE_IFS; \
cmp.eq pKStk,pUStk=r0,r17; /* are we in kernel mode already? */ \
;; \
(pUStk) mov ar.rsc=0; /* set enforced lazy mode, pl 0, little-endian, loadrs=0 */ \
;; \
(pUStk) mov.m r24=ar.rnat; \
(pUStk) addl r22=IA64_RBS_OFFSET,r1; /* compute base of RBS */ \
(pKStk) mov r1=sp; /* get sp */ \
;; \
(pUStk) lfetch.fault.excl.nt1 [r22]; \
(pUStk) addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r1; /* compute base of memory stack */ \
(pUStk) mov r23=ar.bspstore; /* save ar.bspstore */ \
;; \
(pUStk) mov ar.bspstore=r22; /* switch to kernel RBS */ \
(pKStk) addl r1=-IA64_PT_REGS_SIZE,r1; /* if in kernel mode, use sp (r12) */ \
;; \
(pUStk) mov r18=ar.bsp; \
(pUStk) mov ar.rsc=0x3; /* set eager mode, pl 0, little-endian, loadrs=0 */ \
adds r17=2*L1_CACHE_BYTES,r1; /* really: biggest cache-line size */ \
adds r16=PT(CR_IPSR),r1; \
;; \
lfetch.fault.excl.nt1 [r17],L1_CACHE_BYTES; \
st8 [r16]=r29; /* save cr.ipsr */ \
;; \
lfetch.fault.excl.nt1 [r17]; \
tbit.nz p15,p0=r29,IA64_PSR_I_BIT; \
mov r29=b0 \
;; \
adds r16=PT(R8),r1; /* initialize first base pointer */ \
adds r17=PT(R9),r1; /* initialize second base pointer */ \
(pKStk) mov r18=r0; /* make sure r18 isn't NaT */ \
;; \
.mem.offset 0,0; st8.spill [r16]=r8,16; \
.mem.offset 8,0; st8.spill [r17]=r9,16; \
;; \
.mem.offset 0,0; st8.spill [r16]=r10,24; \
.mem.offset 8,0; st8.spill [r17]=r11,24; \
;; \
st8 [r16]=r28,16; /* save cr.iip */ \
st8 [r17]=r30,16; /* save cr.ifs */ \
(pUStk) sub r18=r18,r22; /* r18=RSE.ndirty*8 */ \
mov r8=ar.ccv; \
mov r9=ar.csd; \
mov r10=ar.ssd; \
movl r11=FPSR_DEFAULT; /* L-unit */ \
;; \
st8 [r16]=r25,16; /* save ar.unat */ \
st8 [r17]=r26,16; /* save ar.pfs */ \
shl r18=r18,16; /* compute ar.rsc to be used for "loadrs" */ \
;; \
st8 [r16]=r27,16; /* save ar.rsc */ \
(pUStk) st8 [r17]=r24,16; /* save ar.rnat */ \
(pKStk) adds r17=16,r17; /* skip over ar_rnat field */ \
;; /* avoid RAW on r16 & r17 */ \
(pUStk) st8 [r16]=r23,16; /* save ar.bspstore */ \
st8 [r17]=r31,16; /* save predicates */ \
(pKStk) adds r16=16,r16; /* skip over ar_bspstore field */ \
;; \
st8 [r16]=r29,16; /* save b0 */ \
st8 [r17]=r18,16; /* save ar.rsc value for "loadrs" */ \
cmp.eq pNonSys,pSys=r0,r0 /* initialize pSys=0, pNonSys=1 */ \
;; \
.mem.offset 0,0; st8.spill [r16]=r20,16; /* save original r1 */ \
.mem.offset 8,0; st8.spill [r17]=r12,16; \
adds r12=-16,r1; /* switch to kernel memory stack (with 16 bytes of scratch) */ \
;; \
.mem.offset 0,0; st8.spill [r16]=r13,16; \
.mem.offset 8,0; st8.spill [r17]=r21,16; /* save ar.fpsr */ \
mov r13=IA64_KR(CURRENT); /* establish `current' */ \
;; \
.mem.offset 0,0; st8.spill [r16]=r15,16; \
.mem.offset 8,0; st8.spill [r17]=r14,16; \
;; \
.mem.offset 0,0; st8.spill [r16]=r2,16; \
.mem.offset 8,0; st8.spill [r17]=r3,16; \
ACCOUNT_GET_STAMP \
adds r2=IA64_PT_REGS_R16_OFFSET,r1; \
;; \
EXTRA; \
movl r1=__gp; /* establish kernel global pointer */ \
;; \
ACCOUNT_SYS_ENTER \
bsw.1; /* switch back to bank 1 (must be last in insn group) */ \
;;
/*
* SAVE_REST saves the remainder of pt_regs (with psr.ic on).
*
* Assumed state upon entry:
* psr.ic: on
* r2: points to &pt_regs.r16
* r3: points to &pt_regs.r17
* r8: contents of ar.ccv
* r9: contents of ar.csd
* r10: contents of ar.ssd
* r11: FPSR_DEFAULT
*
* Registers r14 and r15 are guaranteed not to be touched by SAVE_REST.
*/
#define SAVE_REST \
.mem.offset 0,0; st8.spill [r2]=r16,16; \
.mem.offset 8,0; st8.spill [r3]=r17,16; \
;; \
.mem.offset 0,0; st8.spill [r2]=r18,16; \
.mem.offset 8,0; st8.spill [r3]=r19,16; \
;; \
.mem.offset 0,0; st8.spill [r2]=r20,16; \
.mem.offset 8,0; st8.spill [r3]=r21,16; \
mov r18=b6; \
;; \
.mem.offset 0,0; st8.spill [r2]=r22,16; \
.mem.offset 8,0; st8.spill [r3]=r23,16; \
mov r19=b7; \
;; \
.mem.offset 0,0; st8.spill [r2]=r24,16; \
.mem.offset 8,0; st8.spill [r3]=r25,16; \
;; \
.mem.offset 0,0; st8.spill [r2]=r26,16; \
.mem.offset 8,0; st8.spill [r3]=r27,16; \
;; \
.mem.offset 0,0; st8.spill [r2]=r28,16; \
.mem.offset 8,0; st8.spill [r3]=r29,16; \
;; \
.mem.offset 0,0; st8.spill [r2]=r30,16; \
.mem.offset 8,0; st8.spill [r3]=r31,32; \
;; \
mov ar.fpsr=r11; /* M-unit */ \
st8 [r2]=r8,8; /* ar.ccv */ \
adds r24=PT(B6)-PT(F7),r3; \
;; \
stf.spill [r2]=f6,32; \
stf.spill [r3]=f7,32; \
;; \
stf.spill [r2]=f8,32; \
stf.spill [r3]=f9,32; \
;; \
stf.spill [r2]=f10; \
stf.spill [r3]=f11; \
adds r25=PT(B7)-PT(F11),r3; \
;; \
st8 [r24]=r18,16; /* b6 */ \
st8 [r25]=r19,16; /* b7 */ \
;; \
st8 [r24]=r9; /* ar.csd */ \
st8 [r25]=r10; /* ar.ssd */ \
;;
#define SAVE_MIN_WITH_COVER DO_SAVE_MIN(cover, mov r30=cr.ifs,)
#define SAVE_MIN_WITH_COVER_R19 DO_SAVE_MIN(cover, mov r30=cr.ifs, mov r15=r19)
#define SAVE_MIN DO_SAVE_MIN( , mov r30=r0, )
|
#include <asm/cache.h>
#include "entry.h"
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
/* read ar.itc in advance, and use it before leaving bank 0 */
#define ACCOUNT_GET_STAMP \
(pUStk) mov.m r20=ar.itc;
#define ACCOUNT_SYS_ENTER \
(pUStk) br.call.spnt rp=account_sys_enter \
;;
#else
#define ACCOUNT_GET_STAMP
#define ACCOUNT_SYS_ENTER
#endif
.section ".data.patch.rse", "a"
.previous
/*
* DO_SAVE_MIN switches to the kernel stacks (if necessary) and saves
* the minimum state necessary that allows us to turn psr.ic back
* on.
*
* Assumed state upon entry:
* psr.ic: off
* r31: contains saved predicates (pr)
*
* Upon exit, the state is as follows:
* psr.ic: off
* r2 = points to &pt_regs.r16
* r8 = contents of ar.ccv
* r9 = contents of ar.csd
* r10 = contents of ar.ssd
* r11 = FPSR_DEFAULT
* r12 = kernel sp (kernel virtual address)
* r13 = points to current task_struct (kernel virtual address)
* p15 = TRUE if psr.i is set in cr.ipsr
* predicate registers (other than p2, p3, and p15), b6, r3, r14, r15:
* preserved
*
* Note that psr.ic is NOT turned on by this macro. This is so that
* we can pass interruption state as arguments to a handler.
*/
#define DO_SAVE_MIN(COVER,SAVE_IFS,EXTRA,WORKAROUND) \
mov r16=IA64_KR(CURRENT); /* M */ \
mov r27=ar.rsc; /* M */ \
mov r20=r1; /* A */ \
mov r25=ar.unat; /* M */ \
mov r29=cr.ipsr; /* M */ \
mov r26=ar.pfs; /* I */ \
mov r28=cr.iip; /* M */ \
mov r21=ar.fpsr; /* M */ \
COVER; /* B;; (or nothing) */ \
;; \
adds r16=IA64_TASK_THREAD_ON_USTACK_OFFSET,r16; \
;; \
ld1 r17=[r16]; /* load current->thread.on_ustack flag */ \
st1 [r16]=r0; /* clear current->thread.on_ustack flag */ \
adds r1=-IA64_TASK_THREAD_ON_USTACK_OFFSET,r16 \
/* switch from user to kernel RBS: */ \
;; \
invala; /* M */ \
SAVE_IFS; \
cmp.eq pKStk,pUStk=r0,r17; /* are we in kernel mode already? */ \
;; \
(pUStk) mov ar.rsc=0; /* set enforced lazy mode, pl 0, little-endian, loadrs=0 */ \
;; \
(pUStk) mov.m r24=ar.rnat; \
(pUStk) addl r22=IA64_RBS_OFFSET,r1; /* compute base of RBS */ \
(pKStk) mov r1=sp; /* get sp */ \
;; \
(pUStk) lfetch.fault.excl.nt1 [r22]; \
(pUStk) addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r1; /* compute base of memory stack */ \
(pUStk) mov r23=ar.bspstore; /* save ar.bspstore */ \
;; \
(pUStk) mov ar.bspstore=r22; /* switch to kernel RBS */ \
(pKStk) addl r1=-IA64_PT_REGS_SIZE,r1; /* if in kernel mode, use sp (r12) */ \
;; \
(pUStk) mov r18=ar.bsp; \
(pUStk) mov ar.rsc=0x3; /* set eager mode, pl 0, little-endian, loadrs=0 */ \
adds r17=2*L1_CACHE_BYTES,r1; /* really: biggest cache-line size */ \
adds r16=PT(CR_IPSR),r1; \
;; \
lfetch.fault.excl.nt1 [r17],L1_CACHE_BYTES; \
st8 [r16]=r29; /* save cr.ipsr */ \
;; \
lfetch.fault.excl.nt1 [r17]; \
tbit.nz p15,p0=r29,IA64_PSR_I_BIT; \
mov r29=b0 \
;; \
WORKAROUND; \
adds r16=PT(R8),r1; /* initialize first base pointer */ \
adds r17=PT(R9),r1; /* initialize second base pointer */ \
(pKStk) mov r18=r0; /* make sure r18 isn't NaT */ \
;; \
.mem.offset 0,0; st8.spill [r16]=r8,16; \
.mem.offset 8,0; st8.spill [r17]=r9,16; \
;; \
.mem.offset 0,0; st8.spill [r16]=r10,24; \
.mem.offset 8,0; st8.spill [r17]=r11,24; \
;; \
st8 [r16]=r28,16; /* save cr.iip */ \
st8 [r17]=r30,16; /* save cr.ifs */ \
(pUStk) sub r18=r18,r22; /* r18=RSE.ndirty*8 */ \
mov r8=ar.ccv; \
mov r9=ar.csd; \
mov r10=ar.ssd; \
movl r11=FPSR_DEFAULT; /* L-unit */ \
;; \
st8 [r16]=r25,16; /* save ar.unat */ \
st8 [r17]=r26,16; /* save ar.pfs */ \
shl r18=r18,16; /* compute ar.rsc to be used for "loadrs" */ \
;; \
st8 [r16]=r27,16; /* save ar.rsc */ \
(pUStk) st8 [r17]=r24,16; /* save ar.rnat */ \
(pKStk) adds r17=16,r17; /* skip over ar_rnat field */ \
;; /* avoid RAW on r16 & r17 */ \
(pUStk) st8 [r16]=r23,16; /* save ar.bspstore */ \
st8 [r17]=r31,16; /* save predicates */ \
(pKStk) adds r16=16,r16; /* skip over ar_bspstore field */ \
;; \
st8 [r16]=r29,16; /* save b0 */ \
st8 [r17]=r18,16; /* save ar.rsc value for "loadrs" */ \
cmp.eq pNonSys,pSys=r0,r0 /* initialize pSys=0, pNonSys=1 */ \
;; \
.mem.offset 0,0; st8.spill [r16]=r20,16; /* save original r1 */ \
.mem.offset 8,0; st8.spill [r17]=r12,16; \
adds r12=-16,r1; /* switch to kernel memory stack (with 16 bytes of scratch) */ \
;; \
.mem.offset 0,0; st8.spill [r16]=r13,16; \
.mem.offset 8,0; st8.spill [r17]=r21,16; /* save ar.fpsr */ \
mov r13=IA64_KR(CURRENT); /* establish `current' */ \
;; \
.mem.offset 0,0; st8.spill [r16]=r15,16; \
.mem.offset 8,0; st8.spill [r17]=r14,16; \
;; \
.mem.offset 0,0; st8.spill [r16]=r2,16; \
.mem.offset 8,0; st8.spill [r17]=r3,16; \
ACCOUNT_GET_STAMP \
adds r2=IA64_PT_REGS_R16_OFFSET,r1; \
;; \
EXTRA; \
movl r1=__gp; /* establish kernel global pointer */ \
;; \
ACCOUNT_SYS_ENTER \
bsw.1; /* switch back to bank 1 (must be last in insn group) */ \
;;
/*
* SAVE_REST saves the remainder of pt_regs (with psr.ic on).
*
* Assumed state upon entry:
* psr.ic: on
* r2: points to &pt_regs.r16
* r3: points to &pt_regs.r17
* r8: contents of ar.ccv
* r9: contents of ar.csd
* r10: contents of ar.ssd
* r11: FPSR_DEFAULT
*
* Registers r14 and r15 are guaranteed not to be touched by SAVE_REST.
*/
#define SAVE_REST \
.mem.offset 0,0; st8.spill [r2]=r16,16; \
.mem.offset 8,0; st8.spill [r3]=r17,16; \
;; \
.mem.offset 0,0; st8.spill [r2]=r18,16; \
.mem.offset 8,0; st8.spill [r3]=r19,16; \
;; \
.mem.offset 0,0; st8.spill [r2]=r20,16; \
.mem.offset 8,0; st8.spill [r3]=r21,16; \
mov r18=b6; \
;; \
.mem.offset 0,0; st8.spill [r2]=r22,16; \
.mem.offset 8,0; st8.spill [r3]=r23,16; \
mov r19=b7; \
;; \
.mem.offset 0,0; st8.spill [r2]=r24,16; \
.mem.offset 8,0; st8.spill [r3]=r25,16; \
;; \
.mem.offset 0,0; st8.spill [r2]=r26,16; \
.mem.offset 8,0; st8.spill [r3]=r27,16; \
;; \
.mem.offset 0,0; st8.spill [r2]=r28,16; \
.mem.offset 8,0; st8.spill [r3]=r29,16; \
;; \
.mem.offset 0,0; st8.spill [r2]=r30,16; \
.mem.offset 8,0; st8.spill [r3]=r31,32; \
;; \
mov ar.fpsr=r11; /* M-unit */ \
st8 [r2]=r8,8; /* ar.ccv */ \
adds r24=PT(B6)-PT(F7),r3; \
;; \
stf.spill [r2]=f6,32; \
stf.spill [r3]=f7,32; \
;; \
stf.spill [r2]=f8,32; \
stf.spill [r3]=f9,32; \
;; \
stf.spill [r2]=f10; \
stf.spill [r3]=f11; \
adds r25=PT(B7)-PT(F11),r3; \
;; \
st8 [r24]=r18,16; /* b6 */ \
st8 [r25]=r19,16; /* b7 */ \
;; \
st8 [r24]=r9; /* ar.csd */ \
st8 [r25]=r10; /* ar.ssd */ \
;;
#define RSE_WORKAROUND \
(pUStk) extr.u r17=r18,3,6; \
(pUStk) sub r16=r18,r22; \
[1:](pKStk) br.cond.sptk.many 1f; \
.xdata4 ".data.patch.rse",1b-. \
;; \
cmp.ge p6,p7 = 33,r17; \
;; \
(p6) mov r17=0x310; \
(p7) mov r17=0x308; \
;; \
cmp.leu p1,p0=r16,r17; \
(p1) br.cond.sptk.many 1f; \
dep.z r17=r26,0,62; \
movl r16=2f; \
;; \
mov ar.pfs=r17; \
dep r27=r0,r27,16,14; \
mov b0=r16; \
;; \
br.ret.sptk b0; \
;; \
2: \
mov ar.rsc=r0 \
;; \
flushrs; \
;; \
mov ar.bspstore=r22 \
;; \
mov r18=ar.bsp; \
;; \
1: \
.pred.rel "mutex", pKStk, pUStk
#define SAVE_MIN_WITH_COVER DO_SAVE_MIN(cover, mov r30=cr.ifs, , RSE_WORKAROUND)
#define SAVE_MIN_WITH_COVER_R19 DO_SAVE_MIN(cover, mov r30=cr.ifs, mov r15=r19, RSE_WORKAROUND)
#define SAVE_MIN DO_SAVE_MIN( , mov r30=r0, , )
|
2483_1
|
crossvul
|
h
|
CWE-119
|
Improper Restriction of Operations within the Bounds of a Memory Buffer - Certain languages allow direct addressing of memory locations and do not automatically ensure that these locations are valid for the memory buffer that is being referenced.
|
c
|
#ifndef _ASM_IA64_PATCH_H
#define _ASM_IA64_PATCH_H
/*
* Copyright (C) 2003 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
*
* There are a number of reasons for patching instructions. Rather than duplicating code
* all over the place, we put the common stuff here. Reasons for patching: in-kernel
* module-loader, virtual-to-physical patch-list, McKinley Errata 9 workaround, and gate
* shared library. Undoubtedly, some of these reasons will disappear and others will
* be added over time.
*/
#include <linux/elf.h>
#include <linux/types.h>
extern void ia64_patch (u64 insn_addr, u64 mask, u64 val); /* patch any insn slot */
extern void ia64_patch_imm64 (u64 insn_addr, u64 val); /* patch "movl" w/abs. value*/
extern void ia64_patch_imm60 (u64 insn_addr, u64 val); /* patch "brl" w/ip-rel value */
extern void ia64_patch_mckinley_e9 (unsigned long start, unsigned long end);
extern void ia64_patch_vtop (unsigned long start, unsigned long end);
extern void ia64_patch_phys_stack_reg(unsigned long val);
extern void ia64_patch_gate (void);
#endif /* _ASM_IA64_PATCH_H */
|
#ifndef _ASM_IA64_PATCH_H
#define _ASM_IA64_PATCH_H
/*
* Copyright (C) 2003 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
*
* There are a number of reasons for patching instructions. Rather than duplicating code
* all over the place, we put the common stuff here. Reasons for patching: in-kernel
* module-loader, virtual-to-physical patch-list, McKinley Errata 9 workaround, and gate
* shared library. Undoubtedly, some of these reasons will disappear and others will
* be added over time.
*/
#include <linux/elf.h>
#include <linux/types.h>
extern void ia64_patch (u64 insn_addr, u64 mask, u64 val); /* patch any insn slot */
extern void ia64_patch_imm64 (u64 insn_addr, u64 val); /* patch "movl" w/abs. value*/
extern void ia64_patch_imm60 (u64 insn_addr, u64 val); /* patch "brl" w/ip-rel value */
extern void ia64_patch_mckinley_e9 (unsigned long start, unsigned long end);
extern void ia64_patch_vtop (unsigned long start, unsigned long end);
extern void ia64_patch_phys_stack_reg(unsigned long val);
extern void ia64_patch_rse (unsigned long start, unsigned long end);
extern void ia64_patch_gate (void);
#endif /* _ASM_IA64_PATCH_H */
|
2483_5
|
crossvul
|
h
|
CWE-119
|
Improper Restriction of Operations within the Bounds of a Memory Buffer - Certain languages allow direct addressing of memory locations and do not automatically ensure that these locations are valid for the memory buffer that is being referenced.
|
c
|
#ifndef _ASM_IA64_PTRACE_H
#define _ASM_IA64_PTRACE_H
/*
* Copyright (C) 1998-2004 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
* Stephane Eranian <eranian@hpl.hp.com>
* Copyright (C) 2003 Intel Co
* Suresh Siddha <suresh.b.siddha@intel.com>
* Fenghua Yu <fenghua.yu@intel.com>
* Arun Sharma <arun.sharma@intel.com>
*
* 12/07/98 S. Eranian added pt_regs & switch_stack
* 12/21/98 D. Mosberger updated to match latest code
* 6/17/99 D. Mosberger added second unat member to "struct switch_stack"
*
*/
/*
* When a user process is blocked, its state looks as follows:
*
* +----------------------+ ------- IA64_STK_OFFSET
* | | ^
* | struct pt_regs | |
* | | |
* +----------------------+ |
* | | |
* | memory stack | |
* | (growing downwards) | |
* //.....................// |
* |
* //.....................// |
* | | |
* +----------------------+ |
* | struct switch_stack | |
* | | |
* +----------------------+ |
* | | |
* //.....................// |
* |
* //.....................// |
* | | |
* | register stack | |
* | (growing upwards) | |
* | | |
* +----------------------+ | --- IA64_RBS_OFFSET
* | struct thread_info | | ^
* +----------------------+ | |
* | | | |
* | struct task_struct | | |
* current -> | | | |
* +----------------------+ -------
*
* Note that ar.ec is not saved explicitly in pt_reg or switch_stack.
* This is because ar.ec is saved as part of ar.pfs.
*/
#include <asm/fpu.h>
#ifdef __KERNEL__
#ifndef ASM_OFFSETS_C
#include <asm/asm-offsets.h>
#endif
/*
* Base-2 logarithm of number of pages to allocate per task structure
* (including register backing store and memory stack):
*/
#if defined(CONFIG_IA64_PAGE_SIZE_4KB)
# define KERNEL_STACK_SIZE_ORDER 3
#elif defined(CONFIG_IA64_PAGE_SIZE_8KB)
# define KERNEL_STACK_SIZE_ORDER 2
#elif defined(CONFIG_IA64_PAGE_SIZE_16KB)
# define KERNEL_STACK_SIZE_ORDER 1
#else
# define KERNEL_STACK_SIZE_ORDER 0
#endif
#define IA64_RBS_OFFSET ((IA64_TASK_SIZE + IA64_THREAD_INFO_SIZE + 15) & ~15)
#define IA64_STK_OFFSET ((1 << KERNEL_STACK_SIZE_ORDER)*PAGE_SIZE)
#define KERNEL_STACK_SIZE IA64_STK_OFFSET
#endif /* __KERNEL__ */
#ifndef __ASSEMBLY__
/*
* This struct defines the way the registers are saved on system
* calls.
*
* We don't save all floating point register because the kernel
* is compiled to use only a very small subset, so the other are
* untouched.
*
* THIS STRUCTURE MUST BE A MULTIPLE 16-BYTE IN SIZE
* (because the memory stack pointer MUST ALWAYS be aligned this way)
*
*/
struct pt_regs {
/* The following registers are saved by SAVE_MIN: */
unsigned long b6; /* scratch */
unsigned long b7; /* scratch */
unsigned long ar_csd; /* used by cmp8xchg16 (scratch) */
unsigned long ar_ssd; /* reserved for future use (scratch) */
unsigned long r8; /* scratch (return value register 0) */
unsigned long r9; /* scratch (return value register 1) */
unsigned long r10; /* scratch (return value register 2) */
unsigned long r11; /* scratch (return value register 3) */
unsigned long cr_ipsr; /* interrupted task's psr */
unsigned long cr_iip; /* interrupted task's instruction pointer */
/*
* interrupted task's function state; if bit 63 is cleared, it
* contains syscall's ar.pfs.pfm:
*/
unsigned long cr_ifs;
unsigned long ar_unat; /* interrupted task's NaT register (preserved) */
unsigned long ar_pfs; /* prev function state */
unsigned long ar_rsc; /* RSE configuration */
/* The following two are valid only if cr_ipsr.cpl > 0 || ti->flags & _TIF_MCA_INIT */
unsigned long ar_rnat; /* RSE NaT */
unsigned long ar_bspstore; /* RSE bspstore */
unsigned long pr; /* 64 predicate registers (1 bit each) */
unsigned long b0; /* return pointer (bp) */
unsigned long loadrs; /* size of dirty partition << 16 */
unsigned long r1; /* the gp pointer */
unsigned long r12; /* interrupted task's memory stack pointer */
unsigned long r13; /* thread pointer */
unsigned long ar_fpsr; /* floating point status (preserved) */
unsigned long r15; /* scratch */
/* The remaining registers are NOT saved for system calls. */
unsigned long r14; /* scratch */
unsigned long r2; /* scratch */
unsigned long r3; /* scratch */
/* The following registers are saved by SAVE_REST: */
unsigned long r16; /* scratch */
unsigned long r17; /* scratch */
unsigned long r18; /* scratch */
unsigned long r19; /* scratch */
unsigned long r20; /* scratch */
unsigned long r21; /* scratch */
unsigned long r22; /* scratch */
unsigned long r23; /* scratch */
unsigned long r24; /* scratch */
unsigned long r25; /* scratch */
unsigned long r26; /* scratch */
unsigned long r27; /* scratch */
unsigned long r28; /* scratch */
unsigned long r29; /* scratch */
unsigned long r30; /* scratch */
unsigned long r31; /* scratch */
unsigned long ar_ccv; /* compare/exchange value (scratch) */
/*
* Floating point registers that the kernel considers scratch:
*/
struct ia64_fpreg f6; /* scratch */
struct ia64_fpreg f7; /* scratch */
struct ia64_fpreg f8; /* scratch */
struct ia64_fpreg f9; /* scratch */
struct ia64_fpreg f10; /* scratch */
struct ia64_fpreg f11; /* scratch */
};
/*
* This structure contains the addition registers that need to
* preserved across a context switch. This generally consists of
* "preserved" registers.
*/
struct switch_stack {
unsigned long caller_unat; /* user NaT collection register (preserved) */
unsigned long ar_fpsr; /* floating-point status register */
struct ia64_fpreg f2; /* preserved */
struct ia64_fpreg f3; /* preserved */
struct ia64_fpreg f4; /* preserved */
struct ia64_fpreg f5; /* preserved */
struct ia64_fpreg f12; /* scratch, but untouched by kernel */
struct ia64_fpreg f13; /* scratch, but untouched by kernel */
struct ia64_fpreg f14; /* scratch, but untouched by kernel */
struct ia64_fpreg f15; /* scratch, but untouched by kernel */
struct ia64_fpreg f16; /* preserved */
struct ia64_fpreg f17; /* preserved */
struct ia64_fpreg f18; /* preserved */
struct ia64_fpreg f19; /* preserved */
struct ia64_fpreg f20; /* preserved */
struct ia64_fpreg f21; /* preserved */
struct ia64_fpreg f22; /* preserved */
struct ia64_fpreg f23; /* preserved */
struct ia64_fpreg f24; /* preserved */
struct ia64_fpreg f25; /* preserved */
struct ia64_fpreg f26; /* preserved */
struct ia64_fpreg f27; /* preserved */
struct ia64_fpreg f28; /* preserved */
struct ia64_fpreg f29; /* preserved */
struct ia64_fpreg f30; /* preserved */
struct ia64_fpreg f31; /* preserved */
unsigned long r4; /* preserved */
unsigned long r5; /* preserved */
unsigned long r6; /* preserved */
unsigned long r7; /* preserved */
unsigned long b0; /* so we can force a direct return in copy_thread */
unsigned long b1;
unsigned long b2;
unsigned long b3;
unsigned long b4;
unsigned long b5;
unsigned long ar_pfs; /* previous function state */
unsigned long ar_lc; /* loop counter (preserved) */
unsigned long ar_unat; /* NaT bits for r4-r7 */
unsigned long ar_rnat; /* RSE NaT collection register */
unsigned long ar_bspstore; /* RSE dirty base (preserved) */
unsigned long pr; /* 64 predicate registers (1 bit each) */
};
#ifdef __KERNEL__
#include <asm/current.h>
#include <asm/page.h>
/*
* We use the ia64_psr(regs)->ri to determine which of the three
* instructions in bundle (16 bytes) took the sample. Generate
* the canonical representation by adding to instruction pointer.
*/
# define instruction_pointer(regs) ((regs)->cr_iip + ia64_psr(regs)->ri)
#define regs_return_value(regs) ((regs)->r8)
/* Conserve space in histogram by encoding slot bits in address
* bits 2 and 3 rather than bits 0 and 1.
*/
#define profile_pc(regs) \
({ \
unsigned long __ip = instruction_pointer(regs); \
(__ip & ~3UL) + ((__ip & 3UL) << 2); \
})
/* given a pointer to a task_struct, return the user's pt_regs */
# define task_pt_regs(t) (((struct pt_regs *) ((char *) (t) + IA64_STK_OFFSET)) - 1)
# define ia64_psr(regs) ((struct ia64_psr *) &(regs)->cr_ipsr)
# define user_mode(regs) (((struct ia64_psr *) &(regs)->cr_ipsr)->cpl != 0)
# define user_stack(task,regs) ((long) regs - (long) task == IA64_STK_OFFSET - sizeof(*regs))
# define fsys_mode(task,regs) \
({ \
struct task_struct *_task = (task); \
struct pt_regs *_regs = (regs); \
!user_mode(_regs) && user_stack(_task, _regs); \
})
/*
* System call handlers that, upon successful completion, need to return a negative value
* should call force_successful_syscall_return() right before returning. On architectures
* where the syscall convention provides for a separate error flag (e.g., alpha, ia64,
* ppc{,64}, sparc{,64}, possibly others), this macro can be used to ensure that the error
* flag will not get set. On architectures which do not support a separate error flag,
* the macro is a no-op and the spurious error condition needs to be filtered out by some
* other means (e.g., in user-level, by passing an extra argument to the syscall handler,
* or something along those lines).
*
* On ia64, we can clear the user's pt_regs->r8 to force a successful syscall.
*/
# define force_successful_syscall_return() (task_pt_regs(current)->r8 = 0)
struct task_struct; /* forward decl */
struct unw_frame_info; /* forward decl */
extern void show_regs (struct pt_regs *);
extern void ia64_do_show_stack (struct unw_frame_info *, void *);
extern unsigned long ia64_get_user_rbs_end (struct task_struct *, struct pt_regs *,
unsigned long *);
extern long ia64_peek (struct task_struct *, struct switch_stack *, unsigned long,
unsigned long, long *);
extern long ia64_poke (struct task_struct *, struct switch_stack *, unsigned long,
unsigned long, long);
extern void ia64_flush_fph (struct task_struct *);
extern void ia64_sync_fph (struct task_struct *);
extern void ia64_sync_krbs(void);
extern long ia64_sync_user_rbs (struct task_struct *, struct switch_stack *,
unsigned long, unsigned long);
/* get nat bits for scratch registers such that bit N==1 iff scratch register rN is a NaT */
extern unsigned long ia64_get_scratch_nat_bits (struct pt_regs *pt, unsigned long scratch_unat);
/* put nat bits for scratch registers such that scratch register rN is a NaT iff bit N==1 */
extern unsigned long ia64_put_scratch_nat_bits (struct pt_regs *pt, unsigned long nat);
extern void ia64_increment_ip (struct pt_regs *pt);
extern void ia64_decrement_ip (struct pt_regs *pt);
extern void ia64_ptrace_stop(void);
#define arch_ptrace_stop(code, info) \
ia64_ptrace_stop()
#define arch_ptrace_stop_needed(code, info) \
(!test_thread_flag(TIF_RESTORE_RSE))
extern void ptrace_attach_sync_user_rbs (struct task_struct *);
#define arch_ptrace_attach(child) \
ptrace_attach_sync_user_rbs(child)
#define arch_has_single_step() (1)
extern void user_enable_single_step(struct task_struct *);
extern void user_disable_single_step(struct task_struct *);
#define arch_has_block_step() (1)
extern void user_enable_block_step(struct task_struct *);
#endif /* !__KERNEL__ */
/* pt_all_user_regs is used for PTRACE_GETREGS PTRACE_SETREGS */
struct pt_all_user_regs {
unsigned long nat;
unsigned long cr_iip;
unsigned long cfm;
unsigned long cr_ipsr;
unsigned long pr;
unsigned long gr[32];
unsigned long br[8];
unsigned long ar[128];
struct ia64_fpreg fr[128];
};
#endif /* !__ASSEMBLY__ */
/* indices to application-registers array in pt_all_user_regs */
#define PT_AUR_RSC 16
#define PT_AUR_BSP 17
#define PT_AUR_BSPSTORE 18
#define PT_AUR_RNAT 19
#define PT_AUR_CCV 32
#define PT_AUR_UNAT 36
#define PT_AUR_FPSR 40
#define PT_AUR_PFS 64
#define PT_AUR_LC 65
#define PT_AUR_EC 66
/*
* The numbers chosen here are somewhat arbitrary but absolutely MUST
* not overlap with any of the number assigned in <linux/ptrace.h>.
*/
#define PTRACE_SINGLEBLOCK 12 /* resume execution until next branch */
#define PTRACE_OLD_GETSIGINFO 13 /* (replaced by PTRACE_GETSIGINFO in <linux/ptrace.h>) */
#define PTRACE_OLD_SETSIGINFO 14 /* (replaced by PTRACE_SETSIGINFO in <linux/ptrace.h>) */
#define PTRACE_GETREGS 18 /* get all registers (pt_all_user_regs) in one shot */
#define PTRACE_SETREGS 19 /* set all registers (pt_all_user_regs) in one shot */
#define PTRACE_OLDSETOPTIONS 21
#endif /* _ASM_IA64_PTRACE_H */
|
#ifndef _ASM_IA64_PTRACE_H
#define _ASM_IA64_PTRACE_H
/*
* Copyright (C) 1998-2004 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
* Stephane Eranian <eranian@hpl.hp.com>
* Copyright (C) 2003 Intel Co
* Suresh Siddha <suresh.b.siddha@intel.com>
* Fenghua Yu <fenghua.yu@intel.com>
* Arun Sharma <arun.sharma@intel.com>
*
* 12/07/98 S. Eranian added pt_regs & switch_stack
* 12/21/98 D. Mosberger updated to match latest code
* 6/17/99 D. Mosberger added second unat member to "struct switch_stack"
*
*/
/*
* When a user process is blocked, its state looks as follows:
*
* +----------------------+ ------- IA64_STK_OFFSET
* | | ^
* | struct pt_regs | |
* | | |
* +----------------------+ |
* | | |
* | memory stack | |
* | (growing downwards) | |
* //.....................// |
* |
* //.....................// |
* | | |
* +----------------------+ |
* | struct switch_stack | |
* | | |
* +----------------------+ |
* | | |
* //.....................// |
* |
* //.....................// |
* | | |
* | register stack | |
* | (growing upwards) | |
* | | |
* +----------------------+ | --- IA64_RBS_OFFSET
* | struct thread_info | | ^
* +----------------------+ | |
* | | | |
* | struct task_struct | | |
* current -> | | | |
* +----------------------+ -------
*
* Note that ar.ec is not saved explicitly in pt_reg or switch_stack.
* This is because ar.ec is saved as part of ar.pfs.
*/
#include <asm/fpu.h>
#ifdef __KERNEL__
#ifndef ASM_OFFSETS_C
#include <asm/asm-offsets.h>
#endif
/*
* Base-2 logarithm of number of pages to allocate per task structure
* (including register backing store and memory stack):
*/
#if defined(CONFIG_IA64_PAGE_SIZE_4KB)
# define KERNEL_STACK_SIZE_ORDER 3
#elif defined(CONFIG_IA64_PAGE_SIZE_8KB)
# define KERNEL_STACK_SIZE_ORDER 2
#elif defined(CONFIG_IA64_PAGE_SIZE_16KB)
# define KERNEL_STACK_SIZE_ORDER 1
#else
# define KERNEL_STACK_SIZE_ORDER 0
#endif
#define IA64_RBS_OFFSET ((IA64_TASK_SIZE + IA64_THREAD_INFO_SIZE + 31) & ~31)
#define IA64_STK_OFFSET ((1 << KERNEL_STACK_SIZE_ORDER)*PAGE_SIZE)
#define KERNEL_STACK_SIZE IA64_STK_OFFSET
#endif /* __KERNEL__ */
#ifndef __ASSEMBLY__
/*
* This struct defines the way the registers are saved on system
* calls.
*
* We don't save all floating point register because the kernel
* is compiled to use only a very small subset, so the other are
* untouched.
*
* THIS STRUCTURE MUST BE A MULTIPLE 16-BYTE IN SIZE
* (because the memory stack pointer MUST ALWAYS be aligned this way)
*
*/
struct pt_regs {
/* The following registers are saved by SAVE_MIN: */
unsigned long b6; /* scratch */
unsigned long b7; /* scratch */
unsigned long ar_csd; /* used by cmp8xchg16 (scratch) */
unsigned long ar_ssd; /* reserved for future use (scratch) */
unsigned long r8; /* scratch (return value register 0) */
unsigned long r9; /* scratch (return value register 1) */
unsigned long r10; /* scratch (return value register 2) */
unsigned long r11; /* scratch (return value register 3) */
unsigned long cr_ipsr; /* interrupted task's psr */
unsigned long cr_iip; /* interrupted task's instruction pointer */
/*
* interrupted task's function state; if bit 63 is cleared, it
* contains syscall's ar.pfs.pfm:
*/
unsigned long cr_ifs;
unsigned long ar_unat; /* interrupted task's NaT register (preserved) */
unsigned long ar_pfs; /* prev function state */
unsigned long ar_rsc; /* RSE configuration */
/* The following two are valid only if cr_ipsr.cpl > 0 || ti->flags & _TIF_MCA_INIT */
unsigned long ar_rnat; /* RSE NaT */
unsigned long ar_bspstore; /* RSE bspstore */
unsigned long pr; /* 64 predicate registers (1 bit each) */
unsigned long b0; /* return pointer (bp) */
unsigned long loadrs; /* size of dirty partition << 16 */
unsigned long r1; /* the gp pointer */
unsigned long r12; /* interrupted task's memory stack pointer */
unsigned long r13; /* thread pointer */
unsigned long ar_fpsr; /* floating point status (preserved) */
unsigned long r15; /* scratch */
/* The remaining registers are NOT saved for system calls. */
unsigned long r14; /* scratch */
unsigned long r2; /* scratch */
unsigned long r3; /* scratch */
/* The following registers are saved by SAVE_REST: */
unsigned long r16; /* scratch */
unsigned long r17; /* scratch */
unsigned long r18; /* scratch */
unsigned long r19; /* scratch */
unsigned long r20; /* scratch */
unsigned long r21; /* scratch */
unsigned long r22; /* scratch */
unsigned long r23; /* scratch */
unsigned long r24; /* scratch */
unsigned long r25; /* scratch */
unsigned long r26; /* scratch */
unsigned long r27; /* scratch */
unsigned long r28; /* scratch */
unsigned long r29; /* scratch */
unsigned long r30; /* scratch */
unsigned long r31; /* scratch */
unsigned long ar_ccv; /* compare/exchange value (scratch) */
/*
* Floating point registers that the kernel considers scratch:
*/
struct ia64_fpreg f6; /* scratch */
struct ia64_fpreg f7; /* scratch */
struct ia64_fpreg f8; /* scratch */
struct ia64_fpreg f9; /* scratch */
struct ia64_fpreg f10; /* scratch */
struct ia64_fpreg f11; /* scratch */
};
/*
* This structure contains the addition registers that need to
* preserved across a context switch. This generally consists of
* "preserved" registers.
*/
struct switch_stack {
unsigned long caller_unat; /* user NaT collection register (preserved) */
unsigned long ar_fpsr; /* floating-point status register */
struct ia64_fpreg f2; /* preserved */
struct ia64_fpreg f3; /* preserved */
struct ia64_fpreg f4; /* preserved */
struct ia64_fpreg f5; /* preserved */
struct ia64_fpreg f12; /* scratch, but untouched by kernel */
struct ia64_fpreg f13; /* scratch, but untouched by kernel */
struct ia64_fpreg f14; /* scratch, but untouched by kernel */
struct ia64_fpreg f15; /* scratch, but untouched by kernel */
struct ia64_fpreg f16; /* preserved */
struct ia64_fpreg f17; /* preserved */
struct ia64_fpreg f18; /* preserved */
struct ia64_fpreg f19; /* preserved */
struct ia64_fpreg f20; /* preserved */
struct ia64_fpreg f21; /* preserved */
struct ia64_fpreg f22; /* preserved */
struct ia64_fpreg f23; /* preserved */
struct ia64_fpreg f24; /* preserved */
struct ia64_fpreg f25; /* preserved */
struct ia64_fpreg f26; /* preserved */
struct ia64_fpreg f27; /* preserved */
struct ia64_fpreg f28; /* preserved */
struct ia64_fpreg f29; /* preserved */
struct ia64_fpreg f30; /* preserved */
struct ia64_fpreg f31; /* preserved */
unsigned long r4; /* preserved */
unsigned long r5; /* preserved */
unsigned long r6; /* preserved */
unsigned long r7; /* preserved */
unsigned long b0; /* so we can force a direct return in copy_thread */
unsigned long b1;
unsigned long b2;
unsigned long b3;
unsigned long b4;
unsigned long b5;
unsigned long ar_pfs; /* previous function state */
unsigned long ar_lc; /* loop counter (preserved) */
unsigned long ar_unat; /* NaT bits for r4-r7 */
unsigned long ar_rnat; /* RSE NaT collection register */
unsigned long ar_bspstore; /* RSE dirty base (preserved) */
unsigned long pr; /* 64 predicate registers (1 bit each) */
};
#ifdef __KERNEL__
#include <asm/current.h>
#include <asm/page.h>
/*
* We use the ia64_psr(regs)->ri to determine which of the three
* instructions in bundle (16 bytes) took the sample. Generate
* the canonical representation by adding to instruction pointer.
*/
# define instruction_pointer(regs) ((regs)->cr_iip + ia64_psr(regs)->ri)
#define regs_return_value(regs) ((regs)->r8)
/* Conserve space in histogram by encoding slot bits in address
* bits 2 and 3 rather than bits 0 and 1.
*/
#define profile_pc(regs) \
({ \
unsigned long __ip = instruction_pointer(regs); \
(__ip & ~3UL) + ((__ip & 3UL) << 2); \
})
/* given a pointer to a task_struct, return the user's pt_regs */
# define task_pt_regs(t) (((struct pt_regs *) ((char *) (t) + IA64_STK_OFFSET)) - 1)
# define ia64_psr(regs) ((struct ia64_psr *) &(regs)->cr_ipsr)
# define user_mode(regs) (((struct ia64_psr *) &(regs)->cr_ipsr)->cpl != 0)
# define user_stack(task,regs) ((long) regs - (long) task == IA64_STK_OFFSET - sizeof(*regs))
# define fsys_mode(task,regs) \
({ \
struct task_struct *_task = (task); \
struct pt_regs *_regs = (regs); \
!user_mode(_regs) && user_stack(_task, _regs); \
})
/*
* System call handlers that, upon successful completion, need to return a negative value
* should call force_successful_syscall_return() right before returning. On architectures
* where the syscall convention provides for a separate error flag (e.g., alpha, ia64,
* ppc{,64}, sparc{,64}, possibly others), this macro can be used to ensure that the error
* flag will not get set. On architectures which do not support a separate error flag,
* the macro is a no-op and the spurious error condition needs to be filtered out by some
* other means (e.g., in user-level, by passing an extra argument to the syscall handler,
* or something along those lines).
*
* On ia64, we can clear the user's pt_regs->r8 to force a successful syscall.
*/
# define force_successful_syscall_return() (task_pt_regs(current)->r8 = 0)
struct task_struct; /* forward decl */
struct unw_frame_info; /* forward decl */
extern void show_regs (struct pt_regs *);
extern void ia64_do_show_stack (struct unw_frame_info *, void *);
extern unsigned long ia64_get_user_rbs_end (struct task_struct *, struct pt_regs *,
unsigned long *);
extern long ia64_peek (struct task_struct *, struct switch_stack *, unsigned long,
unsigned long, long *);
extern long ia64_poke (struct task_struct *, struct switch_stack *, unsigned long,
unsigned long, long);
extern void ia64_flush_fph (struct task_struct *);
extern void ia64_sync_fph (struct task_struct *);
extern void ia64_sync_krbs(void);
extern long ia64_sync_user_rbs (struct task_struct *, struct switch_stack *,
unsigned long, unsigned long);
/* get nat bits for scratch registers such that bit N==1 iff scratch register rN is a NaT */
extern unsigned long ia64_get_scratch_nat_bits (struct pt_regs *pt, unsigned long scratch_unat);
/* put nat bits for scratch registers such that scratch register rN is a NaT iff bit N==1 */
extern unsigned long ia64_put_scratch_nat_bits (struct pt_regs *pt, unsigned long nat);
extern void ia64_increment_ip (struct pt_regs *pt);
extern void ia64_decrement_ip (struct pt_regs *pt);
extern void ia64_ptrace_stop(void);
#define arch_ptrace_stop(code, info) \
ia64_ptrace_stop()
#define arch_ptrace_stop_needed(code, info) \
(!test_thread_flag(TIF_RESTORE_RSE))
extern void ptrace_attach_sync_user_rbs (struct task_struct *);
#define arch_ptrace_attach(child) \
ptrace_attach_sync_user_rbs(child)
#define arch_has_single_step() (1)
extern void user_enable_single_step(struct task_struct *);
extern void user_disable_single_step(struct task_struct *);
#define arch_has_block_step() (1)
extern void user_enable_block_step(struct task_struct *);
#endif /* !__KERNEL__ */
/* pt_all_user_regs is used for PTRACE_GETREGS PTRACE_SETREGS */
struct pt_all_user_regs {
unsigned long nat;
unsigned long cr_iip;
unsigned long cfm;
unsigned long cr_ipsr;
unsigned long pr;
unsigned long gr[32];
unsigned long br[8];
unsigned long ar[128];
struct ia64_fpreg fr[128];
};
#endif /* !__ASSEMBLY__ */
/* indices to application-registers array in pt_all_user_regs */
#define PT_AUR_RSC 16
#define PT_AUR_BSP 17
#define PT_AUR_BSPSTORE 18
#define PT_AUR_RNAT 19
#define PT_AUR_CCV 32
#define PT_AUR_UNAT 36
#define PT_AUR_FPSR 40
#define PT_AUR_PFS 64
#define PT_AUR_LC 65
#define PT_AUR_EC 66
/*
* The numbers chosen here are somewhat arbitrary but absolutely MUST
* not overlap with any of the number assigned in <linux/ptrace.h>.
*/
#define PTRACE_SINGLEBLOCK 12 /* resume execution until next branch */
#define PTRACE_OLD_GETSIGINFO 13 /* (replaced by PTRACE_GETSIGINFO in <linux/ptrace.h>) */
#define PTRACE_OLD_SETSIGINFO 14 /* (replaced by PTRACE_SETSIGINFO in <linux/ptrace.h>) */
#define PTRACE_GETREGS 18 /* get all registers (pt_all_user_regs) in one shot */
#define PTRACE_SETREGS 19 /* set all registers (pt_all_user_regs) in one shot */
#define PTRACE_OLDSETOPTIONS 21
#endif /* _ASM_IA64_PTRACE_H */
|
2483_6
|
crossvul
|
h
|
CWE-119
|
Improper Restriction of Operations within the Bounds of a Memory Buffer - Certain languages allow direct addressing of memory locations and do not automatically ensure that these locations are valid for the memory buffer that is being referenced.
|
c
|
#ifndef _ASM_IA64_SECTIONS_H
#define _ASM_IA64_SECTIONS_H
/*
* Copyright (C) 1998-2003 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
*/
#include <asm-generic/sections.h>
extern char __per_cpu_start[], __per_cpu_end[], __phys_per_cpu_start[];
extern char __start___vtop_patchlist[], __end___vtop_patchlist[];
extern char __start___mckinley_e9_bundles[], __end___mckinley_e9_bundles[];
extern char __start___phys_stack_reg_patchlist[], __end___phys_stack_reg_patchlist[];
extern char __start_gate_section[];
extern char __start_gate_mckinley_e9_patchlist[], __end_gate_mckinley_e9_patchlist[];
extern char __start_gate_vtop_patchlist[], __end_gate_vtop_patchlist[];
extern char __start_gate_fsyscall_patchlist[], __end_gate_fsyscall_patchlist[];
extern char __start_gate_brl_fsys_bubble_down_patchlist[], __end_gate_brl_fsys_bubble_down_patchlist[];
extern char __start_unwind[], __end_unwind[];
extern char __start_ivt_text[], __end_ivt_text[];
#endif /* _ASM_IA64_SECTIONS_H */
|
#ifndef _ASM_IA64_SECTIONS_H
#define _ASM_IA64_SECTIONS_H
/*
* Copyright (C) 1998-2003 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
*/
#include <asm-generic/sections.h>
extern char __per_cpu_start[], __per_cpu_end[], __phys_per_cpu_start[];
extern char __start___vtop_patchlist[], __end___vtop_patchlist[];
extern char __start___rse_patchlist[], __end___rse_patchlist[];
extern char __start___mckinley_e9_bundles[], __end___mckinley_e9_bundles[];
extern char __start___phys_stack_reg_patchlist[], __end___phys_stack_reg_patchlist[];
extern char __start_gate_section[];
extern char __start_gate_mckinley_e9_patchlist[], __end_gate_mckinley_e9_patchlist[];
extern char __start_gate_vtop_patchlist[], __end_gate_vtop_patchlist[];
extern char __start_gate_fsyscall_patchlist[], __end_gate_fsyscall_patchlist[];
extern char __start_gate_brl_fsys_bubble_down_patchlist[], __end_gate_brl_fsys_bubble_down_patchlist[];
extern char __start_unwind[], __end_unwind[];
extern char __start_ivt_text[], __end_ivt_text[];
#endif /* _ASM_IA64_SECTIONS_H */
|
2483_7
|
crossvul
|
h
|
CWE-119
|
Improper Restriction of Operations within the Bounds of a Memory Buffer - Certain languages allow direct addressing of memory locations and do not automatically ensure that these locations are valid for the memory buffer that is being referenced.
|
c
|
/*
BlueZ - Bluetooth protocol stack for Linux
Copyright (C) 2000-2001 Qualcomm Incorporated
Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License version 2 as
published by the Free Software Foundation;
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
SOFTWARE IS DISCLAIMED.
*/
#ifndef __L2CAP_H
#define __L2CAP_H
/* L2CAP defaults */
#define L2CAP_DEFAULT_MTU 672
#define L2CAP_DEFAULT_FLUSH_TO 0xffff
#define L2CAP_DEFAULT_RX_WINDOW 1
#define L2CAP_DEFAULT_MAX_RECEIVE 1
#define L2CAP_DEFAULT_RETRANS_TO 300 /* 300 milliseconds */
#define L2CAP_DEFAULT_MONITOR_TO 1000 /* 1 second */
#define L2CAP_DEFAULT_MAX_RX_APDU 0xfff7
#define L2CAP_CONN_TIMEOUT (40000) /* 40 seconds */
#define L2CAP_INFO_TIMEOUT (4000) /* 4 seconds */
/* L2CAP socket address */
struct sockaddr_l2 {
sa_family_t l2_family;
__le16 l2_psm;
bdaddr_t l2_bdaddr;
__le16 l2_cid;
};
/* L2CAP socket options */
#define L2CAP_OPTIONS 0x01
struct l2cap_options {
__u16 omtu;
__u16 imtu;
__u16 flush_to;
__u8 mode;
};
#define L2CAP_CONNINFO 0x02
struct l2cap_conninfo {
__u16 hci_handle;
__u8 dev_class[3];
};
#define L2CAP_LM 0x03
#define L2CAP_LM_MASTER 0x0001
#define L2CAP_LM_AUTH 0x0002
#define L2CAP_LM_ENCRYPT 0x0004
#define L2CAP_LM_TRUSTED 0x0008
#define L2CAP_LM_RELIABLE 0x0010
#define L2CAP_LM_SECURE 0x0020
/* L2CAP command codes */
#define L2CAP_COMMAND_REJ 0x01
#define L2CAP_CONN_REQ 0x02
#define L2CAP_CONN_RSP 0x03
#define L2CAP_CONF_REQ 0x04
#define L2CAP_CONF_RSP 0x05
#define L2CAP_DISCONN_REQ 0x06
#define L2CAP_DISCONN_RSP 0x07
#define L2CAP_ECHO_REQ 0x08
#define L2CAP_ECHO_RSP 0x09
#define L2CAP_INFO_REQ 0x0a
#define L2CAP_INFO_RSP 0x0b
/* L2CAP feature mask */
#define L2CAP_FEAT_FLOWCTL 0x00000001
#define L2CAP_FEAT_RETRANS 0x00000002
#define L2CAP_FEAT_ERTM 0x00000008
#define L2CAP_FEAT_STREAMING 0x00000010
#define L2CAP_FEAT_FCS 0x00000020
#define L2CAP_FEAT_FIXED_CHAN 0x00000080
/* L2CAP checksum option */
#define L2CAP_FCS_NONE 0x00
#define L2CAP_FCS_CRC16 0x01
/* L2CAP structures */
struct l2cap_hdr {
__le16 len;
__le16 cid;
} __attribute__ ((packed));
#define L2CAP_HDR_SIZE 4
struct l2cap_cmd_hdr {
__u8 code;
__u8 ident;
__le16 len;
} __attribute__ ((packed));
#define L2CAP_CMD_HDR_SIZE 4
struct l2cap_cmd_rej {
__le16 reason;
} __attribute__ ((packed));
struct l2cap_conn_req {
__le16 psm;
__le16 scid;
} __attribute__ ((packed));
struct l2cap_conn_rsp {
__le16 dcid;
__le16 scid;
__le16 result;
__le16 status;
} __attribute__ ((packed));
/* channel indentifier */
#define L2CAP_CID_SIGNALING 0x0001
#define L2CAP_CID_CONN_LESS 0x0002
#define L2CAP_CID_DYN_START 0x0040
#define L2CAP_CID_DYN_END 0xffff
/* connect result */
#define L2CAP_CR_SUCCESS 0x0000
#define L2CAP_CR_PEND 0x0001
#define L2CAP_CR_BAD_PSM 0x0002
#define L2CAP_CR_SEC_BLOCK 0x0003
#define L2CAP_CR_NO_MEM 0x0004
/* connect status */
#define L2CAP_CS_NO_INFO 0x0000
#define L2CAP_CS_AUTHEN_PEND 0x0001
#define L2CAP_CS_AUTHOR_PEND 0x0002
struct l2cap_conf_req {
__le16 dcid;
__le16 flags;
__u8 data[0];
} __attribute__ ((packed));
struct l2cap_conf_rsp {
__le16 scid;
__le16 flags;
__le16 result;
__u8 data[0];
} __attribute__ ((packed));
#define L2CAP_CONF_SUCCESS 0x0000
#define L2CAP_CONF_UNACCEPT 0x0001
#define L2CAP_CONF_REJECT 0x0002
#define L2CAP_CONF_UNKNOWN 0x0003
struct l2cap_conf_opt {
__u8 type;
__u8 len;
__u8 val[0];
} __attribute__ ((packed));
#define L2CAP_CONF_OPT_SIZE 2
#define L2CAP_CONF_HINT 0x80
#define L2CAP_CONF_MASK 0x7f
#define L2CAP_CONF_MTU 0x01
#define L2CAP_CONF_FLUSH_TO 0x02
#define L2CAP_CONF_QOS 0x03
#define L2CAP_CONF_RFC 0x04
#define L2CAP_CONF_FCS 0x05
#define L2CAP_CONF_MAX_SIZE 22
struct l2cap_conf_rfc {
__u8 mode;
__u8 txwin_size;
__u8 max_transmit;
__le16 retrans_timeout;
__le16 monitor_timeout;
__le16 max_pdu_size;
} __attribute__ ((packed));
#define L2CAP_MODE_BASIC 0x00
#define L2CAP_MODE_RETRANS 0x01
#define L2CAP_MODE_FLOWCTL 0x02
#define L2CAP_MODE_ERTM 0x03
#define L2CAP_MODE_STREAMING 0x04
struct l2cap_disconn_req {
__le16 dcid;
__le16 scid;
} __attribute__ ((packed));
struct l2cap_disconn_rsp {
__le16 dcid;
__le16 scid;
} __attribute__ ((packed));
struct l2cap_info_req {
__le16 type;
} __attribute__ ((packed));
struct l2cap_info_rsp {
__le16 type;
__le16 result;
__u8 data[0];
} __attribute__ ((packed));
/* info type */
#define L2CAP_IT_CL_MTU 0x0001
#define L2CAP_IT_FEAT_MASK 0x0002
#define L2CAP_IT_FIXED_CHAN 0x0003
/* info result */
#define L2CAP_IR_SUCCESS 0x0000
#define L2CAP_IR_NOTSUPP 0x0001
/* ----- L2CAP connections ----- */
struct l2cap_chan_list {
struct sock *head;
rwlock_t lock;
long num;
};
struct l2cap_conn {
struct hci_conn *hcon;
bdaddr_t *dst;
bdaddr_t *src;
unsigned int mtu;
__u32 feat_mask;
__u8 info_state;
__u8 info_ident;
struct timer_list info_timer;
spinlock_t lock;
struct sk_buff *rx_skb;
__u32 rx_len;
__u8 rx_ident;
__u8 tx_ident;
__u8 disc_reason;
struct l2cap_chan_list chan_list;
};
#define L2CAP_INFO_CL_MTU_REQ_SENT 0x01
#define L2CAP_INFO_FEAT_MASK_REQ_SENT 0x04
#define L2CAP_INFO_FEAT_MASK_REQ_DONE 0x08
/* ----- L2CAP channel and socket info ----- */
#define l2cap_pi(sk) ((struct l2cap_pinfo *) sk)
struct l2cap_pinfo {
struct bt_sock bt;
__le16 psm;
__u16 dcid;
__u16 scid;
__u16 imtu;
__u16 omtu;
__u16 flush_to;
__u8 mode;
__u8 fcs;
__u8 sec_level;
__u8 role_switch;
__u8 force_reliable;
__u8 conf_req[64];
__u8 conf_len;
__u8 conf_state;
__u8 conf_retry;
__u8 ident;
__le16 sport;
struct l2cap_conn *conn;
struct sock *next_c;
struct sock *prev_c;
};
#define L2CAP_CONF_REQ_SENT 0x01
#define L2CAP_CONF_INPUT_DONE 0x02
#define L2CAP_CONF_OUTPUT_DONE 0x04
#define L2CAP_CONF_CONNECT_PEND 0x80
#define L2CAP_CONF_MAX_RETRIES 2
void l2cap_load(void);
#endif /* __L2CAP_H */
|
/*
BlueZ - Bluetooth protocol stack for Linux
Copyright (C) 2000-2001 Qualcomm Incorporated
Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License version 2 as
published by the Free Software Foundation;
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
SOFTWARE IS DISCLAIMED.
*/
#ifndef __L2CAP_H
#define __L2CAP_H
/* L2CAP defaults */
#define L2CAP_DEFAULT_MTU 672
#define L2CAP_DEFAULT_MIN_MTU 48
#define L2CAP_DEFAULT_FLUSH_TO 0xffff
#define L2CAP_DEFAULT_TX_WINDOW 1
#define L2CAP_DEFAULT_MAX_RECEIVE 1
#define L2CAP_DEFAULT_RETRANS_TO 300 /* 300 milliseconds */
#define L2CAP_DEFAULT_MONITOR_TO 1000 /* 1 second */
#define L2CAP_DEFAULT_MAX_RX_APDU 0xfff7
#define L2CAP_CONN_TIMEOUT (40000) /* 40 seconds */
#define L2CAP_INFO_TIMEOUT (4000) /* 4 seconds */
/* L2CAP socket address */
struct sockaddr_l2 {
sa_family_t l2_family;
__le16 l2_psm;
bdaddr_t l2_bdaddr;
__le16 l2_cid;
};
/* L2CAP socket options */
#define L2CAP_OPTIONS 0x01
struct l2cap_options {
__u16 omtu;
__u16 imtu;
__u16 flush_to;
__u8 mode;
};
#define L2CAP_CONNINFO 0x02
struct l2cap_conninfo {
__u16 hci_handle;
__u8 dev_class[3];
};
#define L2CAP_LM 0x03
#define L2CAP_LM_MASTER 0x0001
#define L2CAP_LM_AUTH 0x0002
#define L2CAP_LM_ENCRYPT 0x0004
#define L2CAP_LM_TRUSTED 0x0008
#define L2CAP_LM_RELIABLE 0x0010
#define L2CAP_LM_SECURE 0x0020
/* L2CAP command codes */
#define L2CAP_COMMAND_REJ 0x01
#define L2CAP_CONN_REQ 0x02
#define L2CAP_CONN_RSP 0x03
#define L2CAP_CONF_REQ 0x04
#define L2CAP_CONF_RSP 0x05
#define L2CAP_DISCONN_REQ 0x06
#define L2CAP_DISCONN_RSP 0x07
#define L2CAP_ECHO_REQ 0x08
#define L2CAP_ECHO_RSP 0x09
#define L2CAP_INFO_REQ 0x0a
#define L2CAP_INFO_RSP 0x0b
/* L2CAP feature mask */
#define L2CAP_FEAT_FLOWCTL 0x00000001
#define L2CAP_FEAT_RETRANS 0x00000002
#define L2CAP_FEAT_ERTM 0x00000008
#define L2CAP_FEAT_STREAMING 0x00000010
#define L2CAP_FEAT_FCS 0x00000020
#define L2CAP_FEAT_FIXED_CHAN 0x00000080
/* L2CAP checksum option */
#define L2CAP_FCS_NONE 0x00
#define L2CAP_FCS_CRC16 0x01
/* L2CAP structures */
struct l2cap_hdr {
__le16 len;
__le16 cid;
} __attribute__ ((packed));
#define L2CAP_HDR_SIZE 4
struct l2cap_cmd_hdr {
__u8 code;
__u8 ident;
__le16 len;
} __attribute__ ((packed));
#define L2CAP_CMD_HDR_SIZE 4
struct l2cap_cmd_rej {
__le16 reason;
} __attribute__ ((packed));
struct l2cap_conn_req {
__le16 psm;
__le16 scid;
} __attribute__ ((packed));
struct l2cap_conn_rsp {
__le16 dcid;
__le16 scid;
__le16 result;
__le16 status;
} __attribute__ ((packed));
/* channel indentifier */
#define L2CAP_CID_SIGNALING 0x0001
#define L2CAP_CID_CONN_LESS 0x0002
#define L2CAP_CID_DYN_START 0x0040
#define L2CAP_CID_DYN_END 0xffff
/* connect result */
#define L2CAP_CR_SUCCESS 0x0000
#define L2CAP_CR_PEND 0x0001
#define L2CAP_CR_BAD_PSM 0x0002
#define L2CAP_CR_SEC_BLOCK 0x0003
#define L2CAP_CR_NO_MEM 0x0004
/* connect status */
#define L2CAP_CS_NO_INFO 0x0000
#define L2CAP_CS_AUTHEN_PEND 0x0001
#define L2CAP_CS_AUTHOR_PEND 0x0002
struct l2cap_conf_req {
__le16 dcid;
__le16 flags;
__u8 data[0];
} __attribute__ ((packed));
struct l2cap_conf_rsp {
__le16 scid;
__le16 flags;
__le16 result;
__u8 data[0];
} __attribute__ ((packed));
#define L2CAP_CONF_SUCCESS 0x0000
#define L2CAP_CONF_UNACCEPT 0x0001
#define L2CAP_CONF_REJECT 0x0002
#define L2CAP_CONF_UNKNOWN 0x0003
struct l2cap_conf_opt {
__u8 type;
__u8 len;
__u8 val[0];
} __attribute__ ((packed));
#define L2CAP_CONF_OPT_SIZE 2
#define L2CAP_CONF_HINT 0x80
#define L2CAP_CONF_MASK 0x7f
#define L2CAP_CONF_MTU 0x01
#define L2CAP_CONF_FLUSH_TO 0x02
#define L2CAP_CONF_QOS 0x03
#define L2CAP_CONF_RFC 0x04
#define L2CAP_CONF_FCS 0x05
#define L2CAP_CONF_MAX_SIZE 22
struct l2cap_conf_rfc {
__u8 mode;
__u8 txwin_size;
__u8 max_transmit;
__le16 retrans_timeout;
__le16 monitor_timeout;
__le16 max_pdu_size;
} __attribute__ ((packed));
#define L2CAP_MODE_BASIC 0x00
#define L2CAP_MODE_RETRANS 0x01
#define L2CAP_MODE_FLOWCTL 0x02
#define L2CAP_MODE_ERTM 0x03
#define L2CAP_MODE_STREAMING 0x04
struct l2cap_disconn_req {
__le16 dcid;
__le16 scid;
} __attribute__ ((packed));
struct l2cap_disconn_rsp {
__le16 dcid;
__le16 scid;
} __attribute__ ((packed));
struct l2cap_info_req {
__le16 type;
} __attribute__ ((packed));
struct l2cap_info_rsp {
__le16 type;
__le16 result;
__u8 data[0];
} __attribute__ ((packed));
/* info type */
#define L2CAP_IT_CL_MTU 0x0001
#define L2CAP_IT_FEAT_MASK 0x0002
#define L2CAP_IT_FIXED_CHAN 0x0003
/* info result */
#define L2CAP_IR_SUCCESS 0x0000
#define L2CAP_IR_NOTSUPP 0x0001
/* ----- L2CAP connections ----- */
struct l2cap_chan_list {
struct sock *head;
rwlock_t lock;
long num;
};
struct l2cap_conn {
struct hci_conn *hcon;
bdaddr_t *dst;
bdaddr_t *src;
unsigned int mtu;
__u32 feat_mask;
__u8 info_state;
__u8 info_ident;
struct timer_list info_timer;
spinlock_t lock;
struct sk_buff *rx_skb;
__u32 rx_len;
__u8 rx_ident;
__u8 tx_ident;
__u8 disc_reason;
struct l2cap_chan_list chan_list;
};
#define L2CAP_INFO_CL_MTU_REQ_SENT 0x01
#define L2CAP_INFO_FEAT_MASK_REQ_SENT 0x04
#define L2CAP_INFO_FEAT_MASK_REQ_DONE 0x08
/* ----- L2CAP channel and socket info ----- */
#define l2cap_pi(sk) ((struct l2cap_pinfo *) sk)
struct l2cap_pinfo {
struct bt_sock bt;
__le16 psm;
__u16 dcid;
__u16 scid;
__u16 imtu;
__u16 omtu;
__u16 flush_to;
__u8 mode;
__u8 num_conf_req;
__u8 num_conf_rsp;
__u8 fcs;
__u8 sec_level;
__u8 role_switch;
__u8 force_reliable;
__u8 conf_req[64];
__u8 conf_len;
__u8 conf_state;
__u8 ident;
__u8 remote_tx_win;
__u8 remote_max_tx;
__u16 retrans_timeout;
__u16 monitor_timeout;
__u16 max_pdu_size;
__le16 sport;
struct l2cap_conn *conn;
struct sock *next_c;
struct sock *prev_c;
};
#define L2CAP_CONF_REQ_SENT 0x01
#define L2CAP_CONF_INPUT_DONE 0x02
#define L2CAP_CONF_OUTPUT_DONE 0x04
#define L2CAP_CONF_MTU_DONE 0x08
#define L2CAP_CONF_MODE_DONE 0x10
#define L2CAP_CONF_CONNECT_PEND 0x20
#define L2CAP_CONF_STATE2_DEVICE 0x80
#define L2CAP_CONF_MAX_CONF_REQ 2
#define L2CAP_CONF_MAX_CONF_RSP 2
void l2cap_load(void);
#endif /* __L2CAP_H */
|
2522_0
|
crossvul
|
h
|
CWE-119
|
Improper Restriction of Operations within the Bounds of a Memory Buffer - Certain languages allow direct addressing of memory locations and do not automatically ensure that these locations are valid for the memory buffer that is being referenced.
|
c
|
/**
* @file
* Conversion to/from base64 encoding
*
* @authors
* @copyright
* This program is free software: you can redistribute it and/or modify it under
* the terms of the GNU General Public License as published by the Free Software
* Foundation, either version 2 of the License, or (at your option) any later
* version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
* FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _MUTT_BASE64_H
#define _MUTT_BASE64_H
#include <stdio.h>
extern const int Index64[];
#define base64val(c) Index64[(unsigned int) (c)]
size_t mutt_b64_encode(char *out, const char *cin, size_t len, size_t olen);
int mutt_b64_decode(char *out, const char *in);
#endif /* _MUTT_BASE64_H */
|
/**
* @file
* Conversion to/from base64 encoding
*
* @authors
* @copyright
* This program is free software: you can redistribute it and/or modify it under
* the terms of the GNU General Public License as published by the Free Software
* Foundation, either version 2 of the License, or (at your option) any later
* version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
* FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _MUTT_BASE64_H
#define _MUTT_BASE64_H
#include <stdio.h>
extern const int Index64[];
#define base64val(c) Index64[(unsigned int) (c)]
size_t mutt_b64_encode(char *out, const char *cin, size_t len, size_t olen);
int mutt_b64_decode(char *out, const char *in, size_t olen);
#endif /* _MUTT_BASE64_H */
|
252_3
|
crossvul
|
h
|
CWE-119
|
Improper Restriction of Operations within the Bounds of a Memory Buffer - Certain languages allow direct addressing of memory locations and do not automatically ensure that these locations are valid for the memory buffer that is being referenced.
|
c
|
#ifndef _GENERATOR_H_
#define _GENERATOR_H_
#include <string.h>
#include <math.h>
#include <ctype.h>
#include "ruby.h"
#ifdef HAVE_RUBY_RE_H
#include "ruby/re.h"
#else
#include "re.h"
#endif
#ifndef rb_intern_str
#define rb_intern_str(string) SYM2ID(rb_str_intern(string))
#endif
#ifndef rb_obj_instance_variables
#define rb_obj_instance_variables(object) rb_funcall(object, rb_intern("instance_variables"), 0)
#endif
#define option_given_p(opts, key) RTEST(rb_funcall(opts, i_key_p, 1, key))
/* unicode definitions */
#define UNI_STRICT_CONVERSION 1
typedef unsigned long UTF32; /* at least 32 bits */
typedef unsigned short UTF16; /* at least 16 bits */
typedef unsigned char UTF8; /* typically 8 bits */
#define UNI_REPLACEMENT_CHAR (UTF32)0x0000FFFD
#define UNI_MAX_BMP (UTF32)0x0000FFFF
#define UNI_MAX_UTF16 (UTF32)0x0010FFFF
#define UNI_MAX_UTF32 (UTF32)0x7FFFFFFF
#define UNI_MAX_LEGAL_UTF32 (UTF32)0x0010FFFF
#define UNI_SUR_HIGH_START (UTF32)0xD800
#define UNI_SUR_HIGH_END (UTF32)0xDBFF
#define UNI_SUR_LOW_START (UTF32)0xDC00
#define UNI_SUR_LOW_END (UTF32)0xDFFF
static const int halfShift = 10; /* used for shifting by 10 bits */
static const UTF32 halfBase = 0x0010000UL;
static const UTF32 halfMask = 0x3FFUL;
static unsigned char isLegalUTF8(const UTF8 *source, unsigned long length);
static void unicode_escape(char *buf, UTF16 character);
static void unicode_escape_to_buffer(FBuffer *buffer, char buf[6], UTF16 character);
static void convert_UTF8_to_JSON_ASCII(FBuffer *buffer, VALUE string);
static void convert_UTF8_to_JSON(FBuffer *buffer, VALUE string);
static char *fstrndup(const char *ptr, unsigned long len);
/* ruby api and some helpers */
typedef struct JSON_Generator_StateStruct {
char *indent;
long indent_len;
char *space;
long space_len;
char *space_before;
long space_before_len;
char *object_nl;
long object_nl_len;
char *array_nl;
long array_nl_len;
FBuffer *array_delim;
FBuffer *object_delim;
FBuffer *object_delim2;
long max_nesting;
char allow_nan;
char ascii_only;
long depth;
long buffer_initial_length;
} JSON_Generator_State;
#define GET_STATE_TO(self, state) \
TypedData_Get_Struct(self, JSON_Generator_State, &JSON_Generator_State_type, state)
#define GET_STATE(self) \
JSON_Generator_State *state; \
GET_STATE_TO(self, state)
#define GENERATE_JSON(type) \
FBuffer *buffer; \
VALUE Vstate; \
JSON_Generator_State *state; \
\
rb_scan_args(argc, argv, "01", &Vstate); \
Vstate = cState_from_state_s(cState, Vstate); \
TypedData_Get_Struct(Vstate, JSON_Generator_State, &JSON_Generator_State_type, state); \
buffer = cState_prepare_buffer(Vstate); \
generate_json_##type(buffer, Vstate, state, self); \
return fbuffer_to_s(buffer)
static VALUE mHash_to_json(int argc, VALUE *argv, VALUE self);
static VALUE mArray_to_json(int argc, VALUE *argv, VALUE self);
#ifdef RUBY_INTEGER_UNIFICATION
static VALUE mInteger_to_json(int argc, VALUE *argv, VALUE self);
#else
static VALUE mFixnum_to_json(int argc, VALUE *argv, VALUE self);
static VALUE mBignum_to_json(int argc, VALUE *argv, VALUE self);
#endif
static VALUE mFloat_to_json(int argc, VALUE *argv, VALUE self);
static VALUE mString_included_s(VALUE self, VALUE modul);
static VALUE mString_to_json(int argc, VALUE *argv, VALUE self);
static VALUE mString_to_json_raw_object(VALUE self);
static VALUE mString_to_json_raw(int argc, VALUE *argv, VALUE self);
static VALUE mString_Extend_json_create(VALUE self, VALUE o);
static VALUE mTrueClass_to_json(int argc, VALUE *argv, VALUE self);
static VALUE mFalseClass_to_json(int argc, VALUE *argv, VALUE self);
static VALUE mNilClass_to_json(int argc, VALUE *argv, VALUE self);
static VALUE mObject_to_json(int argc, VALUE *argv, VALUE self);
static void State_free(void *state);
static VALUE cState_s_allocate(VALUE klass);
static VALUE cState_configure(VALUE self, VALUE opts);
static VALUE cState_to_h(VALUE self);
static void generate_json(FBuffer *buffer, VALUE Vstate, JSON_Generator_State *state, VALUE obj);
static void generate_json_object(FBuffer *buffer, VALUE Vstate, JSON_Generator_State *state, VALUE obj);
static void generate_json_array(FBuffer *buffer, VALUE Vstate, JSON_Generator_State *state, VALUE obj);
static void generate_json_string(FBuffer *buffer, VALUE Vstate, JSON_Generator_State *state, VALUE obj);
static void generate_json_null(FBuffer *buffer, VALUE Vstate, JSON_Generator_State *state, VALUE obj);
static void generate_json_false(FBuffer *buffer, VALUE Vstate, JSON_Generator_State *state, VALUE obj);
static void generate_json_true(FBuffer *buffer, VALUE Vstate, JSON_Generator_State *state, VALUE obj);
#ifdef RUBY_INTEGER_UNIFICATION
static void generate_json_integer(FBuffer *buffer, VALUE Vstate, JSON_Generator_State *state, VALUE obj);
#endif
static void generate_json_fixnum(FBuffer *buffer, VALUE Vstate, JSON_Generator_State *state, VALUE obj);
static void generate_json_bignum(FBuffer *buffer, VALUE Vstate, JSON_Generator_State *state, VALUE obj);
static void generate_json_float(FBuffer *buffer, VALUE Vstate, JSON_Generator_State *state, VALUE obj);
static VALUE cState_partial_generate(VALUE self, VALUE obj);
static VALUE cState_generate(VALUE self, VALUE obj);
static VALUE cState_initialize(int argc, VALUE *argv, VALUE self);
static VALUE cState_from_state_s(VALUE self, VALUE opts);
static VALUE cState_indent(VALUE self);
static VALUE cState_indent_set(VALUE self, VALUE indent);
static VALUE cState_space(VALUE self);
static VALUE cState_space_set(VALUE self, VALUE space);
static VALUE cState_space_before(VALUE self);
static VALUE cState_space_before_set(VALUE self, VALUE space_before);
static VALUE cState_object_nl(VALUE self);
static VALUE cState_object_nl_set(VALUE self, VALUE object_nl);
static VALUE cState_array_nl(VALUE self);
static VALUE cState_array_nl_set(VALUE self, VALUE array_nl);
static VALUE cState_max_nesting(VALUE self);
static VALUE cState_max_nesting_set(VALUE self, VALUE depth);
static VALUE cState_allow_nan_p(VALUE self);
static VALUE cState_ascii_only_p(VALUE self);
static VALUE cState_depth(VALUE self);
static VALUE cState_depth_set(VALUE self, VALUE depth);
static FBuffer *cState_prepare_buffer(VALUE self);
#ifndef ZALLOC
#define ZALLOC(type) ((type *)ruby_zalloc(sizeof(type)))
static inline void *ruby_zalloc(size_t n)
{
void *p = ruby_xmalloc(n);
memset(p, 0, n);
return p;
}
#endif
#ifdef TypedData_Make_Struct
static const rb_data_type_t JSON_Generator_State_type;
#define NEW_TYPEDDATA_WRAPPER 1
#else
#define TypedData_Make_Struct(klass, type, ignore, json) Data_Make_Struct(klass, type, NULL, State_free, json)
#define TypedData_Get_Struct(self, JSON_Generator_State, ignore, json) Data_Get_Struct(self, JSON_Generator_State, json)
#endif
#endif
|
#ifndef _GENERATOR_H_
#define _GENERATOR_H_
#include <math.h>
#include <ctype.h>
#include "ruby.h"
#ifdef HAVE_RUBY_RE_H
#include "ruby/re.h"
#else
#include "re.h"
#endif
#ifndef rb_intern_str
#define rb_intern_str(string) SYM2ID(rb_str_intern(string))
#endif
#ifndef rb_obj_instance_variables
#define rb_obj_instance_variables(object) rb_funcall(object, rb_intern("instance_variables"), 0)
#endif
#define option_given_p(opts, key) RTEST(rb_funcall(opts, i_key_p, 1, key))
/* unicode definitions */
#define UNI_STRICT_CONVERSION 1
typedef unsigned long UTF32; /* at least 32 bits */
typedef unsigned short UTF16; /* at least 16 bits */
typedef unsigned char UTF8; /* typically 8 bits */
#define UNI_REPLACEMENT_CHAR (UTF32)0x0000FFFD
#define UNI_MAX_BMP (UTF32)0x0000FFFF
#define UNI_MAX_UTF16 (UTF32)0x0010FFFF
#define UNI_MAX_UTF32 (UTF32)0x7FFFFFFF
#define UNI_MAX_LEGAL_UTF32 (UTF32)0x0010FFFF
#define UNI_SUR_HIGH_START (UTF32)0xD800
#define UNI_SUR_HIGH_END (UTF32)0xDBFF
#define UNI_SUR_LOW_START (UTF32)0xDC00
#define UNI_SUR_LOW_END (UTF32)0xDFFF
static const int halfShift = 10; /* used for shifting by 10 bits */
static const UTF32 halfBase = 0x0010000UL;
static const UTF32 halfMask = 0x3FFUL;
static unsigned char isLegalUTF8(const UTF8 *source, unsigned long length);
static void unicode_escape(char *buf, UTF16 character);
static void unicode_escape_to_buffer(FBuffer *buffer, char buf[6], UTF16 character);
static void convert_UTF8_to_JSON_ASCII(FBuffer *buffer, VALUE string);
static void convert_UTF8_to_JSON(FBuffer *buffer, VALUE string);
static char *fstrndup(const char *ptr, unsigned long len);
/* ruby api and some helpers */
typedef struct JSON_Generator_StateStruct {
char *indent;
long indent_len;
char *space;
long space_len;
char *space_before;
long space_before_len;
char *object_nl;
long object_nl_len;
char *array_nl;
long array_nl_len;
FBuffer *array_delim;
FBuffer *object_delim;
FBuffer *object_delim2;
long max_nesting;
char allow_nan;
char ascii_only;
long depth;
long buffer_initial_length;
} JSON_Generator_State;
#define GET_STATE_TO(self, state) \
TypedData_Get_Struct(self, JSON_Generator_State, &JSON_Generator_State_type, state)
#define GET_STATE(self) \
JSON_Generator_State *state; \
GET_STATE_TO(self, state)
#define GENERATE_JSON(type) \
FBuffer *buffer; \
VALUE Vstate; \
JSON_Generator_State *state; \
\
rb_scan_args(argc, argv, "01", &Vstate); \
Vstate = cState_from_state_s(cState, Vstate); \
TypedData_Get_Struct(Vstate, JSON_Generator_State, &JSON_Generator_State_type, state); \
buffer = cState_prepare_buffer(Vstate); \
generate_json_##type(buffer, Vstate, state, self); \
return fbuffer_to_s(buffer)
static VALUE mHash_to_json(int argc, VALUE *argv, VALUE self);
static VALUE mArray_to_json(int argc, VALUE *argv, VALUE self);
#ifdef RUBY_INTEGER_UNIFICATION
static VALUE mInteger_to_json(int argc, VALUE *argv, VALUE self);
#else
static VALUE mFixnum_to_json(int argc, VALUE *argv, VALUE self);
static VALUE mBignum_to_json(int argc, VALUE *argv, VALUE self);
#endif
static VALUE mFloat_to_json(int argc, VALUE *argv, VALUE self);
static VALUE mString_included_s(VALUE self, VALUE modul);
static VALUE mString_to_json(int argc, VALUE *argv, VALUE self);
static VALUE mString_to_json_raw_object(VALUE self);
static VALUE mString_to_json_raw(int argc, VALUE *argv, VALUE self);
static VALUE mString_Extend_json_create(VALUE self, VALUE o);
static VALUE mTrueClass_to_json(int argc, VALUE *argv, VALUE self);
static VALUE mFalseClass_to_json(int argc, VALUE *argv, VALUE self);
static VALUE mNilClass_to_json(int argc, VALUE *argv, VALUE self);
static VALUE mObject_to_json(int argc, VALUE *argv, VALUE self);
static void State_free(void *state);
static VALUE cState_s_allocate(VALUE klass);
static VALUE cState_configure(VALUE self, VALUE opts);
static VALUE cState_to_h(VALUE self);
static void generate_json(FBuffer *buffer, VALUE Vstate, JSON_Generator_State *state, VALUE obj);
static void generate_json_object(FBuffer *buffer, VALUE Vstate, JSON_Generator_State *state, VALUE obj);
static void generate_json_array(FBuffer *buffer, VALUE Vstate, JSON_Generator_State *state, VALUE obj);
static void generate_json_string(FBuffer *buffer, VALUE Vstate, JSON_Generator_State *state, VALUE obj);
static void generate_json_null(FBuffer *buffer, VALUE Vstate, JSON_Generator_State *state, VALUE obj);
static void generate_json_false(FBuffer *buffer, VALUE Vstate, JSON_Generator_State *state, VALUE obj);
static void generate_json_true(FBuffer *buffer, VALUE Vstate, JSON_Generator_State *state, VALUE obj);
#ifdef RUBY_INTEGER_UNIFICATION
static void generate_json_integer(FBuffer *buffer, VALUE Vstate, JSON_Generator_State *state, VALUE obj);
#endif
static void generate_json_fixnum(FBuffer *buffer, VALUE Vstate, JSON_Generator_State *state, VALUE obj);
static void generate_json_bignum(FBuffer *buffer, VALUE Vstate, JSON_Generator_State *state, VALUE obj);
static void generate_json_float(FBuffer *buffer, VALUE Vstate, JSON_Generator_State *state, VALUE obj);
static VALUE cState_partial_generate(VALUE self, VALUE obj);
static VALUE cState_generate(VALUE self, VALUE obj);
static VALUE cState_initialize(int argc, VALUE *argv, VALUE self);
static VALUE cState_from_state_s(VALUE self, VALUE opts);
static VALUE cState_indent(VALUE self);
static VALUE cState_indent_set(VALUE self, VALUE indent);
static VALUE cState_space(VALUE self);
static VALUE cState_space_set(VALUE self, VALUE space);
static VALUE cState_space_before(VALUE self);
static VALUE cState_space_before_set(VALUE self, VALUE space_before);
static VALUE cState_object_nl(VALUE self);
static VALUE cState_object_nl_set(VALUE self, VALUE object_nl);
static VALUE cState_array_nl(VALUE self);
static VALUE cState_array_nl_set(VALUE self, VALUE array_nl);
static VALUE cState_max_nesting(VALUE self);
static VALUE cState_max_nesting_set(VALUE self, VALUE depth);
static VALUE cState_allow_nan_p(VALUE self);
static VALUE cState_ascii_only_p(VALUE self);
static VALUE cState_depth(VALUE self);
static VALUE cState_depth_set(VALUE self, VALUE depth);
static FBuffer *cState_prepare_buffer(VALUE self);
#ifndef ZALLOC
#define ZALLOC(type) ((type *)ruby_zalloc(sizeof(type)))
static inline void *ruby_zalloc(size_t n)
{
void *p = ruby_xmalloc(n);
memset(p, 0, n);
return p;
}
#endif
#ifdef TypedData_Make_Struct
static const rb_data_type_t JSON_Generator_State_type;
#define NEW_TYPEDDATA_WRAPPER 1
#else
#define TypedData_Make_Struct(klass, type, ignore, json) Data_Make_Struct(klass, type, NULL, State_free, json)
#define TypedData_Get_Struct(self, JSON_Generator_State, ignore, json) Data_Get_Struct(self, JSON_Generator_State, json)
#endif
#endif
|
2767_1
|
crossvul
|
h
|
CWE-119
|
Improper Restriction of Operations within the Bounds of a Memory Buffer - Certain languages allow direct addressing of memory locations and do not automatically ensure that these locations are valid for the memory buffer that is being referenced.
|
c
|
/*
* This file holds USB constants and structures that are needed for
* USB device APIs. These are used by the USB device model, which is
* defined in chapter 9 of the USB 2.0 specification and in the
* Wireless USB 1.0 (spread around). Linux has several APIs in C that
* need these:
*
* - the master/host side Linux-USB kernel driver API;
* - the "usbfs" user space API; and
* - the Linux "gadget" slave/device/peripheral side driver API.
*
* USB 2.0 adds an additional "On The Go" (OTG) mode, which lets systems
* act either as a USB master/host or as a USB slave/device. That means
* the master and slave side APIs benefit from working well together.
*
* There's also "Wireless USB", using low power short range radios for
* peripheral interconnection but otherwise building on the USB framework.
*
* Note all descriptors are declared '__attribute__((packed))' so that:
*
* [a] they never get padded, either internally (USB spec writers
* probably handled that) or externally;
*
* [b] so that accessing bigger-than-a-bytes fields will never
* generate bus errors on any platform, even when the location of
* its descriptor inside a bundle isn't "naturally aligned", and
*
* [c] for consistency, removing all doubt even when it appears to
* someone that the two other points are non-issues for that
* particular descriptor type.
*/
#ifndef _UAPI__LINUX_USB_CH9_H
#define _UAPI__LINUX_USB_CH9_H
#include <linux/types.h> /* __u8 etc */
#include <asm/byteorder.h> /* le16_to_cpu */
/*-------------------------------------------------------------------------*/
/* CONTROL REQUEST SUPPORT */
/*
* USB directions
*
* This bit flag is used in endpoint descriptors' bEndpointAddress field.
* It's also one of three fields in control requests bRequestType.
*/
#define USB_DIR_OUT 0 /* to device */
#define USB_DIR_IN 0x80 /* to host */
/*
* USB types, the second of three bRequestType fields
*/
#define USB_TYPE_MASK (0x03 << 5)
#define USB_TYPE_STANDARD (0x00 << 5)
#define USB_TYPE_CLASS (0x01 << 5)
#define USB_TYPE_VENDOR (0x02 << 5)
#define USB_TYPE_RESERVED (0x03 << 5)
/*
* USB recipients, the third of three bRequestType fields
*/
#define USB_RECIP_MASK 0x1f
#define USB_RECIP_DEVICE 0x00
#define USB_RECIP_INTERFACE 0x01
#define USB_RECIP_ENDPOINT 0x02
#define USB_RECIP_OTHER 0x03
/* From Wireless USB 1.0 */
#define USB_RECIP_PORT 0x04
#define USB_RECIP_RPIPE 0x05
/*
* Standard requests, for the bRequest field of a SETUP packet.
*
* These are qualified by the bRequestType field, so that for example
* TYPE_CLASS or TYPE_VENDOR specific feature flags could be retrieved
* by a GET_STATUS request.
*/
#define USB_REQ_GET_STATUS 0x00
#define USB_REQ_CLEAR_FEATURE 0x01
#define USB_REQ_SET_FEATURE 0x03
#define USB_REQ_SET_ADDRESS 0x05
#define USB_REQ_GET_DESCRIPTOR 0x06
#define USB_REQ_SET_DESCRIPTOR 0x07
#define USB_REQ_GET_CONFIGURATION 0x08
#define USB_REQ_SET_CONFIGURATION 0x09
#define USB_REQ_GET_INTERFACE 0x0A
#define USB_REQ_SET_INTERFACE 0x0B
#define USB_REQ_SYNCH_FRAME 0x0C
#define USB_REQ_SET_SEL 0x30
#define USB_REQ_SET_ISOCH_DELAY 0x31
#define USB_REQ_SET_ENCRYPTION 0x0D /* Wireless USB */
#define USB_REQ_GET_ENCRYPTION 0x0E
#define USB_REQ_RPIPE_ABORT 0x0E
#define USB_REQ_SET_HANDSHAKE 0x0F
#define USB_REQ_RPIPE_RESET 0x0F
#define USB_REQ_GET_HANDSHAKE 0x10
#define USB_REQ_SET_CONNECTION 0x11
#define USB_REQ_SET_SECURITY_DATA 0x12
#define USB_REQ_GET_SECURITY_DATA 0x13
#define USB_REQ_SET_WUSB_DATA 0x14
#define USB_REQ_LOOPBACK_DATA_WRITE 0x15
#define USB_REQ_LOOPBACK_DATA_READ 0x16
#define USB_REQ_SET_INTERFACE_DS 0x17
/* specific requests for USB Power Delivery */
#define USB_REQ_GET_PARTNER_PDO 20
#define USB_REQ_GET_BATTERY_STATUS 21
#define USB_REQ_SET_PDO 22
#define USB_REQ_GET_VDM 23
#define USB_REQ_SEND_VDM 24
/* The Link Power Management (LPM) ECN defines USB_REQ_TEST_AND_SET command,
* used by hubs to put ports into a new L1 suspend state, except that it
* forgot to define its number ...
*/
/*
* USB feature flags are written using USB_REQ_{CLEAR,SET}_FEATURE, and
* are read as a bit array returned by USB_REQ_GET_STATUS. (So there
* are at most sixteen features of each type.) Hubs may also support a
* new USB_REQ_TEST_AND_SET_FEATURE to put ports into L1 suspend.
*/
#define USB_DEVICE_SELF_POWERED 0 /* (read only) */
#define USB_DEVICE_REMOTE_WAKEUP 1 /* dev may initiate wakeup */
#define USB_DEVICE_TEST_MODE 2 /* (wired high speed only) */
#define USB_DEVICE_BATTERY 2 /* (wireless) */
#define USB_DEVICE_B_HNP_ENABLE 3 /* (otg) dev may initiate HNP */
#define USB_DEVICE_WUSB_DEVICE 3 /* (wireless)*/
#define USB_DEVICE_A_HNP_SUPPORT 4 /* (otg) RH port supports HNP */
#define USB_DEVICE_A_ALT_HNP_SUPPORT 5 /* (otg) other RH port does */
#define USB_DEVICE_DEBUG_MODE 6 /* (special devices only) */
/*
* Test Mode Selectors
* See USB 2.0 spec Table 9-7
*/
#define TEST_J 1
#define TEST_K 2
#define TEST_SE0_NAK 3
#define TEST_PACKET 4
#define TEST_FORCE_EN 5
/*
* New Feature Selectors as added by USB 3.0
* See USB 3.0 spec Table 9-7
*/
#define USB_DEVICE_U1_ENABLE 48 /* dev may initiate U1 transition */
#define USB_DEVICE_U2_ENABLE 49 /* dev may initiate U2 transition */
#define USB_DEVICE_LTM_ENABLE 50 /* dev may send LTM */
#define USB_INTRF_FUNC_SUSPEND 0 /* function suspend */
#define USB_INTR_FUNC_SUSPEND_OPT_MASK 0xFF00
/*
* Suspend Options, Table 9-8 USB 3.0 spec
*/
#define USB_INTRF_FUNC_SUSPEND_LP (1 << (8 + 0))
#define USB_INTRF_FUNC_SUSPEND_RW (1 << (8 + 1))
/*
* Interface status, Figure 9-5 USB 3.0 spec
*/
#define USB_INTRF_STAT_FUNC_RW_CAP 1
#define USB_INTRF_STAT_FUNC_RW 2
#define USB_ENDPOINT_HALT 0 /* IN/OUT will STALL */
/* Bit array elements as returned by the USB_REQ_GET_STATUS request. */
#define USB_DEV_STAT_U1_ENABLED 2 /* transition into U1 state */
#define USB_DEV_STAT_U2_ENABLED 3 /* transition into U2 state */
#define USB_DEV_STAT_LTM_ENABLED 4 /* Latency tolerance messages */
/*
* Feature selectors from Table 9-8 USB Power Delivery spec
*/
#define USB_DEVICE_BATTERY_WAKE_MASK 40
#define USB_DEVICE_OS_IS_PD_AWARE 41
#define USB_DEVICE_POLICY_MODE 42
#define USB_PORT_PR_SWAP 43
#define USB_PORT_GOTO_MIN 44
#define USB_PORT_RETURN_POWER 45
#define USB_PORT_ACCEPT_PD_REQUEST 46
#define USB_PORT_REJECT_PD_REQUEST 47
#define USB_PORT_PORT_PD_RESET 48
#define USB_PORT_C_PORT_PD_CHANGE 49
#define USB_PORT_CABLE_PD_RESET 50
#define USB_DEVICE_CHARGING_POLICY 54
/**
* struct usb_ctrlrequest - SETUP data for a USB device control request
* @bRequestType: matches the USB bmRequestType field
* @bRequest: matches the USB bRequest field
* @wValue: matches the USB wValue field (le16 byte order)
* @wIndex: matches the USB wIndex field (le16 byte order)
* @wLength: matches the USB wLength field (le16 byte order)
*
* This structure is used to send control requests to a USB device. It matches
* the different fields of the USB 2.0 Spec section 9.3, table 9-2. See the
* USB spec for a fuller description of the different fields, and what they are
* used for.
*
* Note that the driver for any interface can issue control requests.
* For most devices, interfaces don't coordinate with each other, so
* such requests may be made at any time.
*/
struct usb_ctrlrequest {
__u8 bRequestType;
__u8 bRequest;
__le16 wValue;
__le16 wIndex;
__le16 wLength;
} __attribute__ ((packed));
/*-------------------------------------------------------------------------*/
/*
* STANDARD DESCRIPTORS ... as returned by GET_DESCRIPTOR, or
* (rarely) accepted by SET_DESCRIPTOR.
*
* Note that all multi-byte values here are encoded in little endian
* byte order "on the wire". Within the kernel and when exposed
* through the Linux-USB APIs, they are not converted to cpu byte
* order; it is the responsibility of the client code to do this.
* The single exception is when device and configuration descriptors (but
* not other descriptors) are read from character devices
* (i.e. /dev/bus/usb/BBB/DDD);
* in this case the fields are converted to host endianness by the kernel.
*/
/*
* Descriptor types ... USB 2.0 spec table 9.5
*/
#define USB_DT_DEVICE 0x01
#define USB_DT_CONFIG 0x02
#define USB_DT_STRING 0x03
#define USB_DT_INTERFACE 0x04
#define USB_DT_ENDPOINT 0x05
#define USB_DT_DEVICE_QUALIFIER 0x06
#define USB_DT_OTHER_SPEED_CONFIG 0x07
#define USB_DT_INTERFACE_POWER 0x08
/* these are from a minor usb 2.0 revision (ECN) */
#define USB_DT_OTG 0x09
#define USB_DT_DEBUG 0x0a
#define USB_DT_INTERFACE_ASSOCIATION 0x0b
/* these are from the Wireless USB spec */
#define USB_DT_SECURITY 0x0c
#define USB_DT_KEY 0x0d
#define USB_DT_ENCRYPTION_TYPE 0x0e
#define USB_DT_BOS 0x0f
#define USB_DT_DEVICE_CAPABILITY 0x10
#define USB_DT_WIRELESS_ENDPOINT_COMP 0x11
#define USB_DT_WIRE_ADAPTER 0x21
#define USB_DT_RPIPE 0x22
#define USB_DT_CS_RADIO_CONTROL 0x23
/* From the T10 UAS specification */
#define USB_DT_PIPE_USAGE 0x24
/* From the USB 3.0 spec */
#define USB_DT_SS_ENDPOINT_COMP 0x30
/* From the USB 3.1 spec */
#define USB_DT_SSP_ISOC_ENDPOINT_COMP 0x31
/* Conventional codes for class-specific descriptors. The convention is
* defined in the USB "Common Class" Spec (3.11). Individual class specs
* are authoritative for their usage, not the "common class" writeup.
*/
#define USB_DT_CS_DEVICE (USB_TYPE_CLASS | USB_DT_DEVICE)
#define USB_DT_CS_CONFIG (USB_TYPE_CLASS | USB_DT_CONFIG)
#define USB_DT_CS_STRING (USB_TYPE_CLASS | USB_DT_STRING)
#define USB_DT_CS_INTERFACE (USB_TYPE_CLASS | USB_DT_INTERFACE)
#define USB_DT_CS_ENDPOINT (USB_TYPE_CLASS | USB_DT_ENDPOINT)
/* All standard descriptors have these 2 fields at the beginning */
/* Common two-byte prefix shared by every standard descriptor. */
struct usb_descriptor_header {
	__u8 bLength;			/* total size of this descriptor, in bytes */
	__u8 bDescriptorType;		/* one of the USB_DT_* values */
} __attribute__ ((packed));
/*-------------------------------------------------------------------------*/
/* USB_DT_DEVICE: Device descriptor */
struct usb_device_descriptor {
	__u8 bLength;
	__u8 bDescriptorType;		/* USB_DT_DEVICE */
	__le16 bcdUSB;			/* USB spec release number, BCD */
	__u8 bDeviceClass;		/* USB_CLASS_* (0 = per-interface) */
	__u8 bDeviceSubClass;
	__u8 bDeviceProtocol;
	__u8 bMaxPacketSize0;		/* endpoint zero max packet size */
	__le16 idVendor;
	__le16 idProduct;
	__le16 bcdDevice;		/* device release number, BCD */
	__u8 iManufacturer;		/* string descriptor indexes (0 = none) */
	__u8 iProduct;
	__u8 iSerialNumber;
	__u8 bNumConfigurations;
} __attribute__ ((packed));
#define USB_DT_DEVICE_SIZE 18
/*
* Device and/or Interface Class codes
* as found in bDeviceClass or bInterfaceClass
* and defined by www.usb.org documents
*/
#define USB_CLASS_PER_INTERFACE 0 /* for DeviceClass */
#define USB_CLASS_AUDIO 1
#define USB_CLASS_COMM 2
#define USB_CLASS_HID 3
#define USB_CLASS_PHYSICAL 5
#define USB_CLASS_STILL_IMAGE 6
#define USB_CLASS_PRINTER 7
#define USB_CLASS_MASS_STORAGE 8
#define USB_CLASS_HUB 9
#define USB_CLASS_CDC_DATA 0x0a
#define USB_CLASS_CSCID 0x0b /* chip+ smart card */
#define USB_CLASS_CONTENT_SEC 0x0d /* content security */
#define USB_CLASS_VIDEO 0x0e
#define USB_CLASS_WIRELESS_CONTROLLER 0xe0
#define USB_CLASS_MISC 0xef
#define USB_CLASS_APP_SPEC 0xfe
#define USB_CLASS_VENDOR_SPEC 0xff
#define USB_SUBCLASS_VENDOR_SPEC 0xff
/*-------------------------------------------------------------------------*/
/* USB_DT_CONFIG: Configuration descriptor information.
*
* USB_DT_OTHER_SPEED_CONFIG is the same descriptor, except that the
* descriptor type is different. Highspeed-capable devices can look
* different depending on what speed they're currently running. Only
* devices with a USB_DT_DEVICE_QUALIFIER have any OTHER_SPEED_CONFIG
* descriptors.
*/
struct usb_config_descriptor {
	__u8 bLength;
	__u8 bDescriptorType;		/* USB_DT_CONFIG or USB_DT_OTHER_SPEED_CONFIG */
	__le16 wTotalLength;		/* config plus all subordinate descriptors */
	__u8 bNumInterfaces;
	__u8 bConfigurationValue;	/* value used with SET_CONFIGURATION */
	__u8 iConfiguration;		/* string descriptor index (0 = none) */
	__u8 bmAttributes;		/* USB_CONFIG_ATT_* bits */
	__u8 bMaxPower;
} __attribute__ ((packed));
#define USB_DT_CONFIG_SIZE 9
/* from config descriptor bmAttributes */
#define USB_CONFIG_ATT_ONE (1 << 7) /* must be set */
#define USB_CONFIG_ATT_SELFPOWER (1 << 6) /* self powered */
#define USB_CONFIG_ATT_WAKEUP (1 << 5) /* can wakeup */
#define USB_CONFIG_ATT_BATTERY (1 << 4) /* battery powered */
/*-------------------------------------------------------------------------*/
/* USB_DT_STRING: String descriptor */
struct usb_string_descriptor {
	__u8 bLength;
	__u8 bDescriptorType;		/* USB_DT_STRING */
	/* variable-length: bLength gives the actual descriptor size */
	__le16 wData[1];		/* UTF-16LE encoded */
} __attribute__ ((packed));
/* note that "string" zero is special, it holds language codes that
* the device supports, not Unicode characters.
*/
/*-------------------------------------------------------------------------*/
/* USB_DT_INTERFACE: Interface descriptor */
struct usb_interface_descriptor {
	__u8 bLength;
	__u8 bDescriptorType;		/* USB_DT_INTERFACE */
	__u8 bInterfaceNumber;
	__u8 bAlternateSetting;		/* value used with SET_INTERFACE */
	__u8 bNumEndpoints;		/* endpoints excluding endpoint zero */
	__u8 bInterfaceClass;		/* USB_CLASS_* */
	__u8 bInterfaceSubClass;
	__u8 bInterfaceProtocol;
	__u8 iInterface;		/* string descriptor index (0 = none) */
} __attribute__ ((packed));
#define USB_DT_INTERFACE_SIZE 9
/*-------------------------------------------------------------------------*/
/* USB_DT_ENDPOINT: Endpoint descriptor */
struct usb_endpoint_descriptor {
	__u8 bLength;
	__u8 bDescriptorType;		/* USB_DT_ENDPOINT */
	__u8 bEndpointAddress;		/* number in bits 3:0, direction in bit 7 */
	__u8 bmAttributes;		/* transfer type in bits 1:0, see USB_ENDPOINT_XFER_* */
	__le16 wMaxPacketSize;
	__u8 bInterval;			/* polling interval */
	/* NOTE: these two are _only_ in audio endpoints. */
	/* use USB_DT_ENDPOINT*_SIZE in bLength, not sizeof. */
	__u8 bRefresh;
	__u8 bSynchAddress;
} __attribute__ ((packed));
#define USB_DT_ENDPOINT_SIZE 7
#define USB_DT_ENDPOINT_AUDIO_SIZE 9 /* Audio extension */
/*
* Endpoints
*/
#define USB_ENDPOINT_NUMBER_MASK 0x0f /* in bEndpointAddress */
#define USB_ENDPOINT_DIR_MASK 0x80
#define USB_ENDPOINT_XFERTYPE_MASK 0x03 /* in bmAttributes */
#define USB_ENDPOINT_XFER_CONTROL 0
#define USB_ENDPOINT_XFER_ISOC 1
#define USB_ENDPOINT_XFER_BULK 2
#define USB_ENDPOINT_XFER_INT 3
#define USB_ENDPOINT_MAX_ADJUSTABLE 0x80
#define USB_ENDPOINT_MAXP_MASK 0x07ff
#define USB_EP_MAXP_MULT_SHIFT 11
#define USB_EP_MAXP_MULT_MASK (3 << USB_EP_MAXP_MULT_SHIFT)
#define USB_EP_MAXP_MULT(m) \
(((m) & USB_EP_MAXP_MULT_MASK) >> USB_EP_MAXP_MULT_SHIFT)
/* The USB 3.0 spec redefines bits 5:4 of bmAttributes as interrupt ep type. */
#define USB_ENDPOINT_INTRTYPE 0x30
#define USB_ENDPOINT_INTR_PERIODIC (0 << 4)
#define USB_ENDPOINT_INTR_NOTIFICATION (1 << 4)
#define USB_ENDPOINT_SYNCTYPE 0x0c
#define USB_ENDPOINT_SYNC_NONE (0 << 2)
#define USB_ENDPOINT_SYNC_ASYNC (1 << 2)
#define USB_ENDPOINT_SYNC_ADAPTIVE (2 << 2)
#define USB_ENDPOINT_SYNC_SYNC (3 << 2)
#define USB_ENDPOINT_USAGE_MASK 0x30
#define USB_ENDPOINT_USAGE_DATA 0x00
#define USB_ENDPOINT_USAGE_FEEDBACK 0x10
#define USB_ENDPOINT_USAGE_IMPLICIT_FB 0x20 /* Implicit feedback Data endpoint */
/*-------------------------------------------------------------------------*/
/**
 * usb_endpoint_num - get the endpoint's number
 * @epd: endpoint to be checked
 *
 * Returns @epd's number: 0 to 15.
 */
static inline int usb_endpoint_num(const struct usb_endpoint_descriptor *epd)
{
	int num = epd->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;

	return num;
}
/**
 * usb_endpoint_type - get the endpoint's transfer type
 * @epd: endpoint to be checked
 *
 * Returns one of USB_ENDPOINT_XFER_{CONTROL, ISOC, BULK, INT} according
 * to @epd's transfer type.
 */
static inline int usb_endpoint_type(const struct usb_endpoint_descriptor *epd)
{
	int xfertype = epd->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;

	return xfertype;
}
/**
 * usb_endpoint_dir_in - check if the endpoint has IN direction
 * @epd: endpoint to be checked
 *
 * Returns true if the endpoint is of type IN, otherwise it returns false.
 */
static inline int usb_endpoint_dir_in(const struct usb_endpoint_descriptor *epd)
{
	int dir = epd->bEndpointAddress & USB_ENDPOINT_DIR_MASK;

	return dir == USB_DIR_IN;
}
/**
 * usb_endpoint_dir_out - check if the endpoint has OUT direction
 * @epd: endpoint to be checked
 *
 * Returns true if the endpoint is of type OUT, otherwise it returns false.
 */
static inline int usb_endpoint_dir_out(
				const struct usb_endpoint_descriptor *epd)
{
	int dir = epd->bEndpointAddress & USB_ENDPOINT_DIR_MASK;

	return dir == USB_DIR_OUT;
}
/**
 * usb_endpoint_xfer_bulk - check if the endpoint has bulk transfer type
 * @epd: endpoint to be checked
 *
 * Returns true if the endpoint is of type bulk, otherwise it returns false.
 */
static inline int usb_endpoint_xfer_bulk(
				const struct usb_endpoint_descriptor *epd)
{
	int xfertype = epd->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;

	return xfertype == USB_ENDPOINT_XFER_BULK;
}
/**
 * usb_endpoint_xfer_control - check if the endpoint has control transfer type
 * @epd: endpoint to be checked
 *
 * Returns true if the endpoint is of type control, otherwise it returns false.
 */
static inline int usb_endpoint_xfer_control(
				const struct usb_endpoint_descriptor *epd)
{
	int xfertype = epd->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;

	return xfertype == USB_ENDPOINT_XFER_CONTROL;
}
/**
 * usb_endpoint_xfer_int - check if the endpoint has interrupt transfer type
 * @epd: endpoint to be checked
 *
 * Returns true if the endpoint is of type interrupt, otherwise it returns
 * false.
 */
static inline int usb_endpoint_xfer_int(
				const struct usb_endpoint_descriptor *epd)
{
	int xfertype = epd->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;

	return xfertype == USB_ENDPOINT_XFER_INT;
}
/**
 * usb_endpoint_xfer_isoc - check if the endpoint has isochronous transfer type
 * @epd: endpoint to be checked
 *
 * Returns true if the endpoint is of type isochronous, otherwise it returns
 * false.
 */
static inline int usb_endpoint_xfer_isoc(
				const struct usb_endpoint_descriptor *epd)
{
	int xfertype = epd->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;

	return xfertype == USB_ENDPOINT_XFER_ISOC;
}
/**
 * usb_endpoint_is_bulk_in - check if the endpoint is bulk IN
 * @epd: endpoint to be checked
 *
 * Returns true if the endpoint has bulk transfer type and IN direction,
 * otherwise it returns false.
 */
static inline int usb_endpoint_is_bulk_in(
				const struct usb_endpoint_descriptor *epd)
{
	/* both predicates are pure, so the evaluation order is irrelevant */
	return usb_endpoint_dir_in(epd) && usb_endpoint_xfer_bulk(epd);
}
/**
 * usb_endpoint_is_bulk_out - check if the endpoint is bulk OUT
 * @epd: endpoint to be checked
 *
 * Returns true if the endpoint has bulk transfer type and OUT direction,
 * otherwise it returns false.
 */
static inline int usb_endpoint_is_bulk_out(
				const struct usb_endpoint_descriptor *epd)
{
	/* both predicates are pure, so the evaluation order is irrelevant */
	return usb_endpoint_dir_out(epd) && usb_endpoint_xfer_bulk(epd);
}
/**
 * usb_endpoint_is_int_in - check if the endpoint is interrupt IN
 * @epd: endpoint to be checked
 *
 * Returns true if the endpoint has interrupt transfer type and IN direction,
 * otherwise it returns false.
 */
static inline int usb_endpoint_is_int_in(
				const struct usb_endpoint_descriptor *epd)
{
	/* both predicates are pure, so the evaluation order is irrelevant */
	return usb_endpoint_dir_in(epd) && usb_endpoint_xfer_int(epd);
}
/**
 * usb_endpoint_is_int_out - check if the endpoint is interrupt OUT
 * @epd: endpoint to be checked
 *
 * Returns true if the endpoint has interrupt transfer type and OUT direction,
 * otherwise it returns false.
 */
static inline int usb_endpoint_is_int_out(
				const struct usb_endpoint_descriptor *epd)
{
	/* both predicates are pure, so the evaluation order is irrelevant */
	return usb_endpoint_dir_out(epd) && usb_endpoint_xfer_int(epd);
}
/**
 * usb_endpoint_is_isoc_in - check if the endpoint is isochronous IN
 * @epd: endpoint to be checked
 *
 * Returns true if the endpoint has isochronous transfer type and IN direction,
 * otherwise it returns false.
 */
static inline int usb_endpoint_is_isoc_in(
				const struct usb_endpoint_descriptor *epd)
{
	/* both predicates are pure, so the evaluation order is irrelevant */
	return usb_endpoint_dir_in(epd) && usb_endpoint_xfer_isoc(epd);
}
/**
 * usb_endpoint_is_isoc_out - check if the endpoint is isochronous OUT
 * @epd: endpoint to be checked
 *
 * Returns true if the endpoint has isochronous transfer type and OUT direction,
 * otherwise it returns false.
 */
static inline int usb_endpoint_is_isoc_out(
				const struct usb_endpoint_descriptor *epd)
{
	/* both predicates are pure, so the evaluation order is irrelevant */
	return usb_endpoint_dir_out(epd) && usb_endpoint_xfer_isoc(epd);
}
/**
 * usb_endpoint_maxp - get endpoint's max packet size
 * @epd: endpoint to be checked
 *
 * Returns @epd's max packet bits [10:0]
 */
static inline int usb_endpoint_maxp(const struct usb_endpoint_descriptor *epd)
{
	int maxp = __le16_to_cpu(epd->wMaxPacketSize);

	return maxp & USB_ENDPOINT_MAXP_MASK;
}
/**
 * usb_endpoint_maxp_mult - get endpoint's transactional opportunities
 * @epd: endpoint to be checked
 *
 * Return @epd's wMaxPacketSize[12:11] + 1
 */
static inline int
usb_endpoint_maxp_mult(const struct usb_endpoint_descriptor *epd)
{
	return 1 + USB_EP_MAXP_MULT(__le16_to_cpu(epd->wMaxPacketSize));
}
/**
 * usb_endpoint_interrupt_type - get the endpoint's interrupt type bits
 * @epd: endpoint to be checked
 *
 * Returns the USB_ENDPOINT_INTRTYPE bits of @epd's bmAttributes
 * (meaningful for USB 3.0 interrupt endpoints).
 */
static inline int usb_endpoint_interrupt_type(
				const struct usb_endpoint_descriptor *epd)
{
	int intrtype = epd->bmAttributes & USB_ENDPOINT_INTRTYPE;

	return intrtype;
}
/*-------------------------------------------------------------------------*/
/* USB_DT_SSP_ISOC_ENDPOINT_COMP: SuperSpeedPlus Isochronous Endpoint Companion
* descriptor
*/
struct usb_ssp_isoc_ep_comp_descriptor {
	__u8 bLength;
	__u8 bDescriptorType;		/* USB_DT_SSP_ISOC_ENDPOINT_COMP */
	/* historic misspelling of "wReserved" kept: the name is part of the ABI */
	__le16 wReseved;
	__le32 dwBytesPerInterval;
} __attribute__ ((packed));
#define USB_DT_SSP_ISOC_EP_COMP_SIZE 8
/*-------------------------------------------------------------------------*/
/* USB_DT_SS_ENDPOINT_COMP: SuperSpeed Endpoint Companion descriptor */
struct usb_ss_ep_comp_descriptor {
	__u8 bLength;
	__u8 bDescriptorType;		/* USB_DT_SS_ENDPOINT_COMP */
	__u8 bMaxBurst;
	__u8 bmAttributes;		/* meaning depends on endpoint type, see below */
	__le16 wBytesPerInterval;
} __attribute__ ((packed));
#define USB_DT_SS_EP_COMP_SIZE 6
/* Bits 4:0 of bmAttributes if this is a bulk endpoint */
/**
 * usb_ss_max_streams - number of streams supported by a bulk endpoint
 * @comp: SuperSpeed endpoint companion descriptor, may be NULL
 *
 * Returns 2^(bmAttributes[4:0]) streams, or 0 when @comp is NULL or the
 * endpoint advertises no stream support.
 */
static inline int
usb_ss_max_streams(const struct usb_ss_ep_comp_descriptor *comp)
{
	int exp;

	if (!comp)
		return 0;

	exp = comp->bmAttributes & 0x1f;

	return exp ? 1 << exp : 0;
}
/* Bits 1:0 of bmAttributes if this is an isoc endpoint */
#define USB_SS_MULT(p) (1 + ((p) & 0x3))
/* Bit 7 of bmAttributes if a SSP isoc endpoint companion descriptor exists */
#define USB_SS_SSP_ISOC_COMP(p) ((p) & (1 << 7))
/*-------------------------------------------------------------------------*/
/* USB_DT_DEVICE_QUALIFIER: Device Qualifier descriptor */
/* Mirrors the device descriptor for the device's "other" operating speed. */
struct usb_qualifier_descriptor {
	__u8 bLength;
	__u8 bDescriptorType;		/* USB_DT_DEVICE_QUALIFIER */
	__le16 bcdUSB;
	__u8 bDeviceClass;
	__u8 bDeviceSubClass;
	__u8 bDeviceProtocol;
	__u8 bMaxPacketSize0;
	__u8 bNumConfigurations;
	__u8 bRESERVED;
} __attribute__ ((packed));
/*-------------------------------------------------------------------------*/
/* USB_DT_OTG (from OTG 1.0a supplement) */
struct usb_otg_descriptor {
	__u8 bLength;
	__u8 bDescriptorType;		/* USB_DT_OTG */
	__u8 bmAttributes;	/* support for HNP, SRP, etc */
} __attribute__ ((packed));
/* USB_DT_OTG (from OTG 2.0 supplement) */
struct usb_otg20_descriptor {
	__u8 bLength;
	__u8 bDescriptorType;		/* USB_DT_OTG */
	__u8 bmAttributes;	/* support for HNP, SRP and ADP, etc */
	__le16 bcdOTG;		/* OTG and EH supplement release number
				 * in binary-coded decimal(i.e. 2.0 is 0200H)
				 */
} __attribute__ ((packed));
/* from usb_otg_descriptor.bmAttributes */
#define USB_OTG_SRP (1 << 0)
#define USB_OTG_HNP (1 << 1) /* swap host/device roles */
#define USB_OTG_ADP (1 << 2) /* support ADP */
#define OTG_STS_SELECTOR 0xF000 /* OTG status selector */
/*-------------------------------------------------------------------------*/
/* USB_DT_DEBUG: for special highspeed devices, replacing serial console */
struct usb_debug_descriptor {
	__u8 bLength;
	__u8 bDescriptorType;		/* USB_DT_DEBUG */
	/* bulk endpoints with 8 byte maxpacket */
	__u8 bDebugInEndpoint;
	__u8 bDebugOutEndpoint;
} __attribute__((packed));
/*-------------------------------------------------------------------------*/
/* USB_DT_INTERFACE_ASSOCIATION: groups interfaces */
/* Groups a contiguous range of interfaces into one function. */
struct usb_interface_assoc_descriptor {
	__u8 bLength;
	__u8 bDescriptorType;		/* USB_DT_INTERFACE_ASSOCIATION */
	__u8 bFirstInterface;		/* first interface in the association */
	__u8 bInterfaceCount;		/* number of contiguous interfaces */
	__u8 bFunctionClass;
	__u8 bFunctionSubClass;
	__u8 bFunctionProtocol;
	__u8 iFunction;			/* string descriptor index (0 = none) */
} __attribute__ ((packed));
/*-------------------------------------------------------------------------*/
/* USB_DT_SECURITY: group of wireless security descriptors, including
* encryption types available for setting up a CC/association.
*/
struct usb_security_descriptor {
	__u8 bLength;
	__u8 bDescriptorType;		/* USB_DT_SECURITY */
	__le16 wTotalLength;		/* this group of descriptors, in bytes */
	__u8 bNumEncryptionTypes;
} __attribute__((packed));
/*-------------------------------------------------------------------------*/
/* USB_DT_KEY: used with {GET,SET}_SECURITY_DATA; only public keys
* may be retrieved.
*/
struct usb_key_descriptor {
	__u8 bLength;
	__u8 bDescriptorType;		/* USB_DT_KEY */
	__u8 tTKID[3];			/* temporal key identifier */
	__u8 bReserved;
	__u8 bKeyData[0];		/* variable-length key material */
} __attribute__((packed));
/*-------------------------------------------------------------------------*/
/* USB_DT_ENCRYPTION_TYPE: bundled in DT_SECURITY groups */
struct usb_encryption_descriptor {
	__u8 bLength;
	__u8 bDescriptorType;		/* USB_DT_ENCRYPTION_TYPE */
	__u8 bEncryptionType;		/* one of the USB_ENC_TYPE_* values */
#define USB_ENC_TYPE_UNSECURE 0
#define USB_ENC_TYPE_WIRED 1 /* non-wireless mode */
#define USB_ENC_TYPE_CCM_1 2 /* aes128/cbc session */
#define USB_ENC_TYPE_RSA_1 3 /* rsa3072/sha1 auth */
	__u8 bEncryptionValue; /* use in SET_ENCRYPTION */
	__u8 bAuthKeyIndex;
} __attribute__((packed));
/*-------------------------------------------------------------------------*/
/* USB_DT_BOS: group of device-level capabilities */
struct usb_bos_descriptor {
	__u8 bLength;
	__u8 bDescriptorType;		/* USB_DT_BOS */
	__le16 wTotalLength;		/* BOS plus all capability descriptors */
	__u8 bNumDeviceCaps;
} __attribute__((packed));
#define USB_DT_BOS_SIZE 5
/*-------------------------------------------------------------------------*/
/* USB_DT_DEVICE_CAPABILITY: grouped with BOS */
/* Common prefix of every device capability descriptor inside a BOS group. */
struct usb_dev_cap_header {
	__u8 bLength;
	__u8 bDescriptorType;		/* USB_DT_DEVICE_CAPABILITY */
	__u8 bDevCapabilityType;	/* e.g. USB_CAP_TYPE_EXT, USB_SS_CAP_TYPE */
} __attribute__((packed));
#define USB_CAP_TYPE_WIRELESS_USB 1
struct usb_wireless_cap_descriptor { /* Ultra Wide Band */
	__u8 bLength;
	__u8 bDescriptorType;		/* USB_DT_DEVICE_CAPABILITY */
	__u8 bDevCapabilityType;	/* USB_CAP_TYPE_WIRELESS_USB */
	__u8 bmAttributes;
#define USB_WIRELESS_P2P_DRD (1 << 1)
#define USB_WIRELESS_BEACON_MASK (3 << 2)
#define USB_WIRELESS_BEACON_SELF (1 << 2)
#define USB_WIRELESS_BEACON_DIRECTED (2 << 2)
#define USB_WIRELESS_BEACON_NONE (3 << 2)
	__le16 wPHYRates; /* bit rates, Mbps */
#define USB_WIRELESS_PHY_53 (1 << 0) /* always set */
#define USB_WIRELESS_PHY_80 (1 << 1)
#define USB_WIRELESS_PHY_107 (1 << 2) /* always set */
#define USB_WIRELESS_PHY_160 (1 << 3)
#define USB_WIRELESS_PHY_200 (1 << 4) /* always set */
#define USB_WIRELESS_PHY_320 (1 << 5)
#define USB_WIRELESS_PHY_400 (1 << 6)
#define USB_WIRELESS_PHY_480 (1 << 7)
	__u8 bmTFITXPowerInfo; /* TFI power levels */
	__u8 bmFFITXPowerInfo; /* FFI power levels */
	__le16 bmBandGroup;
	__u8 bReserved;
} __attribute__((packed));
/* USB 2.0 Extension descriptor */
#define USB_CAP_TYPE_EXT 2
struct usb_ext_cap_descriptor { /* Link Power Management */
	__u8 bLength;
	__u8 bDescriptorType;		/* USB_DT_DEVICE_CAPABILITY */
	__u8 bDevCapabilityType;	/* USB_CAP_TYPE_EXT */
	__le32 bmAttributes;
#define USB_LPM_SUPPORT (1 << 1) /* supports LPM */
#define USB_BESL_SUPPORT (1 << 2) /* supports BESL */
#define USB_BESL_BASELINE_VALID (1 << 3) /* Baseline BESL valid*/
#define USB_BESL_DEEP_VALID (1 << 4) /* Deep BESL valid */
#define USB_GET_BESL_BASELINE(p) (((p) & (0xf << 8)) >> 8)
#define USB_GET_BESL_DEEP(p) (((p) & (0xf << 12)) >> 12)
} __attribute__((packed));
#define USB_DT_USB_EXT_CAP_SIZE 7
/*
* SuperSpeed USB Capability descriptor: Defines the set of SuperSpeed USB
* specific device level capabilities
*/
#define USB_SS_CAP_TYPE 3
struct usb_ss_cap_descriptor { /* Link Power Management */
	__u8 bLength;
	__u8 bDescriptorType;		/* USB_DT_DEVICE_CAPABILITY */
	__u8 bDevCapabilityType;	/* USB_SS_CAP_TYPE */
	__u8 bmAttributes;
#define USB_LTM_SUPPORT (1 << 1) /* supports LTM */
	__le16 wSpeedSupported;		/* bitmap of USB_*_OPERATION speeds */
#define USB_LOW_SPEED_OPERATION (1) /* Low speed operation */
#define USB_FULL_SPEED_OPERATION (1 << 1) /* Full speed operation */
#define USB_HIGH_SPEED_OPERATION (1 << 2) /* High speed operation */
#define USB_5GBPS_OPERATION (1 << 3) /* Operation at 5Gbps */
	__u8 bFunctionalitySupport;
	__u8 bU1devExitLat;		/* U1 exit latency */
	__le16 bU2DevExitLat;		/* U2 exit latency */
} __attribute__((packed));
#define USB_DT_USB_SS_CAP_SIZE 10
/*
* Container ID Capability descriptor: Defines the instance unique ID used to
* identify the instance across all operating modes
*/
#define CONTAINER_ID_TYPE 4
struct usb_ss_container_id_descriptor {
	__u8 bLength;
	__u8 bDescriptorType;		/* USB_DT_DEVICE_CAPABILITY */
	__u8 bDevCapabilityType;	/* CONTAINER_ID_TYPE */
	__u8 bReserved;
	__u8 ContainerID[16]; /* 128-bit number */
} __attribute__((packed));
#define USB_DT_USB_SS_CONTN_ID_SIZE 20
/*
* SuperSpeed Plus USB Capability descriptor: Defines the set of
* SuperSpeed Plus USB specific device level capabilities
*/
#define USB_SSP_CAP_TYPE 0xa
struct usb_ssp_cap_descriptor {
	__u8 bLength;
	__u8 bDescriptorType;		/* USB_DT_DEVICE_CAPABILITY */
	__u8 bDevCapabilityType;	/* USB_SSP_CAP_TYPE */
	__u8 bReserved;
	__le32 bmAttributes;
#define USB_SSP_SUBLINK_SPEED_ATTRIBS (0x1f << 0) /* sublink speed entries */
#define USB_SSP_SUBLINK_SPEED_IDS (0xf << 5) /* speed ID entries */
	__le16 wFunctionalitySupport;
#define USB_SSP_MIN_SUBLINK_SPEED_ATTRIBUTE_ID (0xf)
#define USB_SSP_MIN_RX_LANE_COUNT (0xf << 8)
#define USB_SSP_MIN_TX_LANE_COUNT (0xf << 12)
	__le16 wReserved;
	/* variable-length: count given by bmAttributes[4:0] (SSAC) */
	__le32 bmSublinkSpeedAttr[1]; /* list of sublink speed attrib entries */
#define USB_SSP_SUBLINK_SPEED_SSID (0xf) /* sublink speed ID */
#define USB_SSP_SUBLINK_SPEED_LSE (0x3 << 4) /* Lanespeed exponent */
#define USB_SSP_SUBLINK_SPEED_ST (0x3 << 6) /* Sublink type */
#define USB_SSP_SUBLINK_SPEED_RSVD (0x3f << 8) /* Reserved */
#define USB_SSP_SUBLINK_SPEED_LP (0x3 << 14) /* Link protocol */
#define USB_SSP_SUBLINK_SPEED_LSM (0xff << 16) /* Lanespeed mantissa */
} __attribute__((packed));
/*
* USB Power Delivery Capability Descriptor:
* Defines capabilities for PD
*/
/* Defines the various PD Capabilities of this device */
#define USB_PD_POWER_DELIVERY_CAPABILITY 0x06
/* Provides information on each battery supported by the device */
#define USB_PD_BATTERY_INFO_CAPABILITY 0x07
/* The Consumer characteristics of a Port on the device */
#define USB_PD_PD_CONSUMER_PORT_CAPABILITY 0x08
/* The provider characteristics of a Port on the device */
#define USB_PD_PD_PROVIDER_PORT_CAPABILITY 0x09
struct usb_pd_cap_descriptor {
	__u8 bLength;
	__u8 bDescriptorType;		/* USB_DT_DEVICE_CAPABILITY */
	__u8 bDevCapabilityType; /* set to USB_PD_POWER_DELIVERY_CAPABILITY */
	__u8 bReserved;
	__le32 bmAttributes;
#define USB_PD_CAP_BATTERY_CHARGING (1 << 1) /* supports Battery Charging specification */
#define USB_PD_CAP_USB_PD (1 << 2) /* supports USB Power Delivery specification */
#define USB_PD_CAP_PROVIDER (1 << 3) /* can provide power */
#define USB_PD_CAP_CONSUMER (1 << 4) /* can consume power */
#define USB_PD_CAP_CHARGING_POLICY (1 << 5) /* supports CHARGING_POLICY feature */
#define USB_PD_CAP_TYPE_C_CURRENT (1 << 6) /* supports power capabilities defined in the USB Type-C Specification */
#define USB_PD_CAP_PWR_AC (1 << 8)
#define USB_PD_CAP_PWR_BAT (1 << 9)
#define USB_PD_CAP_PWR_USE_V_BUS (1 << 14)
	__le16 bmProviderPorts; /* Bit zero refers to the UFP of the device */
	__le16 bmConsumerPorts;
	__le16 bcdBCVersion;		/* Battery Charging spec release, BCD */
	__le16 bcdPDVersion;		/* Power Delivery spec release, BCD */
	__le16 bcdUSBTypeCVersion;	/* Type-C spec release, BCD */
} __attribute__((packed));
struct usb_pd_cap_battery_info_descriptor {
	__u8 bLength;
	__u8 bDescriptorType;
	__u8 bDevCapabilityType;	/* USB_PD_BATTERY_INFO_CAPABILITY */
	/* Index of string descriptor shall contain the user friendly name for this battery */
	__u8 iBattery;
	/* Index of string descriptor shall contain the Serial Number String for this battery */
	__u8 iSerial;
	__u8 iManufacturer;
	__u8 bBatteryId; /* uniquely identifies this battery in status Messages */
	__u8 bReserved;
	/*
	 * Shall contain the Battery Charge value above which this
	 * battery is considered to be fully charged but not necessarily
	 * "topped off."
	 */
	__le32 dwChargedThreshold; /* in mWh */
	/*
	 * Shall contain the minimum charge level of this battery such
	 * that above this threshold, a device can be assured of being
	 * able to power up successfully (see Battery Charging 1.2).
	 */
	__le32 dwWeakThreshold; /* in mWh */
	__le32 dwBatteryDesignCapacity; /* in mWh */
	__le32 dwBatteryLastFullchargeCapacity; /* in mWh */
} __attribute__((packed));
struct usb_pd_cap_consumer_port_descriptor {
	__u8 bLength;
	__u8 bDescriptorType;
	__u8 bDevCapabilityType;	/* USB_PD_PD_CONSUMER_PORT_CAPABILITY */
	__u8 bReserved;
	__u8 bmCapabilities;
	/* port will operate under: */
#define USB_PD_CAP_CONSUMER_BC (1 << 0) /* BC */
#define USB_PD_CAP_CONSUMER_PD (1 << 1) /* PD */
#define USB_PD_CAP_CONSUMER_TYPE_C (1 << 2) /* USB Type-C Current */
	__le16 wMinVoltage; /* in 50mV units */
	__le16 wMaxVoltage; /* in 50mV units */
	__u16 wReserved;
	__le32 dwMaxOperatingPower; /* in 10 mW - operating at steady state */
	__le32 dwMaxPeakPower; /* in 10mW units - operating at peak power */
	__le32 dwMaxPeakPowerTime; /* in 100ms units - duration of peak */
#define USB_PD_CAP_CONSUMER_UNKNOWN_PEAK_POWER_TIME 0xffff
} __attribute__((packed));
struct usb_pd_cap_provider_port_descriptor {
	__u8 bLength;
	__u8 bDescriptorType;
	__u8 bDevCapabilityType;	/* USB_PD_PD_PROVIDER_PORT_CAPABILITY */
	__u8 bReserved1;
	__u8 bmCapabilities;
	/* port will operate under: */
#define USB_PD_CAP_PROVIDER_BC (1 << 0) /* BC */
#define USB_PD_CAP_PROVIDER_PD (1 << 1) /* PD */
#define USB_PD_CAP_PROVIDER_TYPE_C (1 << 2) /* USB Type-C Current */
	__u8 bNumOfPDObjects;
	__u8 bReserved2;
	__le32 wPowerDataObject[];	/* bNumOfPDObjects entries */
} __attribute__((packed));
/*
* Precision time measurement capability descriptor: advertised by devices and
* hubs that support PTM
*/
#define USB_PTM_CAP_TYPE 0xb
struct usb_ptm_cap_descriptor {
	__u8 bLength;
	__u8 bDescriptorType;		/* USB_DT_DEVICE_CAPABILITY */
	__u8 bDevCapabilityType;	/* USB_PTM_CAP_TYPE */
} __attribute__((packed));
/*
* The size of the descriptor for the Sublink Speed Attribute Count
* (SSAC) specified in bmAttributes[4:0].
*/
#define USB_DT_USB_SSP_CAP_SIZE(ssac) (16 + ssac * 4)
/*-------------------------------------------------------------------------*/
/* USB_DT_WIRELESS_ENDPOINT_COMP: companion descriptor associated with
* each endpoint descriptor for a wireless device
*/
struct usb_wireless_ep_comp_descriptor {
	__u8 bLength;
	__u8 bDescriptorType;		/* USB_DT_WIRELESS_ENDPOINT_COMP */
	__u8 bMaxBurst;
	__u8 bMaxSequence;
	__le16 wMaxStreamDelay;
	__le16 wOverTheAirPacketSize;
	__u8 bOverTheAirInterval;
	__u8 bmCompAttributes;
#define USB_ENDPOINT_SWITCH_MASK 0x03 /* in bmCompAttributes */
#define USB_ENDPOINT_SWITCH_NO 0
#define USB_ENDPOINT_SWITCH_SWITCH 1
#define USB_ENDPOINT_SWITCH_SCALE 2
} __attribute__((packed));
/*-------------------------------------------------------------------------*/
/* USB_REQ_SET_HANDSHAKE is a four-way handshake used between a wireless
* host and a device for connection set up, mutual authentication, and
* exchanging short lived session keys. The handshake depends on a CC.
*/
/* One message of the four-way USB_REQ_SET_HANDSHAKE exchange. */
struct usb_handshake {
	__u8 bMessageNumber;
	__u8 bStatus;
	__u8 tTKID[3];			/* temporal key identifier */
	__u8 bReserved;
	__u8 CDID[16];
	__u8 nonce[16];
	__u8 MIC[8];			/* message integrity code */
} __attribute__((packed));
/*-------------------------------------------------------------------------*/
/* USB_REQ_SET_CONNECTION modifies or revokes a connection context (CC).
* A CC may also be set up using non-wireless secure channels (including
* wired USB!), and some devices may support CCs with multiple hosts.
*/
struct usb_connection_context {
	__u8 CHID[16]; /* persistent host id */
	__u8 CDID[16]; /* device id (unique w/in host context) */
	__u8 CK[16]; /* connection key */
} __attribute__((packed));
/*-------------------------------------------------------------------------*/
/* USB 2.0 defines three speeds, here's how Linux identifies them */
/* Bus speeds, in increasing order (UNKNOWN sorts first). */
enum usb_device_speed {
	USB_SPEED_UNKNOWN = 0, /* enumerating */
	USB_SPEED_LOW, USB_SPEED_FULL, /* usb 1.1 */
	USB_SPEED_HIGH, /* usb 2.0 */
	USB_SPEED_WIRELESS, /* wireless (usb 2.5) */
	USB_SPEED_SUPER, /* usb 3.0 */
	USB_SPEED_SUPER_PLUS, /* usb 3.1 */
};
/* Device states per USB 2.0 spec chapter 9 (plus NOTATTACHED). */
enum usb_device_state {
	/* NOTATTACHED isn't in the USB spec, and this state acts
	 * the same as ATTACHED ... but it's clearer this way.
	 */
	USB_STATE_NOTATTACHED = 0,
	/* chapter 9 and authentication (wireless) device states */
	USB_STATE_ATTACHED,
	USB_STATE_POWERED, /* wired */
	USB_STATE_RECONNECTING, /* auth */
	USB_STATE_UNAUTHENTICATED, /* auth */
	USB_STATE_DEFAULT, /* limited function */
	USB_STATE_ADDRESS,
	USB_STATE_CONFIGURED, /* most functions */
	USB_STATE_SUSPENDED
	/* NOTE: there are actually four different SUSPENDED
	 * states, returning to POWERED, DEFAULT, ADDRESS, or
	 * CONFIGURED respectively when SOF tokens flow again.
	 * At this level there's no difference between L1 and L2
	 * suspend states. (L2 being original USB 1.1 suspend.)
	 */
};
/* USB 3.0 link power-management states; U0 is fully active, U3 is suspend. */
enum usb3_link_state {
	USB3_LPM_U0 = 0,
	USB3_LPM_U1,
	USB3_LPM_U2,
	USB3_LPM_U3
};
/*
* A U1 timeout of 0x0 means the parent hub will reject any transitions to U1.
* 0xff means the parent hub will accept transitions to U1, but will not
* initiate a transition.
*
* A U1 timeout of 0x1 to 0x7F also causes the hub to initiate a transition to
* U1 after that many microseconds. Timeouts of 0x80 to 0xFE are reserved
* values.
*
* A U2 timeout of 0x0 means the parent hub will reject any transitions to U2.
* 0xff means the parent hub will accept transitions to U2, but will not
* initiate a transition.
*
* A U2 timeout of 0x1 to 0xFE also causes the hub to initiate a transition to
* U2 after N*256 microseconds. Therefore a U2 timeout value of 0x1 means a U2
* idle timer of 256 microseconds, 0x2 means 512 microseconds, 0xFE means
* 65.024ms.
*/
#define USB3_LPM_DISABLED 0x0
#define USB3_LPM_U1_MAX_TIMEOUT 0x7F
#define USB3_LPM_U2_MAX_TIMEOUT 0xFE
#define USB3_LPM_DEVICE_INITIATED 0xFF
/* Data stage of the USB_REQ_SET_SEL (System Exit Latency) request. */
struct usb_set_sel_req {
	__u8 u1_sel;			/* U1 system exit latency */
	__u8 u1_pel;			/* U1 device-to-host exit latency */
	__le16 u2_sel;			/* U2 system exit latency */
	__le16 u2_pel;			/* U2 device-to-host exit latency */
} __attribute__ ((packed));
/*
* The Set System Exit Latency control transfer provides one byte each for
* U1 SEL and U1 PEL, so the max exit latency is 0xFF. U2 SEL and U2 PEL each
* are two bytes long.
*/
#define USB3_LPM_MAX_U1_SEL_PEL 0xFF
#define USB3_LPM_MAX_U2_SEL_PEL 0xFFFF
/*-------------------------------------------------------------------------*/
/*
* As per USB compliance update, a device that is actively drawing
* more than 100mA from USB must report itself as bus-powered in
* the GetStatus(DEVICE) call.
* http://compliance.usb.org/index.asp?UpdateFile=Electrical&Format=Standard#34
*/
#define USB_SELF_POWER_VBUS_MAX_DRAW 100
#endif /* _UAPI__LINUX_USB_CH9_H */
/* ==== concatenation artifact: a duplicated copy of this header follows ==== */
/*
* This file holds USB constants and structures that are needed for
* USB device APIs. These are used by the USB device model, which is
* defined in chapter 9 of the USB 2.0 specification and in the
* Wireless USB 1.0 (spread around). Linux has several APIs in C that
* need these:
*
* - the master/host side Linux-USB kernel driver API;
* - the "usbfs" user space API; and
* - the Linux "gadget" slave/device/peripheral side driver API.
*
* USB 2.0 adds an additional "On The Go" (OTG) mode, which lets systems
* act either as a USB master/host or as a USB slave/device. That means
* the master and slave side APIs benefit from working well together.
*
* There's also "Wireless USB", using low power short range radios for
* peripheral interconnection but otherwise building on the USB framework.
*
* Note all descriptors are declared '__attribute__((packed))' so that:
*
* [a] they never get padded, either internally (USB spec writers
* probably handled that) or externally;
*
* [b] so that accessing bigger-than-a-bytes fields will never
* generate bus errors on any platform, even when the location of
* its descriptor inside a bundle isn't "naturally aligned", and
*
* [c] for consistency, removing all doubt even when it appears to
* someone that the two other points are non-issues for that
* particular descriptor type.
*/
#ifndef _UAPI__LINUX_USB_CH9_H
#define _UAPI__LINUX_USB_CH9_H
#include <linux/types.h> /* __u8 etc */
#include <asm/byteorder.h> /* le16_to_cpu */
/*-------------------------------------------------------------------------*/
/* CONTROL REQUEST SUPPORT */
/*
* USB directions
*
* This bit flag is used in endpoint descriptors' bEndpointAddress field.
* It's also one of three fields in control requests bRequestType.
*/
#define USB_DIR_OUT 0 /* to device */
#define USB_DIR_IN 0x80 /* to host */
/*
* USB types, the second of three bRequestType fields
*/
#define USB_TYPE_MASK (0x03 << 5)
#define USB_TYPE_STANDARD (0x00 << 5)
#define USB_TYPE_CLASS (0x01 << 5)
#define USB_TYPE_VENDOR (0x02 << 5)
#define USB_TYPE_RESERVED (0x03 << 5)
/*
* USB recipients, the third of three bRequestType fields
*/
#define USB_RECIP_MASK 0x1f
#define USB_RECIP_DEVICE 0x00
#define USB_RECIP_INTERFACE 0x01
#define USB_RECIP_ENDPOINT 0x02
#define USB_RECIP_OTHER 0x03
/* From Wireless USB 1.0 */
#define USB_RECIP_PORT 0x04
#define USB_RECIP_RPIPE 0x05
/*
* Standard requests, for the bRequest field of a SETUP packet.
*
* These are qualified by the bRequestType field, so that for example
* TYPE_CLASS or TYPE_VENDOR specific feature flags could be retrieved
* by a GET_STATUS request.
*/
#define USB_REQ_GET_STATUS 0x00
#define USB_REQ_CLEAR_FEATURE 0x01
#define USB_REQ_SET_FEATURE 0x03
#define USB_REQ_SET_ADDRESS 0x05
#define USB_REQ_GET_DESCRIPTOR 0x06
#define USB_REQ_SET_DESCRIPTOR 0x07
#define USB_REQ_GET_CONFIGURATION 0x08
#define USB_REQ_SET_CONFIGURATION 0x09
#define USB_REQ_GET_INTERFACE 0x0A
#define USB_REQ_SET_INTERFACE 0x0B
#define USB_REQ_SYNCH_FRAME 0x0C
#define USB_REQ_SET_SEL 0x30
#define USB_REQ_SET_ISOCH_DELAY 0x31
#define USB_REQ_SET_ENCRYPTION 0x0D /* Wireless USB */
#define USB_REQ_GET_ENCRYPTION 0x0E
#define USB_REQ_RPIPE_ABORT 0x0E
#define USB_REQ_SET_HANDSHAKE 0x0F
#define USB_REQ_RPIPE_RESET 0x0F
#define USB_REQ_GET_HANDSHAKE 0x10
#define USB_REQ_SET_CONNECTION 0x11
#define USB_REQ_SET_SECURITY_DATA 0x12
#define USB_REQ_GET_SECURITY_DATA 0x13
#define USB_REQ_SET_WUSB_DATA 0x14
#define USB_REQ_LOOPBACK_DATA_WRITE 0x15
#define USB_REQ_LOOPBACK_DATA_READ 0x16
#define USB_REQ_SET_INTERFACE_DS 0x17
/* specific requests for USB Power Delivery */
#define USB_REQ_GET_PARTNER_PDO 20
#define USB_REQ_GET_BATTERY_STATUS 21
#define USB_REQ_SET_PDO 22
#define USB_REQ_GET_VDM 23
#define USB_REQ_SEND_VDM 24
/* The Link Power Management (LPM) ECN defines USB_REQ_TEST_AND_SET command,
* used by hubs to put ports into a new L1 suspend state, except that it
* forgot to define its number ...
*/
/*
* USB feature flags are written using USB_REQ_{CLEAR,SET}_FEATURE, and
* are read as a bit array returned by USB_REQ_GET_STATUS. (So there
* are at most sixteen features of each type.) Hubs may also support a
* new USB_REQ_TEST_AND_SET_FEATURE to put ports into L1 suspend.
*/
#define USB_DEVICE_SELF_POWERED 0 /* (read only) */
#define USB_DEVICE_REMOTE_WAKEUP 1 /* dev may initiate wakeup */
#define USB_DEVICE_TEST_MODE 2 /* (wired high speed only) */
#define USB_DEVICE_BATTERY 2 /* (wireless) */
#define USB_DEVICE_B_HNP_ENABLE 3 /* (otg) dev may initiate HNP */
#define USB_DEVICE_WUSB_DEVICE 3 /* (wireless)*/
#define USB_DEVICE_A_HNP_SUPPORT 4 /* (otg) RH port supports HNP */
#define USB_DEVICE_A_ALT_HNP_SUPPORT 5 /* (otg) other RH port does */
#define USB_DEVICE_DEBUG_MODE 6 /* (special devices only) */
/*
* Test Mode Selectors
* See USB 2.0 spec Table 9-7
*/
#define TEST_J 1
#define TEST_K 2
#define TEST_SE0_NAK 3
#define TEST_PACKET 4
#define TEST_FORCE_EN 5
/*
* New Feature Selectors as added by USB 3.0
* See USB 3.0 spec Table 9-7
*/
#define USB_DEVICE_U1_ENABLE 48 /* dev may initiate U1 transition */
#define USB_DEVICE_U2_ENABLE 49 /* dev may initiate U2 transition */
#define USB_DEVICE_LTM_ENABLE 50 /* dev may send LTM */
#define USB_INTRF_FUNC_SUSPEND 0 /* function suspend */
#define USB_INTR_FUNC_SUSPEND_OPT_MASK 0xFF00
/*
* Suspend Options, Table 9-8 USB 3.0 spec
*/
#define USB_INTRF_FUNC_SUSPEND_LP (1 << (8 + 0))
#define USB_INTRF_FUNC_SUSPEND_RW (1 << (8 + 1))
/*
* Interface status, Figure 9-5 USB 3.0 spec
*/
#define USB_INTRF_STAT_FUNC_RW_CAP 1
#define USB_INTRF_STAT_FUNC_RW 2
#define USB_ENDPOINT_HALT 0 /* IN/OUT will STALL */
/* Bit array elements as returned by the USB_REQ_GET_STATUS request. */
#define USB_DEV_STAT_U1_ENABLED 2 /* transition into U1 state */
#define USB_DEV_STAT_U2_ENABLED 3 /* transition into U2 state */
#define USB_DEV_STAT_LTM_ENABLED 4 /* Latency tolerance messages */
/*
* Feature selectors from Table 9-8 USB Power Delivery spec
*/
#define USB_DEVICE_BATTERY_WAKE_MASK 40
#define USB_DEVICE_OS_IS_PD_AWARE 41
#define USB_DEVICE_POLICY_MODE 42
#define USB_PORT_PR_SWAP 43
#define USB_PORT_GOTO_MIN 44
#define USB_PORT_RETURN_POWER 45
#define USB_PORT_ACCEPT_PD_REQUEST 46
#define USB_PORT_REJECT_PD_REQUEST 47
#define USB_PORT_PORT_PD_RESET 48
#define USB_PORT_C_PORT_PD_CHANGE 49
#define USB_PORT_CABLE_PD_RESET 50
#define USB_DEVICE_CHARGING_POLICY 54
/**
* struct usb_ctrlrequest - SETUP data for a USB device control request
* @bRequestType: matches the USB bmRequestType field
* @bRequest: matches the USB bRequest field
* @wValue: matches the USB wValue field (le16 byte order)
* @wIndex: matches the USB wIndex field (le16 byte order)
* @wLength: matches the USB wLength field (le16 byte order)
*
* This structure is used to send control requests to a USB device. It matches
* the different fields of the USB 2.0 Spec section 9.3, table 9-2. See the
* USB spec for a fuller description of the different fields, and what they are
* used for.
*
* Note that the driver for any interface can issue control requests.
* For most devices, interfaces don't coordinate with each other, so
* such requests may be made at any time.
*/
struct usb_ctrlrequest {
__u8 bRequestType;
__u8 bRequest;
__le16 wValue;
__le16 wIndex;
__le16 wLength;
} __attribute__ ((packed));
/*-------------------------------------------------------------------------*/
/*
* STANDARD DESCRIPTORS ... as returned by GET_DESCRIPTOR, or
* (rarely) accepted by SET_DESCRIPTOR.
*
* Note that all multi-byte values here are encoded in little endian
* byte order "on the wire". Within the kernel and when exposed
* through the Linux-USB APIs, they are not converted to cpu byte
* order; it is the responsibility of the client code to do this.
* The single exception is when device and configuration descriptors (but
* not other descriptors) are read from character devices
* (i.e. /dev/bus/usb/BBB/DDD);
* in this case the fields are converted to host endianness by the kernel.
*/
/*
* Descriptor types ... USB 2.0 spec table 9.5
*/
#define USB_DT_DEVICE 0x01
#define USB_DT_CONFIG 0x02
#define USB_DT_STRING 0x03
#define USB_DT_INTERFACE 0x04
#define USB_DT_ENDPOINT 0x05
#define USB_DT_DEVICE_QUALIFIER 0x06
#define USB_DT_OTHER_SPEED_CONFIG 0x07
#define USB_DT_INTERFACE_POWER 0x08
/* these are from a minor usb 2.0 revision (ECN) */
#define USB_DT_OTG 0x09
#define USB_DT_DEBUG 0x0a
#define USB_DT_INTERFACE_ASSOCIATION 0x0b
/* these are from the Wireless USB spec */
#define USB_DT_SECURITY 0x0c
#define USB_DT_KEY 0x0d
#define USB_DT_ENCRYPTION_TYPE 0x0e
#define USB_DT_BOS 0x0f
#define USB_DT_DEVICE_CAPABILITY 0x10
#define USB_DT_WIRELESS_ENDPOINT_COMP 0x11
#define USB_DT_WIRE_ADAPTER 0x21
#define USB_DT_RPIPE 0x22
#define USB_DT_CS_RADIO_CONTROL 0x23
/* From the T10 UAS specification */
#define USB_DT_PIPE_USAGE 0x24
/* From the USB 3.0 spec */
#define USB_DT_SS_ENDPOINT_COMP 0x30
/* From the USB 3.1 spec */
#define USB_DT_SSP_ISOC_ENDPOINT_COMP 0x31
/* Conventional codes for class-specific descriptors. The convention is
* defined in the USB "Common Class" Spec (3.11). Individual class specs
* are authoritative for their usage, not the "common class" writeup.
*/
#define USB_DT_CS_DEVICE (USB_TYPE_CLASS | USB_DT_DEVICE)
#define USB_DT_CS_CONFIG (USB_TYPE_CLASS | USB_DT_CONFIG)
#define USB_DT_CS_STRING (USB_TYPE_CLASS | USB_DT_STRING)
#define USB_DT_CS_INTERFACE (USB_TYPE_CLASS | USB_DT_INTERFACE)
#define USB_DT_CS_ENDPOINT (USB_TYPE_CLASS | USB_DT_ENDPOINT)
/* All standard descriptors have these 2 fields at the beginning */
struct usb_descriptor_header {
__u8 bLength;
__u8 bDescriptorType;
} __attribute__ ((packed));
/*-------------------------------------------------------------------------*/
/* USB_DT_DEVICE: Device descriptor */
struct usb_device_descriptor {
__u8 bLength;
__u8 bDescriptorType;
__le16 bcdUSB;
__u8 bDeviceClass;
__u8 bDeviceSubClass;
__u8 bDeviceProtocol;
__u8 bMaxPacketSize0;
__le16 idVendor;
__le16 idProduct;
__le16 bcdDevice;
__u8 iManufacturer;
__u8 iProduct;
__u8 iSerialNumber;
__u8 bNumConfigurations;
} __attribute__ ((packed));
#define USB_DT_DEVICE_SIZE 18
/*
* Device and/or Interface Class codes
* as found in bDeviceClass or bInterfaceClass
* and defined by www.usb.org documents
*/
#define USB_CLASS_PER_INTERFACE 0 /* for DeviceClass */
#define USB_CLASS_AUDIO 1
#define USB_CLASS_COMM 2
#define USB_CLASS_HID 3
#define USB_CLASS_PHYSICAL 5
#define USB_CLASS_STILL_IMAGE 6
#define USB_CLASS_PRINTER 7
#define USB_CLASS_MASS_STORAGE 8
#define USB_CLASS_HUB 9
#define USB_CLASS_CDC_DATA 0x0a
#define USB_CLASS_CSCID 0x0b /* chip+ smart card */
#define USB_CLASS_CONTENT_SEC 0x0d /* content security */
#define USB_CLASS_VIDEO 0x0e
#define USB_CLASS_WIRELESS_CONTROLLER 0xe0
#define USB_CLASS_MISC 0xef
#define USB_CLASS_APP_SPEC 0xfe
#define USB_CLASS_VENDOR_SPEC 0xff
#define USB_SUBCLASS_VENDOR_SPEC 0xff
/*-------------------------------------------------------------------------*/
/* USB_DT_CONFIG: Configuration descriptor information.
*
* USB_DT_OTHER_SPEED_CONFIG is the same descriptor, except that the
* descriptor type is different. Highspeed-capable devices can look
* different depending on what speed they're currently running. Only
* devices with a USB_DT_DEVICE_QUALIFIER have any OTHER_SPEED_CONFIG
* descriptors.
*/
struct usb_config_descriptor {
__u8 bLength;
__u8 bDescriptorType;
__le16 wTotalLength;
__u8 bNumInterfaces;
__u8 bConfigurationValue;
__u8 iConfiguration;
__u8 bmAttributes;
__u8 bMaxPower;
} __attribute__ ((packed));
#define USB_DT_CONFIG_SIZE 9
/* from config descriptor bmAttributes */
#define USB_CONFIG_ATT_ONE (1 << 7) /* must be set */
#define USB_CONFIG_ATT_SELFPOWER (1 << 6) /* self powered */
#define USB_CONFIG_ATT_WAKEUP (1 << 5) /* can wakeup */
#define USB_CONFIG_ATT_BATTERY (1 << 4) /* battery powered */
/*-------------------------------------------------------------------------*/
/* USB_DT_STRING: String descriptor */
struct usb_string_descriptor {
	__u8 bLength;
	__u8 bDescriptorType;
	/* Variable length in practice; [1] is the pre-C99 flexible-array idiom. */
	__le16 wData[1]; /* UTF-16LE encoded */
} __attribute__ ((packed));
/* note that "string" zero is special, it holds language codes that
* the device supports, not Unicode characters.
*/
/*-------------------------------------------------------------------------*/
/* USB_DT_INTERFACE: Interface descriptor */
struct usb_interface_descriptor {
__u8 bLength;
__u8 bDescriptorType;
__u8 bInterfaceNumber;
__u8 bAlternateSetting;
__u8 bNumEndpoints;
__u8 bInterfaceClass;
__u8 bInterfaceSubClass;
__u8 bInterfaceProtocol;
__u8 iInterface;
} __attribute__ ((packed));
#define USB_DT_INTERFACE_SIZE 9
/*-------------------------------------------------------------------------*/
/* USB_DT_ENDPOINT: Endpoint descriptor */
struct usb_endpoint_descriptor {
__u8 bLength;
__u8 bDescriptorType;
__u8 bEndpointAddress;
__u8 bmAttributes;
__le16 wMaxPacketSize;
__u8 bInterval;
/* NOTE: these two are _only_ in audio endpoints. */
/* use USB_DT_ENDPOINT*_SIZE in bLength, not sizeof. */
__u8 bRefresh;
__u8 bSynchAddress;
} __attribute__ ((packed));
#define USB_DT_ENDPOINT_SIZE 7
#define USB_DT_ENDPOINT_AUDIO_SIZE 9 /* Audio extension */
/*
* Endpoints
*/
#define USB_ENDPOINT_NUMBER_MASK 0x0f /* in bEndpointAddress */
#define USB_ENDPOINT_DIR_MASK 0x80
#define USB_ENDPOINT_XFERTYPE_MASK 0x03 /* in bmAttributes */
#define USB_ENDPOINT_XFER_CONTROL 0
#define USB_ENDPOINT_XFER_ISOC 1
#define USB_ENDPOINT_XFER_BULK 2
#define USB_ENDPOINT_XFER_INT 3
#define USB_ENDPOINT_MAX_ADJUSTABLE 0x80
#define USB_ENDPOINT_MAXP_MASK 0x07ff
#define USB_EP_MAXP_MULT_SHIFT 11
#define USB_EP_MAXP_MULT_MASK (3 << USB_EP_MAXP_MULT_SHIFT)
#define USB_EP_MAXP_MULT(m) \
(((m) & USB_EP_MAXP_MULT_MASK) >> USB_EP_MAXP_MULT_SHIFT)
/* The USB 3.0 spec redefines bits 5:4 of bmAttributes as interrupt ep type. */
#define USB_ENDPOINT_INTRTYPE 0x30
#define USB_ENDPOINT_INTR_PERIODIC (0 << 4)
#define USB_ENDPOINT_INTR_NOTIFICATION (1 << 4)
#define USB_ENDPOINT_SYNCTYPE 0x0c
#define USB_ENDPOINT_SYNC_NONE (0 << 2)
#define USB_ENDPOINT_SYNC_ASYNC (1 << 2)
#define USB_ENDPOINT_SYNC_ADAPTIVE (2 << 2)
#define USB_ENDPOINT_SYNC_SYNC (3 << 2)
#define USB_ENDPOINT_USAGE_MASK 0x30
#define USB_ENDPOINT_USAGE_DATA 0x00
#define USB_ENDPOINT_USAGE_FEEDBACK 0x10
#define USB_ENDPOINT_USAGE_IMPLICIT_FB 0x20 /* Implicit feedback Data endpoint */
/*-------------------------------------------------------------------------*/
/**
 * usb_endpoint_num - get the endpoint's number
 * @epd: endpoint to be checked
 *
 * Returns @epd's number: 0 to 15.
 */
static inline int usb_endpoint_num(const struct usb_endpoint_descriptor *epd)
{
	int num = epd->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;

	return num;
}
/**
 * usb_endpoint_type - get the endpoint's transfer type
 * @epd: endpoint to be checked
 *
 * Returns one of USB_ENDPOINT_XFER_{CONTROL, ISOC, BULK, INT} according
 * to @epd's transfer type.
 */
static inline int usb_endpoint_type(const struct usb_endpoint_descriptor *epd)
{
	int xfer_type = epd->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;

	return xfer_type;
}
/**
 * usb_endpoint_dir_in - check if the endpoint has IN direction
 * @epd: endpoint to be checked
 *
 * Returns true if the endpoint is of type IN, otherwise it returns false.
 */
static inline int usb_endpoint_dir_in(const struct usb_endpoint_descriptor *epd)
{
	int dir = epd->bEndpointAddress & USB_ENDPOINT_DIR_MASK;

	return dir == USB_DIR_IN;
}
/**
 * usb_endpoint_dir_out - check if the endpoint has OUT direction
 * @epd: endpoint to be checked
 *
 * Returns true if the endpoint is of type OUT, otherwise it returns false.
 */
static inline int usb_endpoint_dir_out(
				const struct usb_endpoint_descriptor *epd)
{
	int dir = epd->bEndpointAddress & USB_ENDPOINT_DIR_MASK;

	return dir == USB_DIR_OUT;
}
/**
 * usb_endpoint_xfer_bulk - check if the endpoint has bulk transfer type
 * @epd: endpoint to be checked
 *
 * Returns true if the endpoint is of type bulk, otherwise it returns false.
 */
static inline int usb_endpoint_xfer_bulk(
				const struct usb_endpoint_descriptor *epd)
{
	int xfer_type = epd->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;

	return xfer_type == USB_ENDPOINT_XFER_BULK;
}
/**
 * usb_endpoint_xfer_control - check if the endpoint has control transfer type
 * @epd: endpoint to be checked
 *
 * Returns true if the endpoint is of type control, otherwise it returns false.
 */
static inline int usb_endpoint_xfer_control(
				const struct usb_endpoint_descriptor *epd)
{
	int xfer_type = epd->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;

	return xfer_type == USB_ENDPOINT_XFER_CONTROL;
}
/**
 * usb_endpoint_xfer_int - check if the endpoint has interrupt transfer type
 * @epd: endpoint to be checked
 *
 * Returns true if the endpoint is of type interrupt, otherwise it returns
 * false.
 */
static inline int usb_endpoint_xfer_int(
				const struct usb_endpoint_descriptor *epd)
{
	int xfer_type = epd->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;

	return xfer_type == USB_ENDPOINT_XFER_INT;
}
/**
 * usb_endpoint_xfer_isoc - check if the endpoint has isochronous transfer type
 * @epd: endpoint to be checked
 *
 * Returns true if the endpoint is of type isochronous, otherwise it returns
 * false.
 */
static inline int usb_endpoint_xfer_isoc(
				const struct usb_endpoint_descriptor *epd)
{
	int xfer_type = epd->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;

	return xfer_type == USB_ENDPOINT_XFER_ISOC;
}
/**
 * usb_endpoint_is_bulk_in - check if the endpoint is bulk IN
 * @epd: endpoint to be checked
 *
 * Returns true if the endpoint has bulk transfer type and IN direction,
 * otherwise it returns false.
 */
static inline int usb_endpoint_is_bulk_in(
				const struct usb_endpoint_descriptor *epd)
{
	if (!usb_endpoint_xfer_bulk(epd))
		return 0;

	return usb_endpoint_dir_in(epd);
}
/**
 * usb_endpoint_is_bulk_out - check if the endpoint is bulk OUT
 * @epd: endpoint to be checked
 *
 * Returns true if the endpoint has bulk transfer type and OUT direction,
 * otherwise it returns false.
 */
static inline int usb_endpoint_is_bulk_out(
				const struct usb_endpoint_descriptor *epd)
{
	if (!usb_endpoint_xfer_bulk(epd))
		return 0;

	return usb_endpoint_dir_out(epd);
}
/**
 * usb_endpoint_is_int_in - check if the endpoint is interrupt IN
 * @epd: endpoint to be checked
 *
 * Returns true if the endpoint has interrupt transfer type and IN direction,
 * otherwise it returns false.
 */
static inline int usb_endpoint_is_int_in(
				const struct usb_endpoint_descriptor *epd)
{
	if (!usb_endpoint_xfer_int(epd))
		return 0;

	return usb_endpoint_dir_in(epd);
}
/**
 * usb_endpoint_is_int_out - check if the endpoint is interrupt OUT
 * @epd: endpoint to be checked
 *
 * Returns true if the endpoint has interrupt transfer type and OUT direction,
 * otherwise it returns false.
 */
static inline int usb_endpoint_is_int_out(
				const struct usb_endpoint_descriptor *epd)
{
	if (!usb_endpoint_xfer_int(epd))
		return 0;

	return usb_endpoint_dir_out(epd);
}
/**
 * usb_endpoint_is_isoc_in - check if the endpoint is isochronous IN
 * @epd: endpoint to be checked
 *
 * Returns true if the endpoint has isochronous transfer type and IN direction,
 * otherwise it returns false.
 */
static inline int usb_endpoint_is_isoc_in(
				const struct usb_endpoint_descriptor *epd)
{
	if (!usb_endpoint_xfer_isoc(epd))
		return 0;

	return usb_endpoint_dir_in(epd);
}
/**
 * usb_endpoint_is_isoc_out - check if the endpoint is isochronous OUT
 * @epd: endpoint to be checked
 *
 * Returns true if the endpoint has isochronous transfer type and OUT direction,
 * otherwise it returns false.
 */
static inline int usb_endpoint_is_isoc_out(
				const struct usb_endpoint_descriptor *epd)
{
	if (!usb_endpoint_xfer_isoc(epd))
		return 0;

	return usb_endpoint_dir_out(epd);
}
/**
 * usb_endpoint_maxp - get endpoint's max packet size
 * @epd: endpoint to be checked
 *
 * Returns @epd's max packet bits [10:0]
 */
static inline int usb_endpoint_maxp(const struct usb_endpoint_descriptor *epd)
{
	int maxp = __le16_to_cpu(epd->wMaxPacketSize);

	return maxp & USB_ENDPOINT_MAXP_MASK;
}
/**
 * usb_endpoint_maxp_mult - get endpoint's transactional opportunities
 * @epd: endpoint to be checked
 *
 * Return @epd's wMaxPacketSize[12:11] + 1
 */
static inline int
usb_endpoint_maxp_mult(const struct usb_endpoint_descriptor *epd)
{
	int maxp = __le16_to_cpu(epd->wMaxPacketSize);

	return 1 + USB_EP_MAXP_MULT(maxp);
}
/**
 * usb_endpoint_interrupt_type - get the interrupt endpoint's type bits
 * @epd: endpoint to be checked
 *
 * Returns the USB_ENDPOINT_INTRTYPE bits of @epd's bmAttributes (bits 5:4,
 * redefined by USB 3.0 as the interrupt endpoint type).
 */
static inline int usb_endpoint_interrupt_type(
				const struct usb_endpoint_descriptor *epd)
{
	int intr_type = epd->bmAttributes & USB_ENDPOINT_INTRTYPE;

	return intr_type;
}
/*-------------------------------------------------------------------------*/
/* USB_DT_SSP_ISOC_ENDPOINT_COMP: SuperSpeedPlus Isochronous Endpoint Companion
 * descriptor
 */
struct usb_ssp_isoc_ep_comp_descriptor {
	__u8 bLength;
	__u8 bDescriptorType;
	/* NOTE(review): the 'wReseved' misspelling ships in this UAPI header
	 * as-is; renaming it would break userspace source compatibility. */
	__le16 wReseved;
	__le32 dwBytesPerInterval;
} __attribute__ ((packed));
#define USB_DT_SSP_ISOC_EP_COMP_SIZE 8
/*-------------------------------------------------------------------------*/
/* USB_DT_SS_ENDPOINT_COMP: SuperSpeed Endpoint Companion descriptor */
struct usb_ss_ep_comp_descriptor {
__u8 bLength;
__u8 bDescriptorType;
__u8 bMaxBurst;
__u8 bmAttributes;
__le16 wBytesPerInterval;
} __attribute__ ((packed));
#define USB_DT_SS_EP_COMP_SIZE 6
/* Bits 4:0 of bmAttributes if this is a bulk endpoint */
static inline int
usb_ss_max_streams(const struct usb_ss_ep_comp_descriptor *comp)
{
	int log2_streams;

	if (!comp)
		return 0;

	/* Field holds log2 of the stream count; zero means no streams. */
	log2_streams = comp->bmAttributes & 0x1f;
	if (!log2_streams)
		return 0;

	return 1 << log2_streams;
}
/* Bits 1:0 of bmAttributes if this is an isoc endpoint */
#define USB_SS_MULT(p) (1 + ((p) & 0x3))
/* Bit 7 of bmAttributes if a SSP isoc endpoint companion descriptor exists */
#define USB_SS_SSP_ISOC_COMP(p) ((p) & (1 << 7))
/*-------------------------------------------------------------------------*/
/* USB_DT_DEVICE_QUALIFIER: Device Qualifier descriptor */
struct usb_qualifier_descriptor {
__u8 bLength;
__u8 bDescriptorType;
__le16 bcdUSB;
__u8 bDeviceClass;
__u8 bDeviceSubClass;
__u8 bDeviceProtocol;
__u8 bMaxPacketSize0;
__u8 bNumConfigurations;
__u8 bRESERVED;
} __attribute__ ((packed));
/*-------------------------------------------------------------------------*/
/* USB_DT_OTG (from OTG 1.0a supplement) */
struct usb_otg_descriptor {
__u8 bLength;
__u8 bDescriptorType;
__u8 bmAttributes; /* support for HNP, SRP, etc */
} __attribute__ ((packed));
/* USB_DT_OTG (from OTG 2.0 supplement) */
struct usb_otg20_descriptor {
__u8 bLength;
__u8 bDescriptorType;
__u8 bmAttributes; /* support for HNP, SRP and ADP, etc */
__le16 bcdOTG; /* OTG and EH supplement release number
* in binary-coded decimal(i.e. 2.0 is 0200H)
*/
} __attribute__ ((packed));
/* from usb_otg_descriptor.bmAttributes */
#define USB_OTG_SRP (1 << 0)
#define USB_OTG_HNP (1 << 1) /* swap host/device roles */
#define USB_OTG_ADP (1 << 2) /* support ADP */
#define OTG_STS_SELECTOR 0xF000 /* OTG status selector */
/*-------------------------------------------------------------------------*/
/* USB_DT_DEBUG: for special highspeed devices, replacing serial console */
struct usb_debug_descriptor {
__u8 bLength;
__u8 bDescriptorType;
/* bulk endpoints with 8 byte maxpacket */
__u8 bDebugInEndpoint;
__u8 bDebugOutEndpoint;
} __attribute__((packed));
/*-------------------------------------------------------------------------*/
/* USB_DT_INTERFACE_ASSOCIATION: groups interfaces */
struct usb_interface_assoc_descriptor {
__u8 bLength;
__u8 bDescriptorType;
__u8 bFirstInterface;
__u8 bInterfaceCount;
__u8 bFunctionClass;
__u8 bFunctionSubClass;
__u8 bFunctionProtocol;
__u8 iFunction;
} __attribute__ ((packed));
#define USB_DT_INTERFACE_ASSOCIATION_SIZE 8
/*-------------------------------------------------------------------------*/
/* USB_DT_SECURITY: group of wireless security descriptors, including
* encryption types available for setting up a CC/association.
*/
struct usb_security_descriptor {
__u8 bLength;
__u8 bDescriptorType;
__le16 wTotalLength;
__u8 bNumEncryptionTypes;
} __attribute__((packed));
/*-------------------------------------------------------------------------*/
/* USB_DT_KEY: used with {GET,SET}_SECURITY_DATA; only public keys
* may be retrieved.
*/
struct usb_key_descriptor {
__u8 bLength;
__u8 bDescriptorType;
__u8 tTKID[3];
__u8 bReserved;
__u8 bKeyData[0];
} __attribute__((packed));
/*-------------------------------------------------------------------------*/
/* USB_DT_ENCRYPTION_TYPE: bundled in DT_SECURITY groups */
struct usb_encryption_descriptor {
__u8 bLength;
__u8 bDescriptorType;
__u8 bEncryptionType;
#define USB_ENC_TYPE_UNSECURE 0
#define USB_ENC_TYPE_WIRED 1 /* non-wireless mode */
#define USB_ENC_TYPE_CCM_1 2 /* aes128/cbc session */
#define USB_ENC_TYPE_RSA_1 3 /* rsa3072/sha1 auth */
__u8 bEncryptionValue; /* use in SET_ENCRYPTION */
__u8 bAuthKeyIndex;
} __attribute__((packed));
/*-------------------------------------------------------------------------*/
/* USB_DT_BOS: group of device-level capabilities */
struct usb_bos_descriptor {
__u8 bLength;
__u8 bDescriptorType;
__le16 wTotalLength;
__u8 bNumDeviceCaps;
} __attribute__((packed));
#define USB_DT_BOS_SIZE 5
/*-------------------------------------------------------------------------*/
/* USB_DT_DEVICE_CAPABILITY: grouped with BOS */
struct usb_dev_cap_header {
__u8 bLength;
__u8 bDescriptorType;
__u8 bDevCapabilityType;
} __attribute__((packed));
#define USB_CAP_TYPE_WIRELESS_USB 1
struct usb_wireless_cap_descriptor { /* Ultra Wide Band */
__u8 bLength;
__u8 bDescriptorType;
__u8 bDevCapabilityType;
__u8 bmAttributes;
#define USB_WIRELESS_P2P_DRD (1 << 1)
#define USB_WIRELESS_BEACON_MASK (3 << 2)
#define USB_WIRELESS_BEACON_SELF (1 << 2)
#define USB_WIRELESS_BEACON_DIRECTED (2 << 2)
#define USB_WIRELESS_BEACON_NONE (3 << 2)
__le16 wPHYRates; /* bit rates, Mbps */
#define USB_WIRELESS_PHY_53 (1 << 0) /* always set */
#define USB_WIRELESS_PHY_80 (1 << 1)
#define USB_WIRELESS_PHY_107 (1 << 2) /* always set */
#define USB_WIRELESS_PHY_160 (1 << 3)
#define USB_WIRELESS_PHY_200 (1 << 4) /* always set */
#define USB_WIRELESS_PHY_320 (1 << 5)
#define USB_WIRELESS_PHY_400 (1 << 6)
#define USB_WIRELESS_PHY_480 (1 << 7)
__u8 bmTFITXPowerInfo; /* TFI power levels */
__u8 bmFFITXPowerInfo; /* FFI power levels */
__le16 bmBandGroup;
__u8 bReserved;
} __attribute__((packed));
/* USB 2.0 Extension descriptor */
#define USB_CAP_TYPE_EXT 2
struct usb_ext_cap_descriptor { /* Link Power Management */
__u8 bLength;
__u8 bDescriptorType;
__u8 bDevCapabilityType;
__le32 bmAttributes;
#define USB_LPM_SUPPORT (1 << 1) /* supports LPM */
#define USB_BESL_SUPPORT (1 << 2) /* supports BESL */
#define USB_BESL_BASELINE_VALID (1 << 3) /* Baseline BESL valid*/
#define USB_BESL_DEEP_VALID (1 << 4) /* Deep BESL valid */
#define USB_GET_BESL_BASELINE(p) (((p) & (0xf << 8)) >> 8)
#define USB_GET_BESL_DEEP(p) (((p) & (0xf << 12)) >> 12)
} __attribute__((packed));
#define USB_DT_USB_EXT_CAP_SIZE 7
/*
* SuperSpeed USB Capability descriptor: Defines the set of SuperSpeed USB
* specific device level capabilities
*/
#define USB_SS_CAP_TYPE 3
struct usb_ss_cap_descriptor { /* Link Power Management */
__u8 bLength;
__u8 bDescriptorType;
__u8 bDevCapabilityType;
__u8 bmAttributes;
#define USB_LTM_SUPPORT (1 << 1) /* supports LTM */
__le16 wSpeedSupported;
#define USB_LOW_SPEED_OPERATION (1) /* Low speed operation */
#define USB_FULL_SPEED_OPERATION (1 << 1) /* Full speed operation */
#define USB_HIGH_SPEED_OPERATION (1 << 2) /* High speed operation */
#define USB_5GBPS_OPERATION (1 << 3) /* Operation at 5Gbps */
__u8 bFunctionalitySupport;
__u8 bU1devExitLat;
__le16 bU2DevExitLat;
} __attribute__((packed));
#define USB_DT_USB_SS_CAP_SIZE 10
/*
* Container ID Capability descriptor: Defines the instance unique ID used to
* identify the instance across all operating modes
*/
#define CONTAINER_ID_TYPE 4
struct usb_ss_container_id_descriptor {
__u8 bLength;
__u8 bDescriptorType;
__u8 bDevCapabilityType;
__u8 bReserved;
__u8 ContainerID[16]; /* 128-bit number */
} __attribute__((packed));
#define USB_DT_USB_SS_CONTN_ID_SIZE 20
/*
* SuperSpeed Plus USB Capability descriptor: Defines the set of
* SuperSpeed Plus USB specific device level capabilities
*/
#define USB_SSP_CAP_TYPE 0xa
struct usb_ssp_cap_descriptor {
__u8 bLength;
__u8 bDescriptorType;
__u8 bDevCapabilityType;
__u8 bReserved;
__le32 bmAttributes;
#define USB_SSP_SUBLINK_SPEED_ATTRIBS (0x1f << 0) /* sublink speed entries */
#define USB_SSP_SUBLINK_SPEED_IDS (0xf << 5) /* speed ID entries */
__le16 wFunctionalitySupport;
#define USB_SSP_MIN_SUBLINK_SPEED_ATTRIBUTE_ID (0xf)
#define USB_SSP_MIN_RX_LANE_COUNT (0xf << 8)
#define USB_SSP_MIN_TX_LANE_COUNT (0xf << 12)
__le16 wReserved;
__le32 bmSublinkSpeedAttr[1]; /* list of sublink speed attrib entries */
#define USB_SSP_SUBLINK_SPEED_SSID (0xf) /* sublink speed ID */
#define USB_SSP_SUBLINK_SPEED_LSE (0x3 << 4) /* Lanespeed exponent */
#define USB_SSP_SUBLINK_SPEED_ST (0x3 << 6) /* Sublink type */
#define USB_SSP_SUBLINK_SPEED_RSVD (0x3f << 8) /* Reserved */
#define USB_SSP_SUBLINK_SPEED_LP (0x3 << 14) /* Link protocol */
#define USB_SSP_SUBLINK_SPEED_LSM (0xff << 16) /* Lanespeed mantissa */
} __attribute__((packed));
/*
* USB Power Delivery Capability Descriptor:
* Defines capabilities for PD
*/
/* Defines the various PD Capabilities of this device */
#define USB_PD_POWER_DELIVERY_CAPABILITY 0x06
/* Provides information on each battery supported by the device */
#define USB_PD_BATTERY_INFO_CAPABILITY 0x07
/* The Consumer characteristics of a Port on the device */
#define USB_PD_PD_CONSUMER_PORT_CAPABILITY 0x08
/* The provider characteristics of a Port on the device */
#define USB_PD_PD_PROVIDER_PORT_CAPABILITY 0x09
struct usb_pd_cap_descriptor {
__u8 bLength;
__u8 bDescriptorType;
__u8 bDevCapabilityType; /* set to USB_PD_POWER_DELIVERY_CAPABILITY */
__u8 bReserved;
__le32 bmAttributes;
#define USB_PD_CAP_BATTERY_CHARGING (1 << 1) /* supports Battery Charging specification */
#define USB_PD_CAP_USB_PD (1 << 2) /* supports USB Power Delivery specification */
#define USB_PD_CAP_PROVIDER (1 << 3) /* can provide power */
#define USB_PD_CAP_CONSUMER (1 << 4) /* can consume power */
#define USB_PD_CAP_CHARGING_POLICY (1 << 5) /* supports CHARGING_POLICY feature */
#define USB_PD_CAP_TYPE_C_CURRENT (1 << 6) /* supports power capabilities defined in the USB Type-C Specification */
#define USB_PD_CAP_PWR_AC (1 << 8)
#define USB_PD_CAP_PWR_BAT (1 << 9)
#define USB_PD_CAP_PWR_USE_V_BUS (1 << 14)
__le16 bmProviderPorts; /* Bit zero refers to the UFP of the device */
__le16 bmConsumerPorts;
__le16 bcdBCVersion;
__le16 bcdPDVersion;
__le16 bcdUSBTypeCVersion;
} __attribute__((packed));
struct usb_pd_cap_battery_info_descriptor {
__u8 bLength;
__u8 bDescriptorType;
__u8 bDevCapabilityType;
/* Index of string descriptor shall contain the user friendly name for this battery */
__u8 iBattery;
/* Index of string descriptor shall contain the Serial Number String for this battery */
__u8 iSerial;
__u8 iManufacturer;
__u8 bBatteryId; /* uniquely identifies this battery in status Messages */
__u8 bReserved;
/*
* Shall contain the Battery Charge value above which this
* battery is considered to be fully charged but not necessarily
* “topped off.”
*/
__le32 dwChargedThreshold; /* in mWh */
/*
* Shall contain the minimum charge level of this battery such
* that above this threshold, a device can be assured of being
* able to power up successfully (see Battery Charging 1.2).
*/
__le32 dwWeakThreshold; /* in mWh */
__le32 dwBatteryDesignCapacity; /* in mWh */
__le32 dwBatteryLastFullchargeCapacity; /* in mWh */
} __attribute__((packed));
struct usb_pd_cap_consumer_port_descriptor {
	__u8 bLength;
	__u8 bDescriptorType;
	__u8 bDevCapabilityType;
	__u8 bReserved;
	__u8 bmCapabilities;
/* port will operate under: */
#define USB_PD_CAP_CONSUMER_BC		(1 << 0) /* BC */
#define USB_PD_CAP_CONSUMER_PD		(1 << 1) /* PD */
#define USB_PD_CAP_CONSUMER_TYPE_C	(1 << 2) /* USB Type-C Current */
	__le16 wMinVoltage; /* in 50mV units */
	__le16 wMaxVoltage; /* in 50mV units */
	__u16 wReserved;
	__le32 dwMaxOperatingPower; /* in 10 mW - operating at steady state */
	__le32 dwMaxPeakPower; /* in 10mW units - operating at peak power */
	__le32 dwMaxPeakPowerTime; /* in 100ms units - duration of peak */
#define USB_PD_CAP_CONSUMER_UNKNOWN_PEAK_POWER_TIME 0xffff
} __attribute__((packed));
struct usb_pd_cap_provider_port_descriptor {
	__u8 bLength;
	__u8 bDescriptorType;
	__u8 bDevCapabilityType;
	__u8 bReserved1;
	__u8 bmCapabilities;
/* port will operate under: */
#define USB_PD_CAP_PROVIDER_BC		(1 << 0) /* BC */
#define USB_PD_CAP_PROVIDER_PD		(1 << 1) /* PD */
#define USB_PD_CAP_PROVIDER_TYPE_C	(1 << 2) /* USB Type-C Current */
	__u8 bNumOfPDObjects;
	__u8 bReserved2;
	/* Flexible array: bNumOfPDObjects entries follow. */
	__le32 wPowerDataObject[];
} __attribute__((packed));
/*
* Precision time measurement capability descriptor: advertised by devices and
* hubs that support PTM
*/
#define USB_PTM_CAP_TYPE 0xb
struct usb_ptm_cap_descriptor {
__u8 bLength;
__u8 bDescriptorType;
__u8 bDevCapabilityType;
} __attribute__((packed));
/*
* The size of the descriptor for the Sublink Speed Attribute Count
* (SSAC) specified in bmAttributes[4:0].
*/
#define USB_DT_USB_SSP_CAP_SIZE(ssac) (16 + ssac * 4)
/*-------------------------------------------------------------------------*/
/* USB_DT_WIRELESS_ENDPOINT_COMP: companion descriptor associated with
* each endpoint descriptor for a wireless device
*/
struct usb_wireless_ep_comp_descriptor {
__u8 bLength;
__u8 bDescriptorType;
__u8 bMaxBurst;
__u8 bMaxSequence;
__le16 wMaxStreamDelay;
__le16 wOverTheAirPacketSize;
__u8 bOverTheAirInterval;
__u8 bmCompAttributes;
#define USB_ENDPOINT_SWITCH_MASK 0x03 /* in bmCompAttributes */
#define USB_ENDPOINT_SWITCH_NO 0
#define USB_ENDPOINT_SWITCH_SWITCH 1
#define USB_ENDPOINT_SWITCH_SCALE 2
} __attribute__((packed));
/*-------------------------------------------------------------------------*/
/* USB_REQ_SET_HANDSHAKE is a four-way handshake used between a wireless
* host and a device for connection set up, mutual authentication, and
* exchanging short lived session keys. The handshake depends on a CC.
*/
struct usb_handshake {
__u8 bMessageNumber;
__u8 bStatus;
__u8 tTKID[3];
__u8 bReserved;
__u8 CDID[16];
__u8 nonce[16];
__u8 MIC[8];
} __attribute__((packed));
/*-------------------------------------------------------------------------*/
/* USB_REQ_SET_CONNECTION modifies or revokes a connection context (CC).
* A CC may also be set up using non-wireless secure channels (including
* wired USB!), and some devices may support CCs with multiple hosts.
*/
struct usb_connection_context {
__u8 CHID[16]; /* persistent host id */
__u8 CDID[16]; /* device id (unique w/in host context) */
__u8 CK[16]; /* connection key */
} __attribute__((packed));
/*-------------------------------------------------------------------------*/
/* USB 2.0 defines three speeds, here's how Linux identifies them */
enum usb_device_speed {
USB_SPEED_UNKNOWN = 0, /* enumerating */
USB_SPEED_LOW, USB_SPEED_FULL, /* usb 1.1 */
USB_SPEED_HIGH, /* usb 2.0 */
USB_SPEED_WIRELESS, /* wireless (usb 2.5) */
USB_SPEED_SUPER, /* usb 3.0 */
USB_SPEED_SUPER_PLUS, /* usb 3.1 */
};
enum usb_device_state {
/* NOTATTACHED isn't in the USB spec, and this state acts
* the same as ATTACHED ... but it's clearer this way.
*/
USB_STATE_NOTATTACHED = 0,
/* chapter 9 and authentication (wireless) device states */
USB_STATE_ATTACHED,
USB_STATE_POWERED, /* wired */
USB_STATE_RECONNECTING, /* auth */
USB_STATE_UNAUTHENTICATED, /* auth */
USB_STATE_DEFAULT, /* limited function */
USB_STATE_ADDRESS,
USB_STATE_CONFIGURED, /* most functions */
USB_STATE_SUSPENDED
/* NOTE: there are actually four different SUSPENDED
* states, returning to POWERED, DEFAULT, ADDRESS, or
* CONFIGURED respectively when SOF tokens flow again.
* At this level there's no difference between L1 and L2
* suspend states. (L2 being original USB 1.1 suspend.)
*/
};
enum usb3_link_state {
USB3_LPM_U0 = 0,
USB3_LPM_U1,
USB3_LPM_U2,
USB3_LPM_U3
};
/*
* A U1 timeout of 0x0 means the parent hub will reject any transitions to U1.
* 0xff means the parent hub will accept transitions to U1, but will not
* initiate a transition.
*
* A U1 timeout of 0x1 to 0x7F also causes the hub to initiate a transition to
* U1 after that many microseconds. Timeouts of 0x80 to 0xFE are reserved
* values.
*
* A U2 timeout of 0x0 means the parent hub will reject any transitions to U2.
* 0xff means the parent hub will accept transitions to U2, but will not
* initiate a transition.
*
* A U2 timeout of 0x1 to 0xFE also causes the hub to initiate a transition to
* U2 after N*256 microseconds. Therefore a U2 timeout value of 0x1 means a U2
* idle timer of 256 microseconds, 0x2 means 512 microseconds, 0xFE means
* 65.024ms.
*/
#define USB3_LPM_DISABLED 0x0
#define USB3_LPM_U1_MAX_TIMEOUT 0x7F
#define USB3_LPM_U2_MAX_TIMEOUT 0xFE
#define USB3_LPM_DEVICE_INITIATED 0xFF
struct usb_set_sel_req {
__u8 u1_sel;
__u8 u1_pel;
__le16 u2_sel;
__le16 u2_pel;
} __attribute__ ((packed));
/*
* The Set System Exit Latency control transfer provides one byte each for
* U1 SEL and U1 PEL, so the max exit latency is 0xFF. U2 SEL and U2 PEL each
* are two bytes long.
*/
#define USB3_LPM_MAX_U1_SEL_PEL 0xFF
#define USB3_LPM_MAX_U2_SEL_PEL 0xFFFF
/*-------------------------------------------------------------------------*/
/*
* As per USB compliance update, a device that is actively drawing
* more than 100mA from USB must report itself as bus-powered in
* the GetStatus(DEVICE) call.
* http://compliance.usb.org/index.asp?UpdateFile=Electrical&Format=Standard#34
*/
#define USB_SELF_POWER_VBUS_MAX_DRAW 100
#endif /* _UAPI__LINUX_USB_CH9_H */
|
2912_1
|
crossvul
|
h
|
CWE-119
|
Improper Restriction of Operations within the Bounds of a Memory Buffer - Certain languages allow direct addressing of memory locations and do not automatically ensure that these locations are valid for the memory buffer that is being referenced.
|
c
|
/* -*- C++ -*-
* File: libraw_const.h
* Copyright 2008-2017 LibRaw LLC (info@libraw.org)
* Created: Sat Mar 8 , 2008
* LibRaw error codes
LibRaw is free software; you can redistribute it and/or modify
it under the terms of the one of two licenses as you choose:
1. GNU LESSER GENERAL PUBLIC LICENSE version 2.1
(See file LICENSE.LGPL provided in LibRaw distribution archive for details).
2. COMMON DEVELOPMENT AND DISTRIBUTION LICENSE (CDDL) Version 1.0
(See file LICENSE.CDDL provided in LibRaw distribution archive for details).
*/
#ifndef _LIBRAW_ERRORS_H
#define _LIBRAW_ERRORS_H
#define LIBRAW_DEFAULT_ADJUST_MAXIMUM_THRESHOLD 0.75
#define LIBRAW_DEFAULT_AUTO_BRIGHTNESS_THRESHOLD 0.01
#define LIBRAW_IFD_MAXCOUNT 10
enum LibRaw_openbayer_patterns
{
LIBRAW_OPENBAYER_RGGB = 0x94,
LIBRAW_OPENBAYER_BGGR = 0x16,
LIBRAW_OPENBAYER_GRBG = 0x61,
LIBRAW_OPENBAYER_GBRG = 0x49
};
enum LibRaw_dngfields_marks
{
LIBRAW_DNGFM_FORWARDMATRIX = 1,
LIBRAW_DNGFM_ILLUMINANT = 2,
LIBRAW_DNGFM_COLORMATRIX = 4,
LIBRAW_DNGFM_CALIBRATION = 8,
LIBRAW_DNGFM_ANALOGBALANCE = 16,
LIBRAW_DNGFM_BLACK = 32,
LIBRAW_DNGFM_WHITE = 64,
LIBRAW_DNGFM_OPCODE2 = 128,
LIBRAW_DNGFM_LINTABLE = 256,
LIBRAW_DNGFM_CROPORIGIN = 512,
LIBRAW_DNGFM_CROPSIZE = 1024,
LIBRAW_DNGFM_PREVIEWCS = 2048
};
enum LibRaw_whitebalance_code
{
/*
EXIF light sources
12 = FL-D; Daylight fluorescent (D 5700K – 7100K) (F1,F5)
13 = FL-N; Day white fluorescent (N 4600K – 5400K) (F7,F8)
14 = FL-W; Cool white fluorescent (W 3900K – 4500K) (F2,F6, office, store, warehouse)
15 = FL-WW; White fluorescent (WW 3200K – 3700K) (F3, residential)
16 = FL-L; Soft/Warm white fluorescent (L 2600K - 3250K) (F4, kitchen, bath)
*/
LIBRAW_WBI_Unknown = 0,
LIBRAW_WBI_Daylight = 1,
LIBRAW_WBI_Fluorescent = 2,
LIBRAW_WBI_Tungsten = 3,
LIBRAW_WBI_Flash = 4,
LIBRAW_WBI_FineWeather = 9,
LIBRAW_WBI_Cloudy = 10,
LIBRAW_WBI_Shade = 11,
LIBRAW_WBI_FL_D = 12,
LIBRAW_WBI_FL_N = 13,
LIBRAW_WBI_FL_W = 14,
LIBRAW_WBI_FL_WW = 15,
LIBRAW_WBI_FL_L = 16,
LIBRAW_WBI_Ill_A = 17,
LIBRAW_WBI_Ill_B = 18,
LIBRAW_WBI_Ill_C = 19,
LIBRAW_WBI_D55 = 20,
LIBRAW_WBI_D65 = 21,
LIBRAW_WBI_D75 = 22,
LIBRAW_WBI_D50 = 23,
LIBRAW_WBI_StudioTungsten = 24,
LIBRAW_WBI_Sunset = 64,
LIBRAW_WBI_Auto = 82,
LIBRAW_WBI_Custom = 83,
LIBRAW_WBI_Auto1 = 85,
LIBRAW_WBI_Auto2 = 86,
LIBRAW_WBI_Auto3 = 87,
LIBRAW_WBI_Auto4 = 88,
LIBRAW_WBI_Custom1 = 90,
LIBRAW_WBI_Custom2 = 91,
LIBRAW_WBI_Custom3 = 93,
LIBRAW_WBI_Custom4 = 93,
LIBRAW_WBI_Custom5 = 94,
LIBRAW_WBI_Custom6 = 95,
LIBRAW_WBI_Measured = 100,
LIBRAW_WBI_Underwater = 120,
LIBRAW_WBI_Other = 255
};
enum LibRaw_MultiExposure_related
{
LIBRAW_ME_NONE = 0,
LIBRAW_ME_SIMPLE = 1,
LIBRAW_ME_OVERLAY = 2,
LIBRAW_ME_HDR = 3
};
enum LibRaw_dng_processing
{
LIBRAW_DNG_NONE = 0,
LIBRAW_DNG_FLOAT = 1,
LIBRAW_DNG_LINEAR = 2,
LIBRAW_DNG_DEFLATE = 4,
LIBRAW_DNG_XTRANS = 8,
LIBRAW_DNG_OTHER = 16,
LIBRAW_DNG_8BIT = 32,
/*LIBRAW_DNG_LARGERANGE=64,*/ /* more than 16 bit integer */
LIBRAW_DNG_ALL = LIBRAW_DNG_FLOAT | LIBRAW_DNG_LINEAR | LIBRAW_DNG_XTRANS | LIBRAW_DNG_8BIT |
LIBRAW_DNG_OTHER /* |LIBRAW_DNG_LARGERANGE */,
LIBRAW_DNG_DEFAULT = LIBRAW_DNG_FLOAT | LIBRAW_DNG_LINEAR | LIBRAW_DNG_DEFLATE | LIBRAW_DNG_8BIT
};
enum LibRaw_runtime_capabilities
{
LIBRAW_CAPS_RAWSPEED = 1,
LIBRAW_CAPS_DNGSDK = 2
};
enum LibRaw_camera_mounts
{
LIBRAW_MOUNT_Unknown = 0,
LIBRAW_MOUNT_Minolta_A = 1,
LIBRAW_MOUNT_Sony_E = 2,
LIBRAW_MOUNT_Canon_EF = 3,
LIBRAW_MOUNT_Canon_EF_S = 4,
LIBRAW_MOUNT_Canon_EF_M = 5,
LIBRAW_MOUNT_Nikon_F = 6,
LIBRAW_MOUNT_Nikon_CX = 7, /* used in Nikon 1 series */
LIBRAW_MOUNT_FT = 8, /* original 4/3 */
LIBRAW_MOUNT_mFT = 9, /* micro 4/3 */
LIBRAW_MOUNT_Pentax_K = 10,
LIBRAW_MOUNT_Pentax_Q = 11,
LIBRAW_MOUNT_Pentax_645 = 12,
LIBRAW_MOUNT_Fuji_X = 13,
LIBRAW_MOUNT_Leica_M = 14,
LIBRAW_MOUNT_Leica_R = 15,
LIBRAW_MOUNT_Leica_S = 16,
LIBRAW_MOUNT_Samsung_NX = 17,
LIBRAW_MOUNT_RicohModule = 18,
LIBRAW_MOUNT_Samsung_NX_M = 19,
LIBRAW_MOUNT_Leica_T = 20,
LIBRAW_MOUNT_Contax_N = 21,
LIBRAW_MOUNT_Sigma_X3F = 22,
LIBRAW_MOUNT_Leica_SL = 23,
LIBRAW_MOUNT_FixedLens = 99
};
enum LibRaw_camera_formats
{
LIBRAW_FORMAT_APSC = 1,
LIBRAW_FORMAT_FF = 2,
LIBRAW_FORMAT_MF = 3,
LIBRAW_FORMAT_APSH = 4,
LIBRAW_FORMAT_1INCH = 5,
LIBRAW_FORMAT_FT = 8
};
enum LibRaw_sony_cameratypes
{
LIBRAW_SONY_DSC = 1,
LIBRAW_SONY_DSLR = 2,
LIBRAW_SONY_NEX = 3,
LIBRAW_SONY_SLT = 4,
LIBRAW_SONY_ILCE = 5,
LIBRAW_SONY_ILCA = 6
};
enum LibRaw_processing_options
{
LIBRAW_PROCESSING_SONYARW2_NONE = 0,
LIBRAW_PROCESSING_SONYARW2_BASEONLY = 1,
LIBRAW_PROCESSING_SONYARW2_DELTAONLY = 1 << 1,
LIBRAW_PROCESSING_SONYARW2_DELTAZEROBASE = 1 << 2,
LIBRAW_PROCESSING_SONYARW2_DELTATOVALUE = 1 << 3,
LIBRAW_PROCESSING_SONYARW2_ALLFLAGS = LIBRAW_PROCESSING_SONYARW2_BASEONLY + LIBRAW_PROCESSING_SONYARW2_DELTAONLY +
LIBRAW_PROCESSING_SONYARW2_DELTAZEROBASE +
LIBRAW_PROCESSING_SONYARW2_DELTATOVALUE,
LIBRAW_PROCESSING_DP2Q_INTERPOLATERG = 1 << 4,
LIBRAW_PROCESSING_DP2Q_INTERPOLATEAF = 1 << 5,
LIBRAW_PROCESSING_PENTAX_PS_ALLFRAMES = 1 << 6,
LIBRAW_PROCESSING_CONVERTFLOAT_TO_INT = 1 << 7,
LIBRAW_PROCESSING_SRAW_NO_RGB = 1 << 8,
LIBRAW_PROCESSING_SRAW_NO_INTERPOLATE = 1 << 9,
LIBRAW_PROCESSING_NO_ROTATE_FOR_KODAK_THUMBNAILS = 1 << 11,
LIBRAW_PROCESSING_USE_DNG_DEFAULT_CROP = 1 << 12,
LIBRAW_PROCESSING_USE_PPM16_THUMBS = 1 << 13
};
enum LibRaw_decoder_flags
{
LIBRAW_DECODER_HASCURVE = 1 << 4,
LIBRAW_DECODER_SONYARW2 = 1 << 5,
LIBRAW_DECODER_TRYRAWSPEED = 1 << 6,
LIBRAW_DECODER_OWNALLOC = 1 << 7,
LIBRAW_DECODER_FIXEDMAXC = 1 << 8,
LIBRAW_DECODER_ADOBECOPYPIXEL = 1 << 9,
LIBRAW_DECODER_LEGACY_WITH_MARGINS = 1 << 10,
LIBRAW_DECODER_NOTSET = 1 << 15
};
#define LIBRAW_XTRANS 9
enum LibRaw_constructor_flags
{
LIBRAW_OPTIONS_NONE = 0,
LIBRAW_OPIONS_NO_MEMERR_CALLBACK = 1,
LIBRAW_OPIONS_NO_DATAERR_CALLBACK = 1 << 1
};
enum LibRaw_warnings
{
LIBRAW_WARN_NONE = 0,
LIBRAW_WARN_BAD_CAMERA_WB = 1 << 2,
LIBRAW_WARN_NO_METADATA = 1 << 3,
LIBRAW_WARN_NO_JPEGLIB = 1 << 4,
LIBRAW_WARN_NO_EMBEDDED_PROFILE = 1 << 5,
LIBRAW_WARN_NO_INPUT_PROFILE = 1 << 6,
LIBRAW_WARN_BAD_OUTPUT_PROFILE = 1 << 7,
LIBRAW_WARN_NO_BADPIXELMAP = 1 << 8,
LIBRAW_WARN_BAD_DARKFRAME_FILE = 1 << 9,
LIBRAW_WARN_BAD_DARKFRAME_DIM = 1 << 10,
LIBRAW_WARN_NO_JASPER = 1 << 11,
LIBRAW_WARN_RAWSPEED_PROBLEM = 1 << 12,
LIBRAW_WARN_RAWSPEED_UNSUPPORTED = 1 << 13,
LIBRAW_WARN_RAWSPEED_PROCESSED = 1 << 14,
LIBRAW_WARN_FALLBACK_TO_AHD = 1 << 15
};
enum LibRaw_exceptions
{
LIBRAW_EXCEPTION_NONE = 0,
LIBRAW_EXCEPTION_ALLOC = 1,
LIBRAW_EXCEPTION_DECODE_RAW = 2,
LIBRAW_EXCEPTION_DECODE_JPEG = 3,
LIBRAW_EXCEPTION_IO_EOF = 4,
LIBRAW_EXCEPTION_IO_CORRUPT = 5,
LIBRAW_EXCEPTION_CANCELLED_BY_CALLBACK = 6,
LIBRAW_EXCEPTION_BAD_CROP = 7,
LIBRAW_EXCEPTION_IO_BADFILE = 8,
LIBRAW_EXCEPTION_DECODE_JPEG2000 = 9
};
enum LibRaw_progress
{
LIBRAW_PROGRESS_START = 0,
LIBRAW_PROGRESS_OPEN = 1,
LIBRAW_PROGRESS_IDENTIFY = 1 << 1,
LIBRAW_PROGRESS_SIZE_ADJUST = 1 << 2,
LIBRAW_PROGRESS_LOAD_RAW = 1 << 3,
LIBRAW_PROGRESS_RAW2_IMAGE = 1 << 4,
LIBRAW_PROGRESS_REMOVE_ZEROES = 1 << 5,
LIBRAW_PROGRESS_BAD_PIXELS = 1 << 6,
LIBRAW_PROGRESS_DARK_FRAME = 1 << 7,
LIBRAW_PROGRESS_FOVEON_INTERPOLATE = 1 << 8,
LIBRAW_PROGRESS_SCALE_COLORS = 1 << 9,
LIBRAW_PROGRESS_PRE_INTERPOLATE = 1 << 10,
LIBRAW_PROGRESS_INTERPOLATE = 1 << 11,
LIBRAW_PROGRESS_MIX_GREEN = 1 << 12,
LIBRAW_PROGRESS_MEDIAN_FILTER = 1 << 13,
LIBRAW_PROGRESS_HIGHLIGHTS = 1 << 14,
LIBRAW_PROGRESS_FUJI_ROTATE = 1 << 15,
LIBRAW_PROGRESS_FLIP = 1 << 16,
LIBRAW_PROGRESS_APPLY_PROFILE = 1 << 17,
LIBRAW_PROGRESS_CONVERT_RGB = 1 << 18,
LIBRAW_PROGRESS_STRETCH = 1 << 19,
/* reserved */
LIBRAW_PROGRESS_STAGE20 = 1 << 20,
LIBRAW_PROGRESS_STAGE21 = 1 << 21,
LIBRAW_PROGRESS_STAGE22 = 1 << 22,
LIBRAW_PROGRESS_STAGE23 = 1 << 23,
LIBRAW_PROGRESS_STAGE24 = 1 << 24,
LIBRAW_PROGRESS_STAGE25 = 1 << 25,
LIBRAW_PROGRESS_STAGE26 = 1 << 26,
LIBRAW_PROGRESS_STAGE27 = 1 << 27,
LIBRAW_PROGRESS_THUMB_LOAD = 1 << 28,
LIBRAW_PROGRESS_TRESERVED1 = 1 << 29,
LIBRAW_PROGRESS_TRESERVED2 = 1 << 30,
LIBRAW_PROGRESS_TRESERVED3 = 1 << 31
};
#define LIBRAW_PROGRESS_THUMB_MASK 0x0fffffff
enum LibRaw_errors
{
LIBRAW_SUCCESS = 0,
LIBRAW_UNSPECIFIED_ERROR = -1,
LIBRAW_FILE_UNSUPPORTED = -2,
LIBRAW_REQUEST_FOR_NONEXISTENT_IMAGE = -3,
LIBRAW_OUT_OF_ORDER_CALL = -4,
LIBRAW_NO_THUMBNAIL = -5,
LIBRAW_UNSUPPORTED_THUMBNAIL = -6,
LIBRAW_INPUT_CLOSED = -7,
LIBRAW_NOT_IMPLEMENTED = -8,
LIBRAW_UNSUFFICIENT_MEMORY = -100007,
LIBRAW_DATA_ERROR = -100008,
LIBRAW_IO_ERROR = -100009,
LIBRAW_CANCELLED_BY_CALLBACK = -100010,
LIBRAW_BAD_CROP = -100011
};
#define LIBRAW_FATAL_ERROR(ec) ((ec) < -100000)
enum LibRaw_thumbnail_formats
{
LIBRAW_THUMBNAIL_UNKNOWN = 0,
LIBRAW_THUMBNAIL_JPEG = 1,
LIBRAW_THUMBNAIL_BITMAP = 2,
LIBRAW_THUMBNAIL_BITMAP16 = 3,
LIBRAW_THUMBNAIL_LAYER = 4,
LIBRAW_THUMBNAIL_ROLLEI = 5
};
enum LibRaw_image_formats
{
LIBRAW_IMAGE_JPEG = 1,
LIBRAW_IMAGE_BITMAP = 2
};
#endif
|
/* -*- C++ -*-
* File: libraw_const.h
* Copyright 2008-2017 LibRaw LLC (info@libraw.org)
* Created: Sat Mar 8 , 2008
* LibRaw error codes
LibRaw is free software; you can redistribute it and/or modify
it under the terms of the one of two licenses as you choose:
1. GNU LESSER GENERAL PUBLIC LICENSE version 2.1
(See file LICENSE.LGPL provided in LibRaw distribution archive for details).
2. COMMON DEVELOPMENT AND DISTRIBUTION LICENSE (CDDL) Version 1.0
(See file LICENSE.CDDL provided in LibRaw distribution archive for details).
*/
#ifndef _LIBRAW_ERRORS_H
#define _LIBRAW_ERRORS_H
#define LIBRAW_DEFAULT_ADJUST_MAXIMUM_THRESHOLD 0.75
#define LIBRAW_DEFAULT_AUTO_BRIGHTNESS_THRESHOLD 0.01
#define LIBRAW_IFD_MAXCOUNT 10
enum LibRaw_openbayer_patterns
{
LIBRAW_OPENBAYER_RGGB = 0x94,
LIBRAW_OPENBAYER_BGGR = 0x16,
LIBRAW_OPENBAYER_GRBG = 0x61,
LIBRAW_OPENBAYER_GBRG = 0x49
};
enum LibRaw_dngfields_marks
{
LIBRAW_DNGFM_FORWARDMATRIX = 1,
LIBRAW_DNGFM_ILLUMINANT = 2,
LIBRAW_DNGFM_COLORMATRIX = 4,
LIBRAW_DNGFM_CALIBRATION = 8,
LIBRAW_DNGFM_ANALOGBALANCE = 16,
LIBRAW_DNGFM_BLACK = 32,
LIBRAW_DNGFM_WHITE = 64,
LIBRAW_DNGFM_OPCODE2 = 128,
LIBRAW_DNGFM_LINTABLE = 256,
LIBRAW_DNGFM_CROPORIGIN = 512,
LIBRAW_DNGFM_CROPSIZE = 1024,
LIBRAW_DNGFM_PREVIEWCS = 2048
};
enum LibRaw_whitebalance_code
{
/*
EXIF light sources
12 = FL-D; Daylight fluorescent (D 5700K – 7100K) (F1,F5)
13 = FL-N; Day white fluorescent (N 4600K – 5400K) (F7,F8)
14 = FL-W; Cool white fluorescent (W 3900K – 4500K) (F2,F6, office, store, warehouse)
15 = FL-WW; White fluorescent (WW 3200K – 3700K) (F3, residential)
16 = FL-L; Soft/Warm white fluorescent (L 2600K - 3250K) (F4, kitchen, bath)
*/
LIBRAW_WBI_Unknown = 0,
LIBRAW_WBI_Daylight = 1,
LIBRAW_WBI_Fluorescent = 2,
LIBRAW_WBI_Tungsten = 3,
LIBRAW_WBI_Flash = 4,
LIBRAW_WBI_FineWeather = 9,
LIBRAW_WBI_Cloudy = 10,
LIBRAW_WBI_Shade = 11,
LIBRAW_WBI_FL_D = 12,
LIBRAW_WBI_FL_N = 13,
LIBRAW_WBI_FL_W = 14,
LIBRAW_WBI_FL_WW = 15,
LIBRAW_WBI_FL_L = 16,
LIBRAW_WBI_Ill_A = 17,
LIBRAW_WBI_Ill_B = 18,
LIBRAW_WBI_Ill_C = 19,
LIBRAW_WBI_D55 = 20,
LIBRAW_WBI_D65 = 21,
LIBRAW_WBI_D75 = 22,
LIBRAW_WBI_D50 = 23,
LIBRAW_WBI_StudioTungsten = 24,
LIBRAW_WBI_Sunset = 64,
LIBRAW_WBI_Auto = 82,
LIBRAW_WBI_Custom = 83,
LIBRAW_WBI_Auto1 = 85,
LIBRAW_WBI_Auto2 = 86,
LIBRAW_WBI_Auto3 = 87,
LIBRAW_WBI_Auto4 = 88,
LIBRAW_WBI_Custom1 = 90,
LIBRAW_WBI_Custom2 = 91,
LIBRAW_WBI_Custom3 = 93,
LIBRAW_WBI_Custom4 = 93,
LIBRAW_WBI_Custom5 = 94,
LIBRAW_WBI_Custom6 = 95,
LIBRAW_WBI_Measured = 100,
LIBRAW_WBI_Underwater = 120,
LIBRAW_WBI_Other = 255
};
enum LibRaw_MultiExposure_related
{
LIBRAW_ME_NONE = 0,
LIBRAW_ME_SIMPLE = 1,
LIBRAW_ME_OVERLAY = 2,
LIBRAW_ME_HDR = 3
};
enum LibRaw_dng_processing
{
LIBRAW_DNG_NONE = 0,
LIBRAW_DNG_FLOAT = 1,
LIBRAW_DNG_LINEAR = 2,
LIBRAW_DNG_DEFLATE = 4,
LIBRAW_DNG_XTRANS = 8,
LIBRAW_DNG_OTHER = 16,
LIBRAW_DNG_8BIT = 32,
/*LIBRAW_DNG_LARGERANGE=64,*/ /* more than 16 bit integer */
LIBRAW_DNG_ALL = LIBRAW_DNG_FLOAT | LIBRAW_DNG_LINEAR | LIBRAW_DNG_XTRANS | LIBRAW_DNG_8BIT |
LIBRAW_DNG_OTHER /* |LIBRAW_DNG_LARGERANGE */,
LIBRAW_DNG_DEFAULT = LIBRAW_DNG_FLOAT | LIBRAW_DNG_LINEAR | LIBRAW_DNG_DEFLATE | LIBRAW_DNG_8BIT
};
enum LibRaw_runtime_capabilities
{
LIBRAW_CAPS_RAWSPEED = 1,
LIBRAW_CAPS_DNGSDK = 2
};
enum LibRaw_camera_mounts
{
LIBRAW_MOUNT_Unknown = 0,
LIBRAW_MOUNT_Minolta_A = 1,
LIBRAW_MOUNT_Sony_E = 2,
LIBRAW_MOUNT_Canon_EF = 3,
LIBRAW_MOUNT_Canon_EF_S = 4,
LIBRAW_MOUNT_Canon_EF_M = 5,
LIBRAW_MOUNT_Nikon_F = 6,
LIBRAW_MOUNT_Nikon_CX = 7, /* used in Nikon 1 series */
LIBRAW_MOUNT_FT = 8, /* original 4/3 */
LIBRAW_MOUNT_mFT = 9, /* micro 4/3 */
LIBRAW_MOUNT_Pentax_K = 10,
LIBRAW_MOUNT_Pentax_Q = 11,
LIBRAW_MOUNT_Pentax_645 = 12,
LIBRAW_MOUNT_Fuji_X = 13,
LIBRAW_MOUNT_Leica_M = 14,
LIBRAW_MOUNT_Leica_R = 15,
LIBRAW_MOUNT_Leica_S = 16,
LIBRAW_MOUNT_Samsung_NX = 17,
LIBRAW_MOUNT_RicohModule = 18,
LIBRAW_MOUNT_Samsung_NX_M = 19,
LIBRAW_MOUNT_Leica_T = 20,
LIBRAW_MOUNT_Contax_N = 21,
LIBRAW_MOUNT_Sigma_X3F = 22,
LIBRAW_MOUNT_Leica_SL = 23,
LIBRAW_MOUNT_FixedLens = 99
};
enum LibRaw_camera_formats
{
LIBRAW_FORMAT_APSC = 1,
LIBRAW_FORMAT_FF = 2,
LIBRAW_FORMAT_MF = 3,
LIBRAW_FORMAT_APSH = 4,
LIBRAW_FORMAT_1INCH = 5,
LIBRAW_FORMAT_FT = 8
};
enum LibRaw_sony_cameratypes
{
LIBRAW_SONY_DSC = 1,
LIBRAW_SONY_DSLR = 2,
LIBRAW_SONY_NEX = 3,
LIBRAW_SONY_SLT = 4,
LIBRAW_SONY_ILCE = 5,
LIBRAW_SONY_ILCA = 6
};
enum LibRaw_processing_options
{
LIBRAW_PROCESSING_SONYARW2_NONE = 0,
LIBRAW_PROCESSING_SONYARW2_BASEONLY = 1,
LIBRAW_PROCESSING_SONYARW2_DELTAONLY = 1 << 1,
LIBRAW_PROCESSING_SONYARW2_DELTAZEROBASE = 1 << 2,
LIBRAW_PROCESSING_SONYARW2_DELTATOVALUE = 1 << 3,
LIBRAW_PROCESSING_SONYARW2_ALLFLAGS = LIBRAW_PROCESSING_SONYARW2_BASEONLY + LIBRAW_PROCESSING_SONYARW2_DELTAONLY +
LIBRAW_PROCESSING_SONYARW2_DELTAZEROBASE +
LIBRAW_PROCESSING_SONYARW2_DELTATOVALUE,
LIBRAW_PROCESSING_DP2Q_INTERPOLATERG = 1 << 4,
LIBRAW_PROCESSING_DP2Q_INTERPOLATEAF = 1 << 5,
LIBRAW_PROCESSING_PENTAX_PS_ALLFRAMES = 1 << 6,
LIBRAW_PROCESSING_CONVERTFLOAT_TO_INT = 1 << 7,
LIBRAW_PROCESSING_SRAW_NO_RGB = 1 << 8,
LIBRAW_PROCESSING_SRAW_NO_INTERPOLATE = 1 << 9,
LIBRAW_PROCESSING_NO_ROTATE_FOR_KODAK_THUMBNAILS = 1 << 11,
LIBRAW_PROCESSING_USE_DNG_DEFAULT_CROP = 1 << 12,
LIBRAW_PROCESSING_USE_PPM16_THUMBS = 1 << 13
};
enum LibRaw_decoder_flags
{
LIBRAW_DECODER_HASCURVE = 1 << 4,
LIBRAW_DECODER_SONYARW2 = 1 << 5,
LIBRAW_DECODER_TRYRAWSPEED = 1 << 6,
LIBRAW_DECODER_OWNALLOC = 1 << 7,
LIBRAW_DECODER_FIXEDMAXC = 1 << 8,
LIBRAW_DECODER_ADOBECOPYPIXEL = 1 << 9,
LIBRAW_DECODER_LEGACY_WITH_MARGINS = 1 << 10,
LIBRAW_DECODER_NOTSET = 1 << 15
};
#define LIBRAW_XTRANS 9
enum LibRaw_constructor_flags
{
LIBRAW_OPTIONS_NONE = 0,
LIBRAW_OPIONS_NO_MEMERR_CALLBACK = 1,
LIBRAW_OPIONS_NO_DATAERR_CALLBACK = 1 << 1
};
enum LibRaw_warnings
{
LIBRAW_WARN_NONE = 0,
LIBRAW_WARN_BAD_CAMERA_WB = 1 << 2,
LIBRAW_WARN_NO_METADATA = 1 << 3,
LIBRAW_WARN_NO_JPEGLIB = 1 << 4,
LIBRAW_WARN_NO_EMBEDDED_PROFILE = 1 << 5,
LIBRAW_WARN_NO_INPUT_PROFILE = 1 << 6,
LIBRAW_WARN_BAD_OUTPUT_PROFILE = 1 << 7,
LIBRAW_WARN_NO_BADPIXELMAP = 1 << 8,
LIBRAW_WARN_BAD_DARKFRAME_FILE = 1 << 9,
LIBRAW_WARN_BAD_DARKFRAME_DIM = 1 << 10,
LIBRAW_WARN_NO_JASPER = 1 << 11,
LIBRAW_WARN_RAWSPEED_PROBLEM = 1 << 12,
LIBRAW_WARN_RAWSPEED_UNSUPPORTED = 1 << 13,
LIBRAW_WARN_RAWSPEED_PROCESSED = 1 << 14,
LIBRAW_WARN_FALLBACK_TO_AHD = 1 << 15,
LIBRAW_WARN_PARSEFUJI_PROCESSED = 1 << 16
};
enum LibRaw_exceptions
{
LIBRAW_EXCEPTION_NONE = 0,
LIBRAW_EXCEPTION_ALLOC = 1,
LIBRAW_EXCEPTION_DECODE_RAW = 2,
LIBRAW_EXCEPTION_DECODE_JPEG = 3,
LIBRAW_EXCEPTION_IO_EOF = 4,
LIBRAW_EXCEPTION_IO_CORRUPT = 5,
LIBRAW_EXCEPTION_CANCELLED_BY_CALLBACK = 6,
LIBRAW_EXCEPTION_BAD_CROP = 7,
LIBRAW_EXCEPTION_IO_BADFILE = 8,
LIBRAW_EXCEPTION_DECODE_JPEG2000 = 9
};
enum LibRaw_progress
{
LIBRAW_PROGRESS_START = 0,
LIBRAW_PROGRESS_OPEN = 1,
LIBRAW_PROGRESS_IDENTIFY = 1 << 1,
LIBRAW_PROGRESS_SIZE_ADJUST = 1 << 2,
LIBRAW_PROGRESS_LOAD_RAW = 1 << 3,
LIBRAW_PROGRESS_RAW2_IMAGE = 1 << 4,
LIBRAW_PROGRESS_REMOVE_ZEROES = 1 << 5,
LIBRAW_PROGRESS_BAD_PIXELS = 1 << 6,
LIBRAW_PROGRESS_DARK_FRAME = 1 << 7,
LIBRAW_PROGRESS_FOVEON_INTERPOLATE = 1 << 8,
LIBRAW_PROGRESS_SCALE_COLORS = 1 << 9,
LIBRAW_PROGRESS_PRE_INTERPOLATE = 1 << 10,
LIBRAW_PROGRESS_INTERPOLATE = 1 << 11,
LIBRAW_PROGRESS_MIX_GREEN = 1 << 12,
LIBRAW_PROGRESS_MEDIAN_FILTER = 1 << 13,
LIBRAW_PROGRESS_HIGHLIGHTS = 1 << 14,
LIBRAW_PROGRESS_FUJI_ROTATE = 1 << 15,
LIBRAW_PROGRESS_FLIP = 1 << 16,
LIBRAW_PROGRESS_APPLY_PROFILE = 1 << 17,
LIBRAW_PROGRESS_CONVERT_RGB = 1 << 18,
LIBRAW_PROGRESS_STRETCH = 1 << 19,
/* reserved */
LIBRAW_PROGRESS_STAGE20 = 1 << 20,
LIBRAW_PROGRESS_STAGE21 = 1 << 21,
LIBRAW_PROGRESS_STAGE22 = 1 << 22,
LIBRAW_PROGRESS_STAGE23 = 1 << 23,
LIBRAW_PROGRESS_STAGE24 = 1 << 24,
LIBRAW_PROGRESS_STAGE25 = 1 << 25,
LIBRAW_PROGRESS_STAGE26 = 1 << 26,
LIBRAW_PROGRESS_STAGE27 = 1 << 27,
LIBRAW_PROGRESS_THUMB_LOAD = 1 << 28,
LIBRAW_PROGRESS_TRESERVED1 = 1 << 29,
LIBRAW_PROGRESS_TRESERVED2 = 1 << 30,
LIBRAW_PROGRESS_TRESERVED3 = 1 << 31
};
#define LIBRAW_PROGRESS_THUMB_MASK 0x0fffffff
enum LibRaw_errors
{
LIBRAW_SUCCESS = 0,
LIBRAW_UNSPECIFIED_ERROR = -1,
LIBRAW_FILE_UNSUPPORTED = -2,
LIBRAW_REQUEST_FOR_NONEXISTENT_IMAGE = -3,
LIBRAW_OUT_OF_ORDER_CALL = -4,
LIBRAW_NO_THUMBNAIL = -5,
LIBRAW_UNSUPPORTED_THUMBNAIL = -6,
LIBRAW_INPUT_CLOSED = -7,
LIBRAW_NOT_IMPLEMENTED = -8,
LIBRAW_UNSUFFICIENT_MEMORY = -100007,
LIBRAW_DATA_ERROR = -100008,
LIBRAW_IO_ERROR = -100009,
LIBRAW_CANCELLED_BY_CALLBACK = -100010,
LIBRAW_BAD_CROP = -100011
};
#define LIBRAW_FATAL_ERROR(ec) ((ec) < -100000)
enum LibRaw_thumbnail_formats
{
LIBRAW_THUMBNAIL_UNKNOWN = 0,
LIBRAW_THUMBNAIL_JPEG = 1,
LIBRAW_THUMBNAIL_BITMAP = 2,
LIBRAW_THUMBNAIL_BITMAP16 = 3,
LIBRAW_THUMBNAIL_LAYER = 4,
LIBRAW_THUMBNAIL_ROLLEI = 5
};
enum LibRaw_image_formats
{
LIBRAW_IMAGE_JPEG = 1,
LIBRAW_IMAGE_BITMAP = 2
};
#endif
|
2945_2
|
crossvul
|
h
|
CWE-119
|
Improper Restriction of Operations within the Bounds of a Memory Buffer - Certain languages allow direct addressing of memory locations and do not automatically ensure that these locations are valid for the memory buffer that is being referenced.
|
c
|
// imagew-internals.h
// Part of ImageWorsener, Copyright (c) 2011 by Jason Summers.
// For more information, see the readme.txt file.
#define IW_INCLUDE_UTIL_FUNCTIONS
#include "imagew.h"
#define IW_COPYRIGHT_YEAR "2011" "\xe2\x80\x93" "2015"
#ifdef IW_WINDOWS
#define IW_INLINE __inline
#else
#define IW_INLINE inline
#endif
#define IW_MSG_MAX 200 // The usual max length of error messages, etc.
// Data type used for samples during some internal calculations
typedef double iw_tmpsample;
#ifdef IW_64BIT
#define IW_DEFAULT_MAX_DIMENSION 1000000
#define IW_DEFAULT_MAX_MALLOC 2000000000000
#else
#define IW_DEFAULT_MAX_DIMENSION 40000 // Must be less than sqrt(2^31).
#define IW_DEFAULT_MAX_MALLOC 2000000000
#endif
#define IW_BKGD_STRATEGY_EARLY 1 // Apply background before resizing
#define IW_BKGD_STRATEGY_LATE 2 // Apply background after resizing
#define IW_NUM_CHANNELTYPES 5 // 5, for R,G,B, Alpha, Gray
#define IW_CI_COUNT 4 // Number of channelinfo structs (=4, for R, G, B, A)
struct iw_rr_ctx; // "resize rows" state; see imagew-resize.c.
// "Raw" settings from the application.
struct iw_resize_settings {
int family;
int edge_policy;
int use_offset;
int disable_rrctx_cache;
double param1; // 'B' in Mitchell-Netravali cubics. "lobes" in Lanczos, etc.
double param2; // 'C' in Mitchell-Netravali cubics.
double blur_factor;
double out_true_size; // Size onto which to map the input image.
double translate; // Amount to move the image, before applying any channel offsets.
double channel_offset[3]; // Indexed by IW_CHANNELTYPE_[Red..Blue]
struct iw_rr_ctx *rrctx;
};
struct iw_channelinfo_in {
int channeltype;
int disable_fast_get_sample;
double maxcolorcode_dbl;
int maxcolorcode_int;
};
struct iw_channelinfo_intermed {
int channeltype;
int cvt_to_grayscale; // (on input)
int corresponding_input_channel; // (or the first of 3 channels if cvt_to_grayscale)
int corresponding_output_channel; // Can be -1 if no such channel.
double bkgd_color_lin; // Used if ctx->apply_bkgd && bkgd_strategy==EARLY
int need_unassoc_alpha_processing; // Is this a color channel in an image with transparency?
};
struct iw_channelinfo_out {
int ditherfamily;
int dithersubtype;
int channeltype;
// If restricting to a number of colors, colors are evenly
// spaced (as evenly spaced as possible) in the target color space.
int color_count; // 0=default
double maxcolorcode_dbl;
int maxcolorcode_int;
int use_nearest_color_table;
double bkgd1_color_lin; // Used if ctx->apply_bkgd
double bkgd2_color_lin; // Used if ctx->apply_bkgd && bkgd_checkerboard
};
struct iw_prng; // Defined imagew-util.c
// Tracks the current image properties. May change as we optimize the image.
struct iw_opt_ctx {
int height, width;
int imgtype;
int bit_depth;
size_t bpr;
// A pointer to the current pixels. May point to tmp_pixels, or
// to ctx->img2.pixels.
const iw_byte *pixelsptr;
// A place for optimized pixels. If this is non-NULL, it will be
// freed when IW is finished.
iw_byte *tmp_pixels;
int has_transparency;
int has_partial_transparency;
int has_16bit_precision;
int has_color;
int palette_is_grayscale;
struct iw_palette *palette;
int has_colorkey_trns;
unsigned int colorkey[3]; // Indexed by IW_CHANNELTYPE_[RED..BLUE]
int has_bkgdlabel;
unsigned int bkgdlabel[4]; // Indexed by IW_CHANNELTYPE_[RED..ALPHA]
};
struct iw_option_struct {
char *name;
char *val;
};
// Used to help separate settings that were requested by the caller,
// and that might not always be respected, or applicable.
struct iw_req_struct {
int output_depth; // Bits/sample requested by the caller.
int output_sample_type; // Reserved for future expansion.
int output_maxcolorcode[IW_NUM_CHANNELTYPES];
// Requested color counts; 0 = "not set"
int color_count[IW_NUM_CHANNELTYPES]; // Indexed by IW_CHANNELTYPE_[Red..Gray]
// Image size requested by user. The actual size to use is stored in .resize_settings.
int out_true_valid;
double out_true_width, out_true_height;
int output_rendering_intent;
int output_cs_valid;
struct iw_csdescr output_cs;
int suppress_output_cslabel;
int negate_target;
int bkgd_valid;
int bkgd_checkerboard; // 1=caller requested a checkerboard background
struct iw_color bkgd; // The requested (primary) background color (linear colorspace).
struct iw_color bkgd2; // The requested secondary background color.
int output_bkgd_label_valid;
struct iw_color output_bkgd_label; // Uses linear colorspace
int use_bkgd_label_from_file; // Prefer the bkgd color from the input file.
int suppress_output_bkgd_label;
// These are not used by the core library, but codecs may use them:
int output_format;
int compression; // IW_COMPRESSION_*. Suggested compression algorithm.
int page_to_read;
int include_screen;
int jpeg_samp_factor_h, jpeg_samp_factor_v; // 0 means default
int interlaced;
int bmp_no_fileheader;
struct iw_option_struct *options;
int options_count;
int options_numalloc;
};
struct iw_context {
int caller_api_version;
int use_count;
unsigned int output_profile;
iw_mallocfn_type mallocfn;
iw_freefn_type freefn;
iw_float32 *intermediate32;
iw_float32 *intermediate_alpha32;
iw_float32 *final_alpha32;
struct iw_channelinfo_in img1_ci[IW_CI_COUNT];
struct iw_image img1;
struct iw_csdescr img1cs;
int img1_imgtype_logical;
int img1_numchannels_physical;
int img1_numchannels_logical;
int img1_alpha_channel_index;
// The suggested background color read from the input file.
int img1_bkgd_label_set;
struct iw_color img1_bkgd_label_inputcs;
struct iw_color img1_bkgd_label_lin; // img1.bkgd_color_*
struct iw_channelinfo_intermed intermed_ci[IW_CI_COUNT];
int intermed_imgtype;
int intermed_numchannels;
int intermed_alpha_channel_index;
int intermed_canvas_width, intermed_canvas_height;
struct iw_image img2;
struct iw_csdescr img2cs;
struct iw_channelinfo_out img2_ci[IW_CI_COUNT];
int img2_numchannels;
int ditherfamily_by_channeltype[IW_NUM_CHANNELTYPES]; // Indexed by IW_CHANNELTYPE_[Red..Gray]
int dithersubtype_by_channeltype[IW_NUM_CHANNELTYPES]; // Indexed by IW_CHANNELTYPE_[Red..Gray]
int uses_errdiffdither;
struct iw_prng *prng; // Pseudorandom number generator state
// Indexed by IW_DIMENSION_*.
struct iw_resize_settings resize_settings[2];
int to_grayscale;
int apply_bkgd; // 1 = We will be applying a background color.
int apply_bkgd_strategy; // IW_BKGD_STRATEGY_*
int bkgd_checkerboard; // valid if apply_bkgd is set. 0=solid, 1=checkerboard
int bkgd_check_size;
int bkgd_check_origin[2]; // Indexed by IW_DIMENSION_*
#define IW_BKGD_COLOR_SOURCE_NONE 0 // Use a default color
#define IW_BKGD_COLOR_SOURCE_FILE 1 // Use ctx->img1_bkgd_label_lin
#define IW_BKGD_COLOR_SOURCE_REQ 2 // Use ctx->req.bkgd
int bkgd_color_source; // Valid if .apply_bkgd is set.
// Background color alpha samples. (The color samples are stored in
// iw_channelinfo_out.)
double bkgd1alpha, bkgd2alpha;
void *userdata;
iw_translatefn_type translate_fn;
iw_warningfn_type warning_fn;
int input_maxcolorcode_int; // Based on the source image's full bitdepth
double input_maxcolorcode;
int support_reduced_input_bitdepths;
int disable_output_lookup_tables;
int reduced_output_maxcolor_flag; // Are there any reduced output maxcolorcodes?
// Max number of rows for error-diffusion dithering, including current row.
#define IW_DITHER_MAXROWS 3
// Error accumulators for error-diffusion dithering.
double *dither_errors[IW_DITHER_MAXROWS]; // 0 is the current row.
int randomize; // 0 to use random_seed, nonzero to use a different seed every time.
int random_seed;
size_t max_malloc;
int max_width, max_height;
int error_flag;
char *error_msg;
struct iw_opt_ctx optctx;
int no_gamma; // Disable gamma correction. (IW_VAL_DISABLE_GAMMA)
int intclamp; // Clamp the intermediate samples to the 0.0-1.0 range.
int grayscale_formula; // IW_GSF_*
double grayscale_weight[3];
int pref_units; // IW_PREF_UNITS_*
// Optimization codes. Can be set to 0 to disallow this optimization
iw_byte opt_grayscale; // RGB-to-grayscale
iw_byte opt_palette; // Palette images
iw_byte opt_16_to_8; // Reduce >8 bitdepth to 8
iw_byte opt_strip_alpha; // RGBA->RGB or GA->G
iw_byte opt_binary_trns; // Color-keyed binary transparency
int canvas_width, canvas_height;
int input_start_x, input_start_y, input_w, input_h;
struct iw_req_struct req;
// Color correction tables, to improve performance.
double *input_color_corr_table;
// This is not for converting linear to the output colorspace; it's the
// same as input_color_corr_table except that it might have a different
// number of entries, and might be for a different colorspace.
double *output_rev_color_corr_table;
double *nearest_color_table;
struct iw_zlib_module *zlib_module;
};
// Defined imagew-util.c
struct iw_prng *iwpvt_prng_create(struct iw_context *ctx);
void iwpvt_prng_destroy(struct iw_context *ctx, struct iw_prng *prng);
void iwpvt_prng_set_random_seed(struct iw_prng *prng, int s);
iw_uint32 iwpvt_prng_rand(struct iw_prng *prng); // Returns a pseudorandom number.
int iwpvt_util_randomize(struct iw_prng *prng); // Returns the random seed that was used.
void* iwpvt_default_malloc(void *userdata, unsigned int flags, size_t n);
void iwpvt_default_free(void *userdata, void *mem);
char* iwpvt_strdup_dbl(struct iw_context *ctx, double n);
// Defined in imagew-resize.c
struct iw_rr_ctx *iwpvt_resize_rows_init(struct iw_context *ctx,
struct iw_resize_settings *rs, int channeltype, int num_in_pix, int num_out_pix);
void iwpvt_resize_rows_done(struct iw_rr_ctx *rrctx);
void iwpvt_resize_row_main(struct iw_rr_ctx *rrctx, iw_tmpsample *in_pix, iw_tmpsample *out_pix);
// Defined in imagew-opt.c
void iwpvt_optimize_image(struct iw_context *ctx);
|
// imagew-internals.h
// Part of ImageWorsener, Copyright (c) 2011 by Jason Summers.
// For more information, see the readme.txt file.
#define IW_INCLUDE_UTIL_FUNCTIONS
#include "imagew.h"
#define IW_COPYRIGHT_YEAR "2011" "\xe2\x80\x93" "2015"
#ifdef IW_WINDOWS
#define IW_INLINE __inline
#else
#define IW_INLINE inline
#endif
#define IW_MSG_MAX 200 // The usual max length of error messages, etc.
// Data type used for samples during some internal calculations
typedef double iw_tmpsample;
#ifdef IW_64BIT
#define IW_DEFAULT_MAX_DIMENSION 40000
#define IW_DEFAULT_MAX_MALLOC 2000000000
#else
#define IW_DEFAULT_MAX_DIMENSION 40000 // Must be less than sqrt(2^31).
#define IW_DEFAULT_MAX_MALLOC 2000000000
#endif
#define IW_BKGD_STRATEGY_EARLY 1 // Apply background before resizing
#define IW_BKGD_STRATEGY_LATE 2 // Apply background after resizing
#define IW_NUM_CHANNELTYPES 5 // 5, for R,G,B, Alpha, Gray
#define IW_CI_COUNT 4 // Number of channelinfo structs (=4, for R, G, B, A)
struct iw_rr_ctx; // "resize rows" state; see imagew-resize.c.
// "Raw" settings from the application.
struct iw_resize_settings {
int family;
int edge_policy;
int use_offset;
int disable_rrctx_cache;
double param1; // 'B' in Mitchell-Netravali cubics. "lobes" in Lanczos, etc.
double param2; // 'C' in Mitchell-Netravali cubics.
double blur_factor;
double out_true_size; // Size onto which to map the input image.
double translate; // Amount to move the image, before applying any channel offsets.
double channel_offset[3]; // Indexed by IW_CHANNELTYPE_[Red..Blue]
struct iw_rr_ctx *rrctx;
};
struct iw_channelinfo_in {
int channeltype;
int disable_fast_get_sample;
double maxcolorcode_dbl;
int maxcolorcode_int;
};
struct iw_channelinfo_intermed {
int channeltype;
int cvt_to_grayscale; // (on input)
int corresponding_input_channel; // (or the first of 3 channels if cvt_to_grayscale)
int corresponding_output_channel; // Can be -1 if no such channel.
double bkgd_color_lin; // Used if ctx->apply_bkgd && bkgd_strategy==EARLY
int need_unassoc_alpha_processing; // Is this a color channel in an image with transparency?
};
struct iw_channelinfo_out {
int ditherfamily;
int dithersubtype;
int channeltype;
// If restricting to a number of colors, colors are evenly
// spaced (as evenly spaced as possible) in the target color space.
int color_count; // 0=default
double maxcolorcode_dbl;
int maxcolorcode_int;
int use_nearest_color_table;
double bkgd1_color_lin; // Used if ctx->apply_bkgd
double bkgd2_color_lin; // Used if ctx->apply_bkgd && bkgd_checkerboard
};
struct iw_prng; // Defined imagew-util.c
// Tracks the current image properties. May change as we optimize the image.
struct iw_opt_ctx {
int height, width;
int imgtype;
int bit_depth;
size_t bpr;
// A pointer to the current pixels. May point to tmp_pixels, or
// to ctx->img2.pixels.
const iw_byte *pixelsptr;
// A place for optimized pixels. If this is non-NULL, it will be
// freed when IW is finished.
iw_byte *tmp_pixels;
int has_transparency;
int has_partial_transparency;
int has_16bit_precision;
int has_color;
int palette_is_grayscale;
struct iw_palette *palette;
int has_colorkey_trns;
unsigned int colorkey[3]; // Indexed by IW_CHANNELTYPE_[RED..BLUE]
int has_bkgdlabel;
unsigned int bkgdlabel[4]; // Indexed by IW_CHANNELTYPE_[RED..ALPHA]
};
struct iw_option_struct {
char *name;
char *val;
};
// Used to help separate settings that were requested by the caller,
// and that might not always be respected, or applicable.
struct iw_req_struct {
int output_depth; // Bits/sample requested by the caller.
int output_sample_type; // Reserved for future expansion.
int output_maxcolorcode[IW_NUM_CHANNELTYPES];
// Requested color counts; 0 = "not set"
int color_count[IW_NUM_CHANNELTYPES]; // Indexed by IW_CHANNELTYPE_[Red..Gray]
// Image size requested by user. The actual size to use is stored in .resize_settings.
int out_true_valid;
double out_true_width, out_true_height;
int output_rendering_intent;
int output_cs_valid;
struct iw_csdescr output_cs;
int suppress_output_cslabel;
int negate_target;
int bkgd_valid;
int bkgd_checkerboard; // 1=caller requested a checkerboard background
struct iw_color bkgd; // The requested (primary) background color (linear colorspace).
struct iw_color bkgd2; // The requested secondary background color.
int output_bkgd_label_valid;
struct iw_color output_bkgd_label; // Uses linear colorspace
int use_bkgd_label_from_file; // Prefer the bkgd color from the input file.
int suppress_output_bkgd_label;
// These are not used by the core library, but codecs may use them:
int output_format;
int compression; // IW_COMPRESSION_*. Suggested compression algorithm.
int page_to_read;
int include_screen;
int jpeg_samp_factor_h, jpeg_samp_factor_v; // 0 means default
int interlaced;
int bmp_no_fileheader;
struct iw_option_struct *options;
int options_count;
int options_numalloc;
};
struct iw_context {
int caller_api_version;
int use_count;
unsigned int output_profile;
iw_mallocfn_type mallocfn;
iw_freefn_type freefn;
iw_float32 *intermediate32;
iw_float32 *intermediate_alpha32;
iw_float32 *final_alpha32;
struct iw_channelinfo_in img1_ci[IW_CI_COUNT];
struct iw_image img1;
struct iw_csdescr img1cs;
int img1_imgtype_logical;
int img1_numchannels_physical;
int img1_numchannels_logical;
int img1_alpha_channel_index;
// The suggested background color read from the input file.
int img1_bkgd_label_set;
struct iw_color img1_bkgd_label_inputcs;
struct iw_color img1_bkgd_label_lin; // img1.bkgd_color_*
struct iw_channelinfo_intermed intermed_ci[IW_CI_COUNT];
int intermed_imgtype;
int intermed_numchannels;
int intermed_alpha_channel_index;
int intermed_canvas_width, intermed_canvas_height;
struct iw_image img2;
struct iw_csdescr img2cs;
struct iw_channelinfo_out img2_ci[IW_CI_COUNT];
int img2_numchannels;
int ditherfamily_by_channeltype[IW_NUM_CHANNELTYPES]; // Indexed by IW_CHANNELTYPE_[Red..Gray]
int dithersubtype_by_channeltype[IW_NUM_CHANNELTYPES]; // Indexed by IW_CHANNELTYPE_[Red..Gray]
int uses_errdiffdither;
struct iw_prng *prng; // Pseudorandom number generator state
// Indexed by IW_DIMENSION_*.
struct iw_resize_settings resize_settings[2];
int to_grayscale;
int apply_bkgd; // 1 = We will be applying a background color.
int apply_bkgd_strategy; // IW_BKGD_STRATEGY_*
int bkgd_checkerboard; // valid if apply_bkgd is set. 0=solid, 1=checkerboard
int bkgd_check_size;
int bkgd_check_origin[2]; // Indexed by IW_DIMENSION_*
#define IW_BKGD_COLOR_SOURCE_NONE 0 // Use a default color
#define IW_BKGD_COLOR_SOURCE_FILE 1 // Use ctx->img1_bkgd_label_lin
#define IW_BKGD_COLOR_SOURCE_REQ 2 // Use ctx->req.bkgd
int bkgd_color_source; // Valid if .apply_bkgd is set.
// Background color alpha samples. (The color samples are stored in
// iw_channelinfo_out.)
double bkgd1alpha, bkgd2alpha;
void *userdata;
iw_translatefn_type translate_fn;
iw_warningfn_type warning_fn;
int input_maxcolorcode_int; // Based on the source image's full bitdepth
double input_maxcolorcode;
int support_reduced_input_bitdepths;
int disable_output_lookup_tables;
int reduced_output_maxcolor_flag; // Are there any reduced output maxcolorcodes?
// Max number of rows for error-diffusion dithering, including current row.
#define IW_DITHER_MAXROWS 3
// Error accumulators for error-diffusion dithering.
double *dither_errors[IW_DITHER_MAXROWS]; // 0 is the current row.
int randomize; // 0 to use random_seed, nonzero to use a different seed every time.
int random_seed;
size_t max_malloc;
int max_width, max_height;
int error_flag;
char *error_msg;
struct iw_opt_ctx optctx;
int no_gamma; // Disable gamma correction. (IW_VAL_DISABLE_GAMMA)
int intclamp; // Clamp the intermediate samples to the 0.0-1.0 range.
int grayscale_formula; // IW_GSF_*
double grayscale_weight[3];
int pref_units; // IW_PREF_UNITS_*
// Optimization codes. Can be set to 0 to disallow this optimization
iw_byte opt_grayscale; // RGB-to-grayscale
iw_byte opt_palette; // Palette images
iw_byte opt_16_to_8; // Reduce >8 bitdepth to 8
iw_byte opt_strip_alpha; // RGBA->RGB or GA->G
iw_byte opt_binary_trns; // Color-keyed binary transparency
int canvas_width, canvas_height;
int input_start_x, input_start_y, input_w, input_h;
struct iw_req_struct req;
// Color correction tables, to improve performance.
double *input_color_corr_table;
// This is not for converting linear to the output colorspace; it's the
// same as input_color_corr_table except that it might have a different
// number of entries, and might be for a different colorspace.
double *output_rev_color_corr_table;
double *nearest_color_table;
struct iw_zlib_module *zlib_module;
};
// Defined imagew-util.c
struct iw_prng *iwpvt_prng_create(struct iw_context *ctx);
void iwpvt_prng_destroy(struct iw_context *ctx, struct iw_prng *prng);
void iwpvt_prng_set_random_seed(struct iw_prng *prng, int s);
iw_uint32 iwpvt_prng_rand(struct iw_prng *prng); // Returns a pseudorandom number.
int iwpvt_util_randomize(struct iw_prng *prng); // Returns the random seed that was used.
void* iwpvt_default_malloc(void *userdata, unsigned int flags, size_t n);
void iwpvt_default_free(void *userdata, void *mem);
char* iwpvt_strdup_dbl(struct iw_context *ctx, double n);
// Defined in imagew-resize.c
struct iw_rr_ctx *iwpvt_resize_rows_init(struct iw_context *ctx,
struct iw_resize_settings *rs, int channeltype, int num_in_pix, int num_out_pix);
void iwpvt_resize_rows_done(struct iw_rr_ctx *rrctx);
void iwpvt_resize_row_main(struct iw_rr_ctx *rrctx, iw_tmpsample *in_pix, iw_tmpsample *out_pix);
// Defined in imagew-opt.c
void iwpvt_optimize_image(struct iw_context *ctx);
|
3330_0
|
crossvul
|
h
|
CWE-119
|
Improper Restriction of Operations within the Bounds of a Memory Buffer - Certain languages allow direct addressing of memory locations and do not automatically ensure that these locations are valid for the memory buffer that is being referenced.
|
c
|
#ifndef B43_DMA_H_
#define B43_DMA_H_
#include <linux/err.h>
#include "b43.h"
/* DMA-Interrupt reasons. */
#define B43_DMAIRQ_FATALMASK ((1 << 10) | (1 << 11) | (1 << 12) \
| (1 << 14) | (1 << 15))
#define B43_DMAIRQ_NONFATALMASK (1 << 13)
#define B43_DMAIRQ_RX_DONE (1 << 16)
/*** 32-bit DMA Engine. ***/
/* 32-bit DMA controller registers. */
#define B43_DMA32_TXCTL 0x00
#define B43_DMA32_TXENABLE 0x00000001
#define B43_DMA32_TXSUSPEND 0x00000002
#define B43_DMA32_TXLOOPBACK 0x00000004
#define B43_DMA32_TXFLUSH 0x00000010
#define B43_DMA32_TXADDREXT_MASK 0x00030000
#define B43_DMA32_TXADDREXT_SHIFT 16
#define B43_DMA32_TXRING 0x04
#define B43_DMA32_TXINDEX 0x08
#define B43_DMA32_TXSTATUS 0x0C
#define B43_DMA32_TXDPTR 0x00000FFF
#define B43_DMA32_TXSTATE 0x0000F000
#define B43_DMA32_TXSTAT_DISABLED 0x00000000
#define B43_DMA32_TXSTAT_ACTIVE 0x00001000
#define B43_DMA32_TXSTAT_IDLEWAIT 0x00002000
#define B43_DMA32_TXSTAT_STOPPED 0x00003000
#define B43_DMA32_TXSTAT_SUSP 0x00004000
#define B43_DMA32_TXERROR 0x000F0000
#define B43_DMA32_TXERR_NOERR 0x00000000
#define B43_DMA32_TXERR_PROT 0x00010000
#define B43_DMA32_TXERR_UNDERRUN 0x00020000
#define B43_DMA32_TXERR_BUFREAD 0x00030000
#define B43_DMA32_TXERR_DESCREAD 0x00040000
#define B43_DMA32_TXACTIVE 0xFFF00000
#define B43_DMA32_RXCTL 0x10
#define B43_DMA32_RXENABLE 0x00000001
#define B43_DMA32_RXFROFF_MASK 0x000000FE
#define B43_DMA32_RXFROFF_SHIFT 1
#define B43_DMA32_RXDIRECTFIFO 0x00000100
#define B43_DMA32_RXADDREXT_MASK 0x00030000
#define B43_DMA32_RXADDREXT_SHIFT 16
#define B43_DMA32_RXRING 0x14
#define B43_DMA32_RXINDEX 0x18
#define B43_DMA32_RXSTATUS 0x1C
#define B43_DMA32_RXDPTR 0x00000FFF
#define B43_DMA32_RXSTATE 0x0000F000
#define B43_DMA32_RXSTAT_DISABLED 0x00000000
#define B43_DMA32_RXSTAT_ACTIVE 0x00001000
#define B43_DMA32_RXSTAT_IDLEWAIT 0x00002000
#define B43_DMA32_RXSTAT_STOPPED 0x00003000
#define B43_DMA32_RXERROR 0x000F0000
#define B43_DMA32_RXERR_NOERR 0x00000000
#define B43_DMA32_RXERR_PROT 0x00010000
#define B43_DMA32_RXERR_OVERFLOW 0x00020000
#define B43_DMA32_RXERR_BUFWRITE 0x00030000
#define B43_DMA32_RXERR_DESCREAD 0x00040000
#define B43_DMA32_RXACTIVE 0xFFF00000
/* 32-bit DMA descriptor. */
struct b43_dmadesc32 {
__le32 control;
__le32 address;
} __packed;
#define B43_DMA32_DCTL_BYTECNT 0x00001FFF
#define B43_DMA32_DCTL_ADDREXT_MASK 0x00030000
#define B43_DMA32_DCTL_ADDREXT_SHIFT 16
#define B43_DMA32_DCTL_DTABLEEND 0x10000000
#define B43_DMA32_DCTL_IRQ 0x20000000
#define B43_DMA32_DCTL_FRAMEEND 0x40000000
#define B43_DMA32_DCTL_FRAMESTART 0x80000000
/*** 64-bit DMA Engine. ***/
/* 64-bit DMA controller registers. */
#define B43_DMA64_TXCTL 0x00
#define B43_DMA64_TXENABLE 0x00000001
#define B43_DMA64_TXSUSPEND 0x00000002
#define B43_DMA64_TXLOOPBACK 0x00000004
#define B43_DMA64_TXFLUSH 0x00000010
#define B43_DMA64_TXADDREXT_MASK 0x00030000
#define B43_DMA64_TXADDREXT_SHIFT 16
#define B43_DMA64_TXINDEX 0x04
#define B43_DMA64_TXRINGLO 0x08
#define B43_DMA64_TXRINGHI 0x0C
#define B43_DMA64_TXSTATUS 0x10
#define B43_DMA64_TXSTATDPTR 0x00001FFF
#define B43_DMA64_TXSTAT 0xF0000000
#define B43_DMA64_TXSTAT_DISABLED 0x00000000
#define B43_DMA64_TXSTAT_ACTIVE 0x10000000
#define B43_DMA64_TXSTAT_IDLEWAIT 0x20000000
#define B43_DMA64_TXSTAT_STOPPED 0x30000000
#define B43_DMA64_TXSTAT_SUSP 0x40000000
#define B43_DMA64_TXERROR 0x14
#define B43_DMA64_TXERRDPTR 0x0001FFFF
#define B43_DMA64_TXERR 0xF0000000
#define B43_DMA64_TXERR_NOERR 0x00000000
#define B43_DMA64_TXERR_PROT 0x10000000
#define B43_DMA64_TXERR_UNDERRUN 0x20000000
#define B43_DMA64_TXERR_TRANSFER 0x30000000
#define B43_DMA64_TXERR_DESCREAD 0x40000000
#define B43_DMA64_TXERR_CORE 0x50000000
#define B43_DMA64_RXCTL 0x20
#define B43_DMA64_RXENABLE 0x00000001
#define B43_DMA64_RXFROFF_MASK 0x000000FE
#define B43_DMA64_RXFROFF_SHIFT 1
#define B43_DMA64_RXDIRECTFIFO 0x00000100
#define B43_DMA64_RXADDREXT_MASK 0x00030000
#define B43_DMA64_RXADDREXT_SHIFT 16
#define B43_DMA64_RXINDEX 0x24
#define B43_DMA64_RXRINGLO 0x28
#define B43_DMA64_RXRINGHI 0x2C
#define B43_DMA64_RXSTATUS 0x30
#define B43_DMA64_RXSTATDPTR 0x00001FFF
#define B43_DMA64_RXSTAT 0xF0000000
#define B43_DMA64_RXSTAT_DISABLED 0x00000000
#define B43_DMA64_RXSTAT_ACTIVE 0x10000000
#define B43_DMA64_RXSTAT_IDLEWAIT 0x20000000
#define B43_DMA64_RXSTAT_STOPPED 0x30000000
#define B43_DMA64_RXSTAT_SUSP 0x40000000
#define B43_DMA64_RXERROR 0x34
#define B43_DMA64_RXERRDPTR 0x0001FFFF
#define B43_DMA64_RXERR 0xF0000000
#define B43_DMA64_RXERR_NOERR 0x00000000
#define B43_DMA64_RXERR_PROT 0x10000000
#define B43_DMA64_RXERR_UNDERRUN 0x20000000
#define B43_DMA64_RXERR_TRANSFER 0x30000000
#define B43_DMA64_RXERR_DESCREAD 0x40000000
#define B43_DMA64_RXERR_CORE 0x50000000
/* 64-bit DMA descriptor. */
struct b43_dmadesc64 {
__le32 control0;
__le32 control1;
__le32 address_low;
__le32 address_high;
} __packed;
#define B43_DMA64_DCTL0_DTABLEEND 0x10000000
#define B43_DMA64_DCTL0_IRQ 0x20000000
#define B43_DMA64_DCTL0_FRAMEEND 0x40000000
#define B43_DMA64_DCTL0_FRAMESTART 0x80000000
#define B43_DMA64_DCTL1_BYTECNT 0x00001FFF
#define B43_DMA64_DCTL1_ADDREXT_MASK 0x00030000
#define B43_DMA64_DCTL1_ADDREXT_SHIFT 16
struct b43_dmadesc_generic {
union {
struct b43_dmadesc32 dma32;
struct b43_dmadesc64 dma64;
} __packed;
} __packed;
/* Misc DMA constants */
#define B43_DMA_RINGMEMSIZE PAGE_SIZE
#define B43_DMA0_RX_FRAMEOFFSET 30
/* DMA engine tuning knobs */
#define B43_TXRING_SLOTS 256
#define B43_RXRING_SLOTS 64
#define B43_DMA0_RX_BUFFERSIZE IEEE80211_MAX_FRAME_LEN
/* Pointer poison */
#define B43_DMA_PTR_POISON ((void *)ERR_PTR(-ENOMEM))
#define b43_dma_ptr_is_poisoned(ptr) (unlikely((ptr) == B43_DMA_PTR_POISON))
struct sk_buff;
struct b43_private;
struct b43_txstatus;
struct b43_dmadesc_meta {
/* The kernel DMA-able buffer. */
struct sk_buff *skb;
/* DMA base bus-address of the descriptor buffer. */
dma_addr_t dmaaddr;
/* ieee80211 TX status. Only used once per 802.11 frag. */
bool is_last_fragment;
};
struct b43_dmaring;
/* Lowlevel DMA operations that differ between 32bit and 64bit DMA. */
struct b43_dma_ops {
struct b43_dmadesc_generic *(*idx2desc) (struct b43_dmaring * ring,
int slot,
struct b43_dmadesc_meta **
meta);
void (*fill_descriptor) (struct b43_dmaring * ring,
struct b43_dmadesc_generic * desc,
dma_addr_t dmaaddr, u16 bufsize, int start,
int end, int irq);
void (*poke_tx) (struct b43_dmaring * ring, int slot);
void (*tx_suspend) (struct b43_dmaring * ring);
void (*tx_resume) (struct b43_dmaring * ring);
int (*get_current_rxslot) (struct b43_dmaring * ring);
void (*set_current_rxslot) (struct b43_dmaring * ring, int slot);
};
enum b43_dmatype {
B43_DMA_30BIT = 30,
B43_DMA_32BIT = 32,
B43_DMA_64BIT = 64,
};
struct b43_dmaring {
/* Lowlevel DMA ops. */
const struct b43_dma_ops *ops;
/* Kernel virtual base address of the ring memory. */
void *descbase;
/* Meta data about all descriptors. */
struct b43_dmadesc_meta *meta;
/* Cache of TX headers for each TX frame.
* This is to avoid an allocation on each TX.
* This is NULL for an RX ring.
*/
u8 *txhdr_cache;
/* (Unadjusted) DMA base bus-address of the ring memory. */
dma_addr_t dmabase;
/* Number of descriptor slots in the ring. */
int nr_slots;
/* Number of used descriptor slots. */
int used_slots;
/* Currently used slot in the ring. */
int current_slot;
/* Frameoffset in octets. */
u32 frameoffset;
/* Descriptor buffer size. */
u16 rx_buffersize;
/* The MMIO base register of the DMA controller. */
u16 mmio_base;
/* DMA controller index number (0-5). */
int index;
/* Boolean. Is this a TX ring? */
bool tx;
/* The type of DMA engine used. */
enum b43_dmatype type;
/* Boolean. Is this ring stopped at ieee80211 level? */
bool stopped;
/* The QOS priority assigned to this ring. Only used for TX rings.
* This is the mac80211 "queue" value. */
u8 queue_prio;
struct b43_wldev *dev;
#ifdef CONFIG_B43_DEBUG
/* Maximum number of used slots. */
int max_used_slots;
/* Last time we injected a ring overflow. */
unsigned long last_injected_overflow;
/* Statistics: Number of successfully transmitted packets */
u64 nr_succeed_tx_packets;
/* Statistics: Number of failed TX packets */
u64 nr_failed_tx_packets;
/* Statistics: Total number of TX plus all retries. */
u64 nr_total_packet_tries;
#endif /* CONFIG_B43_DEBUG */
};
static inline u32 b43_dma_read(struct b43_dmaring *ring, u16 offset)
{
return b43_read32(ring->dev, ring->mmio_base + offset);
}
static inline void b43_dma_write(struct b43_dmaring *ring, u16 offset, u32 value)
{
b43_write32(ring->dev, ring->mmio_base + offset, value);
}
int b43_dma_init(struct b43_wldev *dev);
void b43_dma_free(struct b43_wldev *dev);
void b43_dma_tx_suspend(struct b43_wldev *dev);
void b43_dma_tx_resume(struct b43_wldev *dev);
int b43_dma_tx(struct b43_wldev *dev,
struct sk_buff *skb);
void b43_dma_handle_txstatus(struct b43_wldev *dev,
const struct b43_txstatus *status);
void b43_dma_rx(struct b43_dmaring *ring);
void b43_dma_direct_fifo_rx(struct b43_wldev *dev,
unsigned int engine_index, bool enable);
#endif /* B43_DMA_H_ */
|
#ifndef B43_DMA_H_
#define B43_DMA_H_
#include <linux/err.h>
#include "b43.h"
/* DMA-Interrupt reasons. */
#define B43_DMAIRQ_FATALMASK ((1 << 10) | (1 << 11) | (1 << 12) \
| (1 << 14) | (1 << 15))
#define B43_DMAIRQ_NONFATALMASK (1 << 13)
#define B43_DMAIRQ_RX_DONE (1 << 16)
/*** 32-bit DMA Engine. ***/
/* 32-bit DMA controller registers. */
#define B43_DMA32_TXCTL 0x00
#define B43_DMA32_TXENABLE 0x00000001
#define B43_DMA32_TXSUSPEND 0x00000002
#define B43_DMA32_TXLOOPBACK 0x00000004
#define B43_DMA32_TXFLUSH 0x00000010
#define B43_DMA32_TXADDREXT_MASK 0x00030000
#define B43_DMA32_TXADDREXT_SHIFT 16
#define B43_DMA32_TXRING 0x04
#define B43_DMA32_TXINDEX 0x08
#define B43_DMA32_TXSTATUS 0x0C
#define B43_DMA32_TXDPTR 0x00000FFF
#define B43_DMA32_TXSTATE 0x0000F000
#define B43_DMA32_TXSTAT_DISABLED 0x00000000
#define B43_DMA32_TXSTAT_ACTIVE 0x00001000
#define B43_DMA32_TXSTAT_IDLEWAIT 0x00002000
#define B43_DMA32_TXSTAT_STOPPED 0x00003000
#define B43_DMA32_TXSTAT_SUSP 0x00004000
#define B43_DMA32_TXERROR 0x000F0000
#define B43_DMA32_TXERR_NOERR 0x00000000
#define B43_DMA32_TXERR_PROT 0x00010000
#define B43_DMA32_TXERR_UNDERRUN 0x00020000
#define B43_DMA32_TXERR_BUFREAD 0x00030000
#define B43_DMA32_TXERR_DESCREAD 0x00040000
#define B43_DMA32_TXACTIVE 0xFFF00000
#define B43_DMA32_RXCTL 0x10
#define B43_DMA32_RXENABLE 0x00000001
#define B43_DMA32_RXFROFF_MASK 0x000000FE
#define B43_DMA32_RXFROFF_SHIFT 1
#define B43_DMA32_RXDIRECTFIFO 0x00000100
#define B43_DMA32_RXADDREXT_MASK 0x00030000
#define B43_DMA32_RXADDREXT_SHIFT 16
#define B43_DMA32_RXRING 0x14
#define B43_DMA32_RXINDEX 0x18
#define B43_DMA32_RXSTATUS 0x1C
#define B43_DMA32_RXDPTR 0x00000FFF
#define B43_DMA32_RXSTATE 0x0000F000
#define B43_DMA32_RXSTAT_DISABLED 0x00000000
#define B43_DMA32_RXSTAT_ACTIVE 0x00001000
#define B43_DMA32_RXSTAT_IDLEWAIT 0x00002000
#define B43_DMA32_RXSTAT_STOPPED 0x00003000
#define B43_DMA32_RXERROR 0x000F0000
#define B43_DMA32_RXERR_NOERR 0x00000000
#define B43_DMA32_RXERR_PROT 0x00010000
#define B43_DMA32_RXERR_OVERFLOW 0x00020000
#define B43_DMA32_RXERR_BUFWRITE 0x00030000
#define B43_DMA32_RXERR_DESCREAD 0x00040000
#define B43_DMA32_RXACTIVE 0xFFF00000
/* 32-bit DMA descriptor. */
struct b43_dmadesc32 {
__le32 control;
__le32 address;
} __packed;
#define B43_DMA32_DCTL_BYTECNT 0x00001FFF
#define B43_DMA32_DCTL_ADDREXT_MASK 0x00030000
#define B43_DMA32_DCTL_ADDREXT_SHIFT 16
#define B43_DMA32_DCTL_DTABLEEND 0x10000000
#define B43_DMA32_DCTL_IRQ 0x20000000
#define B43_DMA32_DCTL_FRAMEEND 0x40000000
#define B43_DMA32_DCTL_FRAMESTART 0x80000000
/*** 64-bit DMA Engine. ***/
/* 64-bit DMA controller registers. */
#define B43_DMA64_TXCTL 0x00
#define B43_DMA64_TXENABLE 0x00000001
#define B43_DMA64_TXSUSPEND 0x00000002
#define B43_DMA64_TXLOOPBACK 0x00000004
#define B43_DMA64_TXFLUSH 0x00000010
#define B43_DMA64_TXADDREXT_MASK 0x00030000
#define B43_DMA64_TXADDREXT_SHIFT 16
#define B43_DMA64_TXINDEX 0x04
#define B43_DMA64_TXRINGLO 0x08
#define B43_DMA64_TXRINGHI 0x0C
#define B43_DMA64_TXSTATUS 0x10
#define B43_DMA64_TXSTATDPTR 0x00001FFF
#define B43_DMA64_TXSTAT 0xF0000000
#define B43_DMA64_TXSTAT_DISABLED 0x00000000
#define B43_DMA64_TXSTAT_ACTIVE 0x10000000
#define B43_DMA64_TXSTAT_IDLEWAIT 0x20000000
#define B43_DMA64_TXSTAT_STOPPED 0x30000000
#define B43_DMA64_TXSTAT_SUSP 0x40000000
#define B43_DMA64_TXERROR 0x14
#define B43_DMA64_TXERRDPTR 0x0001FFFF
#define B43_DMA64_TXERR 0xF0000000
#define B43_DMA64_TXERR_NOERR 0x00000000
#define B43_DMA64_TXERR_PROT 0x10000000
#define B43_DMA64_TXERR_UNDERRUN 0x20000000
#define B43_DMA64_TXERR_TRANSFER 0x30000000
#define B43_DMA64_TXERR_DESCREAD 0x40000000
#define B43_DMA64_TXERR_CORE 0x50000000
#define B43_DMA64_RXCTL 0x20
#define B43_DMA64_RXENABLE 0x00000001
#define B43_DMA64_RXFROFF_MASK 0x000000FE
#define B43_DMA64_RXFROFF_SHIFT 1
#define B43_DMA64_RXDIRECTFIFO 0x00000100
#define B43_DMA64_RXADDREXT_MASK 0x00030000
#define B43_DMA64_RXADDREXT_SHIFT 16
#define B43_DMA64_RXINDEX 0x24
#define B43_DMA64_RXRINGLO 0x28
#define B43_DMA64_RXRINGHI 0x2C
#define B43_DMA64_RXSTATUS 0x30
#define B43_DMA64_RXSTATDPTR 0x00001FFF
#define B43_DMA64_RXSTAT 0xF0000000
#define B43_DMA64_RXSTAT_DISABLED 0x00000000
#define B43_DMA64_RXSTAT_ACTIVE 0x10000000
#define B43_DMA64_RXSTAT_IDLEWAIT 0x20000000
#define B43_DMA64_RXSTAT_STOPPED 0x30000000
#define B43_DMA64_RXSTAT_SUSP 0x40000000
#define B43_DMA64_RXERROR 0x34
#define B43_DMA64_RXERRDPTR 0x0001FFFF
#define B43_DMA64_RXERR 0xF0000000
#define B43_DMA64_RXERR_NOERR 0x00000000
#define B43_DMA64_RXERR_PROT 0x10000000
#define B43_DMA64_RXERR_UNDERRUN 0x20000000
#define B43_DMA64_RXERR_TRANSFER 0x30000000
#define B43_DMA64_RXERR_DESCREAD 0x40000000
#define B43_DMA64_RXERR_CORE 0x50000000
/* 64-bit DMA descriptor. */
struct b43_dmadesc64 {
__le32 control0;
__le32 control1;
__le32 address_low;
__le32 address_high;
} __packed;
#define B43_DMA64_DCTL0_DTABLEEND 0x10000000
#define B43_DMA64_DCTL0_IRQ 0x20000000
#define B43_DMA64_DCTL0_FRAMEEND 0x40000000
#define B43_DMA64_DCTL0_FRAMESTART 0x80000000
#define B43_DMA64_DCTL1_BYTECNT 0x00001FFF
#define B43_DMA64_DCTL1_ADDREXT_MASK 0x00030000
#define B43_DMA64_DCTL1_ADDREXT_SHIFT 16
struct b43_dmadesc_generic {
union {
struct b43_dmadesc32 dma32;
struct b43_dmadesc64 dma64;
} __packed;
} __packed;
/* Misc DMA constants */
#define B43_DMA_RINGMEMSIZE PAGE_SIZE
#define B43_DMA0_RX_FRAMEOFFSET 30
/* DMA engine tuning knobs */
#define B43_TXRING_SLOTS 256
#define B43_RXRING_SLOTS 64
#define B43_DMA0_RX_BUFFERSIZE (B43_DMA0_RX_FRAMEOFFSET + IEEE80211_MAX_FRAME_LEN)
/* Pointer poison */
#define B43_DMA_PTR_POISON ((void *)ERR_PTR(-ENOMEM))
#define b43_dma_ptr_is_poisoned(ptr) (unlikely((ptr) == B43_DMA_PTR_POISON))
struct sk_buff;
struct b43_private;
struct b43_txstatus;
struct b43_dmadesc_meta {
/* The kernel DMA-able buffer. */
struct sk_buff *skb;
/* DMA base bus-address of the descriptor buffer. */
dma_addr_t dmaaddr;
/* ieee80211 TX status. Only used once per 802.11 frag. */
bool is_last_fragment;
};
struct b43_dmaring;
/* Lowlevel DMA operations that differ between 32bit and 64bit DMA. */
struct b43_dma_ops {
struct b43_dmadesc_generic *(*idx2desc) (struct b43_dmaring * ring,
int slot,
struct b43_dmadesc_meta **
meta);
void (*fill_descriptor) (struct b43_dmaring * ring,
struct b43_dmadesc_generic * desc,
dma_addr_t dmaaddr, u16 bufsize, int start,
int end, int irq);
void (*poke_tx) (struct b43_dmaring * ring, int slot);
void (*tx_suspend) (struct b43_dmaring * ring);
void (*tx_resume) (struct b43_dmaring * ring);
int (*get_current_rxslot) (struct b43_dmaring * ring);
void (*set_current_rxslot) (struct b43_dmaring * ring, int slot);
};
enum b43_dmatype {
B43_DMA_30BIT = 30,
B43_DMA_32BIT = 32,
B43_DMA_64BIT = 64,
};
struct b43_dmaring {
/* Lowlevel DMA ops. */
const struct b43_dma_ops *ops;
/* Kernel virtual base address of the ring memory. */
void *descbase;
/* Meta data about all descriptors. */
struct b43_dmadesc_meta *meta;
/* Cache of TX headers for each TX frame.
* This is to avoid an allocation on each TX.
* This is NULL for an RX ring.
*/
u8 *txhdr_cache;
/* (Unadjusted) DMA base bus-address of the ring memory. */
dma_addr_t dmabase;
/* Number of descriptor slots in the ring. */
int nr_slots;
/* Number of used descriptor slots. */
int used_slots;
/* Currently used slot in the ring. */
int current_slot;
/* Frameoffset in octets. */
u32 frameoffset;
/* Descriptor buffer size. */
u16 rx_buffersize;
/* The MMIO base register of the DMA controller. */
u16 mmio_base;
/* DMA controller index number (0-5). */
int index;
/* Boolean. Is this a TX ring? */
bool tx;
/* The type of DMA engine used. */
enum b43_dmatype type;
/* Boolean. Is this ring stopped at ieee80211 level? */
bool stopped;
/* The QOS priority assigned to this ring. Only used for TX rings.
* This is the mac80211 "queue" value. */
u8 queue_prio;
struct b43_wldev *dev;
#ifdef CONFIG_B43_DEBUG
/* Maximum number of used slots. */
int max_used_slots;
/* Last time we injected a ring overflow. */
unsigned long last_injected_overflow;
/* Statistics: Number of successfully transmitted packets */
u64 nr_succeed_tx_packets;
/* Statistics: Number of failed TX packets */
u64 nr_failed_tx_packets;
/* Statistics: Total number of TX plus all retries. */
u64 nr_total_packet_tries;
#endif /* CONFIG_B43_DEBUG */
};
static inline u32 b43_dma_read(struct b43_dmaring *ring, u16 offset)
{
return b43_read32(ring->dev, ring->mmio_base + offset);
}
static inline void b43_dma_write(struct b43_dmaring *ring, u16 offset, u32 value)
{
b43_write32(ring->dev, ring->mmio_base + offset, value);
}
int b43_dma_init(struct b43_wldev *dev);
void b43_dma_free(struct b43_wldev *dev);
void b43_dma_tx_suspend(struct b43_wldev *dev);
void b43_dma_tx_resume(struct b43_wldev *dev);
int b43_dma_tx(struct b43_wldev *dev,
struct sk_buff *skb);
void b43_dma_handle_txstatus(struct b43_wldev *dev,
const struct b43_txstatus *status);
void b43_dma_rx(struct b43_dmaring *ring);
void b43_dma_direct_fifo_rx(struct b43_wldev *dev,
unsigned int engine_index, bool enable);
#endif /* B43_DMA_H_ */
|
3508_1
|
crossvul
|
h
|
CWE-119
|
Improper Restriction of Operations within the Bounds of a Memory Buffer - Certain languages allow direct addressing of memory locations and do not automatically ensure that these locations are valid for the memory buffer that is being referenced.
|
c
|
/*
* Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
* Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
*
* This copyrighted material is made available to anyone wishing to use,
* modify, copy, or redistribute it subject to the terms and conditions
* of the GNU General Public License version 2.
*/
#ifndef __INCORE_DOT_H__
#define __INCORE_DOT_H__
#include <linux/fs.h>
#include <linux/kobject.h>
#include <linux/workqueue.h>
#include <linux/dlm.h>
#include <linux/buffer_head.h>
#include <linux/rcupdate.h>
#include <linux/rculist_bl.h>
#include <linux/completion.h>
#include <linux/rbtree.h>
#define DIO_WAIT 0x00000010
#define DIO_METADATA 0x00000020
struct gfs2_log_operations;
struct gfs2_log_element;
struct gfs2_holder;
struct gfs2_glock;
struct gfs2_quota_data;
struct gfs2_trans;
struct gfs2_ail;
struct gfs2_jdesc;
struct gfs2_sbd;
struct lm_lockops;
typedef void (*gfs2_glop_bh_t) (struct gfs2_glock *gl, unsigned int ret);
struct gfs2_log_header_host {
u64 lh_sequence; /* Sequence number of this transaction */
u32 lh_flags; /* GFS2_LOG_HEAD_... */
u32 lh_tail; /* Block number of log tail */
u32 lh_blkno;
u32 lh_hash;
};
/*
* Structure of operations that are associated with each
* type of element in the log.
*/
struct gfs2_log_operations {
void (*lo_add) (struct gfs2_sbd *sdp, struct gfs2_log_element *le);
void (*lo_before_commit) (struct gfs2_sbd *sdp);
void (*lo_after_commit) (struct gfs2_sbd *sdp, struct gfs2_ail *ai);
void (*lo_before_scan) (struct gfs2_jdesc *jd,
struct gfs2_log_header_host *head, int pass);
int (*lo_scan_elements) (struct gfs2_jdesc *jd, unsigned int start,
struct gfs2_log_descriptor *ld, __be64 *ptr,
int pass);
void (*lo_after_scan) (struct gfs2_jdesc *jd, int error, int pass);
const char *lo_name;
};
struct gfs2_log_element {
struct list_head le_list;
const struct gfs2_log_operations *le_ops;
};
#define GBF_FULL 1
struct gfs2_bitmap {
struct buffer_head *bi_bh;
char *bi_clone;
unsigned long bi_flags;
u32 bi_offset;
u32 bi_start;
u32 bi_len;
};
struct gfs2_rgrpd {
struct rb_node rd_node; /* Link with superblock */
struct gfs2_glock *rd_gl; /* Glock for this rgrp */
u64 rd_addr; /* grp block disk address */
u64 rd_data0; /* first data location */
u32 rd_length; /* length of rgrp header in fs blocks */
u32 rd_data; /* num of data blocks in rgrp */
u32 rd_bitbytes; /* number of bytes in data bitmaps */
u32 rd_free;
u32 rd_free_clone;
u32 rd_dinodes;
u64 rd_igeneration;
struct gfs2_bitmap *rd_bits;
struct gfs2_sbd *rd_sbd;
u32 rd_last_alloc;
u32 rd_flags;
#define GFS2_RDF_CHECK 0x10000000 /* check for unlinked inodes */
#define GFS2_RDF_UPTODATE 0x20000000 /* rg is up to date */
#define GFS2_RDF_ERROR 0x40000000 /* error in rg */
#define GFS2_RDF_MASK 0xf0000000 /* mask for internal flags */
};
enum gfs2_state_bits {
BH_Pinned = BH_PrivateStart,
BH_Escaped = BH_PrivateStart + 1,
};
BUFFER_FNS(Pinned, pinned)
TAS_BUFFER_FNS(Pinned, pinned)
BUFFER_FNS(Escaped, escaped)
TAS_BUFFER_FNS(Escaped, escaped)
struct gfs2_bufdata {
struct buffer_head *bd_bh;
struct gfs2_glock *bd_gl;
union {
struct list_head list_tr;
u64 blkno;
} u;
#define bd_list_tr u.list_tr
#define bd_blkno u.blkno
struct gfs2_log_element bd_le;
struct gfs2_ail *bd_ail;
struct list_head bd_ail_st_list;
struct list_head bd_ail_gl_list;
};
/*
* Internally, we prefix things with gdlm_ and GDLM_ (for gfs-dlm) since a
* prefix of lock_dlm_ gets awkward.
*/
#define GDLM_STRNAME_BYTES 25
#define GDLM_LVB_SIZE 32
enum {
DFL_BLOCK_LOCKS = 0,
};
struct lm_lockname {
u64 ln_number;
unsigned int ln_type;
};
#define lm_name_equal(name1, name2) \
(((name1)->ln_number == (name2)->ln_number) && \
((name1)->ln_type == (name2)->ln_type))
struct gfs2_glock_operations {
void (*go_xmote_th) (struct gfs2_glock *gl);
int (*go_xmote_bh) (struct gfs2_glock *gl, struct gfs2_holder *gh);
void (*go_inval) (struct gfs2_glock *gl, int flags);
int (*go_demote_ok) (const struct gfs2_glock *gl);
int (*go_lock) (struct gfs2_holder *gh);
void (*go_unlock) (struct gfs2_holder *gh);
int (*go_dump)(struct seq_file *seq, const struct gfs2_glock *gl);
void (*go_callback) (struct gfs2_glock *gl);
const int go_type;
const unsigned long go_flags;
#define GLOF_ASPACE 1
};
enum {
/* States */
HIF_HOLDER = 6, /* Set for gh that "holds" the glock */
HIF_FIRST = 7,
HIF_WAIT = 10,
};
struct gfs2_holder {
struct list_head gh_list;
struct gfs2_glock *gh_gl;
struct pid *gh_owner_pid;
unsigned int gh_state;
unsigned gh_flags;
int gh_error;
unsigned long gh_iflags; /* HIF_... */
unsigned long gh_ip;
};
enum {
GLF_LOCK = 1,
GLF_DEMOTE = 3,
GLF_PENDING_DEMOTE = 4,
GLF_DEMOTE_IN_PROGRESS = 5,
GLF_DIRTY = 6,
GLF_LFLUSH = 7,
GLF_INVALIDATE_IN_PROGRESS = 8,
GLF_REPLY_PENDING = 9,
GLF_INITIAL = 10,
GLF_FROZEN = 11,
GLF_QUEUED = 12,
GLF_LRU = 13,
GLF_OBJECT = 14, /* Used only for tracing */
};
struct gfs2_glock {
struct hlist_bl_node gl_list;
unsigned long gl_flags; /* GLF_... */
struct lm_lockname gl_name;
atomic_t gl_ref;
spinlock_t gl_spin;
/* State fields protected by gl_spin */
unsigned int gl_state:2, /* Current state */
gl_target:2, /* Target state */
gl_demote_state:2, /* State requested by remote node */
gl_req:2, /* State in last dlm request */
gl_reply:8; /* Last reply from the dlm */
unsigned int gl_hash;
unsigned long gl_demote_time; /* time of first demote request */
long gl_hold_time;
struct list_head gl_holders;
const struct gfs2_glock_operations *gl_ops;
char gl_strname[GDLM_STRNAME_BYTES];
struct dlm_lksb gl_lksb;
char gl_lvb[32];
unsigned long gl_tchange;
void *gl_object;
struct list_head gl_lru;
struct gfs2_sbd *gl_sbd;
struct list_head gl_ail_list;
atomic_t gl_ail_count;
atomic_t gl_revokes;
struct delayed_work gl_work;
struct work_struct gl_delete;
struct rcu_head gl_rcu;
};
#define GFS2_MIN_LVB_SIZE 32 /* Min size of LVB that gfs2 supports */
struct gfs2_alloc {
/* Quota stuff */
struct gfs2_quota_data *al_qd[2*MAXQUOTAS];
struct gfs2_holder al_qd_ghs[2*MAXQUOTAS];
unsigned int al_qd_num;
u32 al_requested; /* Filled in by caller of gfs2_inplace_reserve() */
u32 al_alloced; /* Filled in by gfs2_alloc_*() */
/* Filled in by gfs2_inplace_reserve() */
unsigned int al_line;
char *al_file;
struct gfs2_holder al_rgd_gh;
};
enum {
GIF_INVALID = 0,
GIF_QD_LOCKED = 1,
GIF_ALLOC_FAILED = 2,
GIF_SW_PAGED = 3,
};
struct gfs2_inode {
struct inode i_inode;
u64 i_no_addr;
u64 i_no_formal_ino;
u64 i_generation;
u64 i_eattr;
unsigned long i_flags; /* GIF_... */
struct gfs2_glock *i_gl; /* Move into i_gh? */
struct gfs2_holder i_iopen_gh;
struct gfs2_holder i_gh; /* for prepare/commit_write only */
struct gfs2_alloc *i_alloc;
struct gfs2_rgrpd *i_rgd;
u64 i_goal; /* goal block for allocations */
struct rw_semaphore i_rw_mutex;
struct list_head i_trunc_list;
__be64 *i_hash_cache;
u32 i_entries;
u32 i_diskflags;
u8 i_height;
u8 i_depth;
};
/*
* Since i_inode is the first element of struct gfs2_inode,
* this is effectively a cast.
*/
static inline struct gfs2_inode *GFS2_I(struct inode *inode)
{
return container_of(inode, struct gfs2_inode, i_inode);
}
static inline struct gfs2_sbd *GFS2_SB(const struct inode *inode)
{
return inode->i_sb->s_fs_info;
}
struct gfs2_file {
struct mutex f_fl_mutex;
struct gfs2_holder f_fl_gh;
};
struct gfs2_revoke_replay {
struct list_head rr_list;
u64 rr_blkno;
unsigned int rr_where;
};
enum {
QDF_USER = 0,
QDF_CHANGE = 1,
QDF_LOCKED = 2,
QDF_REFRESH = 3,
};
struct gfs2_quota_data {
struct list_head qd_list;
struct list_head qd_reclaim;
atomic_t qd_count;
u32 qd_id;
unsigned long qd_flags; /* QDF_... */
s64 qd_change;
s64 qd_change_sync;
unsigned int qd_slot;
unsigned int qd_slot_count;
struct buffer_head *qd_bh;
struct gfs2_quota_change *qd_bh_qc;
unsigned int qd_bh_count;
struct gfs2_glock *qd_gl;
struct gfs2_quota_lvb qd_qb;
u64 qd_sync_gen;
unsigned long qd_last_warn;
};
struct gfs2_trans {
unsigned long tr_ip;
unsigned int tr_blocks;
unsigned int tr_revokes;
unsigned int tr_reserved;
struct gfs2_holder tr_t_gh;
int tr_touched;
unsigned int tr_num_buf;
unsigned int tr_num_buf_new;
unsigned int tr_num_databuf_new;
unsigned int tr_num_buf_rm;
unsigned int tr_num_databuf_rm;
struct list_head tr_list_buf;
unsigned int tr_num_revoke;
unsigned int tr_num_revoke_rm;
};
struct gfs2_ail {
struct list_head ai_list;
unsigned int ai_first;
struct list_head ai_ail1_list;
struct list_head ai_ail2_list;
};
struct gfs2_journal_extent {
struct list_head extent_list;
unsigned int lblock; /* First logical block */
u64 dblock; /* First disk block */
u64 blocks;
};
struct gfs2_jdesc {
struct list_head jd_list;
struct list_head extent_list;
struct work_struct jd_work;
struct inode *jd_inode;
unsigned long jd_flags;
#define JDF_RECOVERY 1
unsigned int jd_jid;
unsigned int jd_blocks;
};
struct gfs2_statfs_change_host {
s64 sc_total;
s64 sc_free;
s64 sc_dinodes;
};
#define GFS2_QUOTA_DEFAULT GFS2_QUOTA_OFF
#define GFS2_QUOTA_OFF 0
#define GFS2_QUOTA_ACCOUNT 1
#define GFS2_QUOTA_ON 2
#define GFS2_DATA_DEFAULT GFS2_DATA_ORDERED
#define GFS2_DATA_WRITEBACK 1
#define GFS2_DATA_ORDERED 2
#define GFS2_ERRORS_DEFAULT GFS2_ERRORS_WITHDRAW
#define GFS2_ERRORS_WITHDRAW 0
#define GFS2_ERRORS_CONTINUE 1 /* place holder for future feature */
#define GFS2_ERRORS_RO 2 /* place holder for future feature */
#define GFS2_ERRORS_PANIC 3
struct gfs2_args {
char ar_lockproto[GFS2_LOCKNAME_LEN]; /* Name of the Lock Protocol */
char ar_locktable[GFS2_LOCKNAME_LEN]; /* Name of the Lock Table */
char ar_hostdata[GFS2_LOCKNAME_LEN]; /* Host specific data */
unsigned int ar_spectator:1; /* Don't get a journal */
unsigned int ar_localflocks:1; /* Let the VFS do flock|fcntl */
unsigned int ar_debug:1; /* Oops on errors */
unsigned int ar_posix_acl:1; /* Enable posix acls */
unsigned int ar_quota:2; /* off/account/on */
unsigned int ar_suiddir:1; /* suiddir support */
unsigned int ar_data:2; /* ordered/writeback */
unsigned int ar_meta:1; /* mount metafs */
unsigned int ar_discard:1; /* discard requests */
unsigned int ar_errors:2; /* errors=withdraw | panic */
unsigned int ar_nobarrier:1; /* do not send barriers */
int ar_commit; /* Commit interval */
int ar_statfs_quantum; /* The fast statfs interval */
int ar_quota_quantum; /* The quota interval */
int ar_statfs_percent; /* The % change to force sync */
};
struct gfs2_tune {
spinlock_t gt_spin;
unsigned int gt_logd_secs;
unsigned int gt_quota_simul_sync; /* Max quotavals to sync at once */
unsigned int gt_quota_warn_period; /* Secs between quota warn msgs */
unsigned int gt_quota_scale_num; /* Numerator */
unsigned int gt_quota_scale_den; /* Denominator */
unsigned int gt_quota_quantum; /* Secs between syncs to quota file */
unsigned int gt_new_files_jdata;
unsigned int gt_max_readahead; /* Max bytes to read-ahead from disk */
unsigned int gt_complain_secs;
unsigned int gt_statfs_quantum;
unsigned int gt_statfs_slow;
};
enum {
SDF_JOURNAL_CHECKED = 0,
SDF_JOURNAL_LIVE = 1,
SDF_SHUTDOWN = 2,
SDF_NOBARRIERS = 3,
SDF_NORECOVERY = 4,
SDF_DEMOTE = 5,
SDF_NOJOURNALID = 6,
};
#define GFS2_FSNAME_LEN 256
struct gfs2_inum_host {
u64 no_formal_ino;
u64 no_addr;
};
struct gfs2_sb_host {
u32 sb_magic;
u32 sb_type;
u32 sb_format;
u32 sb_fs_format;
u32 sb_multihost_format;
u32 sb_bsize;
u32 sb_bsize_shift;
struct gfs2_inum_host sb_master_dir;
struct gfs2_inum_host sb_root_dir;
char sb_lockproto[GFS2_LOCKNAME_LEN];
char sb_locktable[GFS2_LOCKNAME_LEN];
};
/*
* lm_mount() return values
*
* ls_jid - the journal ID this node should use
* ls_first - this node is the first to mount the file system
* ls_lockspace - lock module's context for this file system
* ls_ops - lock module's functions
*/
struct lm_lockstruct {
int ls_jid;
unsigned int ls_first;
unsigned int ls_first_done;
unsigned int ls_nodir;
const struct lm_lockops *ls_ops;
unsigned long ls_flags;
dlm_lockspace_t *ls_dlm;
int ls_recover_jid_done;
int ls_recover_jid_status;
};
struct gfs2_sbd {
struct super_block *sd_vfs;
struct kobject sd_kobj;
unsigned long sd_flags; /* SDF_... */
struct gfs2_sb_host sd_sb;
/* Constants computed on mount */
u32 sd_fsb2bb;
u32 sd_fsb2bb_shift;
u32 sd_diptrs; /* Number of pointers in a dinode */
u32 sd_inptrs; /* Number of pointers in a indirect block */
u32 sd_jbsize; /* Size of a journaled data block */
u32 sd_hash_bsize; /* sizeof(exhash block) */
u32 sd_hash_bsize_shift;
u32 sd_hash_ptrs; /* Number of pointers in a hash block */
u32 sd_qc_per_block;
u32 sd_max_dirres; /* Max blocks needed to add a directory entry */
u32 sd_max_height; /* Max height of a file's metadata tree */
u64 sd_heightsize[GFS2_MAX_META_HEIGHT + 1];
u32 sd_max_jheight; /* Max height of journaled file's meta tree */
u64 sd_jheightsize[GFS2_MAX_META_HEIGHT + 1];
struct gfs2_args sd_args; /* Mount arguments */
struct gfs2_tune sd_tune; /* Filesystem tuning structure */
/* Lock Stuff */
struct lm_lockstruct sd_lockstruct;
struct gfs2_holder sd_live_gh;
struct gfs2_glock *sd_rename_gl;
struct gfs2_glock *sd_trans_gl;
wait_queue_head_t sd_glock_wait;
atomic_t sd_glock_disposal;
struct completion sd_locking_init;
/* Inode Stuff */
struct dentry *sd_master_dir;
struct dentry *sd_root_dir;
struct inode *sd_jindex;
struct inode *sd_statfs_inode;
struct inode *sd_sc_inode;
struct inode *sd_qc_inode;
struct inode *sd_rindex;
struct inode *sd_quota_inode;
/* StatFS stuff */
spinlock_t sd_statfs_spin;
struct gfs2_statfs_change_host sd_statfs_master;
struct gfs2_statfs_change_host sd_statfs_local;
int sd_statfs_force_sync;
/* Resource group stuff */
int sd_rindex_uptodate;
spinlock_t sd_rindex_spin;
struct mutex sd_rindex_mutex;
struct rb_root sd_rindex_tree;
unsigned int sd_rgrps;
unsigned int sd_max_rg_data;
/* Journal index stuff */
struct list_head sd_jindex_list;
spinlock_t sd_jindex_spin;
struct mutex sd_jindex_mutex;
unsigned int sd_journals;
struct gfs2_jdesc *sd_jdesc;
struct gfs2_holder sd_journal_gh;
struct gfs2_holder sd_jinode_gh;
struct gfs2_holder sd_sc_gh;
struct gfs2_holder sd_qc_gh;
/* Daemon stuff */
struct task_struct *sd_logd_process;
struct task_struct *sd_quotad_process;
/* Quota stuff */
struct list_head sd_quota_list;
atomic_t sd_quota_count;
struct mutex sd_quota_mutex;
wait_queue_head_t sd_quota_wait;
struct list_head sd_trunc_list;
spinlock_t sd_trunc_lock;
unsigned int sd_quota_slots;
unsigned int sd_quota_chunks;
unsigned char **sd_quota_bitmap;
u64 sd_quota_sync_gen;
/* Log stuff */
spinlock_t sd_log_lock;
unsigned int sd_log_blks_reserved;
unsigned int sd_log_commited_buf;
unsigned int sd_log_commited_databuf;
int sd_log_commited_revoke;
atomic_t sd_log_pinned;
unsigned int sd_log_num_buf;
unsigned int sd_log_num_revoke;
unsigned int sd_log_num_rg;
unsigned int sd_log_num_databuf;
struct list_head sd_log_le_buf;
struct list_head sd_log_le_revoke;
struct list_head sd_log_le_rg;
struct list_head sd_log_le_databuf;
struct list_head sd_log_le_ordered;
atomic_t sd_log_thresh1;
atomic_t sd_log_thresh2;
atomic_t sd_log_blks_free;
wait_queue_head_t sd_log_waitq;
wait_queue_head_t sd_logd_waitq;
u64 sd_log_sequence;
unsigned int sd_log_head;
unsigned int sd_log_tail;
int sd_log_idle;
struct rw_semaphore sd_log_flush_lock;
atomic_t sd_log_in_flight;
wait_queue_head_t sd_log_flush_wait;
unsigned int sd_log_flush_head;
u64 sd_log_flush_wrapped;
spinlock_t sd_ail_lock;
struct list_head sd_ail1_list;
struct list_head sd_ail2_list;
/* Replay stuff */
struct list_head sd_revoke_list;
unsigned int sd_replay_tail;
unsigned int sd_found_blocks;
unsigned int sd_found_revokes;
unsigned int sd_replayed_blocks;
/* For quiescing the filesystem */
struct gfs2_holder sd_freeze_gh;
struct mutex sd_freeze_lock;
unsigned int sd_freeze_count;
char sd_fsname[GFS2_FSNAME_LEN];
char sd_table_name[GFS2_FSNAME_LEN];
char sd_proto_name[GFS2_FSNAME_LEN];
/* Debugging crud */
unsigned long sd_last_warning;
struct dentry *debugfs_dir; /* debugfs directory */
struct dentry *debugfs_dentry_glocks; /* for debugfs */
};
#endif /* __INCORE_DOT_H__ */
|
/*
* Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
* Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
*
* This copyrighted material is made available to anyone wishing to use,
* modify, copy, or redistribute it subject to the terms and conditions
* of the GNU General Public License version 2.
*/
#ifndef __INCORE_DOT_H__
#define __INCORE_DOT_H__
#include <linux/fs.h>
#include <linux/kobject.h>
#include <linux/workqueue.h>
#include <linux/dlm.h>
#include <linux/buffer_head.h>
#include <linux/rcupdate.h>
#include <linux/rculist_bl.h>
#include <linux/completion.h>
#include <linux/rbtree.h>
#define DIO_WAIT 0x00000010
#define DIO_METADATA 0x00000020
struct gfs2_log_operations;
struct gfs2_log_element;
struct gfs2_holder;
struct gfs2_glock;
struct gfs2_quota_data;
struct gfs2_trans;
struct gfs2_ail;
struct gfs2_jdesc;
struct gfs2_sbd;
struct lm_lockops;
typedef void (*gfs2_glop_bh_t) (struct gfs2_glock *gl, unsigned int ret);
struct gfs2_log_header_host {
u64 lh_sequence; /* Sequence number of this transaction */
u32 lh_flags; /* GFS2_LOG_HEAD_... */
u32 lh_tail; /* Block number of log tail */
u32 lh_blkno;
u32 lh_hash;
};
/*
* Structure of operations that are associated with each
* type of element in the log.
*/
struct gfs2_log_operations {
void (*lo_add) (struct gfs2_sbd *sdp, struct gfs2_log_element *le);
void (*lo_before_commit) (struct gfs2_sbd *sdp);
void (*lo_after_commit) (struct gfs2_sbd *sdp, struct gfs2_ail *ai);
void (*lo_before_scan) (struct gfs2_jdesc *jd,
struct gfs2_log_header_host *head, int pass);
int (*lo_scan_elements) (struct gfs2_jdesc *jd, unsigned int start,
struct gfs2_log_descriptor *ld, __be64 *ptr,
int pass);
void (*lo_after_scan) (struct gfs2_jdesc *jd, int error, int pass);
const char *lo_name;
};
struct gfs2_log_element {
struct list_head le_list;
const struct gfs2_log_operations *le_ops;
};
#define GBF_FULL 1
struct gfs2_bitmap {
struct buffer_head *bi_bh;
char *bi_clone;
unsigned long bi_flags;
u32 bi_offset;
u32 bi_start;
u32 bi_len;
};
struct gfs2_rgrpd {
struct rb_node rd_node; /* Link with superblock */
struct gfs2_glock *rd_gl; /* Glock for this rgrp */
u64 rd_addr; /* grp block disk address */
u64 rd_data0; /* first data location */
u32 rd_length; /* length of rgrp header in fs blocks */
u32 rd_data; /* num of data blocks in rgrp */
u32 rd_bitbytes; /* number of bytes in data bitmaps */
u32 rd_free;
u32 rd_free_clone;
u32 rd_dinodes;
u64 rd_igeneration;
struct gfs2_bitmap *rd_bits;
struct gfs2_sbd *rd_sbd;
u32 rd_last_alloc;
u32 rd_flags;
#define GFS2_RDF_CHECK 0x10000000 /* check for unlinked inodes */
#define GFS2_RDF_UPTODATE 0x20000000 /* rg is up to date */
#define GFS2_RDF_ERROR 0x40000000 /* error in rg */
#define GFS2_RDF_MASK 0xf0000000 /* mask for internal flags */
};
enum gfs2_state_bits {
BH_Pinned = BH_PrivateStart,
BH_Escaped = BH_PrivateStart + 1,
BH_Zeronew = BH_PrivateStart + 2,
};
BUFFER_FNS(Pinned, pinned)
TAS_BUFFER_FNS(Pinned, pinned)
BUFFER_FNS(Escaped, escaped)
TAS_BUFFER_FNS(Escaped, escaped)
BUFFER_FNS(Zeronew, zeronew)
TAS_BUFFER_FNS(Zeronew, zeronew)
struct gfs2_bufdata {
struct buffer_head *bd_bh;
struct gfs2_glock *bd_gl;
union {
struct list_head list_tr;
u64 blkno;
} u;
#define bd_list_tr u.list_tr
#define bd_blkno u.blkno
struct gfs2_log_element bd_le;
struct gfs2_ail *bd_ail;
struct list_head bd_ail_st_list;
struct list_head bd_ail_gl_list;
};
/*
* Internally, we prefix things with gdlm_ and GDLM_ (for gfs-dlm) since a
* prefix of lock_dlm_ gets awkward.
*/
#define GDLM_STRNAME_BYTES 25
#define GDLM_LVB_SIZE 32
enum {
DFL_BLOCK_LOCKS = 0,
};
struct lm_lockname {
u64 ln_number;
unsigned int ln_type;
};
#define lm_name_equal(name1, name2) \
(((name1)->ln_number == (name2)->ln_number) && \
((name1)->ln_type == (name2)->ln_type))
struct gfs2_glock_operations {
void (*go_xmote_th) (struct gfs2_glock *gl);
int (*go_xmote_bh) (struct gfs2_glock *gl, struct gfs2_holder *gh);
void (*go_inval) (struct gfs2_glock *gl, int flags);
int (*go_demote_ok) (const struct gfs2_glock *gl);
int (*go_lock) (struct gfs2_holder *gh);
void (*go_unlock) (struct gfs2_holder *gh);
int (*go_dump)(struct seq_file *seq, const struct gfs2_glock *gl);
void (*go_callback) (struct gfs2_glock *gl);
const int go_type;
const unsigned long go_flags;
#define GLOF_ASPACE 1
};
enum {
/* States */
HIF_HOLDER = 6, /* Set for gh that "holds" the glock */
HIF_FIRST = 7,
HIF_WAIT = 10,
};
struct gfs2_holder {
struct list_head gh_list;
struct gfs2_glock *gh_gl;
struct pid *gh_owner_pid;
unsigned int gh_state;
unsigned gh_flags;
int gh_error;
unsigned long gh_iflags; /* HIF_... */
unsigned long gh_ip;
};
enum {
GLF_LOCK = 1,
GLF_DEMOTE = 3,
GLF_PENDING_DEMOTE = 4,
GLF_DEMOTE_IN_PROGRESS = 5,
GLF_DIRTY = 6,
GLF_LFLUSH = 7,
GLF_INVALIDATE_IN_PROGRESS = 8,
GLF_REPLY_PENDING = 9,
GLF_INITIAL = 10,
GLF_FROZEN = 11,
GLF_QUEUED = 12,
GLF_LRU = 13,
GLF_OBJECT = 14, /* Used only for tracing */
};
struct gfs2_glock {
struct hlist_bl_node gl_list;
unsigned long gl_flags; /* GLF_... */
struct lm_lockname gl_name;
atomic_t gl_ref;
spinlock_t gl_spin;
/* State fields protected by gl_spin */
unsigned int gl_state:2, /* Current state */
gl_target:2, /* Target state */
gl_demote_state:2, /* State requested by remote node */
gl_req:2, /* State in last dlm request */
gl_reply:8; /* Last reply from the dlm */
unsigned int gl_hash;
unsigned long gl_demote_time; /* time of first demote request */
long gl_hold_time;
struct list_head gl_holders;
const struct gfs2_glock_operations *gl_ops;
char gl_strname[GDLM_STRNAME_BYTES];
struct dlm_lksb gl_lksb;
char gl_lvb[32];
unsigned long gl_tchange;
void *gl_object;
struct list_head gl_lru;
struct gfs2_sbd *gl_sbd;
struct list_head gl_ail_list;
atomic_t gl_ail_count;
atomic_t gl_revokes;
struct delayed_work gl_work;
struct work_struct gl_delete;
struct rcu_head gl_rcu;
};
#define GFS2_MIN_LVB_SIZE 32 /* Min size of LVB that gfs2 supports */
struct gfs2_alloc {
/* Quota stuff */
struct gfs2_quota_data *al_qd[2*MAXQUOTAS];
struct gfs2_holder al_qd_ghs[2*MAXQUOTAS];
unsigned int al_qd_num;
u32 al_requested; /* Filled in by caller of gfs2_inplace_reserve() */
u32 al_alloced; /* Filled in by gfs2_alloc_*() */
/* Filled in by gfs2_inplace_reserve() */
unsigned int al_line;
char *al_file;
struct gfs2_holder al_rgd_gh;
};
enum {
GIF_INVALID = 0,
GIF_QD_LOCKED = 1,
GIF_ALLOC_FAILED = 2,
GIF_SW_PAGED = 3,
};
struct gfs2_inode {
struct inode i_inode;
u64 i_no_addr;
u64 i_no_formal_ino;
u64 i_generation;
u64 i_eattr;
unsigned long i_flags; /* GIF_... */
struct gfs2_glock *i_gl; /* Move into i_gh? */
struct gfs2_holder i_iopen_gh;
struct gfs2_holder i_gh; /* for prepare/commit_write only */
struct gfs2_alloc *i_alloc;
struct gfs2_rgrpd *i_rgd;
u64 i_goal; /* goal block for allocations */
struct rw_semaphore i_rw_mutex;
struct list_head i_trunc_list;
__be64 *i_hash_cache;
u32 i_entries;
u32 i_diskflags;
u8 i_height;
u8 i_depth;
};
/*
* Since i_inode is the first element of struct gfs2_inode,
* this is effectively a cast.
*/
static inline struct gfs2_inode *GFS2_I(struct inode *inode)
{
return container_of(inode, struct gfs2_inode, i_inode);
}
static inline struct gfs2_sbd *GFS2_SB(const struct inode *inode)
{
return inode->i_sb->s_fs_info;
}
struct gfs2_file {
struct mutex f_fl_mutex;
struct gfs2_holder f_fl_gh;
};
struct gfs2_revoke_replay {
struct list_head rr_list;
u64 rr_blkno;
unsigned int rr_where;
};
enum {
QDF_USER = 0,
QDF_CHANGE = 1,
QDF_LOCKED = 2,
QDF_REFRESH = 3,
};
struct gfs2_quota_data {
struct list_head qd_list;
struct list_head qd_reclaim;
atomic_t qd_count;
u32 qd_id;
unsigned long qd_flags; /* QDF_... */
s64 qd_change;
s64 qd_change_sync;
unsigned int qd_slot;
unsigned int qd_slot_count;
struct buffer_head *qd_bh;
struct gfs2_quota_change *qd_bh_qc;
unsigned int qd_bh_count;
struct gfs2_glock *qd_gl;
struct gfs2_quota_lvb qd_qb;
u64 qd_sync_gen;
unsigned long qd_last_warn;
};
struct gfs2_trans {
unsigned long tr_ip;
unsigned int tr_blocks;
unsigned int tr_revokes;
unsigned int tr_reserved;
struct gfs2_holder tr_t_gh;
int tr_touched;
unsigned int tr_num_buf;
unsigned int tr_num_buf_new;
unsigned int tr_num_databuf_new;
unsigned int tr_num_buf_rm;
unsigned int tr_num_databuf_rm;
struct list_head tr_list_buf;
unsigned int tr_num_revoke;
unsigned int tr_num_revoke_rm;
};
struct gfs2_ail {
struct list_head ai_list;
unsigned int ai_first;
struct list_head ai_ail1_list;
struct list_head ai_ail2_list;
};
struct gfs2_journal_extent {
struct list_head extent_list;
unsigned int lblock; /* First logical block */
u64 dblock; /* First disk block */
u64 blocks;
};
struct gfs2_jdesc {
struct list_head jd_list;
struct list_head extent_list;
struct work_struct jd_work;
struct inode *jd_inode;
unsigned long jd_flags;
#define JDF_RECOVERY 1
unsigned int jd_jid;
unsigned int jd_blocks;
};
struct gfs2_statfs_change_host {
s64 sc_total;
s64 sc_free;
s64 sc_dinodes;
};
#define GFS2_QUOTA_DEFAULT GFS2_QUOTA_OFF
#define GFS2_QUOTA_OFF 0
#define GFS2_QUOTA_ACCOUNT 1
#define GFS2_QUOTA_ON 2
#define GFS2_DATA_DEFAULT GFS2_DATA_ORDERED
#define GFS2_DATA_WRITEBACK 1
#define GFS2_DATA_ORDERED 2
#define GFS2_ERRORS_DEFAULT GFS2_ERRORS_WITHDRAW
#define GFS2_ERRORS_WITHDRAW 0
#define GFS2_ERRORS_CONTINUE 1 /* place holder for future feature */
#define GFS2_ERRORS_RO 2 /* place holder for future feature */
#define GFS2_ERRORS_PANIC 3
struct gfs2_args {
char ar_lockproto[GFS2_LOCKNAME_LEN]; /* Name of the Lock Protocol */
char ar_locktable[GFS2_LOCKNAME_LEN]; /* Name of the Lock Table */
char ar_hostdata[GFS2_LOCKNAME_LEN]; /* Host specific data */
unsigned int ar_spectator:1; /* Don't get a journal */
unsigned int ar_localflocks:1; /* Let the VFS do flock|fcntl */
unsigned int ar_debug:1; /* Oops on errors */
unsigned int ar_posix_acl:1; /* Enable posix acls */
unsigned int ar_quota:2; /* off/account/on */
unsigned int ar_suiddir:1; /* suiddir support */
unsigned int ar_data:2; /* ordered/writeback */
unsigned int ar_meta:1; /* mount metafs */
unsigned int ar_discard:1; /* discard requests */
unsigned int ar_errors:2; /* errors=withdraw | panic */
unsigned int ar_nobarrier:1; /* do not send barriers */
int ar_commit; /* Commit interval */
int ar_statfs_quantum; /* The fast statfs interval */
int ar_quota_quantum; /* The quota interval */
int ar_statfs_percent; /* The % change to force sync */
};
struct gfs2_tune {
spinlock_t gt_spin;
unsigned int gt_logd_secs;
unsigned int gt_quota_simul_sync; /* Max quotavals to sync at once */
unsigned int gt_quota_warn_period; /* Secs between quota warn msgs */
unsigned int gt_quota_scale_num; /* Numerator */
unsigned int gt_quota_scale_den; /* Denominator */
unsigned int gt_quota_quantum; /* Secs between syncs to quota file */
unsigned int gt_new_files_jdata;
unsigned int gt_max_readahead; /* Max bytes to read-ahead from disk */
unsigned int gt_complain_secs;
unsigned int gt_statfs_quantum;
unsigned int gt_statfs_slow;
};
enum {
SDF_JOURNAL_CHECKED = 0,
SDF_JOURNAL_LIVE = 1,
SDF_SHUTDOWN = 2,
SDF_NOBARRIERS = 3,
SDF_NORECOVERY = 4,
SDF_DEMOTE = 5,
SDF_NOJOURNALID = 6,
};
#define GFS2_FSNAME_LEN 256
struct gfs2_inum_host {
u64 no_formal_ino;
u64 no_addr;
};
struct gfs2_sb_host {
u32 sb_magic;
u32 sb_type;
u32 sb_format;
u32 sb_fs_format;
u32 sb_multihost_format;
u32 sb_bsize;
u32 sb_bsize_shift;
struct gfs2_inum_host sb_master_dir;
struct gfs2_inum_host sb_root_dir;
char sb_lockproto[GFS2_LOCKNAME_LEN];
char sb_locktable[GFS2_LOCKNAME_LEN];
};
/*
* lm_mount() return values
*
* ls_jid - the journal ID this node should use
* ls_first - this node is the first to mount the file system
* ls_lockspace - lock module's context for this file system
* ls_ops - lock module's functions
*/
struct lm_lockstruct {
int ls_jid;
unsigned int ls_first;
unsigned int ls_first_done;
unsigned int ls_nodir;
const struct lm_lockops *ls_ops;
unsigned long ls_flags;
dlm_lockspace_t *ls_dlm;
int ls_recover_jid_done;
int ls_recover_jid_status;
};
struct gfs2_sbd {
struct super_block *sd_vfs;
struct kobject sd_kobj;
unsigned long sd_flags; /* SDF_... */
struct gfs2_sb_host sd_sb;
/* Constants computed on mount */
u32 sd_fsb2bb;
u32 sd_fsb2bb_shift;
u32 sd_diptrs; /* Number of pointers in a dinode */
u32 sd_inptrs; /* Number of pointers in a indirect block */
u32 sd_jbsize; /* Size of a journaled data block */
u32 sd_hash_bsize; /* sizeof(exhash block) */
u32 sd_hash_bsize_shift;
u32 sd_hash_ptrs; /* Number of pointers in a hash block */
u32 sd_qc_per_block;
u32 sd_max_dirres; /* Max blocks needed to add a directory entry */
u32 sd_max_height; /* Max height of a file's metadata tree */
u64 sd_heightsize[GFS2_MAX_META_HEIGHT + 1];
u32 sd_max_jheight; /* Max height of journaled file's meta tree */
u64 sd_jheightsize[GFS2_MAX_META_HEIGHT + 1];
struct gfs2_args sd_args; /* Mount arguments */
struct gfs2_tune sd_tune; /* Filesystem tuning structure */
/* Lock Stuff */
struct lm_lockstruct sd_lockstruct;
struct gfs2_holder sd_live_gh;
struct gfs2_glock *sd_rename_gl;
struct gfs2_glock *sd_trans_gl;
wait_queue_head_t sd_glock_wait;
atomic_t sd_glock_disposal;
struct completion sd_locking_init;
/* Inode Stuff */
struct dentry *sd_master_dir;
struct dentry *sd_root_dir;
struct inode *sd_jindex;
struct inode *sd_statfs_inode;
struct inode *sd_sc_inode;
struct inode *sd_qc_inode;
struct inode *sd_rindex;
struct inode *sd_quota_inode;
/* StatFS stuff */
spinlock_t sd_statfs_spin;
struct gfs2_statfs_change_host sd_statfs_master;
struct gfs2_statfs_change_host sd_statfs_local;
int sd_statfs_force_sync;
/* Resource group stuff */
int sd_rindex_uptodate;
spinlock_t sd_rindex_spin;
struct mutex sd_rindex_mutex;
struct rb_root sd_rindex_tree;
unsigned int sd_rgrps;
unsigned int sd_max_rg_data;
/* Journal index stuff */
struct list_head sd_jindex_list;
spinlock_t sd_jindex_spin;
struct mutex sd_jindex_mutex;
unsigned int sd_journals;
struct gfs2_jdesc *sd_jdesc;
struct gfs2_holder sd_journal_gh;
struct gfs2_holder sd_jinode_gh;
struct gfs2_holder sd_sc_gh;
struct gfs2_holder sd_qc_gh;
/* Daemon stuff */
struct task_struct *sd_logd_process;
struct task_struct *sd_quotad_process;
/* Quota stuff */
struct list_head sd_quota_list;
atomic_t sd_quota_count;
struct mutex sd_quota_mutex;
wait_queue_head_t sd_quota_wait;
struct list_head sd_trunc_list;
spinlock_t sd_trunc_lock;
unsigned int sd_quota_slots;
unsigned int sd_quota_chunks;
unsigned char **sd_quota_bitmap;
u64 sd_quota_sync_gen;
/* Log stuff */
spinlock_t sd_log_lock;
unsigned int sd_log_blks_reserved;
unsigned int sd_log_commited_buf;
unsigned int sd_log_commited_databuf;
int sd_log_commited_revoke;
atomic_t sd_log_pinned;
unsigned int sd_log_num_buf;
unsigned int sd_log_num_revoke;
unsigned int sd_log_num_rg;
unsigned int sd_log_num_databuf;
struct list_head sd_log_le_buf;
struct list_head sd_log_le_revoke;
struct list_head sd_log_le_rg;
struct list_head sd_log_le_databuf;
struct list_head sd_log_le_ordered;
atomic_t sd_log_thresh1;
atomic_t sd_log_thresh2;
atomic_t sd_log_blks_free;
wait_queue_head_t sd_log_waitq;
wait_queue_head_t sd_logd_waitq;
u64 sd_log_sequence;
unsigned int sd_log_head;
unsigned int sd_log_tail;
int sd_log_idle;
struct rw_semaphore sd_log_flush_lock;
atomic_t sd_log_in_flight;
wait_queue_head_t sd_log_flush_wait;
unsigned int sd_log_flush_head;
u64 sd_log_flush_wrapped;
spinlock_t sd_ail_lock;
struct list_head sd_ail1_list;
struct list_head sd_ail2_list;
/* Replay stuff */
struct list_head sd_revoke_list;
unsigned int sd_replay_tail;
unsigned int sd_found_blocks;
unsigned int sd_found_revokes;
unsigned int sd_replayed_blocks;
/* For quiescing the filesystem */
struct gfs2_holder sd_freeze_gh;
struct mutex sd_freeze_lock;
unsigned int sd_freeze_count;
char sd_fsname[GFS2_FSNAME_LEN];
char sd_table_name[GFS2_FSNAME_LEN];
char sd_proto_name[GFS2_FSNAME_LEN];
/* Debugging crud */
unsigned long sd_last_warning;
struct dentry *debugfs_dir; /* debugfs directory */
struct dentry *debugfs_dentry_glocks; /* for debugfs */
};
#endif /* __INCORE_DOT_H__ */
|
3522_2
|
crossvul
|
h
|
CWE-119
|
Improper Restriction of Operations within the Bounds of a Memory Buffer - Certain languages allow direct addressing of memory locations and do not automatically ensure that these locations are valid for the memory buffer that is being referenced.
|
c
|
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_REDUCE_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_REDUCE_H_
#include "ruy/profiler/instrumentation.h" // from @ruy
#include "tensorflow/lite/kernels/internal/common.h"
#include "tensorflow/lite/kernels/internal/cppmath.h"
#include "tensorflow/lite/kernels/internal/max.h"
#include "tensorflow/lite/kernels/internal/min.h"
#include "tensorflow/lite/kernels/internal/quantization_util.h"
#include "tensorflow/lite/kernels/internal/types.h"
namespace tflite {
namespace reference_ops {
// A generic reduce method that can be used for reduce_sum, reduce_mean, etc.
// This method iterates through input data and reduce elements along the
// dimensions given in axis.
template <typename In, typename Out>
inline bool Reduce(const In* input_data, const int* input_dims,
const int* output_dims, const int input_num_dims,
const int output_num_dims, const int* axis,
const int num_axis, int* input_iter,
Out reducer(const Out current, const In in),
Out* output_data) {
// Reset input iterator.
for (int idx = 0; idx < input_num_dims; ++idx) {
input_iter[idx] = 0;
}
// Iterate through input_data.
do {
size_t input_offset =
ReducedOutputOffset(input_num_dims, input_dims, input_iter, 0, nullptr);
size_t output_offset = ReducedOutputOffset(input_num_dims, input_dims,
input_iter, num_axis, axis);
output_data[output_offset] =
reducer(output_data[output_offset], input_data[input_offset]);
} while (NextIndex(input_num_dims, input_dims, input_iter));
return true;
}
// This method parses the input 'axis' to remove duplicates and handle negative
// values, and returns a valid 'out_axis'
inline bool ResolveAxis(const int num_dims, const int* axis,
const int64_t num_axis, int* out_axis,
int* out_num_axis) {
*out_num_axis = 0; // Just in case.
// Short-circuit axis resolution for scalars; the axis will go unused.
if (num_dims == 0) {
return true;
}
// o(n^2) is fine since out_num_axis should be really small, mostly <= 4
for (int64_t idx = 0; idx < num_axis; ++idx) {
// Handle negative index. A positive index 'p_idx' can be represented as a
// negative index 'n_idx' as: n_idx = p_idx-num_dims
// eg: For num_dims=3, [0, 1, 2] is the same as [-3, -2, -1] */
int current = axis[idx] < 0 ? (axis[idx] + num_dims) : axis[idx];
TFLITE_DCHECK(current >= 0 && current < num_dims);
bool is_dup = false;
for (int j = 0; j < *out_num_axis; ++j) {
if (out_axis[j] == current) {
is_dup = true;
break;
}
}
if (!is_dup) {
out_axis[*out_num_axis] = current;
*out_num_axis += 1;
}
}
return true;
}
// This method expects that output_data has been initialized.
template <typename In, typename Out>
inline bool ReduceSumImpl(const In* input_data, const int* input_dims,
const int* output_dims, const int input_num_dims,
const int output_num_dims, const int* axis,
const int num_axis, int* input_iter,
Out* output_data) {
auto reducer = [](const Out current, const In in) -> Out {
const Out actual_in = static_cast<Out>(in);
return current + actual_in;
};
return Reduce<In, Out>(input_data, input_dims, output_dims, input_num_dims,
output_num_dims, axis, num_axis, input_iter, reducer,
output_data);
}
template <typename T>
inline bool InitTensorDataForReduce(const int* dims, const int num_dims,
const T init_value, T* data) {
size_t num_elements = 1;
for (int idx = 0; idx < num_dims; ++idx) {
size_t current = static_cast<size_t>(dims[idx]);
// Overflow prevention.
if (num_elements > std::numeric_limits<size_t>::max() / current) {
return false;
}
num_elements *= current;
}
for (size_t idx = 0; idx < num_elements; ++idx) {
data[idx] = init_value;
}
return true;
}
// Computes the generic value (i.e., sum/max/min/prod) of elements across
// dimensions given in axis. It needs to pass in init_value and reducer.
template <typename T>
inline bool ReduceGeneric(const T* input_data, const int* input_dims,
const int input_num_dims, T* output_data,
const int* output_dims, const int output_num_dims,
const int* axis, const int64_t num_axis_dimensions,
bool keep_dims, int* temp_index, int* resolved_axis,
T init_value,
T reducer(const T current, const T in)) {
// Reset output data.
if (!InitTensorDataForReduce(output_dims, output_num_dims, init_value,
output_data)) {
return false;
}
// Resolve axis.
int num_resolved_axis = 0;
if (!ResolveAxis(input_num_dims, axis, num_axis_dimensions, resolved_axis,
&num_resolved_axis)) {
return false;
}
return Reduce<T, T>(input_data, input_dims, output_dims, input_num_dims,
output_num_dims, resolved_axis, num_resolved_axis,
temp_index, reducer, output_data);
}
// Computes the mean of elements across dimensions given in axis.
// It does so in two stages, first calculates the sum of elements along the axis
// then divides it by the number of element in axis.
template <typename T, typename U>
inline bool Mean(const T* input_data, const int* input_dims,
const int input_num_dims, T* output_data,
const int* output_dims, const int output_num_dims,
const int* axis, const int num_axis_dimensions, bool keep_dims,
int* temp_index, int* resolved_axis, U* temp_sum) {
ruy::profiler::ScopeLabel label("Mean");
// Reset output data.
size_t num_outputs = 1;
for (int idx = 0; idx < output_num_dims; ++idx) {
size_t current = static_cast<size_t>(output_dims[idx]);
// Overflow prevention.
if (num_outputs > std::numeric_limits<size_t>::max() / current) {
return false;
}
num_outputs *= current;
}
for (size_t idx = 0; idx < num_outputs; ++idx) {
output_data[idx] = T();
temp_sum[idx] = U();
}
// Resolve axis.
int num_resolved_axis = 0;
if (!ResolveAxis(input_num_dims, axis, num_axis_dimensions, resolved_axis,
&num_resolved_axis)) {
return false;
}
if (!ReduceSumImpl<T, U>(input_data, input_dims, output_dims, input_num_dims,
output_num_dims, resolved_axis, num_resolved_axis,
temp_index, temp_sum)) {
return false;
}
// Calculate mean by dividing output_data by num of aggregated element.
size_t num_elements_in_axis = 1;
for (int idx = 0; idx < num_resolved_axis; ++idx) {
size_t current = static_cast<size_t>(input_dims[resolved_axis[idx]]);
// Overflow prevention.
if (current > (std::numeric_limits<size_t>::max() / num_elements_in_axis)) {
return false;
}
num_elements_in_axis *= current;
}
if (num_elements_in_axis > 0) {
for (size_t idx = 0; idx < num_outputs; ++idx) {
output_data[idx] =
static_cast<T>(temp_sum[idx] / static_cast<U>(num_elements_in_axis));
}
}
return true;
}
template <typename T>
inline void Mean(const tflite::MeanParams& op_params,
const RuntimeShape& unextended_input_shape,
const T* input_data,
const RuntimeShape& unextended_output_shape, T* output_data) {
ruy::profiler::ScopeLabel label("Mean4D");
// Current implementation only supports dimension equals 4 and simultaneous
// reduction over width and height.
TFLITE_CHECK_EQ(unextended_input_shape.DimensionsCount(), 4);
TFLITE_CHECK_LE(unextended_output_shape.DimensionsCount(), 4);
const RuntimeShape input_shape =
RuntimeShape::ExtendedShape(4, unextended_input_shape);
const RuntimeShape output_shape =
RuntimeShape::ExtendedShape(4, unextended_output_shape);
const int output_batch = output_shape.Dims(0);
const int output_height = output_shape.Dims(1);
const int output_width = output_shape.Dims(2);
const int output_depth = output_shape.Dims(3);
const int input_height = input_shape.Dims(1);
const int input_width = input_shape.Dims(2);
TFLITE_CHECK_EQ(op_params.axis_count, 2);
TFLITE_CHECK((op_params.axis[0] == 1 && op_params.axis[1] == 2) ||
(op_params.axis[0] == 2 && op_params.axis[1] == 1));
TFLITE_CHECK_EQ(output_height, 1);
TFLITE_CHECK_EQ(output_width, 1);
for (int out_b = 0; out_b < output_batch; ++out_b) {
for (int out_d = 0; out_d < output_depth; ++out_d) {
float value = 0;
for (int in_h = 0; in_h < input_height; ++in_h) {
for (int in_w = 0; in_w < input_width; ++in_w) {
value += input_data[Offset(input_shape, out_b, in_h, in_w, out_d)];
}
}
output_data[Offset(output_shape, out_b, 0, 0, out_d)] =
value / (input_width * input_height);
}
}
}
inline void Mean(const tflite::MeanParams& op_params,
const RuntimeShape& unextended_input_shape,
const uint8_t* input_data, int32_t input_zero_point,
float input_scale, const RuntimeShape& unextended_output_shape,
uint8_t* output_data, int32_t output_zero_point,
float output_scale) {
ruy::profiler::ScopeLabel label("Mean4D/Uint8");
// Current implementation only supports dimension equals 4 and simultaneous
// reduction over width and height.
TFLITE_CHECK_EQ(unextended_input_shape.DimensionsCount(), 4);
TFLITE_CHECK_LE(unextended_output_shape.DimensionsCount(), 4);
const RuntimeShape input_shape =
RuntimeShape::ExtendedShape(4, unextended_input_shape);
const RuntimeShape output_shape =
RuntimeShape::ExtendedShape(4, unextended_output_shape);
const int output_batch = output_shape.Dims(0);
const int output_height = output_shape.Dims(1);
const int output_width = output_shape.Dims(2);
const int output_depth = output_shape.Dims(3);
const int input_height = input_shape.Dims(1);
const int input_width = input_shape.Dims(2);
const float num_elements_in_axis = input_width * input_height;
TFLITE_CHECK_EQ(op_params.axis_count, 2);
TFLITE_CHECK((op_params.axis[0] == 1 && op_params.axis[1] == 2) ||
(op_params.axis[0] == 2 && op_params.axis[1] == 1));
TFLITE_CHECK_EQ(output_height, 1);
TFLITE_CHECK_EQ(output_width, 1);
constexpr int32_t kMinValue = std::numeric_limits<uint8_t>::min();
constexpr int32_t kMaxValue = std::numeric_limits<uint8_t>::max();
int32_t bias =
output_zero_point -
static_cast<int32_t>(input_zero_point * input_scale / output_scale);
double real_scale =
static_cast<double>(input_scale / (num_elements_in_axis * output_scale));
int32_t multiplier;
int shift;
QuantizeMultiplier(real_scale, &multiplier, &shift);
for (int out_b = 0; out_b < output_batch; ++out_b) {
for (int out_d = 0; out_d < output_depth; ++out_d) {
int32_t acc = 0;
for (int in_h = 0; in_h < input_height; ++in_h) {
for (int in_w = 0; in_w < input_width; ++in_w) {
acc += input_data[Offset(input_shape, out_b, in_h, in_w, out_d)];
}
}
acc = MultiplyByQuantizedMultiplier(acc, multiplier, shift);
acc += bias;
acc = std::min(std::max(acc, kMinValue), kMaxValue);
output_data[Offset(output_shape, out_b, 0, 0, out_d)] =
static_cast<uint8_t>(acc);
}
}
}
// Computes the mean of elements across dimensions given in axis.
// It does so in two stages, first calculates the sum of elements along the axis
// then divides it by the number of element in axis for quantized values.
template <typename T, typename U>
inline bool QuantizedMeanOrSum(const T* input_data, int32_t input_zero_point,
float input_scale, const int* input_dims,
const int input_num_dims, T* output_data,
int32_t output_zero_point, float output_scale,
const int* output_dims,
const int output_num_dims, const int* axis,
const int num_axis_dimensions, bool keep_dims,
int* temp_index, int* resolved_axis, U* temp_sum,
bool compute_sum) {
const bool uint8_case = std::is_same<T, uint8_t>::value;
const bool int16_case = std::is_same<T, int16_t>::value;
if (uint8_case) {
ruy::profiler::ScopeLabel label(compute_sum ? "Sum/Uint8" : "Mean/Uint8");
} else if (int16_case) {
ruy::profiler::ScopeLabel label(compute_sum ? "Sum/Int16" : "Mean/Int16");
} else {
ruy::profiler::ScopeLabel label(compute_sum ? "Sum/Int8" : "Mean/Int8");
}
// Reset output data.
size_t num_outputs = 1;
for (int idx = 0; idx < output_num_dims; ++idx) {
size_t current = static_cast<size_t>(output_dims[idx]);
// Overflow prevention.
if (num_outputs > std::numeric_limits<size_t>::max() / current) {
return false;
}
num_outputs *= current;
}
for (size_t idx = 0; idx < num_outputs; ++idx) {
output_data[idx] = T();
temp_sum[idx] = U();
}
// Resolve axis.
int num_resolved_axis = 0;
if (!ResolveAxis(input_num_dims, axis, num_axis_dimensions, resolved_axis,
&num_resolved_axis)) {
return false;
}
if (!ReduceSumImpl<T, U>(input_data, input_dims, output_dims, input_num_dims,
output_num_dims, resolved_axis, num_resolved_axis,
temp_index, temp_sum)) {
return false;
}
// Calculate mean by dividing output_data by num of aggregated element.
size_t num_elements_in_axis = 1;
for (int idx = 0; idx < num_resolved_axis; ++idx) {
size_t current = static_cast<size_t>(input_dims[resolved_axis[idx]]);
// Overflow prevention.
if (current > (std::numeric_limits<size_t>::max() / num_elements_in_axis)) {
return false;
}
num_elements_in_axis *= current;
}
if (num_elements_in_axis > 0) {
const float scale = input_scale / output_scale;
if (compute_sum) {
// TODO(b/116341117): Eliminate float and do this completely in 8bit.
const float bias =
-input_zero_point * scale * num_elements_in_axis + 0.5f;
for (size_t idx = 0; idx < num_outputs; ++idx) {
const U value =
static_cast<U>(TfLiteRound(temp_sum[idx] * scale + bias)) +
output_zero_point;
output_data[idx] = static_cast<T>(value);
}
} else {
const float bias = -input_zero_point * scale + 0.5f;
for (size_t idx = 0; idx < num_outputs; ++idx) {
float float_mean = static_cast<float>(temp_sum[idx]) /
static_cast<float>(num_elements_in_axis);
float result = TfLiteMin(
TfLiteRound(float_mean * scale + bias) + output_zero_point,
static_cast<float>(std::numeric_limits<T>::max()));
result = TfLiteMax(result,
static_cast<float>(std::numeric_limits<T>::min()));
output_data[idx] = static_cast<T>(result);
}
}
}
return true;
}
} // namespace reference_ops
} // namespace tflite
#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_REDUCE_H_
|
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_REDUCE_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_REDUCE_H_
#include "ruy/profiler/instrumentation.h" // from @ruy
#include "tensorflow/lite/kernels/internal/common.h"
#include "tensorflow/lite/kernels/internal/cppmath.h"
#include "tensorflow/lite/kernels/internal/max.h"
#include "tensorflow/lite/kernels/internal/min.h"
#include "tensorflow/lite/kernels/internal/quantization_util.h"
#include "tensorflow/lite/kernels/internal/types.h"
namespace tflite {
namespace reference_ops {
// A generic reduce method that can be used for reduce_sum, reduce_mean, etc.
// This method iterates through input data and reduce elements along the
// dimensions given in axis.
template <typename In, typename Out>
inline bool Reduce(const In* input_data, const int* input_dims,
const int* output_dims, const int input_num_dims,
const int output_num_dims, const int* axis,
const int num_axis, int* input_iter,
Out reducer(const Out current, const In in),
Out* output_data) {
// Reset input iterator.
for (int idx = 0; idx < input_num_dims; ++idx) {
input_iter[idx] = 0;
}
// Iterate through input_data.
do {
size_t input_offset =
ReducedOutputOffset(input_num_dims, input_dims, input_iter, 0, nullptr);
size_t output_offset = ReducedOutputOffset(input_num_dims, input_dims,
input_iter, num_axis, axis);
output_data[output_offset] =
reducer(output_data[output_offset], input_data[input_offset]);
} while (NextIndex(input_num_dims, input_dims, input_iter));
return true;
}
// This method parses the input 'axis' to remove duplicates and handle negative
// values, and returns a valid 'out_axis'
inline bool ResolveAxis(const int num_dims, const int* axis,
const int64_t num_axis, int* out_axis,
int* out_num_axis) {
*out_num_axis = 0; // Just in case.
// Short-circuit axis resolution for scalars; the axis will go unused.
if (num_dims == 0) {
return true;
}
// o(n^2) is fine since out_num_axis should be really small, mostly <= 4
for (int64_t idx = 0; idx < num_axis; ++idx) {
// Handle negative index. A positive index 'p_idx' can be represented as a
// negative index 'n_idx' as: n_idx = p_idx-num_dims
// eg: For num_dims=3, [0, 1, 2] is the same as [-3, -2, -1] */
int current = axis[idx] < 0 ? (axis[idx] + num_dims) : axis[idx];
TFLITE_DCHECK(current >= 0 && current < num_dims);
if (current < 0 || current >= num_dims) {
return false;
}
bool is_dup = false;
for (int j = 0; j < *out_num_axis; ++j) {
if (out_axis[j] == current) {
is_dup = true;
break;
}
}
if (!is_dup) {
out_axis[*out_num_axis] = current;
*out_num_axis += 1;
}
}
return true;
}
// This method expects that output_data has been initialized.
template <typename In, typename Out>
inline bool ReduceSumImpl(const In* input_data, const int* input_dims,
const int* output_dims, const int input_num_dims,
const int output_num_dims, const int* axis,
const int num_axis, int* input_iter,
Out* output_data) {
auto reducer = [](const Out current, const In in) -> Out {
const Out actual_in = static_cast<Out>(in);
return current + actual_in;
};
return Reduce<In, Out>(input_data, input_dims, output_dims, input_num_dims,
output_num_dims, axis, num_axis, input_iter, reducer,
output_data);
}
template <typename T>
inline bool InitTensorDataForReduce(const int* dims, const int num_dims,
const T init_value, T* data) {
size_t num_elements = 1;
for (int idx = 0; idx < num_dims; ++idx) {
size_t current = static_cast<size_t>(dims[idx]);
// Overflow prevention.
if (num_elements > std::numeric_limits<size_t>::max() / current) {
return false;
}
num_elements *= current;
}
for (size_t idx = 0; idx < num_elements; ++idx) {
data[idx] = init_value;
}
return true;
}
// Computes the generic value (i.e., sum/max/min/prod) of elements across
// dimensions given in axis. It needs to pass in init_value and reducer.
template <typename T>
inline bool ReduceGeneric(const T* input_data, const int* input_dims,
const int input_num_dims, T* output_data,
const int* output_dims, const int output_num_dims,
const int* axis, const int64_t num_axis_dimensions,
bool keep_dims, int* temp_index, int* resolved_axis,
T init_value,
T reducer(const T current, const T in)) {
// Reset output data.
if (!InitTensorDataForReduce(output_dims, output_num_dims, init_value,
output_data)) {
return false;
}
// Resolve axis.
int num_resolved_axis = 0;
if (!ResolveAxis(input_num_dims, axis, num_axis_dimensions, resolved_axis,
&num_resolved_axis)) {
return false;
}
return Reduce<T, T>(input_data, input_dims, output_dims, input_num_dims,
output_num_dims, resolved_axis, num_resolved_axis,
temp_index, reducer, output_data);
}
// Computes the mean of elements across dimensions given in axis.
// It does so in two stages, first calculates the sum of elements along the axis
// then divides it by the number of element in axis.
template <typename T, typename U>
inline bool Mean(const T* input_data, const int* input_dims,
const int input_num_dims, T* output_data,
const int* output_dims, const int output_num_dims,
const int* axis, const int num_axis_dimensions, bool keep_dims,
int* temp_index, int* resolved_axis, U* temp_sum) {
ruy::profiler::ScopeLabel label("Mean");
// Reset output data.
size_t num_outputs = 1;
for (int idx = 0; idx < output_num_dims; ++idx) {
size_t current = static_cast<size_t>(output_dims[idx]);
// Overflow prevention.
if (num_outputs > std::numeric_limits<size_t>::max() / current) {
return false;
}
num_outputs *= current;
}
for (size_t idx = 0; idx < num_outputs; ++idx) {
output_data[idx] = T();
temp_sum[idx] = U();
}
// Resolve axis.
int num_resolved_axis = 0;
if (!ResolveAxis(input_num_dims, axis, num_axis_dimensions, resolved_axis,
&num_resolved_axis)) {
return false;
}
if (!ReduceSumImpl<T, U>(input_data, input_dims, output_dims, input_num_dims,
output_num_dims, resolved_axis, num_resolved_axis,
temp_index, temp_sum)) {
return false;
}
// Calculate mean by dividing output_data by num of aggregated element.
size_t num_elements_in_axis = 1;
for (int idx = 0; idx < num_resolved_axis; ++idx) {
size_t current = static_cast<size_t>(input_dims[resolved_axis[idx]]);
// Overflow prevention.
if (current > (std::numeric_limits<size_t>::max() / num_elements_in_axis)) {
return false;
}
num_elements_in_axis *= current;
}
if (num_elements_in_axis > 0) {
for (size_t idx = 0; idx < num_outputs; ++idx) {
output_data[idx] =
static_cast<T>(temp_sum[idx] / static_cast<U>(num_elements_in_axis));
}
}
return true;
}
template <typename T>
inline void Mean(const tflite::MeanParams& op_params,
const RuntimeShape& unextended_input_shape,
const T* input_data,
const RuntimeShape& unextended_output_shape, T* output_data) {
ruy::profiler::ScopeLabel label("Mean4D");
// Current implementation only supports dimension equals 4 and simultaneous
// reduction over width and height.
TFLITE_CHECK_EQ(unextended_input_shape.DimensionsCount(), 4);
TFLITE_CHECK_LE(unextended_output_shape.DimensionsCount(), 4);
const RuntimeShape input_shape =
RuntimeShape::ExtendedShape(4, unextended_input_shape);
const RuntimeShape output_shape =
RuntimeShape::ExtendedShape(4, unextended_output_shape);
const int output_batch = output_shape.Dims(0);
const int output_height = output_shape.Dims(1);
const int output_width = output_shape.Dims(2);
const int output_depth = output_shape.Dims(3);
const int input_height = input_shape.Dims(1);
const int input_width = input_shape.Dims(2);
TFLITE_CHECK_EQ(op_params.axis_count, 2);
TFLITE_CHECK((op_params.axis[0] == 1 && op_params.axis[1] == 2) ||
(op_params.axis[0] == 2 && op_params.axis[1] == 1));
TFLITE_CHECK_EQ(output_height, 1);
TFLITE_CHECK_EQ(output_width, 1);
for (int out_b = 0; out_b < output_batch; ++out_b) {
for (int out_d = 0; out_d < output_depth; ++out_d) {
float value = 0;
for (int in_h = 0; in_h < input_height; ++in_h) {
for (int in_w = 0; in_w < input_width; ++in_w) {
value += input_data[Offset(input_shape, out_b, in_h, in_w, out_d)];
}
}
output_data[Offset(output_shape, out_b, 0, 0, out_d)] =
value / (input_width * input_height);
}
}
}
inline void Mean(const tflite::MeanParams& op_params,
const RuntimeShape& unextended_input_shape,
const uint8_t* input_data, int32_t input_zero_point,
float input_scale, const RuntimeShape& unextended_output_shape,
uint8_t* output_data, int32_t output_zero_point,
float output_scale) {
ruy::profiler::ScopeLabel label("Mean4D/Uint8");
// Current implementation only supports dimension equals 4 and simultaneous
// reduction over width and height.
TFLITE_CHECK_EQ(unextended_input_shape.DimensionsCount(), 4);
TFLITE_CHECK_LE(unextended_output_shape.DimensionsCount(), 4);
const RuntimeShape input_shape =
RuntimeShape::ExtendedShape(4, unextended_input_shape);
const RuntimeShape output_shape =
RuntimeShape::ExtendedShape(4, unextended_output_shape);
const int output_batch = output_shape.Dims(0);
const int output_height = output_shape.Dims(1);
const int output_width = output_shape.Dims(2);
const int output_depth = output_shape.Dims(3);
const int input_height = input_shape.Dims(1);
const int input_width = input_shape.Dims(2);
const float num_elements_in_axis = input_width * input_height;
TFLITE_CHECK_EQ(op_params.axis_count, 2);
TFLITE_CHECK((op_params.axis[0] == 1 && op_params.axis[1] == 2) ||
(op_params.axis[0] == 2 && op_params.axis[1] == 1));
TFLITE_CHECK_EQ(output_height, 1);
TFLITE_CHECK_EQ(output_width, 1);
constexpr int32_t kMinValue = std::numeric_limits<uint8_t>::min();
constexpr int32_t kMaxValue = std::numeric_limits<uint8_t>::max();
int32_t bias =
output_zero_point -
static_cast<int32_t>(input_zero_point * input_scale / output_scale);
double real_scale =
static_cast<double>(input_scale / (num_elements_in_axis * output_scale));
int32_t multiplier;
int shift;
QuantizeMultiplier(real_scale, &multiplier, &shift);
for (int out_b = 0; out_b < output_batch; ++out_b) {
for (int out_d = 0; out_d < output_depth; ++out_d) {
int32_t acc = 0;
for (int in_h = 0; in_h < input_height; ++in_h) {
for (int in_w = 0; in_w < input_width; ++in_w) {
acc += input_data[Offset(input_shape, out_b, in_h, in_w, out_d)];
}
}
acc = MultiplyByQuantizedMultiplier(acc, multiplier, shift);
acc += bias;
acc = std::min(std::max(acc, kMinValue), kMaxValue);
output_data[Offset(output_shape, out_b, 0, 0, out_d)] =
static_cast<uint8_t>(acc);
}
}
}
// Computes the mean of elements across dimensions given in axis.
// It does so in two stages, first calculates the sum of elements along the axis
// then divides it by the number of element in axis for quantized values.
template <typename T, typename U>
inline bool QuantizedMeanOrSum(const T* input_data, int32_t input_zero_point,
float input_scale, const int* input_dims,
const int input_num_dims, T* output_data,
int32_t output_zero_point, float output_scale,
const int* output_dims,
const int output_num_dims, const int* axis,
const int num_axis_dimensions, bool keep_dims,
int* temp_index, int* resolved_axis, U* temp_sum,
bool compute_sum) {
const bool uint8_case = std::is_same<T, uint8_t>::value;
const bool int16_case = std::is_same<T, int16_t>::value;
if (uint8_case) {
ruy::profiler::ScopeLabel label(compute_sum ? "Sum/Uint8" : "Mean/Uint8");
} else if (int16_case) {
ruy::profiler::ScopeLabel label(compute_sum ? "Sum/Int16" : "Mean/Int16");
} else {
ruy::profiler::ScopeLabel label(compute_sum ? "Sum/Int8" : "Mean/Int8");
}
// Reset output data.
size_t num_outputs = 1;
for (int idx = 0; idx < output_num_dims; ++idx) {
size_t current = static_cast<size_t>(output_dims[idx]);
// Overflow prevention.
if (num_outputs > std::numeric_limits<size_t>::max() / current) {
return false;
}
num_outputs *= current;
}
for (size_t idx = 0; idx < num_outputs; ++idx) {
output_data[idx] = T();
temp_sum[idx] = U();
}
// Resolve axis.
int num_resolved_axis = 0;
if (!ResolveAxis(input_num_dims, axis, num_axis_dimensions, resolved_axis,
&num_resolved_axis)) {
return false;
}
if (!ReduceSumImpl<T, U>(input_data, input_dims, output_dims, input_num_dims,
output_num_dims, resolved_axis, num_resolved_axis,
temp_index, temp_sum)) {
return false;
}
// Calculate mean by dividing output_data by num of aggregated element.
size_t num_elements_in_axis = 1;
for (int idx = 0; idx < num_resolved_axis; ++idx) {
size_t current = static_cast<size_t>(input_dims[resolved_axis[idx]]);
// Overflow prevention.
if (current > (std::numeric_limits<size_t>::max() / num_elements_in_axis)) {
return false;
}
num_elements_in_axis *= current;
}
if (num_elements_in_axis > 0) {
const float scale = input_scale / output_scale;
if (compute_sum) {
// TODO(b/116341117): Eliminate float and do this completely in 8bit.
const float bias =
-input_zero_point * scale * num_elements_in_axis + 0.5f;
for (size_t idx = 0; idx < num_outputs; ++idx) {
const U value =
static_cast<U>(TfLiteRound(temp_sum[idx] * scale + bias)) +
output_zero_point;
output_data[idx] = static_cast<T>(value);
}
} else {
const float bias = -input_zero_point * scale + 0.5f;
for (size_t idx = 0; idx < num_outputs; ++idx) {
float float_mean = static_cast<float>(temp_sum[idx]) /
static_cast<float>(num_elements_in_axis);
float result = TfLiteMin(
TfLiteRound(float_mean * scale + bias) + output_zero_point,
static_cast<float>(std::numeric_limits<T>::max()));
result = TfLiteMax(result,
static_cast<float>(std::numeric_limits<T>::min()));
output_data[idx] = static_cast<T>(result);
}
}
}
return true;
}
} // namespace reference_ops
} // namespace tflite
#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_REDUCE_H_
|
4155_0
|
crossvul
|
h
|
CWE-119
|
Improper Restriction of Operations within the Bounds of a Memory Buffer - Certain languages allow direct addressing of memory locations and do not automatically ensure that these locations are valid for the memory buffer that is being referenced.
|
c
|
/* $OpenBSD: monitor.h,v 1.19 2015/01/19 19:52:16 markus Exp $ */
/*
* Copyright 2002 Niels Provos <provos@citi.umich.edu>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _MONITOR_H_
#define _MONITOR_H_
/* Please keep *_REQ_* values on even numbers and *_ANS_* on odd numbers */
enum monitor_reqtype {
MONITOR_REQ_MODULI = 0, MONITOR_ANS_MODULI = 1,
MONITOR_REQ_FREE = 2,
MONITOR_REQ_AUTHSERV = 4,
MONITOR_REQ_SIGN = 6, MONITOR_ANS_SIGN = 7,
MONITOR_REQ_PWNAM = 8, MONITOR_ANS_PWNAM = 9,
MONITOR_REQ_AUTH2_READ_BANNER = 10, MONITOR_ANS_AUTH2_READ_BANNER = 11,
MONITOR_REQ_AUTHPASSWORD = 12, MONITOR_ANS_AUTHPASSWORD = 13,
MONITOR_REQ_BSDAUTHQUERY = 14, MONITOR_ANS_BSDAUTHQUERY = 15,
MONITOR_REQ_BSDAUTHRESPOND = 16, MONITOR_ANS_BSDAUTHRESPOND = 17,
MONITOR_REQ_SKEYQUERY = 18, MONITOR_ANS_SKEYQUERY = 19,
MONITOR_REQ_SKEYRESPOND = 20, MONITOR_ANS_SKEYRESPOND = 21,
MONITOR_REQ_KEYALLOWED = 22, MONITOR_ANS_KEYALLOWED = 23,
MONITOR_REQ_KEYVERIFY = 24, MONITOR_ANS_KEYVERIFY = 25,
MONITOR_REQ_KEYEXPORT = 26,
MONITOR_REQ_PTY = 28, MONITOR_ANS_PTY = 29,
MONITOR_REQ_PTYCLEANUP = 30,
MONITOR_REQ_SESSKEY = 32, MONITOR_ANS_SESSKEY = 33,
MONITOR_REQ_SESSID = 34,
MONITOR_REQ_RSAKEYALLOWED = 36, MONITOR_ANS_RSAKEYALLOWED = 37,
MONITOR_REQ_RSACHALLENGE = 38, MONITOR_ANS_RSACHALLENGE = 39,
MONITOR_REQ_RSARESPONSE = 40, MONITOR_ANS_RSARESPONSE = 41,
MONITOR_REQ_GSSSETUP = 42, MONITOR_ANS_GSSSETUP = 43,
MONITOR_REQ_GSSSTEP = 44, MONITOR_ANS_GSSSTEP = 45,
MONITOR_REQ_GSSUSEROK = 46, MONITOR_ANS_GSSUSEROK = 47,
MONITOR_REQ_GSSCHECKMIC = 48, MONITOR_ANS_GSSCHECKMIC = 49,
MONITOR_REQ_TERM = 50,
};
struct mm_master;
struct monitor {
int m_recvfd;
int m_sendfd;
int m_log_recvfd;
int m_log_sendfd;
struct mm_master *m_zback;
struct mm_master *m_zlib;
struct kex **m_pkex;
pid_t m_pid;
};
struct monitor *monitor_init(void);
void monitor_reinit(struct monitor *);
void monitor_sync(struct monitor *);
struct Authctxt;
void monitor_child_preauth(struct Authctxt *, struct monitor *);
void monitor_child_postauth(struct monitor *);
struct mon_table;
int monitor_read(struct monitor*, struct mon_table *, struct mon_table **);
/* Prototypes for request sending and receiving */
void mm_request_send(int, enum monitor_reqtype, Buffer *);
void mm_request_receive(int, Buffer *);
void mm_request_receive_expect(int, enum monitor_reqtype, Buffer *);
#endif /* _MONITOR_H_ */
|
/* $OpenBSD: monitor.h,v 1.20 2016/09/28 16:33:07 djm Exp $ */
/*
* Copyright 2002 Niels Provos <provos@citi.umich.edu>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _MONITOR_H_
#define _MONITOR_H_
/* Please keep *_REQ_* values on even numbers and *_ANS_* on odd numbers */
enum monitor_reqtype {
MONITOR_REQ_MODULI = 0, MONITOR_ANS_MODULI = 1,
MONITOR_REQ_FREE = 2,
MONITOR_REQ_AUTHSERV = 4,
MONITOR_REQ_SIGN = 6, MONITOR_ANS_SIGN = 7,
MONITOR_REQ_PWNAM = 8, MONITOR_ANS_PWNAM = 9,
MONITOR_REQ_AUTH2_READ_BANNER = 10, MONITOR_ANS_AUTH2_READ_BANNER = 11,
MONITOR_REQ_AUTHPASSWORD = 12, MONITOR_ANS_AUTHPASSWORD = 13,
MONITOR_REQ_BSDAUTHQUERY = 14, MONITOR_ANS_BSDAUTHQUERY = 15,
MONITOR_REQ_BSDAUTHRESPOND = 16, MONITOR_ANS_BSDAUTHRESPOND = 17,
MONITOR_REQ_SKEYQUERY = 18, MONITOR_ANS_SKEYQUERY = 19,
MONITOR_REQ_SKEYRESPOND = 20, MONITOR_ANS_SKEYRESPOND = 21,
MONITOR_REQ_KEYALLOWED = 22, MONITOR_ANS_KEYALLOWED = 23,
MONITOR_REQ_KEYVERIFY = 24, MONITOR_ANS_KEYVERIFY = 25,
MONITOR_REQ_KEYEXPORT = 26,
MONITOR_REQ_PTY = 28, MONITOR_ANS_PTY = 29,
MONITOR_REQ_PTYCLEANUP = 30,
MONITOR_REQ_SESSKEY = 32, MONITOR_ANS_SESSKEY = 33,
MONITOR_REQ_SESSID = 34,
MONITOR_REQ_RSAKEYALLOWED = 36, MONITOR_ANS_RSAKEYALLOWED = 37,
MONITOR_REQ_RSACHALLENGE = 38, MONITOR_ANS_RSACHALLENGE = 39,
MONITOR_REQ_RSARESPONSE = 40, MONITOR_ANS_RSARESPONSE = 41,
MONITOR_REQ_GSSSETUP = 42, MONITOR_ANS_GSSSETUP = 43,
MONITOR_REQ_GSSSTEP = 44, MONITOR_ANS_GSSSTEP = 45,
MONITOR_REQ_GSSUSEROK = 46, MONITOR_ANS_GSSUSEROK = 47,
MONITOR_REQ_GSSCHECKMIC = 48, MONITOR_ANS_GSSCHECKMIC = 49,
MONITOR_REQ_TERM = 50,
};
struct monitor {
int m_recvfd;
int m_sendfd;
int m_log_recvfd;
int m_log_sendfd;
struct kex **m_pkex;
pid_t m_pid;
};
struct monitor *monitor_init(void);
void monitor_reinit(struct monitor *);
struct Authctxt;
void monitor_child_preauth(struct Authctxt *, struct monitor *);
void monitor_child_postauth(struct monitor *);
struct mon_table;
int monitor_read(struct monitor*, struct mon_table *, struct mon_table **);
/* Prototypes for request sending and receiving */
void mm_request_send(int, enum monitor_reqtype, Buffer *);
void mm_request_receive(int, Buffer *);
void mm_request_receive_expect(int, enum monitor_reqtype, Buffer *);
#endif /* _MONITOR_H_ */
|
4767_1
|
crossvul
|
h
|
CWE-119
|
Improper Restriction of Operations within the Bounds of a Memory Buffer - Certain languages allow direct addressing of memory locations and do not automatically ensure that these locations are valid for the memory buffer that is being referenced.
|
c
|
/*
Copyright 1999-2016 ImageMagick Studio LLC, a non-profit organization
dedicated to making software imaging solutions freely available.
You may not use this file except in compliance with the License.
obtain a copy of the License at
http://www.imagemagick.org/script/license.php
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
MagickCore private memory methods.
*/
#ifndef _MAGICKCORE_MEMORY_PRIVATE_H
#define _MAGICKCORE_MEMORY_PRIVATE_H
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
#if defined(MAGICK_TARGET_CPU) && (MAGICK_TARGET_CPU == powerpc)
# define CACHE_LINE_SIZE 128
#else
# define CACHE_LINE_SIZE 64
#endif
#define CacheAlign(size) ((size) < CACHE_LINE_SIZE ? CACHE_LINE_SIZE : (size))
#if (__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ > 6))
#if !defined(__ICC)
#define MagickAssumeAligned(address) \
__builtin_assume_aligned((address),CACHE_LINE_SIZE)
#else
#define MagickAssumeAligned(address) (address)
#endif
#else
#define MagickAssumeAligned(address) (address)
#endif
#if defined(__cplusplus) || defined(c_plusplus)
}
#endif
#endif
|
/*
Copyright 1999-2016 ImageMagick Studio LLC, a non-profit organization
dedicated to making software imaging solutions freely available.
You may not use this file except in compliance with the License.
obtain a copy of the License at
http://www.imagemagick.org/script/license.php
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
MagickCore private memory methods.
*/
#ifndef _MAGICKCORE_MEMORY_PRIVATE_H
#define _MAGICKCORE_MEMORY_PRIVATE_H
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
#if defined(MAGICK_TARGET_CPU) && (MAGICK_TARGET_CPU == powerpc)
# define CACHE_LINE_SIZE 128
#else
# define CACHE_LINE_SIZE 64
#endif
#define CacheAlign(size) ((size) < CACHE_LINE_SIZE ? CACHE_LINE_SIZE : (size))
#if (__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ > 6))
#if !defined(__ICC)
#define MagickAssumeAligned(address) \
__builtin_assume_aligned((address),CACHE_LINE_SIZE)
#else
#define MagickAssumeAligned(address) (address)
#endif
#else
#define MagickAssumeAligned(address) (address)
#endif
MagickExport MagickBooleanType
HeapOverflowSanityCheck(const size_t,const size_t) magick_alloc_sizes(1,2);
#if defined(__cplusplus) || defined(c_plusplus)
}
#endif
#endif
|
4788_3
|
crossvul
|
h
|
CWE-119
|
Improper Restriction of Operations within the Bounds of a Memory Buffer - Certain languages allow direct addressing of memory locations and do not automatically ensure that these locations are valid for the memory buffer that is being referenced.
|
c
|
/*
* Copyright (C) 2004 Red Hat Inc.
* Copyright (C) 2005 Martin Koegler
* Copyright (C) 2010 TigerVNC Team
*
* This is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This software is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this software; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
* USA.
*/
#ifndef __C_SECURITY_TLS_H__
#define __C_SECURITY_TLS_H__
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif
#ifndef HAVE_GNUTLS
#error "This header should not be compiled without HAVE_GNUTLS defined"
#endif
#include <rfb/CSecurity.h>
#include <rfb/SSecurityVeNCrypt.h>
#include <rfb/Security.h>
#include <rfb/UserMsgBox.h>
#include <rdr/InStream.h>
#include <rdr/OutStream.h>
#include <gnutls/gnutls.h>
namespace rfb {
class UserMsgBox;
class CSecurityTLS : public CSecurity {
public:
CSecurityTLS(bool _anon);
virtual ~CSecurityTLS();
virtual bool processMsg(CConnection* cc);
virtual int getType() const { return anon ? secTypeTLSNone : secTypeX509None; }
virtual const char* description() const
{ return anon ? "TLS Encryption without VncAuth" : "X509 Encryption without VncAuth"; }
static void setDefaults();
static StringParameter X509CA;
static StringParameter X509CRL;
static UserMsgBox *msg;
protected:
void shutdown(bool needbye);
void freeResources();
void setParam();
void checkSession();
CConnection *client;
private:
static void initGlobal();
gnutls_session_t session;
gnutls_anon_client_credentials_t anon_cred;
gnutls_certificate_credentials_t cert_cred;
bool anon;
char *cafile, *crlfile;
rdr::InStream* fis;
rdr::OutStream* fos;
};
}
#endif
|
/*
* Copyright (C) 2004 Red Hat Inc.
* Copyright (C) 2005 Martin Koegler
* Copyright (C) 2010 TigerVNC Team
*
* This is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This software is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this software; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
* USA.
*/
#ifndef __C_SECURITY_TLS_H__
#define __C_SECURITY_TLS_H__
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif
#ifndef HAVE_GNUTLS
#error "This header should not be compiled without HAVE_GNUTLS defined"
#endif
#include <rfb/CSecurity.h>
#include <rfb/SSecurityVeNCrypt.h>
#include <rfb/Security.h>
#include <rfb/UserMsgBox.h>
#include <rdr/InStream.h>
#include <rdr/OutStream.h>
#include <gnutls/gnutls.h>
namespace rfb {
class UserMsgBox;
class CSecurityTLS : public CSecurity {
public:
CSecurityTLS(bool _anon);
virtual ~CSecurityTLS();
virtual bool processMsg(CConnection* cc);
virtual int getType() const { return anon ? secTypeTLSNone : secTypeX509None; }
virtual const char* description() const
{ return anon ? "TLS Encryption without VncAuth" : "X509 Encryption without VncAuth"; }
static void setDefaults();
static StringParameter X509CA;
static StringParameter X509CRL;
static UserMsgBox *msg;
protected:
void shutdown(bool needbye);
void freeResources();
void setParam();
void checkSession();
CConnection *client;
private:
gnutls_session_t session;
gnutls_anon_client_credentials_t anon_cred;
gnutls_certificate_credentials_t cert_cred;
bool anon;
char *cafile, *crlfile;
rdr::InStream* fis;
rdr::OutStream* fos;
};
}
#endif
|
4842_1
|
crossvul
|
h
|
CWE-119
|
Improper Restriction of Operations within the Bounds of a Memory Buffer - Certain languages allow direct addressing of memory locations and do not automatically ensure that these locations are valid for the memory buffer that is being referenced.
|
c
|
/*
* Copyright (C) 2004 Red Hat Inc.
* Copyright (C) 2005 Martin Koegler
* Copyright (C) 2010 TigerVNC Team
*
* This is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This software is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this software; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
* USA.
*/
#ifndef __S_SECURITY_TLS_H__
#define __S_SECURITY_TLS_H__
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif
#ifndef HAVE_GNUTLS
#error "This header should not be included without HAVE_GNUTLS defined"
#endif
#include <rfb/SSecurity.h>
#include <rfb/SSecurityVeNCrypt.h>
#include <rdr/InStream.h>
#include <rdr/OutStream.h>
#include <gnutls/gnutls.h>
namespace rfb {
class SSecurityTLS : public SSecurity {
public:
SSecurityTLS(bool _anon);
virtual ~SSecurityTLS();
virtual bool processMsg(SConnection* sc);
virtual const char* getUserName() const {return 0;}
virtual int getType() const { return anon ? secTypeTLSNone : secTypeX509None;}
static StringParameter X509_CertFile;
static StringParameter X509_KeyFile;
protected:
void shutdown();
void setParams(gnutls_session_t session);
private:
static void initGlobal();
gnutls_session_t session;
gnutls_dh_params_t dh_params;
gnutls_anon_server_credentials_t anon_cred;
gnutls_certificate_credentials_t cert_cred;
char *keyfile, *certfile;
int type;
bool anon;
rdr::InStream* fis;
rdr::OutStream* fos;
};
}
#endif
|
/*
* Copyright (C) 2004 Red Hat Inc.
* Copyright (C) 2005 Martin Koegler
* Copyright (C) 2010 TigerVNC Team
*
* This is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This software is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this software; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
* USA.
*/
#ifndef __S_SECURITY_TLS_H__
#define __S_SECURITY_TLS_H__
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif
#ifndef HAVE_GNUTLS
#error "This header should not be included without HAVE_GNUTLS defined"
#endif
#include <rfb/SSecurity.h>
#include <rfb/SSecurityVeNCrypt.h>
#include <rdr/InStream.h>
#include <rdr/OutStream.h>
#include <gnutls/gnutls.h>
namespace rfb {
class SSecurityTLS : public SSecurity {
public:
SSecurityTLS(bool _anon);
virtual ~SSecurityTLS();
virtual bool processMsg(SConnection* sc);
virtual const char* getUserName() const {return 0;}
virtual int getType() const { return anon ? secTypeTLSNone : secTypeX509None;}
static StringParameter X509_CertFile;
static StringParameter X509_KeyFile;
protected:
void shutdown();
void setParams(gnutls_session_t session);
private:
gnutls_session_t session;
gnutls_dh_params_t dh_params;
gnutls_anon_server_credentials_t anon_cred;
gnutls_certificate_credentials_t cert_cred;
char *keyfile, *certfile;
int type;
bool anon;
rdr::InStream* fis;
rdr::OutStream* fos;
};
}
#endif
|
4842_3
|
crossvul
|
h
|
CWE-119
|
Improper Restriction of Operations within the Bounds of a Memory Buffer - Certain languages allow direct addressing of memory locations and do not automatically ensure that these locations are valid for the memory buffer that is being referenced.
|
c
|
#ifndef LIST_OBJECTS_H
#define LIST_OBJECTS_H
typedef void (*show_commit_fn)(struct commit *, void *);
typedef void (*show_object_fn)(struct object *, struct strbuf *, const char *, void *);
void traverse_commit_list(struct rev_info *, show_commit_fn, show_object_fn, void *);
typedef void (*show_edge_fn)(struct commit *);
void mark_edges_uninteresting(struct rev_info *, show_edge_fn);
#endif
|
#ifndef LIST_OBJECTS_H
#define LIST_OBJECTS_H
typedef void (*show_commit_fn)(struct commit *, void *);
typedef void (*show_object_fn)(struct object *, const char *, void *);
void traverse_commit_list(struct rev_info *, show_commit_fn, show_object_fn, void *);
typedef void (*show_edge_fn)(struct commit *);
void mark_edges_uninteresting(struct rev_info *, show_edge_fn);
#endif
|
4946_3
|
crossvul
|
h
|
CWE-119
|
Improper Restriction of Operations within the Bounds of a Memory Buffer - Certain languages allow direct addressing of memory locations and do not automatically ensure that these locations are valid for the memory buffer that is being referenced.
|
c
|
#ifndef REVISION_H
#define REVISION_H
#include "parse-options.h"
#include "grep.h"
#include "notes.h"
#include "commit.h"
#include "diff.h"
/* Remember to update object flag allocation in object.h */
#define SEEN (1u<<0)
#define UNINTERESTING (1u<<1)
#define TREESAME (1u<<2)
#define SHOWN (1u<<3)
#define TMP_MARK (1u<<4) /* for isolated cases; clean after use */
#define BOUNDARY (1u<<5)
#define CHILD_SHOWN (1u<<6)
#define ADDED (1u<<7) /* Parents already parsed and added? */
#define SYMMETRIC_LEFT (1u<<8)
#define PATCHSAME (1u<<9)
#define BOTTOM (1u<<10)
#define TRACK_LINEAR (1u<<26)
#define ALL_REV_FLAGS (((1u<<11)-1) | TRACK_LINEAR)
#define DECORATE_SHORT_REFS 1
#define DECORATE_FULL_REFS 2
struct rev_info;
struct log_info;
struct string_list;
struct saved_parents;
struct rev_cmdline_info {
unsigned int nr;
unsigned int alloc;
struct rev_cmdline_entry {
struct object *item;
const char *name;
enum {
REV_CMD_REF,
REV_CMD_PARENTS_ONLY,
REV_CMD_LEFT,
REV_CMD_RIGHT,
REV_CMD_MERGE_BASE,
REV_CMD_REV
} whence;
unsigned flags;
} *rev;
};
#define REVISION_WALK_WALK 0
#define REVISION_WALK_NO_WALK_SORTED 1
#define REVISION_WALK_NO_WALK_UNSORTED 2
struct rev_info {
/* Starting list */
struct commit_list *commits;
struct object_array pending;
/* Parents of shown commits */
struct object_array boundary_commits;
/* The end-points specified by the end user */
struct rev_cmdline_info cmdline;
/* excluding from --branches, --refs, etc. expansion */
struct string_list *ref_excludes;
/* Basic information */
const char *prefix;
const char *def;
struct pathspec prune_data;
/* topo-sort */
enum rev_sort_order sort_order;
unsigned int early_output:1,
ignore_missing:1,
ignore_missing_links:1;
/* Traversal flags */
unsigned int dense:1,
prune:1,
no_walk:2,
show_all:1,
remove_empty_trees:1,
simplify_history:1,
topo_order:1,
simplify_merges:1,
simplify_by_decoration:1,
tag_objects:1,
tree_objects:1,
blob_objects:1,
verify_objects:1,
edge_hint:1,
edge_hint_aggressive:1,
limited:1,
unpacked:1,
boundary:2,
count:1,
left_right:1,
left_only:1,
right_only:1,
rewrite_parents:1,
print_parents:1,
show_source:1,
show_decorations:1,
reverse:1,
reverse_output_stage:1,
cherry_pick:1,
cherry_mark:1,
bisect:1,
ancestry_path:1,
first_parent_only:1,
line_level_traverse:1;
/* Diff flags */
unsigned int diff:1,
full_diff:1,
show_root_diff:1,
no_commit_id:1,
verbose_header:1,
ignore_merges:1,
combine_merges:1,
dense_combined_merges:1,
always_show_header:1;
/* Format info */
unsigned int shown_one:1,
shown_dashes:1,
show_merge:1,
show_notes:1,
show_notes_given:1,
show_signature:1,
pretty_given:1,
abbrev_commit:1,
abbrev_commit_given:1,
zero_commit:1,
use_terminator:1,
missing_newline:1,
date_mode_explicit:1,
preserve_subject:1;
unsigned int disable_stdin:1;
unsigned int leak_pending:1;
/* --show-linear-break */
unsigned int track_linear:1,
track_first_time:1,
linear:1;
struct date_mode date_mode;
unsigned int abbrev;
enum cmit_fmt commit_format;
struct log_info *loginfo;
int nr, total;
const char *mime_boundary;
const char *patch_suffix;
int numbered_files;
int reroll_count;
char *message_id;
struct ident_split from_ident;
struct string_list *ref_message_ids;
int add_signoff;
const char *extra_headers;
const char *log_reencode;
const char *subject_prefix;
int no_inline;
int show_log_size;
struct string_list *mailmap;
/* Filter by commit log message */
struct grep_opt grep_filter;
/* Negate the match of grep_filter */
int invert_grep;
/* Display history graph */
struct git_graph *graph;
/* special limits */
int skip_count;
int max_count;
unsigned long max_age;
unsigned long min_age;
int min_parents;
int max_parents;
int (*include_check)(struct commit *, void *);
void *include_check_data;
/* diff info for patches and for paths limiting */
struct diff_options diffopt;
struct diff_options pruning;
struct reflog_walk_info *reflog_info;
struct decoration children;
struct decoration merge_simplification;
struct decoration treesame;
/* notes-specific options: which refs to show */
struct display_notes_opt notes_opt;
/* commit counts */
int count_left;
int count_right;
int count_same;
/* line level range that we are chasing */
struct decoration line_log_data;
/* copies of the parent lists, for --full-diff display */
struct saved_parents *saved_parents_slab;
struct commit_list *previous_parents;
const char *break_bar;
};
extern int ref_excluded(struct string_list *, const char *path);
void clear_ref_exclusion(struct string_list **);
void add_ref_exclusion(struct string_list **, const char *exclude);
#define REV_TREE_SAME 0
#define REV_TREE_NEW 1 /* Only new files */
#define REV_TREE_OLD 2 /* Only files removed */
#define REV_TREE_DIFFERENT 3 /* Mixed changes */
/* revision.c */
typedef void (*show_early_output_fn_t)(struct rev_info *, struct commit_list *);
extern volatile show_early_output_fn_t show_early_output;
struct setup_revision_opt {
const char *def;
void (*tweak)(struct rev_info *, struct setup_revision_opt *);
const char *submodule;
int assume_dashdash;
unsigned revarg_opt;
};
extern void init_revisions(struct rev_info *revs, const char *prefix);
extern int setup_revisions(int argc, const char **argv, struct rev_info *revs,
struct setup_revision_opt *);
extern void parse_revision_opt(struct rev_info *revs, struct parse_opt_ctx_t *ctx,
const struct option *options,
const char * const usagestr[]);
#define REVARG_CANNOT_BE_FILENAME 01
#define REVARG_COMMITTISH 02
extern int handle_revision_arg(const char *arg, struct rev_info *revs,
int flags, unsigned revarg_opt);
extern void reset_revision_walk(void);
extern int prepare_revision_walk(struct rev_info *revs);
extern struct commit *get_revision(struct rev_info *revs);
extern char *get_revision_mark(const struct rev_info *revs,
const struct commit *commit);
extern void put_revision_mark(const struct rev_info *revs,
const struct commit *commit);
extern void mark_parents_uninteresting(struct commit *commit);
extern void mark_tree_uninteresting(struct tree *tree);
char *path_name(struct strbuf *path, const char *name);
extern void show_object_with_name(FILE *, struct object *,
struct strbuf *, const char *);
extern void add_pending_object(struct rev_info *revs,
struct object *obj, const char *name);
extern void add_pending_sha1(struct rev_info *revs,
const char *name, const unsigned char *sha1,
unsigned int flags);
extern void add_head_to_pending(struct rev_info *);
extern void add_reflogs_to_pending(struct rev_info *, unsigned int flags);
extern void add_index_objects_to_pending(struct rev_info *, unsigned int flags);
enum commit_action {
commit_ignore,
commit_show,
commit_error
};
extern enum commit_action get_commit_action(struct rev_info *revs,
struct commit *commit);
extern enum commit_action simplify_commit(struct rev_info *revs,
struct commit *commit);
enum rewrite_result {
rewrite_one_ok,
rewrite_one_noparents,
rewrite_one_error
};
typedef enum rewrite_result (*rewrite_parent_fn_t)(struct rev_info *revs, struct commit **pp);
extern int rewrite_parents(struct rev_info *revs, struct commit *commit,
rewrite_parent_fn_t rewrite_parent);
/*
* The log machinery saves the original parent list so that
* get_saved_parents() can later tell what the real parents of the
* commits are, when commit->parents has been modified by history
* simpification.
*
* get_saved_parents() will transparently return commit->parents if
* history simplification is off.
*/
extern struct commit_list *get_saved_parents(struct rev_info *revs, const struct commit *commit);
#endif
|
#ifndef REVISION_H
#define REVISION_H
#include "parse-options.h"
#include "grep.h"
#include "notes.h"
#include "commit.h"
#include "diff.h"
/* Remember to update object flag allocation in object.h */
#define SEEN (1u<<0)
#define UNINTERESTING (1u<<1)
#define TREESAME (1u<<2)
#define SHOWN (1u<<3)
#define TMP_MARK (1u<<4) /* for isolated cases; clean after use */
#define BOUNDARY (1u<<5)
#define CHILD_SHOWN (1u<<6)
#define ADDED (1u<<7) /* Parents already parsed and added? */
#define SYMMETRIC_LEFT (1u<<8)
#define PATCHSAME (1u<<9)
#define BOTTOM (1u<<10)
#define TRACK_LINEAR (1u<<26)
#define ALL_REV_FLAGS (((1u<<11)-1) | TRACK_LINEAR)
#define DECORATE_SHORT_REFS 1
#define DECORATE_FULL_REFS 2
struct rev_info;
struct log_info;
struct string_list;
struct saved_parents;
struct rev_cmdline_info {
unsigned int nr;
unsigned int alloc;
struct rev_cmdline_entry {
struct object *item;
const char *name;
enum {
REV_CMD_REF,
REV_CMD_PARENTS_ONLY,
REV_CMD_LEFT,
REV_CMD_RIGHT,
REV_CMD_MERGE_BASE,
REV_CMD_REV
} whence;
unsigned flags;
} *rev;
};
#define REVISION_WALK_WALK 0
#define REVISION_WALK_NO_WALK_SORTED 1
#define REVISION_WALK_NO_WALK_UNSORTED 2
struct rev_info {
/* Starting list */
struct commit_list *commits;
struct object_array pending;
/* Parents of shown commits */
struct object_array boundary_commits;
/* The end-points specified by the end user */
struct rev_cmdline_info cmdline;
/* excluding from --branches, --refs, etc. expansion */
struct string_list *ref_excludes;
/* Basic information */
const char *prefix;
const char *def;
struct pathspec prune_data;
/* topo-sort */
enum rev_sort_order sort_order;
unsigned int early_output:1,
ignore_missing:1,
ignore_missing_links:1;
/* Traversal flags */
unsigned int dense:1,
prune:1,
no_walk:2,
show_all:1,
remove_empty_trees:1,
simplify_history:1,
topo_order:1,
simplify_merges:1,
simplify_by_decoration:1,
tag_objects:1,
tree_objects:1,
blob_objects:1,
verify_objects:1,
edge_hint:1,
edge_hint_aggressive:1,
limited:1,
unpacked:1,
boundary:2,
count:1,
left_right:1,
left_only:1,
right_only:1,
rewrite_parents:1,
print_parents:1,
show_source:1,
show_decorations:1,
reverse:1,
reverse_output_stage:1,
cherry_pick:1,
cherry_mark:1,
bisect:1,
ancestry_path:1,
first_parent_only:1,
line_level_traverse:1;
/* Diff flags */
unsigned int diff:1,
full_diff:1,
show_root_diff:1,
no_commit_id:1,
verbose_header:1,
ignore_merges:1,
combine_merges:1,
dense_combined_merges:1,
always_show_header:1;
/* Format info */
unsigned int shown_one:1,
shown_dashes:1,
show_merge:1,
show_notes:1,
show_notes_given:1,
show_signature:1,
pretty_given:1,
abbrev_commit:1,
abbrev_commit_given:1,
zero_commit:1,
use_terminator:1,
missing_newline:1,
date_mode_explicit:1,
preserve_subject:1;
unsigned int disable_stdin:1;
unsigned int leak_pending:1;
/* --show-linear-break */
unsigned int track_linear:1,
track_first_time:1,
linear:1;
struct date_mode date_mode;
unsigned int abbrev;
enum cmit_fmt commit_format;
struct log_info *loginfo;
int nr, total;
const char *mime_boundary;
const char *patch_suffix;
int numbered_files;
int reroll_count;
char *message_id;
struct ident_split from_ident;
struct string_list *ref_message_ids;
int add_signoff;
const char *extra_headers;
const char *log_reencode;
const char *subject_prefix;
int no_inline;
int show_log_size;
struct string_list *mailmap;
/* Filter by commit log message */
struct grep_opt grep_filter;
/* Negate the match of grep_filter */
int invert_grep;
/* Display history graph */
struct git_graph *graph;
/* special limits */
int skip_count;
int max_count;
unsigned long max_age;
unsigned long min_age;
int min_parents;
int max_parents;
int (*include_check)(struct commit *, void *);
void *include_check_data;
/* diff info for patches and for paths limiting */
struct diff_options diffopt;
struct diff_options pruning;
struct reflog_walk_info *reflog_info;
struct decoration children;
struct decoration merge_simplification;
struct decoration treesame;
/* notes-specific options: which refs to show */
struct display_notes_opt notes_opt;
/* commit counts */
int count_left;
int count_right;
int count_same;
/* line level range that we are chasing */
struct decoration line_log_data;
/* copies of the parent lists, for --full-diff display */
struct saved_parents *saved_parents_slab;
struct commit_list *previous_parents;
const char *break_bar;
};
extern int ref_excluded(struct string_list *, const char *path);
void clear_ref_exclusion(struct string_list **);
void add_ref_exclusion(struct string_list **, const char *exclude);
#define REV_TREE_SAME 0
#define REV_TREE_NEW 1 /* Only new files */
#define REV_TREE_OLD 2 /* Only files removed */
#define REV_TREE_DIFFERENT 3 /* Mixed changes */
/* revision.c */
typedef void (*show_early_output_fn_t)(struct rev_info *, struct commit_list *);
extern volatile show_early_output_fn_t show_early_output;
struct setup_revision_opt {
const char *def;
void (*tweak)(struct rev_info *, struct setup_revision_opt *);
const char *submodule;
int assume_dashdash;
unsigned revarg_opt;
};
extern void init_revisions(struct rev_info *revs, const char *prefix);
extern int setup_revisions(int argc, const char **argv, struct rev_info *revs,
struct setup_revision_opt *);
extern void parse_revision_opt(struct rev_info *revs, struct parse_opt_ctx_t *ctx,
const struct option *options,
const char * const usagestr[]);
#define REVARG_CANNOT_BE_FILENAME 01
#define REVARG_COMMITTISH 02
extern int handle_revision_arg(const char *arg, struct rev_info *revs,
int flags, unsigned revarg_opt);
extern void reset_revision_walk(void);
extern int prepare_revision_walk(struct rev_info *revs);
extern struct commit *get_revision(struct rev_info *revs);
extern char *get_revision_mark(const struct rev_info *revs,
const struct commit *commit);
extern void put_revision_mark(const struct rev_info *revs,
const struct commit *commit);
extern void mark_parents_uninteresting(struct commit *commit);
extern void mark_tree_uninteresting(struct tree *tree);
char *path_name(struct strbuf *path, const char *name);
extern void show_object_with_name(FILE *, struct object *, const char *);
extern void add_pending_object(struct rev_info *revs,
struct object *obj, const char *name);
extern void add_pending_sha1(struct rev_info *revs,
const char *name, const unsigned char *sha1,
unsigned int flags);
extern void add_head_to_pending(struct rev_info *);
extern void add_reflogs_to_pending(struct rev_info *, unsigned int flags);
extern void add_index_objects_to_pending(struct rev_info *, unsigned int flags);
enum commit_action {
commit_ignore,
commit_show,
commit_error
};
extern enum commit_action get_commit_action(struct rev_info *revs,
struct commit *commit);
extern enum commit_action simplify_commit(struct rev_info *revs,
struct commit *commit);
enum rewrite_result {
rewrite_one_ok,
rewrite_one_noparents,
rewrite_one_error
};
typedef enum rewrite_result (*rewrite_parent_fn_t)(struct rev_info *revs, struct commit **pp);
extern int rewrite_parents(struct rev_info *revs, struct commit *commit,
rewrite_parent_fn_t rewrite_parent);
/*
* The log machinery saves the original parent list so that
* get_saved_parents() can later tell what the real parents of the
* commits are, when commit->parents has been modified by history
* simpification.
*
* get_saved_parents() will transparently return commit->parents if
* history simplification is off.
*/
extern struct commit_list *get_saved_parents(struct rev_info *revs, const struct commit *commit);
#endif
|
4946_8
|
crossvul
|
h
|
CWE-119
|
Improper Restriction of Operations within the Bounds of a Memory Buffer - Certain languages allow direct addressing of memory locations and do not automatically ensure that these locations are valid for the memory buffer that is being referenced.
|
c
|
/******************************************************************************
** Copyright (c) 2017-2018, Intel Corporation **
** All rights reserved. **
** **
** Redistribution and use in source and binary forms, with or without **
** modification, are permitted provided that the following conditions **
** are met: **
** 1. Redistributions of source code must retain the above copyright **
** notice, this list of conditions and the following disclaimer. **
** 2. Redistributions in binary form must reproduce the above copyright **
** notice, this list of conditions and the following disclaimer in the **
** documentation and/or other materials provided with the distribution. **
** 3. Neither the name of the copyright holder nor the names of its **
** contributors may be used to endorse or promote products derived **
** from this software without specific prior written permission. **
** **
** THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS **
** "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT **
** LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR **
** A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT **
** HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, **
** SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED **
** TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR **
** PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF **
** LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING **
** NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS **
** SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. **
******************************************************************************/
/* Alexander Heinecke (Intel Corp.)
******************************************************************************/
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <math.h>
#include <assert.h>
#if defined(__EDGE_EXECUTE_F32__)
#define REALTYPE float
#else
#define REALTYPE double
#endif
typedef struct edge_mat_desc {
unsigned int row_count;
unsigned int col_count;
unsigned int num_elements;
} edge_mat_desc;
static void libxsmm_sparse_csr_reader( const char* i_csr_file_in,
unsigned int** o_row_idx,
unsigned int** o_column_idx,
REALTYPE** o_values,
unsigned int* o_row_count,
unsigned int* o_column_count,
unsigned int* o_element_count ) {
FILE *l_csr_file_handle;
const unsigned int l_line_length = 512;
char l_line[512/*l_line_length*/+1];
unsigned int l_header_read = 0;
unsigned int* l_row_idx_id = NULL;
unsigned int l_i = 0;
l_csr_file_handle = fopen( i_csr_file_in, "r" );
if ( l_csr_file_handle == NULL ) {
fprintf( stderr, "cannot open CSR file!\n" );
return;
}
while (fgets(l_line, l_line_length, l_csr_file_handle) != NULL) {
if ( strlen(l_line) == l_line_length ) {
fprintf( stderr, "could not read file length!\n" );
return;
}
/* check if we are still reading comments header */
if ( l_line[0] == '%' ) {
continue;
} else {
/* if we are the first line after comment header, we allocate our data structures */
if ( l_header_read == 0 ) {
if ( sscanf(l_line, "%u %u %u", o_row_count, o_column_count, o_element_count) == 3 ) {
/* allocate CSC datastructure matching mtx file */
*o_column_idx = (unsigned int*) malloc(sizeof(unsigned int) * (*o_element_count));
*o_row_idx = (unsigned int*) malloc(sizeof(unsigned int) * (*o_row_count + 1));
*o_values = (REALTYPE*) malloc(sizeof(double) * (*o_element_count));
l_row_idx_id = (unsigned int*) malloc(sizeof(unsigned int) * (*o_row_count));
/* check if mallocs were successful */
if ( ( *o_row_idx == NULL ) ||
( *o_column_idx == NULL ) ||
( *o_values == NULL ) ||
( l_row_idx_id == NULL ) ) {
fprintf( stderr, "could not allocate sp data!\n" );
return;
}
/* set everything to zero for init */
memset(*o_row_idx, 0, sizeof(unsigned int)*(*o_row_count + 1));
memset(*o_column_idx, 0, sizeof(unsigned int)*(*o_element_count));
memset(*o_values, 0, sizeof(double)*(*o_element_count));
memset(l_row_idx_id, 0, sizeof(unsigned int)*(*o_row_count));
/* init column idx */
for ( l_i = 0; l_i < (*o_row_count + 1); l_i++)
(*o_row_idx)[l_i] = (*o_element_count);
/* init */
(*o_row_idx)[0] = 0;
l_i = 0;
l_header_read = 1;
} else {
fprintf( stderr, "could not csr description!\n" );
return;
}
/* now we read the actual content */
} else {
unsigned int l_row, l_column;
REALTYPE l_value;
/* read a line of content */
#if defined(__EDGE_EXECUTE_F32__)
if ( sscanf(l_line, "%u %u %f", &l_row, &l_column, &l_value) != 3 ) {
fprintf( stderr, "could not read element!\n" );
return;
}
#else
if ( sscanf(l_line, "%u %u %lf", &l_row, &l_column, &l_value) != 3 ) {
fprintf( stderr, "could not read element!\n" );
return;
}
#endif
/* adjust numbers to zero termination */
l_row--;
l_column--;
/* add these values to row and value structure */
(*o_column_idx)[l_i] = l_column;
(*o_values)[l_i] = l_value;
l_i++;
/* handle columns, set id to own for this column, yeah we need to handle empty columns */
l_row_idx_id[l_row] = 1;
(*o_row_idx)[l_row+1] = l_i;
}
}
}
/* close mtx file */
fclose( l_csr_file_handle );
/* check if we read a file which was consistent */
if ( l_i != (*o_element_count) ) {
fprintf( stderr, "we were not able to read all elements!\n" );
return;
}
/* let's handle empty rows */
for ( l_i = 0; l_i < (*o_row_count); l_i++) {
if ( l_row_idx_id[l_i] == 0 ) {
(*o_row_idx)[l_i+1] = (*o_row_idx)[l_i];
}
}
/* free helper data structure */
if ( l_row_idx_id != NULL ) {
free( l_row_idx_id );
}
}
/* Reads a 1-based "row column value" sparse-matrix text file (MatrixMarket-like)
 * into a freshly allocated CSC structure.
 *
 *   i_csc_file_in    path of the input file
 *   o_column_idx     out: column pointer array, length *o_column_count + 1
 *   o_row_idx        out: row index per non-zero, length *o_element_count
 *   o_values         out: value per non-zero, length *o_element_count
 *   o_row_count      out: number of rows from the header
 *   o_column_count   out: number of columns from the header
 *   o_element_count  out: number of non-zeros from the header
 *
 * On any error a message is printed to stderr and the function returns early.
 * NOTE(review): partially allocated outputs are not freed on error, matching
 * the original contract; callers must validate the outputs. */
static void libxsmm_sparse_csc_reader( const char* i_csc_file_in,
                                       unsigned int** o_column_idx,
                                       unsigned int** o_row_idx,
                                       REALTYPE** o_values,
                                       unsigned int* o_row_count,
                                       unsigned int* o_column_count,
                                       unsigned int* o_element_count ) {
  FILE *l_csc_file_handle;
  const unsigned int l_line_length = 512;
  char l_line[512/*l_line_length*/+1];
  unsigned int l_header_read = 0;
  unsigned int* l_column_idx_id = NULL;
  unsigned int l_i = 0;

  l_csc_file_handle = fopen( i_csc_file_in, "r" );
  if ( l_csc_file_handle == NULL ) {
    fprintf( stderr, "cannot open CSC file!\n" );
    return;
  }

  while ( fgets(l_line, l_line_length, l_csc_file_handle) != NULL ) {
    if ( strlen(l_line) == l_line_length ) {
      fprintf( stderr, "could not read file length!\n" );
      fclose( l_csc_file_handle );
      free( l_column_idx_id );
      return;
    }
    /* skip comment header lines */
    if ( l_line[0] == '%' ) {
      continue;
    }
    if ( l_header_read == 0 ) {
      /* first non-comment line carries the dimensions; zero counts are
       * rejected so the allocations and index arithmetic below stay
       * well-defined (matches the validated CSR reader) */
      if ( 3 == sscanf(l_line, "%u %u %u", o_row_count, o_column_count, o_element_count) &&
           0 != *o_row_count && 0 != *o_column_count && 0 != *o_element_count ) {
        /* allocate CSC datastructure matching mtx file */
        *o_row_idx = (unsigned int*) malloc(sizeof(unsigned int) * (*o_element_count));
        *o_column_idx = (unsigned int*) malloc(sizeof(unsigned int) * (*o_column_count + 1));
        *o_values = (REALTYPE*) malloc(sizeof(REALTYPE) * (*o_element_count));
        l_column_idx_id = (unsigned int*) malloc(sizeof(unsigned int) * (*o_column_count));
        /* check if mallocs were successful */
        if ( ( *o_row_idx == NULL ) || ( *o_column_idx == NULL ) ||
             ( *o_values == NULL ) || ( l_column_idx_id == NULL ) ) {
          fprintf( stderr, "could not allocate sp data!\n" );
          fclose( l_csc_file_handle );
          free( l_column_idx_id );
          return;
        }
        /* zero-init; column pointers start "past the end" so trailing empty
         * columns are fixed up by the loop at the bottom */
        memset(*o_column_idx, 0, sizeof(unsigned int)*(*o_column_count + 1));
        memset(*o_row_idx, 0, sizeof(unsigned int)*(*o_element_count));
        memset(*o_values, 0, sizeof(REALTYPE)*(*o_element_count));
        memset(l_column_idx_id, 0, sizeof(unsigned int)*(*o_column_count));
        for ( l_i = 0; l_i < (*o_column_count + 1); l_i++) {
          (*o_column_idx)[l_i] = (*o_element_count);
        }
        (*o_column_idx)[0] = 0;
        l_i = 0;
        l_header_read = 1;
      } else {
        fprintf( stderr, "could not csc description!\n" );
        fclose( l_csc_file_handle );
        return;
      }
    } else {
      unsigned int l_row, l_column;
      REALTYPE l_value;
      /* read one "row column value" element line */
#if defined(__EDGE_EXECUTE_F32__)
      if ( sscanf(l_line, "%u %u %f", &l_row, &l_column, &l_value) != 3 ) {
#else
      if ( sscanf(l_line, "%u %u %lf", &l_row, &l_column, &l_value) != 3 ) {
#endif
        fprintf( stderr, "could not read element!\n" );
        fclose( l_csc_file_handle );
        free( l_column_idx_id );
        return;
      }
      /* file is 1-based; 0 wraps to UINT_MAX here and is rejected below */
      l_row--;
      l_column--;
      /* bounds-check every parsed coordinate before it is used as an array
       * index so malformed input cannot write out of bounds */
      if ( l_row >= *o_row_count || l_column >= *o_column_count || l_i >= *o_element_count ) {
        fprintf( stderr, "invalid element coordinates!\n" );
        fclose( l_csc_file_handle );
        free( l_column_idx_id );
        return;
      }
      (*o_row_idx)[l_i] = l_row;
      (*o_values)[l_i] = l_value;
      l_i++;
      /* remember that this column has at least one element */
      l_column_idx_id[l_column] = 1;
      (*o_column_idx)[l_column+1] = l_i;
    }
  }
  /* close mtx file */
  fclose( l_csc_file_handle );

  /* check if we read a file which was consistent */
  if ( l_i != (*o_element_count) ) {
    fprintf( stderr, "we were not able to read all elements!\n" );
    free( l_column_idx_id );
    return;
  }
  /* propagate column pointers across empty columns */
  for ( l_i = 0; l_i < (*o_column_count); l_i++) {
    if ( l_column_idx_id[l_i] == 0 ) {
      (*o_column_idx)[l_i+1] = (*o_column_idx)[l_i];
    }
  }
  /* free helper data structure; free(NULL) is a no-op */
  free( l_column_idx_id );
}
/* Parses only the "rows cols nnz" header line of a CSR matrix-market file and
 * returns it as an edge_mat_desc. On any error (unopenable file, overlong
 * line, malformed header) a zeroed descriptor is returned. */
static edge_mat_desc libxsmm_sparse_csr_reader_desc( const char* i_csr_file_in ) {
  FILE *l_csr_file_handle;
  const unsigned int l_line_length = 512;
  char l_line[512/*l_line_length*/+1];
  unsigned int l_row_count = 0;
  unsigned int l_col_count = 0;
  unsigned int l_num_elements = 0;
  edge_mat_desc desc;
  desc.row_count = 0;
  desc.col_count = 0;
  desc.num_elements = 0;

  l_csr_file_handle = fopen( i_csr_file_in, "r" );
  if ( l_csr_file_handle == NULL ) {
    fprintf( stderr, "cannot open CSR file!\n" );
    return desc;
  }
  while ( fgets(l_line, l_line_length, l_csr_file_handle) != NULL ) {
    if ( strlen(l_line) == l_line_length ) {
      fprintf( stderr, "could not read file length!\n" );
      break;
    }
    /* skip the comment header */
    if ( l_line[0] == '%' ) {
      continue;
    }
    /* first non-comment line is the matrix description */
    if ( sscanf(l_line, "%u %u %u", &l_row_count, &l_col_count, &l_num_elements) == 3 ) {
      desc.row_count = l_row_count;
      desc.col_count = l_col_count;
      desc.num_elements = l_num_elements;
    } else {
      fprintf( stderr, "could not csr description!\n" );
    }
    /* header found (or malformed): the rest of the file is irrelevant here */
    break;
  }
  /* always close the handle (the original version leaked it) */
  fclose( l_csr_file_handle );
  return desc;
}
/* Parses only the "rows cols nnz" header line of a CSC matrix-market file and
 * returns it as an edge_mat_desc. On any error (unopenable file, overlong
 * line, malformed header) a zeroed descriptor is returned. */
static edge_mat_desc libxsmm_sparse_csc_reader_desc( const char* i_csc_file_in ) {
  FILE *l_csc_file_handle;
  const unsigned int l_line_length = 512;
  char l_line[512/*l_line_length*/+1];
  unsigned int l_row_count = 0;
  unsigned int l_col_count = 0;
  unsigned int l_num_elements = 0;
  edge_mat_desc desc;
  desc.row_count = 0;
  desc.col_count = 0;
  desc.num_elements = 0;

  l_csc_file_handle = fopen( i_csc_file_in, "r" );
  if ( l_csc_file_handle == NULL ) {
    fprintf( stderr, "cannot open CSC file!\n" );
    return desc;
  }
  while ( fgets(l_line, l_line_length, l_csc_file_handle) != NULL ) {
    if ( strlen(l_line) == l_line_length ) {
      fprintf( stderr, "could not read file length!\n" );
      break;
    }
    /* skip the comment header */
    if ( l_line[0] == '%' ) {
      continue;
    }
    /* first non-comment line is the matrix description */
    if ( sscanf(l_line, "%u %u %u", &l_row_count, &l_col_count, &l_num_elements) == 3 ) {
      desc.row_count = l_row_count;
      desc.col_count = l_col_count;
      desc.num_elements = l_num_elements;
    } else {
      fprintf( stderr, "could not csc description!\n" );
    }
    /* header found (or malformed): the rest of the file is irrelevant here */
    break;
  }
  /* always close the handle (the original version leaked it) */
  fclose( l_csc_file_handle );
  return desc;
}
|
/******************************************************************************
** Copyright (c) 2017-2018, Intel Corporation **
** All rights reserved. **
** **
** Redistribution and use in source and binary forms, with or without **
** modification, are permitted provided that the following conditions **
** are met: **
** 1. Redistributions of source code must retain the above copyright **
** notice, this list of conditions and the following disclaimer. **
** 2. Redistributions in binary form must reproduce the above copyright **
** notice, this list of conditions and the following disclaimer in the **
** documentation and/or other materials provided with the distribution. **
** 3. Neither the name of the copyright holder nor the names of its **
** contributors may be used to endorse or promote products derived **
** from this software without specific prior written permission. **
** **
** THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS **
** "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT **
** LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR **
** A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT **
** HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, **
** SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED **
** TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR **
** PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF **
** LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING **
** NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS **
** SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. **
******************************************************************************/
/* Alexander Heinecke (Intel Corp.)
******************************************************************************/
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <math.h>
#include <assert.h>
#if defined(__EDGE_EXECUTE_F32__)
#define REALTYPE float
#else
#define REALTYPE double
#endif
/* Compact description of a sparse matrix file: its dimensions and the
 * number of stored non-zero elements, as read from the file header. */
typedef struct edge_mat_desc {
  unsigned int row_count;    /* number of rows */
  unsigned int col_count;    /* number of columns */
  unsigned int num_elements; /* number of stored non-zeros */
} edge_mat_desc;
/* Reads a 1-based "row column value" sparse-matrix text file (MatrixMarket-like)
 * into a freshly allocated CSR structure.
 *
 *   i_csr_file_in    path of the input file
 *   o_row_idx        out: row pointer array, length *o_row_count + 1
 *   o_column_idx     out: column index per non-zero, length *o_element_count
 *   o_values         out: value per non-zero, length *o_element_count
 *   o_row_count      out: number of rows from the header
 *   o_column_count   out: number of columns from the header
 *   o_element_count  out: number of non-zeros from the header
 *
 * On any error a message is printed to stderr and the function returns early.
 * NOTE(review): partially allocated outputs are not freed on error, matching
 * the original contract; callers must validate the outputs. */
static void libxsmm_sparse_csr_reader( const char* i_csr_file_in,
                                       unsigned int** o_row_idx,
                                       unsigned int** o_column_idx,
                                       REALTYPE** o_values,
                                       unsigned int* o_row_count,
                                       unsigned int* o_column_count,
                                       unsigned int* o_element_count ) {
  FILE *l_csr_file_handle;
  const unsigned int l_line_length = 512;
  char l_line[512/*l_line_length*/+1];
  unsigned int l_header_read = 0;
  unsigned int* l_row_idx_id = NULL;
  unsigned int l_i = 0;

  l_csr_file_handle = fopen( i_csr_file_in, "r" );
  if ( l_csr_file_handle == NULL ) {
    fprintf( stderr, "cannot open CSR file!\n" );
    return;
  }

  while ( fgets(l_line, l_line_length, l_csr_file_handle) != NULL ) {
    if ( strlen(l_line) == l_line_length ) {
      fprintf( stderr, "could not read file length!\n" );
      fclose( l_csr_file_handle );
      free( l_row_idx_id );
      return;
    }
    /* skip comment header lines */
    if ( l_line[0] == '%' ) {
      continue;
    }
    if ( l_header_read == 0 ) {
      /* first non-comment line carries the dimensions; zero counts are
       * rejected so the allocations and index arithmetic below stay
       * well-defined */
      if ( 3 == sscanf(l_line, "%u %u %u", o_row_count, o_column_count, o_element_count) &&
           0 != *o_row_count && 0 != *o_column_count && 0 != *o_element_count ) {
        /* allocate CSR datastructure matching mtx file */
        *o_column_idx = (unsigned int*) malloc(sizeof(unsigned int) * (*o_element_count));
        *o_row_idx = (unsigned int*) malloc(sizeof(unsigned int) * (*o_row_count + 1));
        *o_values = (REALTYPE*) malloc(sizeof(REALTYPE) * (*o_element_count));
        l_row_idx_id = (unsigned int*) malloc(sizeof(unsigned int) * (*o_row_count));
        /* check if mallocs were successful */
        if ( ( *o_row_idx == NULL ) || ( *o_column_idx == NULL ) ||
             ( *o_values == NULL ) || ( l_row_idx_id == NULL ) ) {
          fprintf( stderr, "could not allocate sp data!\n" );
          fclose( l_csr_file_handle );
          free( l_row_idx_id );
          return;
        }
        /* zero-init; row pointers start "past the end" so trailing empty
         * rows are fixed up by the loop at the bottom */
        memset(*o_row_idx, 0, sizeof(unsigned int)*(*o_row_count + 1));
        memset(*o_column_idx, 0, sizeof(unsigned int)*(*o_element_count));
        memset(*o_values, 0, sizeof(REALTYPE)*(*o_element_count));
        memset(l_row_idx_id, 0, sizeof(unsigned int)*(*o_row_count));
        for ( l_i = 0; l_i < (*o_row_count + 1); l_i++) {
          (*o_row_idx)[l_i] = (*o_element_count);
        }
        (*o_row_idx)[0] = 0;
        l_i = 0;
        l_header_read = 1;
      } else {
        fprintf( stderr, "could not csr description!\n" );
        fclose( l_csr_file_handle );
        return;
      }
    } else {
      unsigned int l_row, l_column;
      REALTYPE l_value;
      /* read one "row column value" element line */
#if defined(__EDGE_EXECUTE_F32__)
      if ( sscanf(l_line, "%u %u %f", &l_row, &l_column, &l_value) != 3 ) {
#else
      if ( sscanf(l_line, "%u %u %lf", &l_row, &l_column, &l_value) != 3 ) {
#endif
        fprintf( stderr, "could not read element!\n" );
        fclose( l_csr_file_handle );
        free( l_row_idx_id );
        return;
      }
      /* file is 1-based; 0 wraps to UINT_MAX here and is rejected below */
      l_row--;
      l_column--;
      /* bounds-check every parsed coordinate before it is used as an array
       * index so malformed input cannot write out of bounds */
      if ( l_row >= *o_row_count || l_column >= *o_column_count || l_i >= *o_element_count ) {
        fprintf( stderr, "invalid element coordinates!\n" );
        fclose( l_csr_file_handle );
        free( l_row_idx_id );
        return;
      }
      (*o_column_idx)[l_i] = l_column;
      (*o_values)[l_i] = l_value;
      l_i++;
      /* remember that this row has at least one element */
      l_row_idx_id[l_row] = 1;
      (*o_row_idx)[l_row+1] = l_i;
    }
  }
  /* close mtx file */
  fclose( l_csr_file_handle );

  /* check if we read a file which was consistent */
  if ( l_i != (*o_element_count) ) {
    fprintf( stderr, "we were not able to read all elements!\n" );
    free( l_row_idx_id );
    return;
  }
  /* propagate row pointers across empty rows */
  for ( l_i = 0; l_i < (*o_row_count); l_i++) {
    if ( l_row_idx_id[l_i] == 0 ) {
      (*o_row_idx)[l_i+1] = (*o_row_idx)[l_i];
    }
  }
  /* free helper data structure; free(NULL) is a no-op */
  free( l_row_idx_id );
}
/* Reads a 1-based "row column value" sparse-matrix text file (MatrixMarket-like)
 * into a freshly allocated CSC structure.
 *
 *   i_csc_file_in    path of the input file
 *   o_column_idx     out: column pointer array, length *o_column_count + 1
 *   o_row_idx        out: row index per non-zero, length *o_element_count
 *   o_values         out: value per non-zero, length *o_element_count
 *   o_row_count      out: number of rows from the header
 *   o_column_count   out: number of columns from the header
 *   o_element_count  out: number of non-zeros from the header
 *
 * On any error a message is printed to stderr and the function returns early.
 * NOTE(review): partially allocated outputs are not freed on error, matching
 * the original contract; callers must validate the outputs. */
static void libxsmm_sparse_csc_reader( const char* i_csc_file_in,
                                       unsigned int** o_column_idx,
                                       unsigned int** o_row_idx,
                                       REALTYPE** o_values,
                                       unsigned int* o_row_count,
                                       unsigned int* o_column_count,
                                       unsigned int* o_element_count ) {
  FILE *l_csc_file_handle;
  const unsigned int l_line_length = 512;
  char l_line[512/*l_line_length*/+1];
  unsigned int l_header_read = 0;
  unsigned int* l_column_idx_id = NULL;
  unsigned int l_i = 0;

  l_csc_file_handle = fopen( i_csc_file_in, "r" );
  if ( l_csc_file_handle == NULL ) {
    fprintf( stderr, "cannot open CSC file!\n" );
    return;
  }

  while ( fgets(l_line, l_line_length, l_csc_file_handle) != NULL ) {
    if ( strlen(l_line) == l_line_length ) {
      fprintf( stderr, "could not read file length!\n" );
      fclose( l_csc_file_handle );
      free( l_column_idx_id );
      return;
    }
    /* skip comment header lines */
    if ( l_line[0] == '%' ) {
      continue;
    }
    if ( l_header_read == 0 ) {
      /* first non-comment line carries the dimensions; zero counts are
       * rejected so the allocations and index arithmetic below stay
       * well-defined (matches the validated CSR reader) */
      if ( 3 == sscanf(l_line, "%u %u %u", o_row_count, o_column_count, o_element_count) &&
           0 != *o_row_count && 0 != *o_column_count && 0 != *o_element_count ) {
        /* allocate CSC datastructure matching mtx file */
        *o_row_idx = (unsigned int*) malloc(sizeof(unsigned int) * (*o_element_count));
        *o_column_idx = (unsigned int*) malloc(sizeof(unsigned int) * (*o_column_count + 1));
        *o_values = (REALTYPE*) malloc(sizeof(REALTYPE) * (*o_element_count));
        l_column_idx_id = (unsigned int*) malloc(sizeof(unsigned int) * (*o_column_count));
        /* check if mallocs were successful */
        if ( ( *o_row_idx == NULL ) || ( *o_column_idx == NULL ) ||
             ( *o_values == NULL ) || ( l_column_idx_id == NULL ) ) {
          fprintf( stderr, "could not allocate sp data!\n" );
          fclose( l_csc_file_handle );
          free( l_column_idx_id );
          return;
        }
        /* zero-init; column pointers start "past the end" so trailing empty
         * columns are fixed up by the loop at the bottom */
        memset(*o_column_idx, 0, sizeof(unsigned int)*(*o_column_count + 1));
        memset(*o_row_idx, 0, sizeof(unsigned int)*(*o_element_count));
        memset(*o_values, 0, sizeof(REALTYPE)*(*o_element_count));
        memset(l_column_idx_id, 0, sizeof(unsigned int)*(*o_column_count));
        for ( l_i = 0; l_i < (*o_column_count + 1); l_i++) {
          (*o_column_idx)[l_i] = (*o_element_count);
        }
        (*o_column_idx)[0] = 0;
        l_i = 0;
        l_header_read = 1;
      } else {
        fprintf( stderr, "could not csc description!\n" );
        fclose( l_csc_file_handle );
        return;
      }
    } else {
      unsigned int l_row, l_column;
      REALTYPE l_value;
      /* read one "row column value" element line */
#if defined(__EDGE_EXECUTE_F32__)
      if ( sscanf(l_line, "%u %u %f", &l_row, &l_column, &l_value) != 3 ) {
#else
      if ( sscanf(l_line, "%u %u %lf", &l_row, &l_column, &l_value) != 3 ) {
#endif
        fprintf( stderr, "could not read element!\n" );
        fclose( l_csc_file_handle );
        free( l_column_idx_id );
        return;
      }
      /* file is 1-based; 0 wraps to UINT_MAX here and is rejected below */
      l_row--;
      l_column--;
      /* bounds-check every parsed coordinate before it is used as an array
       * index so malformed input cannot write out of bounds */
      if ( l_row >= *o_row_count || l_column >= *o_column_count || l_i >= *o_element_count ) {
        fprintf( stderr, "invalid element coordinates!\n" );
        fclose( l_csc_file_handle );
        free( l_column_idx_id );
        return;
      }
      (*o_row_idx)[l_i] = l_row;
      (*o_values)[l_i] = l_value;
      l_i++;
      /* remember that this column has at least one element */
      l_column_idx_id[l_column] = 1;
      (*o_column_idx)[l_column+1] = l_i;
    }
  }
  /* close mtx file */
  fclose( l_csc_file_handle );

  /* check if we read a file which was consistent */
  if ( l_i != (*o_element_count) ) {
    fprintf( stderr, "we were not able to read all elements!\n" );
    free( l_column_idx_id );
    return;
  }
  /* propagate column pointers across empty columns */
  for ( l_i = 0; l_i < (*o_column_count); l_i++) {
    if ( l_column_idx_id[l_i] == 0 ) {
      (*o_column_idx)[l_i+1] = (*o_column_idx)[l_i];
    }
  }
  /* free helper data structure; free(NULL) is a no-op */
  free( l_column_idx_id );
}
/* Parses only the "rows cols nnz" header line of a CSR matrix-market file and
 * returns it as an edge_mat_desc. On any error (unopenable file, overlong
 * line, malformed header) a zeroed descriptor is returned. */
static edge_mat_desc libxsmm_sparse_csr_reader_desc( const char* i_csr_file_in ) {
  FILE *l_csr_file_handle;
  const unsigned int l_line_length = 512;
  char l_line[512/*l_line_length*/+1];
  unsigned int l_row_count = 0;
  unsigned int l_col_count = 0;
  unsigned int l_num_elements = 0;
  edge_mat_desc desc;
  desc.row_count = 0;
  desc.col_count = 0;
  desc.num_elements = 0;

  l_csr_file_handle = fopen( i_csr_file_in, "r" );
  if ( l_csr_file_handle == NULL ) {
    fprintf( stderr, "cannot open CSR file!\n" );
    return desc;
  }
  while ( fgets(l_line, l_line_length, l_csr_file_handle) != NULL ) {
    if ( strlen(l_line) == l_line_length ) {
      fprintf( stderr, "could not read file length!\n" );
      break;
    }
    /* skip the comment header */
    if ( l_line[0] == '%' ) {
      continue;
    }
    /* first non-comment line is the matrix description */
    if ( sscanf(l_line, "%u %u %u", &l_row_count, &l_col_count, &l_num_elements) == 3 ) {
      desc.row_count = l_row_count;
      desc.col_count = l_col_count;
      desc.num_elements = l_num_elements;
    } else {
      fprintf( stderr, "could not csr description!\n" );
    }
    /* header found (or malformed): the rest of the file is irrelevant here */
    break;
  }
  /* always close the handle (the original version leaked it) */
  fclose( l_csr_file_handle );
  return desc;
}
/* Parses only the "rows cols nnz" header line of a CSC matrix-market file and
 * returns it as an edge_mat_desc. On any error (unopenable file, overlong
 * line, malformed header) a zeroed descriptor is returned. */
static edge_mat_desc libxsmm_sparse_csc_reader_desc( const char* i_csc_file_in ) {
  FILE *l_csc_file_handle;
  const unsigned int l_line_length = 512;
  char l_line[512/*l_line_length*/+1];
  unsigned int l_row_count = 0;
  unsigned int l_col_count = 0;
  unsigned int l_num_elements = 0;
  edge_mat_desc desc;
  desc.row_count = 0;
  desc.col_count = 0;
  desc.num_elements = 0;

  l_csc_file_handle = fopen( i_csc_file_in, "r" );
  if ( l_csc_file_handle == NULL ) {
    fprintf( stderr, "cannot open CSC file!\n" );
    return desc;
  }
  while ( fgets(l_line, l_line_length, l_csc_file_handle) != NULL ) {
    if ( strlen(l_line) == l_line_length ) {
      fprintf( stderr, "could not read file length!\n" );
      break;
    }
    /* skip the comment header */
    if ( l_line[0] == '%' ) {
      continue;
    }
    /* first non-comment line is the matrix description */
    if ( sscanf(l_line, "%u %u %u", &l_row_count, &l_col_count, &l_num_elements) == 3 ) {
      desc.row_count = l_row_count;
      desc.col_count = l_col_count;
      desc.num_elements = l_num_elements;
    } else {
      fprintf( stderr, "could not csc description!\n" );
    }
    /* header found (or malformed): the rest of the file is irrelevant here */
    break;
  }
  /* always close the handle (the original version leaked it) */
  fclose( l_csc_file_handle );
  return desc;
}
|
507_0
|
crossvul
|
h
|
CWE-119
|
Improper Restriction of Operations within the Bounds of a Memory Buffer - Certain languages allow direct addressing of memory locations and do not automatically ensure that these locations are valid for the memory buffer that is being referenced.
|
c
|
/* (c) Magnus Auvinen. See licence.txt in the root of the distribution for more information. */
/* If you are missing that file, acquire a complete release at teeworlds.com. */
#ifndef ENGINE_SHARED_SNAPSHOT_H
#define ENGINE_SHARED_SNAPSHOT_H
#include <base/system.h>
// CSnapshot
// A single item inside a snapshot. The header (this struct) is immediately
// followed in memory by the item's payload ints, hence the (this+1)
// pointer arithmetic in Data().
class CSnapshotItem
{
public:
	int m_TypeAndID; // packed: type in the high 16 bits, id in the low 16 bits

	int *Data() { return (int *)(this+1); } // payload starts right after the header
	int Type() { return m_TypeAndID>>16; }
	int ID() { return m_TypeAndID&0xffff; }
	int Key() { return m_TypeAndID; } // combined type+id key
};
// A snapshot: this header followed in memory by an offset table of
// m_NumItems ints, followed by the packed item data.
class CSnapshot
{
	friend class CSnapshotBuilder; // builder writes directly into this layout
	int m_DataSize; // size of the item-data region in bytes
	int m_NumItems;
	int *Offsets() const { return (int *)(this+1); } // offset table follows the header
	char *DataStart() const { return (char*)(Offsets()+m_NumItems); } // items follow the offsets
public:
	enum
	{
		MAX_SIZE=64*1024 // maximum total snapshot size in bytes
	};
	void Clear() { m_DataSize = 0; m_NumItems = 0; }
	int NumItems() const { return m_NumItems; }
	CSnapshotItem *GetItem(int Index);
	int GetItemSize(int Index);
	int GetItemIndex(int Key); // lookup by packed type+id key
	int Crc();
	void DebugDump();
};
// CSnapshotDelta: creates and applies deltas between two snapshots and
// keeps per-item-key data-rate statistics.
class CSnapshotDelta
{
public:
	// Delta payload: item counts followed by a variable-length data area.
	class CData
	{
	public:
		int m_NumDeletedItems;
		int m_NumUpdateItems;
		int m_NumTempItems; // needed?
		int m_pData[1]; // variable-length payload (storage extends past the struct)
	};

private:
	// TODO: strange arbitrary number
	short m_aItemSizes[64]; // static item sizes, indexed by item type
	int m_aSnapshotDataRate[0xffff];    // accumulated bytes per item key
	int m_aSnapshotDataUpdates[0xffff]; // update count per item key
	int m_SnapshotCurrent;
	CData m_Empty; // reusable empty delta returned by EmptyDelta()
	void UndiffItem(int *pPast, int *pDiff, int *pOut, int Size);
public:
	CSnapshotDelta();
	int GetDataRate(int Index) { return m_aSnapshotDataRate[Index]; }
	int GetDataUpdates(int Index) { return m_aSnapshotDataUpdates[Index]; }
	void SetStaticsize(int ItemType, int Size);
	CData *EmptyDelta();
	int CreateDelta(class CSnapshot *pFrom, class CSnapshot *pTo, void *pData);
	int UnpackDelta(class CSnapshot *pFrom, class CSnapshot *pTo, void *pData, int DataSize);
};
// CSnapshotStorage: doubly-linked list of stored snapshots keyed by tick.
class CSnapshotStorage
{
public:
	// One stored snapshot plus its bookkeeping.
	class CHolder
	{
	public:
		CHolder *m_pPrev;
		CHolder *m_pNext;

		int64 m_Tagtime; // local time the snapshot was stored
		int m_Tick;

		int m_SnapSize;
		CSnapshot *m_pSnap;
		CSnapshot *m_pAltSnap; // optional alternate copy (see Add's CreateAlt flag)
	};

	CHolder *m_pFirst;
	CHolder *m_pLast;

	void Init();
	void PurgeAll();
	void PurgeUntil(int Tick); // drop all snapshots older than Tick
	void Add(int Tick, int64 Tagtime, int DataSize, void *pData, int CreateAlt);
	int Get(int Tick, int64 *pTagtime, CSnapshot **ppData, CSnapshot **ppAltData);
};
// Incrementally builds a snapshot into a fixed-size internal buffer;
// Finish() serializes the result into caller-provided memory.
class CSnapshotBuilder
{
	enum
	{
		MAX_ITEMS = 1024 // maximum number of items per snapshot
	};

	char m_aData[CSnapshot::MAX_SIZE]; // packed item storage
	int m_DataSize;

	int m_aOffsets[MAX_ITEMS]; // offset of each item inside m_aData
	int m_NumItems;

public:
	void Init();

	void *NewItem(int Type, int ID, int Size);

	CSnapshotItem *GetItem(int Index);
	int *GetItemData(int Key);

	int Finish(void *pSnapdata);
};
#endif // ENGINE_SNAPSHOT_H
|
/* (c) Magnus Auvinen. See licence.txt in the root of the distribution for more information. */
/* If you are missing that file, acquire a complete release at teeworlds.com. */
#ifndef ENGINE_SHARED_SNAPSHOT_H
#define ENGINE_SHARED_SNAPSHOT_H
#include <base/system.h>
// CSnapshot
// A single item inside a snapshot. The header (this struct) is immediately
// followed in memory by the item's payload ints, hence the (this+1)
// pointer arithmetic in Data().
class CSnapshotItem
{
public:
	int m_TypeAndID; // packed: type in the high 16 bits, id in the low 16 bits

	int *Data() { return (int *)(this+1); } // payload starts right after the header
	int Type() { return m_TypeAndID>>16; }
	int ID() { return m_TypeAndID&0xffff; }
	int Key() { return m_TypeAndID; } // combined type+id key
};
// A snapshot: this header followed in memory by an offset table of
// m_NumItems ints, followed by the packed item data.
class CSnapshot
{
	friend class CSnapshotBuilder; // builder writes directly into this layout
	int m_DataSize; // size of the item-data region in bytes
	int m_NumItems;
	int *Offsets() const { return (int *)(this+1); } // offset table follows the header
	char *DataStart() const { return (char*)(Offsets()+m_NumItems); } // items follow the offsets
public:
	enum
	{
		MAX_PARTS = 64,              // upper bound on transfer parts per snapshot
		MAX_SIZE = MAX_PARTS*1024    // maximum total snapshot size in bytes
	};
	void Clear() { m_DataSize = 0; m_NumItems = 0; }
	int NumItems() const { return m_NumItems; }
	CSnapshotItem *GetItem(int Index);
	int GetItemSize(int Index);
	int GetItemIndex(int Key); // lookup by packed type+id key
	int Crc();
	void DebugDump();
};
// CSnapshotDelta: creates and applies deltas between two snapshots and
// keeps per-item-key data-rate statistics.
class CSnapshotDelta
{
public:
	// Delta payload: item counts followed by a variable-length data area.
	class CData
	{
	public:
		int m_NumDeletedItems;
		int m_NumUpdateItems;
		int m_NumTempItems; // needed?
		int m_pData[1]; // variable-length payload (storage extends past the struct)
	};

private:
	// TODO: strange arbitrary number
	short m_aItemSizes[64]; // static item sizes, indexed by item type
	int m_aSnapshotDataRate[0xffff];    // accumulated bytes per item key
	int m_aSnapshotDataUpdates[0xffff]; // update count per item key
	int m_SnapshotCurrent;
	CData m_Empty; // reusable empty delta returned by EmptyDelta()
	void UndiffItem(int *pPast, int *pDiff, int *pOut, int Size);
public:
	CSnapshotDelta();
	int GetDataRate(int Index) { return m_aSnapshotDataRate[Index]; }
	int GetDataUpdates(int Index) { return m_aSnapshotDataUpdates[Index]; }
	void SetStaticsize(int ItemType, int Size);
	CData *EmptyDelta();
	int CreateDelta(class CSnapshot *pFrom, class CSnapshot *pTo, void *pData);
	int UnpackDelta(class CSnapshot *pFrom, class CSnapshot *pTo, void *pData, int DataSize);
};
// CSnapshotStorage: doubly-linked list of stored snapshots keyed by tick.
class CSnapshotStorage
{
public:
	// One stored snapshot plus its bookkeeping.
	class CHolder
	{
	public:
		CHolder *m_pPrev;
		CHolder *m_pNext;

		int64 m_Tagtime; // local time the snapshot was stored
		int m_Tick;

		int m_SnapSize;
		CSnapshot *m_pSnap;
		CSnapshot *m_pAltSnap; // optional alternate copy (see Add's CreateAlt flag)
	};

	CHolder *m_pFirst;
	CHolder *m_pLast;

	void Init();
	void PurgeAll();
	void PurgeUntil(int Tick); // drop all snapshots older than Tick
	void Add(int Tick, int64 Tagtime, int DataSize, void *pData, int CreateAlt);
	int Get(int Tick, int64 *pTagtime, CSnapshot **ppData, CSnapshot **ppAltData);
};
// Incrementally builds a snapshot into a fixed-size internal buffer;
// Finish() serializes the result into caller-provided memory.
class CSnapshotBuilder
{
	enum
	{
		MAX_ITEMS = 1024 // maximum number of items per snapshot
	};

	char m_aData[CSnapshot::MAX_SIZE]; // packed item storage
	int m_DataSize;

	int m_aOffsets[MAX_ITEMS]; // offset of each item inside m_aData
	int m_NumItems;

public:
	void Init();

	void *NewItem(int Type, int ID, int Size);

	CSnapshotItem *GetItem(int Index);
	int *GetItemData(int Key);

	int Finish(void *pSnapdata);
};
#endif // ENGINE_SNAPSHOT_H
|
5419_1
|
crossvul
|
h
|
CWE-119
|
Improper Restriction of Operations within the Bounds of a Memory Buffer - Certain languages allow direct addressing of memory locations and do not automatically ensure that these locations are valid for the memory buffer that is being referenced.
|
c
|
#ifndef _ASM_X86_PGTABLE_H
#define _ASM_X86_PGTABLE_H
#include <asm/page.h>
#include <asm/e820.h>
#include <asm/pgtable_types.h>
/*
* Macro to mark a page protection value as UC-
*/
#define pgprot_noncached(prot) \
((boot_cpu_data.x86 > 3) \
? (__pgprot(pgprot_val(prot) | _PAGE_CACHE_UC_MINUS)) \
: (prot))
#ifndef __ASSEMBLY__
#include <asm/x86_init.h>
/*
* ZERO_PAGE is a global shared page that is always zero: used
* for zero-mapped memory areas etc..
*/
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
extern spinlock_t pgd_lock;
extern struct list_head pgd_list;
extern struct mm_struct *pgd_page_get_mm(struct page *page);
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else /* !CONFIG_PARAVIRT */
#define set_pte(ptep, pte) native_set_pte(ptep, pte)
#define set_pte_at(mm, addr, ptep, pte) native_set_pte_at(mm, addr, ptep, pte)
#define set_pmd_at(mm, addr, pmdp, pmd) native_set_pmd_at(mm, addr, pmdp, pmd)
#define set_pte_atomic(ptep, pte) \
native_set_pte_atomic(ptep, pte)
#define set_pmd(pmdp, pmd) native_set_pmd(pmdp, pmd)
#ifndef __PAGETABLE_PUD_FOLDED
#define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
#define pgd_clear(pgd) native_pgd_clear(pgd)
#endif
#ifndef set_pud
# define set_pud(pudp, pud) native_set_pud(pudp, pud)
#endif
#ifndef __PAGETABLE_PMD_FOLDED
#define pud_clear(pud) native_pud_clear(pud)
#endif
#define pte_clear(mm, addr, ptep) native_pte_clear(mm, addr, ptep)
#define pmd_clear(pmd) native_pmd_clear(pmd)
#define pte_update(mm, addr, ptep) do { } while (0)
#define pte_update_defer(mm, addr, ptep) do { } while (0)
#define pmd_update(mm, addr, ptep) do { } while (0)
#define pmd_update_defer(mm, addr, ptep) do { } while (0)
#define pgd_val(x) native_pgd_val(x)
#define __pgd(x) native_make_pgd(x)
#ifndef __PAGETABLE_PUD_FOLDED
#define pud_val(x) native_pud_val(x)
#define __pud(x) native_make_pud(x)
#endif
#ifndef __PAGETABLE_PMD_FOLDED
#define pmd_val(x) native_pmd_val(x)
#define __pmd(x) native_make_pmd(x)
#endif
#define pte_val(x) native_pte_val(x)
#define __pte(x) native_make_pte(x)
#define arch_end_context_switch(prev) do {} while(0)
#endif /* CONFIG_PARAVIRT */
/*
* The following only work if pte_present() is true.
* Undefined behaviour if not..
*/
/* Flag predicates on pte/pmd entries (valid only when the entry is present). */
static inline int pte_dirty(pte_t pte)
{
	return pte_flags(pte) & _PAGE_DIRTY;
}

static inline int pte_young(pte_t pte)
{
	return pte_flags(pte) & _PAGE_ACCESSED;
}

static inline int pmd_young(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_ACCESSED;
}

static inline int pte_write(pte_t pte)
{
	return pte_flags(pte) & _PAGE_RW;
}

static inline int pte_file(pte_t pte)
{
	return pte_flags(pte) & _PAGE_FILE;
}

static inline int pte_huge(pte_t pte)
{
	return pte_flags(pte) & _PAGE_PSE;
}

static inline int pte_global(pte_t pte)
{
	return pte_flags(pte) & _PAGE_GLOBAL;
}

/* executable means the NX bit is NOT set */
static inline int pte_exec(pte_t pte)
{
	return !(pte_flags(pte) & _PAGE_NX);
}

static inline int pte_special(pte_t pte)
{
	return pte_flags(pte) & _PAGE_SPECIAL;
}

/* extract the page frame number by masking the flag bits and shifting */
static inline unsigned long pte_pfn(pte_t pte)
{
	return (pte_val(pte) & PTE_PFN_MASK) >> PAGE_SHIFT;
}

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	return (pmd_val(pmd) & PTE_PFN_MASK) >> PAGE_SHIFT;
}

#define pte_page(pte)	pfn_to_page(pte_pfn(pte))

/* a pmd is "large" only when it is both a huge mapping (PSE) and present */
static inline int pmd_large(pmd_t pte)
{
	return (pmd_flags(pte) & (_PAGE_PSE | _PAGE_PRESENT)) ==
		(_PAGE_PSE | _PAGE_PRESENT);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_trans_splitting(pmd_t pmd)
{
	return pmd_val(pmd) & _PAGE_SPLITTING;
}

static inline int pmd_trans_huge(pmd_t pmd)
{
	return pmd_val(pmd) & _PAGE_PSE;
}

/* THP requires hardware PSE support */
static inline int has_transparent_hugepage(void)
{
	return cpu_has_pse;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
/* Value-returning flag manipulation helpers: each takes an entry by value
 * and returns a new entry with the given flag set or cleared. */
static inline pte_t pte_set_flags(pte_t pte, pteval_t set)
{
	pteval_t v = native_pte_val(pte);

	return native_make_pte(v | set);
}

static inline pte_t pte_clear_flags(pte_t pte, pteval_t clear)
{
	pteval_t v = native_pte_val(pte);

	return native_make_pte(v & ~clear);
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_DIRTY);
}

static inline pte_t pte_mkold(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_ACCESSED);
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_RW);
}

/* making a pte executable means clearing the NX bit */
static inline pte_t pte_mkexec(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_NX);
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_DIRTY);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_ACCESSED);
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_RW);
}

static inline pte_t pte_mkhuge(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_PSE);
}

static inline pte_t pte_clrhuge(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_PSE);
}

static inline pte_t pte_mkglobal(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_GLOBAL);
}

static inline pte_t pte_clrglobal(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_GLOBAL);
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SPECIAL);
}

/* same pattern for pmd entries */
static inline pmd_t pmd_set_flags(pmd_t pmd, pmdval_t set)
{
	pmdval_t v = native_pmd_val(pmd);

	return __pmd(v | set);
}

static inline pmd_t pmd_clear_flags(pmd_t pmd, pmdval_t clear)
{
	pmdval_t v = native_pmd_val(pmd);

	return __pmd(v & ~clear);
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_ACCESSED);
}

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_RW);
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_DIRTY);
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_PSE);
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_ACCESSED);
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_RW);
}

static inline pmd_t pmd_mknotpresent(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_PRESENT);
}
/*
 * Mask out unsupported bits in a present pgprot. Non-present pgprots
 * can use those bits for other purposes, so leave them be.
 */
static inline pgprotval_t massage_pgprot(pgprot_t pgprot)
{
	pgprotval_t protval = pgprot_val(pgprot);

	if (protval & _PAGE_PRESENT)
		protval &= __supported_pte_mask;

	return protval;
}

/* build a pte/pmd from a page frame number plus (sanitized) protections */
static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
{
	return __pte(((phys_addr_t)page_nr << PAGE_SHIFT) |
		     massage_pgprot(pgprot));
}

static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
{
	return __pmd(((phys_addr_t)page_nr << PAGE_SHIFT) |
		     massage_pgprot(pgprot));
}

/* keep the pfn and per-page bits of a pte, replace the protection bits */
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pteval_t val = pte_val(pte);

	/*
	 * Chop off the NX bit (if present), and add the NX portion of
	 * the newprot (if present):
	 */
	val &= _PAGE_CHG_MASK;
	val |= massage_pgprot(newprot) & ~_PAGE_CHG_MASK;

	return __pte(val);
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	pmdval_t val = pmd_val(pmd);

	val &= _HPAGE_CHG_MASK;
	val |= massage_pgprot(newprot) & ~_HPAGE_CHG_MASK;

	return __pmd(val);
}

/* mprotect needs to preserve PAT bits when updating vm_page_prot */
#define pgprot_modify pgprot_modify
static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
{
	pgprotval_t preservebits = pgprot_val(oldprot) & _PAGE_CHG_MASK;
	pgprotval_t addbits = pgprot_val(newprot);
	return __pgprot(preservebits | addbits);
}

#define pte_pgprot(x) __pgprot(pte_flags(x) & PTE_FLAGS_MASK)

#define canon_pgprot(p) __pgprot(massage_pgprot(p))

static inline int is_new_memtype_allowed(u64 paddr, unsigned long size,
					 unsigned long flags,
					 unsigned long new_flags)
{
	/*
	 * PAT type is always WB for untracked ranges, so no need to check.
	 */
	if (x86_platform.is_untracked_pat_range(paddr, paddr + size))
		return 1;

	/*
	 * Certain new memtypes are not allowed with certain
	 * requested memtype:
	 * - request is uncached, return cannot be write-back
	 * - request is write-combine, return cannot be write-back
	 */
	if ((flags == _PAGE_CACHE_UC_MINUS &&
	     new_flags == _PAGE_CACHE_WB) ||
	    (flags == _PAGE_CACHE_WC &&
	     new_flags == _PAGE_CACHE_WB)) {
		return 0;
	}

	return 1;
}

pmd_t *populate_extra_pmd(unsigned long vaddr);
pte_t *populate_extra_pte(unsigned long vaddr);
#endif /* __ASSEMBLY__ */
#ifdef CONFIG_X86_32
# include <asm/pgtable_32.h>
#else
# include <asm/pgtable_64.h>
#endif
#ifndef __ASSEMBLY__
#include <linux/mm_types.h>
/* Basic pte/pmd tests and page-table walking helpers. */
static inline int pte_none(pte_t pte)
{
	return !pte.pte;
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t a, pte_t b)
{
	return a.pte == b.pte;
}

/* PROT_NONE mappings count as present too */
static inline int pte_present(pte_t a)
{
	return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE);
}

static inline int pte_hidden(pte_t pte)
{
	return pte_flags(pte) & _PAGE_HIDDEN;
}

static inline int pmd_present(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_PRESENT;
}

static inline int pmd_none(pmd_t pmd)
{
	/* Only check low word on 32-bit platforms, since it might be
	   out of sync with upper half. */
	return (unsigned long)native_pmd_val(pmd) == 0;
}

/* kernel virtual address of the page table this pmd points at */
static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	return (unsigned long)__va(pmd_val(pmd) & PTE_PFN_MASK);
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pmd_page(pmd)	pfn_to_page((pmd_val(pmd) & PTE_PFN_MASK) >> PAGE_SHIFT)

/*
 * the pmd page can be thought of an array like this: pmd_t[PTRS_PER_PMD]
 *
 * this macro returns the index of the entry in the pmd page which would
 * control the given virtual address
 */
static inline unsigned long pmd_index(unsigned long address)
{
	return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 *
 * (Currently stuck as a macro because of indirect forward reference
 * to linux/mm.h:page_to_nid())
 */
#define mk_pte(page, pgprot)   pfn_pte(page_to_pfn(page), (pgprot))

/*
 * the pte page can be thought of an array like this: pte_t[PTRS_PER_PTE]
 *
 * this function returns the index of the entry in the pte page which would
 * control the given virtual address
 */
static inline unsigned long pte_index(unsigned long address)
{
	return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
}

static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address)
{
	return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(address);
}

/* a pmd that is not a well-formed kernel page-table entry */
static inline int pmd_bad(pmd_t pmd)
{
	return (pmd_flags(pmd) & ~_PAGE_USER) != _KERNPG_TABLE;
}

/* pages -> megabytes: shift by (20 - PAGE_SHIFT) */
static inline unsigned long pages_to_mb(unsigned long npg)
{
	return npg >> (20 - PAGE_SHIFT);
}

#define io_remap_pfn_range(vma, vaddr, pfn, size, prot)	\
	remap_pfn_range(vma, vaddr, pfn, size, prot)
#if PAGETABLE_LEVELS > 2
static inline int pud_none(pud_t pud)
{
return native_pud_val(pud) == 0;
}
static inline int pud_present(pud_t pud)
{
return pud_flags(pud) & _PAGE_PRESENT;
}
static inline unsigned long pud_page_vaddr(pud_t pud)
{
return (unsigned long)__va((unsigned long)pud_val(pud) & PTE_PFN_MASK);
}
/*
* Currently stuck as a macro due to indirect forward reference to
* linux/mmzone.h's __section_mem_map_addr() definition:
*/
#define pud_page(pud) pfn_to_page(pud_val(pud) >> PAGE_SHIFT)
/* Find an entry in the second-level page table.. */
static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(address);
}
static inline int pud_large(pud_t pud)
{
return (pud_val(pud) & (_PAGE_PSE | _PAGE_PRESENT)) ==
(_PAGE_PSE | _PAGE_PRESENT);
}
static inline int pud_bad(pud_t pud)
{
return (pud_flags(pud) & ~(_KERNPG_TABLE | _PAGE_USER)) != 0;
}
#else
static inline int pud_large(pud_t pud)
{
return 0;
}
#endif /* PAGETABLE_LEVELS > 2 */
#if PAGETABLE_LEVELS > 3
static inline int pgd_present(pgd_t pgd)
{
return pgd_flags(pgd) & _PAGE_PRESENT;
}
static inline unsigned long pgd_page_vaddr(pgd_t pgd)
{
return (unsigned long)__va((unsigned long)pgd_val(pgd) & PTE_PFN_MASK);
}
/*
* Currently stuck as a macro due to indirect forward reference to
* linux/mmzone.h's __section_mem_map_addr() definition:
*/
#define pgd_page(pgd) pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT)
/* to find an entry in a page-table-directory. */
static inline unsigned long pud_index(unsigned long address)
{
return (address >> PUD_SHIFT) & (PTRS_PER_PUD - 1);
}
static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
{
return (pud_t *)pgd_page_vaddr(*pgd) + pud_index(address);
}
static inline int pgd_bad(pgd_t pgd)
{
return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
}
static inline int pgd_none(pgd_t pgd)
{
return !native_pgd_val(pgd);
}
#endif /* PAGETABLE_LEVELS > 3 */
#endif /* __ASSEMBLY__ */
/*
* the pgd page can be thought of an array like this: pgd_t[PTRS_PER_PGD]
*
* this macro returns the index of the entry in the pgd page which would
* control the given virtual address
*/
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
/*
* pgd_offset() returns a (pgd_t *)
* pgd_index() is used get the offset into the pgd page's array of pgd_t's;
*/
#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
/*
* a shortcut which implies the use of the kernel's pgd, instead
* of a process's
*/
#define pgd_offset_k(address) pgd_offset(&init_mm, (address))
#define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
#define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
#ifndef __ASSEMBLY__
extern int direct_gbpages;
/* local pte updates need not use xchg for locking */
static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep)
{
pte_t res = *ptep;
/* Pure native function needs no input for mm, addr */
native_pte_clear(NULL, 0, ptep);
return res;
}
static inline pmd_t native_local_pmdp_get_and_clear(pmd_t *pmdp)
{
pmd_t res = *pmdp;
native_pmd_clear(pmdp);
return res;
}
static inline void native_set_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep , pte_t pte)
{
native_set_pte(ptep, pte);
}
static inline void native_set_pmd_at(struct mm_struct *mm, unsigned long addr,
pmd_t *pmdp , pmd_t pmd)
{
native_set_pmd(pmdp, pmd);
}
#ifndef CONFIG_PARAVIRT
/*
* Rules for using pte_update - it must be called after any PTE update which
* has not been done using the set_pte / clear_pte interfaces. It is used by
* shadow mode hypervisors to resynchronize the shadow page tables. Kernel PTE
* updates should either be sets, clears, or set_pte_atomic for P->P
* transitions, which means this hook should only be called for user PTEs.
* This hook implies a P->P protection or access change has taken place, which
* requires a subsequent TLB flush. The notification can optionally be delayed
* until the TLB flush event by using the pte_update_defer form of the
* interface, but care must be taken to assure that the flush happens while
* still holding the same page table lock so that the shadow and primary pages
* do not become out of sync on SMP.
*/
#define pte_update(mm, addr, ptep) do { } while (0)
#define pte_update_defer(mm, addr, ptep) do { } while (0)
#endif
/*
* We only update the dirty/accessed state if we set
* the dirty bit by hand in the kernel, since the hardware
* will do the accessed bit for us, and we don't want to
* race with other CPU's that might be updating the dirty
* bit at the same time.
*/
struct vm_area_struct;
#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma,
unsigned long address, pte_t *ptep,
pte_t entry, int dirty);
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
extern int ptep_test_and_clear_young(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep);
#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
extern int ptep_clear_flush_young(struct vm_area_struct *vma,
unsigned long address, pte_t *ptep);
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
pte_t *ptep)
{
pte_t pte = native_ptep_get_and_clear(ptep);
pte_update(mm, addr, ptep);
return pte;
}
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
unsigned long addr, pte_t *ptep,
int full)
{
pte_t pte;
if (full) {
/*
* Full address destruction in progress; paravirt does not
* care about updates and native needs no locking
*/
pte = native_local_ptep_get_and_clear(ptep);
} else {
pte = ptep_get_and_clear(mm, addr, ptep);
}
return pte;
}
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
{
clear_bit(_PAGE_BIT_RW, (unsigned long *)&ptep->pte);
pte_update(mm, addr, ptep);
}
#define flush_tlb_fix_spurious_fault(vma, address) do { } while (0)
#define mk_pmd(page, pgprot) pfn_pmd(page_to_pfn(page), (pgprot))
#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
extern int pmdp_set_access_flags(struct vm_area_struct *vma,
unsigned long address, pmd_t *pmdp,
pmd_t entry, int dirty);
#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
extern int pmdp_test_and_clear_young(struct vm_area_struct *vma,
unsigned long addr, pmd_t *pmdp);
#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
unsigned long address, pmd_t *pmdp);
#define __HAVE_ARCH_PMDP_SPLITTING_FLUSH
extern void pmdp_splitting_flush(struct vm_area_struct *vma,
unsigned long addr, pmd_t *pmdp);
#define __HAVE_ARCH_PMD_WRITE
static inline int pmd_write(pmd_t pmd)
{
return pmd_flags(pmd) & _PAGE_RW;
}
#define __HAVE_ARCH_PMDP_GET_AND_CLEAR
static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm, unsigned long addr,
pmd_t *pmdp)
{
pmd_t pmd = native_pmdp_get_and_clear(pmdp);
pmd_update(mm, addr, pmdp);
return pmd;
}
#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
unsigned long addr, pmd_t *pmdp)
{
clear_bit(_PAGE_BIT_RW, (unsigned long *)pmdp);
pmd_update(mm, addr, pmdp);
}
/*
* clone_pgd_range(pgd_t *dst, pgd_t *src, int count);
*
* dst - pointer to pgd range anwhere on a pgd page
* src - ""
* count - the number of pgds to copy.
*
* dst and src can be on the same page, but the range must not overlap,
* and must not cross a page boundary.
*/
static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
{
memcpy(dst, src, count * sizeof(pgd_t));
}
#include <asm-generic/pgtable.h>
#endif /* __ASSEMBLY__ */
#endif /* _ASM_X86_PGTABLE_H */
|
#ifndef _ASM_X86_PGTABLE_H
#define _ASM_X86_PGTABLE_H
#include <asm/page.h>
#include <asm/e820.h>
#include <asm/pgtable_types.h>
/*
* Macro to mark a page protection value as UC-
*/
#define pgprot_noncached(prot) \
((boot_cpu_data.x86 > 3) \
? (__pgprot(pgprot_val(prot) | _PAGE_CACHE_UC_MINUS)) \
: (prot))
#ifndef __ASSEMBLY__
#include <asm/x86_init.h>
/*
* ZERO_PAGE is a global shared page that is always zero: used
* for zero-mapped memory areas etc..
*/
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
extern spinlock_t pgd_lock;
extern struct list_head pgd_list;
extern struct mm_struct *pgd_page_get_mm(struct page *page);
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else /* !CONFIG_PARAVIRT */
#define set_pte(ptep, pte) native_set_pte(ptep, pte)
#define set_pte_at(mm, addr, ptep, pte) native_set_pte_at(mm, addr, ptep, pte)
#define set_pmd_at(mm, addr, pmdp, pmd) native_set_pmd_at(mm, addr, pmdp, pmd)
#define set_pte_atomic(ptep, pte) \
native_set_pte_atomic(ptep, pte)
#define set_pmd(pmdp, pmd) native_set_pmd(pmdp, pmd)
#ifndef __PAGETABLE_PUD_FOLDED
#define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
#define pgd_clear(pgd) native_pgd_clear(pgd)
#endif
#ifndef set_pud
# define set_pud(pudp, pud) native_set_pud(pudp, pud)
#endif
#ifndef __PAGETABLE_PMD_FOLDED
#define pud_clear(pud) native_pud_clear(pud)
#endif
#define pte_clear(mm, addr, ptep) native_pte_clear(mm, addr, ptep)
#define pmd_clear(pmd) native_pmd_clear(pmd)
#define pte_update(mm, addr, ptep) do { } while (0)
#define pte_update_defer(mm, addr, ptep) do { } while (0)
#define pmd_update(mm, addr, ptep) do { } while (0)
#define pmd_update_defer(mm, addr, ptep) do { } while (0)
#define pgd_val(x) native_pgd_val(x)
#define __pgd(x) native_make_pgd(x)
#ifndef __PAGETABLE_PUD_FOLDED
#define pud_val(x) native_pud_val(x)
#define __pud(x) native_make_pud(x)
#endif
#ifndef __PAGETABLE_PMD_FOLDED
#define pmd_val(x) native_pmd_val(x)
#define __pmd(x) native_make_pmd(x)
#endif
#define pte_val(x) native_pte_val(x)
#define __pte(x) native_make_pte(x)
#define arch_end_context_switch(prev) do {} while(0)
#endif /* CONFIG_PARAVIRT */
/*
* The following only work if pte_present() is true.
* Undefined behaviour if not..
*/
static inline int pte_dirty(pte_t pte)
{
return pte_flags(pte) & _PAGE_DIRTY;
}
static inline int pte_young(pte_t pte)
{
return pte_flags(pte) & _PAGE_ACCESSED;
}
static inline int pmd_young(pmd_t pmd)
{
return pmd_flags(pmd) & _PAGE_ACCESSED;
}
static inline int pte_write(pte_t pte)
{
return pte_flags(pte) & _PAGE_RW;
}
static inline int pte_file(pte_t pte)
{
return pte_flags(pte) & _PAGE_FILE;
}
static inline int pte_huge(pte_t pte)
{
return pte_flags(pte) & _PAGE_PSE;
}
static inline int pte_global(pte_t pte)
{
return pte_flags(pte) & _PAGE_GLOBAL;
}
static inline int pte_exec(pte_t pte)
{
return !(pte_flags(pte) & _PAGE_NX);
}
static inline int pte_special(pte_t pte)
{
return pte_flags(pte) & _PAGE_SPECIAL;
}
static inline unsigned long pte_pfn(pte_t pte)
{
return (pte_val(pte) & PTE_PFN_MASK) >> PAGE_SHIFT;
}
static inline unsigned long pmd_pfn(pmd_t pmd)
{
return (pmd_val(pmd) & PTE_PFN_MASK) >> PAGE_SHIFT;
}
#define pte_page(pte) pfn_to_page(pte_pfn(pte))
static inline int pmd_large(pmd_t pte)
{
return pmd_flags(pte) & _PAGE_PSE;
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_trans_splitting(pmd_t pmd)
{
return pmd_val(pmd) & _PAGE_SPLITTING;
}
static inline int pmd_trans_huge(pmd_t pmd)
{
return pmd_val(pmd) & _PAGE_PSE;
}
static inline int has_transparent_hugepage(void)
{
return cpu_has_pse;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
static inline pte_t pte_set_flags(pte_t pte, pteval_t set)
{
pteval_t v = native_pte_val(pte);
return native_make_pte(v | set);
}
static inline pte_t pte_clear_flags(pte_t pte, pteval_t clear)
{
pteval_t v = native_pte_val(pte);
return native_make_pte(v & ~clear);
}
static inline pte_t pte_mkclean(pte_t pte)
{
return pte_clear_flags(pte, _PAGE_DIRTY);
}
static inline pte_t pte_mkold(pte_t pte)
{
return pte_clear_flags(pte, _PAGE_ACCESSED);
}
static inline pte_t pte_wrprotect(pte_t pte)
{
return pte_clear_flags(pte, _PAGE_RW);
}
static inline pte_t pte_mkexec(pte_t pte)
{
return pte_clear_flags(pte, _PAGE_NX);
}
static inline pte_t pte_mkdirty(pte_t pte)
{
return pte_set_flags(pte, _PAGE_DIRTY);
}
static inline pte_t pte_mkyoung(pte_t pte)
{
return pte_set_flags(pte, _PAGE_ACCESSED);
}
static inline pte_t pte_mkwrite(pte_t pte)
{
return pte_set_flags(pte, _PAGE_RW);
}
static inline pte_t pte_mkhuge(pte_t pte)
{
return pte_set_flags(pte, _PAGE_PSE);
}
static inline pte_t pte_clrhuge(pte_t pte)
{
return pte_clear_flags(pte, _PAGE_PSE);
}
static inline pte_t pte_mkglobal(pte_t pte)
{
return pte_set_flags(pte, _PAGE_GLOBAL);
}
static inline pte_t pte_clrglobal(pte_t pte)
{
return pte_clear_flags(pte, _PAGE_GLOBAL);
}
static inline pte_t pte_mkspecial(pte_t pte)
{
return pte_set_flags(pte, _PAGE_SPECIAL);
}
static inline pmd_t pmd_set_flags(pmd_t pmd, pmdval_t set)
{
pmdval_t v = native_pmd_val(pmd);
return __pmd(v | set);
}
static inline pmd_t pmd_clear_flags(pmd_t pmd, pmdval_t clear)
{
pmdval_t v = native_pmd_val(pmd);
return __pmd(v & ~clear);
}
static inline pmd_t pmd_mkold(pmd_t pmd)
{
return pmd_clear_flags(pmd, _PAGE_ACCESSED);
}
static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
return pmd_clear_flags(pmd, _PAGE_RW);
}
static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
return pmd_set_flags(pmd, _PAGE_DIRTY);
}
static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
return pmd_set_flags(pmd, _PAGE_PSE);
}
static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
return pmd_set_flags(pmd, _PAGE_ACCESSED);
}
static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
return pmd_set_flags(pmd, _PAGE_RW);
}
static inline pmd_t pmd_mknotpresent(pmd_t pmd)
{
return pmd_clear_flags(pmd, _PAGE_PRESENT);
}
/*
* Mask out unsupported bits in a present pgprot. Non-present pgprots
* can use those bits for other purposes, so leave them be.
*/
static inline pgprotval_t massage_pgprot(pgprot_t pgprot)
{
pgprotval_t protval = pgprot_val(pgprot);
if (protval & _PAGE_PRESENT)
protval &= __supported_pte_mask;
return protval;
}
static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
{
return __pte(((phys_addr_t)page_nr << PAGE_SHIFT) |
massage_pgprot(pgprot));
}
static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
{
return __pmd(((phys_addr_t)page_nr << PAGE_SHIFT) |
massage_pgprot(pgprot));
}
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
pteval_t val = pte_val(pte);
/*
* Chop off the NX bit (if present), and add the NX portion of
* the newprot (if present):
*/
val &= _PAGE_CHG_MASK;
val |= massage_pgprot(newprot) & ~_PAGE_CHG_MASK;
return __pte(val);
}
static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
pmdval_t val = pmd_val(pmd);
val &= _HPAGE_CHG_MASK;
val |= massage_pgprot(newprot) & ~_HPAGE_CHG_MASK;
return __pmd(val);
}
/* mprotect needs to preserve PAT bits when updating vm_page_prot */
#define pgprot_modify pgprot_modify
static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
{
pgprotval_t preservebits = pgprot_val(oldprot) & _PAGE_CHG_MASK;
pgprotval_t addbits = pgprot_val(newprot);
return __pgprot(preservebits | addbits);
}
#define pte_pgprot(x) __pgprot(pte_flags(x) & PTE_FLAGS_MASK)
#define canon_pgprot(p) __pgprot(massage_pgprot(p))
static inline int is_new_memtype_allowed(u64 paddr, unsigned long size,
unsigned long flags,
unsigned long new_flags)
{
/*
* PAT type is always WB for untracked ranges, so no need to check.
*/
if (x86_platform.is_untracked_pat_range(paddr, paddr + size))
return 1;
/*
* Certain new memtypes are not allowed with certain
* requested memtype:
* - request is uncached, return cannot be write-back
* - request is write-combine, return cannot be write-back
*/
if ((flags == _PAGE_CACHE_UC_MINUS &&
new_flags == _PAGE_CACHE_WB) ||
(flags == _PAGE_CACHE_WC &&
new_flags == _PAGE_CACHE_WB)) {
return 0;
}
return 1;
}
pmd_t *populate_extra_pmd(unsigned long vaddr);
pte_t *populate_extra_pte(unsigned long vaddr);
#endif /* __ASSEMBLY__ */
#ifdef CONFIG_X86_32
# include <asm/pgtable_32.h>
#else
# include <asm/pgtable_64.h>
#endif
#ifndef __ASSEMBLY__
#include <linux/mm_types.h>
static inline int pte_none(pte_t pte)
{
return !pte.pte;
}
#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t a, pte_t b)
{
return a.pte == b.pte;
}
static inline int pte_present(pte_t a)
{
return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE);
}
static inline int pte_hidden(pte_t pte)
{
return pte_flags(pte) & _PAGE_HIDDEN;
}
static inline int pmd_present(pmd_t pmd)
{
/*
* Checking for _PAGE_PSE is needed too because
* split_huge_page will temporarily clear the present bit (but
* the _PAGE_PSE flag will remain set at all times while the
* _PAGE_PRESENT bit is clear).
*/
return pmd_flags(pmd) & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_PSE);
}
static inline int pmd_none(pmd_t pmd)
{
/* Only check low word on 32-bit platforms, since it might be
out of sync with upper half. */
return (unsigned long)native_pmd_val(pmd) == 0;
}
static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
return (unsigned long)__va(pmd_val(pmd) & PTE_PFN_MASK);
}
/*
* Currently stuck as a macro due to indirect forward reference to
* linux/mmzone.h's __section_mem_map_addr() definition:
*/
#define pmd_page(pmd) pfn_to_page((pmd_val(pmd) & PTE_PFN_MASK) >> PAGE_SHIFT)
/*
* the pmd page can be thought of an array like this: pmd_t[PTRS_PER_PMD]
*
* this macro returns the index of the entry in the pmd page which would
* control the given virtual address
*/
static inline unsigned long pmd_index(unsigned long address)
{
return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
}
/*
* Conversion functions: convert a page and protection to a page entry,
* and a page entry and page directory to the page they refer to.
*
* (Currently stuck as a macro because of indirect forward reference
* to linux/mm.h:page_to_nid())
*/
#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
/*
* the pte page can be thought of an array like this: pte_t[PTRS_PER_PTE]
*
* this function returns the index of the entry in the pte page which would
* control the given virtual address
*/
static inline unsigned long pte_index(unsigned long address)
{
return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
}
static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address)
{
return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(address);
}
static inline int pmd_bad(pmd_t pmd)
{
return (pmd_flags(pmd) & ~_PAGE_USER) != _KERNPG_TABLE;
}
static inline unsigned long pages_to_mb(unsigned long npg)
{
return npg >> (20 - PAGE_SHIFT);
}
#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
remap_pfn_range(vma, vaddr, pfn, size, prot)
#if PAGETABLE_LEVELS > 2
static inline int pud_none(pud_t pud)
{
return native_pud_val(pud) == 0;
}
static inline int pud_present(pud_t pud)
{
return pud_flags(pud) & _PAGE_PRESENT;
}
static inline unsigned long pud_page_vaddr(pud_t pud)
{
return (unsigned long)__va((unsigned long)pud_val(pud) & PTE_PFN_MASK);
}
/*
* Currently stuck as a macro due to indirect forward reference to
* linux/mmzone.h's __section_mem_map_addr() definition:
*/
#define pud_page(pud) pfn_to_page(pud_val(pud) >> PAGE_SHIFT)
/* Find an entry in the second-level page table.. */
static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(address);
}
static inline int pud_large(pud_t pud)
{
return (pud_val(pud) & (_PAGE_PSE | _PAGE_PRESENT)) ==
(_PAGE_PSE | _PAGE_PRESENT);
}
static inline int pud_bad(pud_t pud)
{
return (pud_flags(pud) & ~(_KERNPG_TABLE | _PAGE_USER)) != 0;
}
#else
static inline int pud_large(pud_t pud)
{
return 0;
}
#endif /* PAGETABLE_LEVELS > 2 */
#if PAGETABLE_LEVELS > 3
static inline int pgd_present(pgd_t pgd)
{
return pgd_flags(pgd) & _PAGE_PRESENT;
}
static inline unsigned long pgd_page_vaddr(pgd_t pgd)
{
return (unsigned long)__va((unsigned long)pgd_val(pgd) & PTE_PFN_MASK);
}
/*
* Currently stuck as a macro due to indirect forward reference to
* linux/mmzone.h's __section_mem_map_addr() definition:
*/
#define pgd_page(pgd) pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT)
/* to find an entry in a page-table-directory. */
static inline unsigned long pud_index(unsigned long address)
{
return (address >> PUD_SHIFT) & (PTRS_PER_PUD - 1);
}
static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
{
return (pud_t *)pgd_page_vaddr(*pgd) + pud_index(address);
}
static inline int pgd_bad(pgd_t pgd)
{
return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
}
static inline int pgd_none(pgd_t pgd)
{
return !native_pgd_val(pgd);
}
#endif /* PAGETABLE_LEVELS > 3 */
#endif /* __ASSEMBLY__ */
/*
* the pgd page can be thought of an array like this: pgd_t[PTRS_PER_PGD]
*
* this macro returns the index of the entry in the pgd page which would
* control the given virtual address
*/
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
/*
* pgd_offset() returns a (pgd_t *)
* pgd_index() is used get the offset into the pgd page's array of pgd_t's;
*/
#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
/*
* a shortcut which implies the use of the kernel's pgd, instead
* of a process's
*/
#define pgd_offset_k(address) pgd_offset(&init_mm, (address))
#define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
#define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
#ifndef __ASSEMBLY__
extern int direct_gbpages;
/* local pte updates need not use xchg for locking */
static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep)
{
pte_t res = *ptep;
/* Pure native function needs no input for mm, addr */
native_pte_clear(NULL, 0, ptep);
return res;
}
static inline pmd_t native_local_pmdp_get_and_clear(pmd_t *pmdp)
{
pmd_t res = *pmdp;
native_pmd_clear(pmdp);
return res;
}
static inline void native_set_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep , pte_t pte)
{
native_set_pte(ptep, pte);
}
static inline void native_set_pmd_at(struct mm_struct *mm, unsigned long addr,
pmd_t *pmdp , pmd_t pmd)
{
native_set_pmd(pmdp, pmd);
}
#ifndef CONFIG_PARAVIRT
/*
* Rules for using pte_update - it must be called after any PTE update which
* has not been done using the set_pte / clear_pte interfaces. It is used by
* shadow mode hypervisors to resynchronize the shadow page tables. Kernel PTE
* updates should either be sets, clears, or set_pte_atomic for P->P
* transitions, which means this hook should only be called for user PTEs.
* This hook implies a P->P protection or access change has taken place, which
* requires a subsequent TLB flush. The notification can optionally be delayed
* until the TLB flush event by using the pte_update_defer form of the
* interface, but care must be taken to assure that the flush happens while
* still holding the same page table lock so that the shadow and primary pages
* do not become out of sync on SMP.
*/
#define pte_update(mm, addr, ptep) do { } while (0)
#define pte_update_defer(mm, addr, ptep) do { } while (0)
#endif
/*
* We only update the dirty/accessed state if we set
* the dirty bit by hand in the kernel, since the hardware
* will do the accessed bit for us, and we don't want to
* race with other CPU's that might be updating the dirty
* bit at the same time.
*/
struct vm_area_struct;
#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma,
unsigned long address, pte_t *ptep,
pte_t entry, int dirty);
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
extern int ptep_test_and_clear_young(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep);
#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
extern int ptep_clear_flush_young(struct vm_area_struct *vma,
unsigned long address, pte_t *ptep);
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
pte_t *ptep)
{
pte_t pte = native_ptep_get_and_clear(ptep);
pte_update(mm, addr, ptep);
return pte;
}
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
unsigned long addr, pte_t *ptep,
int full)
{
pte_t pte;
if (full) {
/*
* Full address destruction in progress; paravirt does not
* care about updates and native needs no locking
*/
pte = native_local_ptep_get_and_clear(ptep);
} else {
pte = ptep_get_and_clear(mm, addr, ptep);
}
return pte;
}
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
{
clear_bit(_PAGE_BIT_RW, (unsigned long *)&ptep->pte);
pte_update(mm, addr, ptep);
}
#define flush_tlb_fix_spurious_fault(vma, address) do { } while (0)
#define mk_pmd(page, pgprot) pfn_pmd(page_to_pfn(page), (pgprot))
#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
extern int pmdp_set_access_flags(struct vm_area_struct *vma,
unsigned long address, pmd_t *pmdp,
pmd_t entry, int dirty);
#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
extern int pmdp_test_and_clear_young(struct vm_area_struct *vma,
unsigned long addr, pmd_t *pmdp);
#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
unsigned long address, pmd_t *pmdp);
#define __HAVE_ARCH_PMDP_SPLITTING_FLUSH
extern void pmdp_splitting_flush(struct vm_area_struct *vma,
unsigned long addr, pmd_t *pmdp);
#define __HAVE_ARCH_PMD_WRITE
static inline int pmd_write(pmd_t pmd)
{
return pmd_flags(pmd) & _PAGE_RW;
}
#define __HAVE_ARCH_PMDP_GET_AND_CLEAR
static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm, unsigned long addr,
pmd_t *pmdp)
{
pmd_t pmd = native_pmdp_get_and_clear(pmdp);
pmd_update(mm, addr, pmdp);
return pmd;
}
#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
unsigned long addr, pmd_t *pmdp)
{
clear_bit(_PAGE_BIT_RW, (unsigned long *)pmdp);
pmd_update(mm, addr, pmdp);
}
/*
* clone_pgd_range(pgd_t *dst, pgd_t *src, int count);
*
* dst - pointer to pgd range anwhere on a pgd page
* src - ""
* count - the number of pgds to copy.
*
* dst and src can be on the same page, but the range must not overlap,
* and must not cross a page boundary.
*/
static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
{
memcpy(dst, src, count * sizeof(pgd_t));
}
#include <asm-generic/pgtable.h>
#endif /* __ASSEMBLY__ */
#endif /* _ASM_X86_PGTABLE_H */
|
5564_0
|
crossvul
|
h
|
CWE-119
|
Improper Restriction of Operations within the Bounds of a Memory Buffer - Certain languages allow direct addressing of memory locations and do not automatically ensure that these locations are valid for the memory buffer that is being referenced.
|
c
|
/* Syslog internals
*
* Copyright 2010 Canonical, Ltd.
* Author: Kees Cook <kees.cook@canonical.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; see the file COPYING. If not, write to
* the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef _LINUX_SYSLOG_H
#define _LINUX_SYSLOG_H
/* Close the log. Currently a NOP. */
#define SYSLOG_ACTION_CLOSE 0
/* Open the log. Currently a NOP. */
#define SYSLOG_ACTION_OPEN 1
/* Read from the log. */
#define SYSLOG_ACTION_READ 2
/* Read all messages remaining in the ring buffer. */
#define SYSLOG_ACTION_READ_ALL 3
/* Read and clear all messages remaining in the ring buffer */
#define SYSLOG_ACTION_READ_CLEAR 4
/* Clear ring buffer. */
#define SYSLOG_ACTION_CLEAR 5
/* Disable printk's to console */
#define SYSLOG_ACTION_CONSOLE_OFF 6
/* Enable printk's to console */
#define SYSLOG_ACTION_CONSOLE_ON 7
/* Set level of messages printed to console */
#define SYSLOG_ACTION_CONSOLE_LEVEL 8
/* Return number of unread characters in the log buffer */
#define SYSLOG_ACTION_SIZE_UNREAD 9
/* Return size of the log buffer */
#define SYSLOG_ACTION_SIZE_BUFFER 10
#define SYSLOG_FROM_CALL 0
#define SYSLOG_FROM_FILE 1
int do_syslog(int type, char __user *buf, int count, bool from_file);
#endif /* _LINUX_SYSLOG_H */
|
/* Syslog internals
*
* Copyright 2010 Canonical, Ltd.
* Author: Kees Cook <kees.cook@canonical.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; see the file COPYING. If not, write to
* the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef _LINUX_SYSLOG_H
#define _LINUX_SYSLOG_H
/* Close the log. Currently a NOP. */
#define SYSLOG_ACTION_CLOSE 0
/* Open the log. Currently a NOP. */
#define SYSLOG_ACTION_OPEN 1
/* Read from the log. */
#define SYSLOG_ACTION_READ 2
/* Read all messages remaining in the ring buffer. */
#define SYSLOG_ACTION_READ_ALL 3
/* Read and clear all messages remaining in the ring buffer */
#define SYSLOG_ACTION_READ_CLEAR 4
/* Clear ring buffer. */
#define SYSLOG_ACTION_CLEAR 5
/* Disable printk's to console */
#define SYSLOG_ACTION_CONSOLE_OFF 6
/* Enable printk's to console */
#define SYSLOG_ACTION_CONSOLE_ON 7
/* Set level of messages printed to console */
#define SYSLOG_ACTION_CONSOLE_LEVEL 8
/* Return number of unread characters in the log buffer */
#define SYSLOG_ACTION_SIZE_UNREAD 9
/* Return size of the log buffer */
#define SYSLOG_ACTION_SIZE_BUFFER 10
#define SYSLOG_FROM_CALL 0
#define SYSLOG_FROM_FILE 1
/*
* Syslog priority (PRI) maximum length in char : '<[0-9]{1,3}>'
* See RFC5424 for details
*/
#define SYSLOG_PRI_MAX_LENGTH 5
int do_syslog(int type, char __user *buf, int count, bool from_file);
#endif /* _LINUX_SYSLOG_H */
|
5589_0
|
crossvul
|
h
|
CWE-119
|
Improper Restriction of Operations within the Bounds of a Memory Buffer - Certain languages allow direct addressing of memory locations and do not automatically ensure that these locations are valid for the memory buffer that is being referenced.
|
c
|
#ifndef _LINUX_NLS_H
#define _LINUX_NLS_H
#include <linux/init.h>
/* Unicode has changed over the years. Unicode code points no longer
* fit into 16 bits; as of Unicode 5 valid code points range from 0
* to 0x10ffff (17 planes, where each plane holds 65536 code points).
*
* The original decision to represent Unicode characters as 16-bit
* wchar_t values is now outdated. But plane 0 still includes the
* most commonly used characters, so we will retain it. The newer
* 32-bit unicode_t type can be used when it is necessary to
* represent the full Unicode character set.
*/
/* Plane-0 Unicode character */
typedef u16 wchar_t;
#define MAX_WCHAR_T 0xffff
/* Arbitrary Unicode character */
typedef u32 unicode_t;
struct nls_table {
const char *charset;
const char *alias;
int (*uni2char) (wchar_t uni, unsigned char *out, int boundlen);
int (*char2uni) (const unsigned char *rawstring, int boundlen,
wchar_t *uni);
const unsigned char *charset2lower;
const unsigned char *charset2upper;
struct module *owner;
struct nls_table *next;
};
/* this value hold the maximum octet of charset */
#define NLS_MAX_CHARSET_SIZE 6 /* for UTF-8 */
/* Byte order for UTF-16 strings */
enum utf16_endian {
UTF16_HOST_ENDIAN,
UTF16_LITTLE_ENDIAN,
UTF16_BIG_ENDIAN
};
/* nls.c */
extern int register_nls(struct nls_table *);
extern int unregister_nls(struct nls_table *);
extern struct nls_table *load_nls(char *);
extern void unload_nls(struct nls_table *);
extern struct nls_table *load_nls_default(void);
extern int utf8_to_utf32(const u8 *s, int len, unicode_t *pu);
extern int utf32_to_utf8(unicode_t u, u8 *s, int maxlen);
extern int utf8s_to_utf16s(const u8 *s, int len, wchar_t *pwcs);
extern int utf16s_to_utf8s(const wchar_t *pwcs, int len,
enum utf16_endian endian, u8 *s, int maxlen);
static inline unsigned char nls_tolower(struct nls_table *t, unsigned char c)
{
unsigned char nc = t->charset2lower[c];
return nc ? nc : c;
}
static inline unsigned char nls_toupper(struct nls_table *t, unsigned char c)
{
unsigned char nc = t->charset2upper[c];
return nc ? nc : c;
}
static inline int nls_strnicmp(struct nls_table *t, const unsigned char *s1,
const unsigned char *s2, int len)
{
while (len--) {
if (nls_tolower(t, *s1++) != nls_tolower(t, *s2++))
return 1;
}
return 0;
}
/*
* nls_nullsize - return length of null character for codepage
* @codepage - codepage for which to return length of NULL terminator
*
* Since we can't guarantee that the null terminator will be a particular
* length, we have to check against the codepage. If there's a problem
* determining it, assume a single-byte NULL terminator.
*/
static inline int
nls_nullsize(const struct nls_table *codepage)
{
int charlen;
char tmp[NLS_MAX_CHARSET_SIZE];
charlen = codepage->uni2char(0, tmp, NLS_MAX_CHARSET_SIZE);
return charlen > 0 ? charlen : 1;
}
#define MODULE_ALIAS_NLS(name) MODULE_ALIAS("nls_" __stringify(name))
#endif /* _LINUX_NLS_H */
|
#ifndef _LINUX_NLS_H
#define _LINUX_NLS_H
#include <linux/init.h>
/* Unicode has changed over the years. Unicode code points no longer
* fit into 16 bits; as of Unicode 5 valid code points range from 0
* to 0x10ffff (17 planes, where each plane holds 65536 code points).
*
* The original decision to represent Unicode characters as 16-bit
* wchar_t values is now outdated. But plane 0 still includes the
* most commonly used characters, so we will retain it. The newer
* 32-bit unicode_t type can be used when it is necessary to
* represent the full Unicode character set.
*/
/* Plane-0 Unicode character */
typedef u16 wchar_t;
#define MAX_WCHAR_T 0xffff
/* Arbitrary Unicode character */
typedef u32 unicode_t;
struct nls_table {
const char *charset;
const char *alias;
int (*uni2char) (wchar_t uni, unsigned char *out, int boundlen);
int (*char2uni) (const unsigned char *rawstring, int boundlen,
wchar_t *uni);
const unsigned char *charset2lower;
const unsigned char *charset2upper;
struct module *owner;
struct nls_table *next;
};
/* this value hold the maximum octet of charset */
#define NLS_MAX_CHARSET_SIZE 6 /* for UTF-8 */
/* Byte order for UTF-16 strings */
enum utf16_endian {
UTF16_HOST_ENDIAN,
UTF16_LITTLE_ENDIAN,
UTF16_BIG_ENDIAN
};
/* nls_base.c */
extern int register_nls(struct nls_table *);
extern int unregister_nls(struct nls_table *);
extern struct nls_table *load_nls(char *);
extern void unload_nls(struct nls_table *);
extern struct nls_table *load_nls_default(void);
extern int utf8_to_utf32(const u8 *s, int len, unicode_t *pu);
extern int utf32_to_utf8(unicode_t u, u8 *s, int maxlen);
extern int utf8s_to_utf16s(const u8 *s, int len,
enum utf16_endian endian, wchar_t *pwcs, int maxlen);
extern int utf16s_to_utf8s(const wchar_t *pwcs, int len,
enum utf16_endian endian, u8 *s, int maxlen);
static inline unsigned char nls_tolower(struct nls_table *t, unsigned char c)
{
unsigned char nc = t->charset2lower[c];
return nc ? nc : c;
}
static inline unsigned char nls_toupper(struct nls_table *t, unsigned char c)
{
unsigned char nc = t->charset2upper[c];
return nc ? nc : c;
}
static inline int nls_strnicmp(struct nls_table *t, const unsigned char *s1,
const unsigned char *s2, int len)
{
while (len--) {
if (nls_tolower(t, *s1++) != nls_tolower(t, *s2++))
return 1;
}
return 0;
}
/*
* nls_nullsize - return length of null character for codepage
* @codepage - codepage for which to return length of NULL terminator
*
* Since we can't guarantee that the null terminator will be a particular
* length, we have to check against the codepage. If there's a problem
* determining it, assume a single-byte NULL terminator.
*/
static inline int
nls_nullsize(const struct nls_table *codepage)
{
int charlen;
char tmp[NLS_MAX_CHARSET_SIZE];
charlen = codepage->uni2char(0, tmp, NLS_MAX_CHARSET_SIZE);
return charlen > 0 ? charlen : 1;
}
#define MODULE_ALIAS_NLS(name) MODULE_ALIAS("nls_" __stringify(name))
#endif /* _LINUX_NLS_H */
|
5590_3
|
crossvul
|
h
|
CWE-119
|
Improper Restriction of Operations within the Bounds of a Memory Buffer - Certain languages allow direct addressing of memory locations and do not automatically ensure that these locations are valid for the memory buffer that is being referenced.
|
c
|
/*-
* Copyright (c) 2002-2003 Brian Somers <brian@Awfulhak.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD: src/lib/libradius/radlib_vs.h,v 1.2.2.1 2002/06/17 02:24:57 brian Exp $
*/
#ifndef _RADLIB_VS_H_
#define _RADLIB_VS_H_
#include <sys/types.h>
#ifndef PHP_WIN32
#include <netinet/in.h>
#endif
#include "radlib_compat.h"
#define RAD_VENDOR_MICROSOFT 311 /* rfc2548 */
#define RAD_MICROSOFT_MS_CHAP_RESPONSE 1
#define RAD_MICROSOFT_MS_CHAP_ERROR 2
#define RAD_MICROSOFT_MS_CHAP_PW_1 3
#define RAD_MICROSOFT_MS_CHAP_PW_2 4
#define RAD_MICROSOFT_MS_CHAP_LM_ENC_PW 5
#define RAD_MICROSOFT_MS_CHAP_NT_ENC_PW 6
#define RAD_MICROSOFT_MS_MPPE_ENCRYPTION_POLICY 7
#define RAD_MICROSOFT_MS_MPPE_ENCRYPTION_TYPES 8
#define RAD_MICROSOFT_MS_RAS_VENDOR 9
#define RAD_MICROSOFT_MS_CHAP_DOMAIN 10
#define RAD_MICROSOFT_MS_CHAP_CHALLENGE 11
#define RAD_MICROSOFT_MS_CHAP_MPPE_KEYS 12
#define RAD_MICROSOFT_MS_BAP_USAGE 13
#define RAD_MICROSOFT_MS_LINK_UTILIZATION_THRESHOLD 14
#define RAD_MICROSOFT_MS_LINK_DROP_TIME_LIMIT 15
#define RAD_MICROSOFT_MS_MPPE_SEND_KEY 16
#define RAD_MICROSOFT_MS_MPPE_RECV_KEY 17
#define RAD_MICROSOFT_MS_RAS_VERSION 18
#define RAD_MICROSOFT_MS_OLD_ARAP_PASSWORD 19
#define RAD_MICROSOFT_MS_NEW_ARAP_PASSWORD 20
#define RAD_MICROSOFT_MS_ARAP_PASSWORD_CHANGE_REASON 21
#define RAD_MICROSOFT_MS_FILTER 22
#define RAD_MICROSOFT_MS_ACCT_AUTH_TYPE 23
#define RAD_MICROSOFT_MS_ACCT_EAP_TYPE 24
#define RAD_MICROSOFT_MS_CHAP2_RESPONSE 25
#define RAD_MICROSOFT_MS_CHAP2_SUCCESS 26
#define RAD_MICROSOFT_MS_CHAP2_PW 27
#define RAD_MICROSOFT_MS_PRIMARY_DNS_SERVER 28
#define RAD_MICROSOFT_MS_SECONDARY_DNS_SERVER 29
#define RAD_MICROSOFT_MS_PRIMARY_NBNS_SERVER 30
#define RAD_MICROSOFT_MS_SECONDARY_NBNS_SERVER 31
#define RAD_MICROSOFT_MS_ARAP_CHALLENGE 33
#define SALT_LEN 2
struct rad_handle;
int rad_get_vendor_attr(u_int32_t *, const void **, size_t *);
int rad_put_vendor_addr(struct rad_handle *, int, int, struct in_addr);
int rad_put_vendor_attr(struct rad_handle *, int, int, const void *,
size_t);
int rad_put_vendor_int(struct rad_handle *, int, int, u_int32_t);
int rad_put_vendor_string(struct rad_handle *, int, int, const char *);
int rad_demangle_mppe_key(struct rad_handle *, const void *, size_t, u_char *, size_t *);
#endif /* _RADLIB_VS_H_ */
/* vim: set ts=8 sw=8 noet: */
|
/*-
* Copyright (c) 2002-2003 Brian Somers <brian@Awfulhak.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD: src/lib/libradius/radlib_vs.h,v 1.2.2.1 2002/06/17 02:24:57 brian Exp $
*/
#ifndef _RADLIB_VS_H_
#define _RADLIB_VS_H_
#include <sys/types.h>
#ifndef PHP_WIN32
#include <netinet/in.h>
#endif
#include "radlib_compat.h"
#define RAD_VENDOR_MICROSOFT 311 /* rfc2548 */
#define RAD_MICROSOFT_MS_CHAP_RESPONSE 1
#define RAD_MICROSOFT_MS_CHAP_ERROR 2
#define RAD_MICROSOFT_MS_CHAP_PW_1 3
#define RAD_MICROSOFT_MS_CHAP_PW_2 4
#define RAD_MICROSOFT_MS_CHAP_LM_ENC_PW 5
#define RAD_MICROSOFT_MS_CHAP_NT_ENC_PW 6
#define RAD_MICROSOFT_MS_MPPE_ENCRYPTION_POLICY 7
#define RAD_MICROSOFT_MS_MPPE_ENCRYPTION_TYPES 8
#define RAD_MICROSOFT_MS_RAS_VENDOR 9
#define RAD_MICROSOFT_MS_CHAP_DOMAIN 10
#define RAD_MICROSOFT_MS_CHAP_CHALLENGE 11
#define RAD_MICROSOFT_MS_CHAP_MPPE_KEYS 12
#define RAD_MICROSOFT_MS_BAP_USAGE 13
#define RAD_MICROSOFT_MS_LINK_UTILIZATION_THRESHOLD 14
#define RAD_MICROSOFT_MS_LINK_DROP_TIME_LIMIT 15
#define RAD_MICROSOFT_MS_MPPE_SEND_KEY 16
#define RAD_MICROSOFT_MS_MPPE_RECV_KEY 17
#define RAD_MICROSOFT_MS_RAS_VERSION 18
#define RAD_MICROSOFT_MS_OLD_ARAP_PASSWORD 19
#define RAD_MICROSOFT_MS_NEW_ARAP_PASSWORD 20
#define RAD_MICROSOFT_MS_ARAP_PASSWORD_CHANGE_REASON 21
#define RAD_MICROSOFT_MS_FILTER 22
#define RAD_MICROSOFT_MS_ACCT_AUTH_TYPE 23
#define RAD_MICROSOFT_MS_ACCT_EAP_TYPE 24
#define RAD_MICROSOFT_MS_CHAP2_RESPONSE 25
#define RAD_MICROSOFT_MS_CHAP2_SUCCESS 26
#define RAD_MICROSOFT_MS_CHAP2_PW 27
#define RAD_MICROSOFT_MS_PRIMARY_DNS_SERVER 28
#define RAD_MICROSOFT_MS_SECONDARY_DNS_SERVER 29
#define RAD_MICROSOFT_MS_PRIMARY_NBNS_SERVER 30
#define RAD_MICROSOFT_MS_SECONDARY_NBNS_SERVER 31
#define RAD_MICROSOFT_MS_ARAP_CHALLENGE 33
#define SALT_LEN 2
struct rad_handle;
int rad_get_vendor_attr(u_int32_t *, unsigned char *, const void **, size_t *, const void *, size_t);
int rad_put_vendor_addr(struct rad_handle *, int, int, struct in_addr);
int rad_put_vendor_attr(struct rad_handle *, int, int, const void *,
size_t);
int rad_put_vendor_int(struct rad_handle *, int, int, u_int32_t);
int rad_put_vendor_string(struct rad_handle *, int, int, const char *);
int rad_demangle_mppe_key(struct rad_handle *, const void *, size_t, u_char *, size_t *);
#endif /* _RADLIB_VS_H_ */
/* vim: set ts=8 sw=8 noet: */
|
5658_2
|
crossvul
|
h
|
CWE-119
|
Improper Restriction of Operations within the Bounds of a Memory Buffer - Certain languages allow direct addressing of memory locations and do not automatically ensure that these locations are valid for the memory buffer that is being referenced.
|
c
|
#ifndef ISCSI_PARAMETERS_H
#define ISCSI_PARAMETERS_H
struct iscsi_extra_response {
char key[64];
char value[32];
struct list_head er_list;
} ____cacheline_aligned;
struct iscsi_param {
char *name;
char *value;
u8 set_param;
u8 phase;
u8 scope;
u8 sender;
u8 type;
u8 use;
u16 type_range;
u32 state;
struct list_head p_list;
} ____cacheline_aligned;
extern int iscsi_login_rx_data(struct iscsi_conn *, char *, int);
extern int iscsi_login_tx_data(struct iscsi_conn *, char *, char *, int);
extern void iscsi_dump_conn_ops(struct iscsi_conn_ops *);
extern void iscsi_dump_sess_ops(struct iscsi_sess_ops *);
extern void iscsi_print_params(struct iscsi_param_list *);
extern int iscsi_create_default_params(struct iscsi_param_list **);
extern int iscsi_set_keys_to_negotiate(struct iscsi_param_list *, bool);
extern int iscsi_set_keys_irrelevant_for_discovery(struct iscsi_param_list *);
extern int iscsi_copy_param_list(struct iscsi_param_list **,
struct iscsi_param_list *, int);
extern int iscsi_change_param_value(char *, struct iscsi_param_list *, int);
extern void iscsi_release_param_list(struct iscsi_param_list *);
extern struct iscsi_param *iscsi_find_param_from_key(char *, struct iscsi_param_list *);
extern int iscsi_extract_key_value(char *, char **, char **);
extern int iscsi_update_param_value(struct iscsi_param *, char *);
extern int iscsi_decode_text_input(u8, u8, char *, u32, struct iscsi_conn *);
extern int iscsi_encode_text_output(u8, u8, char *, u32 *,
struct iscsi_param_list *);
extern int iscsi_check_negotiated_keys(struct iscsi_param_list *);
extern void iscsi_set_connection_parameters(struct iscsi_conn_ops *,
struct iscsi_param_list *);
extern void iscsi_set_session_parameters(struct iscsi_sess_ops *,
struct iscsi_param_list *, int);
#define YES "Yes"
#define NO "No"
#define ALL "All"
#define IRRELEVANT "Irrelevant"
#define NONE "None"
#define NOTUNDERSTOOD "NotUnderstood"
#define REJECT "Reject"
/*
* The Parameter Names.
*/
#define AUTHMETHOD "AuthMethod"
#define HEADERDIGEST "HeaderDigest"
#define DATADIGEST "DataDigest"
#define MAXCONNECTIONS "MaxConnections"
#define SENDTARGETS "SendTargets"
#define TARGETNAME "TargetName"
#define INITIATORNAME "InitiatorName"
#define TARGETALIAS "TargetAlias"
#define INITIATORALIAS "InitiatorAlias"
#define TARGETADDRESS "TargetAddress"
#define TARGETPORTALGROUPTAG "TargetPortalGroupTag"
#define INITIALR2T "InitialR2T"
#define IMMEDIATEDATA "ImmediateData"
#define MAXRECVDATASEGMENTLENGTH "MaxRecvDataSegmentLength"
#define MAXXMITDATASEGMENTLENGTH "MaxXmitDataSegmentLength"
#define MAXBURSTLENGTH "MaxBurstLength"
#define FIRSTBURSTLENGTH "FirstBurstLength"
#define DEFAULTTIME2WAIT "DefaultTime2Wait"
#define DEFAULTTIME2RETAIN "DefaultTime2Retain"
#define MAXOUTSTANDINGR2T "MaxOutstandingR2T"
#define DATAPDUINORDER "DataPDUInOrder"
#define DATASEQUENCEINORDER "DataSequenceInOrder"
#define ERRORRECOVERYLEVEL "ErrorRecoveryLevel"
#define SESSIONTYPE "SessionType"
#define IFMARKER "IFMarker"
#define OFMARKER "OFMarker"
#define IFMARKINT "IFMarkInt"
#define OFMARKINT "OFMarkInt"
#define X_EXTENSIONKEY "X-com.sbei.version"
#define X_EXTENSIONKEY_CISCO_NEW "X-com.cisco.protocol"
#define X_EXTENSIONKEY_CISCO_OLD "X-com.cisco.iscsi.draft"
/*
* Parameter names of iSCSI Extentions for RDMA (iSER). See RFC-5046
*/
#define RDMAEXTENSIONS "RDMAExtensions"
#define INITIATORRECVDATASEGMENTLENGTH "InitiatorRecvDataSegmentLength"
#define TARGETRECVDATASEGMENTLENGTH "TargetRecvDataSegmentLength"
/*
* For AuthMethod.
*/
#define KRB5 "KRB5"
#define SPKM1 "SPKM1"
#define SPKM2 "SPKM2"
#define SRP "SRP"
#define CHAP "CHAP"
/*
* Initial values for Parameter Negotiation.
*/
#define INITIAL_AUTHMETHOD CHAP
#define INITIAL_HEADERDIGEST "CRC32C,None"
#define INITIAL_DATADIGEST "CRC32C,None"
#define INITIAL_MAXCONNECTIONS "1"
#define INITIAL_SENDTARGETS ALL
#define INITIAL_TARGETNAME "LIO.Target"
#define INITIAL_INITIATORNAME "LIO.Initiator"
#define INITIAL_TARGETALIAS "LIO Target"
#define INITIAL_INITIATORALIAS "LIO Initiator"
#define INITIAL_TARGETADDRESS "0.0.0.0:0000,0"
#define INITIAL_TARGETPORTALGROUPTAG "1"
#define INITIAL_INITIALR2T YES
#define INITIAL_IMMEDIATEDATA YES
#define INITIAL_MAXRECVDATASEGMENTLENGTH "8192"
/*
* Match outgoing MXDSL default to incoming Open-iSCSI default
*/
#define INITIAL_MAXXMITDATASEGMENTLENGTH "262144"
#define INITIAL_MAXBURSTLENGTH "262144"
#define INITIAL_FIRSTBURSTLENGTH "65536"
#define INITIAL_DEFAULTTIME2WAIT "2"
#define INITIAL_DEFAULTTIME2RETAIN "20"
#define INITIAL_MAXOUTSTANDINGR2T "1"
#define INITIAL_DATAPDUINORDER YES
#define INITIAL_DATASEQUENCEINORDER YES
#define INITIAL_ERRORRECOVERYLEVEL "0"
#define INITIAL_SESSIONTYPE NORMAL
#define INITIAL_IFMARKER NO
#define INITIAL_OFMARKER NO
#define INITIAL_IFMARKINT "2048~65535"
#define INITIAL_OFMARKINT "2048~65535"
/*
* Initial values for iSER parameters following RFC-5046 Section 6
*/
#define INITIAL_RDMAEXTENSIONS NO
#define INITIAL_INITIATORRECVDATASEGMENTLENGTH "262144"
#define INITIAL_TARGETRECVDATASEGMENTLENGTH "8192"
/*
* For [Header,Data]Digests.
*/
#define CRC32C "CRC32C"
/*
* For SessionType.
*/
#define DISCOVERY "Discovery"
#define NORMAL "Normal"
/*
* struct iscsi_param->use
*/
#define USE_LEADING_ONLY 0x01
#define USE_INITIAL_ONLY 0x02
#define USE_ALL 0x04
#define IS_USE_LEADING_ONLY(p) ((p)->use & USE_LEADING_ONLY)
#define IS_USE_INITIAL_ONLY(p) ((p)->use & USE_INITIAL_ONLY)
#define IS_USE_ALL(p) ((p)->use & USE_ALL)
#define SET_USE_INITIAL_ONLY(p) ((p)->use |= USE_INITIAL_ONLY)
/*
* struct iscsi_param->sender
*/
#define SENDER_INITIATOR 0x01
#define SENDER_TARGET 0x02
#define SENDER_BOTH 0x03
/* Used in iscsi_check_key() */
#define SENDER_RECEIVER 0x04
#define IS_SENDER_INITIATOR(p) ((p)->sender & SENDER_INITIATOR)
#define IS_SENDER_TARGET(p) ((p)->sender & SENDER_TARGET)
#define IS_SENDER_BOTH(p) ((p)->sender & SENDER_BOTH)
/*
* struct iscsi_param->scope
*/
#define SCOPE_CONNECTION_ONLY 0x01
#define SCOPE_SESSION_WIDE 0x02
#define IS_SCOPE_CONNECTION_ONLY(p) ((p)->scope & SCOPE_CONNECTION_ONLY)
#define IS_SCOPE_SESSION_WIDE(p) ((p)->scope & SCOPE_SESSION_WIDE)
/*
* struct iscsi_param->phase
*/
#define PHASE_SECURITY 0x01
#define PHASE_OPERATIONAL 0x02
#define PHASE_DECLARATIVE 0x04
#define PHASE_FFP0 0x08
#define IS_PHASE_SECURITY(p) ((p)->phase & PHASE_SECURITY)
#define IS_PHASE_OPERATIONAL(p) ((p)->phase & PHASE_OPERATIONAL)
#define IS_PHASE_DECLARATIVE(p) ((p)->phase & PHASE_DECLARATIVE)
#define IS_PHASE_FFP0(p) ((p)->phase & PHASE_FFP0)
/*
* struct iscsi_param->type
*/
#define TYPE_BOOL_AND 0x01
#define TYPE_BOOL_OR 0x02
#define TYPE_NUMBER 0x04
#define TYPE_NUMBER_RANGE 0x08
#define TYPE_STRING 0x10
#define TYPE_VALUE_LIST 0x20
#define IS_TYPE_BOOL_AND(p) ((p)->type & TYPE_BOOL_AND)
#define IS_TYPE_BOOL_OR(p) ((p)->type & TYPE_BOOL_OR)
#define IS_TYPE_NUMBER(p) ((p)->type & TYPE_NUMBER)
#define IS_TYPE_NUMBER_RANGE(p) ((p)->type & TYPE_NUMBER_RANGE)
#define IS_TYPE_STRING(p) ((p)->type & TYPE_STRING)
#define IS_TYPE_VALUE_LIST(p) ((p)->type & TYPE_VALUE_LIST)
/*
* struct iscsi_param->type_range
*/
#define TYPERANGE_BOOL_AND 0x0001
#define TYPERANGE_BOOL_OR 0x0002
#define TYPERANGE_0_TO_2 0x0004
#define TYPERANGE_0_TO_3600 0x0008
#define TYPERANGE_0_TO_32767 0x0010
#define TYPERANGE_0_TO_65535 0x0020
#define TYPERANGE_1_TO_65535 0x0040
#define TYPERANGE_2_TO_3600 0x0080
#define TYPERANGE_512_TO_16777215 0x0100
#define TYPERANGE_AUTH 0x0200
#define TYPERANGE_DIGEST 0x0400
#define TYPERANGE_ISCSINAME 0x0800
#define TYPERANGE_MARKINT 0x1000
#define TYPERANGE_SESSIONTYPE 0x2000
#define TYPERANGE_TARGETADDRESS 0x4000
#define TYPERANGE_UTF8 0x8000
#define IS_TYPERANGE_0_TO_2(p) ((p)->type_range & TYPERANGE_0_TO_2)
#define IS_TYPERANGE_0_TO_3600(p) ((p)->type_range & TYPERANGE_0_TO_3600)
#define IS_TYPERANGE_0_TO_32767(p) ((p)->type_range & TYPERANGE_0_TO_32767)
#define IS_TYPERANGE_0_TO_65535(p) ((p)->type_range & TYPERANGE_0_TO_65535)
#define IS_TYPERANGE_1_TO_65535(p) ((p)->type_range & TYPERANGE_1_TO_65535)
#define IS_TYPERANGE_2_TO_3600(p) ((p)->type_range & TYPERANGE_2_TO_3600)
#define IS_TYPERANGE_512_TO_16777215(p) ((p)->type_range & \
TYPERANGE_512_TO_16777215)
#define IS_TYPERANGE_AUTH_PARAM(p) ((p)->type_range & TYPERANGE_AUTH)
#define IS_TYPERANGE_DIGEST_PARAM(p) ((p)->type_range & TYPERANGE_DIGEST)
#define IS_TYPERANGE_SESSIONTYPE(p) ((p)->type_range & \
TYPERANGE_SESSIONTYPE)
/*
* struct iscsi_param->state
*/
#define PSTATE_ACCEPTOR 0x01
#define PSTATE_NEGOTIATE 0x02
#define PSTATE_PROPOSER 0x04
#define PSTATE_IRRELEVANT 0x08
#define PSTATE_REJECT 0x10
#define PSTATE_REPLY_OPTIONAL 0x20
#define PSTATE_RESPONSE_GOT 0x40
#define PSTATE_RESPONSE_SENT 0x80
#define IS_PSTATE_ACCEPTOR(p) ((p)->state & PSTATE_ACCEPTOR)
#define IS_PSTATE_NEGOTIATE(p) ((p)->state & PSTATE_NEGOTIATE)
#define IS_PSTATE_PROPOSER(p) ((p)->state & PSTATE_PROPOSER)
#define IS_PSTATE_IRRELEVANT(p) ((p)->state & PSTATE_IRRELEVANT)
#define IS_PSTATE_REJECT(p) ((p)->state & PSTATE_REJECT)
#define IS_PSTATE_REPLY_OPTIONAL(p) ((p)->state & PSTATE_REPLY_OPTIONAL)
#define IS_PSTATE_RESPONSE_GOT(p) ((p)->state & PSTATE_RESPONSE_GOT)
#define IS_PSTATE_RESPONSE_SENT(p) ((p)->state & PSTATE_RESPONSE_SENT)
#define SET_PSTATE_ACCEPTOR(p) ((p)->state |= PSTATE_ACCEPTOR)
#define SET_PSTATE_NEGOTIATE(p) ((p)->state |= PSTATE_NEGOTIATE)
#define SET_PSTATE_PROPOSER(p) ((p)->state |= PSTATE_PROPOSER)
#define SET_PSTATE_IRRELEVANT(p) ((p)->state |= PSTATE_IRRELEVANT)
#define SET_PSTATE_REJECT(p) ((p)->state |= PSTATE_REJECT)
#define SET_PSTATE_REPLY_OPTIONAL(p) ((p)->state |= PSTATE_REPLY_OPTIONAL)
#define SET_PSTATE_RESPONSE_GOT(p) ((p)->state |= PSTATE_RESPONSE_GOT)
#define SET_PSTATE_RESPONSE_SENT(p) ((p)->state |= PSTATE_RESPONSE_SENT)
#endif /* ISCSI_PARAMETERS_H */
|
#ifndef ISCSI_PARAMETERS_H
#define ISCSI_PARAMETERS_H
#include <scsi/iscsi_proto.h>
struct iscsi_extra_response {
char key[KEY_MAXLEN];
char value[32];
struct list_head er_list;
} ____cacheline_aligned;
struct iscsi_param {
char *name;
char *value;
u8 set_param;
u8 phase;
u8 scope;
u8 sender;
u8 type;
u8 use;
u16 type_range;
u32 state;
struct list_head p_list;
} ____cacheline_aligned;
extern int iscsi_login_rx_data(struct iscsi_conn *, char *, int);
extern int iscsi_login_tx_data(struct iscsi_conn *, char *, char *, int);
extern void iscsi_dump_conn_ops(struct iscsi_conn_ops *);
extern void iscsi_dump_sess_ops(struct iscsi_sess_ops *);
extern void iscsi_print_params(struct iscsi_param_list *);
extern int iscsi_create_default_params(struct iscsi_param_list **);
extern int iscsi_set_keys_to_negotiate(struct iscsi_param_list *, bool);
extern int iscsi_set_keys_irrelevant_for_discovery(struct iscsi_param_list *);
extern int iscsi_copy_param_list(struct iscsi_param_list **,
struct iscsi_param_list *, int);
extern int iscsi_change_param_value(char *, struct iscsi_param_list *, int);
extern void iscsi_release_param_list(struct iscsi_param_list *);
extern struct iscsi_param *iscsi_find_param_from_key(char *, struct iscsi_param_list *);
extern int iscsi_extract_key_value(char *, char **, char **);
extern int iscsi_update_param_value(struct iscsi_param *, char *);
extern int iscsi_decode_text_input(u8, u8, char *, u32, struct iscsi_conn *);
extern int iscsi_encode_text_output(u8, u8, char *, u32 *,
struct iscsi_param_list *);
extern int iscsi_check_negotiated_keys(struct iscsi_param_list *);
extern void iscsi_set_connection_parameters(struct iscsi_conn_ops *,
struct iscsi_param_list *);
extern void iscsi_set_session_parameters(struct iscsi_sess_ops *,
struct iscsi_param_list *, int);
#define YES "Yes"
#define NO "No"
#define ALL "All"
#define IRRELEVANT "Irrelevant"
#define NONE "None"
#define NOTUNDERSTOOD "NotUnderstood"
#define REJECT "Reject"
/*
* The Parameter Names.
*/
#define AUTHMETHOD "AuthMethod"
#define HEADERDIGEST "HeaderDigest"
#define DATADIGEST "DataDigest"
#define MAXCONNECTIONS "MaxConnections"
#define SENDTARGETS "SendTargets"
#define TARGETNAME "TargetName"
#define INITIATORNAME "InitiatorName"
#define TARGETALIAS "TargetAlias"
#define INITIATORALIAS "InitiatorAlias"
#define TARGETADDRESS "TargetAddress"
#define TARGETPORTALGROUPTAG "TargetPortalGroupTag"
#define INITIALR2T "InitialR2T"
#define IMMEDIATEDATA "ImmediateData"
#define MAXRECVDATASEGMENTLENGTH "MaxRecvDataSegmentLength"
#define MAXXMITDATASEGMENTLENGTH "MaxXmitDataSegmentLength"
#define MAXBURSTLENGTH "MaxBurstLength"
#define FIRSTBURSTLENGTH "FirstBurstLength"
#define DEFAULTTIME2WAIT "DefaultTime2Wait"
#define DEFAULTTIME2RETAIN "DefaultTime2Retain"
#define MAXOUTSTANDINGR2T "MaxOutstandingR2T"
#define DATAPDUINORDER "DataPDUInOrder"
#define DATASEQUENCEINORDER "DataSequenceInOrder"
#define ERRORRECOVERYLEVEL "ErrorRecoveryLevel"
#define SESSIONTYPE "SessionType"
#define IFMARKER "IFMarker"
#define OFMARKER "OFMarker"
#define IFMARKINT "IFMarkInt"
#define OFMARKINT "OFMarkInt"
#define X_EXTENSIONKEY "X-com.sbei.version"
#define X_EXTENSIONKEY_CISCO_NEW "X-com.cisco.protocol"
#define X_EXTENSIONKEY_CISCO_OLD "X-com.cisco.iscsi.draft"
/*
* Parameter names of iSCSI Extentions for RDMA (iSER). See RFC-5046
*/
#define RDMAEXTENSIONS "RDMAExtensions"
#define INITIATORRECVDATASEGMENTLENGTH "InitiatorRecvDataSegmentLength"
#define TARGETRECVDATASEGMENTLENGTH "TargetRecvDataSegmentLength"
/*
* For AuthMethod.
*/
#define KRB5 "KRB5"
#define SPKM1 "SPKM1"
#define SPKM2 "SPKM2"
#define SRP "SRP"
#define CHAP "CHAP"
/*
* Initial values for Parameter Negotiation.
*/
#define INITIAL_AUTHMETHOD CHAP
#define INITIAL_HEADERDIGEST "CRC32C,None"
#define INITIAL_DATADIGEST "CRC32C,None"
#define INITIAL_MAXCONNECTIONS "1"
#define INITIAL_SENDTARGETS ALL
#define INITIAL_TARGETNAME "LIO.Target"
#define INITIAL_INITIATORNAME "LIO.Initiator"
#define INITIAL_TARGETALIAS "LIO Target"
#define INITIAL_INITIATORALIAS "LIO Initiator"
#define INITIAL_TARGETADDRESS "0.0.0.0:0000,0"
#define INITIAL_TARGETPORTALGROUPTAG "1"
#define INITIAL_INITIALR2T YES
#define INITIAL_IMMEDIATEDATA YES
#define INITIAL_MAXRECVDATASEGMENTLENGTH "8192"
/*
* Match outgoing MXDSL default to incoming Open-iSCSI default
*/
#define INITIAL_MAXXMITDATASEGMENTLENGTH "262144"
#define INITIAL_MAXBURSTLENGTH "262144"
#define INITIAL_FIRSTBURSTLENGTH "65536"
#define INITIAL_DEFAULTTIME2WAIT "2"
#define INITIAL_DEFAULTTIME2RETAIN "20"
#define INITIAL_MAXOUTSTANDINGR2T "1"
#define INITIAL_DATAPDUINORDER YES
#define INITIAL_DATASEQUENCEINORDER YES
#define INITIAL_ERRORRECOVERYLEVEL "0"
#define INITIAL_SESSIONTYPE NORMAL
#define INITIAL_IFMARKER NO
#define INITIAL_OFMARKER NO
#define INITIAL_IFMARKINT "2048~65535"
#define INITIAL_OFMARKINT "2048~65535"
/*
* Initial values for iSER parameters following RFC-5046 Section 6
*/
#define INITIAL_RDMAEXTENSIONS NO
#define INITIAL_INITIATORRECVDATASEGMENTLENGTH "262144"
#define INITIAL_TARGETRECVDATASEGMENTLENGTH "8192"
/*
* For [Header,Data]Digests.
*/
#define CRC32C "CRC32C"
/*
* For SessionType.
*/
#define DISCOVERY "Discovery"
#define NORMAL "Normal"
/*
* struct iscsi_param->use
*/
#define USE_LEADING_ONLY 0x01
#define USE_INITIAL_ONLY 0x02
#define USE_ALL 0x04
#define IS_USE_LEADING_ONLY(p) ((p)->use & USE_LEADING_ONLY)
#define IS_USE_INITIAL_ONLY(p) ((p)->use & USE_INITIAL_ONLY)
#define IS_USE_ALL(p) ((p)->use & USE_ALL)
#define SET_USE_INITIAL_ONLY(p) ((p)->use |= USE_INITIAL_ONLY)
/*
* struct iscsi_param->sender
*/
#define SENDER_INITIATOR 0x01
#define SENDER_TARGET 0x02
#define SENDER_BOTH 0x03
/* Used in iscsi_check_key() */
#define SENDER_RECEIVER 0x04
#define IS_SENDER_INITIATOR(p) ((p)->sender & SENDER_INITIATOR)
#define IS_SENDER_TARGET(p) ((p)->sender & SENDER_TARGET)
#define IS_SENDER_BOTH(p) ((p)->sender & SENDER_BOTH)
/*
* struct iscsi_param->scope
*/
#define SCOPE_CONNECTION_ONLY 0x01
#define SCOPE_SESSION_WIDE 0x02
#define IS_SCOPE_CONNECTION_ONLY(p) ((p)->scope & SCOPE_CONNECTION_ONLY)
#define IS_SCOPE_SESSION_WIDE(p) ((p)->scope & SCOPE_SESSION_WIDE)
/*
* struct iscsi_param->phase
*/
#define PHASE_SECURITY 0x01
#define PHASE_OPERATIONAL 0x02
#define PHASE_DECLARATIVE 0x04
#define PHASE_FFP0 0x08
#define IS_PHASE_SECURITY(p) ((p)->phase & PHASE_SECURITY)
#define IS_PHASE_OPERATIONAL(p) ((p)->phase & PHASE_OPERATIONAL)
#define IS_PHASE_DECLARATIVE(p) ((p)->phase & PHASE_DECLARATIVE)
#define IS_PHASE_FFP0(p) ((p)->phase & PHASE_FFP0)
/*
* struct iscsi_param->type
*/
#define TYPE_BOOL_AND 0x01
#define TYPE_BOOL_OR 0x02
#define TYPE_NUMBER 0x04
#define TYPE_NUMBER_RANGE 0x08
#define TYPE_STRING 0x10
#define TYPE_VALUE_LIST 0x20
#define IS_TYPE_BOOL_AND(p) ((p)->type & TYPE_BOOL_AND)
#define IS_TYPE_BOOL_OR(p) ((p)->type & TYPE_BOOL_OR)
#define IS_TYPE_NUMBER(p) ((p)->type & TYPE_NUMBER)
#define IS_TYPE_NUMBER_RANGE(p) ((p)->type & TYPE_NUMBER_RANGE)
#define IS_TYPE_STRING(p) ((p)->type & TYPE_STRING)
#define IS_TYPE_VALUE_LIST(p) ((p)->type & TYPE_VALUE_LIST)
/*
* struct iscsi_param->type_range
*/
#define TYPERANGE_BOOL_AND 0x0001
#define TYPERANGE_BOOL_OR 0x0002
#define TYPERANGE_0_TO_2 0x0004
#define TYPERANGE_0_TO_3600 0x0008
#define TYPERANGE_0_TO_32767 0x0010
#define TYPERANGE_0_TO_65535 0x0020
/* struct iscsi_param->type_range (continued): bit flags describing the
 * set of values a login key may legally take during negotiation. */
#define TYPERANGE_1_TO_65535 0x0040
#define TYPERANGE_2_TO_3600 0x0080
#define TYPERANGE_512_TO_16777215 0x0100
#define TYPERANGE_AUTH 0x0200
#define TYPERANGE_DIGEST 0x0400
#define TYPERANGE_ISCSINAME 0x0800
#define TYPERANGE_MARKINT 0x1000
#define TYPERANGE_SESSIONTYPE 0x2000
#define TYPERANGE_TARGETADDRESS 0x4000
#define TYPERANGE_UTF8 0x8000
/* Predicates: does parameter (p) advertise the given value range? */
#define IS_TYPERANGE_0_TO_2(p) ((p)->type_range & TYPERANGE_0_TO_2)
#define IS_TYPERANGE_0_TO_3600(p) ((p)->type_range & TYPERANGE_0_TO_3600)
#define IS_TYPERANGE_0_TO_32767(p) ((p)->type_range & TYPERANGE_0_TO_32767)
#define IS_TYPERANGE_0_TO_65535(p) ((p)->type_range & TYPERANGE_0_TO_65535)
#define IS_TYPERANGE_1_TO_65535(p) ((p)->type_range & TYPERANGE_1_TO_65535)
#define IS_TYPERANGE_2_TO_3600(p) ((p)->type_range & TYPERANGE_2_TO_3600)
#define IS_TYPERANGE_512_TO_16777215(p) ((p)->type_range & \
TYPERANGE_512_TO_16777215)
#define IS_TYPERANGE_AUTH_PARAM(p) ((p)->type_range & TYPERANGE_AUTH)
#define IS_TYPERANGE_DIGEST_PARAM(p) ((p)->type_range & TYPERANGE_DIGEST)
#define IS_TYPERANGE_SESSIONTYPE(p) ((p)->type_range & \
TYPERANGE_SESSIONTYPE)
/*
* struct iscsi_param->state
*/
/* Per-parameter negotiation state bits. */
#define PSTATE_ACCEPTOR 0x01
#define PSTATE_NEGOTIATE 0x02
#define PSTATE_PROPOSER 0x04
#define PSTATE_IRRELEVANT 0x08
#define PSTATE_REJECT 0x10
#define PSTATE_REPLY_OPTIONAL 0x20
#define PSTATE_RESPONSE_GOT 0x40
#define PSTATE_RESPONSE_SENT 0x80
/* Test individual PSTATE_* bits on (p)->state. */
#define IS_PSTATE_ACCEPTOR(p) ((p)->state & PSTATE_ACCEPTOR)
#define IS_PSTATE_NEGOTIATE(p) ((p)->state & PSTATE_NEGOTIATE)
#define IS_PSTATE_PROPOSER(p) ((p)->state & PSTATE_PROPOSER)
#define IS_PSTATE_IRRELEVANT(p) ((p)->state & PSTATE_IRRELEVANT)
#define IS_PSTATE_REJECT(p) ((p)->state & PSTATE_REJECT)
#define IS_PSTATE_REPLY_OPTIONAL(p) ((p)->state & PSTATE_REPLY_OPTIONAL)
#define IS_PSTATE_RESPONSE_GOT(p) ((p)->state & PSTATE_RESPONSE_GOT)
#define IS_PSTATE_RESPONSE_SENT(p) ((p)->state & PSTATE_RESPONSE_SENT)
/* Set individual PSTATE_* bits on (p)->state. */
#define SET_PSTATE_ACCEPTOR(p) ((p)->state |= PSTATE_ACCEPTOR)
#define SET_PSTATE_NEGOTIATE(p) ((p)->state |= PSTATE_NEGOTIATE)
#define SET_PSTATE_PROPOSER(p) ((p)->state |= PSTATE_PROPOSER)
#define SET_PSTATE_IRRELEVANT(p) ((p)->state |= PSTATE_IRRELEVANT)
#define SET_PSTATE_REJECT(p) ((p)->state |= PSTATE_REJECT)
#define SET_PSTATE_REPLY_OPTIONAL(p) ((p)->state |= PSTATE_REPLY_OPTIONAL)
#define SET_PSTATE_RESPONSE_GOT(p) ((p)->state |= PSTATE_RESPONSE_GOT)
#define SET_PSTATE_RESPONSE_SENT(p) ((p)->state |= PSTATE_RESPONSE_SENT)
#endif /* ISCSI_PARAMETERS_H */
|
5675_1
|
crossvul
|
h
|
CWE-119
|
Improper Restriction of Operations within the Bounds of a Memory Buffer - Certain languages allow direct addressing of memory locations and do not automatically ensure that these locations are valid for the memory buffer that is being referenced.
|
c
|
#ifndef _LINUX_SCHED_H
#define _LINUX_SCHED_H
#include <uapi/linux/sched.h>
#include <linux/sched/prio.h>
/* Legacy POSIX scheduling parameters (priority only); see sched_attr below. */
struct sched_param {
int sched_priority;
};
#include <asm/param.h> /* for HZ */
#include <linux/capability.h>
#include <linux/threads.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/timex.h>
#include <linux/jiffies.h>
#include <linux/plist.h>
#include <linux/rbtree.h>
#include <linux/thread_info.h>
#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/nodemask.h>
#include <linux/mm_types.h>
#include <linux/preempt.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <linux/cputime.h>
#include <linux/smp.h>
#include <linux/sem.h>
#include <linux/shm.h>
#include <linux/signal.h>
#include <linux/compiler.h>
#include <linux/completion.h>
#include <linux/pid.h>
#include <linux/percpu.h>
#include <linux/topology.h>
#include <linux/proportions.h>
#include <linux/seccomp.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/rtmutex.h>
#include <linux/time.h>
#include <linux/param.h>
#include <linux/resource.h>
#include <linux/timer.h>
#include <linux/hrtimer.h>
#include <linux/task_io_accounting.h>
#include <linux/latencytop.h>
#include <linux/cred.h>
#include <linux/llist.h>
#include <linux/uidgid.h>
#include <linux/gfp.h>
#include <linux/magic.h>
#include <linux/cgroup-defs.h>
#include <asm/processor.h>
/* Size of the first published struct sched_attr, for fwd/bwd ABI compat. */
#define SCHED_ATTR_SIZE_VER0 48 /* sizeof first published struct */
/*
* Extended scheduling parameters data structure.
*
* This is needed because the original struct sched_param can not be
* altered without introducing ABI issues with legacy applications
* (e.g., in sched_getparam()).
*
* However, the possibility of specifying more than just a priority for
* the tasks may be useful for a wide variety of application fields, e.g.,
* multimedia, streaming, automation and control, and many others.
*
* This variant (sched_attr) is meant at describing a so-called
* sporadic time-constrained task. In such model a task is specified by:
* - the activation period or minimum instance inter-arrival time;
* - the maximum (or average, depending on the actual scheduling
* discipline) computation time of all instances, a.k.a. runtime;
* - the deadline (relative to the actual activation time) of each
* instance.
* Very briefly, a periodic (sporadic) task asks for the execution of
* some specific computation --which is typically called an instance--
* (at most) every period. Moreover, each instance typically lasts no more
* than the runtime and must be completed by time instant t equal to
* the instance activation time + the deadline.
*
* This is reflected by the actual fields of the sched_attr structure:
*
* @size size of the structure, for fwd/bwd compat.
*
* @sched_policy task's scheduling policy
* @sched_flags for customizing the scheduler behaviour
* @sched_nice task's nice value (SCHED_NORMAL/BATCH)
* @sched_priority task's static priority (SCHED_FIFO/RR)
* @sched_deadline representative of the task's deadline
* @sched_runtime representative of the task's runtime
* @sched_period representative of the task's period
*
* Given this task model, there are a multiplicity of scheduling algorithms
* and policies, that can be used to ensure all the tasks will make their
* timing constraints.
*
* As of now, the SCHED_DEADLINE policy (sched_dl scheduling class) is the
* only user of this new interface. More information about the algorithm
* available in the scheduling class file or in Documentation/.
*/
/* Extended scheduling parameters; see the explanatory comment above. */
struct sched_attr {
u32 size;
u32 sched_policy;
u64 sched_flags;
/* SCHED_NORMAL, SCHED_BATCH */
s32 sched_nice;
/* SCHED_FIFO, SCHED_RR */
u32 sched_priority;
/* SCHED_DEADLINE */
u64 sched_runtime;
u64 sched_deadline;
u64 sched_period;
};
/* Opaque forward declarations referenced from task-related structures. */
struct futex_pi_state;
struct robust_list_head;
struct bio_list;
struct fs_struct;
struct perf_event_context;
struct blk_plug;
struct filename;
struct nameidata;
/* Per-task VMA cache sizing: VMACACHE_SIZE (1 << VMACACHE_BITS) slots. */
#define VMACACHE_BITS 2
#define VMACACHE_SIZE (1U << VMACACHE_BITS)
#define VMACACHE_MASK (VMACACHE_SIZE - 1)
/*
* These are the constant used to fake the fixed-point load-average
* counting. Some notes:
* - 11 bit fractions expand to 22 bits by the multiplies: this gives
* a load-average precision of 10 bits integer + 11 bits fractional
* - if you want to count load-averages more often, you need more
* precision, or rounding will get you. With 2-second counting freq,
* the EXP_n values would be 1981, 2034 and 2043 if still using only
* 11 bit fractions.
*/
extern unsigned long avenrun[]; /* Load averages */
extern void get_avenrun(unsigned long *loads, unsigned long offset, int shift);
#define FSHIFT 11 /* nr of bits of precision */
#define FIXED_1 (1<<FSHIFT) /* 1.0 as fixed-point */
#define LOAD_FREQ (5*HZ+1) /* 5 sec intervals */
#define EXP_1 1884 /* 1/exp(5sec/1min) as fixed-point */
#define EXP_5 2014 /* 1/exp(5sec/5min) */
#define EXP_15 2037 /* 1/exp(5sec/15min) */
/*
* Exponentially decay "load" and fold in "n" active tasks.
* NOTE: expands to three bare statements (no do/while wrapper) —
* take care when using inside unbraced if/else.
*/
#define CALC_LOAD(load,exp,n) \
load *= exp; \
load += n*(FIXED_1-exp); \
load >>= FSHIFT;
/* Global process/scheduler statistics exported by the scheduler core. */
extern unsigned long total_forks;
extern int nr_threads;
DECLARE_PER_CPU(unsigned long, process_counts);
extern int nr_processes(void);
extern unsigned long nr_running(void);
extern bool single_task_running(void);
extern unsigned long nr_iowait(void);
extern unsigned long nr_iowait_cpu(int cpu);
extern void get_iowait_load(unsigned long *nr_waiters, unsigned long *load);
extern void calc_global_load(unsigned long ticks);
#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
extern void update_cpu_load_nohz(void);
#else
/* No-op stub when NOHZ cpu-load accounting is not built in. */
static inline void update_cpu_load_nohz(void) { }
#endif
extern unsigned long get_parent_ip(unsigned long addr);
extern void dump_cpu_task(int cpu);
struct seq_file;
struct cfs_rq;
struct task_group;
#ifdef CONFIG_SCHED_DEBUG
/* Scheduler-debug show/reset hooks (CONFIG_SCHED_DEBUG only). */
extern void proc_sched_show_task(struct task_struct *p, struct seq_file *m);
extern void proc_sched_set_task(struct task_struct *p);
#endif
/*
* Task state bitmask. NOTE! These bits are also
* encoded in fs/proc/array.c: get_task_state().
*
* We have two separate sets of flags: task->state
* is about runnability, while task->exit_state are
* about the task exiting. Confusing, but this way
* modifying one set can't modify the other one by
* mistake.
*/
/* Task state bit values; see the explanatory comment above. */
#define TASK_RUNNING 0
#define TASK_INTERRUPTIBLE 1
#define TASK_UNINTERRUPTIBLE 2
#define __TASK_STOPPED 4
#define __TASK_TRACED 8
/* in tsk->exit_state */
#define EXIT_DEAD 16
#define EXIT_ZOMBIE 32
#define EXIT_TRACE (EXIT_ZOMBIE | EXIT_DEAD)
/* in tsk->state again */
#define TASK_DEAD 64
#define TASK_WAKEKILL 128
#define TASK_WAKING 256
#define TASK_PARKED 512
#define TASK_NOLOAD 1024
#define TASK_STATE_MAX 2048
#define TASK_STATE_TO_CHAR_STR "RSDTtXZxKWPN"
/* Compile-time check: exactly one state character per TASK_* bit. */
extern char ___assert_task_state[1 - 2*!!(
sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1)];
/* Convenience macros for the sake of set_task_state */
#define TASK_KILLABLE (TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
#define TASK_STOPPED (TASK_WAKEKILL | __TASK_STOPPED)
#define TASK_TRACED (TASK_WAKEKILL | __TASK_TRACED)
#define TASK_IDLE (TASK_UNINTERRUPTIBLE | TASK_NOLOAD)
/* Convenience macros for the sake of wake_up */
#define TASK_NORMAL (TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
#define TASK_ALL (TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED)
/* get_task_state() */
#define TASK_REPORT (TASK_RUNNING | TASK_INTERRUPTIBLE | \
TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
__TASK_TRACED | EXIT_ZOMBIE | EXIT_DEAD)
/* Predicates on task->state. */
#define task_is_traced(task) ((task->state & __TASK_TRACED) != 0)
#define task_is_stopped(task) ((task->state & __TASK_STOPPED) != 0)
#define task_is_stopped_or_traced(task) \
((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
#define task_contributes_to_load(task) \
((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
(task->flags & PF_FROZEN) == 0 && \
(task->state & TASK_NOLOAD) == 0)
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
/* Debug variants additionally record the caller's IP in task_state_change. */
#define __set_task_state(tsk, state_value) \
do { \
(tsk)->task_state_change = _THIS_IP_; \
(tsk)->state = (state_value); \
} while (0)
#define set_task_state(tsk, state_value) \
do { \
(tsk)->task_state_change = _THIS_IP_; \
smp_store_mb((tsk)->state, (state_value)); \
} while (0)
/*
* set_current_state() includes a barrier so that the write of current->state
* is correctly serialised wrt the caller's subsequent test of whether to
* actually sleep:
*
* set_current_state(TASK_UNINTERRUPTIBLE);
* if (do_i_need_to_sleep())
* schedule();
*
* If the caller does not need such serialisation then use __set_current_state()
*/
#define __set_current_state(state_value) \
do { \
current->task_state_change = _THIS_IP_; \
current->state = (state_value); \
} while (0)
#define set_current_state(state_value) \
do { \
current->task_state_change = _THIS_IP_; \
smp_store_mb(current->state, (state_value)); \
} while (0)
#else
#define __set_task_state(tsk, state_value) \
do { (tsk)->state = (state_value); } while (0)
#define set_task_state(tsk, state_value) \
smp_store_mb((tsk)->state, (state_value))
/*
* set_current_state() includes a barrier so that the write of current->state
* is correctly serialised wrt the caller's subsequent test of whether to
* actually sleep:
*
* set_current_state(TASK_UNINTERRUPTIBLE);
* if (do_i_need_to_sleep())
* schedule();
*
* If the caller does not need such serialisation then use __set_current_state()
*/
#define __set_current_state(state_value) \
do { current->state = (state_value); } while (0)
#define set_current_state(state_value) \
smp_store_mb(current->state, (state_value))
#endif
/* Task command name length */
#define TASK_COMM_LEN 16
#include <linux/spinlock.h>
/*
* This serializes "schedule()" and also protects
* the run-queue from deletions/modifications (but
* _adding_ to the beginning of the run-queue has
* a separate lock).
*/
/* Core scheduler locks and boot-time initialization entry points. */
extern rwlock_t tasklist_lock;
extern spinlock_t mmlist_lock;
struct task_struct;
#ifdef CONFIG_PROVE_RCU
extern int lockdep_tasklist_lock_is_held(void);
#endif /* #ifdef CONFIG_PROVE_RCU */
extern void sched_init(void);
extern void sched_init_smp(void);
extern asmlinkage void schedule_tail(struct task_struct *prev);
extern void init_idle(struct task_struct *idle, int cpu);
extern void init_idle_bootup_task(struct task_struct *idle);
extern cpumask_var_t cpu_isolated_map;
extern int runqueue_is_locked(int cpu);
#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
extern void nohz_balance_enter_idle(int cpu);
extern void set_cpu_sd_state_idle(void);
extern int get_nohz_timer_target(void);
#else
/* NOHZ balancing hooks compile away without CONFIG_NO_HZ_COMMON. */
static inline void nohz_balance_enter_idle(int cpu) { }
static inline void set_cpu_sd_state_idle(void) { }
#endif
/*
* Only dump TASK_* tasks. (0 for all tasks)
*/
extern void show_state_filter(unsigned long state_filter);
/* Dump the state of all tasks (filter 0 == everything). */
static inline void show_state(void)
{
show_state_filter(0);
}
extern void show_regs(struct pt_regs *);
/*
* TASK is a pointer to the task whose backtrace we want to see (or NULL for current
* task), SP is the stack pointer of the first frame that should be shown in the back
* trace (or NULL if the entire call-chain of the task should be shown).
*/
extern void show_stack(struct task_struct *task, unsigned long *sp);
extern void cpu_init (void);
extern void trap_init(void);
extern void update_process_times(int user);
extern void scheduler_tick(void);
extern void sched_show_task(struct task_struct *p);
#ifdef CONFIG_LOCKUP_DETECTOR
extern void touch_softlockup_watchdog(void);
extern void touch_softlockup_watchdog_sync(void);
extern void touch_all_softlockup_watchdogs(void);
extern int proc_dowatchdog_thresh(struct ctl_table *table, int write,
void __user *buffer,
size_t *lenp, loff_t *ppos);
extern unsigned int softlockup_panic;
extern unsigned int hardlockup_panic;
void lockup_detector_init(void);
#else
/* Lockup-detector hooks are no-ops without CONFIG_LOCKUP_DETECTOR. */
static inline void touch_softlockup_watchdog(void)
{
}
static inline void touch_softlockup_watchdog_sync(void)
{
}
static inline void touch_all_softlockup_watchdogs(void)
{
}
static inline void lockup_detector_init(void)
{
}
#endif
#ifdef CONFIG_DETECT_HUNG_TASK
void reset_hung_task_detector(void);
#else
static inline void reset_hung_task_detector(void)
{
}
#endif
/* Attach to any functions which should be ignored in wchan output. */
#define __sched __attribute__((__section__(".sched.text")))
/* Linker adds these: start and end of __sched functions */
extern char __sched_text_start[], __sched_text_end[];
/* Is this address in the __sched functions? */
extern int in_sched_functions(unsigned long addr);
#define MAX_SCHEDULE_TIMEOUT LONG_MAX
extern signed long schedule_timeout(signed long timeout);
extern signed long schedule_timeout_interruptible(signed long timeout);
extern signed long schedule_timeout_killable(signed long timeout);
extern signed long schedule_timeout_uninterruptible(signed long timeout);
asmlinkage void schedule(void);
extern void schedule_preempt_disabled(void);
extern long io_schedule_timeout(long timeout);
/* Block waiting for I/O completion, with no timeout. */
static inline void io_schedule(void)
{
io_schedule_timeout(MAX_SCHEDULE_TIMEOUT);
}
struct nsproxy;
struct user_namespace;
#ifdef CONFIG_MMU
/* Architecture hooks for laying out a process address space. */
extern void arch_pick_mmap_layout(struct mm_struct *mm);
extern unsigned long
arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
unsigned long, unsigned long);
extern unsigned long
arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
unsigned long len, unsigned long pgoff,
unsigned long flags);
#else
static inline void arch_pick_mmap_layout(struct mm_struct *mm) {}
#endif
#define SUID_DUMP_DISABLE 0 /* No setuid dumping */
#define SUID_DUMP_USER 1 /* Dump as user of process */
#define SUID_DUMP_ROOT 2 /* Dump as root */
/* mm flags */
/* for SUID_DUMP_* above */
#define MMF_DUMPABLE_BITS 2
#define MMF_DUMPABLE_MASK ((1 << MMF_DUMPABLE_BITS) - 1)
extern void set_dumpable(struct mm_struct *mm, int value);
/*
 * This returns the actual value of the suid_dumpable flag. For things
 * that are using this for checking for privilege transitions, it must
 * test against SUID_DUMP_USER rather than treating it as a boolean
 * value.
 */
static inline int __get_dumpable(unsigned long mm_flags)
{
return mm_flags & MMF_DUMPABLE_MASK;
}
/* As above, but extracts the dumpable value from an mm_struct. */
static inline int get_dumpable(struct mm_struct *mm)
{
return __get_dumpable(mm->flags);
}
/* coredump filter bits */
#define MMF_DUMP_ANON_PRIVATE 2
#define MMF_DUMP_ANON_SHARED 3
#define MMF_DUMP_MAPPED_PRIVATE 4
#define MMF_DUMP_MAPPED_SHARED 5
#define MMF_DUMP_ELF_HEADERS 6
#define MMF_DUMP_HUGETLB_PRIVATE 7
#define MMF_DUMP_HUGETLB_SHARED 8
#define MMF_DUMP_DAX_PRIVATE 9
#define MMF_DUMP_DAX_SHARED 10
#define MMF_DUMP_FILTER_SHIFT MMF_DUMPABLE_BITS
#define MMF_DUMP_FILTER_BITS 9
#define MMF_DUMP_FILTER_MASK \
(((1 << MMF_DUMP_FILTER_BITS) - 1) << MMF_DUMP_FILTER_SHIFT)
#define MMF_DUMP_FILTER_DEFAULT \
((1 << MMF_DUMP_ANON_PRIVATE) | (1 << MMF_DUMP_ANON_SHARED) |\
(1 << MMF_DUMP_HUGETLB_PRIVATE) | MMF_DUMP_MASK_DEFAULT_ELF)
#ifdef CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS
# define MMF_DUMP_MASK_DEFAULT_ELF (1 << MMF_DUMP_ELF_HEADERS)
#else
# define MMF_DUMP_MASK_DEFAULT_ELF 0
#endif
/* leave room for more dump flags */
#define MMF_VM_MERGEABLE 16 /* KSM may merge identical pages */
#define MMF_VM_HUGEPAGE 17 /* set when VM_HUGEPAGE is set on vma */
#define MMF_EXE_FILE_CHANGED 18 /* see prctl_set_mm_exe_file() */
#define MMF_HAS_UPROBES 19 /* has uprobes */
#define MMF_RECALC_UPROBES 20 /* MMF_HAS_UPROBES can be wrong */
#define MMF_INIT_MASK (MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK)
/* Shared signal handler table with its protecting lock. */
struct sighand_struct {
atomic_t count;
struct k_sigaction action[_NSIG];
spinlock_t siglock;
wait_queue_head_t signalfd_wqh;
};
/* Per-process accounting information (CONFIG_BSD_PROCESS_ACCT). */
struct pacct_struct {
int ac_flag;
long ac_exitcode;
unsigned long ac_mem;
cputime_t ac_utime, ac_stime;
unsigned long ac_minflt, ac_majflt;
};
/* One CPU-time interval timer: expiry, increment, and rounding error. */
struct cpu_itimer {
cputime_t expires;
cputime_t incr;
u32 error;
u32 incr_error;
};
/**
* struct prev_cputime - snapshot of system and user cputime
* @utime: time spent in user mode
* @stime: time spent in system mode
* @lock: protects the above two fields
*
* Stores previous user/system time values such that we can guarantee
* monotonicity.
*/
struct prev_cputime {
#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
cputime_t utime;
cputime_t stime;
raw_spinlock_t lock;
#endif
};
/* Zero the cached times and initialize the protecting lock. */
static inline void prev_cputime_init(struct prev_cputime *prev)
{
#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
prev->utime = prev->stime = 0;
raw_spin_lock_init(&prev->lock);
#endif
}
/**
* struct task_cputime - collected CPU time counts
* @utime: time spent in user mode, in &cputime_t units
* @stime: time spent in kernel mode, in &cputime_t units
* @sum_exec_runtime: total time spent on the CPU, in nanoseconds
*
* This structure groups together three kinds of CPU time that are tracked for
* threads and thread groups. Most things considering CPU time want to group
* these counts together and treat all three of them in parallel.
*/
struct task_cputime {
cputime_t utime;
cputime_t stime;
unsigned long long sum_exec_runtime;
};
/* Alternate field names when used to cache expirations. */
#define virt_exp utime
#define prof_exp stime
#define sched_exp sum_exec_runtime
/* Zero-filled task_cputime initializer. */
#define INIT_CPUTIME \
(struct task_cputime) { \
.utime = 0, \
.stime = 0, \
.sum_exec_runtime = 0, \
}
/*
* This is the atomic variant of task_cputime, which can be used for
* storing and updating task_cputime statistics without locking.
*/
struct task_cputime_atomic {
atomic64_t utime;
atomic64_t stime;
atomic64_t sum_exec_runtime;
};
/* Zero-filled task_cputime_atomic initializer. */
#define INIT_CPUTIME_ATOMIC \
(struct task_cputime_atomic) { \
.utime = ATOMIC64_INIT(0), \
.stime = ATOMIC64_INIT(0), \
.sum_exec_runtime = ATOMIC64_INIT(0), \
}
/* preempt_count seed values used at boot and across fork. */
#define PREEMPT_DISABLED (PREEMPT_DISABLE_OFFSET + PREEMPT_ENABLED)
/*
* Disable preemption until the scheduler is running -- use an unconditional
* value so that it also works on !PREEMPT_COUNT kernels.
*
* Reset by start_kernel()->sched_init()->init_idle()->init_idle_preempt_count().
*/
#define INIT_PREEMPT_COUNT PREEMPT_OFFSET
/*
* Initial preempt_count value; reflects the preempt_count schedule invariant
* which states that during context switches:
*
* preempt_count() == 2*PREEMPT_DISABLE_OFFSET
*
* Note: PREEMPT_DISABLE_OFFSET is 0 for !PREEMPT_COUNT kernels.
* Note: See finish_task_switch().
*/
#define FORK_PREEMPT_COUNT (2*PREEMPT_DISABLE_OFFSET + PREEMPT_ENABLED)
/**
* struct thread_group_cputimer - thread group interval timer counts
* @cputime_atomic: atomic thread group interval timers.
* @running: true when there are timers running and
* @cputime_atomic receives updates.
* @checking_timer: true when a thread in the group is in the
* process of checking for thread group timers.
*
* This structure contains the version of task_cputime, above, that is
* used for thread group CPU timer calculations.
*/
/* See the kernel-doc comment above for field semantics. */
struct thread_group_cputimer {
struct task_cputime_atomic cputime_atomic;
bool running;
bool checking_timer;
};
#include <linux/rwsem.h>
struct autogroup;
/*
* NOTE! "signal_struct" does not have its own
* locking, because a shared signal_struct always
* implies a shared sighand_struct, so locking
* sighand_struct is always a proper superset of
* the locking of signal_struct.
*/
/* Shared per-thread-group signal state; locking rules in the comment above. */
struct signal_struct {
atomic_t sigcnt;
atomic_t live;
int nr_threads;
struct list_head thread_head;
wait_queue_head_t wait_chldexit; /* for wait4() */
/* current thread group signal load-balancing target: */
struct task_struct *curr_target;
/* shared signal handling: */
struct sigpending shared_pending;
/* thread group exit support */
int group_exit_code;
/* overloaded:
* - notify group_exit_task when ->count is equal to notify_count
* - everyone except group_exit_task is stopped during signal delivery
* of fatal signals, group_exit_task processes the signal.
*/
int notify_count;
struct task_struct *group_exit_task;
/* thread group stop support, overloads group_exit_code too */
int group_stop_count;
unsigned int flags; /* see SIGNAL_* flags below */
/*
* PR_SET_CHILD_SUBREAPER marks a process, like a service
* manager, to re-parent orphan (double-forking) child processes
* to this process instead of 'init'. The service manager is
* able to receive SIGCHLD signals and is able to investigate
* the process until it calls wait(). All children of this
* process will inherit a flag if they should look for a
* child_subreaper process at exit.
*/
unsigned int is_child_subreaper:1;
unsigned int has_child_subreaper:1;
/* POSIX.1b Interval Timers */
int posix_timer_id;
struct list_head posix_timers;
/* ITIMER_REAL timer for the process */
struct hrtimer real_timer;
struct pid *leader_pid;
ktime_t it_real_incr;
/*
* ITIMER_PROF and ITIMER_VIRTUAL timers for the process, we use
* CPUCLOCK_PROF and CPUCLOCK_VIRT for indexing array as these
* values are defined to 0 and 1 respectively
*/
struct cpu_itimer it[2];
/*
* Thread group totals for process CPU timers.
* See thread_group_cputimer(), et al, for details.
*/
struct thread_group_cputimer cputimer;
/* Earliest-expiration cache. */
struct task_cputime cputime_expires;
struct list_head cpu_timers[3];
struct pid *tty_old_pgrp;
/* boolean value for session group leader */
int leader;
struct tty_struct *tty; /* NULL if no tty */
#ifdef CONFIG_SCHED_AUTOGROUP
struct autogroup *autogroup;
#endif
/*
* Cumulative resource counters for dead threads in the group,
* and for reaped dead child processes forked by this group.
* Live threads maintain their own counters and add to these
* in __exit_signal, except for the group leader.
*/
seqlock_t stats_lock;
cputime_t utime, stime, cutime, cstime;
cputime_t gtime;
cputime_t cgtime;
struct prev_cputime prev_cputime;
unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt;
unsigned long inblock, oublock, cinblock, coublock;
unsigned long maxrss, cmaxrss;
struct task_io_accounting ioac;
/*
* Cumulative ns of schedule CPU time for dead threads in the
* group, not including a zombie group leader, (This only differs
* from jiffies_to_ns(utime + stime) if sched_clock uses something
* other than jiffies.)
*/
unsigned long long sum_sched_runtime;
/*
* We don't bother to synchronize most readers of this at all,
* because there is no reader checking a limit that actually needs
* to get both rlim_cur and rlim_max atomically, and either one
* alone is a single word that can safely be read normally.
* getrlimit/setrlimit use task_lock(current->group_leader) to
* protect this instead of the siglock, because they really
* have no need to disable irqs.
*/
struct rlimit rlim[RLIM_NLIMITS];
#ifdef CONFIG_BSD_PROCESS_ACCT
struct pacct_struct pacct; /* per-process accounting information */
#endif
#ifdef CONFIG_TASKSTATS
struct taskstats *stats;
#endif
#ifdef CONFIG_AUDIT
unsigned audit_tty;
unsigned audit_tty_log_passwd;
struct tty_audit_buf *tty_audit_buf;
#endif
oom_flags_t oom_flags;
short oom_score_adj; /* OOM kill score adjustment */
short oom_score_adj_min; /* OOM kill score adjustment min value.
* Only settable by CAP_SYS_RESOURCE. */
struct mutex cred_guard_mutex; /* guard against foreign influences on
* credential calculations
* (notably. ptrace) */
};
/*
* Bits in flags field of signal_struct.
*/
/* Bits in signal_struct.flags. */
#define SIGNAL_STOP_STOPPED 0x00000001 /* job control stop in effect */
#define SIGNAL_STOP_CONTINUED 0x00000002 /* SIGCONT since WCONTINUED reap */
#define SIGNAL_GROUP_EXIT 0x00000004 /* group exit in progress */
#define SIGNAL_GROUP_COREDUMP 0x00000008 /* coredump in progress */
/*
* Pending notifications to parent.
*/
#define SIGNAL_CLD_STOPPED 0x00000010
#define SIGNAL_CLD_CONTINUED 0x00000020
#define SIGNAL_CLD_MASK (SIGNAL_CLD_STOPPED|SIGNAL_CLD_CONTINUED)
#define SIGNAL_UNKILLABLE 0x00000040 /* for init: ignore fatal signals */
/* If true, all threads except ->group_exit_task have pending SIGKILL */
static inline int signal_group_exit(const struct signal_struct *sig)
{
return (sig->flags & SIGNAL_GROUP_EXIT) ||
(sig->group_exit_task != NULL);
}
/*
* Some day this will be a full-fledged user tracking system..
*/
/* Per-UID resource counters shared by all of that user's processes. */
struct user_struct {
atomic_t __count; /* reference count */
atomic_t processes; /* How many processes does this user have? */
atomic_t sigpending; /* How many pending signals does this user have? */
#ifdef CONFIG_INOTIFY_USER
atomic_t inotify_watches; /* How many inotify watches does this user have? */
atomic_t inotify_devs; /* How many inotify devs does this user have opened? */
#endif
#ifdef CONFIG_FANOTIFY
atomic_t fanotify_listeners;
#endif
#ifdef CONFIG_EPOLL
atomic_long_t epoll_watches; /* The number of file descriptors currently watched */
#endif
#ifdef CONFIG_POSIX_MQUEUE
/* protected by mq_lock */
unsigned long mq_bytes; /* How many bytes can be allocated to mqueue? */
#endif
unsigned long locked_shm; /* How many pages of mlocked shm ? */
#ifdef CONFIG_KEYS
struct key *uid_keyring; /* UID specific keyring */
struct key *session_keyring; /* UID's default session keyring */
#endif
/* Hash table maintenance information */
struct hlist_node uidhash_node;
kuid_t uid;
#if defined(CONFIG_PERF_EVENTS) || defined(CONFIG_BPF_SYSCALL)
atomic_long_t locked_vm;
#endif
};
extern int uids_sysfs_init(void);
extern struct user_struct *find_user(kuid_t);
extern struct user_struct root_user;
#define INIT_USER (&root_user)
struct backing_dev_info;
struct reclaim_state;
#ifdef CONFIG_SCHED_INFO
/* Cumulative run/wait statistics for one task (CONFIG_SCHED_INFO). */
struct sched_info {
/* cumulative counters */
unsigned long pcount; /* # of times run on this cpu */
unsigned long long run_delay; /* time spent waiting on a runqueue */
/* timestamps */
unsigned long long last_arrival,/* when we last ran on a cpu */
last_queued; /* when we were last queued to run */
};
#endif /* CONFIG_SCHED_INFO */
#ifdef CONFIG_TASK_DELAY_ACCT
/* Per-task delay accounting state; conventions described in-line below. */
struct task_delay_info {
spinlock_t lock;
unsigned int flags; /* Private per-task flags */
/* For each stat XXX, add following, aligned appropriately
*
* struct timespec XXX_start, XXX_end;
* u64 XXX_delay;
* u32 XXX_count;
*
* Atomicity of updates to XXX_delay, XXX_count protected by
* single lock above (split into XXX_lock if contention is an issue).
*/
/*
* XXX_count is incremented on every XXX operation, the delay
* associated with the operation is added to XXX_delay.
* XXX_delay contains the accumulated delay time in nanoseconds.
*/
u64 blkio_start; /* Shared by blkio, swapin */
u64 blkio_delay; /* wait for sync block io completion */
u64 swapin_delay; /* wait for swapin block io completion */
u32 blkio_count; /* total count of the number of sync block */
/* io operations performed */
u32 swapin_count; /* total count of the number of swapin block */
/* io operations performed */
u64 freepages_start;
u64 freepages_delay; /* wait for memory reclaim */
u32 freepages_count; /* total count of memory reclaim */
};
#endif /* CONFIG_TASK_DELAY_ACCT */
/* Whether sched_info statistics are collected under this kernel config. */
static inline int sched_info_on(void)
{
#ifdef CONFIG_SCHEDSTATS
return 1;
#elif defined(CONFIG_TASK_DELAY_ACCT)
extern int delayacct_on;
return delayacct_on;
#else
return 0;
#endif
}
/* CPU idleness classification used to index load-balancer statistics. */
enum cpu_idle_type {
CPU_IDLE,
CPU_NOT_IDLE,
CPU_NEWLY_IDLE,
CPU_MAX_IDLE_TYPES
};
/*
* Increase resolution of cpu_capacity calculations
*/
#define SCHED_CAPACITY_SHIFT 10
#define SCHED_CAPACITY_SCALE (1L << SCHED_CAPACITY_SHIFT)
/*
* Wake-queues are lists of tasks with a pending wakeup, whose
* callers have already marked the task as woken internally,
* and can thus carry on. A common use case is being able to
* do the wakeups once the corresponding user lock has been
* released.
*
* We hold reference to each task in the list across the wakeup,
* thus guaranteeing that the memory is still valid by the time
* the actual wakeups are performed in wake_up_q().
*
* One per task suffices, because there's never a need for a task to be
* in two wake queues simultaneously; it is forbidden to abandon a task
* in a wake queue (a call to wake_up_q() _must_ follow), so if a task is
* already in a wake queue, the wakeup will happen soon and the second
* waker can just skip it.
*
* The WAKE_Q macro declares and initializes the list head.
* wake_up_q() does NOT reinitialize the list; it's expected to be
* called near the end of a function, where the fact that the queue is
* not used again will be easy to see by inspection.
*
* Note that this can cause spurious wakeups. schedule() callers
* must ensure the call is done inside a loop, confirming that the
* wakeup condition has in fact occurred.
*/
/* Wake-queue node/head; usage is described in the comment above. */
struct wake_q_node {
struct wake_q_node *next;
};
struct wake_q_head {
struct wake_q_node *first;
struct wake_q_node **lastp;
};
/* Sentinel value marking the tail of a wake queue. */
#define WAKE_Q_TAIL ((struct wake_q_node *) 0x01)
#define WAKE_Q(name) \
struct wake_q_head name = { WAKE_Q_TAIL, &name.first }
extern void wake_q_add(struct wake_q_head *head,
struct task_struct *task);
extern void wake_up_q(struct wake_q_head *head);
/*
* sched-domains (multiprocessor balancing) declarations:
*/
#ifdef CONFIG_SMP
/* sched_domain behaviour flags (stored in sched_domain.flags). */
#define SD_LOAD_BALANCE 0x0001 /* Do load balancing on this domain. */
#define SD_BALANCE_NEWIDLE 0x0002 /* Balance when about to become idle */
#define SD_BALANCE_EXEC 0x0004 /* Balance on exec */
#define SD_BALANCE_FORK 0x0008 /* Balance on fork, clone */
#define SD_BALANCE_WAKE 0x0010 /* Balance on wakeup */
#define SD_WAKE_AFFINE 0x0020 /* Wake task to waking CPU */
#define SD_SHARE_CPUCAPACITY 0x0080 /* Domain members share cpu power */
#define SD_SHARE_POWERDOMAIN 0x0100 /* Domain members share power domain */
#define SD_SHARE_PKG_RESOURCES 0x0200 /* Domain members share cpu pkg resources */
#define SD_SERIALIZE 0x0400 /* Only a single load balancing instance */
#define SD_ASYM_PACKING 0x0800 /* Place busy groups earlier in the domain */
#define SD_PREFER_SIBLING 0x1000 /* Prefer to place tasks in a sibling domain */
#define SD_OVERLAP 0x2000 /* sched_domains of this level overlap */
#define SD_NUMA 0x4000 /* cross-node balancing */
#ifdef CONFIG_SCHED_SMT
/* Default domain flags for the SMT topology level. */
static inline int cpu_smt_flags(void)
{
return SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES;
}
#endif
#ifdef CONFIG_SCHED_MC
/* Default domain flags for the multi-core topology level. */
static inline int cpu_core_flags(void)
{
return SD_SHARE_PKG_RESOURCES;
}
#endif
#ifdef CONFIG_NUMA
/* Default domain flags for NUMA topology levels. */
static inline int cpu_numa_flags(void)
{
return SD_NUMA;
}
#endif
/* Tunables attached to a sched domain by partition_sched_domains(). */
struct sched_domain_attr {
int relax_domain_level;
};
/* Default attributes: relax_domain_level unset (-1). */
#define SD_ATTR_INIT (struct sched_domain_attr) { \
.relax_domain_level = -1, \
}
extern int sched_domain_level_max;
struct sched_group;
/* One level of the scheduler's load-balancing topology tree. */
struct sched_domain {
/* These fields must be setup */
struct sched_domain *parent; /* top domain must be null terminated */
struct sched_domain *child; /* bottom domain must be null terminated */
struct sched_group *groups; /* the balancing groups of the domain */
unsigned long min_interval; /* Minimum balance interval ms */
unsigned long max_interval; /* Maximum balance interval ms */
unsigned int busy_factor; /* less balancing by factor if busy */
unsigned int imbalance_pct; /* No balance until over watermark */
unsigned int cache_nice_tries; /* Leave cache hot tasks for # tries */
unsigned int busy_idx;
unsigned int idle_idx;
unsigned int newidle_idx;
unsigned int wake_idx;
unsigned int forkexec_idx;
unsigned int smt_gain;
int nohz_idle; /* NOHZ IDLE status */
int flags; /* See SD_* */
int level;
/* Runtime fields. */
unsigned long last_balance; /* init to jiffies. units in jiffies */
unsigned int balance_interval; /* initialise to 1. units in ms. */
unsigned int nr_balance_failed; /* initialise to 0 */
/* idle_balance() stats */
u64 max_newidle_lb_cost;
unsigned long next_decay_max_lb_cost;
#ifdef CONFIG_SCHEDSTATS
/* load_balance() stats */
unsigned int lb_count[CPU_MAX_IDLE_TYPES];
unsigned int lb_failed[CPU_MAX_IDLE_TYPES];
unsigned int lb_balanced[CPU_MAX_IDLE_TYPES];
unsigned int lb_imbalance[CPU_MAX_IDLE_TYPES];
unsigned int lb_gained[CPU_MAX_IDLE_TYPES];
unsigned int lb_hot_gained[CPU_MAX_IDLE_TYPES];
unsigned int lb_nobusyg[CPU_MAX_IDLE_TYPES];
unsigned int lb_nobusyq[CPU_MAX_IDLE_TYPES];
/* Active load balancing */
unsigned int alb_count;
unsigned int alb_failed;
unsigned int alb_pushed;
/* SD_BALANCE_EXEC stats */
unsigned int sbe_count;
unsigned int sbe_balanced;
unsigned int sbe_pushed;
/* SD_BALANCE_FORK stats */
unsigned int sbf_count;
unsigned int sbf_balanced;
unsigned int sbf_pushed;
/* try_to_wake_up() stats */
unsigned int ttwu_wake_remote;
unsigned int ttwu_move_affine;
unsigned int ttwu_move_balance;
#endif
#ifdef CONFIG_SCHED_DEBUG
char *name;
#endif
union {
void *private; /* used during construction */
struct rcu_head rcu; /* used during destruction */
};
unsigned int span_weight;
/*
* Span of all CPUs in this domain.
*
* NOTE: this field is variable length. (Allocated dynamically
* by attaching extra space to the end of the structure,
* depending on how many CPUs the kernel has booted up with)
*/
unsigned long span[0];
};
static inline struct cpumask *sched_domain_span(struct sched_domain *sd)
{
return to_cpumask(sd->span);
}
/* Rebuild the sched-domain trees for a new CPU partitioning (cpusets). */
extern void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
struct sched_domain_attr *dattr_new);
/* Allocate an array of sched domains, for partition_sched_domains(). */
cpumask_var_t *alloc_sched_domains(unsigned int ndoms);
void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms);
/* True if the two CPUs share a cache (same LLC domain). */
bool cpus_share_cache(int this_cpu, int that_cpu);
/* Per-topology-level callbacks: which CPUs belong to the level, and its SD_* flags. */
typedef const struct cpumask *(*sched_domain_mask_f)(int cpu);
typedef int (*sched_domain_flags_f)(void);
#define SDTL_OVERLAP 0x01
/* Per-CPU construction scaffolding for one topology level. */
struct sd_data {
struct sched_domain **__percpu sd;
struct sched_group **__percpu sg;
struct sched_group_capacity **__percpu sgc;
};
/* One level of the CPU topology description used to build sched domains. */
struct sched_domain_topology_level {
sched_domain_mask_f mask;
sched_domain_flags_f sd_flags;
int flags;
int numa_level;
struct sd_data data;
#ifdef CONFIG_SCHED_DEBUG
char *name;
#endif
};
extern void set_sched_topology(struct sched_domain_topology_level *tl);
extern void wake_up_if_idle(int cpu);
#ifdef CONFIG_SCHED_DEBUG
# define SD_INIT_NAME(type) .name = #type
#else
# define SD_INIT_NAME(type)
#endif
#else /* CONFIG_SMP */
/* Uniprocessor stubs: there are no sched domains to partition. */
struct sched_domain_attr;
static inline void
partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
struct sched_domain_attr *dattr_new)
{
}
/* With one CPU, every CPU trivially shares its cache with itself. */
static inline bool cpus_share_cache(int this_cpu, int that_cpu)
{
return true;
}
#endif /* !CONFIG_SMP */
struct io_context; /* See blkdev.h */
#ifdef ARCH_HAS_PREFETCH_SWITCH_STACK
extern void prefetch_stack(struct task_struct *t);
#else
/* No-op when the architecture has no stack-prefetch optimisation. */
static inline void prefetch_stack(struct task_struct *t) { }
#endif
struct audit_context; /* See audit.c */
struct mempolicy;
struct pipe_inode_info;
struct uts_namespace;
/* Scheduler load weight derived from a task's nice value. */
struct load_weight {
unsigned long weight;
u32 inv_weight; /* precomputed inverse to avoid divisions */
};
/*
 * The load_avg/util_avg accumulates an infinite geometric series.
 * 1) load_avg factors frequency scaling into the amount of time that a
 * sched_entity is runnable on a rq into its weight. For cfs_rq, it is the
 * aggregated such weights of all runnable and blocked sched_entities.
 * 2) util_avg factors frequency and cpu scaling into the amount of time
 * that a sched_entity is running on a CPU, in the range [0..SCHED_LOAD_SCALE].
 * For cfs_rq, it is the aggregated such times of all runnable and
 * blocked sched_entities.
 * The 64 bit load_sum can:
 * 1) for cfs_rq, afford 4353082796 (=2^64/47742/88761) entities with
 * the highest weight (=88761) always runnable, we should not overflow
 * 2) for entity, support any load.weight always runnable
 */
struct sched_avg {
u64 last_update_time, load_sum;
u32 util_sum, period_contrib;
unsigned long load_avg, util_avg;
};
#ifdef CONFIG_SCHEDSTATS
/*
 * Per-entity scheduler statistics, gathered only when CONFIG_SCHEDSTATS
 * is enabled. Times are in nanoseconds; counters are event counts.
 */
struct sched_statistics {
u64 wait_start;
u64 wait_max;
u64 wait_count;
u64 wait_sum;
u64 iowait_count;
u64 iowait_sum;
u64 sleep_start;
u64 sleep_max;
s64 sum_sleep_runtime;
u64 block_start;
u64 block_max;
u64 exec_max;
u64 slice_max;
u64 nr_migrations_cold;
u64 nr_failed_migrations_affine;
u64 nr_failed_migrations_running;
u64 nr_failed_migrations_hot;
u64 nr_forced_migrations;
u64 nr_wakeups;
u64 nr_wakeups_sync;
u64 nr_wakeups_migrate;
u64 nr_wakeups_local;
u64 nr_wakeups_remote;
u64 nr_wakeups_affine;
u64 nr_wakeups_affine_attempts;
u64 nr_wakeups_passive;
u64 nr_wakeups_idle;
};
#endif
/* CFS (fair-class) scheduling entity: a task or, with group scheduling, a group. */
struct sched_entity {
struct load_weight load; /* for load-balancing */
struct rb_node run_node; /* node in the cfs_rq's vruntime-ordered rbtree */
struct list_head group_node;
unsigned int on_rq; /* non-zero while queued on a runqueue */
u64 exec_start;
u64 sum_exec_runtime;
u64 vruntime; /* weighted virtual runtime, the CFS ordering key */
u64 prev_sum_exec_runtime;
u64 nr_migrations;
#ifdef CONFIG_SCHEDSTATS
struct sched_statistics statistics;
#endif
#ifdef CONFIG_FAIR_GROUP_SCHED
int depth;
struct sched_entity *parent;
/* rq on which this entity is (to be) queued: */
struct cfs_rq *cfs_rq;
/* rq "owned" by this entity/group: */
struct cfs_rq *my_q;
#endif
#ifdef CONFIG_SMP
/* Per entity load average tracking */
struct sched_avg avg;
#endif
};
/* Real-time (SCHED_FIFO/SCHED_RR) scheduling entity. */
struct sched_rt_entity {
struct list_head run_list;
unsigned long timeout;
unsigned long watchdog_stamp;
unsigned int time_slice; /* remaining RR time slice */
struct sched_rt_entity *back;
#ifdef CONFIG_RT_GROUP_SCHED
struct sched_rt_entity *parent;
/* rq on which this entity is (to be) queued: */
struct rt_rq *rt_rq;
/* rq "owned" by this entity/group: */
struct rt_rq *my_q;
#endif
};
/* SCHED_DEADLINE scheduling entity (CBS/EDF based). */
struct sched_dl_entity {
struct rb_node rb_node; /* node in the deadline-ordered rbtree */
/*
 * Original scheduling parameters. Copied here from sched_attr
 * during sched_setattr(), they will remain the same until
 * the next sched_setattr().
 */
u64 dl_runtime; /* maximum runtime for each instance */
u64 dl_deadline; /* relative deadline of each instance */
u64 dl_period; /* separation of two instances (period) */
u64 dl_bw; /* dl_runtime / dl_deadline */
/*
 * Actual scheduling parameters. Initialized with the values above,
 * they are continously updated during task execution. Note that
 * the remaining runtime could be < 0 in case we are in overrun.
 */
s64 runtime; /* remaining runtime for this instance */
u64 deadline; /* absolute deadline for this instance */
unsigned int flags; /* specifying the scheduler behaviour */
/*
 * Some bool flags:
 *
 * @dl_throttled tells if we exhausted the runtime. If so, the
 * task has to wait for a replenishment to be performed at the
 * next firing of dl_timer.
 *
 * @dl_new tells if a new instance arrived. If so we must
 * start executing it with full runtime and reset its absolute
 * deadline;
 *
 * @dl_boosted tells if we are boosted due to DI. If so we are
 * outside bandwidth enforcement mechanism (but only until we
 * exit the critical section);
 *
 * @dl_yielded tells if task gave up the cpu before consuming
 * all its available runtime during the last job.
 */
int dl_throttled, dl_new, dl_boosted, dl_yielded;
/*
 * Bandwidth enforcement timer. Each -deadline task has its
 * own bandwidth to be enforced, thus we need one timer per task.
 */
struct hrtimer dl_timer;
};
/*
 * Per-task RCU state flags; the union lets all four bytes be read or
 * written atomically as a single 32-bit word.
 */
union rcu_special {
struct {
u8 blocked;
u8 need_qs;
u8 exp_need_qs;
u8 pad; /* Otherwise the compiler can store garbage here. */
} b; /* Bits. */
u32 s; /* Set of bits. */
};
struct rcu_node;
/* Index of the perf event context a task belongs to (hardware vs software). */
enum perf_event_task_context {
perf_invalid_context = -1,
perf_hw_context = 0,
perf_sw_context,
perf_nr_task_contexts,
};
/* Track pages that require TLB flushes */
struct tlbflush_unmap_batch {
/*
 * Each bit set is a CPU that potentially has a TLB entry for one of
 * the PFNs being flushed. See set_tlb_ubc_flush_pending().
 */
struct cpumask cpumask;
/* True if any bit in cpumask is set */
bool flush_required;
/*
 * If true then the PTE was dirty when unmapped. The entry must be
 * flushed before IO is initiated or a stale TLB entry potentially
 * allows an update without redirtying the page.
 */
bool writable;
};
/*
 * The process descriptor: one per task (thread) in the system.
 * Field order matters for cache layout and, for 'thread', correctness
 * (see the trailing comment); do not reorder casually.
 */
struct task_struct {
volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */
void *stack; /* kernel stack */
atomic_t usage; /* reference count; see get/put_task_struct() */
unsigned int flags; /* per process flags, defined below */
unsigned int ptrace;
#ifdef CONFIG_SMP
struct llist_node wake_entry;
int on_cpu;
unsigned int wakee_flips;
unsigned long wakee_flip_decay_ts;
struct task_struct *last_wakee;
int wake_cpu;
#endif
int on_rq;
int prio, static_prio, normal_prio;
unsigned int rt_priority;
const struct sched_class *sched_class;
struct sched_entity se;
struct sched_rt_entity rt;
#ifdef CONFIG_CGROUP_SCHED
struct task_group *sched_task_group;
#endif
struct sched_dl_entity dl;
#ifdef CONFIG_PREEMPT_NOTIFIERS
/* list of struct preempt_notifier: */
struct hlist_head preempt_notifiers;
#endif
#ifdef CONFIG_BLK_DEV_IO_TRACE
unsigned int btrace_seq;
#endif
unsigned int policy; /* scheduling policy (SCHED_NORMAL, SCHED_FIFO, ...) */
int nr_cpus_allowed;
cpumask_t cpus_allowed;
#ifdef CONFIG_PREEMPT_RCU
int rcu_read_lock_nesting;
union rcu_special rcu_read_unlock_special;
struct list_head rcu_node_entry;
struct rcu_node *rcu_blocked_node;
#endif /* #ifdef CONFIG_PREEMPT_RCU */
#ifdef CONFIG_TASKS_RCU
unsigned long rcu_tasks_nvcsw;
bool rcu_tasks_holdout;
struct list_head rcu_tasks_holdout_list;
int rcu_tasks_idle_cpu;
#endif /* #ifdef CONFIG_TASKS_RCU */
#ifdef CONFIG_SCHED_INFO
struct sched_info sched_info;
#endif
struct list_head tasks; /* global task list linkage */
#ifdef CONFIG_SMP
struct plist_node pushable_tasks;
struct rb_node pushable_dl_tasks;
#endif
struct mm_struct *mm, *active_mm; /* mm is NULL for kernel threads */
/* per-thread vma caching */
u32 vmacache_seqnum;
struct vm_area_struct *vmacache[VMACACHE_SIZE];
#if defined(SPLIT_RSS_COUNTING)
struct task_rss_stat rss_stat;
#endif
/* task state */
int exit_state;
int exit_code, exit_signal;
int pdeath_signal; /* The signal sent when the parent dies */
unsigned long jobctl; /* JOBCTL_*, siglock protected */
/* Used for emulating ABI behavior of previous Linux versions */
unsigned int personality;
unsigned in_execve:1; /* Tell the LSMs that the process is doing an
 * execve */
unsigned in_iowait:1;
/* Revert to default priority/policy when forking */
unsigned sched_reset_on_fork:1;
unsigned sched_contributes_to_load:1;
unsigned sched_migrated:1;
#ifdef CONFIG_MEMCG
unsigned memcg_may_oom:1;
#endif
#ifdef CONFIG_MEMCG_KMEM
unsigned memcg_kmem_skip_account:1;
#endif
#ifdef CONFIG_COMPAT_BRK
unsigned brk_randomized:1;
#endif
unsigned long atomic_flags; /* Flags needing atomic access. */
struct restart_block restart_block;
pid_t pid;
pid_t tgid; /* thread-group id == pid of the group leader */
#ifdef CONFIG_CC_STACKPROTECTOR
/* Canary value for the -fstack-protector gcc feature */
unsigned long stack_canary;
#endif
/*
 * pointers to (original) parent process, youngest child, younger sibling,
 * older sibling, respectively. (p->father can be replaced with
 * p->real_parent->pid)
 */
struct task_struct __rcu *real_parent; /* real parent process */
struct task_struct __rcu *parent; /* recipient of SIGCHLD, wait4() reports */
/*
 * children/sibling forms the list of my natural children
 */
struct list_head children; /* list of my children */
struct list_head sibling; /* linkage in my parent's children list */
struct task_struct *group_leader; /* threadgroup leader */
/*
 * ptraced is the list of tasks this task is using ptrace on.
 * This includes both natural children and PTRACE_ATTACH targets.
 * p->ptrace_entry is p's link on the p->parent->ptraced list.
 */
struct list_head ptraced;
struct list_head ptrace_entry;
/* PID/PID hash table linkage. */
struct pid_link pids[PIDTYPE_MAX];
struct list_head thread_group;
struct list_head thread_node;
struct completion *vfork_done; /* for vfork() */
int __user *set_child_tid; /* CLONE_CHILD_SETTID */
int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
cputime_t utime, stime, utimescaled, stimescaled;
cputime_t gtime;
struct prev_cputime prev_cputime;
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
seqlock_t vtime_seqlock;
unsigned long long vtime_snap;
enum {
VTIME_SLEEPING = 0,
VTIME_USER,
VTIME_SYS,
} vtime_snap_whence;
#endif
unsigned long nvcsw, nivcsw; /* context switch counts */
u64 start_time; /* monotonic time in nsec */
u64 real_start_time; /* boot based time in nsec */
/* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */
unsigned long min_flt, maj_flt;
struct task_cputime cputime_expires;
struct list_head cpu_timers[3];
/* process credentials */
const struct cred __rcu *real_cred; /* objective and real subjective task
 * credentials (COW) */
const struct cred __rcu *cred; /* effective (overridable) subjective task
 * credentials (COW) */
char comm[TASK_COMM_LEN]; /* executable name excluding path
 - access with [gs]et_task_comm (which lock
 it with task_lock())
 - initialized normally by setup_new_exec */
/* file system info */
struct nameidata *nameidata;
#ifdef CONFIG_SYSVIPC
/* ipc stuff */
struct sysv_sem sysvsem;
struct sysv_shm sysvshm;
#endif
#ifdef CONFIG_DETECT_HUNG_TASK
/* hung task detection */
unsigned long last_switch_count;
#endif
/* filesystem information */
struct fs_struct *fs;
/* open file information */
struct files_struct *files;
/* namespaces */
struct nsproxy *nsproxy;
/* signal handlers */
struct signal_struct *signal;
struct sighand_struct *sighand;
sigset_t blocked, real_blocked;
sigset_t saved_sigmask; /* restored if set_restore_sigmask() was used */
struct sigpending pending;
unsigned long sas_ss_sp;
size_t sas_ss_size;
struct callback_head *task_works;
struct audit_context *audit_context;
#ifdef CONFIG_AUDITSYSCALL
kuid_t loginuid;
unsigned int sessionid;
#endif
struct seccomp seccomp;
/* Thread group tracking */
u32 parent_exec_id;
u32 self_exec_id;
/* Protection of (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed,
 * mempolicy */
spinlock_t alloc_lock;
/* Protection of the PI data structures: */
raw_spinlock_t pi_lock;
struct wake_q_node wake_q;
#ifdef CONFIG_RT_MUTEXES
/* PI waiters blocked on a rt_mutex held by this task */
struct rb_root pi_waiters;
struct rb_node *pi_waiters_leftmost;
/* Deadlock detection and priority inheritance handling */
struct rt_mutex_waiter *pi_blocked_on;
#endif
#ifdef CONFIG_DEBUG_MUTEXES
/* mutex deadlock detection */
struct mutex_waiter *blocked_on;
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
unsigned int irq_events;
unsigned long hardirq_enable_ip;
unsigned long hardirq_disable_ip;
unsigned int hardirq_enable_event;
unsigned int hardirq_disable_event;
int hardirqs_enabled;
int hardirq_context;
unsigned long softirq_disable_ip;
unsigned long softirq_enable_ip;
unsigned int softirq_disable_event;
unsigned int softirq_enable_event;
int softirqs_enabled;
int softirq_context;
#endif
#ifdef CONFIG_LOCKDEP
# define MAX_LOCK_DEPTH 48UL
u64 curr_chain_key;
int lockdep_depth;
unsigned int lockdep_recursion;
struct held_lock held_locks[MAX_LOCK_DEPTH];
gfp_t lockdep_reclaim_gfp;
#endif
/* journalling filesystem info */
void *journal_info;
/* stacked block device info */
struct bio_list *bio_list;
#ifdef CONFIG_BLOCK
/* stack plugging */
struct blk_plug *plug;
#endif
/* VM state */
struct reclaim_state *reclaim_state;
struct backing_dev_info *backing_dev_info;
struct io_context *io_context;
unsigned long ptrace_message;
siginfo_t *last_siginfo; /* For ptrace use. */
struct task_io_accounting ioac;
#if defined(CONFIG_TASK_XACCT)
u64 acct_rss_mem1; /* accumulated rss usage */
u64 acct_vm_mem1; /* accumulated virtual memory usage */
cputime_t acct_timexpd; /* stime + utime since last update */
#endif
#ifdef CONFIG_CPUSETS
nodemask_t mems_allowed; /* Protected by alloc_lock */
seqcount_t mems_allowed_seq; /* Seqence no to catch updates */
int cpuset_mem_spread_rotor;
int cpuset_slab_spread_rotor;
#endif
#ifdef CONFIG_CGROUPS
/* Control Group info protected by css_set_lock */
struct css_set __rcu *cgroups;
/* cg_list protected by css_set_lock and tsk->alloc_lock */
struct list_head cg_list;
#endif
#ifdef CONFIG_FUTEX
struct robust_list_head __user *robust_list;
#ifdef CONFIG_COMPAT
struct compat_robust_list_head __user *compat_robust_list;
#endif
struct list_head pi_state_list;
struct futex_pi_state *pi_state_cache;
#endif
#ifdef CONFIG_PERF_EVENTS
struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts];
struct mutex perf_event_mutex;
struct list_head perf_event_list;
#endif
#ifdef CONFIG_DEBUG_PREEMPT
unsigned long preempt_disable_ip;
#endif
#ifdef CONFIG_NUMA
struct mempolicy *mempolicy; /* Protected by alloc_lock */
short il_next;
short pref_node_fork;
#endif
#ifdef CONFIG_NUMA_BALANCING
int numa_scan_seq;
unsigned int numa_scan_period;
unsigned int numa_scan_period_max;
int numa_preferred_nid;
unsigned long numa_migrate_retry;
u64 node_stamp; /* migration stamp */
u64 last_task_numa_placement;
u64 last_sum_exec_runtime;
struct callback_head numa_work;
struct list_head numa_entry;
struct numa_group *numa_group;
/*
 * numa_faults is an array split into four regions:
 * faults_memory, faults_cpu, faults_memory_buffer, faults_cpu_buffer
 * in this precise order.
 *
 * faults_memory: Exponential decaying average of faults on a per-node
 * basis. Scheduling placement decisions are made based on these
 * counts. The values remain static for the duration of a PTE scan.
 * faults_cpu: Track the nodes the process was running on when a NUMA
 * hinting fault was incurred.
 * faults_memory_buffer and faults_cpu_buffer: Record faults per node
 * during the current scan window. When the scan completes, the counts
 * in faults_memory and faults_cpu decay and these values are copied.
 */
unsigned long *numa_faults;
unsigned long total_numa_faults;
/*
 * numa_faults_locality tracks if faults recorded during the last
 * scan window were remote/local or failed to migrate. The task scan
 * period is adapted based on the locality of the faults with different
 * weights depending on whether they were shared or private faults
 */
unsigned long numa_faults_locality[3];
unsigned long numa_pages_migrated;
#endif /* CONFIG_NUMA_BALANCING */
#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
struct tlbflush_unmap_batch tlb_ubc;
#endif
struct rcu_head rcu;
/*
 * cache last used pipe for splice
 */
struct pipe_inode_info *splice_pipe;
struct page_frag task_frag;
#ifdef CONFIG_TASK_DELAY_ACCT
struct task_delay_info *delays;
#endif
#ifdef CONFIG_FAULT_INJECTION
int make_it_fail;
#endif
/*
 * when (nr_dirtied >= nr_dirtied_pause), it's time to call
 * balance_dirty_pages() for some dirty throttling pause
 */
int nr_dirtied;
int nr_dirtied_pause;
unsigned long dirty_paused_when; /* start of a write-and-pause period */
#ifdef CONFIG_LATENCYTOP
int latency_record_count;
struct latency_record latency_record[LT_SAVECOUNT];
#endif
/*
 * time slack values; these are used to round up poll() and
 * select() etc timeout values. These are in nanoseconds.
 */
unsigned long timer_slack_ns;
unsigned long default_timer_slack_ns;
#ifdef CONFIG_KASAN
unsigned int kasan_depth;
#endif
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/* Index of current stored address in ret_stack */
int curr_ret_stack;
/* Stack of return addresses for return function tracing */
struct ftrace_ret_stack *ret_stack;
/* time stamp for last schedule */
unsigned long long ftrace_timestamp;
/*
 * Number of functions that haven't been traced
 * because of depth overrun.
 */
atomic_t trace_overrun;
/* Pause for the tracing */
atomic_t tracing_graph_pause;
#endif
#ifdef CONFIG_TRACING
/* state flags for use by tracers */
unsigned long trace;
/* bitmask and counter of trace recursion */
unsigned long trace_recursion;
#endif /* CONFIG_TRACING */
#ifdef CONFIG_MEMCG
struct mem_cgroup *memcg_in_oom;
gfp_t memcg_oom_gfp_mask;
int memcg_oom_order;
/* number of pages to reclaim on returning to userland */
unsigned int memcg_nr_pages_over_high;
#endif
#ifdef CONFIG_UPROBES
struct uprobe_task *utask;
#endif
#if defined(CONFIG_BCACHE) || defined(CONFIG_BCACHE_MODULE)
unsigned int sequential_io;
unsigned int sequential_io_avg;
#endif
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
unsigned long task_state_change;
#endif
int pagefault_disabled;
/* CPU-specific state of this task */
struct thread_struct thread;
/*
 * WARNING: on x86, 'thread_struct' contains a variable-sized
 * structure. It *MUST* be at the end of 'task_struct'.
 *
 * Do not put anything below here!
 */
};
#ifdef CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT
/* Some architectures append arch state, so the true size is set at boot. */
extern int arch_task_struct_size __read_mostly;
#else
# define arch_task_struct_size (sizeof(struct task_struct))
#endif
/* Future-safe accessor for struct task_struct's cpus_allowed. */
#define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
/* task_numa_fault() flag bits describing the outcome/locality of a fault. */
#define TNF_MIGRATED 0x01
#define TNF_NO_GROUP 0x02
#define TNF_SHARED 0x04
#define TNF_FAULT_LOCAL 0x08
#define TNF_MIGRATE_FAIL 0x10
#ifdef CONFIG_NUMA_BALANCING
extern void task_numa_fault(int last_node, int node, int pages, int flags);
extern pid_t task_numa_group_id(struct task_struct *p);
extern void set_numabalancing_state(bool enabled);
extern void task_numa_free(struct task_struct *p);
extern bool should_numa_migrate_memory(struct task_struct *p, struct page *page,
int src_nid, int dst_cpu);
#else
/* No-op stubs when automatic NUMA balancing is compiled out. */
static inline void task_numa_fault(int last_node, int node, int pages,
int flags)
{
}
static inline pid_t task_numa_group_id(struct task_struct *p)
{
return 0;
}
static inline void set_numabalancing_state(bool enabled)
{
}
static inline void task_numa_free(struct task_struct *p)
{
}
static inline bool should_numa_migrate_memory(struct task_struct *p,
struct page *page, int src_nid, int dst_cpu)
{
return true;
}
#endif
/* Return the struct pid of the task itself. */
static inline struct pid *task_pid(struct task_struct *task)
{
return task->pids[PIDTYPE_PID].pid;
}
/* Return the struct pid of the task's thread group (its leader's pid). */
static inline struct pid *task_tgid(struct task_struct *task)
{
return task->group_leader->pids[PIDTYPE_PID].pid;
}
/*
 * Without tasklist or rcu lock it is not safe to dereference
 * the result of task_pgrp/task_session even if task == current,
 * we can race with another thread doing sys_setsid/sys_setpgid.
 */
static inline struct pid *task_pgrp(struct task_struct *task)
{
return task->group_leader->pids[PIDTYPE_PGID].pid;
}
static inline struct pid *task_session(struct task_struct *task)
{
return task->group_leader->pids[PIDTYPE_SID].pid;
}
struct pid_namespace;
/*
 * the helpers to get the task's different pids as they are seen
 * from various namespaces
 *
 * task_xid_nr() : global id, i.e. the id seen from the init namespace;
 * task_xid_vnr() : virtual id, i.e. the id seen from the pid namespace of
 * current.
 * task_xid_nr_ns() : id seen from the ns specified;
 *
 * set_task_vxid() : assigns a virtual id to a task;
 *
 * see also pid_nr() etc in include/linux/pid.h
 */
pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
struct pid_namespace *ns);
/* Global pid, as seen from the init pid namespace. */
static inline pid_t task_pid_nr(struct task_struct *tsk)
{
return tsk->pid;
}
/* Pid as seen from the given pid namespace. */
static inline pid_t task_pid_nr_ns(struct task_struct *tsk,
struct pid_namespace *ns)
{
return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns);
}
/* Pid as seen from current's pid namespace. */
static inline pid_t task_pid_vnr(struct task_struct *tsk)
{
return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL);
}
/* Global thread-group id (pid of the group leader). */
static inline pid_t task_tgid_nr(struct task_struct *tsk)
{
return tsk->tgid;
}
pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);
/* Thread-group id as seen from current's pid namespace. */
static inline pid_t task_tgid_vnr(struct task_struct *tsk)
{
return pid_vnr(task_tgid(tsk));
}
static inline int pid_alive(const struct task_struct *p);
/*
 * Parent's tgid as seen from @ns; returns 0 if the task is already
 * dead (its pid links were detached), guarded by RCU.
 */
static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns)
{
pid_t pid = 0;
rcu_read_lock();
if (pid_alive(tsk))
pid = task_tgid_nr_ns(rcu_dereference(tsk->real_parent), ns);
rcu_read_unlock();
return pid;
}
/* Parent's tgid as seen from the init pid namespace. */
static inline pid_t task_ppid_nr(const struct task_struct *tsk)
{
return task_ppid_nr_ns(tsk, &init_pid_ns);
}
/* Process-group id as seen from the given / current namespace. */
static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk,
struct pid_namespace *ns)
{
return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);
}
static inline pid_t task_pgrp_vnr(struct task_struct *tsk)
{
return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL);
}
/* Session id as seen from the given / current namespace. */
static inline pid_t task_session_nr_ns(struct task_struct *tsk,
struct pid_namespace *ns)
{
return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns);
}
static inline pid_t task_session_vnr(struct task_struct *tsk)
{
return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL);
}
/* obsolete, do not use */
static inline pid_t task_pgrp_nr(struct task_struct *tsk)
{
return task_pgrp_nr_ns(tsk, &init_pid_ns);
}
/**
 * pid_alive - check that a task structure is not stale
 * @p: Task structure to be checked.
 *
 * Test if a process is not yet dead (at most zombie state)
 * If pid_alive fails, then pointers within the task structure
 * can be stale and must not be dereferenced.
 *
 * Return: 1 if the process is alive. 0 otherwise.
 */
static inline int pid_alive(const struct task_struct *p)
{
return p->pids[PIDTYPE_PID].pid != NULL;
}
/**
 * is_global_init - check if a task structure is init
 * @tsk: Task structure to be checked.
 *
 * Check if a task structure is the first user space task the kernel created.
 *
 * Return: 1 if the task structure is init. 0 otherwise.
 */
static inline int is_global_init(struct task_struct *tsk)
{
return tsk->pid == 1;
}
extern struct pid *cad_pid;
extern void free_task(struct task_struct *tsk);
/* Take a reference on a task; paired with put_task_struct(). */
#define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)
extern void __put_task_struct(struct task_struct *t);
/* Drop a reference; frees the task when the last reference goes away. */
static inline void put_task_struct(struct task_struct *t)
{
if (atomic_dec_and_test(&t->usage))
__put_task_struct(t);
}
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
extern void task_cputime(struct task_struct *t,
cputime_t *utime, cputime_t *stime);
extern void task_cputime_scaled(struct task_struct *t,
cputime_t *utimescaled, cputime_t *stimescaled);
extern cputime_t task_gtime(struct task_struct *t);
#else
/* Read user/system CPU time; either output pointer may be NULL. */
static inline void task_cputime(struct task_struct *t,
cputime_t *utime, cputime_t *stime)
{
if (utime)
*utime = t->utime;
if (stime)
*stime = t->stime;
}
/* Frequency-scaled variant of task_cputime(). */
static inline void task_cputime_scaled(struct task_struct *t,
cputime_t *utimescaled,
cputime_t *stimescaled)
{
if (utimescaled)
*utimescaled = t->utimescaled;
if (stimescaled)
*stimescaled = t->stimescaled;
}
/* Guest (virtual CPU) time accumulated by the task. */
static inline cputime_t task_gtime(struct task_struct *t)
{
return t->gtime;
}
#endif
extern void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st);
extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st);
/*
 * Per process flags
 */
#define PF_EXITING 0x00000004 /* getting shut down */
#define PF_EXITPIDONE 0x00000008 /* pi exit done on shut down */
#define PF_VCPU 0x00000010 /* I'm a virtual CPU */
#define PF_WQ_WORKER 0x00000020 /* I'm a workqueue worker */
#define PF_FORKNOEXEC 0x00000040 /* forked but didn't exec */
#define PF_MCE_PROCESS 0x00000080 /* process policy on mce errors */
#define PF_SUPERPRIV 0x00000100 /* used super-user privileges */
#define PF_DUMPCORE 0x00000200 /* dumped core */
#define PF_SIGNALED 0x00000400 /* killed by a signal */
#define PF_MEMALLOC 0x00000800 /* Allocating memory */
#define PF_NPROC_EXCEEDED 0x00001000 /* set_user noticed that RLIMIT_NPROC was exceeded */
#define PF_USED_MATH 0x00002000 /* if unset the fpu must be initialized before use */
#define PF_USED_ASYNC 0x00004000 /* used async_schedule*(), used by module init */
#define PF_NOFREEZE 0x00008000 /* this thread should not be frozen */
#define PF_FROZEN 0x00010000 /* frozen for system suspend */
#define PF_FSTRANS 0x00020000 /* inside a filesystem transaction */
#define PF_KSWAPD 0x00040000 /* I am kswapd */
#define PF_MEMALLOC_NOIO 0x00080000 /* Allocating memory without IO involved */
#define PF_LESS_THROTTLE 0x00100000 /* Throttle me less: I clean memory */
#define PF_KTHREAD 0x00200000 /* I am a kernel thread */
#define PF_RANDOMIZE 0x00400000 /* randomize virtual address space */
#define PF_SWAPWRITE 0x00800000 /* Allowed to write to swap */
#define PF_NO_SETAFFINITY 0x04000000 /* Userland is not allowed to meddle with cpus_allowed */
#define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */
#define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */
#define PF_FREEZER_SKIP 0x40000000 /* Freezer should not count it as freezable */
#define PF_SUSPEND_TASK 0x80000000 /* this thread called freeze_processes and should not be frozen */
/*
 * Only the _current_ task can read/write to tsk->flags, but other
 * tasks can access tsk->flags in readonly mode for example
 * with tsk_used_math (like during threaded core dumping).
 * There is however an exception to this rule during ptrace
 * or during fork: the ptracer task is allowed to write to the
 * child->flags of its traced child (same goes for fork, the parent
 * can write to the child->flags), because we're guaranteed the
 * child is not running and in turn not changing child->flags
 * at the same time the parent does it.
 */
#define clear_stopped_child_used_math(child) do { (child)->flags &= ~PF_USED_MATH; } while (0)
#define set_stopped_child_used_math(child) do { (child)->flags |= PF_USED_MATH; } while (0)
#define clear_used_math() clear_stopped_child_used_math(current)
#define set_used_math() set_stopped_child_used_math(current)
/* Set or clear PF_USED_MATH based on a condition, leaving other bits intact. */
#define conditional_stopped_child_used_math(condition, child) \
do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0)
#define conditional_used_math(condition) \
conditional_stopped_child_used_math(condition, current)
/* Copy current's PF_USED_MATH bit into a stopped child. */
#define copy_to_stopped_child_used_math(child) \
do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0)
/* NOTE: this will return 0 or PF_USED_MATH, it will never return 1 */
#define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
#define used_math() tsk_used_math(current)
/* __GFP_IO isn't allowed if PF_MEMALLOC_NOIO is set in current->flags
 * __GFP_FS is also cleared as it implies __GFP_IO.
 */
static inline gfp_t memalloc_noio_flags(gfp_t flags)
{
if (unlikely(current->flags & PF_MEMALLOC_NOIO))
flags &= ~(__GFP_IO | __GFP_FS);
return flags;
}
/*
 * Enter a no-IO allocation scope; returns the previous PF_MEMALLOC_NOIO
 * state, to be passed to memalloc_noio_restore() when leaving the scope.
 */
static inline unsigned int memalloc_noio_save(void)
{
unsigned int flags = current->flags & PF_MEMALLOC_NOIO;
current->flags |= PF_MEMALLOC_NOIO;
return flags;
}
/* Restore the PF_MEMALLOC_NOIO bit saved by memalloc_noio_save(). */
static inline void memalloc_noio_restore(unsigned int flags)
{
current->flags = (current->flags & ~PF_MEMALLOC_NOIO) | flags;
}
/* Per-process atomic flags. */
#define PFA_NO_NEW_PRIVS 0 /* May not gain new privileges. */
#define PFA_SPREAD_PAGE 1 /* Spread page cache over cpuset */
#define PFA_SPREAD_SLAB 2 /* Spread some slab caches over cpuset */
/* Generators for atomic test/set/clear accessors of the PFA_* bits. */
#define TASK_PFA_TEST(name, func) \
static inline bool task_##func(struct task_struct *p) \
{ return test_bit(PFA_##name, &p->atomic_flags); }
#define TASK_PFA_SET(name, func) \
static inline void task_set_##func(struct task_struct *p) \
{ set_bit(PFA_##name, &p->atomic_flags); }
#define TASK_PFA_CLEAR(name, func) \
static inline void task_clear_##func(struct task_struct *p) \
{ clear_bit(PFA_##name, &p->atomic_flags); }
/* NO_NEW_PRIVS deliberately has no CLEAR accessor: it is one-way. */
TASK_PFA_TEST(NO_NEW_PRIVS, no_new_privs)
TASK_PFA_SET(NO_NEW_PRIVS, no_new_privs)
TASK_PFA_TEST(SPREAD_PAGE, spread_page)
TASK_PFA_SET(SPREAD_PAGE, spread_page)
TASK_PFA_CLEAR(SPREAD_PAGE, spread_page)
TASK_PFA_TEST(SPREAD_SLAB, spread_slab)
TASK_PFA_SET(SPREAD_SLAB, spread_slab)
TASK_PFA_CLEAR(SPREAD_SLAB, spread_slab)
/*
 * task->jobctl flags
 */
#define JOBCTL_STOP_SIGMASK 0xffff /* signr of the last group stop */
#define JOBCTL_STOP_DEQUEUED_BIT 16 /* stop signal dequeued */
#define JOBCTL_STOP_PENDING_BIT 17 /* task should stop for group stop */
#define JOBCTL_STOP_CONSUME_BIT 18 /* consume group stop count */
#define JOBCTL_TRAP_STOP_BIT 19 /* trap for STOP */
#define JOBCTL_TRAP_NOTIFY_BIT 20 /* trap for NOTIFY */
#define JOBCTL_TRAPPING_BIT 21 /* switching to TRACED */
#define JOBCTL_LISTENING_BIT 22 /* ptracer is listening for events */
#define JOBCTL_STOP_DEQUEUED (1UL << JOBCTL_STOP_DEQUEUED_BIT)
#define JOBCTL_STOP_PENDING (1UL << JOBCTL_STOP_PENDING_BIT)
#define JOBCTL_STOP_CONSUME (1UL << JOBCTL_STOP_CONSUME_BIT)
#define JOBCTL_TRAP_STOP (1UL << JOBCTL_TRAP_STOP_BIT)
#define JOBCTL_TRAP_NOTIFY (1UL << JOBCTL_TRAP_NOTIFY_BIT)
#define JOBCTL_TRAPPING (1UL << JOBCTL_TRAPPING_BIT)
#define JOBCTL_LISTENING (1UL << JOBCTL_LISTENING_BIT)
#define JOBCTL_TRAP_MASK (JOBCTL_TRAP_STOP | JOBCTL_TRAP_NOTIFY)
#define JOBCTL_PENDING_MASK (JOBCTL_STOP_PENDING | JOBCTL_TRAP_MASK)
extern bool task_set_jobctl_pending(struct task_struct *task,
unsigned long mask);
extern void task_clear_jobctl_trapping(struct task_struct *task);
extern void task_clear_jobctl_pending(struct task_struct *task,
unsigned long mask);
static inline void rcu_copy_process(struct task_struct *p)
{
#ifdef CONFIG_PREEMPT_RCU
p->rcu_read_lock_nesting = 0;
p->rcu_read_unlock_special.s = 0;
p->rcu_blocked_node = NULL;
INIT_LIST_HEAD(&p->rcu_node_entry);
#endif /* #ifdef CONFIG_PREEMPT_RCU */
#ifdef CONFIG_TASKS_RCU
p->rcu_tasks_holdout = false;
INIT_LIST_HEAD(&p->rcu_tasks_holdout_list);
p->rcu_tasks_idle_cpu = -1;
#endif /* #ifdef CONFIG_TASKS_RCU */
}
/*
 * tsk_restore_flags - restore selected PF_* bits in task->flags.
 * Bits set in @flags are copied from @orig_flags; all other bits keep
 * their current value.  Note: plain (non-atomic) read-modify-write.
 */
static inline void tsk_restore_flags(struct task_struct *task,
unsigned long orig_flags, unsigned long flags)
{
task->flags &= ~flags;
task->flags |= orig_flags & flags;
}
/* cpuset / CPU-affinity interfaces. */
extern int cpuset_cpumask_can_shrink(const struct cpumask *cur,
const struct cpumask *trial);
extern int task_can_attach(struct task_struct *p,
const struct cpumask *cs_cpus_allowed);
#ifdef CONFIG_SMP
extern void do_set_cpus_allowed(struct task_struct *p,
const struct cpumask *new_mask);
extern int set_cpus_allowed_ptr(struct task_struct *p,
const struct cpumask *new_mask);
#else
/* UP: there is only CPU 0, so affinity changes are no-ops ... */
static inline void do_set_cpus_allowed(struct task_struct *p,
const struct cpumask *new_mask)
{
}
/* ... and any mask that excludes CPU 0 is invalid. */
static inline int set_cpus_allowed_ptr(struct task_struct *p,
const struct cpumask *new_mask)
{
if (!cpumask_test_cpu(0, new_mask))
return -EINVAL;
return 0;
}
#endif
/* Load-average idle accounting hooks; only needed when ticks can stop. */
#ifdef CONFIG_NO_HZ_COMMON
void calc_load_enter_idle(void);
void calc_load_exit_idle(void);
#else
static inline void calc_load_enter_idle(void) { }
static inline void calc_load_exit_idle(void) { }
#endif /* CONFIG_NO_HZ_COMMON */
/*
 * Do not use outside of architecture code which knows its limitations.
 *
 * sched_clock() has no promise of monotonicity or bounded drift between
 * CPUs, use (which you should not) requires disabling IRQs.
 *
 * Please use one of the three interfaces below.
 */
extern unsigned long long notrace sched_clock(void);
/*
 * See the comment in kernel/sched/clock.c
 */
extern u64 cpu_clock(int cpu);
extern u64 local_clock(void);
extern u64 running_clock(void);
extern u64 sched_clock_cpu(int cpu);
extern void sched_clock_init(void);
#ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
/* Stable sched_clock: the tick/idle notification hooks are no-ops. */
static inline void sched_clock_tick(void)
{
}
static inline void sched_clock_idle_sleep_event(void)
{
}
static inline void sched_clock_idle_wakeup_event(u64 delta_ns)
{
}
#else
/*
 * Architectures can set this to 1 if they have specified
 * CONFIG_HAVE_UNSTABLE_SCHED_CLOCK in their arch Kconfig,
 * but then during bootup it turns out that sched_clock()
 * is reliable after all:
 */
extern int sched_clock_stable(void);
extern void set_sched_clock_stable(void);
extern void clear_sched_clock_stable(void);
extern void sched_clock_tick(void);
extern void sched_clock_idle_sleep_event(void);
extern void sched_clock_idle_wakeup_event(u64 delta_ns);
#endif
#ifdef CONFIG_IRQ_TIME_ACCOUNTING
/*
 * An i/f to runtime opt-in for irq time accounting based off of sched_clock.
 * The reason for this explicit opt-in is not to have perf penalty with
 * slow sched_clocks.
 */
extern void enable_sched_clock_irqtime(void);
extern void disable_sched_clock_irqtime(void);
#else
static inline void enable_sched_clock_irqtime(void) {}
static inline void disable_sched_clock_irqtime(void) {}
#endif
/* Total CPU time consumed by @task, in nanoseconds. */
extern unsigned long long
task_sched_runtime(struct task_struct *task);
/* sched_exec is called by processes performing an exec */
#ifdef CONFIG_SMP
extern void sched_exec(void);
#else
#define sched_exec() {}
#endif
extern void sched_clock_idle_sleep_event(void);
extern void sched_clock_idle_wakeup_event(u64 delta_ns);
/* Called from the dying idle task on CPU hot-unplug. */
#ifdef CONFIG_HOTPLUG_CPU
extern void idle_task_exit(void);
#else
static inline void idle_task_exit(void) {}
#endif
#if defined(CONFIG_NO_HZ_COMMON) && defined(CONFIG_SMP)
extern void wake_up_nohz_cpu(int cpu);
#else
static inline void wake_up_nohz_cpu(int cpu) { }
#endif
#ifdef CONFIG_NO_HZ_FULL
extern bool sched_can_stop_tick(void);
extern u64 scheduler_tick_max_deferment(void);
#else
/* Without NO_HZ_FULL the scheduler tick can never be stopped. */
static inline bool sched_can_stop_tick(void) { return false; }
#endif
/* Autogroup scheduling hooks; all no-ops when CONFIG_SCHED_AUTOGROUP is off. */
#ifdef CONFIG_SCHED_AUTOGROUP
extern void sched_autogroup_create_attach(struct task_struct *p);
extern void sched_autogroup_detach(struct task_struct *p);
extern void sched_autogroup_fork(struct signal_struct *sig);
extern void sched_autogroup_exit(struct signal_struct *sig);
#ifdef CONFIG_PROC_FS
extern void proc_sched_autogroup_show_task(struct task_struct *p, struct seq_file *m);
extern int proc_sched_autogroup_set_nice(struct task_struct *p, int nice);
#endif
#else
static inline void sched_autogroup_create_attach(struct task_struct *p) { }
static inline void sched_autogroup_detach(struct task_struct *p) { }
static inline void sched_autogroup_fork(struct signal_struct *sig) { }
static inline void sched_autogroup_exit(struct signal_struct *sig) { }
#endif
extern int yield_to(struct task_struct *p, bool preempt);
extern void set_user_nice(struct task_struct *p, long nice);
extern int task_prio(const struct task_struct *p);
/**
 * task_nice - return the nice value of a given task.
 * @p: the task in question.
 *
 * Return: The nice value [ -20 ... 0 ... 19 ].
 */
static inline int task_nice(const struct task_struct *p)
{
/* Derived from static_prio, so unaffected by PI boosting. */
return PRIO_TO_NICE((p)->static_prio);
}
extern int can_nice(const struct task_struct *p, const int nice);
extern int task_curr(const struct task_struct *p);
extern int idle_cpu(int cpu);
/* Scheduling policy/parameter setters; *_nocheck skips permission checks. */
extern int sched_setscheduler(struct task_struct *, int,
const struct sched_param *);
extern int sched_setscheduler_nocheck(struct task_struct *, int,
const struct sched_param *);
extern int sched_setattr(struct task_struct *,
const struct sched_attr *);
extern struct task_struct *idle_task(int cpu);
/**
 * is_idle_task - is the specified task an idle task?
 * @p: the task in question.
 *
 * Return: 1 if @p is an idle task. 0 otherwise.
 */
static inline bool is_idle_task(const struct task_struct *p)
{
/* Only the per-CPU idle tasks have PID 0. */
return p->pid == 0;
}
extern struct task_struct *curr_task(int cpu);
extern void set_curr_task(int cpu, struct task_struct *p);
void yield(void);
/* A task's kernel stack and its thread_info share one THREAD_SIZE chunk. */
union thread_union {
struct thread_info thread_info;
unsigned long stack[THREAD_SIZE/sizeof(long)];
};
#ifndef __HAVE_ARCH_KSTACK_END
/*
 * kstack_end - true if @addr is the last usable slot of the kernel stack.
 * Generic version; architectures may override via __HAVE_ARCH_KSTACK_END.
 */
static inline int kstack_end(void *addr)
{
/* Reliable end of stack detection:
 * Some APM bios versions misalign the stack
 */
return !(((unsigned long)addr+sizeof(void*)-1) & (THREAD_SIZE-sizeof(void*)));
}
#endif
/* The statically allocated state of the initial (PID 1 ancestor) task. */
extern union thread_union init_thread_union;
extern struct task_struct init_task;
extern struct mm_struct init_mm;
extern struct pid_namespace init_pid_ns;
/*
 * find a task by one of its numerical ids
 *
 * find_task_by_pid_ns():
 * finds a task by its pid in the specified namespace
 * find_task_by_vpid():
 * finds a task by its virtual pid
 *
 * see also find_vpid() etc in include/linux/pid.h
 */
extern struct task_struct *find_task_by_vpid(pid_t nr);
extern struct task_struct *find_task_by_pid_ns(pid_t nr,
struct pid_namespace *ns);
/* per-UID process charging. */
extern struct user_struct * alloc_uid(kuid_t);
/* get_uid - take a reference on @u; pair with free_uid(). */
static inline struct user_struct *get_uid(struct user_struct *u)
{
atomic_inc(&u->__count);
return u;
}
extern void free_uid(struct user_struct *);
#include <asm/current.h>
extern void xtime_update(unsigned long ticks);
/* Task wakeup primitives; wake_up_state() only wakes tasks in @state. */
extern int wake_up_state(struct task_struct *tsk, unsigned int state);
extern int wake_up_process(struct task_struct *tsk);
extern void wake_up_new_task(struct task_struct *tsk);
#ifdef CONFIG_SMP
extern void kick_process(struct task_struct *tsk);
#else
/* UP: nothing to kick — the task can only be on this CPU. */
static inline void kick_process(struct task_struct *tsk) { }
#endif
extern int sched_fork(unsigned long clone_flags, struct task_struct *p);
extern void sched_dead(struct task_struct *p);
extern void proc_caches_init(void);
/* Signal manipulation helpers implemented in kernel/signal.c. */
extern void flush_signals(struct task_struct *);
extern void ignore_signals(struct task_struct *);
extern void flush_signal_handlers(struct task_struct *, int force_default);
extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info);
/*
 * kernel_dequeue_signal - dequeue one pending signal for the current task.
 * @info: where to store the signal details, or NULL if the caller does not
 *        care (a throwaway buffer is used instead).
 * Returns the dequeued signal number (see dequeue_signal()).
 */
static inline int kernel_dequeue_signal(siginfo_t *info)
{
struct task_struct *tsk = current;
siginfo_t __info;
int ret;
/* siglock protects the pending-signal queues. */
spin_lock_irq(&tsk->sighand->siglock);
ret = dequeue_signal(tsk, &tsk->blocked, info ?: &__info);
spin_unlock_irq(&tsk->sighand->siglock);
return ret;
}
/*
 * kernel_signal_stop - put the current task into job-control stop.
 * If a group-stop signal has been dequeued (JOBCTL_STOP_DEQUEUED),
 * mark the task TASK_STOPPED under siglock, then schedule() away.
 */
static inline void kernel_signal_stop(void)
{
	/* Fix: "&current" was mojibake-corrupted to "¤t" (HTML &curren;). */
	spin_lock_irq(&current->sighand->siglock);
	if (current->jobctl & JOBCTL_STOP_DEQUEUED)
		__set_current_state(TASK_STOPPED);
	spin_unlock_irq(&current->sighand->siglock);
	schedule();
}
/* Task teardown and signal-delivery primitives (kernel/signal.c, kernel/exit.c). */
extern void release_task(struct task_struct * p);
extern int send_sig_info(int, struct siginfo *, struct task_struct *);
extern int force_sigsegv(int, struct task_struct *);
extern int force_sig_info(int, struct siginfo *, struct task_struct *);
extern int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp);
extern int kill_pid_info(int sig, struct siginfo *info, struct pid *pid);
extern int kill_pid_info_as_cred(int, struct siginfo *, struct pid *,
const struct cred *, u32);
extern int kill_pgrp(struct pid *pid, int sig, int priv);
extern int kill_pid(struct pid *pid, int sig, int priv);
extern int kill_proc_info(int, struct siginfo *, pid_t);
extern __must_check bool do_notify_parent(struct task_struct *, int);
extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent);
extern void force_sig(int, struct task_struct *);
extern int send_sig(int, struct task_struct *, int);
extern int zap_other_threads(struct task_struct *p);
extern struct sigqueue *sigqueue_alloc(void);
extern void sigqueue_free(struct sigqueue *);
extern int send_sigqueue(struct sigqueue *, struct task_struct *, int group);
extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *);
static inline void restore_saved_sigmask(void)
{
if (test_and_clear_restore_sigmask())
__set_current_blocked(¤t->saved_sigmask);
}
static inline sigset_t *sigmask_to_save(void)
{
sigset_t *res = ¤t->blocked;
if (unlikely(test_restore_sigmask()))
res = ¤t->saved_sigmask;
return res;
}
/* Send @sig to the Ctrl-Alt-Del (cad_pid) process. */
static inline int kill_cad_pid(int sig, int priv)
{
return kill_pid(cad_pid, sig, priv);
}
/* These can be the second arg to send_sig_info/send_group_sig_info. */
#define SEND_SIG_NOINFO ((struct siginfo *) 0)
#define SEND_SIG_PRIV ((struct siginfo *) 1)
#define SEND_SIG_FORCED ((struct siginfo *) 2)
/*
 * True if we are on the alternate signal stack.
 */
static inline int on_sig_stack(unsigned long sp)
{
/* Bounds test differs by stack growth direction. */
#ifdef CONFIG_STACK_GROWSUP
return sp >= current->sas_ss_sp &&
sp - current->sas_ss_sp < current->sas_ss_size;
#else
return sp > current->sas_ss_sp &&
sp - current->sas_ss_sp <= current->sas_ss_size;
#endif
}
/* SS_DISABLE if no altstack is configured, SS_ONSTACK if @sp is on it. */
static inline int sas_ss_flags(unsigned long sp)
{
if (!current->sas_ss_size)
return SS_DISABLE;
return on_sig_stack(sp) ? SS_ONSTACK : 0;
}
/*
 * sigsp - pick the stack pointer to deliver a signal on: the alternate
 * stack when SA_ONSTACK is requested and we are not already on it,
 * otherwise the given @sp.
 */
static inline unsigned long sigsp(unsigned long sp, struct ksignal *ksig)
{
if (unlikely((ksig->ka.sa.sa_flags & SA_ONSTACK)) && ! sas_ss_flags(sp))
#ifdef CONFIG_STACK_GROWSUP
return current->sas_ss_sp;
#else
return current->sas_ss_sp + current->sas_ss_size;
#endif
return sp;
}
/*
 * Routines for handling mm_structs
 */
extern struct mm_struct * mm_alloc(void);
/* mmdrop drops the mm and the page tables */
extern void __mmdrop(struct mm_struct *);
/* Drop a mm_count reference; frees the mm when it reaches zero. */
static inline void mmdrop(struct mm_struct * mm)
{
if (unlikely(atomic_dec_and_test(&mm->mm_count)))
__mmdrop(mm);
}
/* mmput gets rid of the mappings and all user-space */
extern void mmput(struct mm_struct *);
/* Grab a reference to a task's mm, if it is not already going away */
extern struct mm_struct *get_task_mm(struct task_struct *task);
/*
 * Grab a reference to a task's mm, if it is not already going away
 * and ptrace_may_access with the mode parameter passed to it
 * succeeds.
 */
extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode);
/* Remove the current tasks stale references to the old mm_struct */
extern void mm_release(struct task_struct *, struct mm_struct *);
#ifdef CONFIG_HAVE_COPY_THREAD_TLS
extern int copy_thread_tls(unsigned long, unsigned long, unsigned long,
struct task_struct *, unsigned long);
#else
extern int copy_thread(unsigned long, unsigned long, unsigned long,
struct task_struct *);
/* Architectures that haven't opted into copy_thread_tls get the tls argument
 * via pt_regs, so ignore the tls argument passed via C. */
static inline int copy_thread_tls(
unsigned long clone_flags, unsigned long sp, unsigned long arg,
struct task_struct *p, unsigned long tls)
{
/* @tls is deliberately dropped here — see comment above. */
return copy_thread(clone_flags, sp, arg, p);
}
#endif
/* Thread/process teardown and exec entry points. */
extern void flush_thread(void);
extern void exit_thread(void);
extern void exit_files(struct task_struct *);
extern void __cleanup_sighand(struct sighand_struct *);
extern void exit_itimers(struct signal_struct *);
extern void flush_itimer_signals(void);
extern void do_group_exit(int);
extern int do_execve(struct filename *,
const char __user * const __user *,
const char __user * const __user *);
extern int do_execveat(int, struct filename *,
const char __user * const __user *,
const char __user * const __user *,
int);
extern long _do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *, unsigned long);
extern long do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *);
struct task_struct *fork_idle(int);
extern pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
extern void __set_task_comm(struct task_struct *tsk, const char *from, bool exec);
/* Set tsk->comm; "false" = not an exec-time rename. */
static inline void set_task_comm(struct task_struct *tsk, const char *from)
{
__set_task_comm(tsk, from, false);
}
extern char *get_task_comm(char *to, struct task_struct *tsk);
#ifdef CONFIG_SMP
void scheduler_ipi(void);
extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
#else
static inline void scheduler_ipi(void) { }
/* UP: a non-running task is trivially inactive. */
static inline unsigned long wait_task_inactive(struct task_struct *p,
long match_state)
{
return 1;
}
#endif
/* Iteration over the global task list, anchored at init_task. */
#define tasklist_empty() \
list_empty(&init_task.tasks)
#define next_task(p) \
list_entry_rcu((p)->tasks.next, struct task_struct, tasks)
#define for_each_process(p) \
for (p = &init_task ; (p = next_task(p)) != &init_task ; )
extern bool current_is_single_threaded(void);
/*
 * Careful: do_each_thread/while_each_thread is a double loop so
 * 'break' will not work as expected - use goto instead.
 */
#define do_each_thread(g, t) \
for (g = t = &init_task ; (g = t = next_task(g)) != &init_task ; ) do
#define while_each_thread(g, t) \
while ((t = next_thread(t)) != g)
#define __for_each_thread(signal, t) \
list_for_each_entry_rcu(t, &(signal)->thread_head, thread_node)
#define for_each_thread(p, t) \
__for_each_thread((p)->signal, t)
/* Careful: this is a double loop, 'break' won't work as expected. */
#define for_each_process_thread(p, t) \
for_each_process(p) for_each_thread(p, t)
/* Number of live threads in @tsk's thread group. */
static inline int get_nr_threads(struct task_struct *tsk)
{
return tsk->signal->nr_threads;
}
/* Group leaders keep exit_signal >= 0; other threads have it set to -1. */
static inline bool thread_group_leader(struct task_struct *p)
{
return p->exit_signal >= 0;
}
/* Do to the insanities of de_thread it is possible for a process
 * to have the pid of the thread group leader without actually being
 * the thread group leader. For iteration through the pids in proc
 * all we care about is that we have a task with the appropriate
 * pid, we don't actually care if we have the right task.
 */
static inline bool has_group_leader_pid(struct task_struct *p)
{
return task_pid(p) == p->signal->leader_pid;
}
/* Tasks in the same thread group share one signal_struct. */
static inline
bool same_thread_group(struct task_struct *p1, struct task_struct *p2)
{
return p1->signal == p2->signal;
}
/* Next task on the circular thread_group list (RCU-safe). */
static inline struct task_struct *next_thread(const struct task_struct *p)
{
return list_entry_rcu(p->thread_group.next,
struct task_struct, thread_group);
}
/* True if @p is the only thread in its group. */
static inline int thread_group_empty(struct task_struct *p)
{
return list_empty(&p->thread_group);
}
#define delay_group_leader(p) \
(thread_group_leader(p) && !thread_group_empty(p))
/*
 * Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring
 * subscriptions and synchronises with wait4(). Also used in procfs. Also
 * pins the final release of task.io_context. Also protects ->cpuset and
 * ->cgroup.subsys[]. And ->vfork_done.
 *
 * Nests both inside and outside of read_lock(&tasklist_lock).
 * It must not be nested with write_lock_irq(&tasklist_lock),
 * neither inside nor outside.
 */
static inline void task_lock(struct task_struct *p)
{
spin_lock(&p->alloc_lock);
}
static inline void task_unlock(struct task_struct *p)
{
spin_unlock(&p->alloc_lock);
}
extern struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
unsigned long *flags);
/*
 * Lock @tsk's sighand (disabling IRQs, state saved in *@flags).
 * Returns NULL if the task has no sighand any more (it is exiting).
 */
static inline struct sighand_struct *lock_task_sighand(struct task_struct *tsk,
unsigned long *flags)
{
struct sighand_struct *ret;
ret = __lock_task_sighand(tsk, flags);
/* Teach sparse that a non-NULL return means the lock is held. */
(void)__cond_lock(&tsk->sighand->siglock, ret);
return ret;
}
static inline void unlock_task_sighand(struct task_struct *tsk,
unsigned long *flags)
{
spin_unlock_irqrestore(&tsk->sighand->siglock, *flags);
}
/**
 * threadgroup_change_begin - mark the beginning of changes to a threadgroup
 * @tsk: task causing the changes
 *
 * All operations which modify a threadgroup - a new thread joining the
 * group, death of a member thread (the assertion of PF_EXITING) and
 * exec(2) dethreading the process and replacing the leader - are wrapped
 * by threadgroup_change_{begin|end}(). This is to provide a place which
 * subsystems needing threadgroup stability can hook into for
 * synchronization.
 */
static inline void threadgroup_change_begin(struct task_struct *tsk)
{
/* The cgroup hook below can block, so callers must be able to sleep. */
might_sleep();
cgroup_threadgroup_change_begin(tsk);
}
/**
 * threadgroup_change_end - mark the end of changes to a threadgroup
 * @tsk: task causing the changes
 *
 * See threadgroup_change_begin().
 */
static inline void threadgroup_change_end(struct task_struct *tsk)
{
cgroup_threadgroup_change_end(tsk);
}
#ifndef __HAVE_THREAD_FUNCTIONS
/* Generic thread_info/stack accessors; archs may provide their own. */
#define task_thread_info(task) ((struct thread_info *)(task)->stack)
#define task_stack_page(task) ((task)->stack)
/* Copy the parent's thread_info into the child and fix its back-pointer. */
static inline void setup_thread_stack(struct task_struct *p, struct task_struct *org)
{
*task_thread_info(p) = *task_thread_info(org);
task_thread_info(p)->task = p;
}
/*
 * Return the address of the last usable long on the stack.
 *
 * When the stack grows down, this is just above the thread
 * info struct. Going any lower will corrupt the threadinfo.
 *
 * When the stack grows up, this is the highest address.
 * Beyond that position, we corrupt data on the next page.
 */
static inline unsigned long *end_of_stack(struct task_struct *p)
{
#ifdef CONFIG_STACK_GROWSUP
return (unsigned long *)((unsigned long)task_thread_info(p) + THREAD_SIZE) - 1;
#else
return (unsigned long *)(task_thread_info(p) + 1);
#endif
}
#endif
/* Overflow detection: the magic word at the stack end got overwritten. */
#define task_stack_end_corrupted(task) \
(*(end_of_stack(task)) != STACK_END_MAGIC)
/* True if @obj lives within the current task's kernel stack. */
static inline int object_is_on_stack(void *obj)
{
void *stack = task_stack_page(current);
return (obj >= stack) && (obj < (stack + THREAD_SIZE));
}
extern void thread_info_cache_init(void);
#ifdef CONFIG_DEBUG_STACK_USAGE
/* Bytes of stack never written to, found by scanning for the zero canary run. */
static inline unsigned long stack_not_used(struct task_struct *p)
{
unsigned long *n = end_of_stack(p);
do { /* Skip over canary */
n++;
} while (!*n);
return (unsigned long)n - (unsigned long)end_of_stack(p);
}
#endif
extern void set_task_stack_end_magic(struct task_struct *tsk);
/* set thread flags in other task's structures
 * - see asm/thread_info.h for TIF_xxxx flags available
 */
static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag)
{
set_ti_thread_flag(task_thread_info(tsk), flag);
}
static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag)
{
clear_ti_thread_flag(task_thread_info(tsk), flag);
}
static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
{
return test_and_set_ti_thread_flag(task_thread_info(tsk), flag);
}
static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag)
{
return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag);
}
static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
{
return test_ti_thread_flag(task_thread_info(tsk), flag);
}
/* Convenience wrappers for the TIF_NEED_RESCHED flag. */
static inline void set_tsk_need_resched(struct task_struct *tsk)
{
set_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
}
static inline void clear_tsk_need_resched(struct task_struct *tsk)
{
clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
}
static inline int test_tsk_need_resched(struct task_struct *tsk)
{
return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
}
/*
 * Arrange for the current syscall to be transparently restarted:
 * flag a (fake) pending signal and return -ERESTARTNOINTR.
 */
static inline int restart_syscall(void)
{
set_tsk_thread_flag(current, TIF_SIGPENDING);
return -ERESTARTNOINTR;
}
static inline int signal_pending(struct task_struct *p)
{
return unlikely(test_tsk_thread_flag(p,TIF_SIGPENDING));
}
/* SIGKILL queued? (does not check TIF_SIGPENDING — see fatal_signal_pending) */
static inline int __fatal_signal_pending(struct task_struct *p)
{
return unlikely(sigismember(&p->pending.signal, SIGKILL));
}
static inline int fatal_signal_pending(struct task_struct *p)
{
return signal_pending(p) && __fatal_signal_pending(p);
}
/*
 * Should a pending signal interrupt a sleep in @state?  Only
 * TASK_INTERRUPTIBLE sleeps take any signal; TASK_WAKEKILL sleeps
 * take only a fatal one.
 */
static inline int signal_pending_state(long state, struct task_struct *p)
{
if (!(state & (TASK_INTERRUPTIBLE | TASK_WAKEKILL)))
return 0;
if (!signal_pending(p))
return 0;
return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p);
}
/*
 * cond_resched() and cond_resched_lock(): latency reduction via
 * explicit rescheduling in places that are safe. The return
 * value indicates whether a reschedule was done in fact.
 * cond_resched_lock() will drop the spinlock before scheduling,
 * cond_resched_softirq() will enable bhs before scheduling.
 */
extern int _cond_resched(void);
/* Each wrapper also performs a might-sleep debug check at the call site. */
#define cond_resched() ({ \
___might_sleep(__FILE__, __LINE__, 0); \
_cond_resched(); \
})
extern int __cond_resched_lock(spinlock_t *lock);
#define cond_resched_lock(lock) ({ \
___might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);\
__cond_resched_lock(lock); \
})
extern int __cond_resched_softirq(void);
#define cond_resched_softirq() ({ \
___might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET); \
__cond_resched_softirq(); \
})
/*
 * cond_resched() inside an RCU read-side critical section: briefly exit
 * the section around the reschedule.  A no-op under preemptible RCU
 * (unless atomic-sleep debugging is on), where it is not needed.
 */
static inline void cond_resched_rcu(void)
{
#if defined(CONFIG_DEBUG_ATOMIC_SLEEP) || !defined(CONFIG_PREEMPT_RCU)
rcu_read_unlock();
cond_resched();
rcu_read_lock();
#endif
}
/*
 * Does a critical section need to be broken due to another
 * task waiting?: (technically does not depend on CONFIG_PREEMPT,
 * but a general need for low latency)
 */
static inline int spin_needbreak(spinlock_t *lock)
{
#ifdef CONFIG_PREEMPT
return spin_is_contended(lock);
#else
return 0;
#endif
}
/*
 * Idle thread specific functions to determine the need_resched
 * polling state.
 */
#ifdef TIF_POLLING_NRFLAG
static inline int tsk_is_polling(struct task_struct *p)
{
return test_tsk_thread_flag(p, TIF_POLLING_NRFLAG);
}
static inline void __current_set_polling(void)
{
set_thread_flag(TIF_POLLING_NRFLAG);
}
/* Set polling, then (after a barrier) report whether a resched is pending. */
static inline bool __must_check current_set_polling_and_test(void)
{
__current_set_polling();
/*
 * Polling state must be visible before we test NEED_RESCHED,
 * paired by resched_curr()
 */
smp_mb__after_atomic();
return unlikely(tif_need_resched());
}
static inline void __current_clr_polling(void)
{
clear_thread_flag(TIF_POLLING_NRFLAG);
}
static inline bool __must_check current_clr_polling_and_test(void)
{
__current_clr_polling();
/*
 * Polling state must be visible before we test NEED_RESCHED,
 * paired by resched_curr()
 */
smp_mb__after_atomic();
return unlikely(tif_need_resched());
}
#else
/* No TIF_POLLING_NRFLAG: polling state does not exist on this arch. */
static inline int tsk_is_polling(struct task_struct *p) { return 0; }
static inline void __current_set_polling(void) { }
static inline void __current_clr_polling(void) { }
static inline bool __must_check current_set_polling_and_test(void)
{
return unlikely(tif_need_resched());
}
static inline bool __must_check current_clr_polling_and_test(void)
{
return unlikely(tif_need_resched());
}
#endif
static inline void current_clr_polling(void)
{
__current_clr_polling();
/*
 * Ensure we check TIF_NEED_RESCHED after we clear the polling bit.
 * Once the bit is cleared, we'll get IPIs with every new
 * TIF_NEED_RESCHED and the IPI handler, scheduler_ipi(), will also
 * fold.
 */
smp_mb(); /* paired with resched_curr() */
preempt_fold_need_resched();
}
static __always_inline bool need_resched(void)
{
return unlikely(tif_need_resched());
}
/*
 * Thread group CPU time accounting.
 */
void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times);
void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times);
/*
 * Reevaluate whether the task has signals pending delivery.
 * Wake the task if so.
 * This is required every time the blocked sigset_t changes.
 * callers must hold sighand->siglock.
 */
extern void recalc_sigpending_and_wake(struct task_struct *t);
extern void recalc_sigpending(void);
extern void signal_wake_up_state(struct task_struct *t, unsigned int state);
/* @resume: also wake killable (TASK_WAKEKILL) sleeps. */
static inline void signal_wake_up(struct task_struct *t, bool resume)
{
signal_wake_up_state(t, resume ? TASK_WAKEKILL : 0);
}
/* Ptrace variant: @resume also wakes __TASK_TRACED sleeps. */
static inline void ptrace_signal_wake_up(struct task_struct *t, bool resume)
{
signal_wake_up_state(t, resume ? __TASK_TRACED : 0);
}
/*
 * Wrappers for p->thread_info->cpu access. No-op on UP.
 */
#ifdef CONFIG_SMP
static inline unsigned int task_cpu(const struct task_struct *p)
{
return task_thread_info(p)->cpu;
}
/* NUMA node of the CPU the task last ran on. */
static inline int task_node(const struct task_struct *p)
{
return cpu_to_node(task_cpu(p));
}
extern void set_task_cpu(struct task_struct *p, unsigned int cpu);
#else
/* UP: everything runs on CPU 0. */
static inline unsigned int task_cpu(const struct task_struct *p)
{
return 0;
}
static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
{
}
#endif /* CONFIG_SMP */
extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
#ifdef CONFIG_CGROUP_SCHED
extern struct task_group root_task_group;
#endif /* CONFIG_CGROUP_SCHED */
extern int task_can_switch_user(struct user_struct *up,
struct task_struct *tsk);
/* Extended I/O accounting counters; compiled out without CONFIG_TASK_XACCT. */
#ifdef CONFIG_TASK_XACCT
/* Account @amt bytes read by @tsk. */
static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
{
tsk->ioac.rchar += amt;
}
/* Account @amt bytes written by @tsk. */
static inline void add_wchar(struct task_struct *tsk, ssize_t amt)
{
tsk->ioac.wchar += amt;
}
/* Count one read syscall. */
static inline void inc_syscr(struct task_struct *tsk)
{
tsk->ioac.syscr++;
}
/* Count one write syscall. */
static inline void inc_syscw(struct task_struct *tsk)
{
tsk->ioac.syscw++;
}
#else
static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
{
}
static inline void add_wchar(struct task_struct *tsk, ssize_t amt)
{
}
static inline void inc_syscr(struct task_struct *tsk)
{
}
static inline void inc_syscw(struct task_struct *tsk)
{
}
#endif
#ifndef TASK_SIZE_OF
#define TASK_SIZE_OF(tsk) TASK_SIZE
#endif
#ifdef CONFIG_MEMCG
extern void mm_update_next_owner(struct mm_struct *mm);
#else
static inline void mm_update_next_owner(struct mm_struct *mm)
{
}
#endif /* CONFIG_MEMCG */
/* Current (soft) limit for @limit on task @tsk. */
static inline unsigned long task_rlimit(const struct task_struct *tsk,
unsigned int limit)
{
/* READ_ONCE: rlimits can be changed concurrently via setrlimit/prlimit. */
return READ_ONCE(tsk->signal->rlim[limit].rlim_cur);
}
/* Hard (max) limit for @limit on task @tsk. */
static inline unsigned long task_rlimit_max(const struct task_struct *tsk,
unsigned int limit)
{
return READ_ONCE(tsk->signal->rlim[limit].rlim_max);
}
/* Shorthands operating on the current task. */
static inline unsigned long rlimit(unsigned int limit)
{
return task_rlimit(current, limit);
}
static inline unsigned long rlimit_max(unsigned int limit)
{
return task_rlimit_max(current, limit);
}
#endif
|
#ifndef _LINUX_SCHED_H
#define _LINUX_SCHED_H
#include <uapi/linux/sched.h>
#include <linux/sched/prio.h>
/* Legacy POSIX scheduling parameter block (see also sched_attr below). */
struct sched_param {
int sched_priority;
};
#include <asm/param.h> /* for HZ */
#include <linux/capability.h>
#include <linux/threads.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/timex.h>
#include <linux/jiffies.h>
#include <linux/plist.h>
#include <linux/rbtree.h>
#include <linux/thread_info.h>
#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/nodemask.h>
#include <linux/mm_types.h>
#include <linux/preempt.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <linux/cputime.h>
#include <linux/smp.h>
#include <linux/sem.h>
#include <linux/shm.h>
#include <linux/signal.h>
#include <linux/compiler.h>
#include <linux/completion.h>
#include <linux/pid.h>
#include <linux/percpu.h>
#include <linux/topology.h>
#include <linux/proportions.h>
#include <linux/seccomp.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/rtmutex.h>
#include <linux/time.h>
#include <linux/param.h>
#include <linux/resource.h>
#include <linux/timer.h>
#include <linux/hrtimer.h>
#include <linux/task_io_accounting.h>
#include <linux/latencytop.h>
#include <linux/cred.h>
#include <linux/llist.h>
#include <linux/uidgid.h>
#include <linux/gfp.h>
#include <linux/magic.h>
#include <linux/cgroup-defs.h>
#include <asm/processor.h>
#define SCHED_ATTR_SIZE_VER0 48 /* sizeof first published struct */
/*
 * Extended scheduling parameters data structure.
 *
 * This is needed because the original struct sched_param can not be
 * altered without introducing ABI issues with legacy applications
 * (e.g., in sched_getparam()).
 *
 * However, the possibility of specifying more than just a priority for
 * the tasks may be useful for a wide variety of application fields, e.g.,
 * multimedia, streaming, automation and control, and many others.
 *
 * This variant (sched_attr) is meant at describing a so-called
 * sporadic time-constrained task. In such model a task is specified by:
 * - the activation period or minimum instance inter-arrival time;
 * - the maximum (or average, depending on the actual scheduling
 * discipline) computation time of all instances, a.k.a. runtime;
 * - the deadline (relative to the actual activation time) of each
 * instance.
 * Very briefly, a periodic (sporadic) task asks for the execution of
 * some specific computation --which is typically called an instance--
 * (at most) every period. Moreover, each instance typically lasts no more
 * than the runtime and must be completed by time instant t equal to
 * the instance activation time + the deadline.
 *
 * This is reflected by the actual fields of the sched_attr structure:
 *
 * @size size of the structure, for fwd/bwd compat.
 *
 * @sched_policy task's scheduling policy
 * @sched_flags for customizing the scheduler behaviour
 * @sched_nice task's nice value (SCHED_NORMAL/BATCH)
 * @sched_priority task's static priority (SCHED_FIFO/RR)
 * @sched_deadline representative of the task's deadline
 * @sched_runtime representative of the task's runtime
 * @sched_period representative of the task's period
 *
 * Given this task model, there are a multiplicity of scheduling algorithms
 * and policies, that can be used to ensure all the tasks will make their
 * timing constraints.
 *
 * As of now, the SCHED_DEADLINE policy (sched_dl scheduling class) is the
 * only user of this new interface. More information about the algorithm
 * available in the scheduling class file or in Documentation/.
 */
struct sched_attr {
u32 size;
u32 sched_policy;
u64 sched_flags;
/* SCHED_NORMAL, SCHED_BATCH */
s32 sched_nice;
/* SCHED_FIFO, SCHED_RR */
u32 sched_priority;
/* SCHED_DEADLINE */
u64 sched_runtime;
u64 sched_deadline;
u64 sched_period;
};
/* Opaque forward declarations used throughout this header. */
struct futex_pi_state;
struct robust_list_head;
struct bio_list;
struct fs_struct;
struct perf_event_context;
struct blk_plug;
struct filename;
struct nameidata;
/* Per-task VMA lookup cache: 4 slots, indexed by hashed address. */
#define VMACACHE_BITS 2
#define VMACACHE_SIZE (1U << VMACACHE_BITS)
#define VMACACHE_MASK (VMACACHE_SIZE - 1)
/*
 * These are the constant used to fake the fixed-point load-average
 * counting. Some notes:
 * - 11 bit fractions expand to 22 bits by the multiplies: this gives
 * a load-average precision of 10 bits integer + 11 bits fractional
 * - if you want to count load-averages more often, you need more
 * precision, or rounding will get you. With 2-second counting freq,
 * the EXP_n values would be 1981, 2034 and 2043 if still using only
 * 11 bit fractions.
 */
extern unsigned long avenrun[]; /* Load averages */
extern void get_avenrun(unsigned long *loads, unsigned long offset, int shift);
#define FSHIFT 11 /* nr of bits of precision */
#define FIXED_1 (1<<FSHIFT) /* 1.0 as fixed-point */
#define LOAD_FREQ (5*HZ+1) /* 5 sec intervals */
#define EXP_1 1884 /* 1/exp(5sec/1min) as fixed-point */
#define EXP_5 2014 /* 1/exp(5sec/5min) */
#define EXP_15 2037 /* 1/exp(5sec/15min) */
/* Exponential-decay update of one load average sample (fixed-point). */
#define CALC_LOAD(load,exp,n) \
load *= exp; \
load += n*(FIXED_1-exp); \
load >>= FSHIFT;
/* Global fork/thread/run-queue accounting, maintained by the scheduler core. */
extern unsigned long total_forks;
extern int nr_threads;
DECLARE_PER_CPU(unsigned long, process_counts);
extern int nr_processes(void);
extern unsigned long nr_running(void);
extern bool single_task_running(void);
extern unsigned long nr_iowait(void);
extern unsigned long nr_iowait_cpu(int cpu);
extern void get_iowait_load(unsigned long *nr_waiters, unsigned long *load);
extern void calc_global_load(unsigned long ticks);
#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
extern void update_cpu_load_nohz(void);
#else
/* NOHZ cpu-load tracking compiled out: hook collapses to a no-op. */
static inline void update_cpu_load_nohz(void) { }
#endif
extern unsigned long get_parent_ip(unsigned long addr);
extern void dump_cpu_task(int cpu);
struct seq_file;
struct cfs_rq;
struct task_group;
#ifdef CONFIG_SCHED_DEBUG
extern void proc_sched_show_task(struct task_struct *p, struct seq_file *m);
extern void proc_sched_set_task(struct task_struct *p);
#endif
/*
 * Task state bitmask. NOTE! These bits are also
 * encoded in fs/proc/array.c: get_task_state().
 *
 * We have two separate sets of flags: task->state
 * is about runnability, while task->exit_state are
 * about the task exiting. Confusing, but this way
 * modifying one set can't modify the other one by
 * mistake.
 */
/* Runnability states (tsk->state); the non-zero states are distinct bits. */
#define TASK_RUNNING 0
#define TASK_INTERRUPTIBLE 1
#define TASK_UNINTERRUPTIBLE 2
#define __TASK_STOPPED 4
#define __TASK_TRACED 8
/* in tsk->exit_state */
#define EXIT_DEAD 16
#define EXIT_ZOMBIE 32
#define EXIT_TRACE (EXIT_ZOMBIE | EXIT_DEAD)
/* in tsk->state again */
#define TASK_DEAD 64
#define TASK_WAKEKILL 128
#define TASK_WAKING 256
#define TASK_PARKED 512
#define TASK_NOLOAD 1024
#define TASK_STATE_MAX 2048
/* One character per state bit, in bit order (see get_task_state() note above). */
#define TASK_STATE_TO_CHAR_STR "RSDTtXZxKWPN"
/* Build-time assert: array size goes negative if a bit lacks a character. */
extern char ___assert_task_state[1 - 2*!!(
 sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1)];
/* Convenience macros for the sake of set_task_state */
#define TASK_KILLABLE (TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
#define TASK_STOPPED (TASK_WAKEKILL | __TASK_STOPPED)
#define TASK_TRACED (TASK_WAKEKILL | __TASK_TRACED)
#define TASK_IDLE (TASK_UNINTERRUPTIBLE | TASK_NOLOAD)
/* Convenience macros for the sake of wake_up */
#define TASK_NORMAL (TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
#define TASK_ALL (TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED)
/* get_task_state() */
#define TASK_REPORT (TASK_RUNNING | TASK_INTERRUPTIBLE | \
 TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
 __TASK_TRACED | EXIT_ZOMBIE | EXIT_DEAD)
#define task_is_traced(task) ((task->state & __TASK_TRACED) != 0)
#define task_is_stopped(task) ((task->state & __TASK_STOPPED) != 0)
#define task_is_stopped_or_traced(task) \
 ((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
/* Loadavg counts a task iff uninterruptible, not frozen, and not NOLOAD. */
#define task_contributes_to_load(task) \
 ((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
 (task->flags & PF_FROZEN) == 0 && \
 (task->state & TASK_NOLOAD) == 0)
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
/* Debug variants additionally record the caller's IP in task_state_change. */
#define __set_task_state(tsk, state_value) \
 do { \
 (tsk)->task_state_change = _THIS_IP_; \
 (tsk)->state = (state_value); \
 } while (0)
#define set_task_state(tsk, state_value) \
 do { \
 (tsk)->task_state_change = _THIS_IP_; \
 smp_store_mb((tsk)->state, (state_value)); \
 } while (0)
/*
 * set_current_state() includes a barrier so that the write of current->state
 * is correctly serialised wrt the caller's subsequent test of whether to
 * actually sleep:
 *
 * set_current_state(TASK_UNINTERRUPTIBLE);
 * if (do_i_need_to_sleep())
 * schedule();
 *
 * If the caller does not need such serialisation then use __set_current_state()
 */
#define __set_current_state(state_value) \
 do { \
 current->task_state_change = _THIS_IP_; \
 current->state = (state_value); \
 } while (0)
#define set_current_state(state_value) \
 do { \
 current->task_state_change = _THIS_IP_; \
 smp_store_mb(current->state, (state_value)); \
 } while (0)
#else
/* Non-debug variants: __set_* is a plain store, set_* adds a memory barrier. */
#define __set_task_state(tsk, state_value) \
 do { (tsk)->state = (state_value); } while (0)
#define set_task_state(tsk, state_value) \
 smp_store_mb((tsk)->state, (state_value))
/*
 * set_current_state() includes a barrier so that the write of current->state
 * is correctly serialised wrt the caller's subsequent test of whether to
 * actually sleep:
 *
 * set_current_state(TASK_UNINTERRUPTIBLE);
 * if (do_i_need_to_sleep())
 * schedule();
 *
 * If the caller does not need such serialisation then use __set_current_state()
 */
#define __set_current_state(state_value) \
 do { current->state = (state_value); } while (0)
#define set_current_state(state_value) \
 smp_store_mb(current->state, (state_value))
#endif
/* Task command name length */
#define TASK_COMM_LEN 16
#include <linux/spinlock.h>
/*
 * This serializes "schedule()" and also protects
 * the run-queue from deletions/modifications (but
 * _adding_ to the beginning of the run-queue has
 * a separate lock).
 */
extern rwlock_t tasklist_lock;
extern spinlock_t mmlist_lock;
struct task_struct;
#ifdef CONFIG_PROVE_RCU
extern int lockdep_tasklist_lock_is_held(void);
#endif /* #ifdef CONFIG_PROVE_RCU */
/* Scheduler bring-up and idle-task initialization entry points. */
extern void sched_init(void);
extern void sched_init_smp(void);
extern asmlinkage void schedule_tail(struct task_struct *prev);
extern void init_idle(struct task_struct *idle, int cpu);
extern void init_idle_bootup_task(struct task_struct *idle);
extern cpumask_var_t cpu_isolated_map;
extern int runqueue_is_locked(int cpu);
#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
extern void nohz_balance_enter_idle(int cpu);
extern void set_cpu_sd_state_idle(void);
extern int get_nohz_timer_target(void);
#else
/* NOHZ idle balancing compiled out: keep callers unconditional via stubs. */
static inline void nohz_balance_enter_idle(int cpu) { }
static inline void set_cpu_sd_state_idle(void) { }
#endif
/*
 * Only dump TASK_* tasks. (0 for all tasks)
 */
extern void show_state_filter(unsigned long state_filter);
/* Dump the state of every task (a 0 filter matches all tasks, per above). */
static inline void show_state(void)
{
 show_state_filter(0);
}
extern void show_regs(struct pt_regs *);
/*
 * TASK is a pointer to the task whose backtrace we want to see (or NULL for current
 * task), SP is the stack pointer of the first frame that should be shown in the back
 * trace (or NULL if the entire call-chain of the task should be shown).
 */
extern void show_stack(struct task_struct *task, unsigned long *sp);
extern void cpu_init (void);
extern void trap_init(void);
extern void update_process_times(int user);
extern void scheduler_tick(void);
extern void sched_show_task(struct task_struct *p);
#ifdef CONFIG_LOCKUP_DETECTOR
extern void touch_softlockup_watchdog(void);
extern void touch_softlockup_watchdog_sync(void);
extern void touch_all_softlockup_watchdogs(void);
extern int proc_dowatchdog_thresh(struct ctl_table *table, int write,
 void __user *buffer,
 size_t *lenp, loff_t *ppos);
extern unsigned int softlockup_panic;
extern unsigned int hardlockup_panic;
void lockup_detector_init(void);
#else
/* Lockup detector compiled out: all hooks collapse to no-ops. */
static inline void touch_softlockup_watchdog(void)
{
}
static inline void touch_softlockup_watchdog_sync(void)
{
}
static inline void touch_all_softlockup_watchdogs(void)
{
}
static inline void lockup_detector_init(void)
{
}
#endif
#ifdef CONFIG_DETECT_HUNG_TASK
void reset_hung_task_detector(void);
#else
static inline void reset_hung_task_detector(void)
{
}
#endif
/* Attach to any functions which should be ignored in wchan output. */
#define __sched __attribute__((__section__(".sched.text")))
/* Linker adds these: start and end of __sched functions */
extern char __sched_text_start[], __sched_text_end[];
/* Is this address in the __sched functions? */
extern int in_sched_functions(unsigned long addr);
#define MAX_SCHEDULE_TIMEOUT LONG_MAX
extern signed long schedule_timeout(signed long timeout);
extern signed long schedule_timeout_interruptible(signed long timeout);
extern signed long schedule_timeout_killable(signed long timeout);
extern signed long schedule_timeout_uninterruptible(signed long timeout);
asmlinkage void schedule(void);
extern void schedule_preempt_disabled(void);
extern long io_schedule_timeout(long timeout);
/* Block for I/O with an effectively unbounded timeout (LONG_MAX jiffies). */
static inline void io_schedule(void)
{
 io_schedule_timeout(MAX_SCHEDULE_TIMEOUT);
}
struct nsproxy;
struct user_namespace;
#ifdef CONFIG_MMU
/* Arch hooks for choosing the mmap layout and finding free address space. */
extern void arch_pick_mmap_layout(struct mm_struct *mm);
extern unsigned long
arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
 unsigned long, unsigned long);
extern unsigned long
arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
 unsigned long len, unsigned long pgoff,
 unsigned long flags);
#else
/* Without an MMU there is no mmap layout to choose. */
static inline void arch_pick_mmap_layout(struct mm_struct *mm) {}
#endif
#define SUID_DUMP_DISABLE 0 /* No setuid dumping */
#define SUID_DUMP_USER 1 /* Dump as user of process */
#define SUID_DUMP_ROOT 2 /* Dump as root */
/* mm flags */
/* for SUID_DUMP_* above */
#define MMF_DUMPABLE_BITS 2
#define MMF_DUMPABLE_MASK ((1 << MMF_DUMPABLE_BITS) - 1)
/* Stores a SUID_DUMP_* value in the low MMF_DUMPABLE_BITS of mm->flags. */
extern void set_dumpable(struct mm_struct *mm, int value);
/*
 * This returns the actual value of the suid_dumpable flag. For things
 * that are using this for checking for privilege transitions, it must
 * test against SUID_DUMP_USER rather than treating it as a boolean
 * value.
 */
/* Extract the SUID_DUMP_* value from a raw mm->flags word. */
static inline int __get_dumpable(unsigned long mm_flags)
{
 unsigned long dumpable = mm_flags & MMF_DUMPABLE_MASK;

 return (int)dumpable;
}
/* Convenience wrapper: read the suid_dumpable setting of an mm. */
static inline int get_dumpable(struct mm_struct *mm)
{
 unsigned long flags = mm->flags;

 return __get_dumpable(flags);
}
/* coredump filter bits */
#define MMF_DUMP_ANON_PRIVATE 2
#define MMF_DUMP_ANON_SHARED 3
#define MMF_DUMP_MAPPED_PRIVATE 4
#define MMF_DUMP_MAPPED_SHARED 5
#define MMF_DUMP_ELF_HEADERS 6
#define MMF_DUMP_HUGETLB_PRIVATE 7
#define MMF_DUMP_HUGETLB_SHARED 8
#define MMF_DUMP_DAX_PRIVATE 9
#define MMF_DUMP_DAX_SHARED 10
/* The filter bits sit immediately above the dumpable bits in mm->flags. */
#define MMF_DUMP_FILTER_SHIFT MMF_DUMPABLE_BITS
#define MMF_DUMP_FILTER_BITS 9
#define MMF_DUMP_FILTER_MASK \
 (((1 << MMF_DUMP_FILTER_BITS) - 1) << MMF_DUMP_FILTER_SHIFT)
/*
 * MMF_DUMP_MASK_DEFAULT_ELF is referenced here before its definition
 * below; that is fine since macro expansion happens at the point of use.
 */
#define MMF_DUMP_FILTER_DEFAULT \
 ((1 << MMF_DUMP_ANON_PRIVATE) | (1 << MMF_DUMP_ANON_SHARED) |\
 (1 << MMF_DUMP_HUGETLB_PRIVATE) | MMF_DUMP_MASK_DEFAULT_ELF)
#ifdef CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS
# define MMF_DUMP_MASK_DEFAULT_ELF (1 << MMF_DUMP_ELF_HEADERS)
#else
# define MMF_DUMP_MASK_DEFAULT_ELF 0
#endif
/* leave room for more dump flags */
#define MMF_VM_MERGEABLE 16 /* KSM may merge identical pages */
#define MMF_VM_HUGEPAGE 17 /* set when VM_HUGEPAGE is set on vma */
#define MMF_EXE_FILE_CHANGED 18 /* see prctl_set_mm_exe_file() */
#define MMF_HAS_UPROBES 19 /* has uprobes */
#define MMF_RECALC_UPROBES 20 /* MMF_HAS_UPROBES can be wrong */
#define MMF_INIT_MASK (MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK)
/* Signal handler table; refcounted, may be shared (see signal_struct NOTE below). */
struct sighand_struct {
 atomic_t count;
 struct k_sigaction action[_NSIG];
 spinlock_t siglock;
 wait_queue_head_t signalfd_wqh;
};
/* Per-process accounting info, used under CONFIG_BSD_PROCESS_ACCT. */
struct pacct_struct {
 int ac_flag;
 long ac_exitcode;
 unsigned long ac_mem;
 cputime_t ac_utime, ac_stime;
 unsigned long ac_minflt, ac_majflt;
};
/* One CPU-time interval timer (ITIMER_PROF / ITIMER_VIRTUAL, see below). */
struct cpu_itimer {
 cputime_t expires;
 cputime_t incr;
 u32 error;
 u32 incr_error;
};
/**
 * struct prev_cputime - snapshot of system and user cputime
 * @utime: time spent in user mode
 * @stime: time spent in system mode
 * @lock: protects the above two fields
 *
 * Stores previous user/system time values such that we can guarantee
 * monotonicity.
 */
struct prev_cputime {
#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
 cputime_t utime;
 cputime_t stime;
 raw_spinlock_t lock;
#endif
};
/* Zero the snapshot and init its lock; empty struct (no-op) when
 * CONFIG_VIRT_CPU_ACCOUNTING_NATIVE is set. */
static inline void prev_cputime_init(struct prev_cputime *prev)
{
#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
 prev->utime = prev->stime = 0;
 raw_spin_lock_init(&prev->lock);
#endif
}
/**
 * struct task_cputime - collected CPU time counts
 * @utime: time spent in user mode, in &cputime_t units
 * @stime: time spent in kernel mode, in &cputime_t units
 * @sum_exec_runtime: total time spent on the CPU, in nanoseconds
 *
 * This structure groups together three kinds of CPU time that are tracked for
 * threads and thread groups. Most things considering CPU time want to group
 * these counts together and treat all three of them in parallel.
 */
struct task_cputime {
 cputime_t utime;
 cputime_t stime;
 unsigned long long sum_exec_runtime;
};
/* Alternate field names when used to cache expirations. */
#define virt_exp utime
#define prof_exp stime
#define sched_exp sum_exec_runtime
/* Compound-literal initializer: all three counts zeroed. */
#define INIT_CPUTIME \
 (struct task_cputime) { \
 .utime = 0, \
 .stime = 0, \
 .sum_exec_runtime = 0, \
 }
/*
 * This is the atomic variant of task_cputime, which can be used for
 * storing and updating task_cputime statistics without locking.
 */
struct task_cputime_atomic {
 atomic64_t utime;
 atomic64_t stime;
 atomic64_t sum_exec_runtime;
};
#define INIT_CPUTIME_ATOMIC \
 (struct task_cputime_atomic) { \
 .utime = ATOMIC64_INIT(0), \
 .stime = ATOMIC64_INIT(0), \
 .sum_exec_runtime = ATOMIC64_INIT(0), \
 }
#define PREEMPT_DISABLED (PREEMPT_DISABLE_OFFSET + PREEMPT_ENABLED)
/*
 * Disable preemption until the scheduler is running -- use an unconditional
 * value so that it also works on !PREEMPT_COUNT kernels.
 *
 * Reset by start_kernel()->sched_init()->init_idle()->init_idle_preempt_count().
 */
#define INIT_PREEMPT_COUNT PREEMPT_OFFSET
/*
 * Initial preempt_count value; reflects the preempt_count schedule invariant
 * which states that during context switches:
 *
 * preempt_count() == 2*PREEMPT_DISABLE_OFFSET
 *
 * Note: PREEMPT_DISABLE_OFFSET is 0 for !PREEMPT_COUNT kernels.
 * Note: See finish_task_switch().
 */
#define FORK_PREEMPT_COUNT (2*PREEMPT_DISABLE_OFFSET + PREEMPT_ENABLED)
/**
 * struct thread_group_cputimer - thread group interval timer counts
 * @cputime_atomic: atomic thread group interval timers.
 * @running: true when there are timers running and
 * @cputime_atomic receives updates.
 * @checking_timer: true when a thread in the group is in the
 * process of checking for thread group timers.
 *
 * This structure contains the version of task_cputime, above, that is
 * used for thread group CPU timer calculations.
 */
struct thread_group_cputimer {
 struct task_cputime_atomic cputime_atomic;
 bool running;
 bool checking_timer;
};
#include <linux/rwsem.h>
struct autogroup;
/*
 * NOTE! "signal_struct" does not have its own
 * locking, because a shared signal_struct always
 * implies a shared sighand_struct, so locking
 * sighand_struct is always a proper superset of
 * the locking of signal_struct.
 */
struct signal_struct {
 atomic_t sigcnt;
 atomic_t live;
 int nr_threads;
 struct list_head thread_head;
 wait_queue_head_t wait_chldexit; /* for wait4() */
 /* current thread group signal load-balancing target: */
 struct task_struct *curr_target;
 /* shared signal handling: */
 struct sigpending shared_pending;
 /* thread group exit support */
 int group_exit_code;
 /* overloaded:
 * - notify group_exit_task when ->count is equal to notify_count
 * - everyone except group_exit_task is stopped during signal delivery
 * of fatal signals, group_exit_task processes the signal.
 */
 int notify_count;
 struct task_struct *group_exit_task;
 /* thread group stop support, overloads group_exit_code too */
 int group_stop_count;
 unsigned int flags; /* see SIGNAL_* flags below */
 /*
 * PR_SET_CHILD_SUBREAPER marks a process, like a service
 * manager, to re-parent orphan (double-forking) child processes
 * to this process instead of 'init'. The service manager is
 * able to receive SIGCHLD signals and is able to investigate
 * the process until it calls wait(). All children of this
 * process will inherit a flag if they should look for a
 * child_subreaper process at exit.
 */
 unsigned int is_child_subreaper:1;
 unsigned int has_child_subreaper:1;
 /* POSIX.1b Interval Timers */
 int posix_timer_id;
 struct list_head posix_timers;
 /* ITIMER_REAL timer for the process */
 struct hrtimer real_timer;
 struct pid *leader_pid;
 ktime_t it_real_incr;
 /*
 * ITIMER_PROF and ITIMER_VIRTUAL timers for the process, we use
 * CPUCLOCK_PROF and CPUCLOCK_VIRT for indexing array as these
 * values are defined to 0 and 1 respectively
 */
 struct cpu_itimer it[2];
 /*
 * Thread group totals for process CPU timers.
 * See thread_group_cputimer(), et al, for details.
 */
 struct thread_group_cputimer cputimer;
 /* Earliest-expiration cache. */
 struct task_cputime cputime_expires;
 struct list_head cpu_timers[3];
 struct pid *tty_old_pgrp;
 /* boolean value for session group leader */
 int leader;
 struct tty_struct *tty; /* NULL if no tty */
#ifdef CONFIG_SCHED_AUTOGROUP
 struct autogroup *autogroup;
#endif
 /*
 * Cumulative resource counters for dead threads in the group,
 * and for reaped dead child processes forked by this group.
 * Live threads maintain their own counters and add to these
 * in __exit_signal, except for the group leader.
 */
 seqlock_t stats_lock;
 cputime_t utime, stime, cutime, cstime;
 cputime_t gtime;
 cputime_t cgtime;
 struct prev_cputime prev_cputime;
 unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
 unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt;
 unsigned long inblock, oublock, cinblock, coublock;
 unsigned long maxrss, cmaxrss;
 struct task_io_accounting ioac;
 /*
 * Cumulative ns of schedule CPU time for dead threads in the
 * group, not including a zombie group leader, (This only differs
 * from jiffies_to_ns(utime + stime) if sched_clock uses something
 * other than jiffies.)
 */
 unsigned long long sum_sched_runtime;
 /*
 * We don't bother to synchronize most readers of this at all,
 * because there is no reader checking a limit that actually needs
 * to get both rlim_cur and rlim_max atomically, and either one
 * alone is a single word that can safely be read normally.
 * getrlimit/setrlimit use task_lock(current->group_leader) to
 * protect this instead of the siglock, because they really
 * have no need to disable irqs.
 */
 struct rlimit rlim[RLIM_NLIMITS];
#ifdef CONFIG_BSD_PROCESS_ACCT
 struct pacct_struct pacct; /* per-process accounting information */
#endif
#ifdef CONFIG_TASKSTATS
 struct taskstats *stats;
#endif
#ifdef CONFIG_AUDIT
 unsigned audit_tty;
 unsigned audit_tty_log_passwd;
 struct tty_audit_buf *tty_audit_buf;
#endif
 oom_flags_t oom_flags;
 short oom_score_adj; /* OOM kill score adjustment */
 short oom_score_adj_min; /* OOM kill score adjustment min value.
 * Only settable by CAP_SYS_RESOURCE. */
 struct mutex cred_guard_mutex; /* guard against foreign influences on
 * credential calculations
 * (notably ptrace) */
};
/*
 * Bits in flags field of signal_struct.
 */
#define SIGNAL_STOP_STOPPED 0x00000001 /* job control stop in effect */
#define SIGNAL_STOP_CONTINUED 0x00000002 /* SIGCONT since WCONTINUED reap */
#define SIGNAL_GROUP_EXIT 0x00000004 /* group exit in progress */
#define SIGNAL_GROUP_COREDUMP 0x00000008 /* coredump in progress */
/*
 * Pending notifications to parent.
 */
#define SIGNAL_CLD_STOPPED 0x00000010
#define SIGNAL_CLD_CONTINUED 0x00000020
#define SIGNAL_CLD_MASK (SIGNAL_CLD_STOPPED|SIGNAL_CLD_CONTINUED)
#define SIGNAL_UNKILLABLE 0x00000040 /* for init: ignore fatal signals */
/* If true, all threads except ->group_exit_task have pending SIGKILL */
static inline int signal_group_exit(const struct signal_struct *sig)
{
 if (sig->flags & SIGNAL_GROUP_EXIT)
  return 1;

 return sig->group_exit_task != NULL;
}
/*
 * Some day this will be a full-fledged user tracking system..
 */
struct user_struct {
 atomic_t __count; /* reference count */
 atomic_t processes; /* How many processes does this user have? */
 atomic_t sigpending; /* How many pending signals does this user have? */
#ifdef CONFIG_INOTIFY_USER
 atomic_t inotify_watches; /* How many inotify watches does this user have? */
 atomic_t inotify_devs; /* How many inotify devs does this user have opened? */
#endif
#ifdef CONFIG_FANOTIFY
 atomic_t fanotify_listeners;
#endif
#ifdef CONFIG_EPOLL
 atomic_long_t epoll_watches; /* The number of file descriptors currently watched */
#endif
#ifdef CONFIG_POSIX_MQUEUE
 /* protected by mq_lock */
 unsigned long mq_bytes; /* How many bytes can be allocated to mqueue? */
#endif
 unsigned long locked_shm; /* How many pages of mlocked shm ? */
 unsigned long unix_inflight; /* How many files in flight in unix sockets */
#ifdef CONFIG_KEYS
 struct key *uid_keyring; /* UID specific keyring */
 struct key *session_keyring; /* UID's default session keyring */
#endif
 /* Hash table maintenance information */
 struct hlist_node uidhash_node;
 kuid_t uid;
#if defined(CONFIG_PERF_EVENTS) || defined(CONFIG_BPF_SYSCALL)
 atomic_long_t locked_vm;
#endif
};
extern int uids_sysfs_init(void);
/* Look up a user_struct by uid; presumably takes a reference — TODO confirm. */
extern struct user_struct *find_user(kuid_t);
extern struct user_struct root_user;
#define INIT_USER (&root_user)
struct backing_dev_info;
struct reclaim_state;
#ifdef CONFIG_SCHED_INFO
/* Per-task scheduling statistics (run counts and queueing delay). */
struct sched_info {
 /* cumulative counters */
 unsigned long pcount; /* # of times run on this cpu */
 unsigned long long run_delay; /* time spent waiting on a runqueue */
 /* timestamps */
 unsigned long long last_arrival,/* when we last ran on a cpu */
 last_queued; /* when we were last queued to run */
};
#endif /* CONFIG_SCHED_INFO */
#ifdef CONFIG_TASK_DELAY_ACCT
struct task_delay_info {
 spinlock_t lock;
 unsigned int flags; /* Private per-task flags */
 /* For each stat XXX, add following, aligned appropriately
 *
 * struct timespec XXX_start, XXX_end;
 * u64 XXX_delay;
 * u32 XXX_count;
 *
 * Atomicity of updates to XXX_delay, XXX_count protected by
 * single lock above (split into XXX_lock if contention is an issue).
 */
 /*
 * XXX_count is incremented on every XXX operation, the delay
 * associated with the operation is added to XXX_delay.
 * XXX_delay contains the accumulated delay time in nanoseconds.
 */
 u64 blkio_start; /* Shared by blkio, swapin */
 u64 blkio_delay; /* wait for sync block io completion */
 u64 swapin_delay; /* wait for swapin block io completion */
 u32 blkio_count; /* total count of the number of sync block */
 /* io operations performed */
 u32 swapin_count; /* total count of the number of swapin block */
 /* io operations performed */
 u64 freepages_start;
 u64 freepages_delay; /* wait for memory reclaim */
 u32 freepages_count; /* total count of memory reclaim */
};
#endif /* CONFIG_TASK_DELAY_ACCT */
/* Whether sched_info bookkeeping is active: always with SCHEDSTATS,
 * runtime-controlled with delay accounting, otherwise never. */
static inline int sched_info_on(void)
{
#ifdef CONFIG_SCHEDSTATS
 return 1;
#elif defined(CONFIG_TASK_DELAY_ACCT)
 extern int delayacct_on;
 return delayacct_on;
#else
 return 0;
#endif
}
/* Idle classification; CPU_MAX_IDLE_TYPES sizes the lb_* stats arrays below. */
enum cpu_idle_type {
 CPU_IDLE,
 CPU_NOT_IDLE,
 CPU_NEWLY_IDLE,
 CPU_MAX_IDLE_TYPES
};
/*
 * Increase resolution of cpu_capacity calculations
 */
#define SCHED_CAPACITY_SHIFT 10
#define SCHED_CAPACITY_SCALE (1L << SCHED_CAPACITY_SHIFT)
/*
 * Wake-queues are lists of tasks with a pending wakeup, whose
 * callers have already marked the task as woken internally,
 * and can thus carry on. A common use case is being able to
 * do the wakeups once the corresponding user lock has been
 * released.
 *
 * We hold reference to each task in the list across the wakeup,
 * thus guaranteeing that the memory is still valid by the time
 * the actual wakeups are performed in wake_up_q().
 *
 * One per task suffices, because there's never a need for a task to be
 * in two wake queues simultaneously; it is forbidden to abandon a task
 * in a wake queue (a call to wake_up_q() _must_ follow), so if a task is
 * already in a wake queue, the wakeup will happen soon and the second
 * waker can just skip it.
 *
 * The WAKE_Q macro declares and initializes the list head.
 * wake_up_q() does NOT reinitialize the list; it's expected to be
 * called near the end of a function, where the fact that the queue is
 * not used again will be easy to see by inspection.
 *
 * Note that this can cause spurious wakeups. schedule() callers
 * must ensure the call is done inside a loop, confirming that the
 * wakeup condition has in fact occurred.
 */
struct wake_q_node {
 struct wake_q_node *next;
};
/* Singly-linked list head; lastp points at the tail's next pointer. */
struct wake_q_head {
 struct wake_q_node *first;
 struct wake_q_node **lastp;
};
/* Sentinel marking the end of the queue (distinguishable from NULL). */
#define WAKE_Q_TAIL ((struct wake_q_node *) 0x01)
#define WAKE_Q(name) \
 struct wake_q_head name = { WAKE_Q_TAIL, &name.first }
extern void wake_q_add(struct wake_q_head *head,
 struct task_struct *task);
extern void wake_up_q(struct wake_q_head *head);
/*
 * sched-domains (multiprocessor balancing) declarations:
 */
#ifdef CONFIG_SMP
#define SD_LOAD_BALANCE 0x0001 /* Do load balancing on this domain. */
#define SD_BALANCE_NEWIDLE 0x0002 /* Balance when about to become idle */
#define SD_BALANCE_EXEC 0x0004 /* Balance on exec */
#define SD_BALANCE_FORK 0x0008 /* Balance on fork, clone */
#define SD_BALANCE_WAKE 0x0010 /* Balance on wakeup */
#define SD_WAKE_AFFINE 0x0020 /* Wake task to waking CPU */
/* note: 0x0040 is currently unassigned here — TODO confirm upstream */
#define SD_SHARE_CPUCAPACITY 0x0080 /* Domain members share cpu power */
#define SD_SHARE_POWERDOMAIN 0x0100 /* Domain members share power domain */
#define SD_SHARE_PKG_RESOURCES 0x0200 /* Domain members share cpu pkg resources */
#define SD_SERIALIZE 0x0400 /* Only a single load balancing instance */
#define SD_ASYM_PACKING 0x0800 /* Place busy groups earlier in the domain */
#define SD_PREFER_SIBLING 0x1000 /* Prefer to place tasks in a sibling domain */
#define SD_OVERLAP 0x2000 /* sched_domains of this level overlap */
#define SD_NUMA 0x4000 /* cross-node balancing */
/* Default SD_* flag sets for the SMT / MC / NUMA topology levels. */
#ifdef CONFIG_SCHED_SMT
static inline int cpu_smt_flags(void)
{
 return SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES;
}
#endif
#ifdef CONFIG_SCHED_MC
static inline int cpu_core_flags(void)
{
 return SD_SHARE_PKG_RESOURCES;
}
#endif
#ifdef CONFIG_NUMA
static inline int cpu_numa_flags(void)
{
 return SD_NUMA;
}
#endif
struct sched_domain_attr {
 int relax_domain_level;
};
#define SD_ATTR_INIT (struct sched_domain_attr) { \
 .relax_domain_level = -1, \
}
extern int sched_domain_level_max;
struct sched_group;
/* One level of the load-balancing topology; forms a parent/child tree. */
struct sched_domain {
 /* These fields must be setup */
 struct sched_domain *parent; /* top domain must be null terminated */
 struct sched_domain *child; /* bottom domain must be null terminated */
 struct sched_group *groups; /* the balancing groups of the domain */
 unsigned long min_interval; /* Minimum balance interval ms */
 unsigned long max_interval; /* Maximum balance interval ms */
 unsigned int busy_factor; /* less balancing by factor if busy */
 unsigned int imbalance_pct; /* No balance until over watermark */
 unsigned int cache_nice_tries; /* Leave cache hot tasks for # tries */
 unsigned int busy_idx;
 unsigned int idle_idx;
 unsigned int newidle_idx;
 unsigned int wake_idx;
 unsigned int forkexec_idx;
 unsigned int smt_gain;
 int nohz_idle; /* NOHZ IDLE status */
 int flags; /* See SD_* */
 int level;
 /* Runtime fields. */
 unsigned long last_balance; /* init to jiffies. units in jiffies */
 unsigned int balance_interval; /* initialise to 1. units in ms. */
 unsigned int nr_balance_failed; /* initialise to 0 */
 /* idle_balance() stats */
 u64 max_newidle_lb_cost;
 unsigned long next_decay_max_lb_cost;
#ifdef CONFIG_SCHEDSTATS
 /* load_balance() stats */
 unsigned int lb_count[CPU_MAX_IDLE_TYPES];
 unsigned int lb_failed[CPU_MAX_IDLE_TYPES];
 unsigned int lb_balanced[CPU_MAX_IDLE_TYPES];
 unsigned int lb_imbalance[CPU_MAX_IDLE_TYPES];
 unsigned int lb_gained[CPU_MAX_IDLE_TYPES];
 unsigned int lb_hot_gained[CPU_MAX_IDLE_TYPES];
 unsigned int lb_nobusyg[CPU_MAX_IDLE_TYPES];
 unsigned int lb_nobusyq[CPU_MAX_IDLE_TYPES];
 /* Active load balancing */
 unsigned int alb_count;
 unsigned int alb_failed;
 unsigned int alb_pushed;
 /* SD_BALANCE_EXEC stats */
 unsigned int sbe_count;
 unsigned int sbe_balanced;
 unsigned int sbe_pushed;
 /* SD_BALANCE_FORK stats */
 unsigned int sbf_count;
 unsigned int sbf_balanced;
 unsigned int sbf_pushed;
 /* try_to_wake_up() stats */
 unsigned int ttwu_wake_remote;
 unsigned int ttwu_move_affine;
 unsigned int ttwu_move_balance;
#endif
#ifdef CONFIG_SCHED_DEBUG
 char *name;
#endif
 union {
 void *private; /* used during construction */
 struct rcu_head rcu; /* used during destruction */
 };
 unsigned int span_weight;
 /*
 * Span of all CPUs in this domain.
 *
 * NOTE: this field is variable length. (Allocated dynamically
 * by attaching extra space to the end of the structure,
 * depending on how many CPUs the kernel has booted up with)
 */
 unsigned long span[0];
};
/* View the trailing variable-length span[] bitmap as a cpumask. */
static inline struct cpumask *sched_domain_span(struct sched_domain *sd)
{
 return to_cpumask(sd->span);
}
extern void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
 struct sched_domain_attr *dattr_new);
/* Allocate an array of sched domains, for partition_sched_domains(). */
cpumask_var_t *alloc_sched_domains(unsigned int ndoms);
void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms);
bool cpus_share_cache(int this_cpu, int that_cpu);
/* Callbacks supplied per topology level: span mask and SD_* flags. */
typedef const struct cpumask *(*sched_domain_mask_f)(int cpu);
typedef int (*sched_domain_flags_f)(void);
#define SDTL_OVERLAP 0x01
struct sd_data {
 struct sched_domain **__percpu sd;
 struct sched_group **__percpu sg;
 struct sched_group_capacity **__percpu sgc;
};
/* One entry of the arch-provided topology table (see set_sched_topology). */
struct sched_domain_topology_level {
 sched_domain_mask_f mask;
 sched_domain_flags_f sd_flags;
 int flags;
 int numa_level;
 struct sd_data data;
#ifdef CONFIG_SCHED_DEBUG
 char *name;
#endif
};
extern void set_sched_topology(struct sched_domain_topology_level *tl);
extern void wake_up_if_idle(int cpu);
#ifdef CONFIG_SCHED_DEBUG
# define SD_INIT_NAME(type) .name = #type
#else
# define SD_INIT_NAME(type)
#endif
#else /* CONFIG_SMP */
struct sched_domain_attr;
/* UP stub: with a single CPU there are no domains to rebuild. */
static inline void
partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
 struct sched_domain_attr *dattr_new)
{
}
/* UP stub: a lone CPU trivially shares cache with itself. */
static inline bool cpus_share_cache(int this_cpu, int that_cpu)
{
 return true;
}
#endif /* !CONFIG_SMP */
struct io_context; /* See blkdev.h */
#ifdef ARCH_HAS_PREFETCH_SWITCH_STACK
extern void prefetch_stack(struct task_struct *t);
#else
static inline void prefetch_stack(struct task_struct *t) { }
#endif
struct audit_context; /* See audit.c */
struct mempolicy;
struct pipe_inode_info;
struct uts_namespace;
/* Scheduling weight plus its precomputed inverse (avoids runtime division). */
struct load_weight {
 unsigned long weight;
 u32 inv_weight;
};
/*
 * The load_avg/util_avg accumulates an infinite geometric series.
 * 1) load_avg factors frequency scaling into the amount of time that a
 * sched_entity is runnable on a rq into its weight. For cfs_rq, it is the
 * aggregated such weights of all runnable and blocked sched_entities.
 * 2) util_avg factors frequency and cpu scaling into the amount of time
 * that a sched_entity is running on a CPU, in the range [0..SCHED_LOAD_SCALE].
 * For cfs_rq, it is the aggregated such times of all runnable and
 * blocked sched_entities.
 * The 64 bit load_sum can:
 * 1) for cfs_rq, afford 4353082796 (=2^64/47742/88761) entities with
 * the highest weight (=88761) always runnable, we should not overflow
 * 2) for entity, support any load.weight always runnable
 */
struct sched_avg {
 u64 last_update_time, load_sum;
 u32 util_sum, period_contrib;
 unsigned long load_avg, util_avg;
};
#ifdef CONFIG_SCHEDSTATS
/* Detailed wait/sleep/migration/wakeup counters, CONFIG_SCHEDSTATS only. */
struct sched_statistics {
 u64 wait_start;
 u64 wait_max;
 u64 wait_count;
 u64 wait_sum;
 u64 iowait_count;
 u64 iowait_sum;
 u64 sleep_start;
 u64 sleep_max;
 s64 sum_sleep_runtime;
 u64 block_start;
 u64 block_max;
 u64 exec_max;
 u64 slice_max;
 u64 nr_migrations_cold;
 u64 nr_failed_migrations_affine;
 u64 nr_failed_migrations_running;
 u64 nr_failed_migrations_hot;
 u64 nr_forced_migrations;
 u64 nr_wakeups;
 u64 nr_wakeups_sync;
 u64 nr_wakeups_migrate;
 u64 nr_wakeups_local;
 u64 nr_wakeups_remote;
 u64 nr_wakeups_affine;
 u64 nr_wakeups_affine_attempts;
 u64 nr_wakeups_passive;
 u64 nr_wakeups_idle;
};
#endif
/* CFS (fair-class) scheduling state embedded in task_struct. */
struct sched_entity {
 struct load_weight load; /* for load-balancing */
 struct rb_node run_node;
 struct list_head group_node;
 unsigned int on_rq;
 u64 exec_start;
 u64 sum_exec_runtime;
 u64 vruntime;
 u64 prev_sum_exec_runtime;
 u64 nr_migrations;
#ifdef CONFIG_SCHEDSTATS
 struct sched_statistics statistics;
#endif
#ifdef CONFIG_FAIR_GROUP_SCHED
 int depth;
 struct sched_entity *parent;
 /* rq on which this entity is (to be) queued: */
 struct cfs_rq *cfs_rq;
 /* rq "owned" by this entity/group: */
 struct cfs_rq *my_q;
#endif
#ifdef CONFIG_SMP
 /* Per entity load average tracking */
 struct sched_avg avg;
#endif
};
/* Real-time (SCHED_FIFO/SCHED_RR) scheduling state. */
struct sched_rt_entity {
 struct list_head run_list;
 unsigned long timeout;
 unsigned long watchdog_stamp;
 unsigned int time_slice;
 struct sched_rt_entity *back;
#ifdef CONFIG_RT_GROUP_SCHED
 struct sched_rt_entity *parent;
 /* rq on which this entity is (to be) queued: */
 struct rt_rq *rt_rq;
 /* rq "owned" by this entity/group: */
 struct rt_rq *my_q;
#endif
};
/* SCHED_DEADLINE scheduling state (see struct sched_attr above). */
struct sched_dl_entity {
 struct rb_node rb_node;
 /*
 * Original scheduling parameters. Copied here from sched_attr
 * during sched_setattr(), they will remain the same until
 * the next sched_setattr().
 */
 u64 dl_runtime; /* maximum runtime for each instance */
 u64 dl_deadline; /* relative deadline of each instance */
 u64 dl_period; /* separation of two instances (period) */
 u64 dl_bw; /* dl_runtime / dl_deadline */
 /*
 * Actual scheduling parameters. Initialized with the values above,
 * they are continuously updated during task execution. Note that
 * the remaining runtime could be < 0 in case we are in overrun.
 */
 s64 runtime; /* remaining runtime for this instance */
 u64 deadline; /* absolute deadline for this instance */
 unsigned int flags; /* specifying the scheduler behaviour */
 /*
 * Some bool flags:
 *
 * @dl_throttled tells if we exhausted the runtime. If so, the
 * task has to wait for a replenishment to be performed at the
 * next firing of dl_timer.
 *
 * @dl_new tells if a new instance arrived. If so we must
 * start executing it with full runtime and reset its absolute
 * deadline;
 *
 * @dl_boosted tells if we are boosted due to DI. If so we are
 * outside bandwidth enforcement mechanism (but only until we
 * exit the critical section);
 *
 * @dl_yielded tells if task gave up the cpu before consuming
 * all its available runtime during the last job.
 */
 int dl_throttled, dl_new, dl_boosted, dl_yielded;
 /*
 * Bandwidth enforcement timer. Each -deadline task has its
 * own bandwidth to be enforced, thus we need one timer per task.
 */
 struct hrtimer dl_timer;
};
/*
 * Per-task RCU state flags, addressable either as individual bytes
 * or as one 32-bit word (so the whole set can be cleared at once).
 */
union rcu_special {
 struct {
  u8 blocked;
  u8 need_qs;
  u8 exp_need_qs;
  u8 pad; /* Otherwise the compiler can store garbage here. */
 } b; /* Bits. */
 u32 s; /* Set of bits. */
};
struct rcu_node;
/*
 * Index into task_struct::perf_event_ctxp[]; perf_nr_task_contexts
 * sizes that array.
 */
enum perf_event_task_context {
 perf_invalid_context = -1,
 perf_hw_context = 0,
 perf_sw_context,
 perf_nr_task_contexts,
};
/*
 * Track pages that require TLB flushes: a batch of pending TLB
 * invalidations accumulated while unmapping, flushed later as a unit
 * (see set_tlb_ubc_flush_pending()).
 */
struct tlbflush_unmap_batch {
 /*
 * Each bit set is a CPU that potentially has a TLB entry for one of
 * the PFNs being flushed. See set_tlb_ubc_flush_pending().
 */
 struct cpumask cpumask;
 /* True if any bit in cpumask is set */
 bool flush_required;
 /*
 * If true then the PTE was dirty when unmapped. The entry must be
 * flushed before IO is initiated or a stale TLB entry potentially
 * allows an update without redirtying the page.
 */
 bool writable;
};
/*
 * The core per-thread descriptor.  One instance exists for every
 * thread in the system; fields are grouped roughly by subsystem and
 * many are conditional on kernel configuration options.
 *
 * NOTE: on x86 the trailing 'thread' member is variable-sized and
 * *must* remain the last field (see the warning at the end).
 */
struct task_struct {
 volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */
 void *stack;
 atomic_t usage;
 unsigned int flags; /* per process flags, defined below */
 unsigned int ptrace;
#ifdef CONFIG_SMP
 struct llist_node wake_entry;
 int on_cpu;
 unsigned int wakee_flips;
 unsigned long wakee_flip_decay_ts;
 struct task_struct *last_wakee;
 int wake_cpu;
#endif
 int on_rq;
 int prio, static_prio, normal_prio;
 unsigned int rt_priority;
 const struct sched_class *sched_class;
 struct sched_entity se;
 struct sched_rt_entity rt;
#ifdef CONFIG_CGROUP_SCHED
 struct task_group *sched_task_group;
#endif
 struct sched_dl_entity dl;
#ifdef CONFIG_PREEMPT_NOTIFIERS
 /* list of struct preempt_notifier: */
 struct hlist_head preempt_notifiers;
#endif
#ifdef CONFIG_BLK_DEV_IO_TRACE
 unsigned int btrace_seq;
#endif
 unsigned int policy;
 int nr_cpus_allowed;
 cpumask_t cpus_allowed;
#ifdef CONFIG_PREEMPT_RCU
 int rcu_read_lock_nesting;
 union rcu_special rcu_read_unlock_special;
 struct list_head rcu_node_entry;
 struct rcu_node *rcu_blocked_node;
#endif /* #ifdef CONFIG_PREEMPT_RCU */
#ifdef CONFIG_TASKS_RCU
 unsigned long rcu_tasks_nvcsw;
 bool rcu_tasks_holdout;
 struct list_head rcu_tasks_holdout_list;
 int rcu_tasks_idle_cpu;
#endif /* #ifdef CONFIG_TASKS_RCU */
#ifdef CONFIG_SCHED_INFO
 struct sched_info sched_info;
#endif
 struct list_head tasks;
#ifdef CONFIG_SMP
 struct plist_node pushable_tasks;
 struct rb_node pushable_dl_tasks;
#endif
 struct mm_struct *mm, *active_mm;
 /* per-thread vma caching */
 u32 vmacache_seqnum;
 struct vm_area_struct *vmacache[VMACACHE_SIZE];
#if defined(SPLIT_RSS_COUNTING)
 struct task_rss_stat rss_stat;
#endif
/* task state */
 int exit_state;
 int exit_code, exit_signal;
 int pdeath_signal; /* The signal sent when the parent dies */
 unsigned long jobctl; /* JOBCTL_*, siglock protected */
 /* Used for emulating ABI behavior of previous Linux versions */
 unsigned int personality;
 unsigned in_execve:1; /* Tell the LSMs that the process is doing an
     * execve */
 unsigned in_iowait:1;
 /* Revert to default priority/policy when forking */
 unsigned sched_reset_on_fork:1;
 unsigned sched_contributes_to_load:1;
 unsigned sched_migrated:1;
#ifdef CONFIG_MEMCG
 unsigned memcg_may_oom:1;
#endif
#ifdef CONFIG_MEMCG_KMEM
 unsigned memcg_kmem_skip_account:1;
#endif
#ifdef CONFIG_COMPAT_BRK
 unsigned brk_randomized:1;
#endif
 unsigned long atomic_flags; /* Flags needing atomic access. */
 struct restart_block restart_block;
 pid_t pid;
 pid_t tgid;
#ifdef CONFIG_CC_STACKPROTECTOR
 /* Canary value for the -fstack-protector gcc feature */
 unsigned long stack_canary;
#endif
 /*
 * pointers to (original) parent process, youngest child, younger sibling,
 * older sibling, respectively. (p->father can be replaced with
 * p->real_parent->pid)
 */
 struct task_struct __rcu *real_parent; /* real parent process */
 struct task_struct __rcu *parent; /* recipient of SIGCHLD, wait4() reports */
 /*
 * children/sibling forms the list of my natural children
 */
 struct list_head children; /* list of my children */
 struct list_head sibling; /* linkage in my parent's children list */
 struct task_struct *group_leader; /* threadgroup leader */
 /*
 * ptraced is the list of tasks this task is using ptrace on.
 * This includes both natural children and PTRACE_ATTACH targets.
 * p->ptrace_entry is p's link on the p->parent->ptraced list.
 */
 struct list_head ptraced;
 struct list_head ptrace_entry;
 /* PID/PID hash table linkage. */
 struct pid_link pids[PIDTYPE_MAX];
 struct list_head thread_group;
 struct list_head thread_node;
 struct completion *vfork_done; /* for vfork() */
 int __user *set_child_tid; /* CLONE_CHILD_SETTID */
 int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
 cputime_t utime, stime, utimescaled, stimescaled;
 cputime_t gtime;
 struct prev_cputime prev_cputime;
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
 seqlock_t vtime_seqlock;
 unsigned long long vtime_snap;
 enum {
  VTIME_SLEEPING = 0,
  VTIME_USER,
  VTIME_SYS,
 } vtime_snap_whence;
#endif
 unsigned long nvcsw, nivcsw; /* context switch counts */
 u64 start_time; /* monotonic time in nsec */
 u64 real_start_time; /* boot based time in nsec */
/* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */
 unsigned long min_flt, maj_flt;
 struct task_cputime cputime_expires;
 struct list_head cpu_timers[3];
/* process credentials */
 const struct cred __rcu *real_cred; /* objective and real subjective task
     * credentials (COW) */
 const struct cred __rcu *cred; /* effective (overridable) subjective task
     * credentials (COW) */
 char comm[TASK_COMM_LEN]; /* executable name excluding path
     - access with [gs]et_task_comm (which lock
     it with task_lock())
     - initialized normally by setup_new_exec */
/* file system info */
 struct nameidata *nameidata;
#ifdef CONFIG_SYSVIPC
/* ipc stuff */
 struct sysv_sem sysvsem;
 struct sysv_shm sysvshm;
#endif
#ifdef CONFIG_DETECT_HUNG_TASK
/* hung task detection */
 unsigned long last_switch_count;
#endif
/* filesystem information */
 struct fs_struct *fs;
/* open file information */
 struct files_struct *files;
/* namespaces */
 struct nsproxy *nsproxy;
/* signal handlers */
 struct signal_struct *signal;
 struct sighand_struct *sighand;
 sigset_t blocked, real_blocked;
 sigset_t saved_sigmask; /* restored if set_restore_sigmask() was used */
 struct sigpending pending;
 unsigned long sas_ss_sp;
 size_t sas_ss_size;
 struct callback_head *task_works;
 struct audit_context *audit_context;
#ifdef CONFIG_AUDITSYSCALL
 kuid_t loginuid;
 unsigned int sessionid;
#endif
 struct seccomp seccomp;
/* Thread group tracking */
 u32 parent_exec_id;
 u32 self_exec_id;
/* Protection of (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed,
 * mempolicy */
 spinlock_t alloc_lock;
 /* Protection of the PI data structures: */
 raw_spinlock_t pi_lock;
 struct wake_q_node wake_q;
#ifdef CONFIG_RT_MUTEXES
 /* PI waiters blocked on a rt_mutex held by this task */
 struct rb_root pi_waiters;
 struct rb_node *pi_waiters_leftmost;
 /* Deadlock detection and priority inheritance handling */
 struct rt_mutex_waiter *pi_blocked_on;
#endif
#ifdef CONFIG_DEBUG_MUTEXES
 /* mutex deadlock detection */
 struct mutex_waiter *blocked_on;
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
 unsigned int irq_events;
 unsigned long hardirq_enable_ip;
 unsigned long hardirq_disable_ip;
 unsigned int hardirq_enable_event;
 unsigned int hardirq_disable_event;
 int hardirqs_enabled;
 int hardirq_context;
 unsigned long softirq_disable_ip;
 unsigned long softirq_enable_ip;
 unsigned int softirq_disable_event;
 unsigned int softirq_enable_event;
 int softirqs_enabled;
 int softirq_context;
#endif
#ifdef CONFIG_LOCKDEP
# define MAX_LOCK_DEPTH 48UL
 u64 curr_chain_key;
 int lockdep_depth;
 unsigned int lockdep_recursion;
 struct held_lock held_locks[MAX_LOCK_DEPTH];
 gfp_t lockdep_reclaim_gfp;
#endif
/* journalling filesystem info */
 void *journal_info;
/* stacked block device info */
 struct bio_list *bio_list;
#ifdef CONFIG_BLOCK
/* stack plugging */
 struct blk_plug *plug;
#endif
/* VM state */
 struct reclaim_state *reclaim_state;
 struct backing_dev_info *backing_dev_info;
 struct io_context *io_context;
 unsigned long ptrace_message;
 siginfo_t *last_siginfo; /* For ptrace use. */
 struct task_io_accounting ioac;
#if defined(CONFIG_TASK_XACCT)
 u64 acct_rss_mem1; /* accumulated rss usage */
 u64 acct_vm_mem1; /* accumulated virtual memory usage */
 cputime_t acct_timexpd; /* stime + utime since last update */
#endif
#ifdef CONFIG_CPUSETS
 nodemask_t mems_allowed; /* Protected by alloc_lock */
 seqcount_t mems_allowed_seq; /* Seqence no to catch updates */
 int cpuset_mem_spread_rotor;
 int cpuset_slab_spread_rotor;
#endif
#ifdef CONFIG_CGROUPS
 /* Control Group info protected by css_set_lock */
 struct css_set __rcu *cgroups;
 /* cg_list protected by css_set_lock and tsk->alloc_lock */
 struct list_head cg_list;
#endif
#ifdef CONFIG_FUTEX
 struct robust_list_head __user *robust_list;
#ifdef CONFIG_COMPAT
 struct compat_robust_list_head __user *compat_robust_list;
#endif
 struct list_head pi_state_list;
 struct futex_pi_state *pi_state_cache;
#endif
#ifdef CONFIG_PERF_EVENTS
 struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts];
 struct mutex perf_event_mutex;
 struct list_head perf_event_list;
#endif
#ifdef CONFIG_DEBUG_PREEMPT
 unsigned long preempt_disable_ip;
#endif
#ifdef CONFIG_NUMA
 struct mempolicy *mempolicy; /* Protected by alloc_lock */
 short il_next;
 short pref_node_fork;
#endif
#ifdef CONFIG_NUMA_BALANCING
 int numa_scan_seq;
 unsigned int numa_scan_period;
 unsigned int numa_scan_period_max;
 int numa_preferred_nid;
 unsigned long numa_migrate_retry;
 u64 node_stamp; /* migration stamp */
 u64 last_task_numa_placement;
 u64 last_sum_exec_runtime;
 struct callback_head numa_work;
 struct list_head numa_entry;
 struct numa_group *numa_group;
 /*
 * numa_faults is an array split into four regions:
 * faults_memory, faults_cpu, faults_memory_buffer, faults_cpu_buffer
 * in this precise order.
 *
 * faults_memory: Exponential decaying average of faults on a per-node
 * basis. Scheduling placement decisions are made based on these
 * counts. The values remain static for the duration of a PTE scan.
 * faults_cpu: Track the nodes the process was running on when a NUMA
 * hinting fault was incurred.
 * faults_memory_buffer and faults_cpu_buffer: Record faults per node
 * during the current scan window. When the scan completes, the counts
 * in faults_memory and faults_cpu decay and these values are copied.
 */
 unsigned long *numa_faults;
 unsigned long total_numa_faults;
 /*
 * numa_faults_locality tracks if faults recorded during the last
 * scan window were remote/local or failed to migrate. The task scan
 * period is adapted based on the locality of the faults with different
 * weights depending on whether they were shared or private faults
 */
 unsigned long numa_faults_locality[3];
 unsigned long numa_pages_migrated;
#endif /* CONFIG_NUMA_BALANCING */
#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
 struct tlbflush_unmap_batch tlb_ubc;
#endif
 struct rcu_head rcu;
 /*
 * cache last used pipe for splice
 */
 struct pipe_inode_info *splice_pipe;
 struct page_frag task_frag;
#ifdef CONFIG_TASK_DELAY_ACCT
 struct task_delay_info *delays;
#endif
#ifdef CONFIG_FAULT_INJECTION
 int make_it_fail;
#endif
 /*
 * when (nr_dirtied >= nr_dirtied_pause), it's time to call
 * balance_dirty_pages() for some dirty throttling pause
 */
 int nr_dirtied;
 int nr_dirtied_pause;
 unsigned long dirty_paused_when; /* start of a write-and-pause period */
#ifdef CONFIG_LATENCYTOP
 int latency_record_count;
 struct latency_record latency_record[LT_SAVECOUNT];
#endif
 /*
 * time slack values; these are used to round up poll() and
 * select() etc timeout values. These are in nanoseconds.
 */
 unsigned long timer_slack_ns;
 unsigned long default_timer_slack_ns;
#ifdef CONFIG_KASAN
 unsigned int kasan_depth;
#endif
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
 /* Index of current stored address in ret_stack */
 int curr_ret_stack;
 /* Stack of return addresses for return function tracing */
 struct ftrace_ret_stack *ret_stack;
 /* time stamp for last schedule */
 unsigned long long ftrace_timestamp;
 /*
 * Number of functions that haven't been traced
 * because of depth overrun.
 */
 atomic_t trace_overrun;
 /* Pause for the tracing */
 atomic_t tracing_graph_pause;
#endif
#ifdef CONFIG_TRACING
 /* state flags for use by tracers */
 unsigned long trace;
 /* bitmask and counter of trace recursion */
 unsigned long trace_recursion;
#endif /* CONFIG_TRACING */
#ifdef CONFIG_MEMCG
 struct mem_cgroup *memcg_in_oom;
 gfp_t memcg_oom_gfp_mask;
 int memcg_oom_order;
 /* number of pages to reclaim on returning to userland */
 unsigned int memcg_nr_pages_over_high;
#endif
#ifdef CONFIG_UPROBES
 struct uprobe_task *utask;
#endif
#if defined(CONFIG_BCACHE) || defined(CONFIG_BCACHE_MODULE)
 unsigned int sequential_io;
 unsigned int sequential_io_avg;
#endif
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
 unsigned long task_state_change;
#endif
 int pagefault_disabled;
/* CPU-specific state of this task */
 struct thread_struct thread;
 /*
 * WARNING: on x86, 'thread_struct' contains a variable-sized
 * structure. It *MUST* be at the end of 'task_struct'.
 *
 * Do not put anything below here!
 */
};
#ifdef CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT
extern int arch_task_struct_size __read_mostly;
#else
# define arch_task_struct_size (sizeof(struct task_struct))
#endif
/* Future-safe accessor for struct task_struct's cpus_allowed. */
#define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
/* NUMA hinting fault flags, passed to task_numa_fault() below. */
#define TNF_MIGRATED 0x01
#define TNF_NO_GROUP 0x02
#define TNF_SHARED 0x04
#define TNF_FAULT_LOCAL 0x08
#define TNF_MIGRATE_FAIL 0x10
#ifdef CONFIG_NUMA_BALANCING
extern void task_numa_fault(int last_node, int node, int pages, int flags);
extern pid_t task_numa_group_id(struct task_struct *p);
extern void set_numabalancing_state(bool enabled);
extern void task_numa_free(struct task_struct *p);
extern bool should_numa_migrate_memory(struct task_struct *p, struct page *page,
int src_nid, int dst_cpu);
#else
/* !CONFIG_NUMA_BALANCING: NUMA fault accounting is compiled out. */
static inline void task_numa_fault(int last_node, int node, int pages,
       int flags)
{
}
static inline pid_t task_numa_group_id(struct task_struct *p)
{
 return 0;
}
static inline void set_numabalancing_state(bool enabled)
{
}
static inline void task_numa_free(struct task_struct *p)
{
}
static inline bool should_numa_migrate_memory(struct task_struct *p,
     struct page *page, int src_nid, int dst_cpu)
{
 return true;
}
#endif
/* Return the struct pid for @task's own (per-thread) PID. */
static inline struct pid *task_pid(struct task_struct *task)
{
 return task->pids[PIDTYPE_PID].pid;
}
/* Return the struct pid of @task's thread group (its group leader's PID). */
static inline struct pid *task_tgid(struct task_struct *task)
{
 return task->group_leader->pids[PIDTYPE_PID].pid;
}
/*
* Without tasklist or rcu lock it is not safe to dereference
* the result of task_pgrp/task_session even if task == current,
* we can race with another thread doing sys_setsid/sys_setpgid.
*/
/* Return the struct pid of @task's process group (see locking note above). */
static inline struct pid *task_pgrp(struct task_struct *task)
{
 return task->group_leader->pids[PIDTYPE_PGID].pid;
}
/* Return the struct pid of @task's session (see locking note above). */
static inline struct pid *task_session(struct task_struct *task)
{
 return task->group_leader->pids[PIDTYPE_SID].pid;
}
struct pid_namespace;
/*
* the helpers to get the task's different pids as they are seen
* from various namespaces
*
* task_xid_nr() : global id, i.e. the id seen from the init namespace;
* task_xid_vnr() : virtual id, i.e. the id seen from the pid namespace of
* current.
* task_xid_nr_ns() : id seen from the ns specified;
*
* set_task_vxid() : assigns a virtual id to a task;
*
* see also pid_nr() etc in include/linux/pid.h
*/
pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
struct pid_namespace *ns);
/* Global pid of @tsk as seen from the init namespace. */
static inline pid_t task_pid_nr(struct task_struct *tsk)
{
 return tsk->pid;
}
/* Pid of @tsk as seen from pid namespace @ns. */
static inline pid_t task_pid_nr_ns(struct task_struct *tsk,
     struct pid_namespace *ns)
{
 return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns);
}
/* Virtual pid of @tsk, i.e. as seen from current's pid namespace. */
static inline pid_t task_pid_vnr(struct task_struct *tsk)
{
 return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL);
}
/* Global thread-group id of @tsk as seen from the init namespace. */
static inline pid_t task_tgid_nr(struct task_struct *tsk)
{
 return tsk->tgid;
}
pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);
/* Virtual thread-group id of @tsk (current's pid namespace view). */
static inline pid_t task_tgid_vnr(struct task_struct *tsk)
{
 return pid_vnr(task_tgid(tsk));
}
static inline int pid_alive(const struct task_struct *p);
/*
 * Tgid of @tsk's real parent as seen from namespace @ns, or 0 if @tsk
 * is already dead (pid_alive() fails).  The RCU read lock protects the
 * real_parent dereference against concurrent reparenting.
 */
static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns)
{
 pid_t pid = 0;
 rcu_read_lock();
 if (pid_alive(tsk))
  pid = task_tgid_nr_ns(rcu_dereference(tsk->real_parent), ns);
 rcu_read_unlock();
 return pid;
}
/* Parent tgid of @tsk as seen from the init namespace. */
static inline pid_t task_ppid_nr(const struct task_struct *tsk)
{
 return task_ppid_nr_ns(tsk, &init_pid_ns);
}
/* Process group id of @tsk as seen from pid namespace @ns. */
static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk,
     struct pid_namespace *ns)
{
 return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);
}
/* Virtual process group id of @tsk (current's pid namespace view). */
static inline pid_t task_pgrp_vnr(struct task_struct *tsk)
{
 return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL);
}
/* Session id of @tsk as seen from pid namespace @ns. */
static inline pid_t task_session_nr_ns(struct task_struct *tsk,
     struct pid_namespace *ns)
{
 return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns);
}
/* Virtual session id of @tsk (current's pid namespace view). */
static inline pid_t task_session_vnr(struct task_struct *tsk)
{
 return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL);
}
/* obsolete, do not use — call task_pgrp_nr_ns() with an explicit ns instead */
static inline pid_t task_pgrp_nr(struct task_struct *tsk)
{
 return task_pgrp_nr_ns(tsk, &init_pid_ns);
}
/**
* pid_alive - check that a task structure is not stale
* @p: Task structure to be checked.
*
* Test if a process is not yet dead (at most zombie state)
* If pid_alive fails, then pointers within the task structure
* can be stale and must not be dereferenced.
*
* Return: 1 if the process is alive. 0 otherwise.
*/
static inline int pid_alive(const struct task_struct *p)
{
 /* NOTE(review): relies on the PIDTYPE_PID link being cleared when the
  * task is detached — confirm against the unhash/release path. */
 return p->pids[PIDTYPE_PID].pid != NULL;
}
/**
* is_global_init - check if a task structure is init
* @tsk: Task structure to be checked.
*
* Check if a task structure is the first user space task the kernel created.
*
* Return: 1 if the task structure is init. 0 otherwise.
*/
static inline int is_global_init(struct task_struct *tsk)
{
 /* init is, by definition, the task with global pid 1 */
 return tsk->pid == 1;
}
extern struct pid *cad_pid;
extern void free_task(struct task_struct *tsk);
/* Take a reference on @tsk; drop it with put_task_struct(). */
#define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)
extern void __put_task_struct(struct task_struct *t);
/* Drop a reference on @t, freeing it via __put_task_struct() on the last. */
static inline void put_task_struct(struct task_struct *t)
{
 if (atomic_dec_and_test(&t->usage))
  __put_task_struct(t);
}
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
extern void task_cputime(struct task_struct *t,
cputime_t *utime, cputime_t *stime);
extern void task_cputime_scaled(struct task_struct *t,
cputime_t *utimescaled, cputime_t *stimescaled);
extern cputime_t task_gtime(struct task_struct *t);
#else
/*
 * !CONFIG_VIRT_CPU_ACCOUNTING_GEN: report the raw utime/stime fields.
 * Either output pointer may be NULL if the caller does not need it.
 */
static inline void task_cputime(struct task_struct *t,
    cputime_t *utime, cputime_t *stime)
{
 if (utime)
  *utime = t->utime;
 if (stime)
  *stime = t->stime;
}
/*
 * Scaled variant of task_cputime(); output pointers may be NULL.
 */
static inline void task_cputime_scaled(struct task_struct *t,
           cputime_t *utimescaled,
           cputime_t *stimescaled)
{
 if (utimescaled)
  *utimescaled = t->utimescaled;
 if (stimescaled)
  *stimescaled = t->stimescaled;
}
/* Report the cached guest-time field directly. */
static inline cputime_t task_gtime(struct task_struct *t)
{
 return t->gtime;
}
#endif
extern void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st);
extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st);
/*
 * Per process flags
 *
 * These bits live in task_struct::flags; see the access rules in the
 * comment that follows the definitions.
 */
#define PF_EXITING 0x00000004 /* getting shut down */
#define PF_EXITPIDONE 0x00000008 /* pi exit done on shut down */
#define PF_VCPU 0x00000010 /* I'm a virtual CPU */
#define PF_WQ_WORKER 0x00000020 /* I'm a workqueue worker */
#define PF_FORKNOEXEC 0x00000040 /* forked but didn't exec */
#define PF_MCE_PROCESS 0x00000080 /* process policy on mce errors */
#define PF_SUPERPRIV 0x00000100 /* used super-user privileges */
#define PF_DUMPCORE 0x00000200 /* dumped core */
#define PF_SIGNALED 0x00000400 /* killed by a signal */
#define PF_MEMALLOC 0x00000800 /* Allocating memory */
#define PF_NPROC_EXCEEDED 0x00001000 /* set_user noticed that RLIMIT_NPROC was exceeded */
#define PF_USED_MATH 0x00002000 /* if unset the fpu must be initialized before use */
#define PF_USED_ASYNC 0x00004000 /* used async_schedule*(), used by module init */
#define PF_NOFREEZE 0x00008000 /* this thread should not be frozen */
#define PF_FROZEN 0x00010000 /* frozen for system suspend */
#define PF_FSTRANS 0x00020000 /* inside a filesystem transaction */
#define PF_KSWAPD 0x00040000 /* I am kswapd */
#define PF_MEMALLOC_NOIO 0x00080000 /* Allocating memory without IO involved */
#define PF_LESS_THROTTLE 0x00100000 /* Throttle me less: I clean memory */
#define PF_KTHREAD 0x00200000 /* I am a kernel thread */
#define PF_RANDOMIZE 0x00400000 /* randomize virtual address space */
#define PF_SWAPWRITE 0x00800000 /* Allowed to write to swap */
#define PF_NO_SETAFFINITY 0x04000000 /* Userland is not allowed to meddle with cpus_allowed */
#define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */
#define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */
#define PF_FREEZER_SKIP 0x40000000 /* Freezer should not count it as freezable */
#define PF_SUSPEND_TASK 0x80000000 /* this thread called freeze_processes and should not be frozen */
/*
* Only the _current_ task can read/write to tsk->flags, but other
* tasks can access tsk->flags in readonly mode for example
* with tsk_used_math (like during threaded core dumping).
* There is however an exception to this rule during ptrace
* or during fork: the ptracer task is allowed to write to the
* child->flags of its traced child (same goes for fork, the parent
* can write to the child->flags), because we're guaranteed the
* child is not running and in turn not changing child->flags
* at the same time the parent does it.
*/
/*
 * Helpers for manipulating PF_USED_MATH (FPU state initialized) on a
 * stopped child or on current; see the flag-access rules above.
 */
#define clear_stopped_child_used_math(child) do { (child)->flags &= ~PF_USED_MATH; } while (0)
#define set_stopped_child_used_math(child) do { (child)->flags |= PF_USED_MATH; } while (0)
#define clear_used_math() clear_stopped_child_used_math(current)
#define set_used_math() set_stopped_child_used_math(current)
#define conditional_stopped_child_used_math(condition, child) \
 do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0)
#define conditional_used_math(condition) \
 conditional_stopped_child_used_math(condition, current)
#define copy_to_stopped_child_used_math(child) \
 do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0)
/* NOTE: this will return 0 or PF_USED_MATH, it will never return 1 */
#define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
#define used_math() tsk_used_math(current)
/* __GFP_IO isn't allowed if PF_MEMALLOC_NOIO is set in current->flags
 * __GFP_FS is also cleared as it implies __GFP_IO.
 */
/* Filter @flags through current's PF_MEMALLOC_NOIO setting. */
static inline gfp_t memalloc_noio_flags(gfp_t flags)
{
 if (unlikely(current->flags & PF_MEMALLOC_NOIO))
  flags &= ~(__GFP_IO | __GFP_FS);
 return flags;
}
/*
 * Set PF_MEMALLOC_NOIO on current and return its previous value (as the
 * raw flag bit), to be handed back to memalloc_noio_restore().
 */
static inline unsigned int memalloc_noio_save(void)
{
 unsigned int flags = current->flags & PF_MEMALLOC_NOIO;
 current->flags |= PF_MEMALLOC_NOIO;
 return flags;
}
/* Restore PF_MEMALLOC_NOIO to the state saved by memalloc_noio_save(). */
static inline void memalloc_noio_restore(unsigned int flags)
{
 current->flags = (current->flags & ~PF_MEMALLOC_NOIO) | flags;
}
/* Per-process atomic flags, stored in task_struct::atomic_flags and
 * accessed only through the TASK_PFA_* generated helpers below. */
#define PFA_NO_NEW_PRIVS 0 /* May not gain new privileges. */
#define PFA_SPREAD_PAGE 1 /* Spread page cache over cpuset */
#define PFA_SPREAD_SLAB 2 /* Spread some slab caches over cpuset */
/* Generators for task_<func>() / task_set_<func>() / task_clear_<func>() */
#define TASK_PFA_TEST(name, func) \
 static inline bool task_##func(struct task_struct *p) \
 { return test_bit(PFA_##name, &p->atomic_flags); }
#define TASK_PFA_SET(name, func) \
 static inline void task_set_##func(struct task_struct *p) \
 { set_bit(PFA_##name, &p->atomic_flags); }
#define TASK_PFA_CLEAR(name, func) \
 static inline void task_clear_##func(struct task_struct *p) \
 { clear_bit(PFA_##name, &p->atomic_flags); }
TASK_PFA_TEST(NO_NEW_PRIVS, no_new_privs)
TASK_PFA_SET(NO_NEW_PRIVS, no_new_privs)
TASK_PFA_TEST(SPREAD_PAGE, spread_page)
TASK_PFA_SET(SPREAD_PAGE, spread_page)
TASK_PFA_CLEAR(SPREAD_PAGE, spread_page)
TASK_PFA_TEST(SPREAD_SLAB, spread_slab)
TASK_PFA_SET(SPREAD_SLAB, spread_slab)
TASK_PFA_CLEAR(SPREAD_SLAB, spread_slab)
/*
 * task->jobctl flags
 *
 * The low 16 bits hold the signr of the last group stop
 * (JOBCTL_STOP_SIGMASK); the boolean flags start at bit 16.
 */
#define JOBCTL_STOP_SIGMASK 0xffff /* signr of the last group stop */
#define JOBCTL_STOP_DEQUEUED_BIT 16 /* stop signal dequeued */
#define JOBCTL_STOP_PENDING_BIT 17 /* task should stop for group stop */
#define JOBCTL_STOP_CONSUME_BIT 18 /* consume group stop count */
#define JOBCTL_TRAP_STOP_BIT 19 /* trap for STOP */
#define JOBCTL_TRAP_NOTIFY_BIT 20 /* trap for NOTIFY */
#define JOBCTL_TRAPPING_BIT 21 /* switching to TRACED */
#define JOBCTL_LISTENING_BIT 22 /* ptracer is listening for events */
#define JOBCTL_STOP_DEQUEUED (1UL << JOBCTL_STOP_DEQUEUED_BIT)
#define JOBCTL_STOP_PENDING (1UL << JOBCTL_STOP_PENDING_BIT)
#define JOBCTL_STOP_CONSUME (1UL << JOBCTL_STOP_CONSUME_BIT)
#define JOBCTL_TRAP_STOP (1UL << JOBCTL_TRAP_STOP_BIT)
#define JOBCTL_TRAP_NOTIFY (1UL << JOBCTL_TRAP_NOTIFY_BIT)
#define JOBCTL_TRAPPING (1UL << JOBCTL_TRAPPING_BIT)
#define JOBCTL_LISTENING (1UL << JOBCTL_LISTENING_BIT)
#define JOBCTL_TRAP_MASK (JOBCTL_TRAP_STOP | JOBCTL_TRAP_NOTIFY)
#define JOBCTL_PENDING_MASK (JOBCTL_STOP_PENDING | JOBCTL_TRAP_MASK)
extern bool task_set_jobctl_pending(struct task_struct *task,
unsigned long mask);
extern void task_clear_jobctl_trapping(struct task_struct *task);
extern void task_clear_jobctl_pending(struct task_struct *task,
unsigned long mask);
/*
 * Reset the RCU-related fields of a newly copied task so the child
 * starts with no inherited RCU reader or holdout state.
 */
static inline void rcu_copy_process(struct task_struct *p)
{
#ifdef CONFIG_PREEMPT_RCU
 p->rcu_read_lock_nesting = 0;
 p->rcu_read_unlock_special.s = 0;
 p->rcu_blocked_node = NULL;
 INIT_LIST_HEAD(&p->rcu_node_entry);
#endif /* #ifdef CONFIG_PREEMPT_RCU */
#ifdef CONFIG_TASKS_RCU
 p->rcu_tasks_holdout = false;
 INIT_LIST_HEAD(&p->rcu_tasks_holdout_list);
 p->rcu_tasks_idle_cpu = -1;
#endif /* #ifdef CONFIG_TASKS_RCU */
}
/*
 * Restore the flag bits selected by @flags in @task->flags to the
 * values they carry in @orig_flags; every other bit is left untouched.
 */
static inline void tsk_restore_flags(struct task_struct *task,
         unsigned long orig_flags, unsigned long flags)
{
 task->flags = (task->flags & ~flags) | (orig_flags & flags);
}
extern int cpuset_cpumask_can_shrink(const struct cpumask *cur,
const struct cpumask *trial);
extern int task_can_attach(struct task_struct *p,
const struct cpumask *cs_cpus_allowed);
#ifdef CONFIG_SMP
extern void do_set_cpus_allowed(struct task_struct *p,
const struct cpumask *new_mask);
extern int set_cpus_allowed_ptr(struct task_struct *p,
const struct cpumask *new_mask);
#else
/* !CONFIG_SMP stub: there is nothing to update on a single CPU. */
static inline void do_set_cpus_allowed(struct task_struct *p,
          const struct cpumask *new_mask)
{
}
/*
 * !CONFIG_SMP stub: only CPU 0 exists, so the requested mask is valid
 * exactly when it contains CPU 0.
 */
static inline int set_cpus_allowed_ptr(struct task_struct *p,
           const struct cpumask *new_mask)
{
 return cpumask_test_cpu(0, new_mask) ? 0 : -EINVAL;
}
#endif
#ifdef CONFIG_NO_HZ_COMMON
void calc_load_enter_idle(void);
void calc_load_exit_idle(void);
#else
/* !CONFIG_NO_HZ_COMMON stubs. */
static inline void calc_load_enter_idle(void) { }
static inline void calc_load_exit_idle(void) { }
#endif /* CONFIG_NO_HZ_COMMON */
/*
* Do not use outside of architecture code which knows its limitations.
*
* sched_clock() has no promise of monotonicity or bounded drift between
* CPUs, use (which you should not) requires disabling IRQs.
*
* Please use one of the three interfaces below.
*/
extern unsigned long long notrace sched_clock(void);
/*
* See the comment in kernel/sched/clock.c
*/
extern u64 cpu_clock(int cpu);
extern u64 local_clock(void);
extern u64 running_clock(void);
extern u64 sched_clock_cpu(int cpu);
extern void sched_clock_init(void);
#ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
/* sched_clock is assumed stable here (no CONFIG_HAVE_UNSTABLE_SCHED_CLOCK),
 * so these events need no handling. */
static inline void sched_clock_tick(void)
{
}
static inline void sched_clock_idle_sleep_event(void)
{
}
static inline void sched_clock_idle_wakeup_event(u64 delta_ns)
{
}
#else
/*
* Architectures can set this to 1 if they have specified
* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK in their arch Kconfig,
* but then during bootup it turns out that sched_clock()
* is reliable after all:
*/
extern int sched_clock_stable(void);
extern void set_sched_clock_stable(void);
extern void clear_sched_clock_stable(void);
extern void sched_clock_tick(void);
extern void sched_clock_idle_sleep_event(void);
extern void sched_clock_idle_wakeup_event(u64 delta_ns);
#endif
#ifdef CONFIG_IRQ_TIME_ACCOUNTING
/*
* An i/f to runtime opt-in for irq time accounting based off of sched_clock.
* The reason for this explicit opt-in is not to have perf penalty with
* slow sched_clocks.
*/
extern void enable_sched_clock_irqtime(void);
extern void disable_sched_clock_irqtime(void);
#else
/* !CONFIG_IRQ_TIME_ACCOUNTING stubs. */
static inline void enable_sched_clock_irqtime(void) {}
static inline void disable_sched_clock_irqtime(void) {}
#endif
extern unsigned long long
task_sched_runtime(struct task_struct *task);
/* sched_exec is called by processes performing an exec */
#ifdef CONFIG_SMP
extern void sched_exec(void);
#else
#define sched_exec() {} /* !SMP: nothing to do */
#endif
extern void sched_clock_idle_sleep_event(void);
extern void sched_clock_idle_wakeup_event(u64 delta_ns);
#ifdef CONFIG_HOTPLUG_CPU
extern void idle_task_exit(void);
#else
/* !CONFIG_HOTPLUG_CPU stub: nothing to do. */
static inline void idle_task_exit(void) {}
#endif
#if defined(CONFIG_NO_HZ_COMMON) && defined(CONFIG_SMP)
extern void wake_up_nohz_cpu(int cpu);
#else
/* Stub without NO_HZ_COMMON+SMP: no tickless remote CPUs to wake. */
static inline void wake_up_nohz_cpu(int cpu) { }
#endif
#ifdef CONFIG_NO_HZ_FULL
extern bool sched_can_stop_tick(void);
extern u64 scheduler_tick_max_deferment(void);
#else
/* !CONFIG_NO_HZ_FULL: the tick can never be stopped. */
static inline bool sched_can_stop_tick(void) { return false; }
#endif
#ifdef CONFIG_SCHED_AUTOGROUP
extern void sched_autogroup_create_attach(struct task_struct *p);
extern void sched_autogroup_detach(struct task_struct *p);
extern void sched_autogroup_fork(struct signal_struct *sig);
extern void sched_autogroup_exit(struct signal_struct *sig);
#ifdef CONFIG_PROC_FS
extern void proc_sched_autogroup_show_task(struct task_struct *p, struct seq_file *m);
extern int proc_sched_autogroup_set_nice(struct task_struct *p, int nice);
#endif
#else
/* !CONFIG_SCHED_AUTOGROUP stubs. */
static inline void sched_autogroup_create_attach(struct task_struct *p) { }
static inline void sched_autogroup_detach(struct task_struct *p) { }
static inline void sched_autogroup_fork(struct signal_struct *sig) { }
static inline void sched_autogroup_exit(struct signal_struct *sig) { }
#endif
extern int yield_to(struct task_struct *p, bool preempt);
extern void set_user_nice(struct task_struct *p, long nice);
extern int task_prio(const struct task_struct *p);
/**
* task_nice - return the nice value of a given task.
* @p: the task in question.
*
* Return: The nice value [ -20 ... 0 ... 19 ].
*/
static inline int task_nice(const struct task_struct *p)
{
 /* static_prio encodes the nice value; convert back */
 return PRIO_TO_NICE((p)->static_prio);
}
extern int can_nice(const struct task_struct *p, const int nice);
extern int task_curr(const struct task_struct *p);
extern int idle_cpu(int cpu);
extern int sched_setscheduler(struct task_struct *, int,
const struct sched_param *);
extern int sched_setscheduler_nocheck(struct task_struct *, int,
const struct sched_param *);
extern int sched_setattr(struct task_struct *,
const struct sched_attr *);
extern struct task_struct *idle_task(int cpu);
/**
* is_idle_task - is the specified task an idle task?
* @p: the task in question.
*
* Return: 1 if @p is an idle task. 0 otherwise.
*/
static inline bool is_idle_task(const struct task_struct *p)
{
	/* The idle task is the only task whose PID is 0. */
	return !p->pid;
}
extern struct task_struct *curr_task(int cpu);
extern void set_curr_task(int cpu, struct task_struct *p);
void yield(void);
union thread_union {
struct thread_info thread_info;
unsigned long stack[THREAD_SIZE/sizeof(long)];
};
#ifndef __HAVE_ARCH_KSTACK_END
static inline int kstack_end(void *addr)
{
/* Reliable end of stack detection:
* Some APM bios versions misalign the stack
*/
return !(((unsigned long)addr+sizeof(void*)-1) & (THREAD_SIZE-sizeof(void*)));
}
#endif
extern union thread_union init_thread_union;
extern struct task_struct init_task;
extern struct mm_struct init_mm;
extern struct pid_namespace init_pid_ns;
/*
* find a task by one of its numerical ids
*
* find_task_by_pid_ns():
* finds a task by its pid in the specified namespace
* find_task_by_vpid():
* finds a task by its virtual pid
*
* see also find_vpid() etc in include/linux/pid.h
*/
extern struct task_struct *find_task_by_vpid(pid_t nr);
extern struct task_struct *find_task_by_pid_ns(pid_t nr,
struct pid_namespace *ns);
/* per-UID process charging. */
extern struct user_struct * alloc_uid(kuid_t);
/*
 * get_uid - take a reference on a per-user accounting structure.
 *
 * Increments @u's reference count and returns @u so the call can be
 * chained.  Pair with free_uid() to drop the reference.
 */
static inline struct user_struct *get_uid(struct user_struct *u)
{
atomic_inc(&u->__count);
return u;
}
extern void free_uid(struct user_struct *);
#include <asm/current.h>
extern void xtime_update(unsigned long ticks);
extern int wake_up_state(struct task_struct *tsk, unsigned int state);
extern int wake_up_process(struct task_struct *tsk);
extern void wake_up_new_task(struct task_struct *tsk);
#ifdef CONFIG_SMP
extern void kick_process(struct task_struct *tsk);
#else
static inline void kick_process(struct task_struct *tsk) { }
#endif
extern int sched_fork(unsigned long clone_flags, struct task_struct *p);
extern void sched_dead(struct task_struct *p);
extern void proc_caches_init(void);
extern void flush_signals(struct task_struct *);
extern void ignore_signals(struct task_struct *);
extern void flush_signal_handlers(struct task_struct *, int force_default);
extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info);
/*
 * kernel_dequeue_signal - dequeue one pending signal for the current task.
 * @info: where to store the dequeued siginfo; may be NULL, in which case a
 *        throwaway on-stack siginfo is used instead.
 *
 * Takes ->sighand->siglock around dequeue_signal(), honoring its locking
 * contract, and returns dequeue_signal()'s result unchanged (presumably
 * the signal number or 0 if nothing was pending — confirm against
 * kernel/signal.c).
 */
static inline int kernel_dequeue_signal(siginfo_t *info)
{
struct task_struct *tsk = current;
siginfo_t __info;
int ret;
spin_lock_irq(&tsk->sighand->siglock);
ret = dequeue_signal(tsk, &tsk->blocked, info ?: &__info);
spin_unlock_irq(&tsk->sighand->siglock);
return ret;
}
/*
 * kernel_signal_stop - enter TASK_STOPPED if a group stop was dequeued,
 * then schedule away.
 *
 * The state change is made under ->sighand->siglock so it cannot race
 * with signal delivery; schedule() then parks the task.
 *
 * Fix: "&current" had been mangled to the mojibake "¤t" (an HTML
 * "&curren;" entity artifact), which does not compile.
 */
static inline void kernel_signal_stop(void)
{
	spin_lock_irq(&current->sighand->siglock);
	if (current->jobctl & JOBCTL_STOP_DEQUEUED)
		__set_current_state(TASK_STOPPED);
	spin_unlock_irq(&current->sighand->siglock);
	schedule();
}
extern void release_task(struct task_struct * p);
extern int send_sig_info(int, struct siginfo *, struct task_struct *);
extern int force_sigsegv(int, struct task_struct *);
extern int force_sig_info(int, struct siginfo *, struct task_struct *);
extern int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp);
extern int kill_pid_info(int sig, struct siginfo *info, struct pid *pid);
extern int kill_pid_info_as_cred(int, struct siginfo *, struct pid *,
const struct cred *, u32);
extern int kill_pgrp(struct pid *pid, int sig, int priv);
extern int kill_pid(struct pid *pid, int sig, int priv);
extern int kill_proc_info(int, struct siginfo *, pid_t);
extern __must_check bool do_notify_parent(struct task_struct *, int);
extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent);
extern void force_sig(int, struct task_struct *);
extern int send_sig(int, struct task_struct *, int);
extern int zap_other_threads(struct task_struct *p);
extern struct sigqueue *sigqueue_alloc(void);
extern void sigqueue_free(struct sigqueue *);
extern int send_sigqueue(struct sigqueue *, struct task_struct *, int group);
extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *);
static inline void restore_saved_sigmask(void)
{
if (test_and_clear_restore_sigmask())
__set_current_blocked(¤t->saved_sigmask);
}
static inline sigset_t *sigmask_to_save(void)
{
sigset_t *res = ¤t->blocked;
if (unlikely(test_restore_sigmask()))
res = ¤t->saved_sigmask;
return res;
}
/*
 * kill_cad_pid - send @sig to the process registered as the
 * Ctrl-Alt-Del handler (cad_pid).  @priv is forwarded unchanged to
 * kill_pid().
 */
static inline int kill_cad_pid(int sig, int priv)
{
return kill_pid(cad_pid, sig, priv);
}
/* These can be the second arg to send_sig_info/send_group_sig_info. */
#define SEND_SIG_NOINFO ((struct siginfo *) 0)
#define SEND_SIG_PRIV ((struct siginfo *) 1)
#define SEND_SIG_FORCED ((struct siginfo *) 2)
/*
* True if we are on the alternate signal stack.
*/
/*
 * on_sig_stack - true if stack pointer @sp lies within the current
 * task's alternate signal stack [sas_ss_sp, sas_ss_sp + sas_ss_size).
 *
 * The boundary comparisons differ by stack growth direction: with
 * CONFIG_STACK_GROWSUP the base address itself counts as on-stack and
 * the limit is exclusive; otherwise the base is exclusive and the limit
 * inclusive.
 */
static inline int on_sig_stack(unsigned long sp)
{
#ifdef CONFIG_STACK_GROWSUP
return sp >= current->sas_ss_sp &&
sp - current->sas_ss_sp < current->sas_ss_size;
#else
return sp > current->sas_ss_sp &&
sp - current->sas_ss_sp <= current->sas_ss_size;
#endif
}
/*
 * sas_ss_flags - report alternate-signal-stack state for @sp:
 * SS_DISABLE when no altstack is configured (size 0), SS_ONSTACK when
 * @sp is already on the altstack, 0 otherwise.
 */
static inline int sas_ss_flags(unsigned long sp)
{
if (!current->sas_ss_size)
return SS_DISABLE;
return on_sig_stack(sp) ? SS_ONSTACK : 0;
}
static inline unsigned long sigsp(unsigned long sp, struct ksignal *ksig)
{
if (unlikely((ksig->ka.sa.sa_flags & SA_ONSTACK)) && ! sas_ss_flags(sp))
#ifdef CONFIG_STACK_GROWSUP
return current->sas_ss_sp;
#else
return current->sas_ss_sp + current->sas_ss_size;
#endif
return sp;
}
/*
* Routines for handling mm_structs
*/
extern struct mm_struct * mm_alloc(void);
/* mmdrop drops the mm and the page tables */
extern void __mmdrop(struct mm_struct *);
/*
 * mmdrop - drop a reference on @mm's mm_count; when the count reaches
 * zero, __mmdrop() frees the mm and its page tables (see the comment
 * above __mmdrop's declaration).
 */
static inline void mmdrop(struct mm_struct * mm)
{
if (unlikely(atomic_dec_and_test(&mm->mm_count)))
__mmdrop(mm);
}
/* mmput gets rid of the mappings and all user-space */
extern void mmput(struct mm_struct *);
/* Grab a reference to a task's mm, if it is not already going away */
extern struct mm_struct *get_task_mm(struct task_struct *task);
/*
* Grab a reference to a task's mm, if it is not already going away
* and ptrace_may_access with the mode parameter passed to it
* succeeds.
*/
extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode);
/* Remove the current tasks stale references to the old mm_struct */
extern void mm_release(struct task_struct *, struct mm_struct *);
#ifdef CONFIG_HAVE_COPY_THREAD_TLS
extern int copy_thread_tls(unsigned long, unsigned long, unsigned long,
struct task_struct *, unsigned long);
#else
extern int copy_thread(unsigned long, unsigned long, unsigned long,
struct task_struct *);
/* Architectures that haven't opted into copy_thread_tls get the tls argument
* via pt_regs, so ignore the tls argument passed via C. */
static inline int copy_thread_tls(
unsigned long clone_flags, unsigned long sp, unsigned long arg,
struct task_struct *p, unsigned long tls)
{
return copy_thread(clone_flags, sp, arg, p);
}
#endif
extern void flush_thread(void);
extern void exit_thread(void);
extern void exit_files(struct task_struct *);
extern void __cleanup_sighand(struct sighand_struct *);
extern void exit_itimers(struct signal_struct *);
extern void flush_itimer_signals(void);
extern void do_group_exit(int);
extern int do_execve(struct filename *,
const char __user * const __user *,
const char __user * const __user *);
extern int do_execveat(int, struct filename *,
const char __user * const __user *,
const char __user * const __user *,
int);
extern long _do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *, unsigned long);
extern long do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *);
struct task_struct *fork_idle(int);
extern pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
extern void __set_task_comm(struct task_struct *tsk, const char *from, bool exec);
static inline void set_task_comm(struct task_struct *tsk, const char *from)
{
__set_task_comm(tsk, from, false);
}
extern char *get_task_comm(char *to, struct task_struct *tsk);
#ifdef CONFIG_SMP
void scheduler_ipi(void);
extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
#else
static inline void scheduler_ipi(void) { }
static inline unsigned long wait_task_inactive(struct task_struct *p,
long match_state)
{
return 1;
}
#endif
#define tasklist_empty() \
list_empty(&init_task.tasks)
#define next_task(p) \
list_entry_rcu((p)->tasks.next, struct task_struct, tasks)
#define for_each_process(p) \
for (p = &init_task ; (p = next_task(p)) != &init_task ; )
extern bool current_is_single_threaded(void);
/*
* Careful: do_each_thread/while_each_thread is a double loop so
* 'break' will not work as expected - use goto instead.
*/
#define do_each_thread(g, t) \
for (g = t = &init_task ; (g = t = next_task(g)) != &init_task ; ) do
#define while_each_thread(g, t) \
while ((t = next_thread(t)) != g)
#define __for_each_thread(signal, t) \
list_for_each_entry_rcu(t, &(signal)->thread_head, thread_node)
#define for_each_thread(p, t) \
__for_each_thread((p)->signal, t)
/* Careful: this is a double loop, 'break' won't work as expected. */
#define for_each_process_thread(p, t) \
for_each_process(p) for_each_thread(p, t)
static inline int get_nr_threads(struct task_struct *tsk)
{
return tsk->signal->nr_threads;
}
static inline bool thread_group_leader(struct task_struct *p)
{
return p->exit_signal >= 0;
}
/* Do to the insanities of de_thread it is possible for a process
* to have the pid of the thread group leader without actually being
* the thread group leader. For iteration through the pids in proc
* all we care about is that we have a task with the appropriate
* pid, we don't actually care if we have the right task.
*/
static inline bool has_group_leader_pid(struct task_struct *p)
{
return task_pid(p) == p->signal->leader_pid;
}
static inline
bool same_thread_group(struct task_struct *p1, struct task_struct *p2)
{
return p1->signal == p2->signal;
}
static inline struct task_struct *next_thread(const struct task_struct *p)
{
return list_entry_rcu(p->thread_group.next,
struct task_struct, thread_group);
}
static inline int thread_group_empty(struct task_struct *p)
{
return list_empty(&p->thread_group);
}
#define delay_group_leader(p) \
(thread_group_leader(p) && !thread_group_empty(p))
/*
* Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring
* subscriptions and synchronises with wait4(). Also used in procfs. Also
* pins the final release of task.io_context. Also protects ->cpuset and
* ->cgroup.subsys[]. And ->vfork_done.
*
* Nests both inside and outside of read_lock(&tasklist_lock).
* It must not be nested with write_lock_irq(&tasklist_lock),
* neither inside nor outside.
*/
static inline void task_lock(struct task_struct *p)
{
spin_lock(&p->alloc_lock);
}
static inline void task_unlock(struct task_struct *p)
{
spin_unlock(&p->alloc_lock);
}
extern struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
unsigned long *flags);
static inline struct sighand_struct *lock_task_sighand(struct task_struct *tsk,
unsigned long *flags)
{
struct sighand_struct *ret;
ret = __lock_task_sighand(tsk, flags);
(void)__cond_lock(&tsk->sighand->siglock, ret);
return ret;
}
static inline void unlock_task_sighand(struct task_struct *tsk,
unsigned long *flags)
{
spin_unlock_irqrestore(&tsk->sighand->siglock, *flags);
}
/**
* threadgroup_change_begin - mark the beginning of changes to a threadgroup
* @tsk: task causing the changes
*
* All operations which modify a threadgroup - a new thread joining the
* group, death of a member thread (the assertion of PF_EXITING) and
* exec(2) dethreading the process and replacing the leader - are wrapped
* by threadgroup_change_{begin|end}(). This is to provide a place which
* subsystems needing threadgroup stability can hook into for
* synchronization.
*/
static inline void threadgroup_change_begin(struct task_struct *tsk)
{
might_sleep();
cgroup_threadgroup_change_begin(tsk);
}
/**
* threadgroup_change_end - mark the end of changes to a threadgroup
* @tsk: task causing the changes
*
* See threadgroup_change_begin().
*/
static inline void threadgroup_change_end(struct task_struct *tsk)
{
cgroup_threadgroup_change_end(tsk);
}
#ifndef __HAVE_THREAD_FUNCTIONS
#define task_thread_info(task) ((struct thread_info *)(task)->stack)
#define task_stack_page(task) ((task)->stack)
static inline void setup_thread_stack(struct task_struct *p, struct task_struct *org)
{
*task_thread_info(p) = *task_thread_info(org);
task_thread_info(p)->task = p;
}
/*
* Return the address of the last usable long on the stack.
*
* When the stack grows down, this is just above the thread
* info struct. Going any lower will corrupt the threadinfo.
*
* When the stack grows up, this is the highest address.
* Beyond that position, we corrupt data on the next page.
*/
static inline unsigned long *end_of_stack(struct task_struct *p)
{
#ifdef CONFIG_STACK_GROWSUP
return (unsigned long *)((unsigned long)task_thread_info(p) + THREAD_SIZE) - 1;
#else
return (unsigned long *)(task_thread_info(p) + 1);
#endif
}
#endif
#define task_stack_end_corrupted(task) \
(*(end_of_stack(task)) != STACK_END_MAGIC)
static inline int object_is_on_stack(void *obj)
{
void *stack = task_stack_page(current);
return (obj >= stack) && (obj < (stack + THREAD_SIZE));
}
extern void thread_info_cache_init(void);
#ifdef CONFIG_DEBUG_STACK_USAGE
static inline unsigned long stack_not_used(struct task_struct *p)
{
unsigned long *n = end_of_stack(p);
do { /* Skip over canary */
n++;
} while (!*n);
return (unsigned long)n - (unsigned long)end_of_stack(p);
}
#endif
extern void set_task_stack_end_magic(struct task_struct *tsk);
/* set thread flags in other task's structures
* - see asm/thread_info.h for TIF_xxxx flags available
*/
static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag)
{
set_ti_thread_flag(task_thread_info(tsk), flag);
}
static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag)
{
clear_ti_thread_flag(task_thread_info(tsk), flag);
}
static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
{
return test_and_set_ti_thread_flag(task_thread_info(tsk), flag);
}
static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag)
{
return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag);
}
static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
{
return test_ti_thread_flag(task_thread_info(tsk), flag);
}
static inline void set_tsk_need_resched(struct task_struct *tsk)
{
set_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
}
static inline void clear_tsk_need_resched(struct task_struct *tsk)
{
clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
}
static inline int test_tsk_need_resched(struct task_struct *tsk)
{
return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
}
static inline int restart_syscall(void)
{
set_tsk_thread_flag(current, TIF_SIGPENDING);
return -ERESTARTNOINTR;
}
static inline int signal_pending(struct task_struct *p)
{
return unlikely(test_tsk_thread_flag(p,TIF_SIGPENDING));
}
static inline int __fatal_signal_pending(struct task_struct *p)
{
return unlikely(sigismember(&p->pending.signal, SIGKILL));
}
static inline int fatal_signal_pending(struct task_struct *p)
{
return signal_pending(p) && __fatal_signal_pending(p);
}
/*
 * signal_pending_state - should a task sleeping in @state be woken by a
 * pending signal?
 *
 * Only TASK_INTERRUPTIBLE and TASK_WAKEKILL sleeps react to signals at
 * all.  An interruptible sleep wakes for any pending signal; a
 * wakekill-only sleep wakes only for a pending fatal signal (SIGKILL).
 */
static inline int signal_pending_state(long state, struct task_struct *p)
{
if (!(state & (TASK_INTERRUPTIBLE | TASK_WAKEKILL)))
return 0;
if (!signal_pending(p))
return 0;
return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p);
}
/*
* cond_resched() and cond_resched_lock(): latency reduction via
* explicit rescheduling in places that are safe. The return
* value indicates whether a reschedule was done in fact.
* cond_resched_lock() will drop the spinlock before scheduling,
* cond_resched_softirq() will enable bhs before scheduling.
*/
extern int _cond_resched(void);
#define cond_resched() ({ \
___might_sleep(__FILE__, __LINE__, 0); \
_cond_resched(); \
})
extern int __cond_resched_lock(spinlock_t *lock);
#define cond_resched_lock(lock) ({ \
___might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);\
__cond_resched_lock(lock); \
})
extern int __cond_resched_softirq(void);
#define cond_resched_softirq() ({ \
___might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET); \
__cond_resched_softirq(); \
})
static inline void cond_resched_rcu(void)
{
#if defined(CONFIG_DEBUG_ATOMIC_SLEEP) || !defined(CONFIG_PREEMPT_RCU)
rcu_read_unlock();
cond_resched();
rcu_read_lock();
#endif
}
/*
* Does a critical section need to be broken due to another
* task waiting?: (technically does not depend on CONFIG_PREEMPT,
* but a general need for low latency)
*/
static inline int spin_needbreak(spinlock_t *lock)
{
#ifdef CONFIG_PREEMPT
return spin_is_contended(lock);
#else
return 0;
#endif
}
/*
* Idle thread specific functions to determine the need_resched
* polling state.
*/
#ifdef TIF_POLLING_NRFLAG
static inline int tsk_is_polling(struct task_struct *p)
{
return test_tsk_thread_flag(p, TIF_POLLING_NRFLAG);
}
static inline void __current_set_polling(void)
{
set_thread_flag(TIF_POLLING_NRFLAG);
}
static inline bool __must_check current_set_polling_and_test(void)
{
__current_set_polling();
/*
* Polling state must be visible before we test NEED_RESCHED,
* paired by resched_curr()
*/
smp_mb__after_atomic();
return unlikely(tif_need_resched());
}
static inline void __current_clr_polling(void)
{
clear_thread_flag(TIF_POLLING_NRFLAG);
}
static inline bool __must_check current_clr_polling_and_test(void)
{
__current_clr_polling();
/*
* Polling state must be visible before we test NEED_RESCHED,
* paired by resched_curr()
*/
smp_mb__after_atomic();
return unlikely(tif_need_resched());
}
#else
static inline int tsk_is_polling(struct task_struct *p) { return 0; }
static inline void __current_set_polling(void) { }
static inline void __current_clr_polling(void) { }
static inline bool __must_check current_set_polling_and_test(void)
{
return unlikely(tif_need_resched());
}
static inline bool __must_check current_clr_polling_and_test(void)
{
return unlikely(tif_need_resched());
}
#endif
static inline void current_clr_polling(void)
{
__current_clr_polling();
/*
* Ensure we check TIF_NEED_RESCHED after we clear the polling bit.
* Once the bit is cleared, we'll get IPIs with every new
* TIF_NEED_RESCHED and the IPI handler, scheduler_ipi(), will also
* fold.
*/
smp_mb(); /* paired with resched_curr() */
preempt_fold_need_resched();
}
static __always_inline bool need_resched(void)
{
return unlikely(tif_need_resched());
}
/*
* Thread group CPU time accounting.
*/
void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times);
void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times);
/*
* Reevaluate whether the task has signals pending delivery.
* Wake the task if so.
* This is required every time the blocked sigset_t changes.
* callers must hold sighand->siglock.
*/
extern void recalc_sigpending_and_wake(struct task_struct *t);
extern void recalc_sigpending(void);
extern void signal_wake_up_state(struct task_struct *t, unsigned int state);
static inline void signal_wake_up(struct task_struct *t, bool resume)
{
signal_wake_up_state(t, resume ? TASK_WAKEKILL : 0);
}
static inline void ptrace_signal_wake_up(struct task_struct *t, bool resume)
{
signal_wake_up_state(t, resume ? __TASK_TRACED : 0);
}
/*
* Wrappers for p->thread_info->cpu access. No-op on UP.
*/
#ifdef CONFIG_SMP
static inline unsigned int task_cpu(const struct task_struct *p)
{
return task_thread_info(p)->cpu;
}
static inline int task_node(const struct task_struct *p)
{
return cpu_to_node(task_cpu(p));
}
extern void set_task_cpu(struct task_struct *p, unsigned int cpu);
#else
static inline unsigned int task_cpu(const struct task_struct *p)
{
return 0;
}
static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
{
}
#endif /* CONFIG_SMP */
extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
#ifdef CONFIG_CGROUP_SCHED
extern struct task_group root_task_group;
#endif /* CONFIG_CGROUP_SCHED */
extern int task_can_switch_user(struct user_struct *up,
struct task_struct *tsk);
#ifdef CONFIG_TASK_XACCT
static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
{
tsk->ioac.rchar += amt;
}
static inline void add_wchar(struct task_struct *tsk, ssize_t amt)
{
tsk->ioac.wchar += amt;
}
static inline void inc_syscr(struct task_struct *tsk)
{
tsk->ioac.syscr++;
}
static inline void inc_syscw(struct task_struct *tsk)
{
tsk->ioac.syscw++;
}
#else
static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
{
}
static inline void add_wchar(struct task_struct *tsk, ssize_t amt)
{
}
static inline void inc_syscr(struct task_struct *tsk)
{
}
static inline void inc_syscw(struct task_struct *tsk)
{
}
#endif
#ifndef TASK_SIZE_OF
#define TASK_SIZE_OF(tsk) TASK_SIZE
#endif
#ifdef CONFIG_MEMCG
extern void mm_update_next_owner(struct mm_struct *mm);
#else
static inline void mm_update_next_owner(struct mm_struct *mm)
{
}
#endif /* CONFIG_MEMCG */
static inline unsigned long task_rlimit(const struct task_struct *tsk,
unsigned int limit)
{
return READ_ONCE(tsk->signal->rlim[limit].rlim_cur);
}
static inline unsigned long task_rlimit_max(const struct task_struct *tsk,
unsigned int limit)
{
return READ_ONCE(tsk->signal->rlim[limit].rlim_max);
}
static inline unsigned long rlimit(unsigned int limit)
{
return task_rlimit(current, limit);
}
static inline unsigned long rlimit_max(unsigned int limit)
{
return task_rlimit_max(current, limit);
}
#endif
|
5740_0
|
crossvul
|
h
|
CWE-119
|
Improper Restriction of Operations within the Bounds of a Memory Buffer - Certain languages allow direct addressing of memory locations and do not automatically ensure that these locations are valid for the memory buffer that is being referenced.
|
c
|
/*
* $Id: json_object.h,v 1.12 2006/01/30 23:07:57 mclark Exp $
*
* Copyright (c) 2004, 2005 Metaparadigm Pte. Ltd.
* Michael Clark <michael@metaparadigm.com>
* Copyright (c) 2009 Hewlett-Packard Development Company, L.P.
*
* This library is free software; you can redistribute it and/or modify
* it under the terms of the MIT license. See COPYING for details.
*
*/
#ifndef _json_object_h_
#define _json_object_h_
#include "json_inttypes.h"
#ifdef __cplusplus
extern "C" {
#endif
#define JSON_OBJECT_DEF_HASH_ENTRIES 16
/**
* A flag for the json_object_to_json_string_ext() and
* json_object_to_file_ext() functions which causes the output
* to have no extra whitespace or formatting applied.
*/
#define JSON_C_TO_STRING_PLAIN 0
/**
* A flag for the json_object_to_json_string_ext() and
* json_object_to_file_ext() functions which causes the output to have
* minimal whitespace inserted to make things slightly more readable.
*/
#define JSON_C_TO_STRING_SPACED (1<<0)
/**
* A flag for the json_object_to_json_string_ext() and
* json_object_to_file_ext() functions which causes
* the output to be formatted.
*
* See the "Two Space Tab" option at http://jsonformatter.curiousconcept.com/
* for an example of the format.
*/
#define JSON_C_TO_STRING_PRETTY (1<<1)
/**
* A flag to drop trailing zero for float values
*/
#define JSON_C_TO_STRING_NOZERO (1<<2)
#undef FALSE
#define FALSE ((json_bool)0)
#undef TRUE
#define TRUE ((json_bool)1)
extern const char *json_number_chars;
extern const char *json_hex_chars;
/* CAW: added for ANSI C iteration correctness */
struct json_object_iter
{
char *key; /* current field name */
struct json_object *val; /* current field value */
struct lh_entry *entry; /* internal linkhash cursor; do not touch directly */
};
/* forward structure definitions */
typedef int json_bool;
typedef struct printbuf printbuf;
typedef struct lh_table lh_table;
typedef struct array_list array_list;
typedef struct json_object json_object;
typedef struct json_object_iter json_object_iter;
typedef struct json_tokener json_tokener;
/**
* Type of custom user delete functions. See json_object_set_serializer.
*/
typedef void (json_object_delete_fn)(struct json_object *jso, void *userdata);
/**
* Type of a custom serialization function. See json_object_set_serializer.
*/
typedef int (json_object_to_json_string_fn)(struct json_object *jso,
struct printbuf *pb,
int level,
int flags);
/* supported object types */
typedef enum json_type {
/* If you change this, be sure to update json_type_to_name() too */
json_type_null,
json_type_boolean,
json_type_double,
json_type_int,
json_type_object,
json_type_array,
json_type_string,
} json_type;
/* reference counting functions */
/**
* Increment the reference count of json_object, thereby grabbing shared
* ownership of obj.
*
* @param obj the json_object instance
*/
extern struct json_object* json_object_get(struct json_object *obj);
/**
* Decrement the reference count of json_object and free if it reaches zero.
* You must have ownership of obj prior to doing this or you will cause an
* imbalance in the reference count.
*
* @param obj the json_object instance
* @returns 1 if the object was freed.
*/
int json_object_put(struct json_object *obj);
/**
* Check if the json_object is of a given type
* @param obj the json_object instance
* @param type one of:
json_type_null (i.e. obj == NULL),
json_type_boolean,
json_type_double,
json_type_int,
json_type_object,
json_type_array,
json_type_string,
*/
extern int json_object_is_type(struct json_object *obj, enum json_type type);
/**
* Get the type of the json_object. See also json_type_to_name() to turn this
* into a string suitable, for instance, for logging.
*
* @param obj the json_object instance
* @returns type being one of:
json_type_null (i.e. obj == NULL),
json_type_boolean,
json_type_double,
json_type_int,
json_type_object,
json_type_array,
json_type_string,
*/
extern enum json_type json_object_get_type(struct json_object *obj);
/** Stringify object to json format.
* Equivalent to json_object_to_json_string_ext(obj, JSON_C_TO_STRING_SPACED)
* @param obj the json_object instance
* @returns a string in JSON format
*/
extern const char* json_object_to_json_string(struct json_object *obj);
/** Stringify object to json format
* @param obj the json_object instance
* @param flags formatting options, see JSON_C_TO_STRING_PRETTY and other constants
* @returns a string in JSON format
*/
extern const char* json_object_to_json_string_ext(struct json_object *obj, int
flags);
/**
* Set a custom serialization function to be used when this particular object
* is converted to a string by json_object_to_json_string.
*
* If a custom serializer is already set on this object, any existing
* user_delete function is called before the new one is set.
*
* If to_string_func is NULL, the other parameters are ignored
* and the default behaviour is reset.
*
* The userdata parameter is optional and may be passed as NULL. If provided,
* it is passed to to_string_func as-is. This parameter may be NULL even
* if user_delete is non-NULL.
*
* The user_delete parameter is optional and may be passed as NULL, even if
* the userdata parameter is non-NULL. It will be called just before the
* json_object is deleted, after it's reference count goes to zero
* (see json_object_put()).
* If this is not provided, it is up to the caller to free the userdata at
* an appropriate time. (i.e. after the json_object is deleted)
*
* @param jso the object to customize
* @param to_string_func the custom serialization function
* @param userdata an optional opaque cookie
* @param user_delete an optional function from freeing userdata
*/
extern void json_object_set_serializer(json_object *jso,
json_object_to_json_string_fn to_string_func,
void *userdata,
json_object_delete_fn *user_delete);
/**
* Simply call free on the userdata pointer.
* Can be used with json_object_set_serializer().
*
* @param jso unused
* @param userdata the pointer that is passed to free().
*/
json_object_delete_fn json_object_free_userdata;
/**
* Copy the jso->_userdata string over to pb as-is.
* Can be used with json_object_set_serializer().
*
* @param jso The object whose _userdata is used.
* @param pb The destination buffer.
* @param level Ignored.
* @param flags Ignored.
*/
json_object_to_json_string_fn json_object_userdata_to_json_string;
/* object type methods */
/** Create a new empty object with a reference count of 1. The caller of
* this object initially has sole ownership. Remember, when using
* json_object_object_add or json_object_array_put_idx, ownership will
* transfer to the object/array. Call json_object_get if you want to maintain
* shared ownership or also add this object as a child of multiple objects or
* arrays. Any ownerships you acquired but did not transfer must be released
* through json_object_put.
*
* @returns a json_object of type json_type_object
*/
extern struct json_object* json_object_new_object(void);
/** Get the hashtable of a json_object of type json_type_object
* @param obj the json_object instance
* @returns a linkhash
*/
extern struct lh_table* json_object_get_object(struct json_object *obj);
/** Get the size of an object in terms of the number of fields it has.
* @param obj the json_object whose length to return
*/
extern int json_object_object_length(struct json_object* obj);
/** Add an object field to a json_object of type json_type_object
*
* The reference count will *not* be incremented. This is to make adding
* fields to objects in code more compact. If you want to retain a reference
* to an added object, independent of the lifetime of obj, you must wrap the
* passed object with json_object_get.
*
* Upon calling this, the ownership of val transfers to obj. Thus you must
* make sure that you do in fact have ownership over this object. For instance,
* json_object_new_object will give you ownership until you transfer it,
* whereas json_object_object_get does not.
*
* @param obj the json_object instance
* @param key the object field name (a private copy will be duplicated)
* @param val a json_object or NULL member to associate with the given field
*/
extern void json_object_object_add(struct json_object* obj, const char *key,
struct json_object *val);
/** Get the json_object associate with a given object field
*
* *No* reference counts will be changed. There is no need to manually adjust
* reference counts through the json_object_put/json_object_get methods unless
* you need to have the child (value) reference maintain a different lifetime
* than the owning parent (obj). Ownership of the returned value is retained
* by obj (do not do json_object_put unless you have done a json_object_get).
* If you delete the value from obj (json_object_object_del) and wish to access
* the returned reference afterwards, make sure you have first gotten shared
* ownership through json_object_get (& don't forget to do a json_object_put
* or transfer ownership to prevent a memory leak).
*
* @param obj the json_object instance
* @param key the object field name
* @returns the json_object associated with the given field name
* @deprecated Please use json_object_object_get_ex
*/
extern struct json_object* json_object_object_get(struct json_object* obj,
const char *key);
/** Get the json_object associated with a given object field.
*
* This returns true if the key is found, false in all other cases (including
* if obj isn't a json_type_object).
*
* *No* reference counts will be changed. There is no need to manually adjust
* reference counts through the json_object_put/json_object_get methods unless
* you need to have the child (value) reference maintain a different lifetime
* than the owning parent (obj). Ownership of value is retained by obj.
*
* @param obj the json_object instance
* @param key the object field name
* @param value a pointer where to store a reference to the json_object
* associated with the given field name.
*
* It is safe to pass a NULL value.
* @returns whether or not the key exists
*/
extern json_bool json_object_object_get_ex(struct json_object* obj,
const char *key,
struct json_object **value);
/** Delete the given json_object field
*
* The reference count will be decremented for the deleted object. If there
* are no more owners of the value represented by this key, then the value is
* freed. Otherwise, the reference to the value will remain in memory.
*
* @param obj the json_object instance
* @param key the object field name
*/
extern void json_object_object_del(struct json_object* obj, const char *key);
/**
* Iterate through all keys and values of an object.
*
* Adding keys to the object while iterating is NOT allowed.
*
* Deleting an existing key, or replacing an existing key with a
* new value IS allowed.
*
* @param obj the json_object instance
* @param key the local name for the char* key variable defined in the body
* @param val the local name for the json_object* object variable defined in
* the body
*/
#if defined(__GNUC__) && !defined(__STRICT_ANSI__) && __STDC_VERSION__ >= 199901L
/* C99/GNU variant: uses a for-scoped declaration plus a GNU statement
 * expression ({ ... }).  The loop-control variables are made unique by
 * token-pasting the caller-supplied `key` identifier (entry##key), so two
 * foreach loops in the same scope must use distinct key names.  The next
 * entry is cached (entry_next##key) before the body runs, which is why
 * deleting the current key inside the body is allowed. */
# define json_object_object_foreach(obj,key,val) \
char *key; \
struct json_object *val __attribute__((__unused__)); \
for(struct lh_entry *entry ## key = json_object_get_object(obj)->head, *entry_next ## key = NULL; \
({ if(entry ## key) { \
key = (char*)entry ## key->k; \
val = (struct json_object*)entry ## key->v; \
entry_next ## key = entry ## key->next; \
} ; entry ## key; }); \
entry ## key = entry_next ## key )
#else /* ANSI C or MSC */
/* Portable variant: same uniquified iterator variables, but `key`/`val` are
 * assigned via the comma operator in the loop condition instead of a
 * statement expression.  The next entry is likewise cached before the body
 * runs, so deleting the current key remains safe. */
# define json_object_object_foreach(obj,key,val) \
char *key;\
struct json_object *val; \
struct lh_entry *entry ## key; \
struct lh_entry *entry_next ## key = NULL; \
for(entry ## key = json_object_get_object(obj)->head; \
(entry ## key ? ( \
key = (char*)entry ## key->k, \
val = (struct json_object*)entry ## key->v, \
entry_next ## key = entry ## key->next, \
entry ## key) : 0); \
entry ## key = entry_next ## key)
#endif /* defined(__GNUC__) && !defined(__STRICT_ANSI__) && __STDC_VERSION__ >= 199901L */
/** Iterate through all keys and values of an object (ANSI C Safe)
 *
 * Unlike json_object_object_foreach, the increment expression reads
 * iter.entry->next *after* the loop body has run, so deleting the current
 * entry from inside the body is NOT safe with this macro.
 *
 * @param obj the json_object instance
 * @param iter the object iterator (a struct json_object_iter lvalue)
 */
#define json_object_object_foreachC(obj,iter) \
for(iter.entry = json_object_get_object(obj)->head; (iter.entry ? (iter.key = (char*)iter.entry->k, iter.val = (struct json_object*)iter.entry->v, iter.entry) : 0); iter.entry = iter.entry->next)
/* Array type methods */
/** Create a new empty json_object of type json_type_array
* @returns a json_object of type json_type_array
*/
extern struct json_object* json_object_new_array(void);
/** Get the arraylist of a json_object of type json_type_array
* @param obj the json_object instance
* @returns an arraylist
*/
extern struct array_list* json_object_get_array(struct json_object *obj);
/** Get the length of a json_object of type json_type_array
* @param obj the json_object instance
* @returns an int
*/
extern int json_object_array_length(struct json_object *obj);
/** Sorts the elements of jso of type json_type_array
*
* Pointers to the json_object pointers will be passed as the two arguments
* to @sort_fn
*
 * @param jso the json_object instance
* @param sort_fn a sorting function
*/
extern void json_object_array_sort(struct json_object *jso, int(*sort_fn)(const void *, const void *));
/** Add an element to the end of a json_object of type json_type_array
*
* The reference count will *not* be incremented. This is to make adding
* fields to objects in code more compact. If you want to retain a reference
* to an added object you must wrap the passed object with json_object_get
*
* @param obj the json_object instance
* @param val the json_object to be added
*/
extern int json_object_array_add(struct json_object *obj,
struct json_object *val);
/** Insert or replace an element at a specified index in an array (a json_object of type json_type_array)
*
* The reference count will *not* be incremented. This is to make adding
* fields to objects in code more compact. If you want to retain a reference
* to an added object you must wrap the passed object with json_object_get
*
* The reference count of a replaced object will be decremented.
*
* The array size will be automatically be expanded to the size of the
* index if the index is larger than the current size.
*
* @param obj the json_object instance
* @param idx the index to insert the element at
* @param val the json_object to be added
*/
extern int json_object_array_put_idx(struct json_object *obj, int idx,
struct json_object *val);
/** Get the element at specified index of the array (a json_object of type json_type_array)
* @param obj the json_object instance
* @param idx the index to get the element at
* @returns the json_object at the specified index (or NULL)
*/
extern struct json_object* json_object_array_get_idx(struct json_object *obj,
int idx);
/* json_bool type methods */
/** Create a new empty json_object of type json_type_boolean
* @param b a json_bool TRUE or FALSE (0 or 1)
* @returns a json_object of type json_type_boolean
*/
extern struct json_object* json_object_new_boolean(json_bool b);
/** Get the json_bool value of a json_object
*
* The type is coerced to a json_bool if the passed object is not a json_bool.
 * integer and double objects will return FALSE if their value is zero
* or TRUE otherwise. If the passed object is a string it will return
* TRUE if it has a non zero length. If any other object type is passed
* TRUE will be returned if the object is not NULL.
*
* @param obj the json_object instance
* @returns a json_bool
*/
extern json_bool json_object_get_boolean(struct json_object *obj);
/* int type methods */
/** Create a new empty json_object of type json_type_int
* Note that values are stored as 64-bit values internally.
* To ensure the full range is maintained, use json_object_new_int64 instead.
* @param i the integer
* @returns a json_object of type json_type_int
*/
extern struct json_object* json_object_new_int(int32_t i);
/** Create a new empty json_object of type json_type_int
* @param i the integer
* @returns a json_object of type json_type_int
*/
extern struct json_object* json_object_new_int64(int64_t i);
/** Get the int value of a json_object
*
* The type is coerced to a int if the passed object is not a int.
* double objects will return their integer conversion. Strings will be
* parsed as an integer. If no conversion exists then 0 is returned
* and errno is set to EINVAL. null is equivalent to 0 (no error values set)
*
* Note that integers are stored internally as 64-bit values.
 * If the value is too big or too small to fit into 32-bit, INT32_MAX or
* INT32_MIN are returned, respectively.
*
* @param obj the json_object instance
* @returns an int
*/
extern int32_t json_object_get_int(struct json_object *obj);
/** Get the int value of a json_object
*
* The type is coerced to a int64 if the passed object is not a int64.
* double objects will return their int64 conversion. Strings will be
* parsed as an int64. If no conversion exists then 0 is returned.
*
* NOTE: Set errno to 0 directly before a call to this function to determine
* whether or not conversion was successful (it does not clear the value for
* you).
*
* @param obj the json_object instance
* @returns an int64
*/
extern int64_t json_object_get_int64(struct json_object *obj);
/* double type methods */
/** Create a new empty json_object of type json_type_double
* @param d the double
* @returns a json_object of type json_type_double
*/
extern struct json_object* json_object_new_double(double d);
/**
* Create a new json_object of type json_type_double, using
* the exact serialized representation of the value.
*
* This allows for numbers that would otherwise get displayed
* inefficiently (e.g. 12.3 => "12.300000000000001") to be
* serialized with the more convenient form.
*
* Note: this is used by json_tokener_parse_ex() to allow for
* an exact re-serialization of a parsed object.
*
* An equivalent sequence of calls is:
* @code
* jso = json_object_new_double(d);
* json_object_set_serializer(d, json_object_userdata_to_json_string,
* strdup(ds), json_object_free_userdata)
* @endcode
*
* @param d the numeric value of the double.
* @param ds the string representation of the double. This will be copied.
*/
extern struct json_object* json_object_new_double_s(double d, const char *ds);
/** Get the double floating point value of a json_object
*
* The type is coerced to a double if the passed object is not a double.
* integer objects will return their double conversion. Strings will be
* parsed as a double. If no conversion exists then 0.0 is returned and
* errno is set to EINVAL. null is equivalent to 0 (no error values set)
*
* If the value is too big to fit in a double, then the value is set to
* the closest infinity with errno set to ERANGE. If strings cannot be
* converted to their double value, then EINVAL is set & NaN is returned.
*
* Arrays of length 0 are interpreted as 0 (with no error flags set).
* Arrays of length 1 are effectively cast to the equivalent object and
* converted using the above rules. All other arrays set the error to
* EINVAL & return NaN.
*
* NOTE: Set errno to 0 directly before a call to this function to
* determine whether or not conversion was successful (it does not clear
* the value for you).
*
* @param obj the json_object instance
* @returns a double floating point number
*/
extern double json_object_get_double(struct json_object *obj);
/* string type methods */
/** Create a new empty json_object of type json_type_string
*
* A copy of the string is made and the memory is managed by the json_object
*
* @param s the string
* @returns a json_object of type json_type_string
*/
extern struct json_object* json_object_new_string(const char *s);
extern struct json_object* json_object_new_string_len(const char *s, int len);
/** Get the string value of a json_object
*
* If the passed object is not of type json_type_string then the JSON
* representation of the object is returned.
*
* The returned string memory is managed by the json_object and will
* be freed when the reference count of the json_object drops to zero.
*
* @param obj the json_object instance
* @returns a string
*/
extern const char* json_object_get_string(struct json_object *obj);
/** Get the string length of a json_object
*
* If the passed object is not of type json_type_string then zero
* will be returned.
*
* @param obj the json_object instance
* @returns int
*/
extern int json_object_get_string_len(struct json_object *obj);
#ifdef __cplusplus
}
#endif
#endif
|
/*
* $Id: json_object.h,v 1.12 2006/01/30 23:07:57 mclark Exp $
*
* Copyright (c) 2004, 2005 Metaparadigm Pte. Ltd.
* Michael Clark <michael@metaparadigm.com>
* Copyright (c) 2009 Hewlett-Packard Development Company, L.P.
*
* This library is free software; you can redistribute it and/or modify
* it under the terms of the MIT license. See COPYING for details.
*
*/
#ifndef _json_object_h_
#define _json_object_h_
#ifdef __GNUC__
#define THIS_FUNCTION_IS_DEPRECATED(func) func __attribute__ ((deprecated))
#elif defined(_MSC_VER)
#define THIS_FUNCTION_IS_DEPRECATED(func) __declspec(deprecated) func
#else
#define THIS_FUNCTION_IS_DEPRECATED(func) func
#endif
#include "json_inttypes.h"
#ifdef __cplusplus
extern "C" {
#endif
#define JSON_OBJECT_DEF_HASH_ENTRIES 16
/**
* A flag for the json_object_to_json_string_ext() and
* json_object_to_file_ext() functions which causes the output
* to have no extra whitespace or formatting applied.
*/
#define JSON_C_TO_STRING_PLAIN 0
/**
* A flag for the json_object_to_json_string_ext() and
* json_object_to_file_ext() functions which causes the output to have
* minimal whitespace inserted to make things slightly more readable.
*/
#define JSON_C_TO_STRING_SPACED (1<<0)
/**
* A flag for the json_object_to_json_string_ext() and
* json_object_to_file_ext() functions which causes
* the output to be formatted.
*
* See the "Two Space Tab" option at http://jsonformatter.curiousconcept.com/
* for an example of the format.
*/
#define JSON_C_TO_STRING_PRETTY (1<<1)
/**
* A flag to drop trailing zero for float values
*/
#define JSON_C_TO_STRING_NOZERO (1<<2)
#undef FALSE
#define FALSE ((json_bool)0)
#undef TRUE
#define TRUE ((json_bool)1)
extern const char *json_number_chars;
extern const char *json_hex_chars;
/* CAW: added for ANSI C iteration correctness */
/* Cursor for json_object_object_foreachC; the key and value pointers are
 * owned by the object being iterated, not by the caller. */
struct json_object_iter
{
char *key;                 /* current field name */
struct json_object *val;   /* current field value */
struct lh_entry *entry;    /* current linkhash entry; drives the iteration */
};
/* forward structure definitions */
typedef int json_bool;
typedef struct printbuf printbuf;
typedef struct lh_table lh_table;
typedef struct array_list array_list;
typedef struct json_object json_object;
typedef struct json_object_iter json_object_iter;
typedef struct json_tokener json_tokener;
/**
* Type of custom user delete functions. See json_object_set_serializer.
*/
typedef void (json_object_delete_fn)(struct json_object *jso, void *userdata);
/**
* Type of a custom serialization function. See json_object_set_serializer.
*/
typedef int (json_object_to_json_string_fn)(struct json_object *jso,
struct printbuf *pb,
int level,
int flags);
/* supported object types */
/* supported object types; query with json_object_get_type() /
 * json_object_is_type() */
typedef enum json_type {
  /* If you change this, be sure to update json_type_to_name() too */
  json_type_null,
  json_type_boolean,
  json_type_double,
  json_type_int,
  json_type_object,
  json_type_array,
  json_type_string,
} json_type;
/* reference counting functions */
/**
* Increment the reference count of json_object, thereby grabbing shared
* ownership of obj.
*
* @param obj the json_object instance
*/
extern struct json_object* json_object_get(struct json_object *obj);
/**
* Decrement the reference count of json_object and free if it reaches zero.
* You must have ownership of obj prior to doing this or you will cause an
* imbalance in the reference count.
*
* @param obj the json_object instance
* @returns 1 if the object was freed.
*/
int json_object_put(struct json_object *obj);
/**
* Check if the json_object is of a given type
* @param obj the json_object instance
* @param type one of:
json_type_null (i.e. obj == NULL),
json_type_boolean,
json_type_double,
json_type_int,
json_type_object,
json_type_array,
json_type_string,
*/
extern int json_object_is_type(struct json_object *obj, enum json_type type);
/**
* Get the type of the json_object. See also json_type_to_name() to turn this
* into a string suitable, for instance, for logging.
*
* @param obj the json_object instance
* @returns type being one of:
json_type_null (i.e. obj == NULL),
json_type_boolean,
json_type_double,
json_type_int,
json_type_object,
json_type_array,
json_type_string,
*/
extern enum json_type json_object_get_type(struct json_object *obj);
/** Stringify object to json format.
* Equivalent to json_object_to_json_string_ext(obj, JSON_C_TO_STRING_SPACED)
* @param obj the json_object instance
* @returns a string in JSON format
*/
extern const char* json_object_to_json_string(struct json_object *obj);
/** Stringify object to json format
* @param obj the json_object instance
* @param flags formatting options, see JSON_C_TO_STRING_PRETTY and other constants
* @returns a string in JSON format
*/
extern const char* json_object_to_json_string_ext(struct json_object *obj, int
flags);
/**
* Set a custom serialization function to be used when this particular object
* is converted to a string by json_object_to_json_string.
*
* If a custom serializer is already set on this object, any existing
* user_delete function is called before the new one is set.
*
* If to_string_func is NULL, the other parameters are ignored
* and the default behaviour is reset.
*
* The userdata parameter is optional and may be passed as NULL. If provided,
* it is passed to to_string_func as-is. This parameter may be NULL even
* if user_delete is non-NULL.
*
* The user_delete parameter is optional and may be passed as NULL, even if
* the userdata parameter is non-NULL. It will be called just before the
* json_object is deleted, after it's reference count goes to zero
* (see json_object_put()).
* If this is not provided, it is up to the caller to free the userdata at
* an appropriate time. (i.e. after the json_object is deleted)
*
* @param jso the object to customize
* @param to_string_func the custom serialization function
* @param userdata an optional opaque cookie
* @param user_delete an optional function from freeing userdata
*/
extern void json_object_set_serializer(json_object *jso,
json_object_to_json_string_fn to_string_func,
void *userdata,
json_object_delete_fn *user_delete);
/**
* Simply call free on the userdata pointer.
* Can be used with json_object_set_serializer().
*
* @param jso unused
* @param userdata the pointer that is passed to free().
*/
json_object_delete_fn json_object_free_userdata;
/**
* Copy the jso->_userdata string over to pb as-is.
* Can be used with json_object_set_serializer().
*
* @param jso The object whose _userdata is used.
* @param pb The destination buffer.
* @param level Ignored.
* @param flags Ignored.
*/
json_object_to_json_string_fn json_object_userdata_to_json_string;
/* object type methods */
/** Create a new empty object with a reference count of 1. The caller of
* this object initially has sole ownership. Remember, when using
* json_object_object_add or json_object_array_put_idx, ownership will
* transfer to the object/array. Call json_object_get if you want to maintain
* shared ownership or also add this object as a child of multiple objects or
* arrays. Any ownerships you acquired but did not transfer must be released
* through json_object_put.
*
* @returns a json_object of type json_type_object
*/
extern struct json_object* json_object_new_object(void);
/** Get the hashtable of a json_object of type json_type_object
* @param obj the json_object instance
* @returns a linkhash
*/
extern struct lh_table* json_object_get_object(struct json_object *obj);
/** Get the size of an object in terms of the number of fields it has.
* @param obj the json_object whose length to return
*/
extern int json_object_object_length(struct json_object* obj);
/** Add an object field to a json_object of type json_type_object
*
* The reference count will *not* be incremented. This is to make adding
* fields to objects in code more compact. If you want to retain a reference
* to an added object, independent of the lifetime of obj, you must wrap the
* passed object with json_object_get.
*
* Upon calling this, the ownership of val transfers to obj. Thus you must
* make sure that you do in fact have ownership over this object. For instance,
* json_object_new_object will give you ownership until you transfer it,
* whereas json_object_object_get does not.
*
* @param obj the json_object instance
* @param key the object field name (a private copy will be duplicated)
* @param val a json_object or NULL member to associate with the given field
*/
extern void json_object_object_add(struct json_object* obj, const char *key,
struct json_object *val);
/** Get the json_object associated with a given object field
*
* *No* reference counts will be changed. There is no need to manually adjust
* reference counts through the json_object_put/json_object_get methods unless
* you need to have the child (value) reference maintain a different lifetime
* than the owning parent (obj). Ownership of the returned value is retained
* by obj (do not do json_object_put unless you have done a json_object_get).
* If you delete the value from obj (json_object_object_del) and wish to access
* the returned reference afterwards, make sure you have first gotten shared
* ownership through json_object_get (& don't forget to do a json_object_put
* or transfer ownership to prevent a memory leak).
*
* @param obj the json_object instance
* @param key the object field name
* @returns the json_object associated with the given field name
* @deprecated Please use json_object_object_get_ex
*/
THIS_FUNCTION_IS_DEPRECATED(extern struct json_object* json_object_object_get(struct json_object* obj,
const char *key));
/** Get the json_object associated with a given object field.
*
* This returns true if the key is found, false in all other cases (including
* if obj isn't a json_type_object).
*
* *No* reference counts will be changed. There is no need to manually adjust
* reference counts through the json_object_put/json_object_get methods unless
* you need to have the child (value) reference maintain a different lifetime
* than the owning parent (obj). Ownership of value is retained by obj.
*
* @param obj the json_object instance
* @param key the object field name
* @param value a pointer where to store a reference to the json_object
* associated with the given field name.
*
* It is safe to pass a NULL value.
* @returns whether or not the key exists
*/
extern json_bool json_object_object_get_ex(struct json_object* obj,
const char *key,
struct json_object **value);
/** Delete the given json_object field
*
* The reference count will be decremented for the deleted object. If there
* are no more owners of the value represented by this key, then the value is
* freed. Otherwise, the reference to the value will remain in memory.
*
* @param obj the json_object instance
* @param key the object field name
*/
extern void json_object_object_del(struct json_object* obj, const char *key);
/**
* Iterate through all keys and values of an object.
*
* Adding keys to the object while iterating is NOT allowed.
*
* Deleting an existing key, or replacing an existing key with a
* new value IS allowed.
*
* @param obj the json_object instance
* @param key the local name for the char* key variable defined in the body
* @param val the local name for the json_object* object variable defined in
* the body
*/
#if defined(__GNUC__) && !defined(__STRICT_ANSI__) && __STDC_VERSION__ >= 199901L
/* C99/GNU variant: uses a for-scoped declaration plus a GNU statement
 * expression ({ ... }).  The loop-control variables are made unique by
 * token-pasting the caller-supplied `key` identifier (entry##key), so two
 * foreach loops in the same scope must use distinct key names.  The next
 * entry is cached (entry_next##key) before the body runs, which is why
 * deleting the current key inside the body is allowed. */
# define json_object_object_foreach(obj,key,val) \
char *key; \
struct json_object *val __attribute__((__unused__)); \
for(struct lh_entry *entry ## key = json_object_get_object(obj)->head, *entry_next ## key = NULL; \
({ if(entry ## key) { \
key = (char*)entry ## key->k; \
val = (struct json_object*)entry ## key->v; \
entry_next ## key = entry ## key->next; \
} ; entry ## key; }); \
entry ## key = entry_next ## key )
#else /* ANSI C or MSC */
/* Portable variant: same uniquified iterator variables, but `key`/`val` are
 * assigned via the comma operator in the loop condition instead of a
 * statement expression.  The next entry is likewise cached before the body
 * runs, so deleting the current key remains safe. */
# define json_object_object_foreach(obj,key,val) \
char *key;\
struct json_object *val; \
struct lh_entry *entry ## key; \
struct lh_entry *entry_next ## key = NULL; \
for(entry ## key = json_object_get_object(obj)->head; \
(entry ## key ? ( \
key = (char*)entry ## key->k, \
val = (struct json_object*)entry ## key->v, \
entry_next ## key = entry ## key->next, \
entry ## key) : 0); \
entry ## key = entry_next ## key)
#endif /* defined(__GNUC__) && !defined(__STRICT_ANSI__) && __STDC_VERSION__ >= 199901L */
/** Iterate through all keys and values of an object (ANSI C Safe)
 *
 * Unlike json_object_object_foreach, the increment expression reads
 * iter.entry->next *after* the loop body has run, so deleting the current
 * entry from inside the body is NOT safe with this macro.
 *
 * @param obj the json_object instance
 * @param iter the object iterator (a struct json_object_iter lvalue)
 */
#define json_object_object_foreachC(obj,iter) \
for(iter.entry = json_object_get_object(obj)->head; (iter.entry ? (iter.key = (char*)iter.entry->k, iter.val = (struct json_object*)iter.entry->v, iter.entry) : 0); iter.entry = iter.entry->next)
/* Array type methods */
/** Create a new empty json_object of type json_type_array
* @returns a json_object of type json_type_array
*/
extern struct json_object* json_object_new_array(void);
/** Get the arraylist of a json_object of type json_type_array
* @param obj the json_object instance
* @returns an arraylist
*/
extern struct array_list* json_object_get_array(struct json_object *obj);
/** Get the length of a json_object of type json_type_array
* @param obj the json_object instance
* @returns an int
*/
extern int json_object_array_length(struct json_object *obj);
/** Sorts the elements of jso of type json_type_array
*
* Pointers to the json_object pointers will be passed as the two arguments
* to @sort_fn
*
 * @param jso the json_object instance
* @param sort_fn a sorting function
*/
extern void json_object_array_sort(struct json_object *jso, int(*sort_fn)(const void *, const void *));
/** Add an element to the end of a json_object of type json_type_array
*
* The reference count will *not* be incremented. This is to make adding
* fields to objects in code more compact. If you want to retain a reference
* to an added object you must wrap the passed object with json_object_get
*
* @param obj the json_object instance
* @param val the json_object to be added
*/
extern int json_object_array_add(struct json_object *obj,
struct json_object *val);
/** Insert or replace an element at a specified index in an array (a json_object of type json_type_array)
*
* The reference count will *not* be incremented. This is to make adding
* fields to objects in code more compact. If you want to retain a reference
* to an added object you must wrap the passed object with json_object_get
*
* The reference count of a replaced object will be decremented.
*
* The array size will be automatically be expanded to the size of the
* index if the index is larger than the current size.
*
* @param obj the json_object instance
* @param idx the index to insert the element at
* @param val the json_object to be added
*/
extern int json_object_array_put_idx(struct json_object *obj, int idx,
struct json_object *val);
/** Get the element at specified index of the array (a json_object of type json_type_array)
* @param obj the json_object instance
* @param idx the index to get the element at
* @returns the json_object at the specified index (or NULL)
*/
extern struct json_object* json_object_array_get_idx(struct json_object *obj,
int idx);
/* json_bool type methods */
/** Create a new empty json_object of type json_type_boolean
* @param b a json_bool TRUE or FALSE (0 or 1)
* @returns a json_object of type json_type_boolean
*/
extern struct json_object* json_object_new_boolean(json_bool b);
/** Get the json_bool value of a json_object
*
* The type is coerced to a json_bool if the passed object is not a json_bool.
 * integer and double objects will return FALSE if their value is zero
* or TRUE otherwise. If the passed object is a string it will return
* TRUE if it has a non zero length. If any other object type is passed
* TRUE will be returned if the object is not NULL.
*
* @param obj the json_object instance
* @returns a json_bool
*/
extern json_bool json_object_get_boolean(struct json_object *obj);
/* int type methods */
/** Create a new empty json_object of type json_type_int
* Note that values are stored as 64-bit values internally.
* To ensure the full range is maintained, use json_object_new_int64 instead.
* @param i the integer
* @returns a json_object of type json_type_int
*/
extern struct json_object* json_object_new_int(int32_t i);
/** Create a new empty json_object of type json_type_int
* @param i the integer
* @returns a json_object of type json_type_int
*/
extern struct json_object* json_object_new_int64(int64_t i);
/** Get the int value of a json_object
*
* The type is coerced to a int if the passed object is not a int.
* double objects will return their integer conversion. Strings will be
* parsed as an integer. If no conversion exists then 0 is returned
* and errno is set to EINVAL. null is equivalent to 0 (no error values set)
*
* Note that integers are stored internally as 64-bit values.
 * If the value is too big or too small to fit into 32-bit, INT32_MAX or
* INT32_MIN are returned, respectively.
*
* @param obj the json_object instance
* @returns an int
*/
extern int32_t json_object_get_int(struct json_object *obj);
/** Get the int value of a json_object
*
* The type is coerced to a int64 if the passed object is not a int64.
* double objects will return their int64 conversion. Strings will be
* parsed as an int64. If no conversion exists then 0 is returned.
*
* NOTE: Set errno to 0 directly before a call to this function to determine
* whether or not conversion was successful (it does not clear the value for
* you).
*
* @param obj the json_object instance
* @returns an int64
*/
extern int64_t json_object_get_int64(struct json_object *obj);
/* double type methods */
/** Create a new empty json_object of type json_type_double
* @param d the double
* @returns a json_object of type json_type_double
*/
extern struct json_object* json_object_new_double(double d);
/**
* Create a new json_object of type json_type_double, using
* the exact serialized representation of the value.
*
* This allows for numbers that would otherwise get displayed
* inefficiently (e.g. 12.3 => "12.300000000000001") to be
* serialized with the more convenient form.
*
* Note: this is used by json_tokener_parse_ex() to allow for
* an exact re-serialization of a parsed object.
*
* An equivalent sequence of calls is:
* @code
* jso = json_object_new_double(d);
* json_object_set_serializer(d, json_object_userdata_to_json_string,
* strdup(ds), json_object_free_userdata)
* @endcode
*
* @param d the numeric value of the double.
* @param ds the string representation of the double. This will be copied.
*/
extern struct json_object* json_object_new_double_s(double d, const char *ds);
/** Get the double floating point value of a json_object
*
* The type is coerced to a double if the passed object is not a double.
* integer objects will return their double conversion. Strings will be
* parsed as a double. If no conversion exists then 0.0 is returned and
* errno is set to EINVAL. null is equivalent to 0 (no error values set)
*
* If the value is too big to fit in a double, then the value is set to
* the closest infinity with errno set to ERANGE. If strings cannot be
* converted to their double value, then EINVAL is set & NaN is returned.
*
* Arrays of length 0 are interpreted as 0 (with no error flags set).
* Arrays of length 1 are effectively cast to the equivalent object and
* converted using the above rules. All other arrays set the error to
* EINVAL & return NaN.
*
* NOTE: Set errno to 0 directly before a call to this function to
* determine whether or not conversion was successful (it does not clear
* the value for you).
*
* @param obj the json_object instance
* @returns a double floating point number
*/
extern double json_object_get_double(struct json_object *obj);
/* string type methods */
/** Create a new empty json_object of type json_type_string
*
* A copy of the string is made and the memory is managed by the json_object
*
* @param s the string
* @returns a json_object of type json_type_string
*/
extern struct json_object* json_object_new_string(const char *s);
extern struct json_object* json_object_new_string_len(const char *s, int len);
/** Get the string value of a json_object
*
* If the passed object is not of type json_type_string then the JSON
* representation of the object is returned.
*
* The returned string memory is managed by the json_object and will
* be freed when the reference count of the json_object drops to zero.
*
* @param obj the json_object instance
* @returns a string
*/
extern const char* json_object_get_string(struct json_object *obj);
/** Get the string length of a json_object
*
* If the passed object is not of type json_type_string then zero
* will be returned.
*
* @param obj the json_object instance
* @returns int
*/
extern int json_object_get_string_len(struct json_object *obj);
#ifdef __cplusplus
}
#endif
#endif
|
5795_4
|
crossvul
|
h
|
CWE-119
|
Improper Restriction of Operations within the Bounds of a Memory Buffer - Certain languages allow direct addressing of memory locations and do not automatically ensure that these locations are valid for the memory buffer that is being referenced.
|
c
|
/*
* $Id: json_tokener.h,v 1.10 2006/07/25 03:24:50 mclark Exp $
*
* Copyright (c) 2004, 2005 Metaparadigm Pte. Ltd.
* Michael Clark <michael@metaparadigm.com>
*
* This library is free software; you can redistribute it and/or modify
* it under the terms of the MIT license. See COPYING for details.
*
*/
#ifndef _json_tokener_h_
#define _json_tokener_h_
#include <stddef.h>
#include "json_object.h"
#ifdef __cplusplus
extern "C" {
#endif
enum json_tokener_error {
json_tokener_success,
json_tokener_continue,
json_tokener_error_depth,
json_tokener_error_parse_eof,
json_tokener_error_parse_unexpected,
json_tokener_error_parse_null,
json_tokener_error_parse_boolean,
json_tokener_error_parse_number,
json_tokener_error_parse_array,
json_tokener_error_parse_object_key_name,
json_tokener_error_parse_object_key_sep,
json_tokener_error_parse_object_value_sep,
json_tokener_error_parse_string,
json_tokener_error_parse_comment
};
enum json_tokener_state {
json_tokener_state_eatws,
json_tokener_state_start,
json_tokener_state_finish,
json_tokener_state_null,
json_tokener_state_comment_start,
json_tokener_state_comment,
json_tokener_state_comment_eol,
json_tokener_state_comment_end,
json_tokener_state_string,
json_tokener_state_string_escape,
json_tokener_state_escape_unicode,
json_tokener_state_boolean,
json_tokener_state_number,
json_tokener_state_array,
json_tokener_state_array_add,
json_tokener_state_array_sep,
json_tokener_state_object_field_start,
json_tokener_state_object_field,
json_tokener_state_object_field_end,
json_tokener_state_object_value,
json_tokener_state_object_value_add,
json_tokener_state_object_sep,
json_tokener_state_array_after_sep,
json_tokener_state_object_field_start_after_sep,
json_tokener_state_inf
};
struct json_tokener_srec
{
enum json_tokener_state state, saved_state;
struct json_object *obj;
struct json_object *current;
char *obj_field_name;
};
#define JSON_TOKENER_DEFAULT_DEPTH 32
struct json_tokener
{
char *str;
struct printbuf *pb;
int max_depth, depth, is_double, st_pos, char_offset;
enum json_tokener_error err;
unsigned int ucs_char;
char quote_char;
struct json_tokener_srec *stack;
int flags;
};
/**
* Be strict when parsing JSON input. Use caution with
* this flag as what is considered valid may become more
* restrictive from one release to the next, causing your
* code to fail on previously working input.
*
* This flag is not set by default.
*
* @see json_tokener_set_flags()
*/
#define JSON_TOKENER_STRICT 0x01
/**
* Given an error previously returned by json_tokener_get_error(),
* return a human readable description of the error.
*
* @return a generic error message is returned if an invalid error value is provided.
*/
const char *json_tokener_error_desc(enum json_tokener_error jerr);
/**
* Retrieve the error caused by the last call to json_tokener_parse_ex(),
* or json_tokener_success if there is no error.
*
* When parsing a JSON string in pieces, if the tokener is in the middle
* of parsing this will return json_tokener_continue.
*
* See also json_tokener_error_desc().
*/
enum json_tokener_error json_tokener_get_error(struct json_tokener *tok);
extern struct json_tokener* json_tokener_new(void);
extern struct json_tokener* json_tokener_new_ex(int depth);
extern void json_tokener_free(struct json_tokener *tok);
extern void json_tokener_reset(struct json_tokener *tok);
extern struct json_object* json_tokener_parse(const char *str);
extern struct json_object* json_tokener_parse_verbose(const char *str, enum json_tokener_error *error);
/**
* Set flags that control how parsing will be done.
*/
extern void json_tokener_set_flags(struct json_tokener *tok, int flags);
/**
* Parse a string and return a non-NULL json_object if a valid JSON value
* is found. The string does not need to be a JSON object or array;
* it can also be a string, number or boolean value.
*
* A partial JSON string can be parsed. If the parsing is incomplete,
* NULL will be returned and json_tokener_get_error() will be return
* json_tokener_continue.
* json_tokener_parse_ex() can then be called with additional bytes in str
* to continue the parsing.
*
* If json_tokener_parse_ex() returns NULL and the error anything other than
* json_tokener_continue, a fatal error has occurred and parsing must be
* halted. Then tok object must not be re-used until json_tokener_reset() is
* called.
*
* When a valid JSON value is parsed, a non-NULL json_object will be
* returned. Also, json_tokener_get_error() will return json_tokener_success.
* Be sure to check the type with json_object_is_type() or
* json_object_get_type() before using the object.
*
* @b XXX this shouldn't use internal fields:
* Trailing characters after the parsed value do not automatically cause an
* error. It is up to the caller to decide whether to treat this as an
* error or to handle the additional characters, perhaps by parsing another
* json value starting from that point.
*
* Extra characters can be detected by comparing the tok->char_offset against
* the length of the last len parameter passed in.
*
* The tokener does \b not maintain an internal buffer so the caller is
* responsible for calling json_tokener_parse_ex with an appropriate str
* parameter starting with the extra characters.
*
* Example:
* @code
json_object *jobj = NULL;
const char *mystring = NULL;
int stringlen = 0;
enum json_tokener_error jerr;
do {
mystring = ... // get JSON string, e.g. read from file, etc...
stringlen = strlen(mystring);
jobj = json_tokener_parse_ex(tok, mystring, stringlen);
} while ((jerr = json_tokener_get_error(tok)) == json_tokener_continue);
if (jerr != json_tokener_success)
{
fprintf(stderr, "Error: %s\n", json_tokener_error_desc(jerr));
// Handle errors, as appropriate for your application.
}
if (tok->char_offset < stringlen) // XXX shouldn't access internal fields
{
// Handle extra characters after parsed object as desired.
// e.g. issue an error, parse another object from that point, etc...
}
// Success, use jobj here.
@endcode
*
* @param tok a json_tokener previously allocated with json_tokener_new()
* @param str an string with any valid JSON expression, or portion of. This does not need to be null terminated.
* @param len the length of str
*/
extern struct json_object* json_tokener_parse_ex(struct json_tokener *tok,
const char *str, int len);
#ifdef __cplusplus
}
#endif
#endif
|
/*
* $Id: json_tokener.h,v 1.10 2006/07/25 03:24:50 mclark Exp $
*
* Copyright (c) 2004, 2005 Metaparadigm Pte. Ltd.
* Michael Clark <michael@metaparadigm.com>
*
* This library is free software; you can redistribute it and/or modify
* it under the terms of the MIT license. See COPYING for details.
*
*/
#ifndef _json_tokener_h_
#define _json_tokener_h_
#include <stddef.h>
#include "json_object.h"
#ifdef __cplusplus
extern "C" {
#endif
enum json_tokener_error {
json_tokener_success,
json_tokener_continue,
json_tokener_error_depth,
json_tokener_error_parse_eof,
json_tokener_error_parse_unexpected,
json_tokener_error_parse_null,
json_tokener_error_parse_boolean,
json_tokener_error_parse_number,
json_tokener_error_parse_array,
json_tokener_error_parse_object_key_name,
json_tokener_error_parse_object_key_sep,
json_tokener_error_parse_object_value_sep,
json_tokener_error_parse_string,
json_tokener_error_parse_comment,
json_tokener_error_size
};
enum json_tokener_state {
json_tokener_state_eatws,
json_tokener_state_start,
json_tokener_state_finish,
json_tokener_state_null,
json_tokener_state_comment_start,
json_tokener_state_comment,
json_tokener_state_comment_eol,
json_tokener_state_comment_end,
json_tokener_state_string,
json_tokener_state_string_escape,
json_tokener_state_escape_unicode,
json_tokener_state_boolean,
json_tokener_state_number,
json_tokener_state_array,
json_tokener_state_array_add,
json_tokener_state_array_sep,
json_tokener_state_object_field_start,
json_tokener_state_object_field,
json_tokener_state_object_field_end,
json_tokener_state_object_value,
json_tokener_state_object_value_add,
json_tokener_state_object_sep,
json_tokener_state_array_after_sep,
json_tokener_state_object_field_start_after_sep,
json_tokener_state_inf
};
struct json_tokener_srec
{
enum json_tokener_state state, saved_state;
struct json_object *obj;
struct json_object *current;
char *obj_field_name;
};
#define JSON_TOKENER_DEFAULT_DEPTH 32
struct json_tokener
{
char *str;
struct printbuf *pb;
int max_depth, depth, is_double, st_pos, char_offset;
enum json_tokener_error err;
unsigned int ucs_char;
char quote_char;
struct json_tokener_srec *stack;
int flags;
};
/**
* Be strict when parsing JSON input. Use caution with
* this flag as what is considered valid may become more
* restrictive from one release to the next, causing your
* code to fail on previously working input.
*
* This flag is not set by default.
*
* @see json_tokener_set_flags()
*/
#define JSON_TOKENER_STRICT 0x01
/**
* Given an error previously returned by json_tokener_get_error(),
* return a human readable description of the error.
*
* @return a generic error message is returned if an invalid error value is provided.
*/
const char *json_tokener_error_desc(enum json_tokener_error jerr);
/**
* Retrieve the error caused by the last call to json_tokener_parse_ex(),
* or json_tokener_success if there is no error.
*
* When parsing a JSON string in pieces, if the tokener is in the middle
* of parsing this will return json_tokener_continue.
*
* See also json_tokener_error_desc().
*/
enum json_tokener_error json_tokener_get_error(struct json_tokener *tok);
extern struct json_tokener* json_tokener_new(void);
extern struct json_tokener* json_tokener_new_ex(int depth);
extern void json_tokener_free(struct json_tokener *tok);
extern void json_tokener_reset(struct json_tokener *tok);
extern struct json_object* json_tokener_parse(const char *str);
extern struct json_object* json_tokener_parse_verbose(const char *str, enum json_tokener_error *error);
/**
* Set flags that control how parsing will be done.
*/
extern void json_tokener_set_flags(struct json_tokener *tok, int flags);
/**
* Parse a string and return a non-NULL json_object if a valid JSON value
* is found. The string does not need to be a JSON object or array;
* it can also be a string, number or boolean value.
*
* A partial JSON string can be parsed. If the parsing is incomplete,
* NULL will be returned and json_tokener_get_error() will be return
* json_tokener_continue.
* json_tokener_parse_ex() can then be called with additional bytes in str
* to continue the parsing.
*
* If json_tokener_parse_ex() returns NULL and the error anything other than
* json_tokener_continue, a fatal error has occurred and parsing must be
* halted. Then tok object must not be re-used until json_tokener_reset() is
* called.
*
* When a valid JSON value is parsed, a non-NULL json_object will be
* returned. Also, json_tokener_get_error() will return json_tokener_success.
* Be sure to check the type with json_object_is_type() or
* json_object_get_type() before using the object.
*
* @b XXX this shouldn't use internal fields:
* Trailing characters after the parsed value do not automatically cause an
* error. It is up to the caller to decide whether to treat this as an
* error or to handle the additional characters, perhaps by parsing another
* json value starting from that point.
*
* Extra characters can be detected by comparing the tok->char_offset against
* the length of the last len parameter passed in.
*
* The tokener does \b not maintain an internal buffer so the caller is
* responsible for calling json_tokener_parse_ex with an appropriate str
* parameter starting with the extra characters.
*
* This interface is presently not 64-bit clean due to the int len argument
* so the function limits the maximum string size to INT32_MAX (2GB).
* If the function is called with len == -1 then strlen is called to check
* the string length is less than INT32_MAX (2GB)
*
* Example:
* @code
json_object *jobj = NULL;
const char *mystring = NULL;
int stringlen = 0;
enum json_tokener_error jerr;
do {
mystring = ... // get JSON string, e.g. read from file, etc...
stringlen = strlen(mystring);
jobj = json_tokener_parse_ex(tok, mystring, stringlen);
} while ((jerr = json_tokener_get_error(tok)) == json_tokener_continue);
if (jerr != json_tokener_success)
{
fprintf(stderr, "Error: %s\n", json_tokener_error_desc(jerr));
// Handle errors, as appropriate for your application.
}
if (tok->char_offset < stringlen) // XXX shouldn't access internal fields
{
// Handle extra characters after parsed object as desired.
// e.g. issue an error, parse another object from that point, etc...
}
// Success, use jobj here.
@endcode
*
* @param tok a json_tokener previously allocated with json_tokener_new()
* @param str an string with any valid JSON expression, or portion of. This does not need to be null terminated.
* @param len the length of str
*/
extern struct json_object* json_tokener_parse_ex(struct json_tokener *tok,
const char *str, int len);
#ifdef __cplusplus
}
#endif
#endif
|
5795_6
|
crossvul
|
h
|
CWE-119
|
Improper Restriction of Operations within the Bounds of a Memory Buffer - Certain languages allow direct addressing of memory locations and do not automatically ensure that these locations are valid for the memory buffer that is being referenced.
|
c
|
/*
* GPAC - Multimedia Framework C SDK
*
* Authors: Jean Le Feuvre
* Copyright (c) Telecom ParisTech 2000-2012
* All rights reserved
*
* This file is part of GPAC / common tools sub-project
*
* GPAC is free software; you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* GPAC is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; see the file COPYING. If not, write to
* the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
*
*/
#ifndef _GF_TOOLS_H_
#define _GF_TOOLS_H_
#ifdef __cplusplus
extern "C" {
#endif
#include <gpac/setup.h>
#include <gpac/version.h>
/*! \file "gpac/tools.h"
* \brief Core definitions and tools of GPAC.
*
* This file contains basic functions and core definitions of the GPAC framework. This file is
* usually included by all GPAC header files since it contains the error definitions.
*/
/*! \defgroup utils_grp Core Tools
* \brief Core definitions and tools of GPAC.
*
* You will find in this module the documentation of the core tools used in GPAC.
*/
/*!
* \ingroup utils_grp
* \brief Base definitions and functions of GPAC.
*
* This section documents some very basic functions and core definitions of the GPAC framework.
* @{
*/
/*!
* \brief Stringizer
* \hideinitializer
*
* Macro transforming its input name into a string
*/
#define gf_stringizer(x) #x
/*!
* \brief Memory allocation for a structure
* \hideinitializer
*
* Macro allocating memory and zero-ing it
*/
#define GF_SAFEALLOC(__ptr, __struct) { __ptr = (__struct *) gf_malloc(sizeof(__struct)); if (__ptr) memset((void *) __ptr, 0, sizeof(__struct)); }
/*!
* \brief Memory allocation for an array of n structs
* \hideinitializer
*
* Macro allocating memory for n structures and zero-ing it
*/
#define GF_SAFE_ALLOC_N(__ptr, __n, __struct) { __ptr = (__struct *) gf_malloc( __n * sizeof(__struct)); if (__ptr) memset((void *) __ptr, 0, __n * sizeof(__struct)); }
/*!
* \brief 4CC Formatting
* \hideinitializer
*
* Macro formating a 4-character code (or 4CC) "abcd" as 0xAABBCCDD
*/
#ifndef GF_4CC
#define GF_4CC(a,b,c,d) (((a)<<24)|((b)<<16)|((c)<<8)|(d))
#endif
/*!
* \brief GPAC feature list
*
* returns the list of features enabled/disabled in this GPAC build.
*/
const char *gpac_features();
/*!
* \brief 4CC Printing
*
* returns a 4CC printable form
*/
const char *gf_4cc_to_str(u32 type);
/*!
* \brief asprintf() portable implementation
*
* similar to sprintf, except it allows the string on the
* \note asprintf implementation for windows
*/
int gf_asprintf(char **buffer, const char *fmt, ...);
size_t gf_fread(void *ptr, size_t size, size_t nmemb, FILE *stream);
/*!
* \brief file writing helper
*
* Wrapper to properly handle calls to fwrite()
* Ensures proper error handling is invoked when it fails.
* \return Same as gf_fwrite
*
*/
size_t gf_fwrite(const void *ptr, size_t size, size_t nmemb, FILE *stream);
/*!
* \brief large file opening
*
* Opens a large file (>4GB)
* \param file_name Same semantics as fopen
* \param mode Same semantics as fopen
* \return stream handle of the file object
* \note You only need to call this function if you're suspecting the file to be a large one (usually only media files), otherwise use regular stdio.
*/
FILE *gf_fopen(const char *file_name, const char *mode);
/*!
* \brief file closing
*
* Closes a file
* \param file file to close
* \note You only need to call this function if you're suspecting the file to be a large one (usually only media files), otherwise use regular stdio.
*/
s32 gf_fclose(FILE *file);
/*!
* \brief large file position query
*
* Queries the current read/write position in a large file
* \param f Same semantics as ftell
* \return position in the file
* \note You only need to call this function if you're suspecting the file to be a large one (usually only media files), otherwise use regular stdio.
*/
u64 gf_ftell(FILE *f);
/*!
* \brief large file seeking
*
* Seeks the current read/write position in a large file
* \param f Same semantics as fseek
* \param pos Same semantics as fseek
* \param whence Same semantics as fseek
* \return new position in the file
* \note You only need to call this function if you're suspecting the file to be a large one (usually only media files), otherwise use regular stdio.
*/
u64 gf_fseek(FILE *f, s64 pos, s32 whence);
/*!
* \brief get basename from filename/path
*
* Returns a pointer to the start of a filepath basename or null
* \param filename Path of the file, can be an absolute path
*/
char* gf_file_basename(const char* filename);
/*!
* \brief get extension from filename
*
* Returns a pointer to the start of a filepath extension or null
* \param filename Path of the file, can be an absolute path
*/
char* gf_file_ext_start(const char* filename);
/*! @} */
/*! \addtogroup errors_grp Error codes
* \ingroup utils_grp
* \brief Errors used in GPAC.
*
* This section documents all error codes used in the GPAC framework. Most of the GPAC's functions will use these as
* return values, and some of these errors are also used for state communication with the different modules of the framework.
* @{
*/
/*!
* GPAC Error
* \hideinitializer
*
* positive values are warning and info, 0 means no error and negative values are errors
*/
typedef enum
{
/*!Message from any scripting engine used in the presentation (ECMAScript, MPEG-J, ...) (Info).*/
GF_SCRIPT_INFO = 3,
/*!Indicates an data frame has several AU packed (not MPEG-4 compliant). This is used by decoders to force
multiple decoding of the same data frame (Info).*/
GF_PACKED_FRAMES = 2,
/*!Indicates the end of a stream or of a file (Info).*/
GF_EOS = 1,
/*!
\n\n
*/
/*!Operation success (no error).*/
GF_OK = 0,
/*!\n*/
/*!One of the input parameter is not correct or cannot be used in the current operating mode of the framework.*/
GF_BAD_PARAM = -1,
/*! Memory allocation failure.*/
GF_OUT_OF_MEM = -2,
/*! Input/Output failure (disk access, system call failures)*/
GF_IO_ERR = -3,
/*! The desired feature or operation is not supported by the framework*/
GF_NOT_SUPPORTED = -4,
/*! Input data has been corrupted*/
GF_CORRUPTED_DATA = -5,
/*! A modification was attempted on a scene node which could not be found*/
GF_SG_UNKNOWN_NODE = -6,
/*! The PROTO node interface does not match the nodes using it*/
GF_SG_INVALID_PROTO = -7,
/*! An error occured in the scripting engine*/
GF_SCRIPT_ERROR = -8,
/*! Buffer is too small to contain decoded data. Decoders shall use this error whenever they need to resize their output memory buffers*/
GF_BUFFER_TOO_SMALL = -9,
/*! Bitstream is not compliant to the specfication it refers to*/
GF_NON_COMPLIANT_BITSTREAM = -10,
/*! No decoders could be found to handle the desired media type*/
GF_CODEC_NOT_FOUND = -11,
/*! The URL is not properly formatted or cannot be found*/
GF_URL_ERROR = -12,
/*! An service error has occured at the local side*/
GF_SERVICE_ERROR = -13,
/*! A service error has occured at the remote (server) side*/
GF_REMOTE_SERVICE_ERROR = -14,
/*! The desired stream could not be found in the service*/
GF_STREAM_NOT_FOUND = -15,
/*! The IsoMedia file is not a valid one*/
GF_ISOM_INVALID_FILE = -20,
/*! The IsoMedia file is not complete. Either the file is being downloaded, or it has been truncated*/
GF_ISOM_INCOMPLETE_FILE = -21,
/*! The media in this IsoMedia track is not valid (usually due to a broken stream description)*/
GF_ISOM_INVALID_MEDIA = -22,
/*! The requested operation cannot happen in the current opening mode of the IsoMedia file*/
GF_ISOM_INVALID_MODE = -23,
/*! This IsoMedia track refers to media outside the file in an unknown way*/
GF_ISOM_UNKNOWN_DATA_REF = -24,
/*! An invalid MPEG-4 Object Descriptor was found*/
GF_ODF_INVALID_DESCRIPTOR = -30,
/*! An MPEG-4 Object Descriptor was found or added to a forbidden descriptor*/
GF_ODF_FORBIDDEN_DESCRIPTOR = -31,
/*! An invalid MPEG-4 BIFS command was detected*/
GF_ODF_INVALID_COMMAND = -32,
/*! The scene has been encoded using an unknown BIFS version*/
GF_BIFS_UNKNOWN_VERSION = -33,
/*! The remote IP address could not be solved*/
GF_IP_ADDRESS_NOT_FOUND = -40,
/*! The connection to the remote peer has failed*/
GF_IP_CONNECTION_FAILURE = -41,
/*! The network operation has failed*/
GF_IP_NETWORK_FAILURE = -42,
/*! The network connection has been closed*/
GF_IP_CONNECTION_CLOSED = -43,
/*! The network operation has failed because no data is available*/
GF_IP_NETWORK_EMPTY = -44,
/*! The network operation has been discarded because it would be a blocking one*/
GF_IP_SOCK_WOULD_BLOCK = -45,
/*! UDP connection did not receive any data at all. Signaled by client services to reconfigure network if possible*/
GF_IP_UDP_TIMEOUT = -46,
/*! Authentication with the remote host has failed*/
GF_AUTHENTICATION_FAILURE = -50,
/*! Script not ready for playback */
GF_SCRIPT_NOT_READY = -51,
/*! Bad configuration for the current contex */
GF_INVALID_CONFIGURATION = -52,
/*! The element has not been found */
GF_NOT_FOUND = -53,
/*! Unexpected format of data */
GF_PROFILE_NOT_SUPPORTED = -54,
/*! the decoder buffers were filled, it is necessary to recuperate decoded data*/
GF_CODEC_BUFFER_UNAVAILABLE = -55,
} GF_Err;
/*!
* \brief Error Printing
*
* Returns a printable version of a given error
* \param e Error code requested
* \return String representing the error
*/
const char *gf_error_to_string(GF_Err e);
/*! @} */
/*! \addtogroup log_grp Logging tools
* \ingroup utils_grp
* \brief Logging system of GPAC
* @{
*/
/*!
* GPAC Log Levels
* \hideinitializer
*
* These levels describes messages priority used when filtering logs
*/
typedef enum
{
/*! Disable all Log message*/
GF_LOG_QUIET = 0,
/*! Log message describes an error*/
GF_LOG_ERROR,
/*! Log message describes a warning*/
GF_LOG_WARNING,
/*! Log message is informational (state, etc..)*/
GF_LOG_INFO,
/*! Log message is a debug info*/
GF_LOG_DEBUG
} GF_LOG_Level;
/*!
* \brief Log exits at first error assignment
*
* When GF_LOG_ERROR happens, program leaves with instruction exit(1);
* \param strict strict behaviour when encoutering a serious error.
*
*/
void gf_log_set_strict_error(Bool strict);
/*!
* \brief gets string-formated log tools
*
* Returns the string-formatted log tools and levels. Returned string shall be freed by the caller.
* \return string-formatted log tools.
*
*/
char *gf_log_get_tools_levels();
/*!
* GPAC Log tools
* \hideinitializer
*
* These flags describes which sub-part of GPAC generates the log and are used when filtering logs
*/
typedef enum
{
/*! Log message from the core library (init, threads, network calls, etc)*/
GF_LOG_CORE = 0,
/*! Log message from a raw media parser (BIFS, LASeR, A/V formats)*/
GF_LOG_CODING,
/*! Log message from a bitstream parser (IsoMedia, MPEG-2 TS, OGG, ...)*/
GF_LOG_CONTAINER,
/*! Log message from the network/service stack (messages & co)*/
GF_LOG_NETWORK,
/*! Log message from the RTP/RTCP stack (TS info) and packet structure & hinting (debug)*/
GF_LOG_RTP,
/*! Log message from authoring subsystem (file manip, import/export)*/
GF_LOG_AUTHOR,
/*! Log message from the sync layer of the terminal*/
GF_LOG_SYNC,
/*! Log message from a codec*/
GF_LOG_CODEC,
/*! Log message from any XML parser (context loading, etc)*/
GF_LOG_PARSER,
/*! Log message from the terminal/compositor, indicating media object state*/
GF_LOG_MEDIA,
/*! Log message from the scene graph/scene manager (handling of nodes and attribute modif, DOM core)*/
GF_LOG_SCENE,
/*! Log message from the scripting engine APIs - does not cover alert() in the script code itself*/
GF_LOG_SCRIPT,
/*! Log message from event handling*/
GF_LOG_INTERACT,
/*! Log message from compositor*/
GF_LOG_COMPOSE,
/*! Log for video object cache */
GF_LOG_CACHE,
/*! Log message from multimedia I/O devices (audio/video input/output, ...)*/
GF_LOG_MMIO,
/*! Log for runtime info (times, memory, CPU usage)*/
GF_LOG_RTI,
/*! Log for SMIL timing and animation*/
GF_LOG_SMIL,
/*! Log for memory tracker*/
GF_LOG_MEMORY,
/*! Log for audio compositor*/
GF_LOG_AUDIO,
/*! Generic Log for modules*/
GF_LOG_MODULE,
/*! Log for threads and mutexes */
GF_LOG_MUTEX,
/*! Log for threads and condition */
GF_LOG_CONDITION,
/*! Log for all HTTP streaming */
GF_LOG_DASH,
/*! Log for all messages coming from GF_Terminal or script alert()*/
GF_LOG_CONSOLE,
/*! Log for all messages coming the application, not used by libgpac or the modules*/
GF_LOG_APP,
/*! Log for all messages coming from the scheduler */
GF_LOG_SCHEDULER,
/*! special value used to set a level for all tools*/
GF_LOG_ALL,
GF_LOG_TOOL_MAX = GF_LOG_ALL,
} GF_LOG_Tool;
/*!
* \brief Log modules assignment
*
* Sets the tools to be checked for log filtering. By default no logging is performed.
* \param tool tool to be logged.
* \param level level of logging for this tool.
*
*/
void gf_log_set_tool_level(GF_LOG_Tool tool, GF_LOG_Level level);
/*!
* \brief Log Message Callback
*
* The gf_log_cbk type is the type for the callback of the \ref gf_log_set_callback function. By default all logs are redirected to stderr
* \param cbck Opaque user data.
* \param log_level level of the log. This value is not guaranteed in multi-threaded context.
* \param log_tool tool emitting the log. This value is not guaranteed in multi-threaded context.
* \param fmt message log format.
* \param vlist message log param.
*
*/
typedef void (*gf_log_cbk)(void *cbck, GF_LOG_Level log_level, GF_LOG_Tool log_tool, const char* fmt, va_list vlist);
/*!
* \brief Log overwrite
*
* Assigns a user-defined callback for printing log messages. By default all logs are redirected to stderr
* \param usr_cbk Opaque user data
* \param cbk Callback log function
* \return previous callback function
*/
gf_log_cbk gf_log_set_callback(void *usr_cbk, gf_log_cbk cbk);
/*!
\cond DUMMY_DOXY_SECTION
*/
#ifndef GPAC_DISABLE_LOG
/*note:
to turn log on, change to GPAC_ENABLE_LOG
to turn log off, change to GPAC_DISABLE_LOG
this is needed by configure+sed to modify this file directly
*/
#define GPAC_ENABLE_LOG
#endif
/*!
\endcond
*/
/*this is all a bit ugly, but most compilers don't properly handle variadic macros...*/
void gf_log(const char *fmt, ...);
void gf_log_lt(GF_LOG_Level ll, GF_LOG_Tool lt);
void gf_log_va_list(GF_LOG_Level level, GF_LOG_Tool tool, const char *fmt, va_list vl);
/*!
* \brief Log level checking
*
* Checks if a given tool is logged for the given level
* \param log_tool tool to check
* \param log_level level to check
* \return 1 if logged, 0 otherwise
*/
Bool gf_log_tool_level_on(GF_LOG_Tool log_tool, GF_LOG_Level log_level);
/*!
* \brief Set log tools and levels
*
* Set log tools and levels according to the log_tools_levels string. All previous log settings are discarded.
* \param log_tools_levels string specifying the tools and levels. It is formatted as logToolX\@logLevelX:logToolZ\@logLevelZ:...
* \return GF_OK or GF_BAD_PARAM
*/
GF_Err gf_log_set_tools_levels(const char *log_tools_levels);
/*!
* \brief Modify log tools and levels
*
* Modify log tools and levels according to the log_tools_levels string. Previous log settings are kept.
* \param val string specifying the tools and levels. It is formatted as logToolX\@logLevelX:logToolZ\@logLevelZ:...
* \return GF_OK or GF_BAD_PARAM
*/
GF_Err gf_log_modify_tools_levels(const char *val);
/*!
* \brief Set log level for a given tool
*
* Set log level for a given tool.
* \param tool tool to log
* \param level log level for this tool
*/
void gf_log_set_tool_level(GF_LOG_Tool tool, GF_LOG_Level level);
#ifdef GPAC_DISABLE_LOG
#define GF_LOG(_ll, _lm, __args)
#else
/*!
* \brief Message logging
* \hideinitializer
*
* Macro for logging messages. Usage is GF_LOG(log_lev, log_module, (fmt, ...)). The log function is only called if log filtering allows it. This avoids fetching logged parameters when the tool is not being logged.
*/
#define GF_LOG(_log_level, _log_tools, __args) if (gf_log_tool_level_on(_log_tools, _log_level) ) { gf_log_lt(_log_level, _log_tools); gf_log __args ;}
#endif
/*!
* \brief PseudoRandom Integer Generation Initialization
*
* Sets the starting point for generating a series of pseudorandom integers.
* \param Reset Re-initializes the random number generator
*/
void gf_rand_init(Bool Reset);
/*!
* \brief PseudoRandom Integer Generation
*
* Returns a pseudorandom integer.
*/
u32 gf_rand();
/*!
* \brief user name
*
* Gets current user (login) name.
*/
void gf_get_user_name(char *buf, u32 buf_size);
/*!\brief FileEnum info object
*
*The FileEnumInfo object is used to get file attributes upon enumeration of a directory.
*/
typedef struct
{
/*!File is marked as hidden*/
Bool hidden;
/*!File is a directory*/
Bool directory;
/*!File is a drive mountpoint*/
Bool drive;
/*!File is a system file*/
Bool system;
/*!File size in bytes*/
u64 size;
/*!File last modif time in UTC seconds*/
u64 last_modified;
} GF_FileEnumInfo;
/*!
* \brief Directory Enumeration Callback
*
* The gf_enum_dir_item type is the type for the callback of the \ref gf_enum_directory function
* \param cbck Opaque user data.
* \param item_name File or directory name.
* \param item_path File or directory full path and name from filesystem root.
* \param file_info information for the file or directory.
* \return 1 to abort enumeration, 0 to continue enumeration.
*
*/
typedef Bool (*gf_enum_dir_item)(void *cbck, char *item_name, char *item_path, GF_FileEnumInfo *file_info);
/*!
* \brief Directory enumeration
*
* Enumerates a directory content. Feedback is provided by the enum_dir_item function
* \param dir Directory to enumerate
* \param enum_directory If set, only directories will be enumerated, otherwise only files are.
* \param enum_dir \ref gf_enum_dir_item callback function for enumeration.
* \param cbck Opaque user data passed to callback function.
* \param filter optional filter for file extensions. If a file extension without the dot '.' character is not found in the
* filter the file will be skipped.
*/
GF_Err gf_enum_directory(const char *dir, Bool enum_directory, gf_enum_dir_item enum_dir, void *cbck, const char *filter);
/*!
* \brief File Deletion
*
* Deletes a file from the disk.
* \param fileName absolute name of the file or name relative to the current working directory.
*/
GF_Err gf_delete_file(const char *fileName);
/*!
* \brief File Move
*
* Moves or renames a file or directory.
* \param fileName absolute path of the file / directory to move or rename
* \param newFileName absolute new path/name of the file / directory
*/
GF_Err gf_move_file(const char *fileName, const char *newFileName);
/*!
* \brief Temporary File Creation
*
* Creates a new temporary file in binary mode
* \param fileName if not NULL, strdup() of the temporary filename when created by GPAC (NULL otherwise as the system automatically removes its own tmp files)
* \return stream handle to the new file ressoucre
*/
FILE *gf_temp_file_new(char ** const fileName);
/*!
* \brief File Modification Time
*
* Returns the modification time of the given file. The exact meaning of this value is system dependent
* \param filename file to check
* \return modification time of the file
*/
u64 gf_file_modification_time(const char *filename);
/*!
* \brief File existence check
*
* Moves or renames a file or directory.
* \param fileName absolute path of the file / directory to move or rename
* \return GF_TRUE if file exists
*/
Bool gf_file_exists(const char *fileName);
/*!
* \brief Progress formatting
*
* Signals progress in GPAC's operations. Note that progress signaling with this function is not thread-safe, the main purpose is to use it for authoring tools only.
* \param title title string of the progress, or NULL for no progress
* \param done Current amount performed of the action.
* \param total Total amount of the action.
*/
void gf_set_progress(const char *title, u64 done, u64 total);
/*!
* \brief Progress Callback
*
* The gf_on_progress_cbk type is the type for the callback of the \ref gf_set_progress_callback function
* \param cbck Opaque user data.
 * \param title progress title.
* \param done Current amount performed of the action
* \param total Total amount of the action.
*
*/
typedef void (*gf_on_progress_cbk)(const void *cbck, const char *title, u64 done, u64 total);
/*!
* \brief Progress overwriting
*
 * Overwrites the progress signaling function by a user-defined one.
* \param user_cbk Opaque user data
* \param prog_cbk new callback function to use. Passing NULL restore default GPAC stderr notification.
*/
void gf_set_progress_callback(void *user_cbk, gf_on_progress_cbk prog_cbk);
/*!
* \brief Prompt checking
*
* Checks if a character is pending in the prompt buffer.
* \return 1 if a character is ready to be fetched, 0 otherwise.
* \note Function not available under WindowsCE nor SymbianOS
*/
Bool gf_prompt_has_input();
/*!
* \brief Prompt character flush
*
* Returns the current character entered at prompt if any.
* \return value of the character.
* \note Function not available under WindowsCE nor SymbianOS
*/
char gf_prompt_get_char();
/*!
* \brief turns prompt echo on/off
*
* Turns the prompt character echo on/off - this is useful when entering passwords.
* \param echo_off indicates whether echo should be turned on or off.
* \note Function not available under WindowsCE nor SymbianOS
*/
void gf_prompt_set_echo_off(Bool echo_off);
/*! @} */
/*!
*\addtogroup cpu_grp System time CPU and Memory tools
*\ingroup utils_grp
*\brief System time CPU and Memory functions
*
*This section documents time functionalities and CPU management in GPAC.
* @{
*/
/*!
* Selection flags for memory tracker
* \hideinitializer
*/
typedef enum
{
/*! No memory tracking*/
GF_MemTrackerNone = 0,
/*! Memory tracking without backtrace*/
GF_MemTrackerSimple,
/*! Memory tracking with backtrace*/
GF_MemTrackerBackTrace,
} GF_MemTrackerType;
/*!
* \brief System setup
*
 * Inits the system high-resolution clock if any, and CPU usage manager. It is strongly recommended to call this
 * function before calling any other GPAC functions, since on some systems (like winCE) it may result in a better memory usage estimation.
 * \param mem_tracker_type memory tracking mode to use (see \ref GF_MemTrackerType).
 * \note This can be called several times but only the first call will result in system setup.
*/
void gf_sys_init(GF_MemTrackerType mem_tracker_type);
/*!
* \brief System closing
*
* Closes the system high-resolution clock and any CPU associated ressources.
* \note This can be called several times but the system will be closed when no more users are counted.
*/
void gf_sys_close();
/*!
* \brief System arguments
*
* Sets the user app arguments (used by GUI mode)
* \param argc Number of arguments
* \param argv Array of arguments
*/
void gf_sys_set_args(s32 argc, const char **argv);
/*!
* \brief Get number of args
*
* Gets the number of argument of the user application if any
* \return number of argument of the user application
*/
u32 gf_sys_get_argc();
/*!
 * \brief Get argument
 *
 * Gets the argument of the user application at the given index if any
 * \param arg Index of argument to retrieve
 * \return argument value of the user application
*/
const char *gf_sys_get_arg(u32 arg);
/*!
* \brief System clock query
*
* Gets the system clock time.
* \return System clock value since GPAC initialization in milliseconds.
*/
u32 gf_sys_clock();
/*!
* \brief High precision system clock query
*
 * Gets the high precision system clock time.
* \return System clock value since GPAC initialization in microseconds.
*/
u64 gf_sys_clock_high_res();
/*!
* \brief Sleeps thread/process
*
* Locks calling thread/process execution for a given time.
* \param ms Amount of time to sleep in milliseconds.
*/
void gf_sleep(u32 ms);
#ifdef WIN32
/*!
* \brief WINCE time constant
* \hideinitializer
*
* time between jan 1, 1601 and jan 1, 1970 in units of 100 nanoseconds
*/
#define TIMESPEC_TO_FILETIME_OFFSET (((LONGLONG)27111902 << 32) + (LONGLONG)3577643008)
#endif
/*!
*\brief gets UTC time in milliseconds
*
* Gets UTC clock in milliseconds
* \return UTC time in milliseconds
*/
u64 gf_net_get_utc();
/*!
*\brief parses date and returns UTC value for this date. Date format is an XSD dateTime format or any of the supported formats from HTTP 1.1:
Sun, 06 Nov 1994 08:49:37 GMT ; RFC 822, updated by RFC 1123
Sunday, 06-Nov-94 08:49:37 GMT ; RFC 850, obsoleted by RFC 1036
 Sun Nov  6 08:49:37 1994 ; ANSI C's asctime() format
*
* \param date string containing the date to parse
* \return UTC time in milliseconds
*/
u64 gf_net_parse_date(const char *date);
/*!
*\brief gets timezone adjustment in seconds
*
* Gets timezone adjustment in seconds, with localtime - timezone = UTC time
* \return timezone shift in seconds
*/
s32 gf_net_get_timezone();
/*!\brief run-time system info object
*
*The Run-Time Info object is used to get CPU and memory occupation of the calling process.
*All time values are expressed in milliseconds (accuracy is not guaranteed).
*/
typedef struct
{
/*!start of the sampling period*/
u32 sampling_instant;
/*!duration of the sampling period*/
u32 sampling_period_duration;
/*!total amount of time (User+kernel) spent in CPU for all processes as evaluated at the end of the sampling period*/
u32 total_cpu_time;
/*!total amount of time (User+kernel) spent in CPU for the calling process as evaluated at the end of the sampling period*/
u32 process_cpu_time;
/*!amount of time (User+kernel) spent in CPU for all processes during the sampling period*/
u32 total_cpu_time_diff;
/*!total amount of time (User+kernel) spent in CPU for the calling process during the sampling period*/
u32 process_cpu_time_diff;
/*!total amount of idle time during the sampling period.*/
u32 cpu_idle_time;
/*!percentage (from 0 to 100) of CPU usage during the sampling period.*/
u32 total_cpu_usage;
/*!percentage (from 0 to 100) of the CPU usage by the calling process during the sampling period.*/
u32 process_cpu_usage;
/*!calling process ID*/
u32 pid;
/*!calling process thread count if known*/
u32 thread_count;
/*!size of calling process allocated heaps*/
u64 process_memory;
/*!total physical memory in system*/
u64 physical_memory;
/*!available physical memory in system*/
u64 physical_memory_avail;
/*!total memory currently allocated by gpac*/
u64 gpac_memory;
/*!total number of cores on the system*/
u32 nb_cores;
} GF_SystemRTInfo;
/*!
* Selection flags for run-time info retrieval
* \hideinitializer
*/
enum
{
/*!Indicates all processes' times must be fetched. If not set, only the current process times will be retrieved, and the
thread count and total times won't be available*/
GF_RTI_ALL_PROCESSES_TIMES = 1,
/*!Indicates the process allocated heap size must be fetch. If not set, only the system physical memory is fetched.
 Fetching the entire process allocated memory can have a large impact on performances*/
GF_RTI_PROCESS_MEMORY = 1<<1,
/*!Indicates that only system memory should be fetched. When set, all refreshing info is ignored*/
GF_RTI_SYSTEM_MEMORY_ONLY = 1<<2
};
/*!
* \brief Gets Run-Time info
*
* Gets CPU and memory usage info for the calling process and the system. Information gathering
 * is controlled through timeout values.
* \param refresh_time_ms refresh time period in milliseconds. If the last sampling was done less than this period ago, the run-time info is not refreshed.
* \param rti holder to the run-time info structure to update.
* \param flags specify which info is to be retrieved.
* \return 1 if info has been updated, 0 otherwise.
* \note You should not try to use a too small refresh time. Typical values are 500 ms or one second.
*/
Bool gf_sys_get_rti(u32 refresh_time_ms, GF_SystemRTInfo *rti, u32 flags);
/*!
 * \brief Battery state query
 *
 * Queries the device battery status through optional output parameters.
 * Parameter semantics below are inferred from their names - confirm against the implementation.
 * \param onBattery set to whether the device currently runs on battery power.
 * \param onCharge set to whether the battery is currently charging.
 * \param level battery charge level.
 * \param batteryLifeTime remaining battery life time.
 * \param batteryFullLifeTime battery life time when fully charged.
 * \return presumably GF_TRUE if the battery state could be retrieved - TODO confirm
 */
Bool gf_sys_get_battery_state(Bool *onBattery, u32 *onCharge, u32 *level, u32 *batteryLifeTime, u32 *batteryFullLifeTime);
typedef struct _GF_GlobalLock_opaque GF_GlobalLock;
/*!
* This function allows the user to create a global lock for all GPAC instances.
* This allow to disable some features for other instances for instance.
* \param resourceName The name of the resource to lock
 * \return the lock object if the resource has been locked, NULL if the resource could not be locked
*/
GF_GlobalLock * gf_global_resource_lock(const char * resourceName);
/*!
 * Unlock a previously locked resource
 * \param lock The resource to unlock
 * \return GF_OK if everything went fine
*/
GF_Err gf_global_resource_unlock(GF_GlobalLock * lock);
/*! @} */
/*!
*\addtogroup osfile_grp File System
*\ingroup utils_grp
*\brief File System tools
*
 *This section documents file system functionalities in GPAC.
* @{
*/
/*!
*\brief parses 128 bit from string
*
* Parses 128 bit from string
*
* \param string the string containing the value in hexa. Non alphanum characters are skipped
* \param value the value parsed
* \return error code if any
*/
GF_Err gf_bin128_parse(const char *string, bin128 value);
/*!
* \brief Delete Directory
*
* Delete a dir within the full path.
* \param DirPathName the file path name.
*/
GF_Err gf_rmdir(const char *DirPathName);
/*!
* \brief Create Directory
*
* Create a directory within the full path.
* \param DirPathName the dir path name.
*/
GF_Err gf_mkdir(const char* DirPathName);
/*!
* \brief Check Directory Exists
*
 * Checks if a directory exists within the full path.
* \param DirPathName the dir path name.
*/
Bool gf_dir_exists(const char *DirPathName);
/*!
 * \brief Cleanup Directory
*
* Cleanup a directory within the full path, removing all the files and the directories.
* \param DirPathName the dir path name.
*/
GF_Err gf_cleanup_dir(const char* DirPathName);
/**
* Gets a newly allocated string containing the default cache directory.
* It is the responsibility of the caller to free the string.
* \return a fully qualified path to the default cache directory
*/
char * gf_get_default_cache_directory();
/**
* Gets the number of open file handles (gf_fopen/gf_fclose only).
* \return number of open file handles
*/
u32 gf_file_handles_count();
/*! @} */
/*!
*\addtogroup hash_grp RawData Misc
*\ingroup utils_grp
*\brief Data integrity and parsing
*
*This section documents misc data functions such as integrity and parsing such as SHA-1 hashing CRC checksum, 128 bit ID parsing...
* @{
*/
/*!
* \brief CRC32 compute
*
* Computes the CRC32 value of a buffer.
* \param data buffer
* \param size buffer size
* \return computed CRC32
*/
u32 gf_crc_32(const char *data, u32 size);
/**
* Compresses a data buffer in place using zlib. Buffer may be reallocated in the process.
* \param data pointer to the data buffer to be compressed
* \param data_len length of the data buffer to be compressed
* \param out_size pointer for output buffer size
 * \return GF_OK if everything went fine
*/
GF_Err gf_gz_compress_payload(char **data, u32 data_len, u32 *out_size);
/**
* Decompresses a data buffer using zlib.
* \param data data buffer to be decompressed
* \param data_len length of the data buffer to be decompressed
* \param uncompressed_data pointer to the uncompressed data buffer. It is the responsibility of the caller to free this buffer.
* \param out_size size of the uncompressed buffer
 * \return GF_OK if everything went fine
*/
GF_Err gf_gz_decompress_payload(char *data, u32 data_len, char **uncompressed_data, u32 *out_size);
/*SHA1*/
typedef struct __sha1_context GF_SHA1Context;
#define GF_SHA1_DIGEST_SIZE 20
#define GF_SHA1_DIGEST_SIZE_HEXA 41
/* Create SHA-1 context */
GF_SHA1Context *gf_sha1_starts();
/* Adds byte to the SHA-1 context */
void gf_sha1_update(GF_SHA1Context *ctx, u8 *input, u32 length);
/* Generates SHA-1 of all bytes ingested */
void gf_sha1_finish(GF_SHA1Context *ctx, u8 digest[GF_SHA1_DIGEST_SIZE] );
/*
* Output SHA-1(file contents), returns 0 if successful.
*/
int gf_sha1_file(const char *filename, u8 digest[GF_SHA1_DIGEST_SIZE]);
/*
* Gets SHA-1 of input buffer
*/
void gf_sha1_csum(u8 *buf, u32 buflen, u8 digest[GF_SHA1_DIGEST_SIZE]);
/*
* Gets SHA-1 of input buffer into hexa form
*/
void gf_sha1_csum_hexa(u8 *buf, u32 buflen, u8 digest[GF_SHA1_DIGEST_SIZE_HEXA]);
/*! @} */
/* \cond dummy */
#ifdef GPAC_ANDROID
typedef void (*fm_callback_func)(void *cbk_obj, u32 type, u32 param, int *value);
extern void gf_fm_request_set_callback(void *cbk_obj, fm_callback_func cbk_func);
void gf_fm_request_call(u32 type, u32 param, int *value);
#endif //GPAC_ANDROID
/* \endcond */
#ifdef __cplusplus
}
#endif
#endif /*_GF_CORE_H_*/
/*
* GPAC - Multimedia Framework C SDK
*
* Authors: Jean Le Feuvre
* Copyright (c) Telecom ParisTech 2000-2012
* All rights reserved
*
* This file is part of GPAC / common tools sub-project
*
* GPAC is free software; you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* GPAC is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; see the file COPYING. If not, write to
* the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
*
*/
#ifndef _GF_TOOLS_H_
#define _GF_TOOLS_H_
#ifdef __cplusplus
extern "C" {
#endif
#include <gpac/setup.h>
#include <gpac/version.h>
/*! \file "gpac/tools.h"
* \brief Core definitions and tools of GPAC.
*
* This file contains basic functions and core definitions of the GPAC framework. This file is
* usually included by all GPAC header files since it contains the error definitions.
*/
/*! \defgroup utils_grp Core Tools
* \brief Core definitions and tools of GPAC.
*
* You will find in this module the documentation of the core tools used in GPAC.
*/
/*!
* \ingroup utils_grp
* \brief Base definitions and functions of GPAC.
*
* This section documents some very basic functions and core definitions of the GPAC framework.
* @{
*/
/*!
* \brief Stringizer
* \hideinitializer
*
* Macro transforming its input name into a string
*/
#define gf_stringizer(x) #x
/*!
* \brief Memory allocation for a structure
* \hideinitializer
*
* Macro allocating memory and zero-ing it
*/
#define GF_SAFEALLOC(__ptr, __struct) { __ptr = (__struct *) gf_malloc(sizeof(__struct)); if (__ptr) memset((void *) __ptr, 0, sizeof(__struct)); }
/*!
* \brief Memory allocation for an array of n structs
* \hideinitializer
*
* Macro allocating memory for n structures and zero-ing it
*/
#define GF_SAFE_ALLOC_N(__ptr, __n, __struct) { __ptr = (__struct *) gf_malloc( __n * sizeof(__struct)); if (__ptr) memset((void *) __ptr, 0, __n * sizeof(__struct)); }
/*!
* \brief 4CC Formatting
* \hideinitializer
*
 * Macro formatting a 4-character code (or 4CC) "abcd" as 0xAABBCCDD
*/
#ifndef GF_4CC
#define GF_4CC(a,b,c,d) (((a)<<24)|((b)<<16)|((c)<<8)|(d))
#endif
/*!
* \brief GPAC feature list
*
* returns the list of features enabled/disabled in this GPAC build.
*/
const char *gpac_features();
/*!
* \brief 4CC Printing
*
* returns a 4CC printable form
*/
const char *gf_4cc_to_str(u32 type);
/*!
* \brief asprintf() portable implementation
*
 * similar to sprintf, except it allocates the output string for the caller.
* \note asprintf implementation for windows
*/
int gf_asprintf(char **buffer, const char *fmt, ...);
size_t gf_fread(void *ptr, size_t size, size_t nmemb, FILE *stream);
/*!
* \brief file writing helper
*
* Wrapper to properly handle calls to fwrite()
* Ensures proper error handling is invoked when it fails.
* \return Same as gf_fwrite
*
*/
size_t gf_fwrite(const void *ptr, size_t size, size_t nmemb, FILE *stream);
/*!
* \brief large file opening
*
* Opens a large file (>4GB)
* \param file_name Same semantics as fopen
* \param mode Same semantics as fopen
* \return stream handle of the file object
* \note You only need to call this function if you're suspecting the file to be a large one (usually only media files), otherwise use regular stdio.
*/
FILE *gf_fopen(const char *file_name, const char *mode);
/*!
* \brief file closing
*
* Closes a file
* \param file file to close
* \note You only need to call this function if you're suspecting the file to be a large one (usually only media files), otherwise use regular stdio.
*/
s32 gf_fclose(FILE *file);
/*!
* \brief large file position query
*
* Queries the current read/write position in a large file
* \param f Same semantics as ftell
* \return position in the file
* \note You only need to call this function if you're suspecting the file to be a large one (usually only media files), otherwise use regular stdio.
*/
u64 gf_ftell(FILE *f);
/*!
* \brief large file seeking
*
* Seeks the current read/write position in a large file
* \param f Same semantics as fseek
* \param pos Same semantics as fseek
* \param whence Same semantics as fseek
* \return new position in the file
* \note You only need to call this function if you're suspecting the file to be a large one (usually only media files), otherwise use regular stdio.
*/
u64 gf_fseek(FILE *f, s64 pos, s32 whence);
/*!
* \brief get basename from filename/path
*
* Returns a pointer to the start of a filepath basename or null
* \param filename Path of the file, can be an absolute path
*/
char* gf_file_basename(const char* filename);
/*!
* \brief get extension from filename
*
* Returns a pointer to the start of a filepath extension or null
* \param filename Path of the file, can be an absolute path
*/
char* gf_file_ext_start(const char* filename);
/*! @} */
/*! \addtogroup errors_grp Error codes
* \ingroup utils_grp
* \brief Errors used in GPAC.
*
* This section documents all error codes used in the GPAC framework. Most of the GPAC's functions will use these as
* return values, and some of these errors are also used for state communication with the different modules of the framework.
* @{
*/
/*!
* GPAC Error
* \hideinitializer
*
* positive values are warning and info, 0 means no error and negative values are errors
*/
typedef enum
{
/*!Message from any scripting engine used in the presentation (ECMAScript, MPEG-J, ...) (Info).*/
GF_SCRIPT_INFO = 3,
/*!Indicates an data frame has several AU packed (not MPEG-4 compliant). This is used by decoders to force
multiple decoding of the same data frame (Info).*/
GF_PACKED_FRAMES = 2,
/*!Indicates the end of a stream or of a file (Info).*/
GF_EOS = 1,
/*!
\n\n
*/
/*!Operation success (no error).*/
GF_OK = 0,
/*!\n*/
/*!One of the input parameter is not correct or cannot be used in the current operating mode of the framework.*/
GF_BAD_PARAM = -1,
/*! Memory allocation failure.*/
GF_OUT_OF_MEM = -2,
/*! Input/Output failure (disk access, system call failures)*/
GF_IO_ERR = -3,
/*! The desired feature or operation is not supported by the framework*/
GF_NOT_SUPPORTED = -4,
/*! Input data has been corrupted*/
GF_CORRUPTED_DATA = -5,
/*! A modification was attempted on a scene node which could not be found*/
GF_SG_UNKNOWN_NODE = -6,
/*! The PROTO node interface does not match the nodes using it*/
GF_SG_INVALID_PROTO = -7,
/*! An error occured in the scripting engine*/
GF_SCRIPT_ERROR = -8,
/*! Buffer is too small to contain decoded data. Decoders shall use this error whenever they need to resize their output memory buffers*/
GF_BUFFER_TOO_SMALL = -9,
/*! Bitstream is not compliant to the specfication it refers to*/
GF_NON_COMPLIANT_BITSTREAM = -10,
/*! No decoders could be found to handle the desired media type*/
GF_CODEC_NOT_FOUND = -11,
/*! The URL is not properly formatted or cannot be found*/
GF_URL_ERROR = -12,
/*! An service error has occured at the local side*/
GF_SERVICE_ERROR = -13,
/*! A service error has occured at the remote (server) side*/
GF_REMOTE_SERVICE_ERROR = -14,
/*! The desired stream could not be found in the service*/
GF_STREAM_NOT_FOUND = -15,
/*! The IsoMedia file is not a valid one*/
GF_ISOM_INVALID_FILE = -20,
/*! The IsoMedia file is not complete. Either the file is being downloaded, or it has been truncated*/
GF_ISOM_INCOMPLETE_FILE = -21,
/*! The media in this IsoMedia track is not valid (usually due to a broken stream description)*/
GF_ISOM_INVALID_MEDIA = -22,
/*! The requested operation cannot happen in the current opening mode of the IsoMedia file*/
GF_ISOM_INVALID_MODE = -23,
/*! This IsoMedia track refers to media outside the file in an unknown way*/
GF_ISOM_UNKNOWN_DATA_REF = -24,
/*! An invalid MPEG-4 Object Descriptor was found*/
GF_ODF_INVALID_DESCRIPTOR = -30,
/*! An MPEG-4 Object Descriptor was found or added to a forbidden descriptor*/
GF_ODF_FORBIDDEN_DESCRIPTOR = -31,
/*! An invalid MPEG-4 BIFS command was detected*/
GF_ODF_INVALID_COMMAND = -32,
/*! The scene has been encoded using an unknown BIFS version*/
GF_BIFS_UNKNOWN_VERSION = -33,
/*! The remote IP address could not be solved*/
GF_IP_ADDRESS_NOT_FOUND = -40,
/*! The connection to the remote peer has failed*/
GF_IP_CONNECTION_FAILURE = -41,
/*! The network operation has failed*/
GF_IP_NETWORK_FAILURE = -42,
/*! The network connection has been closed*/
GF_IP_CONNECTION_CLOSED = -43,
/*! The network operation has failed because no data is available*/
GF_IP_NETWORK_EMPTY = -44,
/*! The network operation has been discarded because it would be a blocking one*/
GF_IP_SOCK_WOULD_BLOCK = -45,
/*! UDP connection did not receive any data at all. Signaled by client services to reconfigure network if possible*/
GF_IP_UDP_TIMEOUT = -46,
/*! Authentication with the remote host has failed*/
GF_AUTHENTICATION_FAILURE = -50,
/*! Script not ready for playback */
GF_SCRIPT_NOT_READY = -51,
	/*! Bad configuration for the current context */
GF_INVALID_CONFIGURATION = -52,
/*! The element has not been found */
GF_NOT_FOUND = -53,
/*! Unexpected format of data */
GF_PROFILE_NOT_SUPPORTED = -54,
/*! the decoder buffers were filled, it is necessary to recuperate decoded data*/
GF_CODEC_BUFFER_UNAVAILABLE = -55,
} GF_Err;
/*!
* \brief Error Printing
*
* Returns a printable version of a given error
* \param e Error code requested
* \return String representing the error
*/
const char *gf_error_to_string(GF_Err e);
/*! @} */
/*! \addtogroup log_grp Logging tools
* \ingroup utils_grp
* \brief Logging system of GPAC
* @{
*/
/*!
* GPAC Log Levels
* \hideinitializer
*
* These levels describes messages priority used when filtering logs
*/
typedef enum
{
/*! Disable all Log message*/
GF_LOG_QUIET = 0,
/*! Log message describes an error*/
GF_LOG_ERROR,
/*! Log message describes a warning*/
GF_LOG_WARNING,
/*! Log message is informational (state, etc..)*/
GF_LOG_INFO,
/*! Log message is a debug info*/
GF_LOG_DEBUG
} GF_LOG_Level;
/*!
* \brief Log exits at first error assignment
*
* When GF_LOG_ERROR happens, program leaves with instruction exit(1);
 * \param strict strict behaviour when encountering a serious error.
*
*/
void gf_log_set_strict_error(Bool strict);
/*!
* \brief gets string-formated log tools
*
* Returns the string-formatted log tools and levels. Returned string shall be freed by the caller.
* \return string-formatted log tools.
*
*/
char *gf_log_get_tools_levels();
/*!
* GPAC Log tools
* \hideinitializer
*
* These flags describes which sub-part of GPAC generates the log and are used when filtering logs
*/
typedef enum
{
/*! Log message from the core library (init, threads, network calls, etc)*/
GF_LOG_CORE = 0,
/*! Log message from a raw media parser (BIFS, LASeR, A/V formats)*/
GF_LOG_CODING,
/*! Log message from a bitstream parser (IsoMedia, MPEG-2 TS, OGG, ...)*/
GF_LOG_CONTAINER,
/*! Log message from the network/service stack (messages & co)*/
GF_LOG_NETWORK,
/*! Log message from the RTP/RTCP stack (TS info) and packet structure & hinting (debug)*/
GF_LOG_RTP,
/*! Log message from authoring subsystem (file manip, import/export)*/
GF_LOG_AUTHOR,
/*! Log message from the sync layer of the terminal*/
GF_LOG_SYNC,
/*! Log message from a codec*/
GF_LOG_CODEC,
/*! Log message from any XML parser (context loading, etc)*/
GF_LOG_PARSER,
/*! Log message from the terminal/compositor, indicating media object state*/
GF_LOG_MEDIA,
/*! Log message from the scene graph/scene manager (handling of nodes and attribute modif, DOM core)*/
GF_LOG_SCENE,
/*! Log message from the scripting engine APIs - does not cover alert() in the script code itself*/
GF_LOG_SCRIPT,
/*! Log message from event handling*/
GF_LOG_INTERACT,
/*! Log message from compositor*/
GF_LOG_COMPOSE,
/*! Log for video object cache */
GF_LOG_CACHE,
/*! Log message from multimedia I/O devices (audio/video input/output, ...)*/
GF_LOG_MMIO,
/*! Log for runtime info (times, memory, CPU usage)*/
GF_LOG_RTI,
/*! Log for SMIL timing and animation*/
GF_LOG_SMIL,
/*! Log for memory tracker*/
GF_LOG_MEMORY,
/*! Log for audio compositor*/
GF_LOG_AUDIO,
/*! Generic Log for modules*/
GF_LOG_MODULE,
/*! Log for threads and mutexes */
GF_LOG_MUTEX,
/*! Log for threads and condition */
GF_LOG_CONDITION,
/*! Log for all HTTP streaming */
GF_LOG_DASH,
/*! Log for all messages coming from GF_Terminal or script alert()*/
GF_LOG_CONSOLE,
/*! Log for all messages coming the application, not used by libgpac or the modules*/
GF_LOG_APP,
/*! Log for all messages coming from the scheduler */
GF_LOG_SCHEDULER,
/*! special value used to set a level for all tools*/
GF_LOG_ALL,
GF_LOG_TOOL_MAX = GF_LOG_ALL,
} GF_LOG_Tool;
/*!
* \brief Log modules assignment
*
* Sets the tools to be checked for log filtering. By default no logging is performed.
* \param tool tool to be logged.
* \param level level of logging for this tool.
*
*/
void gf_log_set_tool_level(GF_LOG_Tool tool, GF_LOG_Level level);
/*!
* \brief Log Message Callback
*
* The gf_log_cbk type is the type for the callback of the \ref gf_log_set_callback function. By default all logs are redirected to stderr
* \param cbck Opaque user data.
* \param log_level level of the log. This value is not guaranteed in multi-threaded context.
* \param log_tool tool emitting the log. This value is not guaranteed in multi-threaded context.
* \param fmt message log format.
* \param vlist message log param.
*
*/
typedef void (*gf_log_cbk)(void *cbck, GF_LOG_Level log_level, GF_LOG_Tool log_tool, const char* fmt, va_list vlist);
/*!
* \brief Log overwrite
*
* Assigns a user-defined callback for printing log messages. By default all logs are redirected to stderr
* \param usr_cbk Opaque user data
* \param cbk Callback log function
* \return previous callback function
*/
gf_log_cbk gf_log_set_callback(void *usr_cbk, gf_log_cbk cbk);
/*!
\cond DUMMY_DOXY_SECTION
*/
#ifndef GPAC_DISABLE_LOG
/*note:
to turn log on, change to GPAC_ENABLE_LOG
to turn log off, change to GPAC_DISABLE_LOG
this is needed by configure+sed to modify this file directly
*/
#define GPAC_ENABLE_LOG
#endif
/*!
\endcond
*/
/*this is all a bit ugly, but most compilers don't properly handle variadic macros...*/
void gf_log(const char *fmt, ...);
void gf_log_lt(GF_LOG_Level ll, GF_LOG_Tool lt);
void gf_log_va_list(GF_LOG_Level level, GF_LOG_Tool tool, const char *fmt, va_list vl);
/*!
* \brief Log level checking
*
* Checks if a given tool is logged for the given level
* \param log_tool tool to check
* \param log_level level to check
* \return 1 if logged, 0 otherwise
*/
Bool gf_log_tool_level_on(GF_LOG_Tool log_tool, GF_LOG_Level log_level);
/*!
* \brief Set log tools and levels
*
* Set log tools and levels according to the log_tools_levels string. All previous log settings are discarded.
* \param log_tools_levels string specifying the tools and levels. It is formatted as logToolX\@logLevelX:logToolZ\@logLevelZ:...
* \return GF_OK or GF_BAD_PARAM
*/
GF_Err gf_log_set_tools_levels(const char *log_tools_levels);
/*!
* \brief Modify log tools and levels
*
* Modify log tools and levels according to the log_tools_levels string. Previous log settings are kept.
* \param val string specifying the tools and levels. It is formatted as logToolX\@logLevelX:logToolZ\@logLevelZ:...
* \return GF_OK or GF_BAD_PARAM
*/
GF_Err gf_log_modify_tools_levels(const char *val);
/*!
* \brief Set log level for a given tool
*
* Set log level for a given tool.
* \param tool tool to log
* \param level log level for this tool
*/
void gf_log_set_tool_level(GF_LOG_Tool tool, GF_LOG_Level level);
#ifdef GPAC_DISABLE_LOG
#define GF_LOG(_ll, _lm, __args)
#else
/*!
* \brief Message logging
* \hideinitializer
*
* Macro for logging messages. Usage is GF_LOG(log_lev, log_module, (fmt, ...)). The log function is only called if log filtering allows it. This avoids fetching logged parameters when the tool is not being logged.
*/
#define GF_LOG(_log_level, _log_tools, __args) if (gf_log_tool_level_on(_log_tools, _log_level) ) { gf_log_lt(_log_level, _log_tools); gf_log __args ;}
#endif
/*!
* \brief PseudoRandom Integer Generation Initialization
*
* Sets the starting point for generating a series of pseudorandom integers.
* \param Reset Re-initializes the random number generator
*/
void gf_rand_init(Bool Reset);
/*!
* \brief PseudoRandom Integer Generation
*
* Returns a pseudorandom integer.
*/
u32 gf_rand();
/*!
* \brief user name
*
* Gets current user (login) name.
*/
void gf_get_user_name(char *buf, u32 buf_size);
/*!\brief FileEnum info object
*
*The FileEnumInfo object is used to get file attributes upon enumeration of a directory.
*/
typedef struct
{
/*!File is marked as hidden*/
Bool hidden;
/*!File is a directory*/
Bool directory;
/*!File is a drive mountpoint*/
Bool drive;
/*!File is a system file*/
Bool system;
/*!File size in bytes*/
u64 size;
/*!File last modif time in UTC seconds*/
u64 last_modified;
} GF_FileEnumInfo;
/*!
* \brief Directory Enumeration Callback
*
* The gf_enum_dir_item type is the type for the callback of the \ref gf_enum_directory function
* \param cbck Opaque user data.
* \param item_name File or directory name.
* \param item_path File or directory full path and name from filesystem root.
* \param file_info information for the file or directory.
* \return 1 to abort enumeration, 0 to continue enumeration.
*
*/
typedef Bool (*gf_enum_dir_item)(void *cbck, char *item_name, char *item_path, GF_FileEnumInfo *file_info);
/*!
* \brief Directory enumeration
*
* Enumerates a directory content. Feedback is provided by the enum_dir_item function
* \param dir Directory to enumerate
* \param enum_directory If set, only directories will be enumerated, otherwise only files are.
* \param enum_dir \ref gf_enum_dir_item callback function for enumeration.
* \param cbck Opaque user data passed to callback function.
* \param filter optional filter for file extensions. If a file extension without the dot '.' character is not found in the
* filter the file will be skipped.
*/
GF_Err gf_enum_directory(const char *dir, Bool enum_directory, gf_enum_dir_item enum_dir, void *cbck, const char *filter);
/*!
* \brief File Deletion
*
* Deletes a file from the disk.
* \param fileName absolute name of the file or name relative to the current working directory.
*/
GF_Err gf_delete_file(const char *fileName);
/*!
* \brief File Move
*
* Moves or renames a file or directory.
* \param fileName absolute path of the file / directory to move or rename
* \param newFileName absolute new path/name of the file / directory
*/
GF_Err gf_move_file(const char *fileName, const char *newFileName);
/*!
* \brief Temporary File Creation
*
* Creates a new temporary file in binary mode
* \param fileName if not NULL, strdup() of the temporary filename when created by GPAC (NULL otherwise as the system automatically removes its own tmp files)
 * \return stream handle to the new file resource
*/
FILE *gf_temp_file_new(char ** const fileName);
/*!
* \brief File Modification Time
*
* Returns the modification time of the given file. The exact meaning of this value is system dependent
* \param filename file to check
* \return modification time of the file
*/
u64 gf_file_modification_time(const char *filename);
/*!
* \brief File existence check
*
 * Checks if a file exists on disk.
 * \param fileName absolute path of the file, or path relative to the current working directory
* \return GF_TRUE if file exists
*/
Bool gf_file_exists(const char *fileName);
/*!
* \brief Progress formatting
*
* Signals progress in GPAC's operations. Note that progress signaling with this function is not thread-safe, the main purpose is to use it for authoring tools only.
* \param title title string of the progress, or NULL for no progress
* \param done Current amount performed of the action.
* \param total Total amount of the action.
*/
void gf_set_progress(const char *title, u64 done, u64 total);
/*!
* \brief Progress Callback
*
* The gf_on_progress_cbk type is the type for the callback of the \ref gf_set_progress_callback function
* \param cbck Opaque user data.
* \param title progress title.
* \param done Current amount performed of the action
* \param total Total amount of the action.
*
*/
typedef void (*gf_on_progress_cbk)(const void *cbck, const char *title, u64 done, u64 total);
/*!
* \brief Progress overwriting
*
* Overwrites the progress signaling function with a user-defined one.
* \param user_cbk Opaque user data
* \param prog_cbk new callback function to use. Passing NULL restore default GPAC stderr notification.
*/
void gf_set_progress_callback(void *user_cbk, gf_on_progress_cbk prog_cbk);
/*!
* \brief Prompt checking
*
* Checks if a character is pending in the prompt buffer.
* \return 1 if a character is ready to be fetched, 0 otherwise.
* \note Function not available under WindowsCE nor SymbianOS
*/
Bool gf_prompt_has_input();
/*!
* \brief Prompt character flush
*
* Returns the current character entered at prompt if any.
* \return value of the character.
* \note Function not available under WindowsCE nor SymbianOS
*/
char gf_prompt_get_char();
/*!
* \brief turns prompt echo on/off
*
* Turns the prompt character echo on/off - this is useful when entering passwords.
* \param echo_off indicates whether echo should be turned on or off.
* \note Function not available under WindowsCE nor SymbianOS
*/
void gf_prompt_set_echo_off(Bool echo_off);
/*! @} */
/*!
*\addtogroup cpu_grp System time CPU and Memory tools
*\ingroup utils_grp
*\brief System time CPU and Memory functions
*
*This section documents time functionalities and CPU management in GPAC.
* @{
*/
/*!
* Selection flags for memory tracker
* \hideinitializer
*/
typedef enum
{
/*! No memory tracking*/
GF_MemTrackerNone = 0,
/*! Memory tracking without backtrace*/
GF_MemTrackerSimple,
/*! Memory tracking with backtrace*/
GF_MemTrackerBackTrace,
} GF_MemTrackerType;
/*!
* \brief System setup
*
* Inits the system high-resolution clock if any, and CPU usage manager. It is strongly recommended to call this
* function before calling any other GPAC functions, since on some systems (like winCE) it may result in a better memory usage estimation.
* \note This can be called several times but only the first call will result in system setup.
*/
void gf_sys_init(GF_MemTrackerType mem_tracker_type);
/*!
* \brief System closing
*
* Closes the system high-resolution clock and any CPU associated ressources.
* \note This can be called several times but the system will be closed when no more users are counted.
*/
void gf_sys_close();
/*!
* \brief System arguments
*
* Sets the user app arguments (used by GUI mode)
* \param argc Number of arguments
* \param argv Array of arguments
*/
void gf_sys_set_args(s32 argc, const char **argv);
/*!
* \brief Get number of args
*
* Gets the number of argument of the user application if any
* \return number of argument of the user application
*/
u32 gf_sys_get_argc();
/*!
* \brief Get argument
*
* Gets the argument of the user application at the given index, if any
* \param arg Index of argument to retrieve
* \return the requested argument of the user application
*/
const char *gf_sys_get_arg(u32 arg);
/*!
* \brief System clock query
*
* Gets the system clock time.
* \return System clock value since GPAC initialization in milliseconds.
*/
u32 gf_sys_clock();
/*!
* \brief High precision system clock query
*
* Gets the hight precision system clock time.
* \return System clock value since GPAC initialization in microseconds.
*/
u64 gf_sys_clock_high_res();
/*!
* \brief Sleeps thread/process
*
* Locks calling thread/process execution for a given time.
* \param ms Amount of time to sleep in milliseconds.
*/
void gf_sleep(u32 ms);
#ifdef WIN32
/*!
* \brief WINCE time constant
* \hideinitializer
*
* time between jan 1, 1601 and jan 1, 1970 in units of 100 nanoseconds
*/
#define TIMESPEC_TO_FILETIME_OFFSET (((LONGLONG)27111902 << 32) + (LONGLONG)3577643008)
#endif
/*!
*\brief gets UTC time in milliseconds
*
* Gets UTC clock in milliseconds
* \return UTC time in milliseconds
*/
u64 gf_net_get_utc();
/*!
*\brief parses date and returns UTC value for this date. Date format is an XSD dateTime format or any of the supported formats from HTTP 1.1:
Sun, 06 Nov 1994 08:49:37 GMT ; RFC 822, updated by RFC 1123
Sunday, 06-Nov-94 08:49:37 GMT ; RFC 850, obsoleted by RFC 1036
Sun Nov 6 08:49:37 1994 ; ANSI C's asctime() format
*
* \param date string containing the date to parse
* \return UTC time in milliseconds
*/
u64 gf_net_parse_date(const char *date);
/*!
*\brief gets timezone adjustment in seconds
*
* Gets timezone adjustment in seconds, with localtime - timezone = UTC time
* \return timezone shift in seconds
*/
s32 gf_net_get_timezone();
/*!\brief run-time system info object
*
*The Run-Time Info object is used to get CPU and memory occupation of the calling process.
*All time values are expressed in milliseconds (accuracy is not guaranteed).
*/
typedef struct
{
/*!start of the sampling period*/
u32 sampling_instant;
/*!duration of the sampling period*/
u32 sampling_period_duration;
/*!total amount of time (User+kernel) spent in CPU for all processes as evaluated at the end of the sampling period*/
u32 total_cpu_time;
/*!total amount of time (User+kernel) spent in CPU for the calling process as evaluated at the end of the sampling period*/
u32 process_cpu_time;
/*!amount of time (User+kernel) spent in CPU for all processes during the sampling period*/
u32 total_cpu_time_diff;
/*!total amount of time (User+kernel) spent in CPU for the calling process during the sampling period*/
u32 process_cpu_time_diff;
/*!total amount of idle time during the sampling period.*/
u32 cpu_idle_time;
/*!percentage (from 0 to 100) of CPU usage during the sampling period.*/
u32 total_cpu_usage;
/*!percentage (from 0 to 100) of the CPU usage by the calling process during the sampling period.*/
u32 process_cpu_usage;
/*!calling process ID*/
u32 pid;
/*!calling process thread count if known*/
u32 thread_count;
/*!size of calling process allocated heaps*/
u64 process_memory;
/*!total physical memory in system*/
u64 physical_memory;
/*!available physical memory in system*/
u64 physical_memory_avail;
/*!total memory currently allocated by gpac*/
u64 gpac_memory;
/*!total number of cores on the system*/
u32 nb_cores;
} GF_SystemRTInfo;
/*!
* Selection flags for run-time info retrieval
* \hideinitializer
*/
enum
{
/*!Indicates all processes' times must be fetched. If not set, only the current process times will be retrieved, and the
thread count and total times won't be available*/
GF_RTI_ALL_PROCESSES_TIMES = 1,
/*!Indicates the process allocated heap size must be fetch. If not set, only the system physical memory is fetched.
Fetching the entire process allocated memory can have a large impact on performances*/
GF_RTI_PROCESS_MEMORY = 1<<1,
/*!Indicates that only system memory should be fetched. When set, all refreshing info is ignored*/
GF_RTI_SYSTEM_MEMORY_ONLY = 1<<2
};
/*!
* \brief Gets Run-Time info
*
* Gets CPU and memory usage info for the calling process and the system. Information gathering
* is controlled through timeout values.
* \param refresh_time_ms refresh time period in milliseconds. If the last sampling was done less than this period ago, the run-time info is not refreshed.
* \param rti holder to the run-time info structure to update.
* \param flags specify which info is to be retrieved.
* \return 1 if info has been updated, 0 otherwise.
* \note You should not try to use a too small refresh time. Typical values are 500 ms or one second.
*/
Bool gf_sys_get_rti(u32 refresh_time_ms, GF_SystemRTInfo *rti, u32 flags);
Bool gf_sys_get_battery_state(Bool *onBattery, u32 *onCharge, u32 *level, u32 *batteryLifeTime, u32 *batteryFullLifeTime);
typedef struct _GF_GlobalLock_opaque GF_GlobalLock;
/*!
* This function allows the user to create a global lock for all GPAC instances.
* This allow to disable some features for other instances for instance.
* \param resourceName The name of the resource to lock
* \return the lock object if the resource was successfully locked, NULL if it could not be locked
*/
GF_GlobalLock * gf_global_resource_lock(const char * resourceName);
/*!
* Unlocks a previously locked resource
* \param lock The resource to unlock
* \return GF_OK if everything went fine
*/
GF_Err gf_global_resource_unlock(GF_GlobalLock * lock);
/*! @} */
/*!
*\addtogroup osfile_grp File System
*\ingroup utils_grp
*\brief File System tools
*
*This section documents file-system functionalities in GPAC.
* @{
*/
/*!
*\brief parses 128 bit from string
*
* Parses 128 bit from string
*
* \param string the string containing the value in hexa. Non alphanum characters are skipped
* \param value the value parsed
* \return error code if any
*/
GF_Err gf_bin128_parse(const char *string, bin128 value);
/*!
* \brief Delete Directory
*
* Delete a dir within the full path.
* \param DirPathName the file path name.
*/
GF_Err gf_rmdir(const char *DirPathName);
/*!
* \brief Create Directory
*
* Create a directory within the full path.
* \param DirPathName the dir path name.
*/
GF_Err gf_mkdir(const char* DirPathName);
/*!
* \brief Check Directory Exists
*
* Checks whether a directory exists at the given path.
* \param DirPathName the dir path name.
*/
Bool gf_dir_exists(const char *DirPathName);
/*!
* \brief Cleanup Directory
*
* Cleanup a directory within the full path, removing all the files and the directories.
* \param DirPathName the dir path name.
*/
GF_Err gf_cleanup_dir(const char* DirPathName);
/**
* Gets a newly allocated string containing the default cache directory.
* It is the responsibility of the caller to free the string.
* \return a fully qualified path to the default cache directory
*/
char * gf_get_default_cache_directory();
/**
* Gets the number of open file handles (gf_fopen/gf_fclose only).
* \return number of open file handles
*/
u32 gf_file_handles_count();
/*! @} */
/*!
*\addtogroup hash_grp RawData Misc
*\ingroup utils_grp
*\brief Data integrity and parsing
*
*This section documents misc data functions such as integrity and parsing such as SHA-1 hashing CRC checksum, 128 bit ID parsing...
* @{
*/
/*!
* \brief CRC32 compute
*
* Computes the CRC32 value of a buffer.
* \param data buffer
* \param size buffer size
* \return computed CRC32
*/
u32 gf_crc_32(const char *data, u32 size);
/**
* Compresses a data buffer in place using zlib. Buffer may be reallocated in the process.
* \param data pointer to the data buffer to be compressed
* \param data_len length of the data buffer to be compressed
* \param out_size pointer for output buffer size
* \return GF_OK if everything went fine
*/
GF_Err gf_gz_compress_payload(char **data, u32 data_len, u32 *out_size);
/**
* Decompresses a data buffer using zlib.
* \param data data buffer to be decompressed
* \param data_len length of the data buffer to be decompressed
* \param uncompressed_data pointer to the uncompressed data buffer. It is the responsibility of the caller to free this buffer.
* \param out_size size of the uncompressed buffer
* \return GF_OK if everything went fine
*/
GF_Err gf_gz_decompress_payload(char *data, u32 data_len, char **uncompressed_data, u32 *out_size);
/*SHA1*/
typedef struct __sha1_context GF_SHA1Context;
#define GF_SHA1_DIGEST_SIZE 20
#define GF_SHA1_DIGEST_SIZE_HEXA 41
/* Create SHA-1 context */
GF_SHA1Context *gf_sha1_starts();
/* Adds byte to the SHA-1 context */
void gf_sha1_update(GF_SHA1Context *ctx, u8 *input, u32 length);
/* Generates SHA-1 of all bytes ingested */
void gf_sha1_finish(GF_SHA1Context *ctx, u8 digest[GF_SHA1_DIGEST_SIZE] );
/*
* Output SHA-1(file contents), returns 0 if successful.
*/
int gf_sha1_file(const char *filename, u8 digest[GF_SHA1_DIGEST_SIZE]);
/*
* Gets SHA-1 of input buffer
*/
void gf_sha1_csum(u8 *buf, u32 buflen, u8 digest[GF_SHA1_DIGEST_SIZE]);
/*
* Gets SHA-1 of input buffer into hexa form
*/
void gf_sha1_csum_hexa(u8 *buf, u32 buflen, u8 digest[GF_SHA1_DIGEST_SIZE_HEXA]);
/*! @} */
/* \cond dummy */
#ifdef GPAC_ANDROID
typedef void (*fm_callback_func)(void *cbk_obj, u32 type, u32 param, int *value);
extern void gf_fm_request_set_callback(void *cbk_obj, fm_callback_func cbk_func);
void gf_fm_request_call(u32 type, u32 param, int *value);
#endif //GPAC_ANDROID
/* \endcond */
#define ARRAY_LENGTH(a) (sizeof(a) / sizeof((a)[0]))
#ifdef __cplusplus
}
#endif
#endif /*_GF_CORE_H_*/
|
651_0
|
crossvul
|
h
|
CWE-119
|
Improper Restriction of Operations within the Bounds of a Memory Buffer - Certain languages allow direct addressing of memory locations and do not automatically ensure that these locations are valid for the memory buffer that is being referenced.
|
c
|
/**
* FreeRDP: A Remote Desktop Protocol Implementation
* ZGFX (RDP8) Bulk Data Compression
*
* Copyright 2014 Marc-Andre Moreau <marcandre.moreau@gmail.com>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef FREERDP_CODEC_ZGFX_H
#define FREERDP_CODEC_ZGFX_H
#include <freerdp/api.h>
#include <freerdp/types.h>
#include <freerdp/codec/bulk.h>
#define ZGFX_SEGMENTED_SINGLE 0xE0
#define ZGFX_SEGMENTED_MULTIPART 0xE1
#define ZGFX_PACKET_COMPR_TYPE_RDP8 0x04
#define ZGFX_SEGMENTED_MAXSIZE 65535
/* ZGFX (RDP8) bulk compression/decompression context.
 * NOTE(review): OutputBuffer and HistoryBuffer are fixed-size inline
 * arrays; codec code must bounds-check OutputCount / HistoryIndex
 * against their capacities before writing (CWE-119 class risk otherwise). */
struct _ZGFX_CONTEXT
{
/* TRUE when this context compresses, FALSE when it decompresses */
BOOL Compressor;
/* current read position within the caller-supplied input buffer */
const BYTE* pbInputCurrent;
/* one past the last valid input byte */
const BYTE* pbInputEnd;
/* bit accumulator state for the bit-level reader -- presumably pending bits; confirm in zgfx.c */
UINT32 bits;
/* count of bits remaining to be consumed from the input */
UINT32 cBitsRemaining;
/* currently buffered bits being decoded */
UINT32 BitsCurrent;
/* number of valid bits in BitsCurrent */
UINT32 cBitsCurrent;
/* scratch buffer receiving (de)compressed output (fixed 64 KiB capacity) */
BYTE OutputBuffer[65536];
/* number of bytes currently valid in OutputBuffer */
UINT32 OutputCount;
/* sliding history window used for back-references (fixed ~2.5 MB capacity) */
BYTE HistoryBuffer[2500000];
/* current write index into HistoryBuffer */
UINT32 HistoryIndex;
/* logical size of the history window in use */
UINT32 HistoryBufferSize;
};
typedef struct _ZGFX_CONTEXT ZGFX_CONTEXT;
#ifdef __cplusplus
extern "C" {
#endif
FREERDP_API int zgfx_decompress(ZGFX_CONTEXT* zgfx, const BYTE* pSrcData, UINT32 SrcSize, BYTE** ppDstData, UINT32* pDstSize, UINT32 flags);
FREERDP_API int zgfx_compress(ZGFX_CONTEXT* zgfx, const BYTE* pSrcData, UINT32 SrcSize, BYTE** ppDstData, UINT32* pDstSize, UINT32* pFlags);
FREERDP_API int zgfx_compress_to_stream(ZGFX_CONTEXT* zgfx, wStream* sDst, const BYTE* pUncompressed, UINT32 uncompressedSize, UINT32* pFlags);
FREERDP_API void zgfx_context_reset(ZGFX_CONTEXT* zgfx, BOOL flush);
FREERDP_API ZGFX_CONTEXT* zgfx_context_new(BOOL Compressor);
FREERDP_API void zgfx_context_free(ZGFX_CONTEXT* zgfx);
#ifdef __cplusplus
}
#endif
#endif /* FREERDP_CODEC_ZGFX_H */
|
/**
* FreeRDP: A Remote Desktop Protocol Implementation
* ZGFX (RDP8) Bulk Data Compression
*
* Copyright 2014 Marc-Andre Moreau <marcandre.moreau@gmail.com>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef FREERDP_CODEC_ZGFX_H
#define FREERDP_CODEC_ZGFX_H
#include <freerdp/api.h>
#include <freerdp/types.h>
#include <freerdp/codec/bulk.h>
#define ZGFX_SEGMENTED_SINGLE 0xE0
#define ZGFX_SEGMENTED_MULTIPART 0xE1
#define ZGFX_PACKET_COMPR_TYPE_RDP8 0x04
#define ZGFX_SEGMENTED_MAXSIZE 65535
typedef struct _ZGFX_CONTEXT ZGFX_CONTEXT;
#ifdef __cplusplus
extern "C" {
#endif
FREERDP_API int zgfx_decompress(ZGFX_CONTEXT* zgfx, const BYTE* pSrcData, UINT32 SrcSize,
BYTE** ppDstData, UINT32* pDstSize, UINT32 flags);
FREERDP_API int zgfx_compress(ZGFX_CONTEXT* zgfx, const BYTE* pSrcData, UINT32 SrcSize,
BYTE** ppDstData, UINT32* pDstSize, UINT32* pFlags);
FREERDP_API int zgfx_compress_to_stream(ZGFX_CONTEXT* zgfx, wStream* sDst,
const BYTE* pUncompressed, UINT32 uncompressedSize, UINT32* pFlags);
FREERDP_API void zgfx_context_reset(ZGFX_CONTEXT* zgfx, BOOL flush);
FREERDP_API ZGFX_CONTEXT* zgfx_context_new(BOOL Compressor);
FREERDP_API void zgfx_context_free(ZGFX_CONTEXT* zgfx);
#ifdef __cplusplus
}
#endif
#endif /* FREERDP_CODEC_ZGFX_H */
|
671_0
|
crossvul
|
h
|
CWE-119
|
Improper Restriction of Operations within the Bounds of a Memory Buffer - Certain languages allow direct addressing of memory locations and do not automatically ensure that these locations are valid for the memory buffer that is being referenced.
|
c
|
/*
Formatting library for C++
Copyright (c) 2012 - 2016, Victor Zverovich
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// transition helper
#ifdef FMT_FORMAT_PROVIDE_PRINTF
#include "printf.h"
#endif
#ifndef FMT_FORMAT_H_
#define FMT_FORMAT_H_
#define FMT_INCLUDE
#include <cassert>
#include <clocale>
#include <cmath>
#include <cstdio>
#include <cstring>
#include <limits>
#include <memory>
#include <stdexcept>
#include <string>
#include <vector>
#include <utility> // for std::pair
#undef FMT_INCLUDE
// The fmt library version in the form major * 10000 + minor * 100 + patch.
#define FMT_VERSION 40101
#if defined(__has_include)
# define FMT_HAS_INCLUDE(x) __has_include(x)
#else
# define FMT_HAS_INCLUDE(x) 0
#endif
#if (FMT_HAS_INCLUDE(<string_view>) && __cplusplus > 201402L) || \
(defined(_MSVC_LANG) && _MSVC_LANG > 201402L && _MSC_VER >= 1910)
# include <string_view>
# define FMT_HAS_STRING_VIEW 1
# define FMT_HAS_EXPERIMENTAL_STRING_VIEW 0
#else
# define FMT_HAS_STRING_VIEW 0
# if (FMT_HAS_INCLUDE(<experimental/string_view>) && __cplusplus >= 201402L)
# include <experimental/string_view>
# define FMT_HAS_EXPERIMENTAL_STRING_VIEW 1
# else
# define FMT_HAS_EXPERIMENTAL_STRING_VIEW 0
# endif
#endif
#if defined _SECURE_SCL && _SECURE_SCL
# define FMT_SECURE_SCL _SECURE_SCL
#else
# define FMT_SECURE_SCL 0
#endif
#if FMT_SECURE_SCL
# include <iterator>
#endif
#ifdef _MSC_VER
# define FMT_MSC_VER _MSC_VER
#else
# define FMT_MSC_VER 0
#endif
#if FMT_MSC_VER && FMT_MSC_VER <= 1500
typedef unsigned __int32 uint32_t;
typedef unsigned __int64 uint64_t;
typedef __int64 intmax_t;
#else
#include <stdint.h>
#endif
#if !defined(FMT_HEADER_ONLY) && defined(_WIN32)
# ifdef FMT_EXPORT
# define FMT_API __declspec(dllexport)
# elif defined(FMT_SHARED)
# define FMT_API __declspec(dllimport)
# endif
#endif
#ifndef FMT_API
# define FMT_API
#endif
#ifdef __GNUC__
# define FMT_GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)
# define FMT_GCC_EXTENSION __extension__
# if FMT_GCC_VERSION >= 406
# pragma GCC diagnostic push
// Disable the warning about "long long" which is sometimes reported even
// when using __extension__.
# pragma GCC diagnostic ignored "-Wlong-long"
// Disable the warning about declaration shadowing because it affects too
// many valid cases.
# pragma GCC diagnostic ignored "-Wshadow"
// Disable the warning about implicit conversions that may change the sign of
// an integer; silencing it otherwise would require many explicit casts.
# pragma GCC diagnostic ignored "-Wsign-conversion"
# endif
# if __cplusplus >= 201103L || defined __GXX_EXPERIMENTAL_CXX0X__
# define FMT_HAS_GXX_CXX11 1
# endif
#else
# define FMT_GCC_VERSION 0
# define FMT_GCC_EXTENSION
# define FMT_HAS_GXX_CXX11 0
#endif
#if defined(__INTEL_COMPILER)
# define FMT_ICC_VERSION __INTEL_COMPILER
#elif defined(__ICL)
# define FMT_ICC_VERSION __ICL
#endif
#if defined(__clang__) && !defined(FMT_ICC_VERSION)
# define FMT_CLANG_VERSION (__clang_major__ * 100 + __clang_minor__)
# pragma clang diagnostic push
# pragma clang diagnostic ignored "-Wdocumentation-unknown-command"
# pragma clang diagnostic ignored "-Wpadded"
#endif
#ifdef __GNUC_LIBSTD__
# define FMT_GNUC_LIBSTD_VERSION (__GNUC_LIBSTD__ * 100 + __GNUC_LIBSTD_MINOR__)
#endif
#ifdef __has_feature
# define FMT_HAS_FEATURE(x) __has_feature(x)
#else
# define FMT_HAS_FEATURE(x) 0
#endif
#ifdef __has_builtin
# define FMT_HAS_BUILTIN(x) __has_builtin(x)
#else
# define FMT_HAS_BUILTIN(x) 0
#endif
#ifdef __has_cpp_attribute
# define FMT_HAS_CPP_ATTRIBUTE(x) __has_cpp_attribute(x)
#else
# define FMT_HAS_CPP_ATTRIBUTE(x) 0
#endif
#if FMT_HAS_CPP_ATTRIBUTE(maybe_unused)
# define FMT_HAS_CXX17_ATTRIBUTE_MAYBE_UNUSED
// VC++ 1910 support /std: option and that will set _MSVC_LANG macro
// Clang with Microsoft CodeGen doesn't define _MSVC_LANG macro
#elif defined(_MSVC_LANG) && _MSVC_LANG > 201402 && _MSC_VER >= 1910
# define FMT_HAS_CXX17_ATTRIBUTE_MAYBE_UNUSED
#endif
#ifdef FMT_HAS_CXX17_ATTRIBUTE_MAYBE_UNUSED
# define FMT_MAYBE_UNUSED [[maybe_unused]]
// g++/clang++ also support [[gnu::unused]]. However, we don't use it.
#elif defined(__GNUC__)
# define FMT_MAYBE_UNUSED __attribute__((unused))
#else
# define FMT_MAYBE_UNUSED
#endif
// Use the compiler's attribute noreturn
#if defined(__MINGW32__) || defined(__MINGW64__)
# define FMT_NORETURN __attribute__((noreturn))
#elif FMT_HAS_CPP_ATTRIBUTE(noreturn) && __cplusplus >= 201103L
# define FMT_NORETURN [[noreturn]]
#else
# define FMT_NORETURN
#endif
#ifndef FMT_USE_VARIADIC_TEMPLATES
// Variadic templates are available in GCC since version 4.4
// (http://gcc.gnu.org/projects/cxx0x.html) and in Visual C++
// since version 2013.
# define FMT_USE_VARIADIC_TEMPLATES \
(FMT_HAS_FEATURE(cxx_variadic_templates) || \
(FMT_GCC_VERSION >= 404 && FMT_HAS_GXX_CXX11) || FMT_MSC_VER >= 1800)
#endif
#ifndef FMT_USE_RVALUE_REFERENCES
// Don't use rvalue references when compiling with clang and an old libstdc++
// as the latter doesn't provide std::move.
# if defined(FMT_GNUC_LIBSTD_VERSION) && FMT_GNUC_LIBSTD_VERSION <= 402
# define FMT_USE_RVALUE_REFERENCES 0
# else
# define FMT_USE_RVALUE_REFERENCES \
(FMT_HAS_FEATURE(cxx_rvalue_references) || \
(FMT_GCC_VERSION >= 403 && FMT_HAS_GXX_CXX11) || FMT_MSC_VER >= 1600)
# endif
#endif
#if __cplusplus >= 201103L || FMT_MSC_VER >= 1700
# define FMT_USE_ALLOCATOR_TRAITS 1
#else
# define FMT_USE_ALLOCATOR_TRAITS 0
#endif
// Check if exceptions are disabled.
#if defined(__GNUC__) && !defined(__EXCEPTIONS)
# define FMT_EXCEPTIONS 0
#endif
#if FMT_MSC_VER && !_HAS_EXCEPTIONS
# define FMT_EXCEPTIONS 0
#endif
#ifndef FMT_EXCEPTIONS
# define FMT_EXCEPTIONS 1
#endif
#ifndef FMT_THROW
# if FMT_EXCEPTIONS
# define FMT_THROW(x) throw x
# else
# define FMT_THROW(x) assert(false)
# endif
#endif
// Define FMT_USE_NOEXCEPT to make fmt use noexcept (C++11 feature).
#ifndef FMT_USE_NOEXCEPT
# define FMT_USE_NOEXCEPT 0
#endif
#if FMT_USE_NOEXCEPT || FMT_HAS_FEATURE(cxx_noexcept) || \
(FMT_GCC_VERSION >= 408 && FMT_HAS_GXX_CXX11) || \
FMT_MSC_VER >= 1900
# define FMT_DETECTED_NOEXCEPT noexcept
#else
# define FMT_DETECTED_NOEXCEPT throw()
#endif
#ifndef FMT_NOEXCEPT
# if FMT_EXCEPTIONS
# define FMT_NOEXCEPT FMT_DETECTED_NOEXCEPT
# else
# define FMT_NOEXCEPT
# endif
#endif
// This is needed because GCC still uses throw() in its headers when exceptions
// are disabled.
#if FMT_GCC_VERSION
# define FMT_DTOR_NOEXCEPT FMT_DETECTED_NOEXCEPT
#else
# define FMT_DTOR_NOEXCEPT FMT_NOEXCEPT
#endif
#ifndef FMT_OVERRIDE
# if (defined(FMT_USE_OVERRIDE) && FMT_USE_OVERRIDE) || FMT_HAS_FEATURE(cxx_override) || \
(FMT_GCC_VERSION >= 408 && FMT_HAS_GXX_CXX11) || \
FMT_MSC_VER >= 1900
# define FMT_OVERRIDE override
# else
# define FMT_OVERRIDE
# endif
#endif
#ifndef FMT_NULL
# if FMT_HAS_FEATURE(cxx_nullptr) || \
(FMT_GCC_VERSION >= 408 && FMT_HAS_GXX_CXX11) || \
FMT_MSC_VER >= 1600
# define FMT_NULL nullptr
# else
# define FMT_NULL NULL
# endif
#endif
// A macro to disallow the copy constructor and operator= functions
// This should be used in the private: declarations for a class
#ifndef FMT_USE_DELETED_FUNCTIONS
# define FMT_USE_DELETED_FUNCTIONS 0
#endif
#if FMT_USE_DELETED_FUNCTIONS || FMT_HAS_FEATURE(cxx_deleted_functions) || \
(FMT_GCC_VERSION >= 404 && FMT_HAS_GXX_CXX11) || FMT_MSC_VER >= 1800
# define FMT_DELETED_OR_UNDEFINED = delete
# define FMT_DISALLOW_COPY_AND_ASSIGN(TypeName) \
TypeName(const TypeName&) = delete; \
TypeName& operator=(const TypeName&) = delete
#else
# define FMT_DELETED_OR_UNDEFINED
# define FMT_DISALLOW_COPY_AND_ASSIGN(TypeName) \
TypeName(const TypeName&); \
TypeName& operator=(const TypeName&)
#endif
#ifndef FMT_USE_DEFAULTED_FUNCTIONS
# define FMT_USE_DEFAULTED_FUNCTIONS 0
#endif
#ifndef FMT_DEFAULTED_COPY_CTOR
# if FMT_USE_DEFAULTED_FUNCTIONS || FMT_HAS_FEATURE(cxx_defaulted_functions) || \
(FMT_GCC_VERSION >= 404 && FMT_HAS_GXX_CXX11) || FMT_MSC_VER >= 1800
# define FMT_DEFAULTED_COPY_CTOR(TypeName) \
TypeName(const TypeName&) = default;
# else
# define FMT_DEFAULTED_COPY_CTOR(TypeName)
# endif
#endif
#ifndef FMT_USE_USER_DEFINED_LITERALS
// All compilers which support UDLs also support variadic templates. This
// makes the fmt::literals implementation easier. However, an explicit check
// for variadic templates is added here just in case.
// For Intel's compiler both it and the system gcc/msc must support UDLs.
# if FMT_USE_VARIADIC_TEMPLATES && FMT_USE_RVALUE_REFERENCES && \
(FMT_HAS_FEATURE(cxx_user_literals) || \
(FMT_GCC_VERSION >= 407 && FMT_HAS_GXX_CXX11) || FMT_MSC_VER >= 1900) && \
(!defined(FMT_ICC_VERSION) || FMT_ICC_VERSION >= 1500)
# define FMT_USE_USER_DEFINED_LITERALS 1
# else
# define FMT_USE_USER_DEFINED_LITERALS 0
# endif
#endif
#ifndef FMT_USE_EXTERN_TEMPLATES
# define FMT_USE_EXTERN_TEMPLATES \
(FMT_CLANG_VERSION >= 209 || (FMT_GCC_VERSION >= 303 && FMT_HAS_GXX_CXX11))
#endif
#ifdef FMT_HEADER_ONLY
// If header only do not use extern templates.
# undef FMT_USE_EXTERN_TEMPLATES
# define FMT_USE_EXTERN_TEMPLATES 0
#endif
#ifndef FMT_ASSERT
# define FMT_ASSERT(condition, message) assert((condition) && message)
#endif
// __builtin_clz is broken in clang with Microsoft CodeGen:
// https://github.com/fmtlib/fmt/issues/519
#ifndef _MSC_VER
# if FMT_GCC_VERSION >= 400 || FMT_HAS_BUILTIN(__builtin_clz)
# define FMT_BUILTIN_CLZ(n) __builtin_clz(n)
# endif
# if FMT_GCC_VERSION >= 400 || FMT_HAS_BUILTIN(__builtin_clzll)
# define FMT_BUILTIN_CLZLL(n) __builtin_clzll(n)
# endif
#endif
// Some compilers masquerade as both MSVC and GCC-likes or
// otherwise support __builtin_clz and __builtin_clzll, so
// only define FMT_BUILTIN_CLZ using the MSVC intrinsics
// if the clz and clzll builtins are not available.
#if FMT_MSC_VER && !defined(FMT_BUILTIN_CLZLL) && !defined(_MANAGED)
# include <intrin.h> // _BitScanReverse, _BitScanReverse64
namespace fmt {
namespace internal {
// avoid Clang with Microsoft CodeGen's -Wunknown-pragmas warning
# ifndef __clang__
# pragma intrinsic(_BitScanReverse)
# endif
// Counts leading zero bits of a nonzero 32-bit value using the MSVC
// _BitScanReverse intrinsic (index of highest set bit -> 31 - index).
// Precondition: x != 0; callers guarantee this (see assert below).
inline uint32_t clz(uint32_t x) {
unsigned long r = 0;
_BitScanReverse(&r, x);
assert(x != 0);
// Static analysis complains about using uninitialized data
// "r", but the only way that can happen is if "x" is 0,
// which the callers guarantee to not happen.
# pragma warning(suppress: 6102)
return 31 - r;
}
# define FMT_BUILTIN_CLZ(n) fmt::internal::clz(n)
// avoid Clang with Microsoft CodeGen's -Wunknown-pragmas warning
# if defined(_WIN64) && !defined(__clang__)
# pragma intrinsic(_BitScanReverse64)
# endif
// Counts leading zero bits of a nonzero 64-bit value. On 64-bit Windows a
// single _BitScanReverse64 suffices; on 32-bit Windows the high and low
// 32-bit halves are scanned separately (high half first, returning early
// if it contains any set bit). Precondition: x != 0.
inline uint32_t clzll(uint64_t x) {
unsigned long r = 0;
# ifdef _WIN64
_BitScanReverse64(&r, x);
# else
// Scan the high 32 bits.
if (_BitScanReverse(&r, static_cast<uint32_t>(x >> 32)))
return 63 - (r + 32);
// Scan the low 32 bits.
_BitScanReverse(&r, static_cast<uint32_t>(x));
# endif
assert(x != 0);
// Static analysis complains about using uninitialized data
// "r", but the only way that can happen is if "x" is 0,
// which the callers guarantee to not happen.
# pragma warning(suppress: 6102)
return 63 - r;
}
# define FMT_BUILTIN_CLZLL(n) fmt::internal::clzll(n)
}
}
#endif
namespace fmt {
namespace internal {
// Sentinel type used to detect, via overload resolution, whether a real
// system function (isinf, isnan, signbit, _ecvt_s, _finite, _isnan) is
// available: the variadic catch-all overloads below return DummyInt, whose
// size (two ints) differs from any real return type, so callers can test
// sizeof(call) at compile time (see numeric_limits<DummyInt> below).
struct DummyInt {
int data[2];
operator int() const { return 0; }
};
typedef std::numeric_limits<fmt::internal::DummyInt> FPUtil;
// Dummy implementations of system functions such as signbit and ecvt called
// if the latter are not available.
inline DummyInt signbit(...) { return DummyInt(); }
inline DummyInt _ecvt_s(...) { return DummyInt(); }
inline DummyInt isinf(...) { return DummyInt(); }
inline DummyInt _finite(...) { return DummyInt(); }
inline DummyInt isnan(...) { return DummyInt(); }
inline DummyInt _isnan(...) { return DummyInt(); }
// A helper function to suppress bogus "conditional expression is constant"
// warnings.
template <typename T>
inline T const_check(T value) { return value; }
}
} // namespace fmt
namespace std {
// Standard permits specialization of std::numeric_limits. This specialization
// is used to resolve ambiguity between isinf and std::isinf in glibc:
// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=48891
// and the same for isnan and signbit.
// Specialization of std::numeric_limits for the DummyInt sentinel; provides
// portable isinf/isnan/signbit wrappers. Each wrapper uses unqualified calls
// so that a real macro/std/global implementation is preferred by overload
// resolution; the sizeof comparison against DummyInt detects (at compile
// time) whether a real implementation was found, falling back to the MSVC
// _finite/_isnan/_ecvt_s family otherwise.
template <>
class numeric_limits<fmt::internal::DummyInt> :
public std::numeric_limits<int> {
public:
// Portable version of isinf.
template <typename T>
static bool isinfinity(T x) {
using namespace fmt::internal;
// The resolution "priority" is:
// isinf macro > std::isinf > ::isinf > fmt::internal::isinf
if (const_check(sizeof(isinf(x)) != sizeof(fmt::internal::DummyInt))) {
return isinf(x) != 0;
}
return !_finite(static_cast<double>(x));
}
// Portable version of isnan.
template <typename T>
static bool isnotanumber(T x) {
using namespace fmt::internal;
if (const_check(sizeof(isnan(x)) != sizeof(fmt::internal::DummyInt))) {
return isnan(x) != 0;
}
return _isnan(static_cast<double>(x)) != 0;
}
// Portable version of signbit.
static bool isnegative(double x) {
using namespace fmt::internal;
if (const_check(sizeof(signbit(x)) != sizeof(fmt::internal::DummyInt))) {
return signbit(x) != 0;
}
// No real signbit available: negative values (and negative NaN, detected
// via the sign reported by _ecvt_s) are handled below.
if (x < 0) return true;
if (!isnotanumber(x)) return false;
int dec = 0, sign = 0;
char buffer[2]; // The buffer size must be >= 2 or _ecvt_s will fail.
_ecvt_s(buffer, sizeof(buffer), x, 0, &dec, &sign);
return sign != 0;
}
};
} // namespace std
namespace fmt {
// Fix the warning about long long on older versions of GCC
// that don't support the diagnostic pragma.
FMT_GCC_EXTENSION typedef long long LongLong;
FMT_GCC_EXTENSION typedef unsigned long long ULongLong;
#if FMT_USE_RVALUE_REFERENCES
using std::move;
#endif
template <typename Char>
class BasicWriter;
typedef BasicWriter<char> Writer;
typedef BasicWriter<wchar_t> WWriter;
template <typename Char>
class ArgFormatter;
struct FormatSpec;
template <typename Impl, typename Char, typename Spec = fmt::FormatSpec>
class BasicPrintfArgFormatter;
template <typename CharType,
typename ArgFormatter = fmt::ArgFormatter<CharType> >
class BasicFormatter;
/**
\rst
A string reference. It can be constructed from a C string or
``std::basic_string``.
You can use one of the following typedefs for common character types:
+------------+-------------------------+
| Type | Definition |
+============+=========================+
| StringRef | BasicStringRef<char> |
+------------+-------------------------+
| WStringRef | BasicStringRef<wchar_t> |
+------------+-------------------------+
This class is most useful as a parameter type to allow passing
different types of strings to a function, for example::
template <typename... Args>
std::string format(StringRef format_str, const Args & ... args);
format("{}", 42);
format(std::string("{}"), 42);
\endrst
*/
template <typename Char>
class BasicStringRef {
 private:
  const Char *data_;   // not owned; not necessarily null-terminated
  std::size_t size_;   // length in code units

 public:
  /** Constructs a string reference object from a C string and a size. */
  BasicStringRef(const Char *s, std::size_t size) : data_(s), size_(size) {}

  /**
    \rst
    Constructs a string reference object from a C string computing
    the size with ``std::char_traits<Char>::length``.
    \endrst
   */
  BasicStringRef(const Char *s)
    : data_(s), size_(std::char_traits<Char>::length(s)) {}

  /**
    \rst
    Constructs a string reference from a ``std::basic_string`` object.
    \endrst
   */
  template <typename Allocator>
  BasicStringRef(
      const std::basic_string<Char, std::char_traits<Char>, Allocator> &s)
  : data_(s.c_str()), size_(s.size()) {}

#if FMT_HAS_STRING_VIEW
  /**
    \rst
    Constructs a string reference from a ``std::basic_string_view`` object.
    \endrst
   */
  BasicStringRef(
      const std::basic_string_view<Char, std::char_traits<Char>> &s)
  : data_(s.data()), size_(s.size()) {}

  /**
    \rst
    Converts a string reference to an ``std::string_view`` object.
    \endrst
   */
  explicit operator std::basic_string_view<Char>() const FMT_NOEXCEPT {
    return std::basic_string_view<Char>(data_, size_);
  }
#endif

#if FMT_HAS_EXPERIMENTAL_STRING_VIEW
  /**
    \rst
    Constructs a string reference from a
    ``std::experimental::basic_string_view`` object.
    \endrst
   */
  BasicStringRef(
      const std::experimental::basic_string_view<Char, std::char_traits<Char>> &s)
  : data_(s.data()), size_(s.size()) {}

  /**
    \rst
    Converts a string reference to an ``std::experimental::string_view`` object.
    \endrst
   */
  explicit operator std::experimental::basic_string_view<Char>() const FMT_NOEXCEPT {
    return std::experimental::basic_string_view<Char>(data_, size_);
  }
#endif

  /**
    \rst
    Converts a string reference to an ``std::string`` object (copies the data).
    \endrst
   */
  std::basic_string<Char> to_string() const {
    return std::basic_string<Char>(data_, size_);
  }

  /** Returns a pointer to the string data. */
  const Char *data() const { return data_; }

  /** Returns the string size. */
  std::size_t size() const { return size_; }

  // Lexicographically compare this string reference to other.
  // Returns a negative value, zero, or a positive value; when one string is
  // a prefix of the other, the shorter compares less.
  int compare(BasicStringRef other) const {
    std::size_t size = size_ < other.size_ ? size_ : other.size_;
    int result = std::char_traits<Char>::compare(data_, other.data_, size);
    if (result == 0)
      result = size_ == other.size_ ? 0 : (size_ < other.size_ ? -1 : 1);
    return result;
  }

  // All relational operators are defined in terms of compare().
  friend bool operator==(BasicStringRef lhs, BasicStringRef rhs) {
    return lhs.compare(rhs) == 0;
  }
  friend bool operator!=(BasicStringRef lhs, BasicStringRef rhs) {
    return lhs.compare(rhs) != 0;
  }
  friend bool operator<(BasicStringRef lhs, BasicStringRef rhs) {
    return lhs.compare(rhs) < 0;
  }
  friend bool operator<=(BasicStringRef lhs, BasicStringRef rhs) {
    return lhs.compare(rhs) <= 0;
  }
  friend bool operator>(BasicStringRef lhs, BasicStringRef rhs) {
    return lhs.compare(rhs) > 0;
  }
  friend bool operator>=(BasicStringRef lhs, BasicStringRef rhs) {
    return lhs.compare(rhs) >= 0;
  }
};
typedef BasicStringRef<char> StringRef;
typedef BasicStringRef<wchar_t> WStringRef;
/**
\rst
A reference to a null terminated string. It can be constructed from a C
string or ``std::basic_string``.
You can use one of the following typedefs for common character types:
+-------------+--------------------------+
| Type | Definition |
+=============+==========================+
| CStringRef | BasicCStringRef<char> |
+-------------+--------------------------+
| WCStringRef | BasicCStringRef<wchar_t> |
+-------------+--------------------------+
This class is most useful as a parameter type to allow passing
different types of strings to a function, for example::
template <typename... Args>
std::string format(CStringRef format_str, const Args & ... args);
format("{}", 42);
format(std::string("{}"), 42);
\endrst
*/
template <typename Char>
class BasicCStringRef {
 private:
  const Char *data_;  // not owned; must be null-terminated

 public:
  /** Constructs a string reference object from a C string. */
  BasicCStringRef(const Char *s) : data_(s) {}

  /**
    \rst
    Constructs a string reference from a ``std::basic_string`` object.
    \endrst
   */
  template <typename Allocator>
  BasicCStringRef(
      const std::basic_string<Char, std::char_traits<Char>, Allocator> &s)
  : data_(s.c_str()) {}

  /** Returns the pointer to a C string. */
  const Char *c_str() const { return data_; }
};
typedef BasicCStringRef<char> CStringRef;
typedef BasicCStringRef<wchar_t> WCStringRef;
/** A formatting error such as invalid format string. */
class FormatError : public std::runtime_error {
 public:
  explicit FormatError(CStringRef message)
    : std::runtime_error(message.c_str()) {}
  FormatError(const FormatError &ferr) : std::runtime_error(ferr) {}
  // Out-of-line (FMT_API) destructor so the vtable is emitted in one place.
  FMT_API ~FormatError() FMT_DTOR_NOEXCEPT FMT_OVERRIDE;
};
namespace internal {
// MakeUnsigned<T>::Type gives an unsigned type corresponding to integer type T.
template <typename T>
struct MakeUnsigned { typedef T Type; };

#define FMT_SPECIALIZE_MAKE_UNSIGNED(T, U) \
  template <> \
  struct MakeUnsigned<T> { typedef U Type; }

FMT_SPECIALIZE_MAKE_UNSIGNED(char, unsigned char);
FMT_SPECIALIZE_MAKE_UNSIGNED(signed char, unsigned char);
FMT_SPECIALIZE_MAKE_UNSIGNED(short, unsigned short);
FMT_SPECIALIZE_MAKE_UNSIGNED(int, unsigned);
FMT_SPECIALIZE_MAKE_UNSIGNED(long, unsigned long);
FMT_SPECIALIZE_MAKE_UNSIGNED(LongLong, ULongLong);

// Casts nonnegative integer to unsigned.
// Asserts (via FMT_ASSERT) that the value is in fact nonnegative.
template <typename Int>
inline typename MakeUnsigned<Int>::Type to_unsigned(Int value) {
  FMT_ASSERT(value >= 0, "negative value");
  return static_cast<typename MakeUnsigned<Int>::Type>(value);
}
// The number of characters to store in the MemoryBuffer object itself
// to avoid dynamic memory allocation.
enum { INLINE_BUFFER_SIZE = 500 };

#if FMT_SECURE_SCL
// Use checked iterator to avoid warnings on MSVC.
template <typename T>
inline stdext::checked_array_iterator<T*> make_ptr(T *ptr, std::size_t size) {
  return stdext::checked_array_iterator<T*>(ptr, size);
}
#else
// Plain-pointer version; the size argument is intentionally unused.
template <typename T>
inline T *make_ptr(T *ptr, std::size_t) { return ptr; }
#endif
} // namespace internal
/**
\rst
A buffer supporting a subset of ``std::vector``'s operations.
\endrst
*/
template <typename T>
class Buffer {
 private:
  FMT_DISALLOW_COPY_AND_ASSIGN(Buffer);

 protected:
  T *ptr_;               // storage, owned by the derived class
  std::size_t size_;     // number of elements in use
  std::size_t capacity_; // number of elements the storage can hold

  Buffer(T *ptr = FMT_NULL, std::size_t capacity = 0)
    : ptr_(ptr), size_(0), capacity_(capacity) {}

  /**
    \rst
    Increases the buffer capacity to hold at least *size* elements updating
    ``ptr_`` and ``capacity_``.
    \endrst
   */
  virtual void grow(std::size_t size) = 0;

 public:
  virtual ~Buffer() {}

  /** Returns the size of this buffer. */
  std::size_t size() const { return size_; }

  /** Returns the capacity of this buffer. */
  std::size_t capacity() const { return capacity_; }

  /**
    Resizes the buffer. If T is a POD type new elements may not be initialized.
   */
  void resize(std::size_t new_size) {
    if (new_size > capacity_)
      grow(new_size);
    size_ = new_size;
  }

  /**
    \rst
    Reserves space to store at least *capacity* elements.
    \endrst
   */
  void reserve(std::size_t capacity) {
    if (capacity > capacity_)
      grow(capacity);
  }

  // Drops the contents without releasing the storage.
  void clear() FMT_NOEXCEPT { size_ = 0; }

  // Appends a single element, growing the buffer if it is full.
  void push_back(const T &value) {
    if (size_ == capacity_)
      grow(size_ + 1);
    ptr_[size_++] = value;
  }

  /** Appends data to the end of the buffer. */
  template <typename U>
  void append(const U *begin, const U *end);

  // Unchecked element access.
  T &operator[](std::size_t index) { return ptr_[index]; }
  const T &operator[](std::size_t index) const { return ptr_[index]; }
};
// Appends the range [begin, end) to the buffer, growing it if needed.
template <typename T>
template <typename U>
void Buffer<T>::append(const U *begin, const U *end) {
  FMT_ASSERT(end >= begin, "negative value");
  std::size_t new_size = size_ + static_cast<std::size_t>(end - begin);
  if (new_size > capacity_)
    grow(new_size);
  // The destination elements may be raw storage, so use uninitialized_copy.
  std::uninitialized_copy(begin, end,
                          internal::make_ptr(ptr_, capacity_) + size_);
  size_ = new_size;
}
namespace internal {
// A memory buffer for trivially copyable/constructible types with the first
// SIZE elements stored in the object itself.
template <typename T, std::size_t SIZE, typename Allocator = std::allocator<T> >
class MemoryBuffer : private Allocator, public Buffer<T> {
 private:
  // Inline storage used until the buffer outgrows SIZE elements.
  T data_[SIZE];

  // Deallocate memory allocated by the buffer.
  // The inline array is never passed to the allocator.
  void deallocate() {
    if (this->ptr_ != data_) Allocator::deallocate(this->ptr_, this->capacity_);
  }

 protected:
  void grow(std::size_t size) FMT_OVERRIDE;

 public:
  explicit MemoryBuffer(const Allocator &alloc = Allocator())
      : Allocator(alloc), Buffer<T>(data_, SIZE) {}
  ~MemoryBuffer() FMT_OVERRIDE { deallocate(); }

#if FMT_USE_RVALUE_REFERENCES
 private:
  // Move data from other to this buffer.
  void move(MemoryBuffer &other) {
    Allocator &this_alloc = *this, &other_alloc = other;
    this_alloc = std::move(other_alloc);
    this->size_ = other.size_;
    this->capacity_ = other.capacity_;
    if (other.ptr_ == other.data_) {
      // Inline storage cannot be stolen; copy the elements instead.
      this->ptr_ = data_;
      std::uninitialized_copy(other.data_, other.data_ + this->size_,
                              make_ptr(data_, this->capacity_));
    } else {
      this->ptr_ = other.ptr_;
      // Set pointer to the inline array so that delete is not called
      // when deallocating.
      other.ptr_ = other.data_;
    }
  }

 public:
  MemoryBuffer(MemoryBuffer &&other) {
    move(other);
  }

  MemoryBuffer &operator=(MemoryBuffer &&other) {
    assert(this != &other);
    deallocate();
    move(other);
    return *this;
  }
#endif

  // Returns a copy of the allocator associated with this buffer.
  Allocator get_allocator() const { return *this; }
};
// Grows the buffer to hold at least `size` elements, moving the contents
// into newly allocated storage.
template <typename T, std::size_t SIZE, typename Allocator>
void MemoryBuffer<T, SIZE, Allocator>::grow(std::size_t size) {
  // Grow by at least 1.5x so repeated appends stay amortized O(1).
  std::size_t new_capacity = this->capacity_ + this->capacity_ / 2;
  if (size > new_capacity)
    new_capacity = size;
#if FMT_USE_ALLOCATOR_TRAITS
  T *new_ptr =
      std::allocator_traits<Allocator>::allocate(*this, new_capacity, FMT_NULL);
#else
  T *new_ptr = this->allocate(new_capacity, FMT_NULL);
#endif
  // The following code doesn't throw, so the raw pointer above doesn't leak.
  std::uninitialized_copy(this->ptr_, this->ptr_ + this->size_,
                          make_ptr(new_ptr, new_capacity));
  std::size_t old_capacity = this->capacity_;
  T *old_ptr = this->ptr_;
  this->capacity_ = new_capacity;
  this->ptr_ = new_ptr;
  // deallocate may throw (at least in principle), but it doesn't matter since
  // the buffer already uses the new storage and will deallocate it in case
  // of exception.
  if (old_ptr != data_)
    Allocator::deallocate(old_ptr, old_capacity);
}
// A fixed-size buffer wrapping a caller-supplied array.
// NOTE(review): grow() is defined out of line (FMT_API); a fixed buffer
// presumably cannot grow, so it likely signals overflow — confirm in the
// library source.
template <typename Char>
class FixedBuffer : public fmt::Buffer<Char> {
 public:
  FixedBuffer(Char *array, std::size_t size) : fmt::Buffer<Char>(array, size) {}

 protected:
  FMT_API void grow(std::size_t size) FMT_OVERRIDE;
};
// Common traits shared by the char and wchar_t CharTraits specializations.
template <typename Char>
class BasicCharTraits {
 public:
#if FMT_SECURE_SCL
  // Use a checked iterator on MSVC to avoid warnings.
  typedef stdext::checked_array_iterator<Char*> CharPtr;
#else
  typedef Char *CharPtr;
#endif
  static Char cast(int value) { return static_cast<Char>(value); }
};
template <typename Char>
class CharTraits;

template <>
class CharTraits<char> : public BasicCharTraits<char> {
 private:
  // Conversion from wchar_t to char is not allowed (declared, not defined).
  static char convert(wchar_t);

 public:
  static char convert(char value) { return value; }

  // Formats a floating-point number. Defined out of line in the library.
  template <typename T>
  FMT_API static int format_float(char *buffer, std::size_t size,
      const char *format, unsigned width, int precision, T value);
};

#if FMT_USE_EXTERN_TEMPLATES
extern template int CharTraits<char>::format_float<double>
    (char *buffer, std::size_t size,
     const char* format, unsigned width, int precision, double value);
extern template int CharTraits<char>::format_float<long double>
    (char *buffer, std::size_t size,
     const char* format, unsigned width, int precision, long double value);
#endif

template <>
class CharTraits<wchar_t> : public BasicCharTraits<wchar_t> {
 public:
  // Widening char to wchar_t is allowed.
  static wchar_t convert(char value) { return value; }
  static wchar_t convert(wchar_t value) { return value; }

  // Formats a floating-point number. Defined out of line in the library.
  template <typename T>
  FMT_API static int format_float(wchar_t *buffer, std::size_t size,
      const wchar_t *format, unsigned width, int precision, T value);
};

#if FMT_USE_EXTERN_TEMPLATES
extern template int CharTraits<wchar_t>::format_float<double>
    (wchar_t *buffer, std::size_t size,
     const wchar_t* format, unsigned width, int precision, double value);
extern template int CharTraits<wchar_t>::format_float<long double>
    (wchar_t *buffer, std::size_t size,
     const wchar_t* format, unsigned width, int precision, long double value);
#endif
// Checks if a number is negative - used to avoid warnings.
template <bool IsSigned>
struct SignChecker {
  template <typename T>
  static bool is_negative(T value) { return value < 0; }
};

// Specialization for unsigned types: always false, without performing the
// tautological `value < 0` comparison that compilers warn about.
template <>
struct SignChecker<false> {
  template <typename T>
  static bool is_negative(T) { return false; }
};

// Returns true if value is negative, false otherwise.
// Same as (value < 0) but doesn't produce warnings if T is an unsigned type.
template <typename T>
inline bool is_negative(T value) {
  return SignChecker<std::numeric_limits<T>::is_signed>::is_negative(value);
}
// Selects uint32_t if FitsIn32Bits is true, uint64_t otherwise.
template <bool FitsIn32Bits>
struct TypeSelector { typedef uint32_t Type; };
template <>
struct TypeSelector<false> { typedef uint64_t Type; };

template <typename T>
struct IntTraits {
  // Smallest of uint32_t and uint64_t that is large enough to represent
  // all values of T.
  typedef typename
    TypeSelector<std::numeric_limits<T>::digits <= 32>::Type MainType;
};
FMT_API FMT_NORETURN void report_unknown_type(char code, const char *type);
// Static data is placed in this class template to allow header-only
// configuration.
template <typename T = void>
struct FMT_API BasicData {
  static const uint32_t POWERS_OF_10_32[];
  static const uint64_t POWERS_OF_10_64[];
  // Two-character digit pairs; indexed as value * 2 (see format_decimal).
  static const char DIGITS[];
};

#if FMT_USE_EXTERN_TEMPLATES
extern template struct BasicData<void>;
#endif

typedef BasicData<> Data;
#ifdef FMT_BUILTIN_CLZLL
// Returns the number of decimal digits in n. Leading zeros are not counted
// except for n == 0 in which case count_digits returns 1.
inline unsigned count_digits(uint64_t n) {
  // Based on http://graphics.stanford.edu/~seander/bithacks.html#IntegerLog10
  // and the benchmark https://github.com/localvoid/cxx-benchmark-count-digits.
  // t approximates log10(n) from the bit length; the comparison against the
  // power-of-ten table corrects it to the exact digit count.
  int t = (64 - FMT_BUILTIN_CLZLL(n | 1)) * 1233 >> 12;
  return to_unsigned(t) - (n < Data::POWERS_OF_10_64[t]) + 1;
}
#else
// Fallback version of count_digits used when __builtin_clz is not available.
inline unsigned count_digits(uint64_t n) {
  // Strip four digits per iteration so the slow integer division runs
  // once per group rather than once per digit (an idea from Alexandrescu's
  // "Three Optimization Tips for C++" talk; see speed-test for a comparison).
  unsigned digits = 1;
  while (n >= 10000) {
    n /= 10000u;
    digits += 4;
  }
  // At most four digits remain; finish with comparisons only.
  if (n >= 1000) return digits + 3;
  if (n >= 100) return digits + 2;
  if (n >= 10) return digits + 1;
  return digits;
}
#endif
#ifdef FMT_BUILTIN_CLZ
// Optional version of count_digits for better performance on 32-bit platforms.
inline unsigned count_digits(uint32_t n) {
  // Same log10 approximation + table correction as the 64-bit version.
  int t = (32 - FMT_BUILTIN_CLZ(n | 1)) * 1233 >> 12;
  return to_unsigned(t) - (n < Data::POWERS_OF_10_32[t]) + 1;
}
#endif
// A functor that doesn't add a thousands separator.
struct NoThousandsSep {
  template <typename Char>
  void operator()(Char *) {}
};

// A functor that adds a thousands separator.
class ThousandsSep {
 private:
  fmt::StringRef sep_;

  // Index of a decimal digit with the least significant digit having index 0.
  unsigned digit_index_;

 public:
  explicit ThousandsSep(fmt::StringRef sep) : sep_(sep), digit_index_(0) {}

  // Called after each digit is written; digits are written right to left,
  // so before every third digit the separator is copied in and the cursor
  // moved back by its length.
  template <typename Char>
  void operator()(Char *&buffer) {
    if (++digit_index_ % 3 != 0)
      return;
    buffer -= sep_.size();
    std::uninitialized_copy(sep_.data(), sep_.data() + sep_.size(),
                            internal::make_ptr(buffer, sep_.size()));
  }
};
// Formats a decimal unsigned integer value writing into buffer.
// thousands_sep is a functor that is called after writing each char to
// add a thousands separator if necessary.
// The caller must size the buffer for num_digits characters; digits are
// written right to left starting at buffer + num_digits.
template <typename UInt, typename Char, typename ThousandsSep>
inline void format_decimal(Char *buffer, UInt value, unsigned num_digits,
                           ThousandsSep thousands_sep) {
  buffer += num_digits;
  while (value >= 100) {
    // Integer division is slow so do it for a group of two digits instead
    // of for every digit. The idea comes from the talk by Alexandrescu
    // "Three Optimization Tips for C++". See speed-test for a comparison.
    unsigned index = static_cast<unsigned>((value % 100) * 2);
    value /= 100;
    *--buffer = Data::DIGITS[index + 1];
    thousands_sep(buffer);
    *--buffer = Data::DIGITS[index];
    thousands_sep(buffer);
  }
  // One digit left: emit it directly.
  if (value < 10) {
    *--buffer = static_cast<char>('0' + value);
    return;
  }
  // Exactly two digits left: one more table lookup.
  unsigned index = static_cast<unsigned>(value * 2);
  *--buffer = Data::DIGITS[index + 1];
  thousands_sep(buffer);
  *--buffer = Data::DIGITS[index];
}
// Convenience overload of format_decimal that writes no thousands
// separators. Delegates to the general overload with a no-op functor.
// (Fix: dropped the redundant trailing `return;` in a void function.)
template <typename UInt, typename Char>
inline void format_decimal(Char *buffer, UInt value, unsigned num_digits) {
  format_decimal(buffer, value, num_digits, NoThousandsSep());
}
#ifndef _WIN32
# define FMT_USE_WINDOWS_H 0
#elif !defined(FMT_USE_WINDOWS_H)
# define FMT_USE_WINDOWS_H 1
#endif
// Define FMT_USE_WINDOWS_H to 0 to disable use of windows.h.
// All the functionality that relies on it will be disabled too.
#if FMT_USE_WINDOWS_H
// A converter from UTF-8 to UTF-16.
// It is only provided for Windows since other systems support UTF-8 natively.
class UTF8ToUTF16 {
 private:
  MemoryBuffer<wchar_t, INLINE_BUFFER_SIZE> buffer_;

 public:
  FMT_API explicit UTF8ToUTF16(StringRef s);
  operator WStringRef() const { return WStringRef(&buffer_[0], size()); }
  // Size excludes the trailing null stored in the buffer.
  size_t size() const { return buffer_.size() - 1; }
  const wchar_t *c_str() const { return &buffer_[0]; }
  std::wstring str() const { return std::wstring(&buffer_[0], size()); }
};

// A converter from UTF-16 to UTF-8.
// It is only provided for Windows since other systems support UTF-8 natively.
class UTF16ToUTF8 {
 private:
  MemoryBuffer<char, INLINE_BUFFER_SIZE> buffer_;

 public:
  UTF16ToUTF8() {}
  FMT_API explicit UTF16ToUTF8(WStringRef s);
  operator StringRef() const { return StringRef(&buffer_[0], size()); }
  // Size excludes the trailing null stored in the buffer.
  size_t size() const { return buffer_.size() - 1; }
  const char *c_str() const { return &buffer_[0]; }
  std::string str() const { return std::string(&buffer_[0], size()); }

  // Performs conversion returning a system error code instead of
  // throwing exception on conversion error. This method may still throw
  // in case of memory allocation error.
  FMT_API int convert(WStringRef s);
};

FMT_API void format_windows_error(fmt::Writer &out, int error_code,
                                  fmt::StringRef message) FMT_NOEXCEPT;
#endif
// A formatting argument value. A union keeps the object small and trivially
// copyable; the active member is identified by the Type tag stored in Arg.
struct Value {
  template <typename Char>
  struct StringValue {
    const Char *value;
    std::size_t size;
  };

  // Type-erased formatting function for custom (user-defined) types.
  typedef void (*FormatFunc)(
      void *formatter, const void *arg, void *format_str_ptr);

  struct CustomValue {
    const void *value;
    FormatFunc format;
  };

  union {
    int int_value;
    unsigned uint_value;
    LongLong long_long_value;
    ULongLong ulong_long_value;
    double double_value;
    long double long_double_value;
    const void *pointer;
    StringValue<char> string;
    StringValue<signed char> sstring;
    StringValue<unsigned char> ustring;
    StringValue<wchar_t> wstring;
    CustomValue custom;
  };

  // The enumerator order is significant: LAST_INTEGER_TYPE and
  // LAST_NUMERIC_TYPE are used for range checks on the type tag.
  enum Type {
    NONE, NAMED_ARG,
    // Integer types should go first,
    INT, UINT, LONG_LONG, ULONG_LONG, BOOL, CHAR, LAST_INTEGER_TYPE = CHAR,
    // followed by floating-point types.
    DOUBLE, LONG_DOUBLE, LAST_NUMERIC_TYPE = LONG_DOUBLE,
    CSTRING, STRING, WSTRING, POINTER, CUSTOM
  };
};

// A formatting argument. It is a trivially copyable/constructible type to
// allow storage in internal::MemoryBuffer.
struct Arg : Value {
  Type type;
};
template <typename Char>
struct NamedArg;
template <typename Char, typename T>
struct NamedArgWithType;
// An empty tag type; used by WCharHelper to make overloads non-viable.
template <typename T = void>
struct Null {};

// A helper class template to enable or disable overloads taking wide
// characters and strings in MakeValue.
template <typename T, typename Char>
struct WCharHelper {
  typedef Null<T> Supported;
  typedef T Unsupported;
};

// For wide-character formatters (Char == wchar_t) wide arguments are
// Supported and the Unsupported overloads become non-viable.
template <typename T>
struct WCharHelper<T, wchar_t> {
  typedef T Supported;
  typedef Null<T> Unsupported;
};
// Distinct-size result types for the sizeof-based detection below.
typedef char Yes[1];
typedef char No[2];

template <typename T>
T &get();

// These are non-members to workaround an overload resolution bug in bcc32.
Yes &convert(fmt::ULongLong);
No &convert(...);

template <typename T, bool ENABLE_CONVERSION>
struct ConvertToIntImpl {
  enum { value = ENABLE_CONVERSION };
};

template <typename T, bool ENABLE_CONVERSION>
struct ConvertToIntImpl2 {
  enum { value = false };
};

template <typename T>
struct ConvertToIntImpl2<T, true> {
  enum {
    // Don't convert numeric types.
    value = ConvertToIntImpl<T, !std::numeric_limits<T>::is_specialized>::value
  };
};

// ConvertToInt<T>::value is nonzero iff T is implicitly convertible to an
// integer (detected via the sizeof trick on convert()) and is not itself
// a numeric type.
template <typename T>
struct ConvertToInt {
  enum {
    enable_conversion = sizeof(fmt::internal::convert(get<T>())) == sizeof(Yes)
  };
  enum { value = ConvertToIntImpl2<T, enable_conversion>::value };
};

#define FMT_DISABLE_CONVERSION_TO_INT(Type) \
  template <> \
  struct ConvertToInt<Type> { enum { value = 0 }; }

// Silence warnings about converting float to int.
FMT_DISABLE_CONVERSION_TO_INT(float);
FMT_DISABLE_CONVERSION_TO_INT(double);
FMT_DISABLE_CONVERSION_TO_INT(long double);
// A substitute for std::enable_if for pre-C++11 compilers.
template <bool B, class T = void>
struct EnableIf {};
template <class T>
struct EnableIf<true, T> { typedef T type; };

// A substitute for std::conditional for pre-C++11 compilers.
template <bool B, class T, class F>
struct Conditional { typedef T type; };
template <class T, class F>
struct Conditional<false, T, F> { typedef F type; };

// For bcc32 which doesn't understand ! in template arguments.
template <bool>
struct Not { enum { value = 0 }; };
template <>
struct Not<false> { enum { value = 1 }; };

// A trait whose value is always false; used to make static assertions
// dependent on a template parameter (see format_arg below).
template <typename T>
struct FalseType { enum { value = 0 }; };

// SFINAE helper: instantiable only when the non-type template argument is
// valid, used to detect the presence of a member (see thousands_sep).
template <typename T, T> struct LConvCheck {
  LConvCheck(int) {}
};
// Returns the thousands separator for the current locale.
// We check if ``lconv`` contains ``thousands_sep`` because on Android
// ``lconv`` is stubbed as an empty struct.
template <typename LConv>
inline StringRef thousands_sep(
    LConv *lc, LConvCheck<char *LConv::*, &LConv::thousands_sep> = 0) {
  return lc->thousands_sep;
}

// Fallback overload selected when lconv has no thousands_sep member.
inline fmt::StringRef thousands_sep(...) { return ""; }
#define FMT_CONCAT(a, b) a##b
#if FMT_GCC_VERSION >= 303
# define FMT_UNUSED __attribute__((unused))
#else
# define FMT_UNUSED
#endif
#ifndef FMT_USE_STATIC_ASSERT
# define FMT_USE_STATIC_ASSERT 0
#endif
#if FMT_USE_STATIC_ASSERT || FMT_HAS_FEATURE(cxx_static_assert) || \
(FMT_GCC_VERSION >= 403 && FMT_HAS_GXX_CXX11) || _MSC_VER >= 1600
# define FMT_STATIC_ASSERT(cond, message) static_assert(cond, message)
#else
# define FMT_CONCAT_(a, b) FMT_CONCAT(a, b)
# define FMT_STATIC_ASSERT(cond, message) \
typedef int FMT_CONCAT_(Assert, __LINE__)[(cond) ? 1 : -1] FMT_UNUSED
#endif
// Fallback overload selected when no format_arg overload exists for the
// argument type; instantiating it always fails the static assertion with
// a helpful message.
template <typename Formatter>
void format_arg(Formatter&, ...) {
  FMT_STATIC_ASSERT(FalseType<Formatter>::value,
    "Cannot format argument. To enable the use of ostream "
    "operator<< include fmt/ostream.h. Otherwise provide "
    "an overload of format_arg.");
}
// Makes an Arg object from any type. Each constructor stores the argument in
// the appropriate Value union member; the matching static type() function
// reports the corresponding Arg::Type tag for packing into ArgList::types_.
template <typename Formatter>
class MakeValue : public Arg {
 public:
  typedef typename Formatter::Char Char;

 private:
  // The following two methods are private to disallow formatting of
  // arbitrary pointers. If you want to output a pointer cast it to
  // "void *" or "const void *". In particular, this forbids formatting
  // of "[const] volatile char *" which is printed as bool by iostreams.
  // Do not implement!
  template <typename T>
  MakeValue(const T *value);
  template <typename T>
  MakeValue(T *value);

  // The following methods are private to disallow formatting of wide
  // characters and strings into narrow strings as in
  //   fmt::format("{}", L"test");
  // To fix this, use a wide format string: fmt::format(L"{}", L"test").
#if !FMT_MSC_VER || defined(_NATIVE_WCHAR_T_DEFINED)
  MakeValue(typename WCharHelper<wchar_t, Char>::Unsupported);
#endif
  MakeValue(typename WCharHelper<wchar_t *, Char>::Unsupported);
  MakeValue(typename WCharHelper<const wchar_t *, Char>::Unsupported);
  MakeValue(typename WCharHelper<const std::wstring &, Char>::Unsupported);
#if FMT_HAS_STRING_VIEW
  MakeValue(typename WCharHelper<const std::wstring_view &, Char>::Unsupported);
#endif
#if FMT_HAS_EXPERIMENTAL_STRING_VIEW
  MakeValue(typename WCharHelper<const std::experimental::wstring_view &, Char>::Unsupported);
#endif
  MakeValue(typename WCharHelper<WStringRef, Char>::Unsupported);

  // Stores a narrow string in the `string` union member.
  void set_string(StringRef str) {
    string.value = str.data();
    string.size = str.size();
  }

  // Stores a wide string in the `wstring` union member.
  void set_string(WStringRef str) {
    wstring.value = str.data();
    wstring.size = str.size();
  }

  // Formats an argument of a custom type, such as a user-defined class.
  template <typename T>
  static void format_custom_arg(
      void *formatter, const void *arg, void *format_str_ptr) {
    format_arg(*static_cast<Formatter*>(formatter),
               *static_cast<const Char**>(format_str_ptr),
               *static_cast<const T*>(arg));
  }

 public:
  MakeValue() {}

#define FMT_MAKE_VALUE_(Type, field, TYPE, rhs) \
  MakeValue(Type value) { field = rhs; } \
  static uint64_t type(Type) { return Arg::TYPE; }

#define FMT_MAKE_VALUE(Type, field, TYPE) \
  FMT_MAKE_VALUE_(Type, field, TYPE, value)

  FMT_MAKE_VALUE(bool, int_value, BOOL)
  FMT_MAKE_VALUE(short, int_value, INT)
  FMT_MAKE_VALUE(unsigned short, uint_value, UINT)
  FMT_MAKE_VALUE(int, int_value, INT)
  FMT_MAKE_VALUE(unsigned, uint_value, UINT)

  MakeValue(long value) {
    // To minimize the number of types we need to deal with, long is
    // translated either to int or to long long depending on its size.
    if (const_check(sizeof(long) == sizeof(int)))
      int_value = static_cast<int>(value);
    else
      long_long_value = value;
  }
  static uint64_t type(long) {
    return sizeof(long) == sizeof(int) ? Arg::INT : Arg::LONG_LONG;
  }

  MakeValue(unsigned long value) {
    // Same size-based mapping as for long.
    if (const_check(sizeof(unsigned long) == sizeof(unsigned)))
      uint_value = static_cast<unsigned>(value);
    else
      ulong_long_value = value;
  }
  static uint64_t type(unsigned long) {
    return sizeof(unsigned long) == sizeof(unsigned) ?
          Arg::UINT : Arg::ULONG_LONG;
  }

  FMT_MAKE_VALUE(LongLong, long_long_value, LONG_LONG)
  FMT_MAKE_VALUE(ULongLong, ulong_long_value, ULONG_LONG)
  FMT_MAKE_VALUE(float, double_value, DOUBLE)
  FMT_MAKE_VALUE(double, double_value, DOUBLE)
  FMT_MAKE_VALUE(long double, long_double_value, LONG_DOUBLE)
  FMT_MAKE_VALUE(signed char, int_value, INT)
  FMT_MAKE_VALUE(unsigned char, uint_value, UINT)
  FMT_MAKE_VALUE(char, int_value, CHAR)

#if __cplusplus >= 201103L
  // C++11: accept enum types that are implicitly convertible to int.
  template <
    typename T,
    typename = typename std::enable_if<
      std::is_enum<T>::value && ConvertToInt<T>::value>::type>
  MakeValue(T value) { int_value = value; }

  template <
    typename T,
    typename = typename std::enable_if<
      std::is_enum<T>::value && ConvertToInt<T>::value>::type>
  static uint64_t type(T) { return Arg::INT; }
#endif

#if !defined(_MSC_VER) || defined(_NATIVE_WCHAR_T_DEFINED)
  MakeValue(typename WCharHelper<wchar_t, Char>::Supported value) {
    int_value = value;
  }
  static uint64_t type(wchar_t) { return Arg::CHAR; }
#endif

#define FMT_MAKE_STR_VALUE(Type, TYPE) \
  MakeValue(Type value) { set_string(value); } \
  static uint64_t type(Type) { return Arg::TYPE; }

  FMT_MAKE_VALUE(char *, string.value, CSTRING)
  FMT_MAKE_VALUE(const char *, string.value, CSTRING)
  FMT_MAKE_VALUE(signed char *, sstring.value, CSTRING)
  FMT_MAKE_VALUE(const signed char *, sstring.value, CSTRING)
  FMT_MAKE_VALUE(unsigned char *, ustring.value, CSTRING)
  FMT_MAKE_VALUE(const unsigned char *, ustring.value, CSTRING)
  FMT_MAKE_STR_VALUE(const std::string &, STRING)
#if FMT_HAS_STRING_VIEW
  FMT_MAKE_STR_VALUE(const std::string_view &, STRING)
#endif
#if FMT_HAS_EXPERIMENTAL_STRING_VIEW
  FMT_MAKE_STR_VALUE(const std::experimental::string_view &, STRING)
#endif
  FMT_MAKE_STR_VALUE(StringRef, STRING)
  FMT_MAKE_VALUE_(CStringRef, string.value, CSTRING, value.c_str())

#define FMT_MAKE_WSTR_VALUE(Type, TYPE) \
  MakeValue(typename WCharHelper<Type, Char>::Supported value) { \
    set_string(value); \
  } \
  static uint64_t type(Type) { return Arg::TYPE; }

  FMT_MAKE_WSTR_VALUE(wchar_t *, WSTRING)
  FMT_MAKE_WSTR_VALUE(const wchar_t *, WSTRING)
  FMT_MAKE_WSTR_VALUE(const std::wstring &, WSTRING)
#if FMT_HAS_STRING_VIEW
  FMT_MAKE_WSTR_VALUE(const std::wstring_view &, WSTRING)
#endif
#if FMT_HAS_EXPERIMENTAL_STRING_VIEW
  FMT_MAKE_WSTR_VALUE(const std::experimental::wstring_view &, WSTRING)
#endif
  FMT_MAKE_WSTR_VALUE(WStringRef, WSTRING)

  FMT_MAKE_VALUE(void *, pointer, POINTER)
  FMT_MAKE_VALUE(const void *, pointer, POINTER)

  // Any other type is stored as a custom argument: a type-erased pointer
  // plus a formatting function instantiated for T.
  template <typename T>
  MakeValue(const T &value,
            typename EnableIf<Not<
            ConvertToInt<T>::value>::value, int>::type = 0) {
    custom.value = &value;
    custom.format = &format_custom_arg<T>;
  }
  template <typename T>
  static typename EnableIf<Not<ConvertToInt<T>::value>::value, uint64_t>::type
      type(const T &) {
    return Arg::CUSTOM;
  }

  // Additional template param `Char_` is needed here because make_type always
  // uses char.
  template <typename Char_>
  MakeValue(const NamedArg<Char_> &value) { pointer = &value; }
  template <typename Char_, typename T>
  MakeValue(const NamedArgWithType<Char_, T> &value) { pointer = &value; }

  template <typename Char_>
  static uint64_t type(const NamedArg<Char_> &) { return Arg::NAMED_ARG; }
  template <typename Char_, typename T>
  static uint64_t type(const NamedArgWithType<Char_, T> &) { return Arg::NAMED_ARG; }
};
// Makes a complete Arg (value plus type tag) from any type, combining
// MakeValue's value storage with its static type() classification.
template <typename Formatter>
class MakeArg : public Arg {
 public:
  MakeArg() {
    type = Arg::NONE;
  }

  template <typename T>
  MakeArg(const T &value)
  : Arg(MakeValue<Formatter>(value)) {
    type = static_cast<Arg::Type>(MakeValue<Formatter>::type(value));
  }
};
// A named formatting argument: an Arg together with its name.
template <typename Char>
struct NamedArg : Arg {
  BasicStringRef<Char> name;

  template <typename T>
  NamedArg(BasicStringRef<Char> argname, const T &value)
  : Arg(MakeArg< BasicFormatter<Char> >(value)), name(argname) {}
};

// A NamedArg that additionally carries the value type T in its own type.
template <typename Char, typename T>
struct NamedArgWithType : NamedArg<Char> {
  NamedArgWithType(BasicStringRef<Char> argname, const T &value)
  : NamedArg<Char>(argname, value) {}
};
// Common base for fmt's runtime errors; not usable directly since the
// constructors are protected.
class RuntimeError : public std::runtime_error {
 protected:
  RuntimeError() : std::runtime_error("") {}
  RuntimeError(const RuntimeError &rerr) : std::runtime_error(rerr) {}
  // Out-of-line (FMT_API) destructor so the vtable is emitted in one place.
  FMT_API ~RuntimeError() FMT_DTOR_NOEXCEPT FMT_OVERRIDE;
};
template <typename Char>
class ArgMap;
} // namespace internal
/** An argument list. */
class ArgList {
 private:
  // To reduce compiled code size per formatting function call, types of first
  // MAX_PACKED_ARGS arguments are passed in the types_ field
  // (4 bits per argument; see the static type() below).
  uint64_t types_;
  union {
    // If the number of arguments is less than MAX_PACKED_ARGS, the argument
    // values are stored in values_, otherwise they are stored in args_.
    // This is done to reduce compiled code size as storing larger objects
    // may require more code (at least on x86-64) even if the same amount of
    // data is actually copied to stack. It saves ~10% on the bloat test.
    const internal::Value *values_;
    const internal::Arg *args_;
  };

  // Returns the type tag of the argument at the given index.
  internal::Arg::Type type(unsigned index) const {
    return type(types_, index);
  }

  template <typename Char>
  friend class internal::ArgMap;

 public:
  // Maximum number of arguments with packed types.
  enum { MAX_PACKED_ARGS = 16 };

  ArgList() : types_(0) {}

  ArgList(ULongLong types, const internal::Value *values)
    : types_(types), values_(values) {}
  ArgList(ULongLong types, const internal::Arg *args)
    : types_(types), args_(args) {}

  uint64_t types() const { return types_; }

  /** Returns the argument at specified index. */
  internal::Arg operator[](unsigned index) const {
    using internal::Arg;
    Arg arg;
    // If the last packed type slot is NONE, there are fewer than
    // MAX_PACKED_ARGS arguments and values_ is the active union member.
    bool use_values = type(MAX_PACKED_ARGS - 1) == Arg::NONE;
    if (index < MAX_PACKED_ARGS) {
      Arg::Type arg_type = type(index);
      internal::Value &val = arg;
      if (arg_type != Arg::NONE)
        val = use_values ? values_[index] : args_[index];
      arg.type = arg_type;
      return arg;
    }
    if (use_values) {
      // The index is greater than the number of arguments that can be stored
      // in values, so return a "none" argument.
      arg.type = Arg::NONE;
      return arg;
    }
    // args_ is terminated by a NONE entry; stop there if index is past it.
    for (unsigned i = MAX_PACKED_ARGS; i <= index; ++i) {
      if (args_[i].type == Arg::NONE)
        return args_[i];
    }
    return args_[index];
  }

  // Extracts the 4-bit type code for `index` from a packed types value.
  static internal::Arg::Type type(uint64_t types, unsigned index) {
    unsigned shift = index * 4;
    uint64_t mask = 0xf;
    return static_cast<internal::Arg::Type>(
        (types & (mask << shift)) >> shift);
  }
};
// Dispatches a call to the Impl subclass (CRTP static dispatch — no vtable).
#define FMT_DISPATCH(call) static_cast<Impl*>(this)->call
/**
  \rst
  An argument visitor based on the `curiously recurring template pattern
  <http://en.wikipedia.org/wiki/Curiously_recurring_template_pattern>`_.
  To use `~fmt::ArgVisitor` define a subclass that implements some or all of the
  visit methods with the same signatures as the methods in `~fmt::ArgVisitor`,
  for example, `~fmt::ArgVisitor::visit_int()`.
  Pass the subclass as the *Impl* template parameter. Then calling
  `~fmt::ArgVisitor::visit` for some argument will dispatch to a visit method
  specific to the argument type. For example, if the argument type is
  ``double`` then the `~fmt::ArgVisitor::visit_double()` method of a subclass
  will be called. If the subclass doesn't contain a method with this signature,
  then a corresponding method of `~fmt::ArgVisitor` will be called.
  **Example**::
    class MyArgVisitor : public fmt::ArgVisitor<MyArgVisitor, void> {
     public:
      void visit_int(int value) { fmt::print("{}", value); }
      void visit_double(double value) { fmt::print("{}", value ); }
    };
  \endrst
 */
template <typename Impl, typename Result>
class ArgVisitor {
 private:
  typedef internal::Arg Arg;
 public:
  // Hook called when an argument type has no specific handler; the default
  // does nothing, subclasses may override to report an error.
  void report_unhandled_arg() {}
  // Fallback for argument types the Impl does not handle: notifies via
  // report_unhandled_arg() and returns a default-constructed Result.
  Result visit_unhandled_arg() {
    FMT_DISPATCH(report_unhandled_arg());
    return Result();
  }
  /** Visits an ``int`` argument. **/
  Result visit_int(int value) {
    return FMT_DISPATCH(visit_any_int(value));
  }
  /** Visits a ``long long`` argument. **/
  Result visit_long_long(LongLong value) {
    return FMT_DISPATCH(visit_any_int(value));
  }
  /** Visits an ``unsigned`` argument. **/
  Result visit_uint(unsigned value) {
    return FMT_DISPATCH(visit_any_int(value));
  }
  /** Visits an ``unsigned long long`` argument. **/
  Result visit_ulong_long(ULongLong value) {
    return FMT_DISPATCH(visit_any_int(value));
  }
  /** Visits a ``bool`` argument. **/
  Result visit_bool(bool value) {
    return FMT_DISPATCH(visit_any_int(value));
  }
  /** Visits a ``char`` or ``wchar_t`` argument. **/
  Result visit_char(int value) {
    return FMT_DISPATCH(visit_any_int(value));
  }
  /** Visits an argument of any integral type. **/
  template <typename T>
  Result visit_any_int(T) {
    return FMT_DISPATCH(visit_unhandled_arg());
  }
  /** Visits a ``double`` argument. **/
  Result visit_double(double value) {
    return FMT_DISPATCH(visit_any_double(value));
  }
  /** Visits a ``long double`` argument. **/
  Result visit_long_double(long double value) {
    return FMT_DISPATCH(visit_any_double(value));
  }
  /** Visits a ``double`` or ``long double`` argument. **/
  template <typename T>
  Result visit_any_double(T) {
    return FMT_DISPATCH(visit_unhandled_arg());
  }
  /** Visits a null-terminated C string (``const char *``) argument. **/
  Result visit_cstring(const char *) {
    return FMT_DISPATCH(visit_unhandled_arg());
  }
  /** Visits a string argument. **/
  Result visit_string(Arg::StringValue<char>) {
    return FMT_DISPATCH(visit_unhandled_arg());
  }
  /** Visits a wide string argument. **/
  Result visit_wstring(Arg::StringValue<wchar_t>) {
    return FMT_DISPATCH(visit_unhandled_arg());
  }
  /** Visits a pointer argument. **/
  Result visit_pointer(const void *) {
    return FMT_DISPATCH(visit_unhandled_arg());
  }
  /** Visits an argument of a custom (user-defined) type. **/
  Result visit_custom(Arg::CustomValue) {
    return FMT_DISPATCH(visit_unhandled_arg());
  }
  /**
    \rst
    Visits an argument dispatching to the appropriate visit method based on
    the argument type. For example, if the argument type is ``double`` then
    the `~fmt::ArgVisitor::visit_double()` method of the *Impl* class will be
    called.
    \endrst
   */
  Result visit(const Arg &arg) {
    switch (arg.type) {
    // NONE (list terminator) and NAMED_ARG (resolved earlier by ArgMap)
    // must never reach the visitor.
    case Arg::NONE:
    case Arg::NAMED_ARG:
      FMT_ASSERT(false, "invalid argument type");
      break;
    case Arg::INT:
      return FMT_DISPATCH(visit_int(arg.int_value));
    case Arg::UINT:
      return FMT_DISPATCH(visit_uint(arg.uint_value));
    case Arg::LONG_LONG:
      return FMT_DISPATCH(visit_long_long(arg.long_long_value));
    case Arg::ULONG_LONG:
      return FMT_DISPATCH(visit_ulong_long(arg.ulong_long_value));
    case Arg::BOOL:
      return FMT_DISPATCH(visit_bool(arg.int_value != 0));
    case Arg::CHAR:
      return FMT_DISPATCH(visit_char(arg.int_value));
    case Arg::DOUBLE:
      return FMT_DISPATCH(visit_double(arg.double_value));
    case Arg::LONG_DOUBLE:
      return FMT_DISPATCH(visit_long_double(arg.long_double_value));
    case Arg::CSTRING:
      return FMT_DISPATCH(visit_cstring(arg.string.value));
    case Arg::STRING:
      return FMT_DISPATCH(visit_string(arg.string));
    case Arg::WSTRING:
      return FMT_DISPATCH(visit_wstring(arg.wstring));
    case Arg::POINTER:
      return FMT_DISPATCH(visit_pointer(arg.pointer));
    case Arg::CUSTOM:
      return FMT_DISPATCH(visit_custom(arg.custom));
    }
    return Result();
  }
};
// Alignment options for padded output.
enum Alignment {
  ALIGN_DEFAULT, ALIGN_LEFT, ALIGN_RIGHT, ALIGN_CENTER, ALIGN_NUMERIC
};
// Format flag bit values (combined in FormatSpec::flags_).
enum {
  SIGN_FLAG = 1, PLUS_FLAG = 2, MINUS_FLAG = 4, HASH_FLAG = 8,
  CHAR_FLAG = 0x10 // Argument has char type - used in error reporting.
};
// An empty format specifier.
struct EmptySpec {};

// A compile-time type specifier. The conversion type character is baked in
// as the template parameter TYPE; every other property is a fixed default,
// so instances carry no state.
template <char TYPE>
struct TypeSpec : EmptySpec {
  char type() const { return TYPE; }
  char type_prefix() const { return TYPE; }
  char fill() const { return ' '; }
  bool flag(unsigned) const { return false; }
  unsigned width() const { return 0; }
  int precision() const { return -1; }
  Alignment align() const { return ALIGN_DEFAULT; }
};
// A width specifier: minimum field width plus the fill character.
// The fill is always stored as wchar_t and narrowed to char when needed,
// which avoids having two specializations of WidthSpec and its subclasses.
struct WidthSpec {
  unsigned width_;
  wchar_t fill_;

  WidthSpec(unsigned w, wchar_t f) : width_(w), fill_(f) {}

  // Accessors for the stored width and fill character.
  wchar_t fill() const { return fill_; }
  unsigned width() const { return width_; }
};
// An alignment specifier: a width/fill pair plus the requested alignment.
// precision() is fixed at -1 (none), letting AlignSpec stand in where only
// padding matters.
struct AlignSpec : WidthSpec {
  Alignment align_;

  AlignSpec(unsigned w, wchar_t f, Alignment a = ALIGN_DEFAULT)
  : WidthSpec(w, f), align_(a) {}

  int precision() const { return -1; }
  Alignment align() const { return align_; }
};
// An alignment-and-type specifier: padding behavior inherited from
// AlignSpec plus a compile-time conversion type; flags are not supported.
template <char TYPE>
struct AlignTypeSpec : AlignSpec {
  AlignTypeSpec(unsigned w, wchar_t f) : AlignSpec(w, f) {}

  char type() const { return TYPE; }
  char type_prefix() const { return TYPE; }
  bool flag(unsigned) const { return false; }
};
// A full runtime format specifier: alignment, width and fill (inherited)
// together with flags, precision and the conversion type, all carried as
// data members so they can be parsed from a format string at run time.
struct FormatSpec : AlignSpec {
  unsigned flags_;
  int precision_;
  char type_;

  FormatSpec(unsigned width = 0, char type = 0, wchar_t fill = ' ')
  : AlignSpec(width, fill), flags_(0), precision_(-1), type_(type) {}

  char type() const { return type_; }
  char type_prefix() const { return type_; }
  int precision() const { return precision_; }
  // Tests whether the given flag bit(s) are set.
  bool flag(unsigned f) const { return (flags_ & f) != 0; }
};
// An integer format specifier: wraps a value of integral type T together
// with the spec (inherited from SpecT) that describes how to render it.
template <typename T, typename SpecT = TypeSpec<0>, typename Char = char>
class IntFormatSpec : public SpecT {
 private:
  T value_;

 public:
  IntFormatSpec(T val, const SpecT &spec = SpecT())
  : SpecT(spec), value_(val) {}

  // Returns the wrapped integer value.
  T value() const { return value_; }
};
// A string format specifier: pairs a C-string pointer with padding
// information. The string is not owned and must outlive the spec.
template <typename Char>
class StrFormatSpec : public AlignSpec {
 private:
  const Char *str_;

 public:
  template <typename FillChar>
  StrFormatSpec(const Char *str, unsigned width, FillChar fill)
  : AlignSpec(width, fill), str_(str) {
    // Compile-time check that FillChar is convertible to Char (rejects,
    // for example, a wide fill character for a narrow stream).
    internal::CharTraits<Char>::convert(FillChar());
  }

  const Char *str() const { return str_; }
};
// Convenience factories producing IntFormatSpec wrappers for non-decimal
// bases and padding; defined per integer type by FMT_DEFINE_INT_FORMATTERS
// below.
/**
  Returns an integer format specifier to format the value in base 2.
 */
IntFormatSpec<int, TypeSpec<'b'> > bin(int value);
/**
  Returns an integer format specifier to format the value in base 8.
 */
IntFormatSpec<int, TypeSpec<'o'> > oct(int value);
/**
  Returns an integer format specifier to format the value in base 16 using
  lower-case letters for the digits above 9.
 */
IntFormatSpec<int, TypeSpec<'x'> > hex(int value);
/**
  Returns an integer formatter format specifier to format in base 16 using
  upper-case letters for the digits above 9.
 */
IntFormatSpec<int, TypeSpec<'X'> > hexu(int value);
/**
  \rst
  Returns an integer format specifier to pad the formatted argument with the
  fill character to the specified width using the default (right) numeric
  alignment.
  **Example**::
    MemoryWriter out;
    out << pad(hex(0xcafe), 8, '0');
    // out.str() == "0000cafe"
  \endrst
 */
template <char TYPE_CODE, typename Char>
IntFormatSpec<int, AlignTypeSpec<TYPE_CODE>, Char> pad(
    int value, unsigned width, Char fill = ' ');
// Generates, for the given integer TYPE, the bin/oct/hex/hexu factories and
// the pad overloads (with and without an explicit fill character).
#define FMT_DEFINE_INT_FORMATTERS(TYPE) \
inline IntFormatSpec<TYPE, TypeSpec<'b'> > bin(TYPE value) { \
  return IntFormatSpec<TYPE, TypeSpec<'b'> >(value, TypeSpec<'b'>()); \
} \
 \
inline IntFormatSpec<TYPE, TypeSpec<'o'> > oct(TYPE value) { \
  return IntFormatSpec<TYPE, TypeSpec<'o'> >(value, TypeSpec<'o'>()); \
} \
 \
inline IntFormatSpec<TYPE, TypeSpec<'x'> > hex(TYPE value) { \
  return IntFormatSpec<TYPE, TypeSpec<'x'> >(value, TypeSpec<'x'>()); \
} \
 \
inline IntFormatSpec<TYPE, TypeSpec<'X'> > hexu(TYPE value) { \
  return IntFormatSpec<TYPE, TypeSpec<'X'> >(value, TypeSpec<'X'>()); \
} \
 \
template <char TYPE_CODE> \
inline IntFormatSpec<TYPE, AlignTypeSpec<TYPE_CODE> > pad( \
    IntFormatSpec<TYPE, TypeSpec<TYPE_CODE> > f, unsigned width) { \
  return IntFormatSpec<TYPE, AlignTypeSpec<TYPE_CODE> >( \
      f.value(), AlignTypeSpec<TYPE_CODE>(width, ' ')); \
} \
 \
/* For compatibility with older compilers we provide two overloads for pad, */ \
/* one that takes a fill character and one that doesn't. In the future this */ \
/* can be replaced with one overload making the template argument Char */ \
/* default to char (C++11). */ \
template <char TYPE_CODE, typename Char> \
inline IntFormatSpec<TYPE, AlignTypeSpec<TYPE_CODE>, Char> pad( \
    IntFormatSpec<TYPE, TypeSpec<TYPE_CODE>, Char> f, \
    unsigned width, Char fill) { \
  return IntFormatSpec<TYPE, AlignTypeSpec<TYPE_CODE>, Char>( \
      f.value(), AlignTypeSpec<TYPE_CODE>(width, fill)); \
} \
 \
inline IntFormatSpec<TYPE, AlignTypeSpec<0> > pad( \
    TYPE value, unsigned width) { \
  return IntFormatSpec<TYPE, AlignTypeSpec<0> >( \
      value, AlignTypeSpec<0>(width, ' ')); \
} \
 \
template <typename Char> \
inline IntFormatSpec<TYPE, AlignTypeSpec<0>, Char> pad( \
    TYPE value, unsigned width, Char fill) { \
  return IntFormatSpec<TYPE, AlignTypeSpec<0>, Char>( \
      value, AlignTypeSpec<0>(width, fill)); \
}
// Instantiate the formatters for all supported built-in integer types.
FMT_DEFINE_INT_FORMATTERS(int)
FMT_DEFINE_INT_FORMATTERS(long)
FMT_DEFINE_INT_FORMATTERS(unsigned)
FMT_DEFINE_INT_FORMATTERS(unsigned long)
FMT_DEFINE_INT_FORMATTERS(LongLong)
FMT_DEFINE_INT_FORMATTERS(ULongLong)
/**
  \rst
  Returns a string formatter that pads the formatted argument with the fill
  character to the specified width using the default (left) string alignment.
  **Example**::
    std::string s = str(MemoryWriter() << pad("abc", 8));
    // s == "abc     "
  \endrst
 */
template <typename Char>
inline StrFormatSpec<Char> pad(
    const Char *str, unsigned width, Char fill = ' ') {
  return StrFormatSpec<Char>(str, width, fill);
}
// Overload for wide strings that accepts a narrow (char) fill character.
inline StrFormatSpec<wchar_t> pad(
    const wchar_t *str, unsigned width, char fill = ' ') {
  return StrFormatSpec<wchar_t>(str, width, fill);
}
namespace internal {
// Maps argument names to their values, used to resolve named arguments such
// as "{name}". Populated by init() from an ArgList.
template <typename Char>
class ArgMap {
 private:
  typedef std::vector<
    std::pair<fmt::BasicStringRef<Char>, internal::Arg> > MapType;
  typedef typename MapType::value_type Pair;
  MapType map_;
 public:
  // Collects all NAMED_ARG entries from args into the map (defined below).
  void init(const ArgList &args);
  // Returns the value for name, or null if the name is not present.
  const internal::Arg *find(const fmt::BasicStringRef<Char> &name) const {
    // The list is unsorted, so just return the first matching name.
    for (typename MapType::const_iterator it = map_.begin(), end = map_.end();
         it != end; ++it) {
      if (it->first == name)
        return &it->second;
    }
    return FMT_NULL;
  }
};
// Scans args for NAMED_ARG entries and records (name, value) pairs.
// Handles both ArgList representations: the packed Value array and the
// NONE-terminated Arg array.
template <typename Char>
void ArgMap<Char>::init(const ArgList &args) {
  // Already initialized - nothing to do.
  if (!map_.empty())
    return;
  typedef internal::NamedArg<Char> NamedArg;
  const NamedArg *named_arg = FMT_NULL;
  // A NONE type in the last packed slot means the packed (values_)
  // representation is in use.
  bool use_values =
      args.type(ArgList::MAX_PACKED_ARGS - 1) == internal::Arg::NONE;
  if (use_values) {
    // Walk the packed values until the NONE type terminator.
    for (unsigned i = 0;/*nothing*/; ++i) {
      internal::Arg::Type arg_type = args.type(i);
      switch (arg_type) {
      case internal::Arg::NONE:
        return;
      case internal::Arg::NAMED_ARG:
        // The stored pointer refers to the NamedArg object itself.
        named_arg = static_cast<const NamedArg*>(args.values_[i].pointer);
        map_.push_back(Pair(named_arg->name, *named_arg));
        break;
      default:
        /*nothing*/;
      }
    }
    return;  // Unreachable: the loop above only exits via return.
  }
  // Unpacked representation: first scan the packed-type prefix...
  for (unsigned i = 0; i != ArgList::MAX_PACKED_ARGS; ++i) {
    internal::Arg::Type arg_type = args.type(i);
    if (arg_type == internal::Arg::NAMED_ARG) {
      named_arg = static_cast<const NamedArg*>(args.args_[i].pointer);
      map_.push_back(Pair(named_arg->name, *named_arg));
    }
  }
  // ...then the remaining args until the NONE terminator.
  for (unsigned i = ArgList::MAX_PACKED_ARGS;/*nothing*/; ++i) {
    switch (args.args_[i].type) {
    case internal::Arg::NONE:
      return;
    case internal::Arg::NAMED_ARG:
      named_arg = static_cast<const NamedArg*>(args.args_[i].pointer);
      map_.push_back(Pair(named_arg->name, *named_arg));
      break;
    default:
      /*nothing*/;
    }
  }
}
// Base class for argument formatters: writes standard argument types to a
// BasicWriter according to a format Spec. Impl is the CRTP-derived class.
template <typename Impl, typename Char, typename Spec = fmt::FormatSpec>
class ArgFormatterBase : public ArgVisitor<Impl, void> {
 private:
  BasicWriter<Char> &writer_;
  Spec &spec_;
  FMT_DISALLOW_COPY_AND_ASSIGN(ArgFormatterBase);
  // Writes a pointer as a hash-prefixed lower-case hex integer.
  void write_pointer(const void *p) {
    spec_.flags_ = HASH_FLAG;
    spec_.type_ = 'x';
    writer_.write_int(reinterpret_cast<uintptr_t>(p), spec_);
  }
  // workaround MSVC two-phase lookup issue
  typedef internal::Arg Arg;
 protected:
  BasicWriter<Char> &writer() { return writer_; }
  Spec &spec() { return spec_; }
  // Writes a bool as the string "true"/"false".
  void write(bool value) {
    const char *str_value = value ? "true" : "false";
    Arg::StringValue<char> str = { str_value, std::strlen(str_value) };
    writer_.write_str(str, spec_);
  }
  // Writes a C string; a null pointer is written as an empty string.
  void write(const char *value) {
    Arg::StringValue<char> str = {value, value ? std::strlen(value) : 0};
    writer_.write_str(str, spec_);
  }
 public:
  typedef Spec SpecType;
  ArgFormatterBase(BasicWriter<Char> &w, Spec &s)
  : writer_(w), spec_(s) {}
  template <typename T>
  void visit_any_int(T value) { writer_.write_int(value, spec_); }
  template <typename T>
  void visit_any_double(T value) { writer_.write_double(value, spec_); }
  void visit_bool(bool value) {
    // An explicit type spec makes the bool format as an integer.
    if (spec_.type_) {
      visit_any_int(value);
      return;
    }
    write(value);
  }
  void visit_char(int value) {
    // A type spec other than 'c' means the char is formatted as an integer;
    // CHAR_FLAG is set so error reporting can mention the char origin.
    if (spec_.type_ && spec_.type_ != 'c') {
      spec_.flags_ |= CHAR_FLAG;
      writer_.write_int(value, spec_);
      return;
    }
    if (spec_.align_ == ALIGN_NUMERIC || spec_.flags_ != 0)
      FMT_THROW(FormatError("invalid format specifier for char"));
    typedef typename BasicWriter<Char>::CharPtr CharPtr;
    Char fill = internal::CharTraits<Char>::cast(spec_.fill());
    CharPtr out = CharPtr();
    const unsigned CHAR_SIZE = 1;
    // Grow the buffer to the field width and place the single character
    // according to the requested alignment, filling the rest.
    if (spec_.width_ > CHAR_SIZE) {
      out = writer_.grow_buffer(spec_.width_);
      if (spec_.align_ == ALIGN_RIGHT) {
        std::uninitialized_fill_n(out, spec_.width_ - CHAR_SIZE, fill);
        out += spec_.width_ - CHAR_SIZE;
      } else if (spec_.align_ == ALIGN_CENTER) {
        out = writer_.fill_padding(out, spec_.width_,
                                   internal::const_check(CHAR_SIZE), fill);
      } else {
        std::uninitialized_fill_n(out + CHAR_SIZE,
                                  spec_.width_ - CHAR_SIZE, fill);
      }
    } else {
      out = writer_.grow_buffer(CHAR_SIZE);
    }
    *out = internal::CharTraits<Char>::cast(value);
  }
  void visit_cstring(const char *value) {
    // A 'p' type spec formats the string's address instead of its contents.
    if (spec_.type_ == 'p')
      return write_pointer(value);
    write(value);
  }
  // Qualification with "internal" here and below is a workaround for nvcc.
  void visit_string(internal::Arg::StringValue<char> value) {
    writer_.write_str(value, spec_);
  }
  using ArgVisitor<Impl, void>::visit_wstring;
  void visit_wstring(internal::Arg::StringValue<Char> value) {
    writer_.write_str(value, spec_);
  }
  void visit_pointer(const void *value) {
    if (spec_.type_ && spec_.type_ != 'p')
      report_unknown_type(spec_.type_, "pointer");
    write_pointer(value);
  }
};
// Base class for formatters: stores the argument list and implements
// automatic ("{}") versus manual ("{0}") argument indexing, which must not
// be mixed within one format string.
class FormatterBase {
 private:
  ArgList args_;
  // >= 0: next automatic index; -1: manual indexing is in use.
  int next_arg_index_;
  // Returns the argument with specified index.
  FMT_API Arg do_get_arg(unsigned arg_index, const char *&error);
 protected:
  const ArgList &args() const { return args_; }
  explicit FormatterBase(const ArgList &args) {
    args_ = args;
    next_arg_index_ = 0;
  }
  // Returns the next argument.
  Arg next_arg(const char *&error) {
    if (next_arg_index_ >= 0)
      return do_get_arg(internal::to_unsigned(next_arg_index_++), error);
    error = "cannot switch from manual to automatic argument indexing";
    return Arg();
  }
  // Checks if manual indexing is used and returns the argument with
  // specified index.
  Arg get_arg(unsigned arg_index, const char *&error) {
    return check_no_auto_index(error) ? do_get_arg(arg_index, error) : Arg();
  }
  // Fails (setting error) if automatic indexing was already used; otherwise
  // switches the formatter into manual-indexing mode.
  bool check_no_auto_index(const char *&error) {
    if (next_arg_index_ > 0) {
      error = "cannot switch from automatic to manual argument indexing";
      return false;
    }
    next_arg_index_ = -1;
    return true;
  }
  // Writes the literal text [start, end) to the writer.
  template <typename Char>
  void write(BasicWriter<Char> &w, const Char *start, const Char *end) {
    if (start != end)
      w << BasicStringRef<Char>(start, internal::to_unsigned(end - start));
  }
};
} // namespace internal
/**
  \rst
  An argument formatter based on the `curiously recurring template pattern
  <http://en.wikipedia.org/wiki/Curiously_recurring_template_pattern>`_.
  To use `~fmt::BasicArgFormatter` define a subclass that implements some or
  all of the visit methods with the same signatures as the methods in
  `~fmt::ArgVisitor`, for example, `~fmt::ArgVisitor::visit_int()`.
  Pass the subclass as the *Impl* template parameter. When a formatting
  function processes an argument, it will dispatch to a visit method
  specific to the argument type. For example, if the argument type is
  ``double`` then the `~fmt::ArgVisitor::visit_double()` method of a subclass
  will be called. If the subclass doesn't contain a method with this signature,
  then a corresponding method of `~fmt::BasicArgFormatter` or its superclass
  will be called.
  \endrst
 */
template <typename Impl, typename Char, typename Spec = fmt::FormatSpec>
class BasicArgFormatter : public internal::ArgFormatterBase<Impl, Char, Spec> {
 private:
  // Main formatter; needed to format custom (user-defined) argument types.
  BasicFormatter<Char, Impl> &formatter_;
  // Points into the format string at the custom-type spec being parsed.
  const Char *format_;
 public:
  /**
    \rst
    Constructs an argument formatter object.
    *formatter* is a reference to the main formatter object, *spec* contains
    format specifier information for standard argument types, and *fmt* points
    to the part of the format string being parsed for custom argument types.
    \endrst
   */
  BasicArgFormatter(BasicFormatter<Char, Impl> &formatter,
                    Spec &spec, const Char *fmt)
  : internal::ArgFormatterBase<Impl, Char, Spec>(formatter.writer(), spec),
    formatter_(formatter), format_(fmt) {}
  /** Formats an argument of a custom (user-defined) type. */
  void visit_custom(internal::Arg::CustomValue c) {
    // Delegates to the type-erased format callback captured in the Arg.
    c.format(&formatter_, c.value, &format_);
  }
};
/** The default argument formatter. Adds no behavior beyond
    BasicArgFormatter; it exists to close the CRTP with a concrete type. */
template <typename Char>
class ArgFormatter :
    public BasicArgFormatter<ArgFormatter<Char>, Char, FormatSpec> {
 public:
  /** Constructs an argument formatter object. */
  ArgFormatter(BasicFormatter<Char> &formatter,
               FormatSpec &spec, const Char *fmt)
  : BasicArgFormatter<ArgFormatter<Char>,
                      Char, FormatSpec>(formatter, spec, fmt) {}
};
/** This template formats data and writes the output to a writer. */
template <typename CharType, typename ArgFormatter>
class BasicFormatter : private internal::FormatterBase {
 public:
  /** The character type for the output. */
  typedef CharType Char;
 private:
  BasicWriter<Char> &writer_;
  // Name -> argument map for named arguments (built on demand by
  // ArgMap::init, which returns early once populated).
  internal::ArgMap<Char> map_;
  FMT_DISALLOW_COPY_AND_ASSIGN(BasicFormatter);
  using internal::FormatterBase::get_arg;
  // Checks if manual indexing is used and returns the argument with
  // specified name.
  internal::Arg get_arg(BasicStringRef<Char> arg_name, const char *&error);
  // Parses argument index and returns corresponding argument.
  internal::Arg parse_arg_index(const Char *&s);
  // Parses argument name and returns corresponding argument.
  internal::Arg parse_arg_name(const Char *&s);
 public:
  /**
    \rst
    Constructs a ``BasicFormatter`` object. References to the arguments and
    the writer are stored in the formatter object so make sure they have
    appropriate lifetimes.
    \endrst
   */
  BasicFormatter(const ArgList &args, BasicWriter<Char> &w)
  : internal::FormatterBase(args), writer_(w) {}
  /** Returns a reference to the writer associated with this formatter. */
  BasicWriter<Char> &writer() { return writer_; }
  /** Formats stored arguments and writes the output to the writer. */
  void format(BasicCStringRef<Char> format_str);
  // Formats a single argument and advances format_str, a format string pointer.
  const Char *format(const Char *&format_str, const internal::Arg &arg);
};
// Generates a comma-separated list with results of applying f to
// numbers 0..n-1; e.g. FMT_GEN(3, f) expands to f(0), f(1), f(2).
# define FMT_GEN(n, f) FMT_GEN##n(f)
# define FMT_GEN1(f) f(0)
# define FMT_GEN2(f) FMT_GEN1(f), f(1)
# define FMT_GEN3(f) FMT_GEN2(f), f(2)
# define FMT_GEN4(f) FMT_GEN3(f), f(3)
# define FMT_GEN5(f) FMT_GEN4(f), f(4)
# define FMT_GEN6(f) FMT_GEN5(f), f(5)
# define FMT_GEN7(f) FMT_GEN6(f), f(6)
# define FMT_GEN8(f) FMT_GEN7(f), f(7)
# define FMT_GEN9(f) FMT_GEN8(f), f(8)
# define FMT_GEN10(f) FMT_GEN9(f), f(9)
# define FMT_GEN11(f) FMT_GEN10(f), f(10)
# define FMT_GEN12(f) FMT_GEN11(f), f(11)
# define FMT_GEN13(f) FMT_GEN12(f), f(12)
# define FMT_GEN14(f) FMT_GEN13(f), f(13)
# define FMT_GEN15(f) FMT_GEN14(f), f(14)
namespace internal {
// Returns the packed type code for an empty argument list.
inline uint64_t make_type() { return 0; }
// Returns the 4-bit type code for a single argument.
template <typename T>
inline uint64_t make_type(const T &arg) {
  return MakeValue< BasicFormatter<char> >::type(arg);
}
// Storage for N formatting arguments: packed Values when N is small enough,
// full Args (with room for a NONE terminator) otherwise.
template <std::size_t N, bool/*IsPacked*/= (N < ArgList::MAX_PACKED_ARGS)>
struct ArgArray;
template <std::size_t N>
struct ArgArray<N, true/*IsPacked*/> {
  // '+' is used to silence GCC -Wduplicated-branches warning.
  typedef Value Type[N > 0 ? N : +1];
  template <typename Formatter, typename T>
  static Value make(const T &value) {
#ifdef __clang__
    Value result = MakeValue<Formatter>(value);
    // Workaround a bug in Apple LLVM version 4.2 (clang-425.0.28) of clang:
    // https://github.com/fmtlib/fmt/issues/276
    (void)result.custom.format;
    return result;
#else
    return MakeValue<Formatter>(value);
#endif
  }
};
template <std::size_t N>
struct ArgArray<N, false/*IsPacked*/> {
  typedef Arg Type[N + 1]; // +1 for the list end Arg::NONE
  template <typename Formatter, typename T>
  static Arg make(const T &value) { return MakeArg<Formatter>(value); }
};
#if FMT_USE_VARIADIC_TEMPLATES
// Packs argument type codes 4 bits per argument, first argument in the
// lowest bits.
template <typename Arg, typename... Args>
inline uint64_t make_type(const Arg &first, const Args & ... tail) {
  return make_type(first) | (make_type(tail...) << 4);
}
#else
// Pre-C++11 fallback: a type-code wrapper plus a 15-parameter overload
// with defaulted arguments emulating the variadic version.
struct ArgType {
  uint64_t type;
  ArgType() : type(0) {}
  template <typename T>
  ArgType(const T &arg) : type(make_type(arg)) {}
};
# define FMT_ARG_TYPE_DEFAULT(n) ArgType t##n = ArgType()
inline uint64_t make_type(FMT_GEN15(FMT_ARG_TYPE_DEFAULT)) {
  return t0.type | (t1.type << 4) | (t2.type << 8) | (t3.type << 12) |
      (t4.type << 16) | (t5.type << 20) | (t6.type << 24) | (t7.type << 28) |
      (t8.type << 32) | (t9.type << 36) | (t10.type << 40) | (t11.type << 44) |
      (t12.type << 48) | (t13.type << 52) | (t14.type << 56);
}
#endif
} // namespace internal
// Helper macros used (with FMT_GEN) to declare or emulate variadic
// formatting functions and constructors.
# define FMT_MAKE_TEMPLATE_ARG(n) typename T##n
# define FMT_MAKE_ARG_TYPE(n) T##n
# define FMT_MAKE_ARG(n) const T##n &v##n
# define FMT_ASSIGN_char(n) \
  arr[n] = fmt::internal::MakeValue< fmt::BasicFormatter<char> >(v##n)
# define FMT_ASSIGN_wchar_t(n) \
  arr[n] = fmt::internal::MakeValue< fmt::BasicFormatter<wchar_t> >(v##n)
#if FMT_USE_VARIADIC_TEMPLATES
// Defines a variadic function returning void.
# define FMT_VARIADIC_VOID(func, arg_type) \
  template <typename... Args> \
  void func(arg_type arg0, const Args & ... args) { \
    typedef fmt::internal::ArgArray<sizeof...(Args)> ArgArray; \
    typename ArgArray::Type array{ \
      ArgArray::template make<fmt::BasicFormatter<Char> >(args)...}; \
    func(arg0, fmt::ArgList(fmt::internal::make_type(args...), array)); \
  }
// Defines a variadic constructor.
# define FMT_VARIADIC_CTOR(ctor, func, arg0_type, arg1_type) \
  template <typename... Args> \
  ctor(arg0_type arg0, arg1_type arg1, const Args & ... args) { \
    typedef fmt::internal::ArgArray<sizeof...(Args)> ArgArray; \
    typename ArgArray::Type array{ \
      ArgArray::template make<fmt::BasicFormatter<Char> >(args)...}; \
    func(arg0, arg1, fmt::ArgList(fmt::internal::make_type(args...), array)); \
  }
#else
# define FMT_MAKE_REF(n) \
  fmt::internal::MakeValue< fmt::BasicFormatter<Char> >(v##n)
# define FMT_MAKE_REF2(n) v##n
// Defines a wrapper for a function taking one argument of type arg_type
// and n additional arguments of arbitrary types.
# define FMT_WRAP1(func, arg_type, n) \
  template <FMT_GEN(n, FMT_MAKE_TEMPLATE_ARG)> \
  inline void func(arg_type arg1, FMT_GEN(n, FMT_MAKE_ARG)) { \
    const fmt::internal::ArgArray<n>::Type array = {FMT_GEN(n, FMT_MAKE_REF)}; \
    func(arg1, fmt::ArgList( \
      fmt::internal::make_type(FMT_GEN(n, FMT_MAKE_REF2)), array)); \
  }
// Emulates a variadic function returning void on a pre-C++11 compiler.
# define FMT_VARIADIC_VOID(func, arg_type) \
  inline void func(arg_type arg) { func(arg, fmt::ArgList()); } \
  FMT_WRAP1(func, arg_type, 1) FMT_WRAP1(func, arg_type, 2) \
  FMT_WRAP1(func, arg_type, 3) FMT_WRAP1(func, arg_type, 4) \
  FMT_WRAP1(func, arg_type, 5) FMT_WRAP1(func, arg_type, 6) \
  FMT_WRAP1(func, arg_type, 7) FMT_WRAP1(func, arg_type, 8) \
  FMT_WRAP1(func, arg_type, 9) FMT_WRAP1(func, arg_type, 10)
# define FMT_CTOR(ctor, func, arg0_type, arg1_type, n) \
  template <FMT_GEN(n, FMT_MAKE_TEMPLATE_ARG)> \
  ctor(arg0_type arg0, arg1_type arg1, FMT_GEN(n, FMT_MAKE_ARG)) { \
    const fmt::internal::ArgArray<n>::Type array = {FMT_GEN(n, FMT_MAKE_REF)}; \
    func(arg0, arg1, fmt::ArgList( \
      fmt::internal::make_type(FMT_GEN(n, FMT_MAKE_REF2)), array)); \
  }
// Emulates a variadic constructor on a pre-C++11 compiler.
# define FMT_VARIADIC_CTOR(ctor, func, arg0_type, arg1_type) \
  FMT_CTOR(ctor, func, arg0_type, arg1_type, 1) \
  FMT_CTOR(ctor, func, arg0_type, arg1_type, 2) \
  FMT_CTOR(ctor, func, arg0_type, arg1_type, 3) \
  FMT_CTOR(ctor, func, arg0_type, arg1_type, 4) \
  FMT_CTOR(ctor, func, arg0_type, arg1_type, 5) \
  FMT_CTOR(ctor, func, arg0_type, arg1_type, 6) \
  FMT_CTOR(ctor, func, arg0_type, arg1_type, 7) \
  FMT_CTOR(ctor, func, arg0_type, arg1_type, 8) \
  FMT_CTOR(ctor, func, arg0_type, arg1_type, 9) \
  FMT_CTOR(ctor, func, arg0_type, arg1_type, 10)
#endif
// Generates a comma-separated list with results of applying f to pairs
// (argument, index); e.g. FMT_FOR_EACH2(f, a, b) expands to f(a, 0), f(b, 1).
#define FMT_FOR_EACH1(f, x0) f(x0, 0)
#define FMT_FOR_EACH2(f, x0, x1) \
  FMT_FOR_EACH1(f, x0), f(x1, 1)
#define FMT_FOR_EACH3(f, x0, x1, x2) \
  FMT_FOR_EACH2(f, x0 ,x1), f(x2, 2)
#define FMT_FOR_EACH4(f, x0, x1, x2, x3) \
  FMT_FOR_EACH3(f, x0, x1, x2), f(x3, 3)
#define FMT_FOR_EACH5(f, x0, x1, x2, x3, x4) \
  FMT_FOR_EACH4(f, x0, x1, x2, x3), f(x4, 4)
#define FMT_FOR_EACH6(f, x0, x1, x2, x3, x4, x5) \
  FMT_FOR_EACH5(f, x0, x1, x2, x3, x4), f(x5, 5)
#define FMT_FOR_EACH7(f, x0, x1, x2, x3, x4, x5, x6) \
  FMT_FOR_EACH6(f, x0, x1, x2, x3, x4, x5), f(x6, 6)
#define FMT_FOR_EACH8(f, x0, x1, x2, x3, x4, x5, x6, x7) \
  FMT_FOR_EACH7(f, x0, x1, x2, x3, x4, x5, x6), f(x7, 7)
#define FMT_FOR_EACH9(f, x0, x1, x2, x3, x4, x5, x6, x7, x8) \
  FMT_FOR_EACH8(f, x0, x1, x2, x3, x4, x5, x6, x7), f(x8, 8)
#define FMT_FOR_EACH10(f, x0, x1, x2, x3, x4, x5, x6, x7, x8, x9) \
  FMT_FOR_EACH9(f, x0, x1, x2, x3, x4, x5, x6, x7, x8), f(x9, 9)
/**
  An error returned by an operating system or a language runtime,
  for example a file opening error.
 */
class SystemError : public internal::RuntimeError {
 private:
  // Formats the message and stores it together with err_code; defined in
  // the library (FMT_API).
  FMT_API void init(int err_code, CStringRef format_str, ArgList args);
 protected:
  int error_code_;
  typedef char Char; // For FMT_VARIADIC_CTOR.
  SystemError() {}
 public:
  /**
    \rst
    Constructs a :class:`fmt::SystemError` object with a description
    formatted with `fmt::format_system_error`. *message* and additional
    arguments passed into the constructor are formatted similarly to
    `fmt::format`.
    **Example**::
      // This throws a SystemError with the description
      //   cannot open file 'madeup': No such file or directory
      // or similar (system message may vary).
      const char *filename = "madeup";
      std::FILE *file = std::fopen(filename, "r");
      if (!file)
        throw fmt::SystemError(errno, "cannot open file '{}'", filename);
    \endrst
   */
  SystemError(int error_code, CStringRef message) {
    init(error_code, message, ArgList());
  }
  FMT_DEFAULTED_COPY_CTOR(SystemError)
  FMT_VARIADIC_CTOR(SystemError, init, int, CStringRef)
  FMT_API ~SystemError() FMT_DTOR_NOEXCEPT FMT_OVERRIDE;
  // Returns the original system error code.
  int error_code() const { return error_code_; }
};
/**
\rst
Formats an error returned by an operating system or a language runtime,
for example a file opening error, and writes it to *out* in the following
form:
.. parsed-literal::
*<message>*: *<system-message>*
where *<message>* is the passed message and *<system-message>* is
the system message corresponding to the error code.
*error_code* is a system error code as given by ``errno``.
If *error_code* is not a valid error code such as -1, the system message
may look like "Unknown error -1" and is platform-dependent.
\endrst
*/
FMT_API void format_system_error(fmt::Writer &out, int error_code,
fmt::StringRef message) FMT_NOEXCEPT;
/**
\rst
This template provides operations for formatting and writing data into
a character stream. The output is stored in a buffer provided by a subclass
such as :class:`fmt::BasicMemoryWriter`.
You can use one of the following typedefs for common character types:
+---------+----------------------+
| Type | Definition |
+=========+======================+
| Writer | BasicWriter<char> |
+---------+----------------------+
| WWriter | BasicWriter<wchar_t> |
+---------+----------------------+
\endrst
*/
template <typename Char>
class BasicWriter {
private:
// Output buffer.
Buffer<Char> &buffer_;
FMT_DISALLOW_COPY_AND_ASSIGN(BasicWriter);
typedef typename internal::CharTraits<Char>::CharPtr CharPtr;
#if FMT_SECURE_SCL
  // Returns pointer value. Under FMT_SECURE_SCL, CharPtr is not a raw
  // pointer; base() extracts the underlying pointer.
  static Char *get(CharPtr p) { return p.base(); }
#else
  // Returns pointer value; CharPtr is a plain Char* in this configuration.
  static Char *get(Char *p) { return p; }
#endif
// Fills the padding around the content and returns the pointer to the
// content area.
static CharPtr fill_padding(CharPtr buffer,
unsigned total_size, std::size_t content_size, wchar_t fill);
  // Grows the buffer by n characters and returns a pointer to the newly
  // allocated area (the pointer covers exactly the n new slots).
  CharPtr grow_buffer(std::size_t n) {
    std::size_t size = buffer_.size();
    buffer_.resize(size + n);
    return internal::make_ptr(&buffer_[size], n);
  }
  // Writes an unsigned decimal integer. Reserves prefix_size extra leading
  // slots (e.g. for a sign) and returns a pointer to the start of the
  // reserved region so the caller can fill the prefix in.
  template <typename UInt>
  Char *write_unsigned_decimal(UInt value, unsigned prefix_size = 0) {
    unsigned num_digits = internal::count_digits(value);
    Char *ptr = get(grow_buffer(prefix_size + num_digits));
    internal::format_decimal(ptr + prefix_size, value, num_digits);
    return ptr;
  }
  // Writes a decimal integer.
  // Negative values are negated as 0 - abs_value after conversion to
  // MainType — presumably an unsigned type (per IntTraits), which makes the
  // negation well-defined even for the most negative Int value.
  template <typename Int>
  void write_decimal(Int value) {
    typedef typename internal::IntTraits<Int>::MainType MainType;
    MainType abs_value = static_cast<MainType>(value);
    if (internal::is_negative(value)) {
      abs_value = 0 - abs_value;
      // Reserve one extra slot in front of the digits and put the sign there.
      *write_unsigned_decimal(abs_value, 1) = '-';
    } else {
      write_unsigned_decimal(abs_value, 0);
    }
  }
  // Prepare a buffer for integer formatting.
  // Overload for EmptySpec: no padding or alignment, just prefix + digits.
  // Returns a pointer to the last slot of the region — presumably because
  // digits are filled backwards by the caller (TODO confirm at call sites).
  CharPtr prepare_int_buffer(unsigned num_digits,
      const EmptySpec &, const char *prefix, unsigned prefix_size) {
    unsigned size = prefix_size + num_digits;
    CharPtr p = grow_buffer(size);
    std::uninitialized_copy(prefix, prefix + prefix_size, p);
    return p + size - 1;
  }
  // General overload honoring a full format spec (defined out of line).
  template <typename Spec>
  CharPtr prepare_int_buffer(unsigned num_digits,
    const Spec &spec, const char *prefix, unsigned prefix_size);
// Formats an integer.
template <typename T, typename Spec>
void write_int(T value, Spec spec);
// Formats a floating-point number (double or long double).
template <typename T, typename Spec>
void write_double(T value, const Spec &spec);
// Writes a formatted string.
template <typename StrChar>
CharPtr write_str(const StrChar *s, std::size_t size, const AlignSpec &spec);
template <typename StrChar, typename Spec>
void write_str(const internal::Arg::StringValue<StrChar> &str,
const Spec &spec);
// This following methods are private to disallow writing wide characters
// and strings to a char stream. If you want to print a wide string as a
// pointer as std::ostream does, cast it to const void*.
// Do not implement!
void operator<<(typename internal::WCharHelper<wchar_t, Char>::Unsupported);
void operator<<(
typename internal::WCharHelper<const wchar_t *, Char>::Unsupported);
// Appends floating-point length specifier to the format string.
// The second argument is only used for overload resolution: printf
// requires the 'L' length modifier for long double arguments.
void append_float_length(Char *&format_ptr, long double) {
*format_ptr++ = 'L';
}
// float/double need no length modifier, so this overload is a no-op.
template<typename T>
void append_float_length(Char *&, T) {}
// Friends that need access to the low-level write helpers above.
template <typename Impl, typename Char_, typename Spec_>
friend class internal::ArgFormatterBase;
template <typename Impl, typename Char_, typename Spec_>
friend class BasicPrintfArgFormatter;
protected:
/**
Constructs a ``BasicWriter`` object. The writer does not own the buffer;
the caller (e.g. a derived class) must keep it alive.
*/
explicit BasicWriter(Buffer<Char> &b) : buffer_(b) {}
public:
/**
\rst
Destroys a ``BasicWriter`` object.
\endrst
*/
virtual ~BasicWriter() {}
/**
Returns the total number of characters written.
*/
std::size_t size() const { return buffer_.size(); }
/**
Returns a pointer to the output buffer content. No terminating null
character is appended.
*/
const Char *data() const FMT_NOEXCEPT { return &buffer_[0]; }
/**
Returns a pointer to the output buffer content with terminating null
character appended.
*/
const Char *c_str() const {
std::size_t size = buffer_.size();
// reserve (not resize) keeps size() unchanged while guaranteeing room
// for the null terminator one past the end.
buffer_.reserve(size + 1);
buffer_[size] = '\0';
return &buffer_[0];
}
/**
\rst
Returns the content of the output buffer as an `std::string` (a copy).
\endrst
*/
std::basic_string<Char> str() const {
return std::basic_string<Char>(&buffer_[0], buffer_.size());
}
/**
\rst
Writes formatted data.
*args* is an argument list representing arbitrary arguments.
**Example**::
MemoryWriter out;
out.write("Current point:\n");
out.write("({:+f}, {:+f})", -3.14, 3.14);
This will write the following output to the ``out`` object:
.. code-block:: none
Current point:
(-3.140000, +3.140000)
The output can be accessed using :func:`data()`, :func:`c_str` or
:func:`str` methods.
See also :ref:`syntax`.
\endrst
*/
void write(BasicCStringRef<Char> format, ArgList args) {
BasicFormatter<Char>(args, *this).format(format);
}
// Generates variadic overloads of write() for pre-C++11 compilers.
FMT_VARIADIC_VOID(write, BasicCStringRef<Char>)
// Writes a decimal integer to the stream.
BasicWriter &operator<<(int value) {
write_decimal(value);
return *this;
}
// Unsigned types are routed through IntFormatSpec to share write_int.
BasicWriter &operator<<(unsigned value) {
return *this << IntFormatSpec<unsigned>(value);
}
BasicWriter &operator<<(long value) {
write_decimal(value);
return *this;
}
BasicWriter &operator<<(unsigned long value) {
return *this << IntFormatSpec<unsigned long>(value);
}
BasicWriter &operator<<(LongLong value) {
write_decimal(value);
return *this;
}
/**
\rst
Formats *value* and writes it to the stream.
\endrst
*/
BasicWriter &operator<<(ULongLong value) {
return *this << IntFormatSpec<ULongLong>(value);
}
// Formats using the default FormatSpec (general 'g' format).
BasicWriter &operator<<(double value) {
write_double(value, FormatSpec());
return *this;
}
/**
\rst
Formats *value* using the general format for floating-point numbers
(``'g'``) and writes it to the stream.
\endrst
*/
BasicWriter &operator<<(long double value) {
write_double(value, FormatSpec());
return *this;
}
/**
Writes a character to the stream.
*/
BasicWriter &operator<<(char value) {
buffer_.push_back(value);
return *this;
}
// Accepts wchar_t only when Char is a wide type (SFINAE via WCharHelper);
// writing a wide char to a narrow stream is rejected at compile time.
BasicWriter &operator<<(
typename internal::WCharHelper<wchar_t, Char>::Supported value) {
buffer_.push_back(value);
return *this;
}
/**
\rst
Writes *value* to the stream.
\endrst
*/
BasicWriter &operator<<(fmt::BasicStringRef<Char> value) {
const Char *str = value.data();
buffer_.append(str, str + value.size());
return *this;
}
// Narrow string into a wide stream, when Char supports it.
BasicWriter &operator<<(
typename internal::WCharHelper<StringRef, Char>::Supported value) {
const char *str = value.data();
buffer_.append(str, str + value.size());
return *this;
}
// Writes an integer wrapped with formatting information (e.g. pad(), hex()).
template <typename T, typename Spec, typename FillChar>
BasicWriter &operator<<(IntFormatSpec<T, Spec, FillChar> spec) {
// Compile-time check that FillChar is convertible to Char.
internal::CharTraits<Char>::convert(FillChar());
write_int(spec.value(), spec);
return *this;
}
// Writes a string wrapped with formatting information (e.g. pad()).
template <typename StrChar>
BasicWriter &operator<<(const StrFormatSpec<StrChar> &spec) {
const StrChar *s = spec.str();
write_str(s, std::char_traits<Char>::length(s), spec);
return *this;
}
// Discards buffered output; capacity is retained by the buffer.
void clear() FMT_NOEXCEPT { buffer_.clear(); }
// Direct access to the underlying buffer.
Buffer<Char> &buffer() FMT_NOEXCEPT { return buffer_; }
};
// Writes the string s of the given size honoring the width/align/fill of
// spec, and returns a pointer to where the content was placed (used by
// callers such as write_double to patch in a sign afterwards).
template <typename Char>
template <typename StrChar>
typename BasicWriter<Char>::CharPtr BasicWriter<Char>::write_str(
const StrChar *s, std::size_t size, const AlignSpec &spec) {
CharPtr out = CharPtr();
if (spec.width() > size) {
// Grow by the full field width and fill the non-content part.
out = grow_buffer(spec.width());
Char fill = internal::CharTraits<Char>::cast(spec.fill());
if (spec.align() == ALIGN_RIGHT) {
std::uninitialized_fill_n(out, spec.width() - size, fill);
out += spec.width() - size;
} else if (spec.align() == ALIGN_CENTER) {
out = fill_padding(out, spec.width(), size, fill);
} else {
// Left alignment (the default): padding goes after the content.
std::uninitialized_fill_n(out + size, spec.width() - size, fill);
}
} else {
out = grow_buffer(size);
}
std::uninitialized_copy(s, s + size, out);
return out;
}
// Writes a string argument, validating the spec type and applying
// precision as a maximum length (printf-style truncation).
template <typename Char>
template <typename StrChar, typename Spec>
void BasicWriter<Char>::write_str(
const internal::Arg::StringValue<StrChar> &s, const Spec &spec) {
// Check if StrChar is convertible to Char.
internal::CharTraits<Char>::convert(StrChar());
if (spec.type_ && spec.type_ != 's')
internal::report_unknown_type(spec.type_, "string");
const StrChar *str_value = s.value;
std::size_t str_size = s.size;
if (str_size == 0) {
// NOTE: a null pointer is only detected when size is 0; a non-zero
// size with a null pointer is the caller's responsibility.
if (!str_value) {
FMT_THROW(FormatError("string pointer is null"));
}
}
std::size_t precision = static_cast<std::size_t>(spec.precision_);
if (spec.precision_ >= 0 && precision < str_size)
str_size = precision;
write_str(str_value, str_size, spec);
}
// Center alignment: splits total_size - content_size fill characters
// into a left half (rounded down) and a right half around the content,
// and returns a pointer to where the content should be written.
template <typename Char>
typename BasicWriter<Char>::CharPtr
BasicWriter<Char>::fill_padding(
CharPtr buffer, unsigned total_size,
std::size_t content_size, wchar_t fill) {
std::size_t padding = total_size - content_size;
std::size_t left_padding = padding / 2;
Char fill_char = internal::CharTraits<Char>::cast(fill);
std::uninitialized_fill_n(buffer, left_padding, fill_char);
buffer += left_padding;
CharPtr content = buffer;
// Right padding gets the extra character when padding is odd.
std::uninitialized_fill_n(buffer + content_size,
padding - left_padding, fill_char);
return content;
}
// Reserves and pre-fills space for an integer of num_digits digits plus
// prefix (sign and/or base prefix), honoring width, alignment, fill and
// precision from spec. Returns a pointer to the LAST digit position so
// callers can write digits backwards. Statement order matters: each
// grow_buffer call may reallocate, so pointers are re-derived after it.
template <typename Char>
template <typename Spec>
typename BasicWriter<Char>::CharPtr
BasicWriter<Char>::prepare_int_buffer(
unsigned num_digits, const Spec &spec,
const char *prefix, unsigned prefix_size) {
unsigned width = spec.width();
Alignment align = spec.align();
Char fill = internal::CharTraits<Char>::cast(spec.fill());
if (spec.precision() > static_cast<int>(num_digits)) {
// Octal prefix '0' is counted as a digit, so ignore it if precision
// is specified.
if (prefix_size > 0 && prefix[prefix_size - 1] == '0')
--prefix_size;
unsigned number_size =
prefix_size + internal::to_unsigned(spec.precision());
// Zero-pad the number itself up to the precision, then fill the rest
// of the field (if any) with the spec's fill character.
AlignSpec subspec(number_size, '0', ALIGN_NUMERIC);
if (number_size >= width)
return prepare_int_buffer(num_digits, subspec, prefix, prefix_size);
buffer_.reserve(width);
unsigned fill_size = width - number_size;
if (align != ALIGN_LEFT) {
CharPtr p = grow_buffer(fill_size);
std::uninitialized_fill(p, p + fill_size, fill);
}
CharPtr result = prepare_int_buffer(
num_digits, subspec, prefix, prefix_size);
if (align == ALIGN_LEFT) {
CharPtr p = grow_buffer(fill_size);
std::uninitialized_fill(p, p + fill_size, fill);
}
return result;
}
unsigned size = prefix_size + num_digits;
if (width <= size) {
// Content fills (or exceeds) the field: no padding needed.
CharPtr p = grow_buffer(size);
std::uninitialized_copy(prefix, prefix + prefix_size, p);
return p + size - 1;
}
CharPtr p = grow_buffer(width);
CharPtr end = p + width;
if (align == ALIGN_LEFT) {
std::uninitialized_copy(prefix, prefix + prefix_size, p);
p += size;
std::uninitialized_fill(p, end, fill);
} else if (align == ALIGN_CENTER) {
p = fill_padding(p, width, size, fill);
std::uninitialized_copy(prefix, prefix + prefix_size, p);
p += size;
} else {
if (align == ALIGN_NUMERIC) {
// Numeric ('=') alignment: prefix first, padding between the
// prefix and the digits.
if (prefix_size != 0) {
p = std::uninitialized_copy(prefix, prefix + prefix_size, p);
size -= prefix_size;
}
} else {
// Right alignment: content at the end, padding before it.
std::uninitialized_copy(prefix, prefix + prefix_size, end - size);
}
std::uninitialized_fill(p, end - size, fill);
p = end;
}
return p - 1;
}
// Formats an integer according to spec.type(): decimal (default), hex,
// binary, octal or locale-aware decimal ('n'). Digits are written
// backwards starting from the pointer returned by prepare_int_buffer.
template <typename Char>
template <typename T, typename Spec>
void BasicWriter<Char>::write_int(T value, Spec spec) {
unsigned prefix_size = 0;
typedef typename internal::IntTraits<T>::MainType UnsignedType;
UnsignedType abs_value = static_cast<UnsignedType>(value);
// Up to 3 prefix chars (sign + "0x") plus a terminator's worth of slack.
char prefix[4] = "";
if (internal::is_negative(value)) {
prefix[0] = '-';
++prefix_size;
// Unsigned negation avoids overflow for the most negative value.
abs_value = 0 - abs_value;
} else if (spec.flag(SIGN_FLAG)) {
prefix[0] = spec.flag(PLUS_FLAG) ? '+' : ' ';
++prefix_size;
}
switch (spec.type()) {
case 0: case 'd': {
unsigned num_digits = internal::count_digits(abs_value);
CharPtr p = prepare_int_buffer(num_digits, spec, prefix, prefix_size) + 1;
internal::format_decimal(get(p), abs_value, 0);
break;
}
case 'x': case 'X': {
UnsignedType n = abs_value;
if (spec.flag(HASH_FLAG)) {
// '#' flag adds the "0x"/"0X" base prefix.
prefix[prefix_size++] = '0';
prefix[prefix_size++] = spec.type_prefix();
}
unsigned num_digits = 0;
do {
++num_digits;
} while ((n >>= 4) != 0);
Char *p = get(prepare_int_buffer(
num_digits, spec, prefix, prefix_size));
n = abs_value;
const char *digits = spec.type() == 'x' ?
"0123456789abcdef" : "0123456789ABCDEF";
do {
*p-- = digits[n & 0xf];
} while ((n >>= 4) != 0);
break;
}
case 'b': case 'B': {
UnsignedType n = abs_value;
if (spec.flag(HASH_FLAG)) {
prefix[prefix_size++] = '0';
prefix[prefix_size++] = spec.type_prefix();
}
unsigned num_digits = 0;
do {
++num_digits;
} while ((n >>= 1) != 0);
Char *p = get(prepare_int_buffer(num_digits, spec, prefix, prefix_size));
n = abs_value;
do {
*p-- = static_cast<Char>('0' + (n & 1));
} while ((n >>= 1) != 0);
break;
}
case 'o': {
UnsignedType n = abs_value;
if (spec.flag(HASH_FLAG))
prefix[prefix_size++] = '0';
unsigned num_digits = 0;
do {
++num_digits;
} while ((n >>= 3) != 0);
Char *p = get(prepare_int_buffer(num_digits, spec, prefix, prefix_size));
n = abs_value;
do {
*p-- = static_cast<Char>('0' + (n & 7));
} while ((n >>= 3) != 0);
break;
}
case 'n': {
// Locale-aware decimal: insert the locale's thousands separator
// every three digits (not available on Android).
unsigned num_digits = internal::count_digits(abs_value);
fmt::StringRef sep = "";
#if !(defined(ANDROID) || defined(__ANDROID__))
sep = internal::thousands_sep(std::localeconv());
#endif
unsigned size = static_cast<unsigned>(
num_digits + sep.size() * ((num_digits - 1) / 3));
CharPtr p = prepare_int_buffer(size, spec, prefix, prefix_size) + 1;
internal::format_decimal(get(p), abs_value, 0, internal::ThousandsSep(sep));
break;
}
default:
internal::report_unknown_type(
spec.type(), spec.flag(CHAR_FLAG) ? "char" : "integer");
break;
}
}
// Formats a floating-point value by building a printf format string and
// delegating to snprintf (format_float), retrying with a larger buffer
// until the output fits. NaN/inf are formatted by hand for portability.
template <typename Char>
template <typename T, typename Spec>
void BasicWriter<Char>::write_double(T value, const Spec &spec) {
// Check type.
char type = spec.type();
bool upper = false;
switch (type) {
case 0:
type = 'g';
break;
case 'e': case 'f': case 'g': case 'a':
break;
case 'F':
#if FMT_MSC_VER
// MSVC's printf doesn't support 'F'.
type = 'f';
#endif
// Fall through.
case 'E': case 'G': case 'A':
upper = true;
break;
default:
internal::report_unknown_type(type, "double");
break;
}
char sign = 0;
// Use isnegative instead of value < 0 because the latter is always
// false for NaN.
if (internal::FPUtil::isnegative(static_cast<double>(value))) {
sign = '-';
value = -value;
} else if (spec.flag(SIGN_FLAG)) {
sign = spec.flag(PLUS_FLAG) ? '+' : ' ';
}
if (internal::FPUtil::isnotanumber(value)) {
// Format NaN ourselves because sprintf's output is not consistent
// across platforms.
std::size_t nan_size = 4;
const char *nan = upper ? " NAN" : " nan";
if (!sign) {
// No sign: drop the leading space reserved for it.
--nan_size;
++nan;
}
CharPtr out = write_str(nan, nan_size, spec);
if (sign)
*out = sign;
return;
}
if (internal::FPUtil::isinfinity(value)) {
// Format infinity ourselves because sprintf's output is not consistent
// across platforms.
std::size_t inf_size = 4;
const char *inf = upper ? " INF" : " inf";
if (!sign) {
--inf_size;
++inf;
}
CharPtr out = write_str(inf, inf_size, spec);
if (sign)
*out = sign;
return;
}
std::size_t offset = buffer_.size();
unsigned width = spec.width();
if (sign) {
// Reserve a slot before the number where the sign will be patched in.
buffer_.reserve(buffer_.size() + (width > 1u ? width : 1u));
if (width > 0)
--width;
++offset;
}
// Build format string.
enum { MAX_FORMAT_SIZE = 10}; // longest format: %#-*.*Lg
Char format[MAX_FORMAT_SIZE];
Char *format_ptr = format;
*format_ptr++ = '%';
unsigned width_for_sprintf = width;
if (spec.flag(HASH_FLAG))
*format_ptr++ = '#';
if (spec.align() == ALIGN_CENTER) {
// Centering is done manually below, so sprintf gets no width.
width_for_sprintf = 0;
} else {
if (spec.align() == ALIGN_LEFT)
*format_ptr++ = '-';
if (width != 0)
*format_ptr++ = '*';
}
if (spec.precision() >= 0) {
*format_ptr++ = '.';
*format_ptr++ = '*';
}
append_float_length(format_ptr, value);
*format_ptr++ = type;
*format_ptr = '\0';
// Format using snprintf.
Char fill = internal::CharTraits<Char>::cast(spec.fill());
unsigned n = 0;
Char *start = FMT_NULL;
for (;;) {
std::size_t buffer_size = buffer_.capacity() - offset;
#if FMT_MSC_VER
// MSVC's vsnprintf_s doesn't work with zero size, so reserve
// space for at least one extra character to make the size non-zero.
// Note that the buffer's capacity will increase by more than 1.
if (buffer_size == 0) {
buffer_.reserve(offset + 1);
buffer_size = buffer_.capacity() - offset;
}
#endif
start = &buffer_[offset];
int result = internal::CharTraits<Char>::format_float(
start, buffer_size, format, width_for_sprintf, spec.precision(), value);
if (result >= 0) {
n = internal::to_unsigned(result);
if (offset + n < buffer_.capacity())
break; // The buffer is large enough - continue with formatting.
buffer_.reserve(offset + n + 1);
} else {
// If result is negative we ask to increase the capacity by at least 1,
// but as std::vector, the buffer grows exponentially.
buffer_.reserve(buffer_.capacity() + 1);
}
}
if (sign) {
// Place the sign in the slot reserved earlier, or keep it pending if
// sprintf already right-padded with spaces (handled just below).
if ((spec.align() != ALIGN_RIGHT && spec.align() != ALIGN_DEFAULT) ||
*start != ' ') {
*(start - 1) = sign;
sign = 0;
} else {
*(start - 1) = fill;
}
++n;
}
if (spec.align() == ALIGN_CENTER && spec.width() > n) {
// Shift the number to the middle of the field and fill both sides.
width = spec.width();
CharPtr p = grow_buffer(width);
std::memmove(get(p) + (width - n) / 2, get(p), n * sizeof(Char));
fill_padding(p, spec.width(), n, fill);
return;
}
if (spec.fill() != ' ' || sign) {
// Replace sprintf's space padding with the requested fill character
// and put the sign just before the first digit.
while (*start == ' ')
*start++ = fill;
if (sign)
*(start - 1) = sign;
}
grow_buffer(n);
}
/**
\rst
This class template provides operations for formatting and writing data
into a character stream. The output is stored in a memory buffer that grows
dynamically.
You can use one of the following typedefs for common character types
and the standard allocator:
+---------------+-----------------------------------------------------+
| Type | Definition |
+===============+=====================================================+
| MemoryWriter | BasicMemoryWriter<char, std::allocator<char>> |
+---------------+-----------------------------------------------------+
| WMemoryWriter | BasicMemoryWriter<wchar_t, std::allocator<wchar_t>> |
+---------------+-----------------------------------------------------+
**Example**::
MemoryWriter out;
out << "The answer is " << 42 << "\n";
out.write("({:+f}, {:+f})", -3.14, 3.14);
This will write the following output to the ``out`` object:
.. code-block:: none
The answer is 42
(-3.140000, +3.140000)
The output can be converted to an ``std::string`` with ``out.str()`` or
accessed as a C string with ``out.c_str()``.
\endrst
*/
template <typename Char, typename Allocator = std::allocator<Char> >
class BasicMemoryWriter : public BasicWriter<Char> {
private:
// Inline storage up to INLINE_BUFFER_SIZE; heap-allocates beyond that.
internal::MemoryBuffer<Char, internal::INLINE_BUFFER_SIZE, Allocator> buffer_;
public:
explicit BasicMemoryWriter(const Allocator& alloc = Allocator())
: BasicWriter<Char>(buffer_), buffer_(alloc) {}
#if FMT_USE_RVALUE_REFERENCES
/**
\rst
Constructs a :class:`fmt::BasicMemoryWriter` object moving the content
of the other object to it.
\endrst
*/
BasicMemoryWriter(BasicMemoryWriter &&other)
: BasicWriter<Char>(buffer_), buffer_(std::move(other.buffer_)) {
}
/**
\rst
Moves the content of the other ``BasicMemoryWriter`` object to this one.
\endrst
*/
BasicMemoryWriter &operator=(BasicMemoryWriter &&other) {
buffer_ = std::move(other.buffer_);
return *this;
}
#endif
};
// Convenience typedefs for narrow and wide dynamically growing writers.
typedef BasicMemoryWriter<char> MemoryWriter;
typedef BasicMemoryWriter<wchar_t> WMemoryWriter;
/**
\rst
This class template provides operations for formatting and writing data
into a fixed-size array. For writing into a dynamically growing buffer
use :class:`fmt::BasicMemoryWriter`.
Any write method will throw ``std::runtime_error`` if the output doesn't fit
into the array.
You can use one of the following typedefs for common character types:
+--------------+---------------------------+
| Type | Definition |
+==============+===========================+
| ArrayWriter | BasicArrayWriter<char> |
+--------------+---------------------------+
| WArrayWriter | BasicArrayWriter<wchar_t> |
+--------------+---------------------------+
\endrst
*/
template <typename Char>
class BasicArrayWriter : public BasicWriter<Char> {
private:
// Non-owning view over the caller-supplied array.
internal::FixedBuffer<Char> buffer_;
public:
/**
\rst
Constructs a :class:`fmt::BasicArrayWriter` object for *array* of the
given size.
\endrst
*/
BasicArrayWriter(Char *array, std::size_t size)
: BasicWriter<Char>(buffer_), buffer_(array, size) {}
/**
\rst
Constructs a :class:`fmt::BasicArrayWriter` object for *array* of the
size known at compile time.
\endrst
*/
template <std::size_t SIZE>
explicit BasicArrayWriter(Char (&array)[SIZE])
: BasicWriter<Char>(buffer_), buffer_(array, SIZE) {}
};
// Convenience typedefs for narrow and wide fixed-size writers.
typedef BasicArrayWriter<char> ArrayWriter;
typedef BasicArrayWriter<wchar_t> WArrayWriter;
// Reports a system error without throwing an exception.
// Can be used to report errors from destructors.
FMT_API void report_system_error(int error_code,
StringRef message) FMT_NOEXCEPT;
#if FMT_USE_WINDOWS_H
/** A Windows error. */
class WindowsError : public SystemError {
private:
// Formats the message and resolves the system error text; shared by
// both constructors.
FMT_API void init(int error_code, CStringRef format_str, ArgList args);
public:
/**
\rst
Constructs a :class:`fmt::WindowsError` object with the description
of the form
.. parsed-literal::
*<message>*: *<system-message>*
where *<message>* is the formatted message and *<system-message>* is the
system message corresponding to the error code.
*error_code* is a Windows error code as given by ``GetLastError``.
If *error_code* is not a valid error code such as -1, the system message
will look like "error -1".
**Example**::
// This throws a WindowsError with the description
// cannot open file 'madeup': The system cannot find the file specified.
// or similar (system message may vary).
const char *filename = "madeup";
LPOFSTRUCT of = LPOFSTRUCT();
HFILE file = OpenFile(filename, &of, OF_READ);
if (file == HFILE_ERROR) {
throw fmt::WindowsError(GetLastError(),
"cannot open file '{}'", filename);
}
\endrst
*/
WindowsError(int error_code, CStringRef message) {
init(error_code, message, ArgList());
}
// Generates variadic constructor overloads that forward to init().
FMT_VARIADIC_CTOR(WindowsError, init, int, CStringRef)
};
// Reports a Windows error without throwing an exception.
// Can be used to report errors from destructors.
FMT_API void report_windows_error(int error_code,
StringRef message) FMT_NOEXCEPT;
#endif
// Terminal colors for print_colored (maps to ANSI color codes 30-37).
enum Color { BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE };
/**
Formats a string and prints it to stdout using ANSI escape sequences
to specify color (experimental).
Example:
print_colored(fmt::RED, "Elapsed time: {0:.2f} seconds", 1.23);
*/
FMT_API void print_colored(Color c, CStringRef format, ArgList args);
/**
\rst
Formats arguments and returns the result as a string.
**Example**::
std::string message = format("The answer is {}", 42);
\endrst
*/
inline std::string format(CStringRef format_str, ArgList args) {
MemoryWriter w;
w.write(format_str, args);
return w.str();
}
// Wide-character overload of format().
inline std::wstring format(WCStringRef format_str, ArgList args) {
WMemoryWriter w;
w.write(format_str, args);
return w.str();
}
/**
\rst
Prints formatted data to the file *f*.
**Example**::
print(stderr, "Don't {}!", "panic");
\endrst
*/
FMT_API void print(std::FILE *f, CStringRef format_str, ArgList args);
/**
\rst
Prints formatted data to ``stdout``.
**Example**::
print("Elapsed time: {0:.2f} seconds", 1.23);
\endrst
*/
FMT_API void print(CStringRef format_str, ArgList args);
/**
Fast integer formatter. Formats into an internal fixed-size buffer at
construction time; accessors return views into that buffer, so the
FormatInt object must outlive any pointer obtained from it.
*/
class FormatInt {
private:
// Buffer should be large enough to hold all digits (digits10 + 1),
// a sign and a null character.
enum {BUFFER_SIZE = std::numeric_limits<ULongLong>::digits10 + 3};
// mutable so c_str() can append the terminator from a const method.
mutable char buffer_[BUFFER_SIZE];
char *str_;
// Formats value in reverse and returns the number of digits.
char *format_decimal(ULongLong value) {
char *buffer_end = buffer_ + BUFFER_SIZE - 1;
while (value >= 100) {
// Integer division is slow so do it for a group of two digits instead
// of for every digit. The idea comes from the talk by Alexandrescu
// "Three Optimization Tips for C++". See speed-test for a comparison.
unsigned index = static_cast<unsigned>((value % 100) * 2);
value /= 100;
*--buffer_end = internal::Data::DIGITS[index + 1];
*--buffer_end = internal::Data::DIGITS[index];
}
if (value < 10) {
*--buffer_end = static_cast<char>('0' + value);
return buffer_end;
}
// Exactly two digits remain: emit them from the lookup table.
unsigned index = static_cast<unsigned>(value * 2);
*--buffer_end = internal::Data::DIGITS[index + 1];
*--buffer_end = internal::Data::DIGITS[index];
return buffer_end;
}
// Formats a signed value: negates on the unsigned type to avoid
// overflow for the most negative value, then prepends '-'.
void FormatSigned(LongLong value) {
ULongLong abs_value = static_cast<ULongLong>(value);
bool negative = value < 0;
if (negative)
abs_value = 0 - abs_value;
str_ = format_decimal(abs_value);
if (negative)
*--str_ = '-';
}
public:
explicit FormatInt(int value) { FormatSigned(value); }
explicit FormatInt(long value) { FormatSigned(value); }
explicit FormatInt(LongLong value) { FormatSigned(value); }
explicit FormatInt(unsigned value) : str_(format_decimal(value)) {}
explicit FormatInt(unsigned long value) : str_(format_decimal(value)) {}
explicit FormatInt(ULongLong value) : str_(format_decimal(value)) {}
/** Returns the number of characters written to the output buffer. */
std::size_t size() const {
return internal::to_unsigned(buffer_ - str_ + BUFFER_SIZE - 1);
}
/**
Returns a pointer to the output buffer content. No terminating null
character is appended.
*/
const char *data() const { return str_; }
/**
Returns a pointer to the output buffer content with terminating null
character appended.
*/
const char *c_str() const {
buffer_[BUFFER_SIZE - 1] = '\0';
return str_;
}
/**
\rst
Returns the content of the output buffer as an ``std::string``.
\endrst
*/
std::string str() const { return std::string(str_, size()); }
};
// Formats a decimal integer value writing into buffer and returns
// a pointer to the end of the formatted string. This function doesn't
// write a terminating null character.
template <typename T>
inline void format_decimal(char *&buffer, T value) {
typedef typename internal::IntTraits<T>::MainType MainType;
MainType abs_value = static_cast<MainType>(value);
if (internal::is_negative(value)) {
*buffer++ = '-';
abs_value = 0 - abs_value;
}
if (abs_value < 100) {
if (abs_value < 10) {
*buffer++ = static_cast<char>('0' + abs_value);
return;
}
unsigned index = static_cast<unsigned>(abs_value * 2);
*buffer++ = internal::Data::DIGITS[index];
*buffer++ = internal::Data::DIGITS[index + 1];
return;
}
unsigned num_digits = internal::count_digits(abs_value);
internal::format_decimal(buffer, abs_value, num_digits);
buffer += num_digits;
}
/**
\rst
Returns a named argument for formatting functions.
**Example**::
print("Elapsed time: {s:.2f} seconds", arg("s", 1.23));
\endrst
*/
template <typename T>
inline internal::NamedArgWithType<char, T> arg(StringRef name, const T &arg) {
return internal::NamedArgWithType<char, T>(name, arg);
}
// Wide-character overload of arg().
template <typename T>
inline internal::NamedArgWithType<wchar_t, T> arg(WStringRef name, const T &arg) {
return internal::NamedArgWithType<wchar_t, T>(name, arg);
}
// The following two functions are deleted intentionally to disable
// nested named arguments as in ``format("{}", arg("a", arg("b", 42)))``.
template <typename Char>
void arg(StringRef, const internal::NamedArg<Char>&) FMT_DELETED_OR_UNDEFINED;
template <typename Char>
void arg(WStringRef, const internal::NamedArg<Char>&) FMT_DELETED_OR_UNDEFINED;
}
#if FMT_GCC_VERSION
// Use the system_header pragma to suppress warnings about variadic macros
// because suppressing -Wvariadic-macros with the diagnostic pragma doesn't
// work. It is used at the end because we want to suppress as little warnings
// as possible.
# pragma GCC system_header
#endif
// This is used to work around VC++ bugs in handling variadic macros.
#define FMT_EXPAND(args) args
// Returns the number of arguments.
// Based on https://groups.google.com/forum/#!topic/comp.std.c/d-6Mj5Lko_s.
#define FMT_NARG(...) FMT_NARG_(__VA_ARGS__, FMT_RSEQ_N())
#define FMT_NARG_(...) FMT_EXPAND(FMT_ARG_N(__VA_ARGS__))
#define FMT_ARG_N(_1, _2, _3, _4, _5, _6, _7, _8, _9, _10, N, ...) N
#define FMT_RSEQ_N() 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0
#define FMT_FOR_EACH_(N, f, ...) \
FMT_EXPAND(FMT_CONCAT(FMT_FOR_EACH, N)(f, __VA_ARGS__))
#define FMT_FOR_EACH(f, ...) \
FMT_EXPAND(FMT_FOR_EACH_(FMT_NARG(__VA_ARGS__), f, __VA_ARGS__))
#define FMT_ADD_ARG_NAME(type, index) type arg##index
#define FMT_GET_ARG_NAME(type, index) arg##index
#if FMT_USE_VARIADIC_TEMPLATES
# define FMT_VARIADIC_(Const, Char, ReturnType, func, call, ...) \
template <typename... Args> \
ReturnType func(FMT_FOR_EACH(FMT_ADD_ARG_NAME, __VA_ARGS__), \
const Args & ... args) Const { \
typedef fmt::internal::ArgArray<sizeof...(Args)> ArgArray; \
typename ArgArray::Type array{ \
ArgArray::template make<fmt::BasicFormatter<Char> >(args)...}; \
call(FMT_FOR_EACH(FMT_GET_ARG_NAME, __VA_ARGS__), \
fmt::ArgList(fmt::internal::make_type(args...), array)); \
}
#else
// Defines a wrapper for a function taking __VA_ARGS__ arguments
// and n additional arguments of arbitrary types.
# define FMT_WRAP(Const, Char, ReturnType, func, call, n, ...) \
template <FMT_GEN(n, FMT_MAKE_TEMPLATE_ARG)> \
inline ReturnType func(FMT_FOR_EACH(FMT_ADD_ARG_NAME, __VA_ARGS__), \
FMT_GEN(n, FMT_MAKE_ARG)) Const { \
fmt::internal::ArgArray<n>::Type arr; \
FMT_GEN(n, FMT_ASSIGN_##Char); \
call(FMT_FOR_EACH(FMT_GET_ARG_NAME, __VA_ARGS__), fmt::ArgList( \
fmt::internal::make_type(FMT_GEN(n, FMT_MAKE_REF2)), arr)); \
}
# define FMT_VARIADIC_(Const, Char, ReturnType, func, call, ...) \
inline ReturnType func(FMT_FOR_EACH(FMT_ADD_ARG_NAME, __VA_ARGS__)) Const { \
call(FMT_FOR_EACH(FMT_GET_ARG_NAME, __VA_ARGS__), fmt::ArgList()); \
} \
FMT_WRAP(Const, Char, ReturnType, func, call, 1, __VA_ARGS__) \
FMT_WRAP(Const, Char, ReturnType, func, call, 2, __VA_ARGS__) \
FMT_WRAP(Const, Char, ReturnType, func, call, 3, __VA_ARGS__) \
FMT_WRAP(Const, Char, ReturnType, func, call, 4, __VA_ARGS__) \
FMT_WRAP(Const, Char, ReturnType, func, call, 5, __VA_ARGS__) \
FMT_WRAP(Const, Char, ReturnType, func, call, 6, __VA_ARGS__) \
FMT_WRAP(Const, Char, ReturnType, func, call, 7, __VA_ARGS__) \
FMT_WRAP(Const, Char, ReturnType, func, call, 8, __VA_ARGS__) \
FMT_WRAP(Const, Char, ReturnType, func, call, 9, __VA_ARGS__) \
FMT_WRAP(Const, Char, ReturnType, func, call, 10, __VA_ARGS__) \
FMT_WRAP(Const, Char, ReturnType, func, call, 11, __VA_ARGS__) \
FMT_WRAP(Const, Char, ReturnType, func, call, 12, __VA_ARGS__) \
FMT_WRAP(Const, Char, ReturnType, func, call, 13, __VA_ARGS__) \
FMT_WRAP(Const, Char, ReturnType, func, call, 14, __VA_ARGS__) \
FMT_WRAP(Const, Char, ReturnType, func, call, 15, __VA_ARGS__)
#endif // FMT_USE_VARIADIC_TEMPLATES
/**
\rst
Defines a variadic function with the specified return type, function name
and argument types passed as variable arguments to this macro.
**Example**::
void print_error(const char *file, int line, const char *format,
fmt::ArgList args) {
fmt::print("{}: {}: ", file, line);
fmt::print(format, args);
}
FMT_VARIADIC(void, print_error, const char *, int, const char *)
``FMT_VARIADIC`` is used for compatibility with legacy C++ compilers that
don't implement variadic templates. You don't have to use this macro if
you don't need legacy compiler support and can use variadic templates
directly::
template <typename... Args>
void print_error(const char *file, int line, const char *format,
const Args & ... args) {
fmt::print("{}: {}: ", file, line);
fmt::print(format, args...);
}
\endrst
*/
#define FMT_VARIADIC(ReturnType, func, ...) \
FMT_VARIADIC_(, char, ReturnType, func, return func, __VA_ARGS__)
#define FMT_VARIADIC_CONST(ReturnType, func, ...) \
FMT_VARIADIC_(const, char, ReturnType, func, return func, __VA_ARGS__)
#define FMT_VARIADIC_W(ReturnType, func, ...) \
FMT_VARIADIC_(, wchar_t, ReturnType, func, return func, __VA_ARGS__)
#define FMT_VARIADIC_CONST_W(ReturnType, func, ...) \
FMT_VARIADIC_(const, wchar_t, ReturnType, func, return func, __VA_ARGS__)
// Expands an identifier into fmt::arg("id", id) (narrow / wide variants).
#define FMT_CAPTURE_ARG_(id, index) ::fmt::arg(#id, id)
#define FMT_CAPTURE_ARG_W_(id, index) ::fmt::arg(L###id, id)
/**
\rst
Convenient macro to capture the arguments' names and values into several
``fmt::arg(name, value)``.
**Example**::
int x = 1, y = 2;
print("point: ({x}, {y})", FMT_CAPTURE(x, y));
// same as:
// print("point: ({x}, {y})", arg("x", x), arg("y", y));
\endrst
*/
#define FMT_CAPTURE(...) FMT_FOR_EACH(FMT_CAPTURE_ARG_, __VA_ARGS__)
#define FMT_CAPTURE_W(...) FMT_FOR_EACH(FMT_CAPTURE_ARG_W_, __VA_ARGS__)
namespace fmt {
// Generate the public variadic entry points declared above.
FMT_VARIADIC(std::string, format, CStringRef)
FMT_VARIADIC_W(std::wstring, format, WCStringRef)
FMT_VARIADIC(void, print, CStringRef)
FMT_VARIADIC(void, print, std::FILE *, CStringRef)
FMT_VARIADIC(void, print_colored, Color, CStringRef)
namespace internal {
// Returns true if c can start an argument name ([A-Za-z_], ASCII only).
template <typename Char>
inline bool is_name_start(Char c) {
return ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || '_' == c;
}
// Parses an unsigned integer advancing s to the end of the parsed input.
// This function assumes that the first character of s is a digit.
// Throws FormatError if the value does not fit in an int.
template <typename Char>
unsigned parse_nonnegative_int(const Char *&s) {
assert('0' <= *s && *s <= '9');
unsigned value = 0;
// Convert to unsigned to prevent a warning.
unsigned max_int = (std::numeric_limits<int>::max)();
unsigned big = max_int / 10;
do {
// Check for overflow: saturate to max_int + 1 (still representable as
// unsigned) so the range check below fires.
if (value > big) {
value = max_int + 1;
break;
}
value = value * 10 + (*s - '0');
++s;
} while ('0' <= *s && *s <= '9');
// Convert to unsigned to prevent a warning.
if (value > max_int)
FMT_THROW(FormatError("number is too big"));
return value;
}
// Throws FormatError if the argument is not a numeric type (the given
// spec character only makes sense for numbers).
inline void require_numeric_argument(const Arg &arg, char spec) {
if (arg.type > Arg::LAST_NUMERIC_TYPE) {
std::string message =
fmt::format("format specifier '{}' requires numeric argument", spec);
FMT_THROW(fmt::FormatError(message));
}
}
// Validates a sign specifier ('+', '-' or ' ') against the argument type
// (must be numeric and signed) and advances s past it.
template <typename Char>
void check_sign(const Char *&s, const Arg &arg) {
char sign = static_cast<char>(*s);
require_numeric_argument(arg, sign);
if (arg.type == Arg::UINT || arg.type == Arg::ULONG_LONG) {
FMT_THROW(FormatError(fmt::format(
"format specifier '{}' requires signed argument", sign)));
}
++s;
}
} // namespace internal
// Looks up a named argument. Sets *error (and returns an empty Arg) if
// automatic indexing is already in use or the name is unknown.
template <typename Char, typename AF>
inline internal::Arg BasicFormatter<Char, AF>::get_arg(
BasicStringRef<Char> arg_name, const char *&error) {
if (check_no_auto_index(error)) {
// Lazily builds the name -> argument map on first use.
map_.init(args());
const internal::Arg *arg = map_.find(arg_name);
if (arg)
return *arg;
error = "argument not found";
}
return internal::Arg();
}
// Resolves the argument for a replacement field: either the next one
// (automatic indexing) or the explicitly numbered one. Advances s past
// any digits consumed. Throws FormatError on mixed/invalid indexing.
template <typename Char, typename AF>
inline internal::Arg BasicFormatter<Char, AF>::parse_arg_index(const Char *&s) {
const char *error = FMT_NULL;
internal::Arg arg = *s < '0' || *s > '9' ?
next_arg(error) : get_arg(internal::parse_nonnegative_int(s), error);
if (error) {
// A character other than '}' or ':' here means the format string
// itself is malformed, not the index.
FMT_THROW(FormatError(
*s != '}' && *s != ':' ? "invalid format string" : error));
}
return arg;
}
// Parses a named argument reference at s. The name must start with a
// letter or '_' (asserted) and may continue with letters, digits or '_';
// s is advanced to the first character past the name. Throws FormatError
// if the name is unknown or auto-indexing is already in use.
template <typename Char, typename AF>
inline internal::Arg BasicFormatter<Char, AF>::parse_arg_name(const Char *&s) {
assert(internal::is_name_start(*s));
const Char *start = s;
Char c;
do {
c = *++s;
} while (internal::is_name_start(c) || ('0' <= c && c <= '9'));
const char *error = FMT_NULL;
internal::Arg arg = get_arg(BasicStringRef<Char>(start, s - start), error);
if (error)
FMT_THROW(FormatError(error));
return arg;
}
// Parses the format-spec portion of a replacement field (everything after
// the argument id, i.e. an optional ':' followed by fill/align, sign, '#',
// '0', width, precision and type) and formats arg into the writer via
// ArgFormatter. On entry format_str points just past the argument id; the
// returned pointer is positioned past the closing '}'.
template <typename Char, typename ArgFormatter>
const Char *BasicFormatter<Char, ArgFormatter>::format(
const Char *&format_str, const internal::Arg &arg) {
using internal::Arg;
const Char *s = format_str;
typename ArgFormatter::SpecType spec;
if (*s == ':') {
// Custom (user-defined) types parse and format themselves via the
// callback stored in the argument; spec parsing is delegated entirely.
if (arg.type == Arg::CUSTOM) {
arg.custom.format(this, arg.custom.value, &s);
return s;
}
++s;
// Parse fill and alignment. The alignment character may be preceded by
// a single fill character, so check position s+1 first and fall back
// to s (hence the p-then-s probe order in the loop below).
if (Char c = *s) {
const Char *p = s + 1;
spec.align_ = ALIGN_DEFAULT;
do {
switch (*p) {
case '<':
spec.align_ = ALIGN_LEFT;
break;
case '>':
spec.align_ = ALIGN_RIGHT;
break;
case '=':
spec.align_ = ALIGN_NUMERIC;
break;
case '^':
spec.align_ = ALIGN_CENTER;
break;
}
if (spec.align_ != ALIGN_DEFAULT) {
// p != s means the alignment char was at s+1, so c at s is the fill.
if (p != s) {
if (c == '}') break;
if (c == '{')
FMT_THROW(FormatError("invalid fill character '{'"));
s += 2;
spec.fill_ = c;
} else ++s;
// '=' (numeric alignment) only makes sense for numeric arguments.
if (spec.align_ == ALIGN_NUMERIC)
require_numeric_argument(arg, '=');
break;
}
} while (--p >= s);
}
// Parse sign. Each branch validates the argument type and advances s.
switch (*s) {
case '+':
check_sign(s, arg);
spec.flags_ |= SIGN_FLAG | PLUS_FLAG;
break;
case '-':
check_sign(s, arg);
spec.flags_ |= MINUS_FLAG;
break;
case ' ':
check_sign(s, arg);
spec.flags_ |= SIGN_FLAG;
break;
}
// Parse the alternate-form flag ('#'), numeric arguments only.
if (*s == '#') {
require_numeric_argument(arg, '#');
spec.flags_ |= HASH_FLAG;
++s;
}
// Parse zero flag: shorthand for fill '0' with numeric alignment.
if (*s == '0') {
require_numeric_argument(arg, '0');
spec.align_ = ALIGN_NUMERIC;
spec.fill_ = '0';
++s;
}
// Parse width: either a literal number or a nested '{...}' referencing
// another (integer) argument whose runtime value supplies the width.
if ('0' <= *s && *s <= '9') {
spec.width_ = internal::parse_nonnegative_int(s);
} else if (*s == '{') {
++s;
Arg width_arg = internal::is_name_start(*s) ?
parse_arg_name(s) : parse_arg_index(s);
if (*s++ != '}')
FMT_THROW(FormatError("invalid format string"));
ULongLong value = 0;
switch (width_arg.type) {
case Arg::INT:
if (width_arg.int_value < 0)
FMT_THROW(FormatError("negative width"));
value = width_arg.int_value;
break;
case Arg::UINT:
value = width_arg.uint_value;
break;
case Arg::LONG_LONG:
if (width_arg.long_long_value < 0)
FMT_THROW(FormatError("negative width"));
value = width_arg.long_long_value;
break;
case Arg::ULONG_LONG:
value = width_arg.ulong_long_value;
break;
default:
FMT_THROW(FormatError("width is not integer"));
}
unsigned max_int = (std::numeric_limits<int>::max)();
if (value > max_int)
FMT_THROW(FormatError("number is too big"));
spec.width_ = static_cast<int>(value);
}
// Parse precision: '.', then a literal number or a nested '{...}'
// argument reference, mirroring the width handling above.
if (*s == '.') {
++s;
spec.precision_ = 0;
if ('0' <= *s && *s <= '9') {
spec.precision_ = internal::parse_nonnegative_int(s);
} else if (*s == '{') {
++s;
Arg precision_arg = internal::is_name_start(*s) ?
parse_arg_name(s) : parse_arg_index(s);
if (*s++ != '}')
FMT_THROW(FormatError("invalid format string"));
ULongLong value = 0;
switch (precision_arg.type) {
case Arg::INT:
if (precision_arg.int_value < 0)
FMT_THROW(FormatError("negative precision"));
value = precision_arg.int_value;
break;
case Arg::UINT:
value = precision_arg.uint_value;
break;
case Arg::LONG_LONG:
if (precision_arg.long_long_value < 0)
FMT_THROW(FormatError("negative precision"));
value = precision_arg.long_long_value;
break;
case Arg::ULONG_LONG:
value = precision_arg.ulong_long_value;
break;
default:
FMT_THROW(FormatError("precision is not integer"));
}
unsigned max_int = (std::numeric_limits<int>::max)();
if (value > max_int)
FMT_THROW(FormatError("number is too big"));
spec.precision_ = static_cast<int>(value);
} else {
FMT_THROW(FormatError("missing precision specifier"));
}
// Precision is only meaningful for strings and floating point; reject
// it for integer and pointer arguments.
if (arg.type <= Arg::LAST_INTEGER_TYPE || arg.type == Arg::POINTER) {
FMT_THROW(FormatError(
fmt::format("precision not allowed in {} format specifier",
arg.type == Arg::POINTER ? "pointer" : "integer")));
}
}
// Parse type (presentation) character, if any.
if (*s != '}' && *s)
spec.type_ = static_cast<char>(*s++);
}
if (*s++ != '}')
FMT_THROW(FormatError("missing '}' in format string"));
// Format argument. s - 1 points at the closing '}' for error reporting.
ArgFormatter(*this, spec, s - 1).visit(arg);
return s;
}
// Top-level formatting driver: scans the format string, copying literal
// text to the writer, handling the escapes "{{" and "}}", and dispatching
// each replacement field to format(s, arg) above. start tracks the
// beginning of the pending literal run that has not yet been written.
template <typename Char, typename AF>
void BasicFormatter<Char, AF>::format(BasicCStringRef<Char> format_str) {
const Char *s = format_str.c_str();
const Char *start = s;
while (*s) {
Char c = *s++;
if (c != '{' && c != '}') continue;
// A doubled brace is an escape: write through the first brace only.
if (*s == c) {
write(writer_, start, s);
start = ++s;
continue;
}
if (c == '}')
FMT_THROW(FormatError("unmatched '}' in format string"));
// Flush the literal text preceding the '{', then parse the field.
write(writer_, start, s - 1);
internal::Arg arg = internal::is_name_start(*s) ?
parse_arg_name(s) : parse_arg_index(s);
start = s = format(s, arg);
}
// Write any trailing literal text.
write(writer_, start, s);
}
// A lightweight view over an iterator range [first, last) plus a separator,
// produced by fmt::join(). Formatting is performed lazily by the matching
// format_arg overload below; this struct stores no formatted output itself.
template <typename Char, typename It>
struct ArgJoin {
It first;
It last;
BasicCStringRef<Char> sep;
ArgJoin(It first, It last, const BasicCStringRef<Char>& sep) :
first(first),
last(last),
sep(sep) {}
};
// Returns an object that formats the range [first, last) with elements
// separated by sep, e.g. fmt::format("{}", fmt::join(v.begin(), v.end(), ", ")).
template <typename It>
ArgJoin<char, It> join(It first, It last, const BasicCStringRef<char>& sep) {
return ArgJoin<char, It>(first, last, sep);
}
// Wide-character overload of join.
template <typename It>
ArgJoin<wchar_t, It> join(It first, It last, const BasicCStringRef<wchar_t>& sep) {
return ArgJoin<wchar_t, It>(first, last, sep);
}
#if FMT_HAS_GXX_CXX11
// C++11 convenience overload: joins an entire range (anything usable with
// std::begin/std::end) with the given separator.
template <typename Range>
auto join(const Range& range, const BasicCStringRef<char>& sep)
-> ArgJoin<char, decltype(std::begin(range))> {
return join(std::begin(range), std::end(range), sep);
}
// Wide-character overload of the range-based join.
template <typename Range>
auto join(const Range& range, const BasicCStringRef<wchar_t>& sep)
-> ArgJoin<wchar_t, decltype(std::begin(range))> {
return join(std::begin(range), std::end(range), sep);
}
#endif
// Formats an ArgJoin by applying the field's format spec to every element
// of the range, writing e.sep between elements. The same spec substring is
// reused for each element by rewinding format_str to the saved position.
template <typename ArgFormatter, typename Char, typename It>
void format_arg(fmt::BasicFormatter<Char, ArgFormatter> &f,
const Char *&format_str, const ArgJoin<Char, It>& e) {
// Locate the matching '}' of this field, honoring nested '{...}' (e.g.
// dynamic width/precision) by tracking brace nesting depth.
const Char* end = format_str;
int brace_level = 1;
while (*end) {
if (*end == '}' && --brace_level == 0)
break;
if (*end == '{')
++brace_level;
++end;
}
if (*end != '}')
FMT_THROW(FormatError("missing '}' in format string"));
It it = e.first;
if (it != e.last) {
// Format the first element, then separator + element for the rest,
// rewinding format_str so each element sees the same spec.
const Char* save = format_str;
f.format(format_str, internal::MakeArg<fmt::BasicFormatter<Char, ArgFormatter> >(*it++));
while (it != e.last) {
f.writer().write(e.sep);
format_str = save;
f.format(format_str, internal::MakeArg<fmt::BasicFormatter<Char, ArgFormatter> >(*it++));
}
}
// Resume parsing past the closing '}' regardless of range emptiness.
format_str = end + 1;
}
} // namespace fmt
#if FMT_USE_USER_DEFINED_LITERALS
namespace fmt {
namespace internal {
// Holds the format string captured by the "..."_format literal; invoking
// the object with arguments forwards to fmt::format.
template <typename Char>
struct UdlFormat {
const Char *str;
template <typename... Args>
auto operator()(Args && ... args) const
-> decltype(format(str, std::forward<Args>(args)...)) {
return format(str, std::forward<Args>(args)...);
}
};
// Holds the argument name captured by the "..."_a literal; assigning a
// value produces a named argument for fmt::format / fmt::print.
template <typename Char>
struct UdlArg {
const Char *str;
template <typename T>
NamedArgWithType<Char, T> operator=(T &&value) const {
return {str, std::forward<T>(value)};
}
};
} // namespace internal
inline namespace literals {
/**
\rst
C++11 literal equivalent of :func:`fmt::format`.
**Example**::
using namespace fmt::literals;
std::string message = "The answer is {}"_format(42);
\endrst
*/
inline internal::UdlFormat<char>
operator"" _format(const char *s, std::size_t) { return {s}; }
// Wide-character overload of the _format literal.
inline internal::UdlFormat<wchar_t>
operator"" _format(const wchar_t *s, std::size_t) { return {s}; }
/**
\rst
C++11 literal equivalent of :func:`fmt::arg`.
**Example**::
using namespace fmt::literals;
print("Elapsed time: {s:.2f} seconds", "s"_a=1.23);
\endrst
*/
inline internal::UdlArg<char>
operator"" _a(const char *s, std::size_t) { return {s}; }
// Wide-character overload of the _a literal.
inline internal::UdlArg<wchar_t>
operator"" _a(const wchar_t *s, std::size_t) { return {s}; }
} // inline namespace literals
#endif // FMT_USE_USER_DEFINED_LITERALS
// Restore warnings.
#if FMT_GCC_VERSION >= 406
# pragma GCC diagnostic pop
#endif
#if defined(__clang__) && !defined(FMT_ICC_VERSION)
# pragma clang diagnostic pop
#endif
#ifdef FMT_HEADER_ONLY
# define FMT_FUNC inline
# include "format.cc"
#else
# define FMT_FUNC
#endif
#endif // FMT_FORMAT_H_
|
/*
Formatting library for C++
Copyright (c) 2012 - 2016, Victor Zverovich
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// transition helper
#ifdef FMT_FORMAT_PROVIDE_PRINTF
#include "printf.h"
#endif
#ifndef FMT_FORMAT_H_
#define FMT_FORMAT_H_
#define FMT_INCLUDE
#include <cassert>
#include <clocale>
#include <cmath>
#include <cstdio>
#include <cstring>
#include <limits>
#include <memory>
#include <stdexcept>
#include <string>
#include <vector>
#include <utility> // for std::pair
#undef FMT_INCLUDE
// The fmt library version in the form major * 10000 + minor * 100 + patch.
#define FMT_VERSION 40101
#if defined(__has_include)
# define FMT_HAS_INCLUDE(x) __has_include(x)
#else
# define FMT_HAS_INCLUDE(x) 0
#endif
#if (FMT_HAS_INCLUDE(<string_view>) && __cplusplus > 201402L) || \
(defined(_MSVC_LANG) && _MSVC_LANG > 201402L && _MSC_VER >= 1910)
# include <string_view>
# define FMT_HAS_STRING_VIEW 1
# define FMT_HAS_EXPERIMENTAL_STRING_VIEW 0
#else
# define FMT_HAS_STRING_VIEW 0
# if (FMT_HAS_INCLUDE(<experimental/string_view>) && __cplusplus >= 201402L)
# include <experimental/string_view>
# define FMT_HAS_EXPERIMENTAL_STRING_VIEW 1
# else
# define FMT_HAS_EXPERIMENTAL_STRING_VIEW 0
# endif
#endif
#if defined _SECURE_SCL && _SECURE_SCL
# define FMT_SECURE_SCL _SECURE_SCL
#else
# define FMT_SECURE_SCL 0
#endif
#if FMT_SECURE_SCL
# include <iterator>
#endif
#ifdef _MSC_VER
# define FMT_MSC_VER _MSC_VER
#else
# define FMT_MSC_VER 0
#endif
#if FMT_MSC_VER && FMT_MSC_VER <= 1500
typedef unsigned __int32 uint32_t;
typedef unsigned __int64 uint64_t;
typedef __int64 intmax_t;
#else
#include <stdint.h>
#endif
#if !defined(FMT_HEADER_ONLY) && defined(_WIN32)
# ifdef FMT_EXPORT
# define FMT_API __declspec(dllexport)
# elif defined(FMT_SHARED)
# define FMT_API __declspec(dllimport)
# endif
#endif
#ifndef FMT_API
# define FMT_API
#endif
#ifdef __GNUC__
# define FMT_GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)
# define FMT_GCC_EXTENSION __extension__
# if FMT_GCC_VERSION >= 406
# pragma GCC diagnostic push
// Disable the warning about "long long" which is sometimes reported even
// when using __extension__.
# pragma GCC diagnostic ignored "-Wlong-long"
// Disable the warning about declaration shadowing because it affects too
// many valid cases.
# pragma GCC diagnostic ignored "-Wshadow"
// Disable the warning about implicit conversions that may change the sign of
// an integer; silencing it otherwise would require many explicit casts.
# pragma GCC diagnostic ignored "-Wsign-conversion"
# endif
# if __cplusplus >= 201103L || defined __GXX_EXPERIMENTAL_CXX0X__
# define FMT_HAS_GXX_CXX11 1
# endif
#else
# define FMT_GCC_VERSION 0
# define FMT_GCC_EXTENSION
# define FMT_HAS_GXX_CXX11 0
#endif
#if defined(__INTEL_COMPILER)
# define FMT_ICC_VERSION __INTEL_COMPILER
#elif defined(__ICL)
# define FMT_ICC_VERSION __ICL
#endif
#if defined(__clang__) && !defined(FMT_ICC_VERSION)
# define FMT_CLANG_VERSION (__clang_major__ * 100 + __clang_minor__)
# pragma clang diagnostic push
# pragma clang diagnostic ignored "-Wdocumentation-unknown-command"
# pragma clang diagnostic ignored "-Wpadded"
#endif
#ifdef __GNUC_LIBSTD__
# define FMT_GNUC_LIBSTD_VERSION (__GNUC_LIBSTD__ * 100 + __GNUC_LIBSTD_MINOR__)
#endif
#ifdef __has_feature
# define FMT_HAS_FEATURE(x) __has_feature(x)
#else
# define FMT_HAS_FEATURE(x) 0
#endif
#ifdef __has_builtin
# define FMT_HAS_BUILTIN(x) __has_builtin(x)
#else
# define FMT_HAS_BUILTIN(x) 0
#endif
#ifdef __has_cpp_attribute
# define FMT_HAS_CPP_ATTRIBUTE(x) __has_cpp_attribute(x)
#else
# define FMT_HAS_CPP_ATTRIBUTE(x) 0
#endif
#if FMT_HAS_CPP_ATTRIBUTE(maybe_unused)
# define FMT_HAS_CXX17_ATTRIBUTE_MAYBE_UNUSED
// VC++ 1910 support /std: option and that will set _MSVC_LANG macro
// Clang with Microsoft CodeGen doesn't define _MSVC_LANG macro
#elif defined(_MSVC_LANG) && _MSVC_LANG > 201402 && _MSC_VER >= 1910
# define FMT_HAS_CXX17_ATTRIBUTE_MAYBE_UNUSED
#endif
#ifdef FMT_HAS_CXX17_ATTRIBUTE_MAYBE_UNUSED
# define FMT_MAYBE_UNUSED [[maybe_unused]]
// g++/clang++ also support [[gnu::unused]]. However, we don't use it.
#elif defined(__GNUC__)
# define FMT_MAYBE_UNUSED __attribute__((unused))
#else
# define FMT_MAYBE_UNUSED
#endif
// Use the compiler's attribute noreturn
#if defined(__MINGW32__) || defined(__MINGW64__)
# define FMT_NORETURN __attribute__((noreturn))
#elif FMT_HAS_CPP_ATTRIBUTE(noreturn) && __cplusplus >= 201103L
# define FMT_NORETURN [[noreturn]]
#else
# define FMT_NORETURN
#endif
#ifndef FMT_USE_VARIADIC_TEMPLATES
// Variadic templates are available in GCC since version 4.4
// (http://gcc.gnu.org/projects/cxx0x.html) and in Visual C++
// since version 2013.
# define FMT_USE_VARIADIC_TEMPLATES \
(FMT_HAS_FEATURE(cxx_variadic_templates) || \
(FMT_GCC_VERSION >= 404 && FMT_HAS_GXX_CXX11) || FMT_MSC_VER >= 1800)
#endif
#ifndef FMT_USE_RVALUE_REFERENCES
// Don't use rvalue references when compiling with clang and an old libstdc++
// as the latter doesn't provide std::move.
# if defined(FMT_GNUC_LIBSTD_VERSION) && FMT_GNUC_LIBSTD_VERSION <= 402
# define FMT_USE_RVALUE_REFERENCES 0
# else
# define FMT_USE_RVALUE_REFERENCES \
(FMT_HAS_FEATURE(cxx_rvalue_references) || \
(FMT_GCC_VERSION >= 403 && FMT_HAS_GXX_CXX11) || FMT_MSC_VER >= 1600)
# endif
#endif
#if __cplusplus >= 201103L || FMT_MSC_VER >= 1700
# define FMT_USE_ALLOCATOR_TRAITS 1
#else
# define FMT_USE_ALLOCATOR_TRAITS 0
#endif
// Check if exceptions are disabled.
#if defined(__GNUC__) && !defined(__EXCEPTIONS)
# define FMT_EXCEPTIONS 0
#endif
#if FMT_MSC_VER && !_HAS_EXCEPTIONS
# define FMT_EXCEPTIONS 0
#endif
#ifndef FMT_EXCEPTIONS
# define FMT_EXCEPTIONS 1
#endif
#ifndef FMT_THROW
# if FMT_EXCEPTIONS
# define FMT_THROW(x) throw x
# else
# define FMT_THROW(x) assert(false)
# endif
#endif
// Define FMT_USE_NOEXCEPT to make fmt use noexcept (C++11 feature).
#ifndef FMT_USE_NOEXCEPT
# define FMT_USE_NOEXCEPT 0
#endif
#if FMT_USE_NOEXCEPT || FMT_HAS_FEATURE(cxx_noexcept) || \
(FMT_GCC_VERSION >= 408 && FMT_HAS_GXX_CXX11) || \
FMT_MSC_VER >= 1900
# define FMT_DETECTED_NOEXCEPT noexcept
#else
# define FMT_DETECTED_NOEXCEPT throw()
#endif
#ifndef FMT_NOEXCEPT
# if FMT_EXCEPTIONS
# define FMT_NOEXCEPT FMT_DETECTED_NOEXCEPT
# else
# define FMT_NOEXCEPT
# endif
#endif
// This is needed because GCC still uses throw() in its headers when exceptions
// are disabled.
#if FMT_GCC_VERSION
# define FMT_DTOR_NOEXCEPT FMT_DETECTED_NOEXCEPT
#else
# define FMT_DTOR_NOEXCEPT FMT_NOEXCEPT
#endif
#ifndef FMT_OVERRIDE
# if (defined(FMT_USE_OVERRIDE) && FMT_USE_OVERRIDE) || FMT_HAS_FEATURE(cxx_override) || \
(FMT_GCC_VERSION >= 408 && FMT_HAS_GXX_CXX11) || \
FMT_MSC_VER >= 1900
# define FMT_OVERRIDE override
# else
# define FMT_OVERRIDE
# endif
#endif
#ifndef FMT_NULL
# if FMT_HAS_FEATURE(cxx_nullptr) || \
(FMT_GCC_VERSION >= 408 && FMT_HAS_GXX_CXX11) || \
FMT_MSC_VER >= 1600
# define FMT_NULL nullptr
# else
# define FMT_NULL NULL
# endif
#endif
// A macro to disallow the copy constructor and operator= functions
// This should be used in the private: declarations for a class
#ifndef FMT_USE_DELETED_FUNCTIONS
# define FMT_USE_DELETED_FUNCTIONS 0
#endif
#if FMT_USE_DELETED_FUNCTIONS || FMT_HAS_FEATURE(cxx_deleted_functions) || \
(FMT_GCC_VERSION >= 404 && FMT_HAS_GXX_CXX11) || FMT_MSC_VER >= 1800
# define FMT_DELETED_OR_UNDEFINED = delete
# define FMT_DISALLOW_COPY_AND_ASSIGN(TypeName) \
TypeName(const TypeName&) = delete; \
TypeName& operator=(const TypeName&) = delete
#else
# define FMT_DELETED_OR_UNDEFINED
# define FMT_DISALLOW_COPY_AND_ASSIGN(TypeName) \
TypeName(const TypeName&); \
TypeName& operator=(const TypeName&)
#endif
#ifndef FMT_USE_DEFAULTED_FUNCTIONS
# define FMT_USE_DEFAULTED_FUNCTIONS 0
#endif
#ifndef FMT_DEFAULTED_COPY_CTOR
# if FMT_USE_DEFAULTED_FUNCTIONS || FMT_HAS_FEATURE(cxx_defaulted_functions) || \
(FMT_GCC_VERSION >= 404 && FMT_HAS_GXX_CXX11) || FMT_MSC_VER >= 1800
# define FMT_DEFAULTED_COPY_CTOR(TypeName) \
TypeName(const TypeName&) = default;
# else
# define FMT_DEFAULTED_COPY_CTOR(TypeName)
# endif
#endif
#ifndef FMT_USE_USER_DEFINED_LITERALS
// All compilers which support UDLs also support variadic templates. This
// makes the fmt::literals implementation easier. However, an explicit check
// for variadic templates is added here just in case.
// For Intel's compiler both it and the system gcc/msc must support UDLs.
# if FMT_USE_VARIADIC_TEMPLATES && FMT_USE_RVALUE_REFERENCES && \
(FMT_HAS_FEATURE(cxx_user_literals) || \
(FMT_GCC_VERSION >= 407 && FMT_HAS_GXX_CXX11) || FMT_MSC_VER >= 1900) && \
(!defined(FMT_ICC_VERSION) || FMT_ICC_VERSION >= 1500)
# define FMT_USE_USER_DEFINED_LITERALS 1
# else
# define FMT_USE_USER_DEFINED_LITERALS 0
# endif
#endif
#ifndef FMT_USE_EXTERN_TEMPLATES
# define FMT_USE_EXTERN_TEMPLATES \
(FMT_CLANG_VERSION >= 209 || (FMT_GCC_VERSION >= 303 && FMT_HAS_GXX_CXX11))
#endif
#ifdef FMT_HEADER_ONLY
// If header only do not use extern templates.
# undef FMT_USE_EXTERN_TEMPLATES
# define FMT_USE_EXTERN_TEMPLATES 0
#endif
#ifndef FMT_ASSERT
# define FMT_ASSERT(condition, message) assert((condition) && message)
#endif
// __builtin_clz is broken in clang with Microsoft CodeGen:
// https://github.com/fmtlib/fmt/issues/519
#ifndef _MSC_VER
# if FMT_GCC_VERSION >= 400 || FMT_HAS_BUILTIN(__builtin_clz)
# define FMT_BUILTIN_CLZ(n) __builtin_clz(n)
# endif
# if FMT_GCC_VERSION >= 400 || FMT_HAS_BUILTIN(__builtin_clzll)
# define FMT_BUILTIN_CLZLL(n) __builtin_clzll(n)
# endif
#endif
// Some compilers masquerade as both MSVC and GCC-likes or
// otherwise support __builtin_clz and __builtin_clzll, so
// only define FMT_BUILTIN_CLZ using the MSVC intrinsics
// if the clz and clzll builtins are not available.
#if FMT_MSC_VER && !defined(FMT_BUILTIN_CLZLL) && !defined(_MANAGED)
# include <intrin.h> // _BitScanReverse, _BitScanReverse64
namespace fmt {
namespace internal {
// avoid Clang with Microsoft CodeGen's -Wunknown-pragmas warning
# ifndef __clang__
# pragma intrinsic(_BitScanReverse)
# endif
// Counts leading zero bits of a nonzero 32-bit value using the MSVC
// _BitScanReverse intrinsic (fallback for compilers without __builtin_clz).
// Behavior is undefined for x == 0, matching __builtin_clz.
inline uint32_t clz(uint32_t x) {
unsigned long r = 0;
_BitScanReverse(&r, x);
assert(x != 0);
// Static analysis complains about using uninitialized data
// "r", but the only way that can happen is if "x" is 0,
// which the callers guarantee to not happen.
# pragma warning(suppress: 6102)
return 31 - r;
}
# define FMT_BUILTIN_CLZ(n) fmt::internal::clz(n)
// avoid Clang with Microsoft CodeGen's -Wunknown-pragmas warning
# if defined(_WIN64) && !defined(__clang__)
# pragma intrinsic(_BitScanReverse64)
# endif
// Counts leading zero bits of a nonzero 64-bit value. On 64-bit Windows a
// single _BitScanReverse64 suffices; on 32-bit targets the high and low
// 32-bit halves are scanned separately. Undefined for x == 0.
inline uint32_t clzll(uint64_t x) {
unsigned long r = 0;
# ifdef _WIN64
_BitScanReverse64(&r, x);
# else
// Scan the high 32 bits.
if (_BitScanReverse(&r, static_cast<uint32_t>(x >> 32)))
return 63 - (r + 32);
// Scan the low 32 bits.
_BitScanReverse(&r, static_cast<uint32_t>(x));
# endif
assert(x != 0);
// Static analysis complains about using uninitialized data
// "r", but the only way that can happen is if "x" is 0,
// which the callers guarantee to not happen.
# pragma warning(suppress: 6102)
return 63 - r;
}
# define FMT_BUILTIN_CLZLL(n) fmt::internal::clzll(n)
}
}
#endif
namespace fmt {
namespace internal {
// A type deliberately convertible from anything via the variadic shims
// below. Its size (two ints) differs from the shims' would-be real return
// types, letting sizeof-based overload checks detect whether a genuine
// system function (isinf, isnan, signbit, ...) was selected instead.
struct DummyInt {
int data[2];
operator int() const { return 0; }
};
typedef std::numeric_limits<fmt::internal::DummyInt> FPUtil;
// Dummy implementations of system functions such as signbit and ecvt called
// if the latter are not available. Each is a worst-match (...) overload.
inline DummyInt signbit(...) { return DummyInt(); }
inline DummyInt _ecvt_s(...) { return DummyInt(); }
inline DummyInt isinf(...) { return DummyInt(); }
inline DummyInt _finite(...) { return DummyInt(); }
inline DummyInt isnan(...) { return DummyInt(); }
inline DummyInt _isnan(...) { return DummyInt(); }
// A helper function to suppress bogus "conditional expression is constant"
// warnings.
template <typename T>
inline T const_check(T value) { return value; }
}
} // namespace fmt
namespace std {
// Standard permits specialization of std::numeric_limits. This specialization
// is used to resolve ambiguity between isinf and std::isinf in glibc:
// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=48891
// and the same for isnan and signbit.
template <>
class numeric_limits<fmt::internal::DummyInt> :
public std::numeric_limits<int> {
public:
// Portable version of isinf. Uses sizeof to detect at compile time whether
// a real isinf overload exists; otherwise falls back to _finite (MSVC).
template <typename T>
static bool isinfinity(T x) {
using namespace fmt::internal;
// The resolution "priority" is:
// isinf macro > std::isinf > ::isinf > fmt::internal::isinf
if (const_check(sizeof(isinf(x)) != sizeof(fmt::internal::DummyInt))) {
return isinf(x) != 0;
}
return !_finite(static_cast<double>(x));
}
// Portable version of isnan, with the same sizeof-based detection and a
// _isnan (MSVC) fallback.
template <typename T>
static bool isnotanumber(T x) {
using namespace fmt::internal;
if (const_check(sizeof(isnan(x)) != sizeof(fmt::internal::DummyInt))) {
return isnan(x) != 0;
}
return _isnan(static_cast<double>(x)) != 0;
}
// Portable version of signbit. The _ecvt_s fallback recovers the sign of
// negative zero and negative NaN, which x < 0 alone cannot detect.
static bool isnegative(double x) {
using namespace fmt::internal;
if (const_check(sizeof(signbit(x)) != sizeof(fmt::internal::DummyInt))) {
return signbit(x) != 0;
}
if (x < 0) return true;
if (!isnotanumber(x)) return false;
int dec = 0, sign = 0;
char buffer[2]; // The buffer size must be >= 2 or _ecvt_s will fail.
_ecvt_s(buffer, sizeof(buffer), x, 0, &dec, &sign);
return sign != 0;
}
};
} // namespace std
namespace fmt {
// Fix the warning about long long on older versions of GCC
// that don't support the diagnostic pragma.
FMT_GCC_EXTENSION typedef long long LongLong;
FMT_GCC_EXTENSION typedef unsigned long long ULongLong;
#if FMT_USE_RVALUE_REFERENCES
using std::move;
#endif
template <typename Char>
class BasicWriter;
typedef BasicWriter<char> Writer;
typedef BasicWriter<wchar_t> WWriter;
template <typename Char>
class ArgFormatter;
struct FormatSpec;
template <typename Impl, typename Char, typename Spec = fmt::FormatSpec>
class BasicPrintfArgFormatter;
template <typename CharType,
typename ArgFormatter = fmt::ArgFormatter<CharType> >
class BasicFormatter;
/**
\rst
A string reference. It can be constructed from a C string or
``std::basic_string``.
You can use one of the following typedefs for common character types:
+------------+-------------------------+
| Type | Definition |
+============+=========================+
| StringRef | BasicStringRef<char> |
+------------+-------------------------+
| WStringRef | BasicStringRef<wchar_t> |
+------------+-------------------------+
This class is most useful as a parameter type to allow passing
different types of strings to a function, for example::
template <typename... Args>
std::string format(StringRef format_str, const Args & ... args);
format("{}", 42);
format(std::string("{}"), 42);
\endrst
*/
template <typename Char>
class BasicStringRef {
private:
// Non-owning pointer/length pair; the referenced characters must outlive
// this object and are not required to be null-terminated.
const Char *data_;
std::size_t size_;
public:
/** Constructs a string reference object from a C string and a size. */
BasicStringRef(const Char *s, std::size_t size) : data_(s), size_(size) {}
/**
\rst
Constructs a string reference object from a C string computing
the size with ``std::char_traits<Char>::length``.
\endrst
*/
BasicStringRef(const Char *s)
: data_(s), size_(std::char_traits<Char>::length(s)) {}
/**
\rst
Constructs a string reference from a ``std::basic_string`` object.
\endrst
*/
template <typename Allocator>
BasicStringRef(
const std::basic_string<Char, std::char_traits<Char>, Allocator> &s)
: data_(s.c_str()), size_(s.size()) {}
#if FMT_HAS_STRING_VIEW
/**
\rst
Constructs a string reference from a ``std::basic_string_view`` object.
\endrst
*/
BasicStringRef(
const std::basic_string_view<Char, std::char_traits<Char>> &s)
: data_(s.data()), size_(s.size()) {}
/**
\rst
Converts a string reference to an ``std::string_view`` object.
\endrst
*/
explicit operator std::basic_string_view<Char>() const FMT_NOEXCEPT {
return std::basic_string_view<Char>(data_, size_);
}
#endif
#if FMT_HAS_EXPERIMENTAL_STRING_VIEW
/**
\rst
Constructs a string reference from a ``std::experimental::basic_string_view`` object.
\endrst
*/
BasicStringRef(
const std::experimental::basic_string_view<Char, std::char_traits<Char>> &s)
: data_(s.data()), size_(s.size()) {}
/**
\rst
Converts a string reference to an ``std::string_view`` object.
\endrst
*/
explicit operator std::experimental::basic_string_view<Char>() const FMT_NOEXCEPT {
return std::experimental::basic_string_view<Char>(data_, size_);
}
#endif
/**
\rst
Converts a string reference to an ``std::string`` object.
\endrst
*/
std::basic_string<Char> to_string() const {
return std::basic_string<Char>(data_, size_);
}
/** Returns a pointer to the string data. */
const Char *data() const { return data_; }
/** Returns the string size. */
std::size_t size() const { return size_; }
// Lexicographically compare this string reference to other. Returns a
// negative, zero or positive value; ties on the common prefix are broken
// by length, so "ab" < "abc".
int compare(BasicStringRef other) const {
std::size_t size = size_ < other.size_ ? size_ : other.size_;
int result = std::char_traits<Char>::compare(data_, other.data_, size);
if (result == 0)
result = size_ == other.size_ ? 0 : (size_ < other.size_ ? -1 : 1);
return result;
}
// Relational operators, all defined in terms of compare() above.
friend bool operator==(BasicStringRef lhs, BasicStringRef rhs) {
return lhs.compare(rhs) == 0;
}
friend bool operator!=(BasicStringRef lhs, BasicStringRef rhs) {
return lhs.compare(rhs) != 0;
}
friend bool operator<(BasicStringRef lhs, BasicStringRef rhs) {
return lhs.compare(rhs) < 0;
}
friend bool operator<=(BasicStringRef lhs, BasicStringRef rhs) {
return lhs.compare(rhs) <= 0;
}
friend bool operator>(BasicStringRef lhs, BasicStringRef rhs) {
return lhs.compare(rhs) > 0;
}
friend bool operator>=(BasicStringRef lhs, BasicStringRef rhs) {
return lhs.compare(rhs) >= 0;
}
};
typedef BasicStringRef<char> StringRef;
typedef BasicStringRef<wchar_t> WStringRef;
/**
\rst
A reference to a null terminated string. It can be constructed from a C
string or ``std::basic_string``.
You can use one of the following typedefs for common character types:
+-------------+--------------------------+
| Type | Definition |
+=============+==========================+
| CStringRef | BasicCStringRef<char> |
+-------------+--------------------------+
| WCStringRef | BasicCStringRef<wchar_t> |
+-------------+--------------------------+
This class is most useful as a parameter type to allow passing
different types of strings to a function, for example::
template <typename... Args>
std::string format(CStringRef format_str, const Args & ... args);
format("{}", 42);
format(std::string("{}"), 42);
\endrst
*/
template <typename Char>
class BasicCStringRef {
private:
// Non-owning pointer to a null-terminated string; unlike BasicStringRef
// no length is stored, so the terminator is the contract.
const Char *data_;
public:
/** Constructs a string reference object from a C string. */
BasicCStringRef(const Char *s) : data_(s) {}
/**
\rst
Constructs a string reference from a ``std::basic_string`` object.
\endrst
*/
template <typename Allocator>
BasicCStringRef(
const std::basic_string<Char, std::char_traits<Char>, Allocator> &s)
: data_(s.c_str()) {}
/** Returns the pointer to a C string. */
const Char *c_str() const { return data_; }
};
typedef BasicCStringRef<char> CStringRef;
typedef BasicCStringRef<wchar_t> WCStringRef;
/** A formatting error such as invalid format string. */
class FormatError : public std::runtime_error {
public:
explicit FormatError(CStringRef message)
: std::runtime_error(message.c_str()) {}
FormatError(const FormatError &ferr) : std::runtime_error(ferr) {}
// Out-of-line destructor anchors the vtable in the library binary.
FMT_API ~FormatError() FMT_DTOR_NOEXCEPT FMT_OVERRIDE;
};
namespace internal {
// MakeUnsigned<T>::Type gives an unsigned type corresponding to integer type T.
// The primary template is the identity (used for already-unsigned types);
// signed integer types are mapped via the specializations below.
template <typename T>
struct MakeUnsigned { typedef T Type; };
#define FMT_SPECIALIZE_MAKE_UNSIGNED(T, U) \
template <> \
struct MakeUnsigned<T> { typedef U Type; }
FMT_SPECIALIZE_MAKE_UNSIGNED(char, unsigned char);
FMT_SPECIALIZE_MAKE_UNSIGNED(signed char, unsigned char);
FMT_SPECIALIZE_MAKE_UNSIGNED(short, unsigned short);
FMT_SPECIALIZE_MAKE_UNSIGNED(int, unsigned);
FMT_SPECIALIZE_MAKE_UNSIGNED(long, unsigned long);
FMT_SPECIALIZE_MAKE_UNSIGNED(LongLong, ULongLong);
// Casts nonnegative integer to unsigned. Asserts (rather than throws) on a
// negative input because callers are expected to have validated the value.
template <typename Int>
inline typename MakeUnsigned<Int>::Type to_unsigned(Int value) {
FMT_ASSERT(value >= 0, "negative value");
return static_cast<typename MakeUnsigned<Int>::Type>(value);
}
// The number of characters to store in the MemoryBuffer object itself
// to avoid dynamic memory allocation.
enum { INLINE_BUFFER_SIZE = 500 };
#if FMT_SECURE_SCL
// Use checked iterator to avoid warnings on MSVC.
template <typename T>
inline stdext::checked_array_iterator<T*> make_ptr(T *ptr, std::size_t size) {
return stdext::checked_array_iterator<T*>(ptr, size);
}
#else
// Without secure SCL the size is ignored and the raw pointer is returned;
// both variants are interchangeable as arguments to std algorithms.
template <typename T>
inline T *make_ptr(T *ptr, std::size_t) { return ptr; }
#endif
} // namespace internal
/**
\rst
A buffer supporting a subset of ``std::vector``'s operations.
\endrst
*/
template <typename T>
class Buffer {
private:
FMT_DISALLOW_COPY_AND_ASSIGN(Buffer);
protected:
// ptr_ points at storage owned by the derived class (e.g. MemoryBuffer);
// this base class never allocates or frees it.
T *ptr_;
std::size_t size_;
std::size_t capacity_;
Buffer(T *ptr = FMT_NULL, std::size_t capacity = 0)
: ptr_(ptr), size_(0), capacity_(capacity) {}
/**
\rst
Increases the buffer capacity to hold at least *size* elements updating
``ptr_`` and ``capacity_``.
\endrst
*/
virtual void grow(std::size_t size) = 0;
public:
virtual ~Buffer() {}
/** Returns the size of this buffer. */
std::size_t size() const { return size_; }
/** Returns the capacity of this buffer. */
std::size_t capacity() const { return capacity_; }
/**
Resizes the buffer. If T is a POD type new elements may not be initialized.
*/
void resize(std::size_t new_size) {
if (new_size > capacity_)
grow(new_size);
size_ = new_size;
}
/**
\rst
Reserves space to store at least *capacity* elements.
\endrst
*/
void reserve(std::size_t capacity) {
if (capacity > capacity_)
grow(capacity);
}
// Drops the contents but keeps the allocated storage for reuse.
void clear() FMT_NOEXCEPT { size_ = 0; }
void push_back(const T &value) {
if (size_ == capacity_)
grow(size_ + 1);
ptr_[size_++] = value;
}
/** Appends data to the end of the buffer. */
template <typename U>
void append(const U *begin, const U *end);
// Unchecked element access; index must be < size().
T &operator[](std::size_t index) { return ptr_[index]; }
const T &operator[](std::size_t index) const { return ptr_[index]; }
};
// Appends the elements of [begin, end) to the buffer, growing the storage
// first if needed so that reallocation happens before the copy.
template <typename T>
template <typename U>
void Buffer<T>::append(const U *begin, const U *end) {
  FMT_ASSERT(end >= begin, "negative value");
  const std::size_t count = static_cast<std::size_t>(end - begin);
  const std::size_t required = size_ + count;
  if (required > capacity_)
    grow(required);
  // make_ptr yields a checked iterator under _SECURE_SCL, a raw pointer
  // otherwise; either way the destination starts at the current end.
  std::uninitialized_copy(begin, end,
                          internal::make_ptr(ptr_, capacity_) + size_);
  size_ = required;
}
namespace internal {
// A memory buffer for trivially copyable/constructible types with the first
// SIZE elements stored in the object itself.
template <typename T, std::size_t SIZE, typename Allocator = std::allocator<T> >
class MemoryBuffer : private Allocator, public Buffer<T> {
private:
// Inline storage used until the contents outgrow SIZE elements; the
// Allocator base is only exercised once ptr_ leaves data_.
T data_[SIZE];
// Deallocate memory allocated by the buffer. A no-op while the buffer is
// still using the inline array.
void deallocate() {
if (this->ptr_ != data_) Allocator::deallocate(this->ptr_, this->capacity_);
}
protected:
void grow(std::size_t size) FMT_OVERRIDE;
public:
explicit MemoryBuffer(const Allocator &alloc = Allocator())
: Allocator(alloc), Buffer<T>(data_, SIZE) {}
~MemoryBuffer() FMT_OVERRIDE { deallocate(); }
#if FMT_USE_RVALUE_REFERENCES
private:
// Move data from other to this buffer. Inline contents must be copied
// element-wise since data_ addresses differ between objects; heap
// contents transfer by pointer.
void move(MemoryBuffer &other) {
Allocator &this_alloc = *this, &other_alloc = other;
this_alloc = std::move(other_alloc);
this->size_ = other.size_;
this->capacity_ = other.capacity_;
if (other.ptr_ == other.data_) {
this->ptr_ = data_;
std::uninitialized_copy(other.data_, other.data_ + this->size_,
make_ptr(data_, this->capacity_));
} else {
this->ptr_ = other.ptr_;
// Set pointer to the inline array so that delete is not called
// when deallocating.
other.ptr_ = other.data_;
}
}
public:
MemoryBuffer(MemoryBuffer &&other) {
move(other);
}
MemoryBuffer &operator=(MemoryBuffer &&other) {
// Self-move is unsupported: deallocate() would free the storage we are
// about to adopt.
assert(this != &other);
deallocate();
move(other);
return *this;
}
#endif
// Returns a copy of the allocator associated with this buffer.
Allocator get_allocator() const { return *this; }
};
// Grows the buffer to hold at least `size` elements. Uses a 1.5x growth
// factor so repeated appends have amortized O(1) cost.
template <typename T, std::size_t SIZE, typename Allocator>
void MemoryBuffer<T, SIZE, Allocator>::grow(std::size_t size) {
  std::size_t new_capacity = this->capacity_ + this->capacity_ / 2;
  if (size > new_capacity)
    new_capacity = size;
#if FMT_USE_ALLOCATOR_TRAITS
  T *new_ptr =
      std::allocator_traits<Allocator>::allocate(*this, new_capacity, FMT_NULL);
#else
  T *new_ptr = this->allocate(new_capacity, FMT_NULL);
#endif
  // The following code doesn't throw, so the raw pointer above doesn't leak.
  std::uninitialized_copy(this->ptr_, this->ptr_ + this->size_,
                          make_ptr(new_ptr, new_capacity));
  std::size_t old_capacity = this->capacity_;
  T *old_ptr = this->ptr_;
  // Install the new storage before releasing the old one, so the buffer is
  // never left pointing at freed memory.
  this->capacity_ = new_capacity;
  this->ptr_ = new_ptr;
  // deallocate may throw (at least in principle), but it doesn't matter since
  // the buffer already uses the new storage and will deallocate it in case
  // of exception.
  if (old_ptr != data_)
    Allocator::deallocate(old_ptr, old_capacity);
}
// A fixed-size buffer. Unlike MemoryBuffer it never allocates; grow() is
// defined elsewhere (FMT_API) and is expected to signal overflow.
template <typename Char>
class FixedBuffer : public fmt::Buffer<Char> {
 public:
  FixedBuffer(Char *array, std::size_t size) : fmt::Buffer<Char>(array, size) {}

 protected:
  FMT_API void grow(std::size_t size) FMT_OVERRIDE;
};

// Character-type-independent pieces shared by the CharTraits
// specializations below.
template <typename Char>
class BasicCharTraits {
 public:
#if FMT_SECURE_SCL
  // Checked iterators catch buffer overruns under MSVC's secure SCL.
  typedef stdext::checked_array_iterator<Char*> CharPtr;
#else
  typedef Char *CharPtr;
#endif
  static Char cast(int value) { return static_cast<Char>(value); }
};

template <typename Char>
class CharTraits;

template <>
class CharTraits<char> : public BasicCharTraits<char> {
 private:
  // Conversion from wchar_t to char is not allowed.
  static char convert(wchar_t);

 public:
  static char convert(char value) { return value; }

  // Formats a floating-point number.
  template <typename T>
  FMT_API static int format_float(char *buffer, std::size_t size,
      const char *format, unsigned width, int precision, T value);
};

#if FMT_USE_EXTERN_TEMPLATES
// Suppress implicit instantiation; the explicit instantiations live in the
// library source.
extern template int CharTraits<char>::format_float<double>
    (char *buffer, std::size_t size,
     const char* format, unsigned width, int precision, double value);
extern template int CharTraits<char>::format_float<long double>
    (char *buffer, std::size_t size,
     const char* format, unsigned width, int precision, long double value);
#endif

template <>
class CharTraits<wchar_t> : public BasicCharTraits<wchar_t> {
 public:
  // Widening char to wchar_t is safe, so both conversions are provided.
  static wchar_t convert(char value) { return value; }
  static wchar_t convert(wchar_t value) { return value; }

  template <typename T>
  FMT_API static int format_float(wchar_t *buffer, std::size_t size,
      const wchar_t *format, unsigned width, int precision, T value);
};

#if FMT_USE_EXTERN_TEMPLATES
extern template int CharTraits<wchar_t>::format_float<double>
    (wchar_t *buffer, std::size_t size,
     const wchar_t* format, unsigned width, int precision, double value);
extern template int CharTraits<wchar_t>::format_float<long double>
    (wchar_t *buffer, std::size_t size,
     const wchar_t* format, unsigned width, int precision, long double value);
#endif
// Checks if a number is negative - used to avoid warnings.
// The bool parameter selects between the two implementations at compile
// time: the signed variant performs the real comparison, the unsigned
// specialization is a constant false and never emits `value < 0` on an
// unsigned type (which compilers warn about).
template <bool IsSigned>
struct SignChecker {
  template <typename T>
  static bool is_negative(T value) { return value < 0; }
};

template <>
struct SignChecker<false> {
  template <typename T>
  static bool is_negative(T) { return false; }
};

// Returns true if value is negative, false otherwise.
// Same as (value < 0) but doesn't produce warnings if T is an unsigned type.
template <typename T>
inline bool is_negative(T value) {
  typedef SignChecker<std::numeric_limits<T>::is_signed> Checker;
  return Checker::is_negative(value);
}
// Selects uint32_t if FitsIn32Bits is true, uint64_t otherwise.
template <bool FitsIn32Bits>
struct TypeSelector { typedef uint32_t Type; };

template <>
struct TypeSelector<false> { typedef uint64_t Type; };

template <typename T>
struct IntTraits {
  // Smallest of uint32_t and uint64_t that is large enough to represent
  // all values of T.
  typedef typename
    TypeSelector<std::numeric_limits<T>::digits <= 32>::Type MainType;
};

// Reports an argument/format-spec type mismatch; never returns
// (FMT_NORETURN), defined in the library source.
FMT_API FMT_NORETURN void report_unknown_type(char code, const char *type);

// Static data is placed in this class template to allow header-only
// configuration.
template <typename T = void>
struct FMT_API BasicData {
  static const uint32_t POWERS_OF_10_32[];
  static const uint64_t POWERS_OF_10_64[];
  // Two-character digit pairs; indexed as DIGITS[d * 2] / DIGITS[d * 2 + 1]
  // by format_decimal below.
  static const char DIGITS[];
};

#if FMT_USE_EXTERN_TEMPLATES
extern template struct BasicData<void>;
#endif

typedef BasicData<> Data;
#ifdef FMT_BUILTIN_CLZLL
// Returns the number of decimal digits in n. Leading zeros are not counted
// except for n == 0 in which case count_digits returns 1.
inline unsigned count_digits(uint64_t n) {
  // Based on http://graphics.stanford.edu/~seander/bithacks.html#IntegerLog10
  // and the benchmark https://github.com/localvoid/cxx-benchmark-count-digits.
  int t = (64 - FMT_BUILTIN_CLZLL(n | 1)) * 1233 >> 12;
  return to_unsigned(t) - (n < Data::POWERS_OF_10_64[t]) + 1;
}
#else
// Fallback version of count_digits used when __builtin_clz is not available.
// Returns the number of decimal digits in n (1 for n == 0).
inline unsigned count_digits(uint64_t n) {
  // Integer division is slow, so strip four digits per iteration and then
  // finish with plain comparisons. The idea comes from the talk by
  // Alexandrescu "Three Optimization Tips for C++".
  unsigned result = 1;
  while (n >= 10000) {
    n /= 10000u;
    result += 4;
  }
  if (n >= 1000) return result + 3;
  if (n >= 100) return result + 2;
  if (n >= 10) return result + 1;
  return result;
}
#endif
#ifdef FMT_BUILTIN_CLZ
// Optional version of count_digits for better performance on 32-bit platforms.
inline unsigned count_digits(uint32_t n) {
  int t = (32 - FMT_BUILTIN_CLZ(n | 1)) * 1233 >> 12;
  return to_unsigned(t) - (n < Data::POWERS_OF_10_32[t]) + 1;
}
#endif

// A functor that doesn't add a thousands separator.
struct NoThousandsSep {
  template <typename Char>
  void operator()(Char *) {}
};

// A functor that adds a thousands separator.
class ThousandsSep {
 private:
  fmt::StringRef sep_;

  // Index of a decimal digit with the least significant digit having index 0.
  unsigned digit_index_;

 public:
  explicit ThousandsSep(fmt::StringRef sep) : sep_(sep), digit_index_(0) {}

  // Called by format_decimal after each digit is written. Inserts sep_
  // before every third digit; the pointer is moved backwards because
  // format_decimal emits digits right to left.
  template <typename Char>
  void operator()(Char *&buffer) {
    if (++digit_index_ % 3 != 0)
      return;
    buffer -= sep_.size();
    std::uninitialized_copy(sep_.data(), sep_.data() + sep_.size(),
                            internal::make_ptr(buffer, sep_.size()));
  }
};
// Formats a decimal unsigned integer value writing into buffer.
// thousands_sep is a functor that is called after writing each char to
// add a thousands separator if necessary.
//
// Digits are written right to left starting at buffer + num_digits; the
// caller must size the buffer for num_digits characters plus separators.
template <typename UInt, typename Char, typename ThousandsSep>
inline void format_decimal(Char *buffer, UInt value, unsigned num_digits,
                           ThousandsSep thousands_sep) {
  buffer += num_digits;
  while (value >= 100) {
    // Integer division is slow so do it for a group of two digits instead
    // of for every digit. The idea comes from the talk by Alexandrescu
    // "Three Optimization Tips for C++". See speed-test for a comparison.
    unsigned index = static_cast<unsigned>((value % 100) * 2);
    value /= 100;
    *--buffer = Data::DIGITS[index + 1];
    thousands_sep(buffer);
    *--buffer = Data::DIGITS[index];
    thousands_sep(buffer);
  }
  // At most two digits remain.
  if (value < 10) {
    *--buffer = static_cast<char>('0' + value);
    return;
  }
  unsigned index = static_cast<unsigned>(value * 2);
  *--buffer = Data::DIGITS[index + 1];
  thousands_sep(buffer);
  *--buffer = Data::DIGITS[index];
}
// Formats value as a decimal number without thousands separators, writing
// num_digits characters into buffer. Convenience overload that delegates to
// the separator-aware format_decimal above.
// (Cleanup: removed a redundant trailing `return;` from this void function.)
template <typename UInt, typename Char>
inline void format_decimal(Char *buffer, UInt value, unsigned num_digits) {
  format_decimal(buffer, value, num_digits, NoThousandsSep());
}
#ifndef _WIN32
# define FMT_USE_WINDOWS_H 0
#elif !defined(FMT_USE_WINDOWS_H)
# define FMT_USE_WINDOWS_H 1
#endif

// Define FMT_USE_WINDOWS_H to 0 to disable use of windows.h.
// All the functionality that relies on it will be disabled too.
#if FMT_USE_WINDOWS_H
// A converter from UTF-8 to UTF-16.
// It is only provided for Windows since other systems support UTF-8 natively.
class UTF8ToUTF16 {
 private:
  MemoryBuffer<wchar_t, INLINE_BUFFER_SIZE> buffer_;

 public:
  FMT_API explicit UTF8ToUTF16(StringRef s);
  operator WStringRef() const { return WStringRef(&buffer_[0], size()); }
  // size() excludes the terminating null stored in buffer_.
  size_t size() const { return buffer_.size() - 1; }
  const wchar_t *c_str() const { return &buffer_[0]; }
  std::wstring str() const { return std::wstring(&buffer_[0], size()); }
};

// A converter from UTF-16 to UTF-8.
// It is only provided for Windows since other systems support UTF-8 natively.
class UTF16ToUTF8 {
 private:
  MemoryBuffer<char, INLINE_BUFFER_SIZE> buffer_;

 public:
  UTF16ToUTF8() {}
  FMT_API explicit UTF16ToUTF8(WStringRef s);
  operator StringRef() const { return StringRef(&buffer_[0], size()); }
  // size() excludes the terminating null stored in buffer_.
  size_t size() const { return buffer_.size() - 1; }
  const char *c_str() const { return &buffer_[0]; }
  std::string str() const { return std::string(&buffer_[0], size()); }

  // Performs conversion returning a system error code instead of
  // throwing exception on conversion error. This method may still throw
  // in case of memory allocation error.
  FMT_API int convert(WStringRef s);
};

// Writes a description of the given Windows error code to out.
FMT_API void format_windows_error(fmt::Writer &out, int error_code,
                                  fmt::StringRef message) FMT_NOEXCEPT;
#endif
// A formatting argument value.
struct Value {
  // A string together with its size; not necessarily null-terminated.
  template <typename Char>
  struct StringValue {
    const Char *value;
    std::size_t size;
  };

  // Formats a custom-type argument: arg points to the value, format_str_ptr
  // to the current position in the format string.
  typedef void (*FormatFunc)(
      void *formatter, const void *arg, void *format_str_ptr);

  // Type-erased value of a user-defined type, paired with the function that
  // knows how to format it.
  struct CustomValue {
    const void *value;
    FormatFunc format;
  };

  // Exactly one member is active, selected by the Arg::Type tag below.
  union {
    int int_value;
    unsigned uint_value;
    LongLong long_long_value;
    ULongLong ulong_long_value;
    double double_value;
    long double long_double_value;
    const void *pointer;
    StringValue<char> string;
    StringValue<signed char> sstring;
    StringValue<unsigned char> ustring;
    StringValue<wchar_t> wstring;
    CustomValue custom;
  };

  enum Type {
    NONE, NAMED_ARG,
    // Integer types should go first,
    INT, UINT, LONG_LONG, ULONG_LONG, BOOL, CHAR, LAST_INTEGER_TYPE = CHAR,
    // followed by floating-point types.
    DOUBLE, LONG_DOUBLE, LAST_NUMERIC_TYPE = LONG_DOUBLE,
    CSTRING, STRING, WSTRING, POINTER, CUSTOM
  };
};

// A formatting argument. It is a trivially copyable/constructible type to
// allow storage in internal::MemoryBuffer.
struct Arg : Value {
  Type type;  // Discriminator for the union inherited from Value.
};

template <typename Char>
struct NamedArg;
template <typename Char, typename T>
struct NamedArgWithType;

// An empty tag type used to make overloads unusable (see WCharHelper).
template <typename T = void>
struct Null {};

// A helper class template to enable or disable overloads taking wide
// characters and strings in MakeValue.
template <typename T, typename Char>
struct WCharHelper {
  typedef Null<T> Supported;
  typedef T Unsupported;
};

// For wide-character formatters the mapping is reversed: wide arguments
// become supported.
template <typename T>
struct WCharHelper<T, wchar_t> {
  typedef T Supported;
  typedef Null<T> Unsupported;
};
// sizeof-based yes/no types for the overload-resolution probe below.
typedef char Yes[1];
typedef char No[2];

// Produces a value of type T in an unevaluated context (never defined).
template <typename T>
T &get();

// These are non-members to workaround an overload resolution bug in bcc32.
Yes &convert(fmt::ULongLong);
No &convert(...);

template <typename T, bool ENABLE_CONVERSION>
struct ConvertToIntImpl {
  enum { value = ENABLE_CONVERSION };
};

template <typename T, bool ENABLE_CONVERSION>
struct ConvertToIntImpl2 {
  enum { value = false };
};

template <typename T>
struct ConvertToIntImpl2<T, true> {
  enum {
    // Don't convert numeric types.
    value = ConvertToIntImpl<T, !std::numeric_limits<T>::is_specialized>::value
  };
};

// Detects whether T (typically an enum or a class with a conversion
// operator) should be formatted by converting it to an integer.
template <typename T>
struct ConvertToInt {
  enum {
    // True if T is implicitly convertible to ULongLong.
    enable_conversion = sizeof(fmt::internal::convert(get<T>())) == sizeof(Yes)
  };
  enum { value = ConvertToIntImpl2<T, enable_conversion>::value };
};

#define FMT_DISABLE_CONVERSION_TO_INT(Type) \
  template <> \
  struct ConvertToInt<Type> { enum { value = 0 }; }

// Silence warnings about converting float to int.
FMT_DISABLE_CONVERSION_TO_INT(float);
FMT_DISABLE_CONVERSION_TO_INT(double);
FMT_DISABLE_CONVERSION_TO_INT(long double);

// Pre-C++11 replacements for std::enable_if and std::conditional.
template <bool B, class T = void>
struct EnableIf {};

template <class T>
struct EnableIf<true, T> { typedef T type; };

template <bool B, class T, class F>
struct Conditional { typedef T type; };

template <class T, class F>
struct Conditional<false, T, F> { typedef F type; };

// For bcc32 which doesn't understand ! in template arguments.
template <bool>
struct Not { enum { value = 0 }; };

template <>
struct Not<false> { enum { value = 1 }; };

// A compile-time false that depends on T, for use in static assertions
// inside templates (see format_arg below).
template <typename T>
struct FalseType { enum { value = 0 }; };

// SFINAE probe: only instantiable when the member-pointer constant exists.
template <typename T, T> struct LConvCheck {
  LConvCheck(int) {}
};

// Returns the thousands separator for the current locale.
// We check if ``lconv`` contains ``thousands_sep`` because on Android
// ``lconv`` is stubbed as an empty struct.
template <typename LConv>
inline StringRef thousands_sep(
    LConv *lc, LConvCheck<char *LConv::*, &LConv::thousands_sep> = 0) {
  return lc->thousands_sep;
}

// Fallback selected when lconv has no thousands_sep member.
inline fmt::StringRef thousands_sep(...) { return ""; }
#define FMT_CONCAT(a, b) a##b

// Suppresses unused-variable/typedef warnings on GCC.
#if FMT_GCC_VERSION >= 303
# define FMT_UNUSED __attribute__((unused))
#else
# define FMT_UNUSED
#endif

#ifndef FMT_USE_STATIC_ASSERT
# define FMT_USE_STATIC_ASSERT 0
#endif

// Use the real static_assert where available; otherwise emulate it with a
// typedef of an array whose size is negative when the condition is false.
#if FMT_USE_STATIC_ASSERT || FMT_HAS_FEATURE(cxx_static_assert) || \
  (FMT_GCC_VERSION >= 403 && FMT_HAS_GXX_CXX11) || _MSC_VER >= 1600
# define FMT_STATIC_ASSERT(cond, message) static_assert(cond, message)
#else
# define FMT_CONCAT_(a, b) FMT_CONCAT(a, b)
# define FMT_STATIC_ASSERT(cond, message) \
  typedef int FMT_CONCAT_(Assert, __LINE__)[(cond) ? 1 : -1] FMT_UNUSED
#endif

// Catch-all overload chosen only when no better format_arg exists for the
// argument type; instantiating it triggers a compile-time error with a
// helpful message.
template <typename Formatter>
void format_arg(Formatter&, ...) {
  FMT_STATIC_ASSERT(FalseType<Formatter>::value,
    "Cannot format argument. To enable the use of ostream "
    "operator<< include fmt/ostream.h. Otherwise provide "
    "an overload of format_arg.");
}
// Makes an Arg object from any type.
template <typename Formatter>
class MakeValue : public Arg {
 public:
  typedef typename Formatter::Char Char;

 private:
  // The following two methods are private to disallow formatting of
  // arbitrary pointers. If you want to output a pointer cast it to
  // "void *" or "const void *". In particular, this forbids formatting
  // of "[const] volatile char *" which is printed as bool by iostreams.
  // Do not implement!
  template <typename T>
  MakeValue(const T *value);
  template <typename T>
  MakeValue(T *value);

  // The following methods are private to disallow formatting of wide
  // characters and strings into narrow strings as in
  //   fmt::format("{}", L"test");
  // To fix this, use a wide format string: fmt::format(L"{}", L"test").
#if !FMT_MSC_VER || defined(_NATIVE_WCHAR_T_DEFINED)
  MakeValue(typename WCharHelper<wchar_t, Char>::Unsupported);
#endif
  MakeValue(typename WCharHelper<wchar_t *, Char>::Unsupported);
  MakeValue(typename WCharHelper<const wchar_t *, Char>::Unsupported);
  MakeValue(typename WCharHelper<const std::wstring &, Char>::Unsupported);
#if FMT_HAS_STRING_VIEW
  MakeValue(typename WCharHelper<const std::wstring_view &, Char>::Unsupported);
#endif
#if FMT_HAS_EXPERIMENTAL_STRING_VIEW
  MakeValue(typename WCharHelper<const std::experimental::wstring_view &, Char>::Unsupported);
#endif
  MakeValue(typename WCharHelper<WStringRef, Char>::Unsupported);

  // Stores a narrow string and its size in the union.
  void set_string(StringRef str) {
    string.value = str.data();
    string.size = str.size();
  }

  // Stores a wide string and its size in the union.
  void set_string(WStringRef str) {
    wstring.value = str.data();
    wstring.size = str.size();
  }

  // Formats an argument of a custom type, such as a user-defined class.
  // Stored as a plain function pointer in CustomValue; T is recovered here.
  template <typename T>
  static void format_custom_arg(
      void *formatter, const void *arg, void *format_str_ptr) {
    format_arg(*static_cast<Formatter*>(formatter),
               *static_cast<const Char**>(format_str_ptr),
               *static_cast<const T*>(arg));
  }

 public:
  MakeValue() {}

  // Each FMT_MAKE_VALUE expands to a constructor storing a value of the
  // given type in the union plus a type() function returning its type tag.
#define FMT_MAKE_VALUE_(Type, field, TYPE, rhs) \
  MakeValue(Type value) { field = rhs; } \
  static uint64_t type(Type) { return Arg::TYPE; }

#define FMT_MAKE_VALUE(Type, field, TYPE) \
  FMT_MAKE_VALUE_(Type, field, TYPE, value)

  FMT_MAKE_VALUE(bool, int_value, BOOL)
  FMT_MAKE_VALUE(short, int_value, INT)
  FMT_MAKE_VALUE(unsigned short, uint_value, UINT)
  FMT_MAKE_VALUE(int, int_value, INT)
  FMT_MAKE_VALUE(unsigned, uint_value, UINT)

  MakeValue(long value) {
    // To minimize the number of types we need to deal with, long is
    // translated either to int or to long long depending on its size.
    if (const_check(sizeof(long) == sizeof(int)))
      int_value = static_cast<int>(value);
    else
      long_long_value = value;
  }
  static uint64_t type(long) {
    return sizeof(long) == sizeof(int) ? Arg::INT : Arg::LONG_LONG;
  }

  MakeValue(unsigned long value) {
    // Same size-based mapping as for long above.
    if (const_check(sizeof(unsigned long) == sizeof(unsigned)))
      uint_value = static_cast<unsigned>(value);
    else
      ulong_long_value = value;
  }
  static uint64_t type(unsigned long) {
    return sizeof(unsigned long) == sizeof(unsigned) ?
          Arg::UINT : Arg::ULONG_LONG;
  }

  FMT_MAKE_VALUE(LongLong, long_long_value, LONG_LONG)
  FMT_MAKE_VALUE(ULongLong, ulong_long_value, ULONG_LONG)
  FMT_MAKE_VALUE(float, double_value, DOUBLE)
  FMT_MAKE_VALUE(double, double_value, DOUBLE)
  FMT_MAKE_VALUE(long double, long_double_value, LONG_DOUBLE)
  FMT_MAKE_VALUE(signed char, int_value, INT)
  FMT_MAKE_VALUE(unsigned char, uint_value, UINT)
  FMT_MAKE_VALUE(char, int_value, CHAR)

#if __cplusplus >= 201103L
  // C++11 only: enums convertible to int are stored as integers.
  template <
    typename T,
    typename = typename std::enable_if<
      std::is_enum<T>::value && ConvertToInt<T>::value>::type>
  MakeValue(T value) { int_value = value; }

  template <
    typename T,
    typename = typename std::enable_if<
      std::is_enum<T>::value && ConvertToInt<T>::value>::type>
  static uint64_t type(T) { return Arg::INT; }
#endif

#if !defined(_MSC_VER) || defined(_NATIVE_WCHAR_T_DEFINED)
  MakeValue(typename WCharHelper<wchar_t, Char>::Supported value) {
    int_value = value;
  }
  static uint64_t type(wchar_t) { return Arg::CHAR; }
#endif

#define FMT_MAKE_STR_VALUE(Type, TYPE) \
  MakeValue(Type value) { set_string(value); } \
  static uint64_t type(Type) { return Arg::TYPE; }

  FMT_MAKE_VALUE(char *, string.value, CSTRING)
  FMT_MAKE_VALUE(const char *, string.value, CSTRING)
  FMT_MAKE_VALUE(signed char *, sstring.value, CSTRING)
  FMT_MAKE_VALUE(const signed char *, sstring.value, CSTRING)
  FMT_MAKE_VALUE(unsigned char *, ustring.value, CSTRING)
  FMT_MAKE_VALUE(const unsigned char *, ustring.value, CSTRING)
  FMT_MAKE_STR_VALUE(const std::string &, STRING)
#if FMT_HAS_STRING_VIEW
  FMT_MAKE_STR_VALUE(const std::string_view &, STRING)
#endif
#if FMT_HAS_EXPERIMENTAL_STRING_VIEW
  FMT_MAKE_STR_VALUE(const std::experimental::string_view &, STRING)
#endif
  FMT_MAKE_STR_VALUE(StringRef, STRING)
  FMT_MAKE_VALUE_(CStringRef, string.value, CSTRING, value.c_str())

#define FMT_MAKE_WSTR_VALUE(Type, TYPE) \
  MakeValue(typename WCharHelper<Type, Char>::Supported value) { \
    set_string(value); \
  } \
  static uint64_t type(Type) { return Arg::TYPE; }

  FMT_MAKE_WSTR_VALUE(wchar_t *, WSTRING)
  FMT_MAKE_WSTR_VALUE(const wchar_t *, WSTRING)
  FMT_MAKE_WSTR_VALUE(const std::wstring &, WSTRING)
#if FMT_HAS_STRING_VIEW
  FMT_MAKE_WSTR_VALUE(const std::wstring_view &, WSTRING)
#endif
#if FMT_HAS_EXPERIMENTAL_STRING_VIEW
  FMT_MAKE_WSTR_VALUE(const std::experimental::wstring_view &, WSTRING)
#endif
  FMT_MAKE_WSTR_VALUE(WStringRef, WSTRING)

  FMT_MAKE_VALUE(void *, pointer, POINTER)
  FMT_MAKE_VALUE(const void *, pointer, POINTER)

  // Catch-all for user-defined types that are not convertible to int:
  // stores a pointer to the value plus a formatting function (type erasure).
  template <typename T>
  MakeValue(const T &value,
            typename EnableIf<Not<
              ConvertToInt<T>::value>::value, int>::type = 0) {
    custom.value = &value;
    custom.format = &format_custom_arg<T>;
  }

  template <typename T>
  static typename EnableIf<Not<ConvertToInt<T>::value>::value, uint64_t>::type
      type(const T &) {
    return Arg::CUSTOM;
  }

  // Additional template param `Char_` is needed here because make_type always
  // uses char.
  template <typename Char_>
  MakeValue(const NamedArg<Char_> &value) { pointer = &value; }
  template <typename Char_, typename T>
  MakeValue(const NamedArgWithType<Char_, T> &value) { pointer = &value; }

  template <typename Char_>
  static uint64_t type(const NamedArg<Char_> &) { return Arg::NAMED_ARG; }
  template <typename Char_, typename T>
  static uint64_t type(const NamedArgWithType<Char_, T> &) { return Arg::NAMED_ARG; }
};
// Constructs a complete Arg (value plus type tag) from any formattable value.
template <typename Formatter>
class MakeArg : public Arg {
 public:
  MakeArg() {
    type = Arg::NONE;
  }

  template <typename T>
  MakeArg(const T &value)
  : Arg(MakeValue<Formatter>(value)) {
    type = static_cast<Arg::Type>(MakeValue<Formatter>::type(value));
  }
};

// An argument with an associated name.
template <typename Char>
struct NamedArg : Arg {
  BasicStringRef<Char> name;

  template <typename T>
  NamedArg(BasicStringRef<Char> argname, const T &value)
  : Arg(MakeArg< BasicFormatter<Char> >(value)), name(argname) {}
};

// A named argument that also carries the static type of its value.
template <typename Char, typename T>
struct NamedArgWithType : NamedArg<Char> {
  NamedArgWithType(BasicStringRef<Char> argname, const T &value)
    : NamedArg<Char>(argname, value) {}
};

// Base class for errors reported by the library.
class RuntimeError : public std::runtime_error {
 protected:
  RuntimeError() : std::runtime_error("") {}
  RuntimeError(const RuntimeError &rerr) : std::runtime_error(rerr) {}
  FMT_API ~RuntimeError() FMT_DTOR_NOEXCEPT FMT_OVERRIDE;
};

template <typename Char>
class ArgMap;
}  // namespace internal
/** An argument list. */
class ArgList {
 private:
  // To reduce compiled code size per formatting function call, types of first
  // MAX_PACKED_ARGS arguments are passed in the types_ field, 4 bits per
  // argument.
  uint64_t types_;
  union {
    // If the number of arguments is less than MAX_PACKED_ARGS, the argument
    // values are stored in values_, otherwise they are stored in args_.
    // This is done to reduce compiled code size as storing larger objects
    // may require more code (at least on x86-64) even if the same amount of
    // data is actually copied to stack. It saves ~10% on the bloat test.
    const internal::Value *values_;
    const internal::Arg *args_;
  };

  // Returns the type tag of the argument at index.
  internal::Arg::Type type(unsigned index) const {
    return type(types_, index);
  }

  template <typename Char>
  friend class internal::ArgMap;

 public:
  // Maximum number of arguments with packed types.
  enum { MAX_PACKED_ARGS = 16 };

  ArgList() : types_(0) {}

  ArgList(ULongLong types, const internal::Value *values)
  : types_(types), values_(values) {}
  ArgList(ULongLong types, const internal::Arg *args)
  : types_(types), args_(args) {}

  uint64_t types() const { return types_; }

  /** Returns the argument at specified index. */
  internal::Arg operator[](unsigned index) const {
    using internal::Arg;
    Arg arg;
    // If the last packed type slot is NONE there are fewer than
    // MAX_PACKED_ARGS arguments, so the packed values_ member is active.
    bool use_values = type(MAX_PACKED_ARGS - 1) == Arg::NONE;
    if (index < MAX_PACKED_ARGS) {
      Arg::Type arg_type = type(index);
      internal::Value &val = arg;
      if (arg_type != Arg::NONE)
        val = use_values ? values_[index] : args_[index];
      arg.type = arg_type;
      return arg;
    }
    if (use_values) {
      // The index is greater than the number of arguments that can be stored
      // in values, so return a "none" argument.
      arg.type = Arg::NONE;
      return arg;
    }
    // Scan forward so that an out-of-range index returns the NONE entry
    // terminating args_ instead of reading past the end.
    for (unsigned i = MAX_PACKED_ARGS; i <= index; ++i) {
      if (args_[i].type == Arg::NONE)
        return args_[i];
    }
    return args_[index];
  }

  // Extracts the 4-bit type tag at `index` from a packed types value.
  static internal::Arg::Type type(uint64_t types, unsigned index) {
    unsigned shift = index * 4;
    uint64_t mask = 0xf;
    return static_cast<internal::Arg::Type>(
      (types & (mask << shift)) >> shift);
  }
};
// Static dispatch to the Impl subclass (CRTP); no virtual calls involved.
#define FMT_DISPATCH(call) static_cast<Impl*>(this)->call

/**
\rst
An argument visitor based on the `curiously recurring template pattern
<http://en.wikipedia.org/wiki/Curiously_recurring_template_pattern>`_.

To use `~fmt::ArgVisitor` define a subclass that implements some or all of the
visit methods with the same signatures as the methods in `~fmt::ArgVisitor`,
for example, `~fmt::ArgVisitor::visit_int()`.
Pass the subclass as the *Impl* template parameter. Then calling
`~fmt::ArgVisitor::visit` for some argument will dispatch to a visit method
specific to the argument type. For example, if the argument type is
``double`` then the `~fmt::ArgVisitor::visit_double()` method of a subclass
will be called. If the subclass doesn't contain a method with this signature,
then a corresponding method of `~fmt::ArgVisitor` will be called.

**Example**::

  class MyArgVisitor : public fmt::ArgVisitor<MyArgVisitor, void> {
   public:
    void visit_int(int value) { fmt::print("{}", value); }
    void visit_double(double value) { fmt::print("{}", value ); }
  };
\endrst
*/
template <typename Impl, typename Result>
class ArgVisitor {
 private:
  typedef internal::Arg Arg;

 public:
  // Hook for subclasses to report an unhandled argument; no-op by default.
  void report_unhandled_arg() {}

  // Fallback reached by every default visit method below.
  Result visit_unhandled_arg() {
    FMT_DISPATCH(report_unhandled_arg());
    return Result();
  }

  /** Visits an ``int`` argument. **/
  Result visit_int(int value) {
    return FMT_DISPATCH(visit_any_int(value));
  }

  /** Visits a ``long long`` argument. **/
  Result visit_long_long(LongLong value) {
    return FMT_DISPATCH(visit_any_int(value));
  }

  /** Visits an ``unsigned`` argument. **/
  Result visit_uint(unsigned value) {
    return FMT_DISPATCH(visit_any_int(value));
  }

  /** Visits an ``unsigned long long`` argument. **/
  Result visit_ulong_long(ULongLong value) {
    return FMT_DISPATCH(visit_any_int(value));
  }

  /** Visits a ``bool`` argument. **/
  Result visit_bool(bool value) {
    return FMT_DISPATCH(visit_any_int(value));
  }

  /** Visits a ``char`` or ``wchar_t`` argument. **/
  Result visit_char(int value) {
    return FMT_DISPATCH(visit_any_int(value));
  }

  /** Visits an argument of any integral type. **/
  template <typename T>
  Result visit_any_int(T) {
    return FMT_DISPATCH(visit_unhandled_arg());
  }

  /** Visits a ``double`` argument. **/
  Result visit_double(double value) {
    return FMT_DISPATCH(visit_any_double(value));
  }

  /** Visits a ``long double`` argument. **/
  Result visit_long_double(long double value) {
    return FMT_DISPATCH(visit_any_double(value));
  }

  /** Visits a ``double`` or ``long double`` argument. **/
  template <typename T>
  Result visit_any_double(T) {
    return FMT_DISPATCH(visit_unhandled_arg());
  }

  /** Visits a null-terminated C string (``const char *``) argument. **/
  Result visit_cstring(const char *) {
    return FMT_DISPATCH(visit_unhandled_arg());
  }

  /** Visits a string argument. **/
  Result visit_string(Arg::StringValue<char>) {
    return FMT_DISPATCH(visit_unhandled_arg());
  }

  /** Visits a wide string argument. **/
  Result visit_wstring(Arg::StringValue<wchar_t>) {
    return FMT_DISPATCH(visit_unhandled_arg());
  }

  /** Visits a pointer argument. **/
  Result visit_pointer(const void *) {
    return FMT_DISPATCH(visit_unhandled_arg());
  }

  /** Visits an argument of a custom (user-defined) type. **/
  Result visit_custom(Arg::CustomValue) {
    return FMT_DISPATCH(visit_unhandled_arg());
  }

  /**
  \rst
  Visits an argument dispatching to the appropriate visit method based on
  the argument type. For example, if the argument type is ``double`` then
  the `~fmt::ArgVisitor::visit_double()` method of the *Impl* class will be
  called.
  \endrst
  */
  Result visit(const Arg &arg) {
    switch (arg.type) {
    case Arg::NONE:
    case Arg::NAMED_ARG:
      // These tags should never reach a visitor; treat as internal error.
      FMT_ASSERT(false, "invalid argument type");
      break;
    case Arg::INT:
      return FMT_DISPATCH(visit_int(arg.int_value));
    case Arg::UINT:
      return FMT_DISPATCH(visit_uint(arg.uint_value));
    case Arg::LONG_LONG:
      return FMT_DISPATCH(visit_long_long(arg.long_long_value));
    case Arg::ULONG_LONG:
      return FMT_DISPATCH(visit_ulong_long(arg.ulong_long_value));
    case Arg::BOOL:
      return FMT_DISPATCH(visit_bool(arg.int_value != 0));
    case Arg::CHAR:
      return FMT_DISPATCH(visit_char(arg.int_value));
    case Arg::DOUBLE:
      return FMT_DISPATCH(visit_double(arg.double_value));
    case Arg::LONG_DOUBLE:
      return FMT_DISPATCH(visit_long_double(arg.long_double_value));
    case Arg::CSTRING:
      return FMT_DISPATCH(visit_cstring(arg.string.value));
    case Arg::STRING:
      return FMT_DISPATCH(visit_string(arg.string));
    case Arg::WSTRING:
      return FMT_DISPATCH(visit_wstring(arg.wstring));
    case Arg::POINTER:
      return FMT_DISPATCH(visit_pointer(arg.pointer));
    case Arg::CUSTOM:
      return FMT_DISPATCH(visit_custom(arg.custom));
    }
    return Result();
  }
};
enum Alignment {
  ALIGN_DEFAULT, ALIGN_LEFT, ALIGN_RIGHT, ALIGN_CENTER, ALIGN_NUMERIC
};

// Flags.
enum {
  SIGN_FLAG = 1, PLUS_FLAG = 2, MINUS_FLAG = 4, HASH_FLAG = 8,
  CHAR_FLAG = 0x10 // Argument has char type - used in error reporting.
};

// An empty format specifier.
struct EmptySpec {};

// A type specifier. Every query returns a fixed default except type(),
// which is the compile-time TYPE character.
template <char TYPE>
struct TypeSpec : EmptySpec {
  Alignment align() const { return ALIGN_DEFAULT; }
  unsigned width() const { return 0; }
  int precision() const { return -1; }
  bool flag(unsigned) const { return false; }
  char type() const { return TYPE; }
  char type_prefix() const { return TYPE; }
  char fill() const { return ' '; }
};

// A width specifier.
struct WidthSpec {
  unsigned width_;
  // Fill is always wchar_t and cast to char if necessary to avoid having
  // two specializations of WidthSpec and its subclasses.
  wchar_t fill_;

  WidthSpec(unsigned width, wchar_t fill) : width_(width), fill_(fill) {}

  unsigned width() const { return width_; }
  wchar_t fill() const { return fill_; }
};

// An alignment specifier.
struct AlignSpec : WidthSpec {
  Alignment align_;

  AlignSpec(unsigned width, wchar_t fill, Alignment align = ALIGN_DEFAULT)
  : WidthSpec(width, fill), align_(align) {}

  Alignment align() const { return align_; }

  int precision() const { return -1; }
};

// An alignment and type specifier.
template <char TYPE>
struct AlignTypeSpec : AlignSpec {
  AlignTypeSpec(unsigned width, wchar_t fill) : AlignSpec(width, fill) {}

  bool flag(unsigned) const { return false; }
  char type() const { return TYPE; }
  char type_prefix() const { return TYPE; }
};

// A full format specifier.
struct FormatSpec : AlignSpec {
  unsigned flags_;  // Combination of the SIGN/PLUS/MINUS/HASH/CHAR flags.
  int precision_;   // -1 means "not specified".
  char type_;       // Presentation type character; 0 if none.

  FormatSpec(
    unsigned width = 0, char type = 0, wchar_t fill = ' ')
  : AlignSpec(width, fill), flags_(0), precision_(-1), type_(type) {}

  bool flag(unsigned f) const { return (flags_ & f) != 0; }
  int precision() const { return precision_; }
  char type() const { return type_; }
  char type_prefix() const { return type_; }
};
// An integer format specifier: pairs a value with a (possibly compile-time)
// spec describing how to format it. Produced by bin/oct/hex/hexu/pad below.
template <typename T, typename SpecT = TypeSpec<0>, typename Char = char>
class IntFormatSpec : public SpecT {
 private:
  T value_;

 public:
  IntFormatSpec(T val, const SpecT &spec = SpecT())
  : SpecT(spec), value_(val) {}

  T value() const { return value_; }
};

// A string format specifier.
template <typename Char>
class StrFormatSpec : public AlignSpec {
 private:
  const Char *str_;

 public:
  template <typename FillChar>
  StrFormatSpec(const Char *str, unsigned width, FillChar fill)
  : AlignSpec(width, fill), str_(str) {
    // Rejects at compile time fill characters not representable in Char
    // (e.g. a wide fill character with a narrow string).
    internal::CharTraits<Char>::convert(FillChar());
  }

  const Char *str() const { return str_; }
};
/**
Returns an integer format specifier to format the value in base 2.
*/
IntFormatSpec<int, TypeSpec<'b'> > bin(int value);
/**
Returns an integer format specifier to format the value in base 8.
*/
IntFormatSpec<int, TypeSpec<'o'> > oct(int value);
/**
Returns an integer format specifier to format the value in base 16 using
lower-case letters for the digits above 9.
*/
IntFormatSpec<int, TypeSpec<'x'> > hex(int value);
/**
Returns an integer formatter format specifier to format in base 16 using
upper-case letters for the digits above 9.
*/
IntFormatSpec<int, TypeSpec<'X'> > hexu(int value);
/**
\rst
Returns an integer format specifier to pad the formatted argument with the
fill character to the specified width using the default (right) numeric
alignment.
**Example**::
MemoryWriter out;
out << pad(hex(0xcafe), 8, '0');
// out.str() == "0000cafe"
\endrst
*/
template <char TYPE_CODE, typename Char>
IntFormatSpec<int, AlignTypeSpec<TYPE_CODE>, Char> pad(
int value, unsigned width, Char fill = ' ');
// Generates bin/oct/hex/hexu plus three pad overloads for the integer type
// TYPE. Comments inside the macro body must use /* */ form: a // comment
// would extend across the spliced line-continuation backslash.
#define FMT_DEFINE_INT_FORMATTERS(TYPE) \
inline IntFormatSpec<TYPE, TypeSpec<'b'> > bin(TYPE value) { \
return IntFormatSpec<TYPE, TypeSpec<'b'> >(value, TypeSpec<'b'>()); \
} \
 \
inline IntFormatSpec<TYPE, TypeSpec<'o'> > oct(TYPE value) { \
return IntFormatSpec<TYPE, TypeSpec<'o'> >(value, TypeSpec<'o'>()); \
} \
 \
inline IntFormatSpec<TYPE, TypeSpec<'x'> > hex(TYPE value) { \
return IntFormatSpec<TYPE, TypeSpec<'x'> >(value, TypeSpec<'x'>()); \
} \
 \
inline IntFormatSpec<TYPE, TypeSpec<'X'> > hexu(TYPE value) { \
return IntFormatSpec<TYPE, TypeSpec<'X'> >(value, TypeSpec<'X'>()); \
} \
 \
template <char TYPE_CODE> \
inline IntFormatSpec<TYPE, AlignTypeSpec<TYPE_CODE> > pad( \
IntFormatSpec<TYPE, TypeSpec<TYPE_CODE> > f, unsigned width) { \
return IntFormatSpec<TYPE, AlignTypeSpec<TYPE_CODE> >( \
f.value(), AlignTypeSpec<TYPE_CODE>(width, ' ')); \
} \
 \
/* For compatibility with older compilers we provide two overloads for pad, */ \
/* one that takes a fill character and one that doesn't. In the future this */ \
/* can be replaced with one overload making the template argument Char */ \
/* default to char (C++11). */ \
template <char TYPE_CODE, typename Char> \
inline IntFormatSpec<TYPE, AlignTypeSpec<TYPE_CODE>, Char> pad( \
IntFormatSpec<TYPE, TypeSpec<TYPE_CODE>, Char> f, \
unsigned width, Char fill) { \
return IntFormatSpec<TYPE, AlignTypeSpec<TYPE_CODE>, Char>( \
f.value(), AlignTypeSpec<TYPE_CODE>(width, fill)); \
} \
 \
inline IntFormatSpec<TYPE, AlignTypeSpec<0> > pad( \
TYPE value, unsigned width) { \
return IntFormatSpec<TYPE, AlignTypeSpec<0> >( \
value, AlignTypeSpec<0>(width, ' ')); \
} \
 \
template <typename Char> \
inline IntFormatSpec<TYPE, AlignTypeSpec<0>, Char> pad( \
TYPE value, unsigned width, Char fill) { \
return IntFormatSpec<TYPE, AlignTypeSpec<0>, Char>( \
value, AlignTypeSpec<0>(width, fill)); \
}
// Instantiate the formatter helpers for every supported integer type.
FMT_DEFINE_INT_FORMATTERS(int)
FMT_DEFINE_INT_FORMATTERS(long)
FMT_DEFINE_INT_FORMATTERS(unsigned)
FMT_DEFINE_INT_FORMATTERS(unsigned long)
FMT_DEFINE_INT_FORMATTERS(LongLong)
FMT_DEFINE_INT_FORMATTERS(ULongLong)
/**
\rst
Returns a string formatter that pads the formatted argument with the fill
character to the specified width using the default (left) string alignment.
**Example**::
std::string s = str(MemoryWriter() << pad("abc", 8));
// s == "abc "
\endrst
*/
template <typename Char>
inline StrFormatSpec<Char> pad(
const Char *str, unsigned width, Char fill = ' ') {
return StrFormatSpec<Char>(str, width, fill);
}
// Overload that allows a narrow (char) fill character with wide strings.
inline StrFormatSpec<wchar_t> pad(
const wchar_t *str, unsigned width, char fill = ' ') {
return StrFormatSpec<wchar_t>(str, width, fill);
}
namespace internal {
// Maps argument names to argument values, used to look up named arguments
// such as "{name}" in a format string. Populated lazily by init().
template <typename Char>
class ArgMap {
private:
typedef std::vector<
std::pair<fmt::BasicStringRef<Char>, internal::Arg> > MapType;
typedef typename MapType::value_type Pair;
// Unsorted list of (name, arg) pairs; see find() below.
MapType map_;
public:
// Collects all named arguments from args into the map. Idempotent.
void init(const ArgList &args);
// Returns the argument with the given name, or FMT_NULL if absent.
// Linear search: O(n) in the number of named arguments, which is
// expected to be small.
const internal::Arg *find(const fmt::BasicStringRef<Char> &name) const {
// The list is unsorted, so just return the first matching name.
for (typename MapType::const_iterator it = map_.begin(), end = map_.end();
it != end; ++it) {
if (it->first == name)
return &it->second;
}
return FMT_NULL;
}
};
// Scans the argument list and records every NAMED_ARG entry in map_.
// ArgList stores arguments in one of two layouts: a packed Value array
// (values_) when there are fewer than MAX_PACKED_ARGS arguments, or a full
// Arg array (args_) otherwise; this walks whichever layout is in use.
template <typename Char>
void ArgMap<Char>::init(const ArgList &args) {
// Already initialized - nothing to do.
if (!map_.empty())
return;
typedef internal::NamedArg<Char> NamedArg;
const NamedArg *named_arg = FMT_NULL;
// If the type slot at MAX_PACKED_ARGS - 1 is NONE, the packed values_
// representation is in use.
bool use_values =
args.type(ArgList::MAX_PACKED_ARGS - 1) == internal::Arg::NONE;
if (use_values) {
// Packed layout: walk values_ until the NONE terminator.
for (unsigned i = 0;/*nothing*/; ++i) {
internal::Arg::Type arg_type = args.type(i);
switch (arg_type) {
case internal::Arg::NONE:
return;
case internal::Arg::NAMED_ARG:
named_arg = static_cast<const NamedArg*>(args.values_[i].pointer);
map_.push_back(Pair(named_arg->name, *named_arg));
break;
default:
/*nothing*/;
}
}
return;
}
// Unpacked layout: the first MAX_PACKED_ARGS entries have their types
// encoded separately, so check args.type(i) for those...
for (unsigned i = 0; i != ArgList::MAX_PACKED_ARGS; ++i) {
internal::Arg::Type arg_type = args.type(i);
if (arg_type == internal::Arg::NAMED_ARG) {
named_arg = static_cast<const NamedArg*>(args.args_[i].pointer);
map_.push_back(Pair(named_arg->name, *named_arg));
}
}
// ...and the remainder carry their own type field, terminated by NONE.
for (unsigned i = ArgList::MAX_PACKED_ARGS;/*nothing*/; ++i) {
switch (args.args_[i].type) {
case internal::Arg::NONE:
return;
case internal::Arg::NAMED_ARG:
named_arg = static_cast<const NamedArg*>(args.args_[i].pointer);
map_.push_back(Pair(named_arg->name, *named_arg));
break;
default:
/*nothing*/;
}
}
}
// Base class for argument formatters: visits a single formatting argument
// and writes it to writer_ according to spec_. Concrete visitors derive via
// CRTP (the Impl parameter).
template <typename Impl, typename Char, typename Spec = fmt::FormatSpec>
class ArgFormatterBase : public ArgVisitor<Impl, void> {
private:
BasicWriter<Char> &writer_;
Spec &spec_;
FMT_DISALLOW_COPY_AND_ASSIGN(ArgFormatterBase);
// Writes a pointer as a hexadecimal number with a "0x" prefix
// (HASH_FLAG + type 'x'). Mutates spec_ in place.
void write_pointer(const void *p) {
spec_.flags_ = HASH_FLAG;
spec_.type_ = 'x';
writer_.write_int(reinterpret_cast<uintptr_t>(p), spec_);
}
// workaround MSVC two-phase lookup issue
typedef internal::Arg Arg;
protected:
BasicWriter<Char> &writer() { return writer_; }
Spec &spec() { return spec_; }
// Writes a bool as the string "true" or "false".
void write(bool value) {
const char *str_value = value ? "true" : "false";
Arg::StringValue<char> str = { str_value, std::strlen(str_value) };
writer_.write_str(str, spec_);
}
// Writes a C string; a null pointer is written as an empty string.
void write(const char *value) {
Arg::StringValue<char> str = {value, value ? std::strlen(value) : 0};
writer_.write_str(str, spec_);
}
public:
typedef Spec SpecType;
ArgFormatterBase(BasicWriter<Char> &w, Spec &s)
: writer_(w), spec_(s) {}
template <typename T>
void visit_any_int(T value) { writer_.write_int(value, spec_); }
template <typename T>
void visit_any_double(T value) { writer_.write_double(value, spec_); }
// A bool with an explicit type spec (e.g. 'd') is formatted as an int;
// otherwise as "true"/"false".
void visit_bool(bool value) {
if (spec_.type_) {
visit_any_int(value);
return;
}
write(value);
}
// Formats a char argument, honoring width/alignment/fill. With a
// non-'c' type spec the value is formatted as an integer instead.
void visit_char(int value) {
if (spec_.type_ && spec_.type_ != 'c') {
spec_.flags_ |= CHAR_FLAG;
writer_.write_int(value, spec_);
return;
}
if (spec_.align_ == ALIGN_NUMERIC || spec_.flags_ != 0)
FMT_THROW(FormatError("invalid format specifier for char"));
typedef typename BasicWriter<Char>::CharPtr CharPtr;
Char fill = internal::CharTraits<Char>::cast(spec_.fill());
CharPtr out = CharPtr();
const unsigned CHAR_SIZE = 1;
if (spec_.width_ > CHAR_SIZE) {
// Reserve the full field width and fill around the character
// according to the alignment.
out = writer_.grow_buffer(spec_.width_);
if (spec_.align_ == ALIGN_RIGHT) {
std::uninitialized_fill_n(out, spec_.width_ - CHAR_SIZE, fill);
out += spec_.width_ - CHAR_SIZE;
} else if (spec_.align_ == ALIGN_CENTER) {
out = writer_.fill_padding(out, spec_.width_,
internal::const_check(CHAR_SIZE), fill);
} else {
std::uninitialized_fill_n(out + CHAR_SIZE,
spec_.width_ - CHAR_SIZE, fill);
}
} else {
out = writer_.grow_buffer(CHAR_SIZE);
}
*out = internal::CharTraits<Char>::cast(value);
}
// A C string with type spec 'p' is formatted as a pointer value.
void visit_cstring(const char *value) {
if (spec_.type_ == 'p')
return write_pointer(value);
write(value);
}
// Qualification with "internal" here and below is a workaround for nvcc.
void visit_string(internal::Arg::StringValue<char> value) {
writer_.write_str(value, spec_);
}
using ArgVisitor<Impl, void>::visit_wstring;
void visit_wstring(internal::Arg::StringValue<Char> value) {
writer_.write_str(value, spec_);
}
// Pointers accept only the 'p' type spec (or none).
void visit_pointer(const void *value) {
if (spec_.type_ && spec_.type_ != 'p')
report_unknown_type(spec_.type_, "pointer");
write_pointer(value);
}
};
// Common state and helpers shared by formatters: holds the argument list
// and enforces the rule that automatic ("{}") and manual ("{0}") argument
// indexing cannot be mixed.
class FormatterBase {
private:
ArgList args_;
// Next automatic index; -1 once manual indexing has been used.
int next_arg_index_;
// Returns the argument with specified index.
FMT_API Arg do_get_arg(unsigned arg_index, const char *&error);
protected:
const ArgList &args() const { return args_; }
explicit FormatterBase(const ArgList &args) {
args_ = args;
next_arg_index_ = 0;
}
// Returns the next argument.
Arg next_arg(const char *&error) {
if (next_arg_index_ >= 0)
return do_get_arg(internal::to_unsigned(next_arg_index_++), error);
error = "cannot switch from manual to automatic argument indexing";
return Arg();
}
// Checks if manual indexing is used and returns the argument with
// specified index.
Arg get_arg(unsigned arg_index, const char *&error) {
return check_no_auto_index(error) ? do_get_arg(arg_index, error) : Arg();
}
// Verifies that automatic indexing has not been used yet, and switches
// this formatter to manual indexing mode.
bool check_no_auto_index(const char *&error) {
if (next_arg_index_ > 0) {
error = "cannot switch from automatic to manual argument indexing";
return false;
}
next_arg_index_ = -1;
return true;
}
// Writes the literal text [start, end) to the writer.
template <typename Char>
void write(BasicWriter<Char> &w, const Char *start, const Char *end) {
if (start != end)
w << BasicStringRef<Char>(start, internal::to_unsigned(end - start));
}
};
} // namespace internal
/**
\rst
An argument formatter based on the `curiously recurring template pattern
<http://en.wikipedia.org/wiki/Curiously_recurring_template_pattern>`_.
To use `~fmt::BasicArgFormatter` define a subclass that implements some or
all of the visit methods with the same signatures as the methods in
`~fmt::ArgVisitor`, for example, `~fmt::ArgVisitor::visit_int()`.
Pass the subclass as the *Impl* template parameter. When a formatting
function processes an argument, it will dispatch to a visit method
specific to the argument type. For example, if the argument type is
``double`` then the `~fmt::ArgVisitor::visit_double()` method of a subclass
will be called. If the subclass doesn't contain a method with this signature,
then a corresponding method of `~fmt::BasicArgFormatter` or its superclass
will be called.
\endrst
*/
template <typename Impl, typename Char, typename Spec = fmt::FormatSpec>
class BasicArgFormatter : public internal::ArgFormatterBase<Impl, Char, Spec> {
private:
// Main formatter, used to format custom (user-defined) argument types.
BasicFormatter<Char, Impl> &formatter_;
// Points into the format string at the spec of the argument being parsed.
const Char *format_;
public:
/**
\rst
Constructs an argument formatter object.
*formatter* is a reference to the main formatter object, *spec* contains
format specifier information for standard argument types, and *fmt* points
to the part of the format string being parsed for custom argument types.
\endrst
*/
BasicArgFormatter(BasicFormatter<Char, Impl> &formatter,
Spec &spec, const Char *fmt)
: internal::ArgFormatterBase<Impl, Char, Spec>(formatter.writer(), spec),
formatter_(formatter), format_(fmt) {}
/** Formats an argument of a custom (user-defined) type. */
void visit_custom(internal::Arg::CustomValue c) {
c.format(&formatter_, c.value, &format_);
}
};
/** The default argument formatter. */
// Ties BasicArgFormatter's CRTP knot with itself as the Impl parameter.
template <typename Char>
class ArgFormatter :
public BasicArgFormatter<ArgFormatter<Char>, Char, FormatSpec> {
public:
/** Constructs an argument formatter object. */
ArgFormatter(BasicFormatter<Char> &formatter,
FormatSpec &spec, const Char *fmt)
: BasicArgFormatter<ArgFormatter<Char>,
Char, FormatSpec>(formatter, spec, fmt) {}
};
/** This template formats data and writes the output to a writer. */
template <typename CharType, typename ArgFormatter>
class BasicFormatter : private internal::FormatterBase {
public:
/** The character type for the output. */
typedef CharType Char;
private:
BasicWriter<Char> &writer_;
// Lazily-built map for looking up named arguments such as "{name}".
internal::ArgMap<Char> map_;
FMT_DISALLOW_COPY_AND_ASSIGN(BasicFormatter);
using internal::FormatterBase::get_arg;
// Checks if manual indexing is used and returns the argument with
// specified name.
internal::Arg get_arg(BasicStringRef<Char> arg_name, const char *&error);
// Parses argument index and returns corresponding argument.
internal::Arg parse_arg_index(const Char *&s);
// Parses argument name and returns corresponding argument.
internal::Arg parse_arg_name(const Char *&s);
public:
/**
\rst
Constructs a ``BasicFormatter`` object. References to the arguments and
the writer are stored in the formatter object so make sure they have
appropriate lifetimes.
\endrst
*/
BasicFormatter(const ArgList &args, BasicWriter<Char> &w)
: internal::FormatterBase(args), writer_(w) {}
/** Returns a reference to the writer associated with this formatter. */
BasicWriter<Char> &writer() { return writer_; }
/** Formats stored arguments and writes the output to the writer. */
void format(BasicCStringRef<Char> format_str);
// Formats a single argument and advances format_str, a format string pointer.
const Char *format(const Char *&format_str, const internal::Arg &arg);
};
// Generates a comma-separated list with results of applying f to
// numbers 0..n-1.
// Each FMT_GENn expands recursively via FMT_GEN(n-1); used below to emulate
// variadic templates on pre-C++11 compilers.
# define FMT_GEN(n, f) FMT_GEN##n(f)
# define FMT_GEN1(f)  f(0)
# define FMT_GEN2(f)  FMT_GEN1(f),  f(1)
# define FMT_GEN3(f)  FMT_GEN2(f),  f(2)
# define FMT_GEN4(f)  FMT_GEN3(f),  f(3)
# define FMT_GEN5(f)  FMT_GEN4(f),  f(4)
# define FMT_GEN6(f)  FMT_GEN5(f),  f(5)
# define FMT_GEN7(f)  FMT_GEN6(f),  f(6)
# define FMT_GEN8(f)  FMT_GEN7(f),  f(7)
# define FMT_GEN9(f)  FMT_GEN8(f),  f(8)
# define FMT_GEN10(f) FMT_GEN9(f),  f(9)
# define FMT_GEN11(f) FMT_GEN10(f), f(10)
# define FMT_GEN12(f) FMT_GEN11(f), f(11)
# define FMT_GEN13(f) FMT_GEN12(f), f(12)
# define FMT_GEN14(f) FMT_GEN13(f), f(13)
# define FMT_GEN15(f) FMT_GEN14(f), f(14)
namespace internal {
// make_type packs the type codes of all arguments into a single uint64_t,
// 4 bits per argument (see the << 4 shifts below).
inline uint64_t make_type() { return 0; }
template <typename T>
inline uint64_t make_type(const T &arg) {
return MakeValue< BasicFormatter<char> >::type(arg);
}
// Storage for an argument pack: a packed Value array when N is below
// MAX_PACKED_ARGS, otherwise a full Arg array.
template <std::size_t N, bool/*IsPacked*/= (N < ArgList::MAX_PACKED_ARGS)>
struct ArgArray;
template <std::size_t N>
struct ArgArray<N, true/*IsPacked*/> {
// '+' is used to silence GCC -Wduplicated-branches warning.
typedef Value Type[N > 0 ? N : +1];
template <typename Formatter, typename T>
static Value make(const T &value) {
#ifdef __clang__
Value result = MakeValue<Formatter>(value);
// Workaround a bug in Apple LLVM version 4.2 (clang-425.0.28) of clang:
// https://github.com/fmtlib/fmt/issues/276
(void)result.custom.format;
return result;
#else
return MakeValue<Formatter>(value);
#endif
}
};
template <std::size_t N>
struct ArgArray<N, false/*IsPacked*/> {
typedef Arg Type[N + 1]; // +1 for the list end Arg::NONE
template <typename Formatter, typename T>
static Arg make(const T &value) { return MakeArg<Formatter>(value); }
};
#if FMT_USE_VARIADIC_TEMPLATES
template <typename Arg, typename... Args>
inline uint64_t make_type(const Arg &first, const Args & ... tail) {
return make_type(first) | (make_type(tail...) << 4);
}
#else
// Pre-C++11 fallback: ArgType captures one argument's type code and the
// 15-parameter make_type overload combines them, 4 bits per argument.
struct ArgType {
uint64_t type;
ArgType() : type(0) {}
template <typename T>
ArgType(const T &arg) : type(make_type(arg)) {}
};
# define FMT_ARG_TYPE_DEFAULT(n) ArgType t##n = ArgType()
inline uint64_t make_type(FMT_GEN15(FMT_ARG_TYPE_DEFAULT)) {
return t0.type | (t1.type << 4) | (t2.type << 8) | (t3.type << 12) |
(t4.type << 16) | (t5.type << 20) | (t6.type << 24) | (t7.type << 28) |
(t8.type << 32) | (t9.type << 36) | (t10.type << 40) | (t11.type << 44) |
(t12.type << 48) | (t13.type << 52) | (t14.type << 56);
}
#endif
} // namespace internal
// Building blocks for the variadic-function emulation macros below.
# define FMT_MAKE_TEMPLATE_ARG(n) typename T##n
# define FMT_MAKE_ARG_TYPE(n) T##n
# define FMT_MAKE_ARG(n) const T##n &v##n
# define FMT_ASSIGN_char(n) \
arr[n] = fmt::internal::MakeValue< fmt::BasicFormatter<char> >(v##n)
# define FMT_ASSIGN_wchar_t(n) \
arr[n] = fmt::internal::MakeValue< fmt::BasicFormatter<wchar_t> >(v##n)
#if FMT_USE_VARIADIC_TEMPLATES
// Defines a variadic function returning void.
# define FMT_VARIADIC_VOID(func, arg_type) \
template <typename... Args> \
void func(arg_type arg0, const Args & ... args) { \
typedef fmt::internal::ArgArray<sizeof...(Args)> ArgArray; \
typename ArgArray::Type array{ \
ArgArray::template make<fmt::BasicFormatter<Char> >(args)...}; \
func(arg0, fmt::ArgList(fmt::internal::make_type(args...), array)); \
}
// Defines a variadic constructor.
# define FMT_VARIADIC_CTOR(ctor, func, arg0_type, arg1_type) \
template <typename... Args> \
ctor(arg0_type arg0, arg1_type arg1, const Args & ... args) { \
typedef fmt::internal::ArgArray<sizeof...(Args)> ArgArray; \
typename ArgArray::Type array{ \
ArgArray::template make<fmt::BasicFormatter<Char> >(args)...}; \
func(arg0, arg1, fmt::ArgList(fmt::internal::make_type(args...), array)); \
}
#else
// Pre-C++11 path: generate one overload per argument count (up to 10).
# define FMT_MAKE_REF(n) \
fmt::internal::MakeValue< fmt::BasicFormatter<Char> >(v##n)
# define FMT_MAKE_REF2(n) v##n
// Defines a wrapper for a function taking one argument of type arg_type
// and n additional arguments of arbitrary types.
# define FMT_WRAP1(func, arg_type, n) \
template <FMT_GEN(n, FMT_MAKE_TEMPLATE_ARG)> \
inline void func(arg_type arg1, FMT_GEN(n, FMT_MAKE_ARG)) { \
const fmt::internal::ArgArray<n>::Type array = {FMT_GEN(n, FMT_MAKE_REF)}; \
func(arg1, fmt::ArgList( \
fmt::internal::make_type(FMT_GEN(n, FMT_MAKE_REF2)), array)); \
}
// Emulates a variadic function returning void on a pre-C++11 compiler.
# define FMT_VARIADIC_VOID(func, arg_type) \
inline void func(arg_type arg) { func(arg, fmt::ArgList()); } \
FMT_WRAP1(func, arg_type, 1) FMT_WRAP1(func, arg_type, 2) \
FMT_WRAP1(func, arg_type, 3) FMT_WRAP1(func, arg_type, 4) \
FMT_WRAP1(func, arg_type, 5) FMT_WRAP1(func, arg_type, 6) \
FMT_WRAP1(func, arg_type, 7) FMT_WRAP1(func, arg_type, 8) \
FMT_WRAP1(func, arg_type, 9) FMT_WRAP1(func, arg_type, 10)
# define FMT_CTOR(ctor, func, arg0_type, arg1_type, n) \
template <FMT_GEN(n, FMT_MAKE_TEMPLATE_ARG)> \
ctor(arg0_type arg0, arg1_type arg1, FMT_GEN(n, FMT_MAKE_ARG)) { \
const fmt::internal::ArgArray<n>::Type array = {FMT_GEN(n, FMT_MAKE_REF)}; \
func(arg0, arg1, fmt::ArgList( \
fmt::internal::make_type(FMT_GEN(n, FMT_MAKE_REF2)), array)); \
}
// Emulates a variadic constructor on a pre-C++11 compiler.
# define FMT_VARIADIC_CTOR(ctor, func, arg0_type, arg1_type) \
FMT_CTOR(ctor, func, arg0_type, arg1_type, 1) \
FMT_CTOR(ctor, func, arg0_type, arg1_type, 2) \
FMT_CTOR(ctor, func, arg0_type, arg1_type, 3) \
FMT_CTOR(ctor, func, arg0_type, arg1_type, 4) \
FMT_CTOR(ctor, func, arg0_type, arg1_type, 5) \
FMT_CTOR(ctor, func, arg0_type, arg1_type, 6) \
FMT_CTOR(ctor, func, arg0_type, arg1_type, 7) \
FMT_CTOR(ctor, func, arg0_type, arg1_type, 8) \
FMT_CTOR(ctor, func, arg0_type, arg1_type, 9) \
FMT_CTOR(ctor, func, arg0_type, arg1_type, 10)
#endif
// Generates a comma-separated list with results of applying f to pairs
// (argument, index).
// Like FMT_GEN but passes each explicit argument together with its index.
#define FMT_FOR_EACH1(f, x0) f(x0, 0)
#define FMT_FOR_EACH2(f, x0, x1) \
FMT_FOR_EACH1(f, x0), f(x1, 1)
#define FMT_FOR_EACH3(f, x0, x1, x2) \
FMT_FOR_EACH2(f, x0 ,x1), f(x2, 2)
#define FMT_FOR_EACH4(f, x0, x1, x2, x3) \
FMT_FOR_EACH3(f, x0, x1, x2), f(x3, 3)
#define FMT_FOR_EACH5(f, x0, x1, x2, x3, x4) \
FMT_FOR_EACH4(f, x0, x1, x2, x3), f(x4, 4)
#define FMT_FOR_EACH6(f, x0, x1, x2, x3, x4, x5) \
FMT_FOR_EACH5(f, x0, x1, x2, x3, x4), f(x5, 5)
#define FMT_FOR_EACH7(f, x0, x1, x2, x3, x4, x5, x6) \
FMT_FOR_EACH6(f, x0, x1, x2, x3, x4, x5), f(x6, 6)
#define FMT_FOR_EACH8(f, x0, x1, x2, x3, x4, x5, x6, x7) \
FMT_FOR_EACH7(f, x0, x1, x2, x3, x4, x5, x6), f(x7, 7)
#define FMT_FOR_EACH9(f, x0, x1, x2, x3, x4, x5, x6, x7, x8) \
FMT_FOR_EACH8(f, x0, x1, x2, x3, x4, x5, x6, x7), f(x8, 8)
#define FMT_FOR_EACH10(f, x0, x1, x2, x3, x4, x5, x6, x7, x8, x9) \
FMT_FOR_EACH9(f, x0, x1, x2, x3, x4, x5, x6, x7, x8), f(x9, 9)
/**
An error returned by an operating system or a language runtime,
for example a file opening error.
*/
class SystemError : public internal::RuntimeError {
private:
FMT_API void init(int err_code, CStringRef format_str, ArgList args);
protected:
// The original system error code (e.g. an errno value).
int error_code_;
typedef char Char; // For FMT_VARIADIC_CTOR.
SystemError() {}
public:
/**
\rst
Constructs a :class:`fmt::SystemError` object with a description
formatted with `fmt::format_system_error`. *message* and additional
arguments passed into the constructor are formatted similarly to
`fmt::format`.
**Example**::
// This throws a SystemError with the description
// cannot open file 'madeup': No such file or directory
// or similar (system message may vary).
const char *filename = "madeup";
std::FILE *file = std::fopen(filename, "r");
if (!file)
throw fmt::SystemError(errno, "cannot open file '{}'", filename);
\endrst
*/
SystemError(int error_code, CStringRef message) {
init(error_code, message, ArgList());
}
FMT_DEFAULTED_COPY_CTOR(SystemError)
// Variadic constructors forwarding extra format arguments to init().
FMT_VARIADIC_CTOR(SystemError, init, int, CStringRef)
FMT_API ~SystemError() FMT_DTOR_NOEXCEPT FMT_OVERRIDE;
/** Returns the original system error code. */
int error_code() const { return error_code_; }
};
/**
\rst
Formats an error returned by an operating system or a language runtime,
for example a file opening error, and writes it to *out* in the following
form:
.. parsed-literal::
*<message>*: *<system-message>*
where *<message>* is the passed message and *<system-message>* is
the system message corresponding to the error code.
*error_code* is a system error code as given by ``errno``.
If *error_code* is not a valid error code such as -1, the system message
may look like "Unknown error -1" and is platform-dependent.
\endrst
*/
// Declared noexcept: safe to call while handling another error.
FMT_API void format_system_error(fmt::Writer &out, int error_code,
fmt::StringRef message) FMT_NOEXCEPT;
/**
\rst
This template provides operations for formatting and writing data into
a character stream. The output is stored in a buffer provided by a subclass
such as :class:`fmt::BasicMemoryWriter`.
You can use one of the following typedefs for common character types:
+---------+----------------------+
| Type | Definition |
+=========+======================+
| Writer | BasicWriter<char> |
+---------+----------------------+
| WWriter | BasicWriter<wchar_t> |
+---------+----------------------+
\endrst
*/
template <typename Char>
class BasicWriter {
private:
// Output buffer.
Buffer<Char> &buffer_;
FMT_DISALLOW_COPY_AND_ASSIGN(BasicWriter);
typedef typename internal::CharTraits<Char>::CharPtr CharPtr;
#if FMT_SECURE_SCL
// Returns pointer value.
static Char *get(CharPtr p) { return p.base(); }
#else
static Char *get(Char *p) { return p; }
#endif
// Fills the padding around the content and returns the pointer to the
// content area.
static CharPtr fill_padding(CharPtr buffer,
unsigned total_size, std::size_t content_size, wchar_t fill);
// Grows the buffer by n characters and returns a pointer to the newly
// allocated area.
CharPtr grow_buffer(std::size_t n) {
std::size_t size = buffer_.size();
buffer_.resize(size + n);
return internal::make_ptr(&buffer_[size], n);
}
// Writes an unsigned decimal integer.
// Grows the buffer by prefix_size + digit count and writes the digits
// after the (caller-filled) prefix; returns a pointer to the prefix area.
template <typename UInt>
Char *write_unsigned_decimal(UInt value, unsigned prefix_size = 0) {
unsigned num_digits = internal::count_digits(value);
Char *ptr = get(grow_buffer(prefix_size + num_digits));
internal::format_decimal(ptr + prefix_size, value, num_digits);
return ptr;
}
// Writes a decimal integer.
template <typename Int>
void write_decimal(Int value) {
typedef typename internal::IntTraits<Int>::MainType MainType;
MainType abs_value = static_cast<MainType>(value);
if (internal::is_negative(value)) {
// Negate on MainType (presumably unsigned - TODO confirm IntTraits)
// and write '-' into the reserved one-character prefix slot.
abs_value = 0 - abs_value;
*write_unsigned_decimal(abs_value, 1) = '-';
} else {
write_unsigned_decimal(abs_value, 0);
}
}
// Prepare a buffer for integer formatting.
// EmptySpec overload: no width/alignment - just prefix + digits; returns
// a pointer to the LAST character of the reserved area.
CharPtr prepare_int_buffer(unsigned num_digits,
const EmptySpec &, const char *prefix, unsigned prefix_size) {
unsigned size = prefix_size + num_digits;
CharPtr p = grow_buffer(size);
std::uninitialized_copy(prefix, prefix + prefix_size, p);
return p + size - 1;
}
template <typename Spec>
CharPtr prepare_int_buffer(unsigned num_digits,
const Spec &spec, const char *prefix, unsigned prefix_size);
// Formats an integer.
template <typename T, typename Spec>
void write_int(T value, Spec spec);
// Formats a floating-point number (double or long double).
template <typename T, typename Spec>
void write_double(T value, const Spec &spec);
// Writes a formatted string.
template <typename StrChar>
CharPtr write_str(const StrChar *s, std::size_t size, const AlignSpec &spec);
template <typename StrChar, typename Spec>
void write_str(const internal::Arg::StringValue<StrChar> &str,
const Spec &spec);
// This following methods are private to disallow writing wide characters
// and strings to a char stream. If you want to print a wide string as a
// pointer as std::ostream does, cast it to const void*.
// Do not implement!
void operator<<(typename internal::WCharHelper<wchar_t, Char>::Unsupported);
void operator<<(
typename internal::WCharHelper<const wchar_t *, Char>::Unsupported);
// Appends floating-point length specifier to the format string.
// The second argument is only used for overload resolution.
void append_float_length(Char *&format_ptr, long double) {
*format_ptr++ = 'L';
}
template<typename T>
void append_float_length(Char *&, T) {}
template <typename Impl, typename Char_, typename Spec_>
friend class internal::ArgFormatterBase;
template <typename Impl, typename Char_, typename Spec_>
friend class BasicPrintfArgFormatter;
protected:
/**
Constructs a ``BasicWriter`` object.
*/
explicit BasicWriter(Buffer<Char> &b) : buffer_(b) {}
public:
/**
\rst
Destroys a ``BasicWriter`` object.
\endrst
*/
virtual ~BasicWriter() {}
/**
Returns the total number of characters written.
*/
std::size_t size() const { return buffer_.size(); }
/**
Returns a pointer to the output buffer content. No terminating null
character is appended.
*/
const Char *data() const FMT_NOEXCEPT { return &buffer_[0]; }
/**
Returns a pointer to the output buffer content with terminating null
character appended.
*/
const Char *c_str() const {
// Reserve (not resize) so size() stays unchanged while the terminator
// is written one past the end.
std::size_t size = buffer_.size();
buffer_.reserve(size + 1);
buffer_[size] = '\0';
return &buffer_[0];
}
/**
\rst
Returns the content of the output buffer as an `std::string`.
\endrst
*/
std::basic_string<Char> str() const {
return std::basic_string<Char>(&buffer_[0], buffer_.size());
}
/**
\rst
Writes formatted data.
*args* is an argument list representing arbitrary arguments.
**Example**::
MemoryWriter out;
out.write("Current point:\n");
out.write("({:+f}, {:+f})", -3.14, 3.14);
This will write the following output to the ``out`` object:
.. code-block:: none
Current point:
(-3.140000, +3.140000)
The output can be accessed using :func:`data()`, :func:`c_str` or
:func:`str` methods.
See also :ref:`syntax`.
\endrst
*/
void write(BasicCStringRef<Char> format, ArgList args) {
BasicFormatter<Char>(args, *this).format(format);
}
FMT_VARIADIC_VOID(write, BasicCStringRef<Char>)
// Signed integers go through write_decimal; unsigned ones are routed
// through IntFormatSpec/write_int.
BasicWriter &operator<<(int value) {
write_decimal(value);
return *this;
}
BasicWriter &operator<<(unsigned value) {
return *this << IntFormatSpec<unsigned>(value);
}
BasicWriter &operator<<(long value) {
write_decimal(value);
return *this;
}
BasicWriter &operator<<(unsigned long value) {
return *this << IntFormatSpec<unsigned long>(value);
}
BasicWriter &operator<<(LongLong value) {
write_decimal(value);
return *this;
}
/**
\rst
Formats *value* and writes it to the stream.
\endrst
*/
BasicWriter &operator<<(ULongLong value) {
return *this << IntFormatSpec<ULongLong>(value);
}
BasicWriter &operator<<(double value) {
write_double(value, FormatSpec());
return *this;
}
/**
\rst
Formats *value* using the general format for floating-point numbers
(``'g'``) and writes it to the stream.
\endrst
*/
BasicWriter &operator<<(long double value) {
write_double(value, FormatSpec());
return *this;
}
/**
Writes a character to the stream.
*/
BasicWriter &operator<<(char value) {
buffer_.push_back(value);
return *this;
}
// Enabled only when wchar_t is compatible with Char (wide streams).
BasicWriter &operator<<(
typename internal::WCharHelper<wchar_t, Char>::Supported value) {
buffer_.push_back(value);
return *this;
}
/**
\rst
Writes *value* to the stream.
\endrst
*/
BasicWriter &operator<<(fmt::BasicStringRef<Char> value) {
const Char *str = value.data();
buffer_.append(str, str + value.size());
return *this;
}
BasicWriter &operator<<(
typename internal::WCharHelper<StringRef, Char>::Supported value) {
const char *str = value.data();
buffer_.append(str, str + value.size());
return *this;
}
// Writes an integer wrapped by bin()/oct()/hex()/hexu()/pad().
template <typename T, typename Spec, typename FillChar>
BasicWriter &operator<<(IntFormatSpec<T, Spec, FillChar> spec) {
// Compile-time check that FillChar is convertible to Char.
internal::CharTraits<Char>::convert(FillChar());
write_int(spec.value(), spec);
return *this;
}
// Writes a string wrapped by pad().
template <typename StrChar>
BasicWriter &operator<<(const StrFormatSpec<StrChar> &spec) {
const StrChar *s = spec.str();
write_str(s, std::char_traits<Char>::length(s), spec);
return *this;
}
// Discards all written output (keeps the buffer's capacity).
void clear() FMT_NOEXCEPT { buffer_.clear(); }
Buffer<Char> &buffer() FMT_NOEXCEPT { return buffer_; }
};
// Writes the string s of the given size honoring width/alignment/fill from
// spec, and returns a pointer to where the string content was placed.
template <typename Char>
template <typename StrChar>
typename BasicWriter<Char>::CharPtr BasicWriter<Char>::write_str(
const StrChar *s, std::size_t size, const AlignSpec &spec) {
CharPtr out = CharPtr();
if (spec.width() > size) {
// Reserve the full field width and fill the unused part according to
// the alignment; `out` ends up pointing at the content area.
out = grow_buffer(spec.width());
Char fill = internal::CharTraits<Char>::cast(spec.fill());
if (spec.align() == ALIGN_RIGHT) {
std::uninitialized_fill_n(out, spec.width() - size, fill);
out += spec.width() - size;
} else if (spec.align() == ALIGN_CENTER) {
out = fill_padding(out, spec.width(), size, fill);
} else {
std::uninitialized_fill_n(out + size, spec.width() - size, fill);
}
} else {
out = grow_buffer(size);
}
std::uninitialized_copy(s, s + size, out);
return out;
}
// Writes a string argument, validating the type spec, rejecting null
// pointers and truncating to the precision if one is given.
template <typename Char>
template <typename StrChar, typename Spec>
void BasicWriter<Char>::write_str(
const internal::Arg::StringValue<StrChar> &s, const Spec &spec) {
// Check if StrChar is convertible to Char.
internal::CharTraits<Char>::convert(StrChar());
// Strings accept only the 's' type spec (or none).
if (spec.type_ && spec.type_ != 's')
internal::report_unknown_type(spec.type_, "string");
const StrChar *str_value = s.value;
std::size_t str_size = s.size;
if (str_size == 0) {
if (!str_value) {
FMT_THROW(FormatError("string pointer is null"));
}
}
// A non-negative precision caps the number of characters written.
std::size_t precision = static_cast<std::size_t>(spec.precision_);
if (spec.precision_ >= 0 && precision < str_size)
str_size = precision;
write_str(str_value, str_size, spec);
}
// Center-fills a region of total_size characters: writes fill around a
// content area of content_size (extra character goes to the right side)
// and returns a pointer to the start of the content area.
template <typename Char>
typename BasicWriter<Char>::CharPtr
BasicWriter<Char>::fill_padding(
CharPtr buffer, unsigned total_size,
std::size_t content_size, wchar_t fill) {
std::size_t pad_total = total_size - content_size;
std::size_t pad_left = pad_total / 2;
Char fill_char = internal::CharTraits<Char>::cast(fill);
std::uninitialized_fill_n(buffer, pad_left, fill_char);
buffer += pad_left;
CharPtr content = buffer;
std::uninitialized_fill_n(buffer + content_size,
pad_total - pad_left, fill_char);
return content;
}
// Reserves buffer space for an integer of num_digits digits plus prefix,
// handling width, alignment, fill and precision (zero-padding). Returns a
// pointer to the LAST character of the digit area; callers write digits
// backwards from it.
template <typename Char>
template <typename Spec>
typename BasicWriter<Char>::CharPtr
BasicWriter<Char>::prepare_int_buffer(
unsigned num_digits, const Spec &spec,
const char *prefix, unsigned prefix_size) {
unsigned width = spec.width();
Alignment align = spec.align();
Char fill = internal::CharTraits<Char>::cast(spec.fill());
if (spec.precision() > static_cast<int>(num_digits)) {
// Precision larger than the digit count: zero-pad the number to the
// precision by recursing with a numeric-aligned, '0'-filled subspec,
// then apply the outer width/alignment around that.
// Octal prefix '0' is counted as a digit, so ignore it if precision
// is specified.
if (prefix_size > 0 && prefix[prefix_size - 1] == '0')
--prefix_size;
unsigned number_size =
prefix_size + internal::to_unsigned(spec.precision());
AlignSpec subspec(number_size, '0', ALIGN_NUMERIC);
if (number_size >= width)
return prepare_int_buffer(num_digits, subspec, prefix, prefix_size);
buffer_.reserve(width);
unsigned fill_size = width - number_size;
if (align != ALIGN_LEFT) {
CharPtr p = grow_buffer(fill_size);
std::uninitialized_fill(p, p + fill_size, fill);
}
// Keep an offset rather than a pointer: grow_buffer below may
// reallocate the buffer and invalidate pointers into it.
std::ptrdiff_t offset = get(prepare_int_buffer(
num_digits, subspec, prefix, prefix_size)) - &buffer_[0];
if (align == ALIGN_LEFT) {
CharPtr p = grow_buffer(fill_size);
std::uninitialized_fill(p, p + fill_size, fill);
}
return internal::make_ptr(&buffer_[0], buffer_.size()) + offset;
}
unsigned size = prefix_size + num_digits;
if (width <= size) {
// Content fills the field: no padding needed.
CharPtr p = grow_buffer(size);
std::uninitialized_copy(prefix, prefix + prefix_size, p);
return p + size - 1;
}
CharPtr p = grow_buffer(width);
CharPtr end = p + width;
if (align == ALIGN_LEFT) {
std::uninitialized_copy(prefix, prefix + prefix_size, p);
p += size;
std::uninitialized_fill(p, end, fill);
} else if (align == ALIGN_CENTER) {
p = fill_padding(p, width, size, fill);
std::uninitialized_copy(prefix, prefix + prefix_size, p);
p += size;
} else {
if (align == ALIGN_NUMERIC) {
// Numeric alignment: prefix (sign/base) goes before the fill,
// e.g. "-000042".
if (prefix_size != 0) {
p = std::uninitialized_copy(prefix, prefix + prefix_size, p);
size -= prefix_size;
}
} else {
// Right alignment: prefix sits immediately before the digits.
std::uninitialized_copy(prefix, prefix + prefix_size, end - size);
}
std::uninitialized_fill(p, end - size, fill);
p = end;
}
return p - 1;
}
// Formats an integer according to spec.type(): decimal (default/'d'),
// hex ('x'/'X'), binary ('b'/'B'), octal ('o') or locale-aware decimal
// ('n'). Digits are written backwards from the pointer returned by
// prepare_int_buffer.
template <typename Char>
template <typename T, typename Spec>
void BasicWriter<Char>::write_int(T value, Spec spec) {
unsigned prefix_size = 0;
typedef typename internal::IntTraits<T>::MainType UnsignedType;
UnsignedType abs_value = static_cast<UnsignedType>(value);
// prefix holds sign and/or base prefix (e.g. "-0x"), at most 4 chars.
char prefix[4] = "";
if (internal::is_negative(value)) {
prefix[0] = '-';
++prefix_size;
// Negate on the MainType (presumably unsigned - TODO confirm
// IntTraits) to get the absolute value.
abs_value = 0 - abs_value;
} else if (spec.flag(SIGN_FLAG)) {
prefix[0] = spec.flag(PLUS_FLAG) ? '+' : ' ';
++prefix_size;
}
switch (spec.type()) {
case 0: case 'd': {
unsigned num_digits = internal::count_digits(abs_value);
CharPtr p = prepare_int_buffer(num_digits, spec, prefix, prefix_size) + 1;
internal::format_decimal(get(p), abs_value, 0);
break;
}
case 'x': case 'X': {
UnsignedType n = abs_value;
// '#' flag adds the "0x"/"0X" base prefix.
if (spec.flag(HASH_FLAG)) {
prefix[prefix_size++] = '0';
prefix[prefix_size++] = spec.type_prefix();
}
// Count hex digits (4 bits each).
unsigned num_digits = 0;
do {
++num_digits;
} while ((n >>= 4) != 0);
Char *p = get(prepare_int_buffer(
num_digits, spec, prefix, prefix_size));
n = abs_value;
const char *digits = spec.type() == 'x' ?
"0123456789abcdef" : "0123456789ABCDEF";
do {
*p-- = digits[n & 0xf];
} while ((n >>= 4) != 0);
break;
}
case 'b': case 'B': {
UnsignedType n = abs_value;
// '#' flag adds the "0b"/"0B" base prefix.
if (spec.flag(HASH_FLAG)) {
prefix[prefix_size++] = '0';
prefix[prefix_size++] = spec.type_prefix();
}
// Count binary digits (1 bit each).
unsigned num_digits = 0;
do {
++num_digits;
} while ((n >>= 1) != 0);
Char *p = get(prepare_int_buffer(num_digits, spec, prefix, prefix_size));
n = abs_value;
do {
*p-- = static_cast<Char>('0' + (n & 1));
} while ((n >>= 1) != 0);
break;
}
case 'o': {
UnsignedType n = abs_value;
// '#' flag adds the leading '0' octal prefix.
if (spec.flag(HASH_FLAG))
prefix[prefix_size++] = '0';
// Count octal digits (3 bits each).
unsigned num_digits = 0;
do {
++num_digits;
} while ((n >>= 3) != 0);
Char *p = get(prepare_int_buffer(num_digits, spec, prefix, prefix_size));
n = abs_value;
do {
*p-- = static_cast<Char>('0' + (n & 7));
} while ((n >>= 3) != 0);
break;
}
case 'n': {
// Locale-aware decimal: insert the locale's thousands separator
// every three digits (not available on Android).
unsigned num_digits = internal::count_digits(abs_value);
fmt::StringRef sep = "";
#if !(defined(ANDROID) || defined(__ANDROID__))
sep = internal::thousands_sep(std::localeconv());
#endif
unsigned size = static_cast<unsigned>(
num_digits + sep.size() * ((num_digits - 1) / 3));
CharPtr p = prepare_int_buffer(size, spec, prefix, prefix_size) + 1;
internal::format_decimal(get(p), abs_value, 0, internal::ThousandsSep(sep));
break;
}
default:
internal::report_unknown_type(
spec.type(), spec.flag(CHAR_FLAG) ? "char" : "integer");
break;
}
}
// Formats the floating-point *value* according to *spec*. NaN and infinity
// are formatted by hand for cross-platform consistency; finite values are
// delegated to the platform's snprintf via CharTraits::format_float, with
// a grow-and-retry loop sizing the buffer.
template <typename Char>
template <typename T, typename Spec>
void BasicWriter<Char>::write_double(T value, const Spec &spec) {
  // Check type.
  char type = spec.type();
  bool upper = false;
  switch (type) {
  case 0:
    type = 'g';
    break;
  case 'e': case 'f': case 'g': case 'a':
    break;
  case 'F':
#if FMT_MSC_VER
    // MSVC's printf doesn't support 'F'.
    type = 'f';
#endif
    // Fall through.
  case 'E': case 'G': case 'A':
    upper = true;
    break;
  default:
    internal::report_unknown_type(type, "double");
    break;
  }

  char sign = 0;
  // Use isnegative instead of value < 0 because the latter is always
  // false for NaN.
  if (internal::FPUtil::isnegative(static_cast<double>(value))) {
    sign = '-';
    value = -value;
  } else if (spec.flag(SIGN_FLAG)) {
    sign = spec.flag(PLUS_FLAG) ? '+' : ' ';
  }

  if (internal::FPUtil::isnotanumber(value)) {
    // Format NaN ourselves because sprintf's output is not consistent
    // across platforms.
    std::size_t nan_size = 4;
    const char *nan = upper ? " NAN" : " nan";
    if (!sign) {
      // No sign: drop the leading space reserved for it.
      --nan_size;
      ++nan;
    }
    CharPtr out = write_str(nan, nan_size, spec);
    if (sign)
      *out = sign;
    return;
  }

  if (internal::FPUtil::isinfinity(value)) {
    // Format infinity ourselves because sprintf's output is not consistent
    // across platforms.
    std::size_t inf_size = 4;
    const char *inf = upper ? " INF" : " inf";
    if (!sign) {
      --inf_size;
      ++inf;
    }
    CharPtr out = write_str(inf, inf_size, spec);
    if (sign)
      *out = sign;
    return;
  }

  std::size_t offset = buffer_.size();
  unsigned width = spec.width();
  if (sign) {
    // Reserve a slot before the formatted number for the sign character.
    buffer_.reserve(buffer_.size() + (width > 1u ? width : 1u));
    if (width > 0)
      --width;
    ++offset;
  }

  // Build format string.
  enum { MAX_FORMAT_SIZE = 10}; // longest format: %#-*.*Lg
  Char format[MAX_FORMAT_SIZE];
  Char *format_ptr = format;
  *format_ptr++ = '%';
  unsigned width_for_sprintf = width;
  if (spec.flag(HASH_FLAG))
    *format_ptr++ = '#';
  if (spec.align() == ALIGN_CENTER) {
    // Centering is done manually below, so don't let sprintf pad.
    width_for_sprintf = 0;
  } else {
    if (spec.align() == ALIGN_LEFT)
      *format_ptr++ = '-';
    if (width != 0)
      *format_ptr++ = '*';
  }
  if (spec.precision() >= 0) {
    *format_ptr++ = '.';
    *format_ptr++ = '*';
  }
  append_float_length(format_ptr, value);
  *format_ptr++ = type;
  *format_ptr = '\0';

  // Format using snprintf.
  Char fill = internal::CharTraits<Char>::cast(spec.fill());
  unsigned n = 0;
  Char *start = FMT_NULL;
  for (;;) {
    std::size_t buffer_size = buffer_.capacity() - offset;
#if FMT_MSC_VER
    // MSVC's vsnprintf_s doesn't work with zero size, so reserve
    // space for at least one extra character to make the size non-zero.
    // Note that the buffer's capacity will increase by more than 1.
    if (buffer_size == 0) {
      buffer_.reserve(offset + 1);
      buffer_size = buffer_.capacity() - offset;
    }
#endif
    start = &buffer_[offset];
    int result = internal::CharTraits<Char>::format_float(
        start, buffer_size, format, width_for_sprintf, spec.precision(), value);
    if (result >= 0) {
      n = internal::to_unsigned(result);
      if (offset + n < buffer_.capacity())
        break;  // The buffer is large enough - continue with formatting.
      buffer_.reserve(offset + n + 1);
    } else {
      // If result is negative we ask to increase the capacity by at least 1,
      // but as std::vector, the buffer grows exponentially.
      buffer_.reserve(buffer_.capacity() + 1);
    }
  }
  if (sign) {
    // Place the sign in the reserved slot, or keep it for later if the
    // output starts with sprintf's own space padding.
    if ((spec.align() != ALIGN_RIGHT && spec.align() != ALIGN_DEFAULT) ||
        *start != ' ') {
      *(start - 1) = sign;
      sign = 0;
    } else {
      *(start - 1) = fill;
    }
    ++n;
  }
  if (spec.align() == ALIGN_CENTER && spec.width() > n) {
    // Center manually: shift the number right and pad on both sides.
    width = spec.width();
    CharPtr p = grow_buffer(width);
    std::memmove(get(p) + (width - n) / 2, get(p), n * sizeof(Char));
    fill_padding(p, spec.width(), n, fill);
    return;
  }
  if (spec.fill() != ' ' || sign) {
    // Replace the spaces sprintf used for padding with the requested fill
    // and drop the deferred sign in front of the number.
    while (*start == ' ')
      *start++ = fill;
    if (sign)
      *(start - 1) = sign;
  }
  grow_buffer(n);
}
/**
\rst
This class template provides operations for formatting and writing data
into a character stream. The output is stored in a memory buffer that grows
dynamically.
You can use one of the following typedefs for common character types
and the standard allocator:
+---------------+-----------------------------------------------------+
| Type | Definition |
+===============+=====================================================+
| MemoryWriter | BasicMemoryWriter<char, std::allocator<char>> |
+---------------+-----------------------------------------------------+
| WMemoryWriter | BasicMemoryWriter<wchar_t, std::allocator<wchar_t>> |
+---------------+-----------------------------------------------------+
**Example**::
MemoryWriter out;
out << "The answer is " << 42 << "\n";
out.write("({:+f}, {:+f})", -3.14, 3.14);
This will write the following output to the ``out`` object:
.. code-block:: none
The answer is 42
(-3.140000, +3.140000)
The output can be converted to an ``std::string`` with ``out.str()`` or
accessed as a C string with ``out.c_str()``.
\endrst
*/
template <typename Char, typename Allocator = std::allocator<Char> >
class BasicMemoryWriter : public BasicWriter<Char> {
 private:
  // Dynamically growing output buffer with a small inline portion.
  internal::MemoryBuffer<Char, internal::INLINE_BUFFER_SIZE, Allocator> buffer_;

 public:
  // NOTE(review): buffer_ is handed to the base class before buffer_ itself
  // is constructed; presumably BasicWriter only stores the reference —
  // confirm before reordering members.
  explicit BasicMemoryWriter(const Allocator& alloc = Allocator())
    : BasicWriter<Char>(buffer_), buffer_(alloc) {}

#if FMT_USE_RVALUE_REFERENCES
  /**
   \rst
   Constructs a :class:`fmt::BasicMemoryWriter` object moving the content
   of the other object to it.
   \endrst
  */
  BasicMemoryWriter(BasicMemoryWriter &&other)
    : BasicWriter<Char>(buffer_), buffer_(std::move(other.buffer_)) {
  }

  /**
   \rst
   Moves the content of the other ``BasicMemoryWriter`` object to this one.
   \endrst
  */
  BasicMemoryWriter &operator=(BasicMemoryWriter &&other) {
    buffer_ = std::move(other.buffer_);
    return *this;
  }
#endif
};

// Convenience typedefs for the common character types.
typedef BasicMemoryWriter<char> MemoryWriter;
typedef BasicMemoryWriter<wchar_t> WMemoryWriter;
/**
\rst
This class template provides operations for formatting and writing data
into a fixed-size array. For writing into a dynamically growing buffer
use :class:`fmt::BasicMemoryWriter`.
Any write method will throw ``std::runtime_error`` if the output doesn't fit
into the array.
You can use one of the following typedefs for common character types:
+--------------+---------------------------+
| Type | Definition |
+==============+===========================+
| ArrayWriter | BasicArrayWriter<char> |
+--------------+---------------------------+
| WArrayWriter | BasicArrayWriter<wchar_t> |
+--------------+---------------------------+
\endrst
*/
template <typename Char>
class BasicArrayWriter : public BasicWriter<Char> {
 private:
  // Fixed-capacity buffer over the caller-supplied array; writes that do
  // not fit raise an error (see class documentation above).
  internal::FixedBuffer<Char> buffer_;

 public:
  /**
   \rst
   Constructs a :class:`fmt::BasicArrayWriter` object for *array* of the
   given size.
   \endrst
  */
  BasicArrayWriter(Char *array, std::size_t size)
    : BasicWriter<Char>(buffer_), buffer_(array, size) {}

  /**
   \rst
   Constructs a :class:`fmt::BasicArrayWriter` object for *array* of the
   size known at compile time.
   \endrst
  */
  template <std::size_t SIZE>
  explicit BasicArrayWriter(Char (&array)[SIZE])
    : BasicWriter<Char>(buffer_), buffer_(array, SIZE) {}
};

// Convenience typedefs for the common character types.
typedef BasicArrayWriter<char> ArrayWriter;
typedef BasicArrayWriter<wchar_t> WArrayWriter;
// Reports a system error without throwing an exception.
// Can be used to report errors from destructors.
FMT_API void report_system_error(int error_code,
StringRef message) FMT_NOEXCEPT;
#if FMT_USE_WINDOWS_H

/** A Windows error. */
class WindowsError : public SystemError {
 private:
  // Builds the "<message>: <system-message>" description; shared by the
  // plain and variadic constructors.
  FMT_API void init(int error_code, CStringRef format_str, ArgList args);

 public:
  /**
   \rst
   Constructs a :class:`fmt::WindowsError` object with the description
   of the form

   .. parsed-literal::
     *<message>*: *<system-message>*

   where *<message>* is the formatted message and *<system-message>* is the
   system message corresponding to the error code.
   *error_code* is a Windows error code as given by ``GetLastError``.
   If *error_code* is not a valid error code such as -1, the system message
   will look like "error -1".

   **Example**::

     // This throws a WindowsError with the description
     //   cannot open file 'madeup': The system cannot find the file specified.
     // or similar (system message may vary).
     const char *filename = "madeup";
     LPOFSTRUCT of = LPOFSTRUCT();
     HFILE file = OpenFile(filename, &of, OF_READ);
     if (file == HFILE_ERROR) {
       throw fmt::WindowsError(GetLastError(),
                               "cannot open file '{}'", filename);
     }
   \endrst
  */
  WindowsError(int error_code, CStringRef message) {
    init(error_code, message, ArgList());
  }
  FMT_VARIADIC_CTOR(WindowsError, init, int, CStringRef)
};

// Reports a Windows error without throwing an exception.
// Can be used to report errors from destructors.
FMT_API void report_windows_error(int error_code,
                                  StringRef message) FMT_NOEXCEPT;

#endif
enum Color { BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE };
/**
Formats a string and prints it to stdout using ANSI escape sequences
to specify color (experimental).
Example:
print_colored(fmt::RED, "Elapsed time: {0:.2f} seconds", 1.23);
*/
FMT_API void print_colored(Color c, CStringRef format, ArgList args);
/**
\rst
Formats arguments and returns the result as a string.
**Example**::
std::string message = format("The answer is {}", 42);
\endrst
*/
inline std::string format(CStringRef format_str, ArgList args) {
  // Collect the formatted output in a growable writer, then copy it out.
  MemoryWriter writer;
  writer.write(format_str, args);
  return writer.str();
}

// Wide-character overload of the above; returns a ``std::wstring``.
inline std::wstring format(WCStringRef format_str, ArgList args) {
  WMemoryWriter writer;
  writer.write(format_str, args);
  return writer.str();
}
/**
\rst
Prints formatted data to the file *f*.
**Example**::
print(stderr, "Don't {}!", "panic");
\endrst
*/
FMT_API void print(std::FILE *f, CStringRef format_str, ArgList args);
/**
\rst
Prints formatted data to ``stdout``.
**Example**::
print("Elapsed time: {0:.2f} seconds", 1.23);
\endrst
*/
FMT_API void print(CStringRef format_str, ArgList args);
/**
Fast integer formatter.
*/
class FormatInt {
 private:
  // Buffer should be large enough to hold all digits (digits10 + 1),
  // a sign and a null character.
  enum {BUFFER_SIZE = std::numeric_limits<ULongLong>::digits10 + 3};
  // mutable so that c_str() (a const member) can write the terminator.
  mutable char buffer_[BUFFER_SIZE];
  char *str_;  // Points at the first character (sign or digit) in buffer_.

  // Formats value in reverse, writing from the end of buffer_ backwards,
  // and returns a pointer to the first digit.
  char *format_decimal(ULongLong value) {
    char *buffer_end = buffer_ + BUFFER_SIZE - 1;
    while (value >= 100) {
      // Integer division is slow so do it for a group of two digits instead
      // of for every digit. The idea comes from the talk by Alexandrescu
      // "Three Optimization Tips for C++". See speed-test for a comparison.
      unsigned index = static_cast<unsigned>((value % 100) * 2);
      value /= 100;
      *--buffer_end = internal::Data::DIGITS[index + 1];
      *--buffer_end = internal::Data::DIGITS[index];
    }
    if (value < 10) {
      *--buffer_end = static_cast<char>('0' + value);
      return buffer_end;
    }
    // Exactly two digits remain; emit both via the lookup table.
    unsigned index = static_cast<unsigned>(value * 2);
    *--buffer_end = internal::Data::DIGITS[index + 1];
    *--buffer_end = internal::Data::DIGITS[index];
    return buffer_end;
  }

  // Formats a signed value as its absolute value plus a leading '-'.
  void FormatSigned(LongLong value) {
    ULongLong abs_value = static_cast<ULongLong>(value);
    bool negative = value < 0;
    if (negative)
      abs_value = 0 - abs_value;  // Unsigned negation: safe for LLONG_MIN.
    str_ = format_decimal(abs_value);
    if (negative)
      *--str_ = '-';
  }

 public:
  explicit FormatInt(int value) { FormatSigned(value); }
  explicit FormatInt(long value) { FormatSigned(value); }
  explicit FormatInt(LongLong value) { FormatSigned(value); }
  explicit FormatInt(unsigned value) : str_(format_decimal(value)) {}
  explicit FormatInt(unsigned long value) : str_(format_decimal(value)) {}
  explicit FormatInt(ULongLong value) : str_(format_decimal(value)) {}

  /** Returns the number of characters written to the output buffer. */
  std::size_t size() const {
    // Distance from str_ to the slot reserved for the terminator.
    return internal::to_unsigned(buffer_ - str_ + BUFFER_SIZE - 1);
  }

  /**
   Returns a pointer to the output buffer content. No terminating null
   character is appended.
  */
  const char *data() const { return str_; }

  /**
   Returns a pointer to the output buffer content with terminating null
   character appended.
  */
  const char *c_str() const {
    buffer_[BUFFER_SIZE - 1] = '\0';
    return str_;
  }

  /**
   \rst
   Returns the content of the output buffer as an ``std::string``.
   \endrst
  */
  std::string str() const { return std::string(str_, size()); }
};
// Formats a decimal integer value, writing into *buffer* and advancing it
// past the formatted digits (the pointer is taken by reference). This
// function doesn't write a terminating null character.
template <typename T>
inline void format_decimal(char *&buffer, T value) {
  typedef typename internal::IntTraits<T>::MainType MainType;
  MainType abs_value = static_cast<MainType>(value);
  if (internal::is_negative(value)) {
    *buffer++ = '-';
    abs_value = 0 - abs_value;  // Unsigned negation avoids signed overflow.
  }
  if (abs_value < 100) {
    // Fast path for one- and two-digit values.
    if (abs_value < 10) {
      *buffer++ = static_cast<char>('0' + abs_value);
      return;
    }
    unsigned index = static_cast<unsigned>(abs_value * 2);
    *buffer++ = internal::Data::DIGITS[index];
    *buffer++ = internal::Data::DIGITS[index + 1];
    return;
  }
  unsigned num_digits = internal::count_digits(abs_value);
  internal::format_decimal(buffer, abs_value, num_digits);
  buffer += num_digits;
}
/**
\rst
Returns a named argument for formatting functions.
**Example**::
print("Elapsed time: {s:.2f} seconds", arg("s", 1.23));
\endrst
*/
template <typename T>
inline internal::NamedArgWithType<char, T> arg(StringRef name, const T &arg) {
  return internal::NamedArgWithType<char, T>(name, arg);
}

// Wide-string overload of fmt::arg.
template <typename T>
inline internal::NamedArgWithType<wchar_t, T> arg(WStringRef name, const T &arg) {
  return internal::NamedArgWithType<wchar_t, T>(name, arg);
}

// The following two functions are deleted intentionally to disable
// nested named arguments as in ``format("{}", arg("a", arg("b", 42)))``.
template <typename Char>
void arg(StringRef, const internal::NamedArg<Char>&) FMT_DELETED_OR_UNDEFINED;
template <typename Char>
void arg(WStringRef, const internal::NamedArg<Char>&) FMT_DELETED_OR_UNDEFINED;
}
#if FMT_GCC_VERSION
// Use the system_header pragma to suppress warnings about variadic macros
// because suppressing -Wvariadic-macros with the diagnostic pragma doesn't
// work. It is used at the end because we want to suppress as little warnings
// as possible.
# pragma GCC system_header
#endif
// This is used to work around VC++ bugs in handling variadic macros.
#define FMT_EXPAND(args) args
// Returns the number of arguments.
// Based on https://groups.google.com/forum/#!topic/comp.std.c/d-6Mj5Lko_s.
#define FMT_NARG(...) FMT_NARG_(__VA_ARGS__, FMT_RSEQ_N())
#define FMT_NARG_(...) FMT_EXPAND(FMT_ARG_N(__VA_ARGS__))
#define FMT_ARG_N(_1, _2, _3, _4, _5, _6, _7, _8, _9, _10, N, ...) N
#define FMT_RSEQ_N() 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0
#define FMT_FOR_EACH_(N, f, ...) \
FMT_EXPAND(FMT_CONCAT(FMT_FOR_EACH, N)(f, __VA_ARGS__))
#define FMT_FOR_EACH(f, ...) \
FMT_EXPAND(FMT_FOR_EACH_(FMT_NARG(__VA_ARGS__), f, __VA_ARGS__))
#define FMT_ADD_ARG_NAME(type, index) type arg##index
#define FMT_GET_ARG_NAME(type, index) arg##index
#if FMT_USE_VARIADIC_TEMPLATES
# define FMT_VARIADIC_(Const, Char, ReturnType, func, call, ...) \
template <typename... Args> \
ReturnType func(FMT_FOR_EACH(FMT_ADD_ARG_NAME, __VA_ARGS__), \
const Args & ... args) Const { \
typedef fmt::internal::ArgArray<sizeof...(Args)> ArgArray; \
typename ArgArray::Type array{ \
ArgArray::template make<fmt::BasicFormatter<Char> >(args)...}; \
call(FMT_FOR_EACH(FMT_GET_ARG_NAME, __VA_ARGS__), \
fmt::ArgList(fmt::internal::make_type(args...), array)); \
}
#else
// Defines a wrapper for a function taking __VA_ARGS__ arguments
// and n additional arguments of arbitrary types.
# define FMT_WRAP(Const, Char, ReturnType, func, call, n, ...) \
template <FMT_GEN(n, FMT_MAKE_TEMPLATE_ARG)> \
inline ReturnType func(FMT_FOR_EACH(FMT_ADD_ARG_NAME, __VA_ARGS__), \
FMT_GEN(n, FMT_MAKE_ARG)) Const { \
fmt::internal::ArgArray<n>::Type arr; \
FMT_GEN(n, FMT_ASSIGN_##Char); \
call(FMT_FOR_EACH(FMT_GET_ARG_NAME, __VA_ARGS__), fmt::ArgList( \
fmt::internal::make_type(FMT_GEN(n, FMT_MAKE_REF2)), arr)); \
}
# define FMT_VARIADIC_(Const, Char, ReturnType, func, call, ...) \
inline ReturnType func(FMT_FOR_EACH(FMT_ADD_ARG_NAME, __VA_ARGS__)) Const { \
call(FMT_FOR_EACH(FMT_GET_ARG_NAME, __VA_ARGS__), fmt::ArgList()); \
} \
FMT_WRAP(Const, Char, ReturnType, func, call, 1, __VA_ARGS__) \
FMT_WRAP(Const, Char, ReturnType, func, call, 2, __VA_ARGS__) \
FMT_WRAP(Const, Char, ReturnType, func, call, 3, __VA_ARGS__) \
FMT_WRAP(Const, Char, ReturnType, func, call, 4, __VA_ARGS__) \
FMT_WRAP(Const, Char, ReturnType, func, call, 5, __VA_ARGS__) \
FMT_WRAP(Const, Char, ReturnType, func, call, 6, __VA_ARGS__) \
FMT_WRAP(Const, Char, ReturnType, func, call, 7, __VA_ARGS__) \
FMT_WRAP(Const, Char, ReturnType, func, call, 8, __VA_ARGS__) \
FMT_WRAP(Const, Char, ReturnType, func, call, 9, __VA_ARGS__) \
FMT_WRAP(Const, Char, ReturnType, func, call, 10, __VA_ARGS__) \
FMT_WRAP(Const, Char, ReturnType, func, call, 11, __VA_ARGS__) \
FMT_WRAP(Const, Char, ReturnType, func, call, 12, __VA_ARGS__) \
FMT_WRAP(Const, Char, ReturnType, func, call, 13, __VA_ARGS__) \
FMT_WRAP(Const, Char, ReturnType, func, call, 14, __VA_ARGS__) \
FMT_WRAP(Const, Char, ReturnType, func, call, 15, __VA_ARGS__)
#endif // FMT_USE_VARIADIC_TEMPLATES
/**
\rst
Defines a variadic function with the specified return type, function name
and argument types passed as variable arguments to this macro.
**Example**::
void print_error(const char *file, int line, const char *format,
fmt::ArgList args) {
fmt::print("{}: {}: ", file, line);
fmt::print(format, args);
}
FMT_VARIADIC(void, print_error, const char *, int, const char *)
``FMT_VARIADIC`` is used for compatibility with legacy C++ compilers that
don't implement variadic templates. You don't have to use this macro if
you don't need legacy compiler support and can use variadic templates
directly::
template <typename... Args>
void print_error(const char *file, int line, const char *format,
const Args & ... args) {
fmt::print("{}: {}: ", file, line);
fmt::print(format, args...);
}
\endrst
*/
#define FMT_VARIADIC(ReturnType, func, ...) \
FMT_VARIADIC_(, char, ReturnType, func, return func, __VA_ARGS__)
#define FMT_VARIADIC_CONST(ReturnType, func, ...) \
FMT_VARIADIC_(const, char, ReturnType, func, return func, __VA_ARGS__)
#define FMT_VARIADIC_W(ReturnType, func, ...) \
FMT_VARIADIC_(, wchar_t, ReturnType, func, return func, __VA_ARGS__)
#define FMT_VARIADIC_CONST_W(ReturnType, func, ...) \
FMT_VARIADIC_(const, wchar_t, ReturnType, func, return func, __VA_ARGS__)
#define FMT_CAPTURE_ARG_(id, index) ::fmt::arg(#id, id)
#define FMT_CAPTURE_ARG_W_(id, index) ::fmt::arg(L###id, id)
/**
\rst
Convenient macro to capture the arguments' names and values into several
``fmt::arg(name, value)``.
**Example**::
int x = 1, y = 2;
print("point: ({x}, {y})", FMT_CAPTURE(x, y));
// same as:
// print("point: ({x}, {y})", arg("x", x), arg("y", y));
\endrst
*/
#define FMT_CAPTURE(...) FMT_FOR_EACH(FMT_CAPTURE_ARG_, __VA_ARGS__)
#define FMT_CAPTURE_W(...) FMT_FOR_EACH(FMT_CAPTURE_ARG_W_, __VA_ARGS__)
namespace fmt {
FMT_VARIADIC(std::string, format, CStringRef)
FMT_VARIADIC_W(std::wstring, format, WCStringRef)
FMT_VARIADIC(void, print, CStringRef)
FMT_VARIADIC(void, print, std::FILE *, CStringRef)
FMT_VARIADIC(void, print_colored, Color, CStringRef)
namespace internal {
// Returns true if *c* can start an argument name: an ASCII letter or '_'.
template <typename Char>
inline bool is_name_start(Char c) {
  return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || c == '_';
}
// Parses an unsigned integer, advancing s to the end of the parsed input.
// This function assumes that the first character of s is a digit.
// Throws FormatError("number is too big") if the value exceeds INT_MAX.
template <typename Char>
unsigned parse_nonnegative_int(const Char *&s) {
  assert('0' <= *s && *s <= '9');
  unsigned value = 0;
  // max_int is unsigned to avoid signed/unsigned comparison warnings.
  unsigned max_int = (std::numeric_limits<int>::max)();
  unsigned big = max_int / 10;
  do {
    // Check for overflow before the multiply; saturate just above max_int
    // so the range check after the loop reports the error.
    if (value > big) {
      value = max_int + 1;
      break;
    }
    value = value * 10 + (*s - '0');
    ++s;
  } while ('0' <= *s && *s <= '9');
  if (value > max_int)
    FMT_THROW(FormatError("number is too big"));
  return value;
}
// Throws FormatError unless *arg* holds a numeric type; *spec* is the
// offending format specifier character used in the error message.
inline void require_numeric_argument(const Arg &arg, char spec) {
  if (arg.type <= Arg::LAST_NUMERIC_TYPE)
    return;
  FMT_THROW(fmt::FormatError(
      fmt::format("format specifier '{}' requires numeric argument", spec)));
}
// Validates a sign specifier ('+', '-' or ' ') at *s: the argument must be
// numeric and signed. Advances s past the sign character.
template <typename Char>
void check_sign(const Char *&s, const Arg &arg) {
  char sign = static_cast<char>(*s);
  require_numeric_argument(arg, sign);
  if (arg.type == Arg::UINT || arg.type == Arg::ULONG_LONG) {
    FMT_THROW(FormatError(fmt::format(
      "format specifier '{}' requires signed argument", sign)));
  }
  ++s;
}
} // namespace internal
// Looks up a named argument. Sets *error* when the name is unknown or when
// check_no_auto_index reports a conflict (presumably mixing automatic and
// explicit indexing — confirm in its definition); returns an empty Arg on
// failure.
template <typename Char, typename AF>
inline internal::Arg BasicFormatter<Char, AF>::get_arg(
    BasicStringRef<Char> arg_name, const char *&error) {
  if (check_no_auto_index(error)) {
    map_.init(args());
    const internal::Arg *arg = map_.find(arg_name);
    if (arg)
      return *arg;
    error = "argument not found";
  }
  return internal::Arg();
}

// Parses an argument reference: an explicit numeric index if the next
// character is a digit, otherwise the next automatically-indexed argument.
template <typename Char, typename AF>
inline internal::Arg BasicFormatter<Char, AF>::parse_arg_index(const Char *&s) {
  const char *error = FMT_NULL;
  internal::Arg arg = *s < '0' || *s > '9' ?
      next_arg(error) : get_arg(internal::parse_nonnegative_int(s), error);
  if (error) {
    FMT_THROW(FormatError(
        *s != '}' && *s != ':' ? "invalid format string" : error));
  }
  return arg;
}

// Parses a named argument reference ([A-Za-z_][A-Za-z0-9_]*) and looks it
// up via get_arg.
template <typename Char, typename AF>
inline internal::Arg BasicFormatter<Char, AF>::parse_arg_name(const Char *&s) {
  assert(internal::is_name_start(*s));
  const Char *start = s;
  Char c;
  do {
    c = *++s;
  } while (internal::is_name_start(c) || ('0' <= c && c <= '9'));
  const char *error = FMT_NULL;
  internal::Arg arg = get_arg(BasicStringRef<Char>(start, s - start), error);
  if (error)
    FMT_THROW(FormatError(error));
  return arg;
}
// Parses the format specification following an argument reference
// (fill/align, sign, '#', '0', width, precision, type) and formats *arg*
// with it. *format_str* points just past the argument id; the returned
// pointer is just past the closing '}'.
template <typename Char, typename ArgFormatter>
const Char *BasicFormatter<Char, ArgFormatter>::format(
    const Char *&format_str, const internal::Arg &arg) {
  using internal::Arg;
  const Char *s = format_str;
  typename ArgFormatter::SpecType spec;
  if (*s == ':') {
    if (arg.type == Arg::CUSTOM) {
      // Custom types parse their own specification.
      arg.custom.format(this, arg.custom.value, &s);
      return s;
    }
    ++s;
    // Parse fill and alignment.
    if (Char c = *s) {
      // Look at s+1 first so that "<fill><align>" is preferred over a bare
      // alignment character.
      const Char *p = s + 1;
      spec.align_ = ALIGN_DEFAULT;
      do {
        switch (*p) {
        case '<':
          spec.align_ = ALIGN_LEFT;
          break;
        case '>':
          spec.align_ = ALIGN_RIGHT;
          break;
        case '=':
          spec.align_ = ALIGN_NUMERIC;
          break;
        case '^':
          spec.align_ = ALIGN_CENTER;
          break;
        }
        if (spec.align_ != ALIGN_DEFAULT) {
          if (p != s) {
            // The alignment character is preceded by a fill character.
            if (c == '}') break;
            if (c == '{')
              FMT_THROW(FormatError("invalid fill character '{'"));
            s += 2;
            spec.fill_ = c;
          } else ++s;
          if (spec.align_ == ALIGN_NUMERIC)
            require_numeric_argument(arg, '=');
          break;
        }
      } while (--p >= s);
    }

    // Parse sign.
    switch (*s) {
    case '+':
      check_sign(s, arg);
      spec.flags_ |= SIGN_FLAG | PLUS_FLAG;
      break;
    case '-':
      check_sign(s, arg);
      spec.flags_ |= MINUS_FLAG;
      break;
    case ' ':
      check_sign(s, arg);
      spec.flags_ |= SIGN_FLAG;
      break;
    }

    // Parse '#' (alternate form) flag.
    if (*s == '#') {
      require_numeric_argument(arg, '#');
      spec.flags_ |= HASH_FLAG;
      ++s;
    }

    // Parse zero flag: zero-fill with numeric alignment.
    if (*s == '0') {
      require_numeric_argument(arg, '0');
      spec.align_ = ALIGN_NUMERIC;
      spec.fill_ = '0';
      ++s;
    }

    // Parse width, either literal or dynamic ("{...}").
    if ('0' <= *s && *s <= '9') {
      spec.width_ = internal::parse_nonnegative_int(s);
    } else if (*s == '{') {
      ++s;
      Arg width_arg = internal::is_name_start(*s) ?
          parse_arg_name(s) : parse_arg_index(s);
      if (*s++ != '}')
        FMT_THROW(FormatError("invalid format string"));
      ULongLong value = 0;
      switch (width_arg.type) {
      case Arg::INT:
        if (width_arg.int_value < 0)
          FMT_THROW(FormatError("negative width"));
        value = width_arg.int_value;
        break;
      case Arg::UINT:
        value = width_arg.uint_value;
        break;
      case Arg::LONG_LONG:
        if (width_arg.long_long_value < 0)
          FMT_THROW(FormatError("negative width"));
        value = width_arg.long_long_value;
        break;
      case Arg::ULONG_LONG:
        value = width_arg.ulong_long_value;
        break;
      default:
        FMT_THROW(FormatError("width is not integer"));
      }
      unsigned max_int = (std::numeric_limits<int>::max)();
      if (value > max_int)
        FMT_THROW(FormatError("number is too big"));
      spec.width_ = static_cast<int>(value);
    }

    // Parse precision, either literal or dynamic.
    if (*s == '.') {
      ++s;
      spec.precision_ = 0;
      if ('0' <= *s && *s <= '9') {
        spec.precision_ = internal::parse_nonnegative_int(s);
      } else if (*s == '{') {
        ++s;
        Arg precision_arg = internal::is_name_start(*s) ?
            parse_arg_name(s) : parse_arg_index(s);
        if (*s++ != '}')
          FMT_THROW(FormatError("invalid format string"));
        ULongLong value = 0;
        switch (precision_arg.type) {
        case Arg::INT:
          if (precision_arg.int_value < 0)
            FMT_THROW(FormatError("negative precision"));
          value = precision_arg.int_value;
          break;
        case Arg::UINT:
          value = precision_arg.uint_value;
          break;
        case Arg::LONG_LONG:
          if (precision_arg.long_long_value < 0)
            FMT_THROW(FormatError("negative precision"));
          value = precision_arg.long_long_value;
          break;
        case Arg::ULONG_LONG:
          value = precision_arg.ulong_long_value;
          break;
        default:
          FMT_THROW(FormatError("precision is not integer"));
        }
        unsigned max_int = (std::numeric_limits<int>::max)();
        if (value > max_int)
          FMT_THROW(FormatError("number is too big"));
        spec.precision_ = static_cast<int>(value);
      } else {
        FMT_THROW(FormatError("missing precision specifier"));
      }
      if (arg.type <= Arg::LAST_INTEGER_TYPE || arg.type == Arg::POINTER) {
        FMT_THROW(FormatError(
            fmt::format("precision not allowed in {} format specifier",
            arg.type == Arg::POINTER ? "pointer" : "integer")));
      }
    }

    // Parse type.
    if (*s != '}' && *s)
      spec.type_ = static_cast<char>(*s++);
  }

  if (*s++ != '}')
    FMT_THROW(FormatError("missing '}' in format string"));

  // Format argument.
  ArgFormatter(*this, spec, s - 1).visit(arg);
  return s;
}
// Formats the whole format string: literal text is written through
// unchanged and each replacement field ("{...}") is dispatched to
// format(s, arg) above.
template <typename Char, typename AF>
void BasicFormatter<Char, AF>::format(BasicCStringRef<Char> format_str) {
  const Char *s = format_str.c_str();
  const Char *start = s;
  while (*s) {
    Char c = *s++;
    if (c != '{' && c != '}') continue;
    if (*s == c) {
      // "{{" or "}}" escape: write text up to and including the first
      // brace, then skip the second one.
      write(writer_, start, s);
      start = ++s;
      continue;
    }
    if (c == '}')
      FMT_THROW(FormatError("unmatched '}' in format string"));
    // Flush literal text before the field, then parse the argument id.
    write(writer_, start, s - 1);
    internal::Arg arg = internal::is_name_start(*s) ?
        parse_arg_name(s) : parse_arg_index(s);
    start = s = format(s, arg);
  }
  // Write the trailing literal text.
  write(writer_, start, s);
}
// Holds an iterator range and a separator for fmt::join.
template <typename Char, typename It>
struct ArgJoin {
  It first;
  It last;
  BasicCStringRef<Char> sep;

  ArgJoin(It first, It last, const BasicCStringRef<Char>& sep) :
    first(first),
    last(last),
    sep(sep) {}
};

// Returns an object that formats the elements of [first, last) separated
// by *sep*.
template <typename It>
ArgJoin<char, It> join(It first, It last, const BasicCStringRef<char>& sep) {
  return ArgJoin<char, It>(first, last, sep);
}

template <typename It>
ArgJoin<wchar_t, It> join(It first, It last, const BasicCStringRef<wchar_t>& sep) {
  return ArgJoin<wchar_t, It>(first, last, sep);
}

#if FMT_HAS_GXX_CXX11
// Range overloads (need decltype / trailing return types).
template <typename Range>
auto join(const Range& range, const BasicCStringRef<char>& sep)
    -> ArgJoin<char, decltype(std::begin(range))> {
  return join(std::begin(range), std::end(range), sep);
}

template <typename Range>
auto join(const Range& range, const BasicCStringRef<wchar_t>& sep)
    -> ArgJoin<wchar_t, decltype(std::begin(range))> {
  return join(std::begin(range), std::end(range), sep);
}
#endif
// Formats an ArgJoin: applies the same format specification to every
// element of the range, writing the separator between elements.
template <typename ArgFormatter, typename Char, typename It>
void format_arg(fmt::BasicFormatter<Char, ArgFormatter> &f,
    const Char *&format_str, const ArgJoin<Char, It>& e) {
  // Find the '}' that closes this replacement field, honoring nested
  // braces.
  const Char* end = format_str;
  int brace_level = 1;
  while (*end) {
    if (*end == '}' && --brace_level == 0)
      break;
    if (*end == '{')
      ++brace_level;
    ++end;
  }
  if (*end != '}')
    FMT_THROW(FormatError("missing '}' in format string"));
  It it = e.first;
  if (it != e.last) {
    // Remember the spec start so it can be re-parsed for each element.
    const Char* save = format_str;
    f.format(format_str, internal::MakeArg<fmt::BasicFormatter<Char, ArgFormatter> >(*it++));
    while (it != e.last) {
      f.writer().write(e.sep);
      format_str = save;
      f.format(format_str, internal::MakeArg<fmt::BasicFormatter<Char, ArgFormatter> >(*it++));
    }
  }
  // Resume parsing after the closing brace.
  format_str = end + 1;
}
} // namespace fmt
#if FMT_USE_USER_DEFINED_LITERALS
namespace fmt {
namespace internal {
// Wrapper returned by the ``_format`` user-defined literal; calling it
// with arguments formats the stored format string.
template <typename Char>
struct UdlFormat {
  const Char *str;

  template <typename... Args>
  auto operator()(Args && ... args) const
      -> decltype(format(str, std::forward<Args>(args)...)) {
    return format(str, std::forward<Args>(args)...);
  }
};

// Wrapper returned by the ``_a`` user-defined literal; assigning a value
// to it produces a named argument.
template <typename Char>
struct UdlArg {
  const Char *str;

  template <typename T>
  NamedArgWithType<Char, T> operator=(T &&value) const {
    return {str, std::forward<T>(value)};
  }
};
} // namespace internal
inline namespace literals {

/**
 \rst
 C++11 literal equivalent of :func:`fmt::format`.

 **Example**::

   using namespace fmt::literals;
   std::string message = "The answer is {}"_format(42);
 \endrst
*/
inline internal::UdlFormat<char>
operator"" _format(const char *s, std::size_t) { return {s}; }
// Wide-character overload.
inline internal::UdlFormat<wchar_t>
operator"" _format(const wchar_t *s, std::size_t) { return {s}; }

/**
 \rst
 C++11 literal equivalent of :func:`fmt::arg`.

 **Example**::

   using namespace fmt::literals;
   print("Elapsed time: {s:.2f} seconds", "s"_a=1.23);
 \endrst
*/
inline internal::UdlArg<char>
operator"" _a(const char *s, std::size_t) { return {s}; }
// Wide-character overload.
inline internal::UdlArg<wchar_t>
operator"" _a(const wchar_t *s, std::size_t) { return {s}; }
} // inline namespace literals
} // namespace fmt
#endif // FMT_USE_USER_DEFINED_LITERALS
// Restore warnings.
#if FMT_GCC_VERSION >= 406
# pragma GCC diagnostic pop
#endif
#if defined(__clang__) && !defined(FMT_ICC_VERSION)
# pragma clang diagnostic pop
#endif
#ifdef FMT_HEADER_ONLY
# define FMT_FUNC inline
# include "format.cc"
#else
# define FMT_FUNC
#endif
#endif // FMT_FORMAT_H_
|
6_0
|
crossvul
|
h
|
CWE-119
|
Improper Restriction of Operations within the Bounds of a Memory Buffer - Certain languages allow direct addressing of memory locations and do not automatically ensure that these locations are valid for the memory buffer that is being referenced.
|
c
|
/*
Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization
dedicated to making software imaging solutions freely available.
You may not use this file except in compliance with the License. You may
obtain a copy of the License at
https://imagemagick.org/script/license.php
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
MagickCore pixel accessor methods.
*/
#ifndef MAGICKCORE_PIXEL_ACCESSOR_H
#define MAGICKCORE_PIXEL_ACCESSOR_H
#include <assert.h>
#include "MagickCore/cache.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/color.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/gem.h"
#include "MagickCore/image.h"
#include "MagickCore/memory_.h"
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
#undef index
/*
  Clamp a real-valued pixel to the representable range [0, QuantumRange].
  Without HDRI support Quantum is integral, so 0.5 is added before the cast
  to round to nearest instead of truncating; with HDRI the value is cast
  directly (Quantum is floating-point there).
*/
static inline Quantum ClampPixel(const MagickRealType pixel)
{
  if (pixel < 0.0f)
    return((Quantum) 0);
  if (pixel >= (MagickRealType) QuantumRange)
    return((Quantum) QuantumRange);
#if !defined(MAGICKCORE_HDRI_SUPPORT)
  return((Quantum) (pixel+0.5f));
#else
  return((Quantum) pixel);
#endif
}
/*
  Return the 'a' channel (presumably CIE Lab a* — confirm against the
  colorspace code) of the given raw pixel.  No trait check is performed;
  an undefined channel's offset is read as-is.
*/
static inline Quantum GetPixela(const Image *magick_restrict image,
  const Quantum *magick_restrict pixel)
{
  return(pixel[image->channel_map[aPixelChannel].offset]);
}
/*
  Return the alpha channel of the given pixel, or OpaqueAlpha when the
  image defines no alpha channel (undefined trait).
*/
static inline Quantum GetPixelAlpha(const Image *magick_restrict image,
  const Quantum *magick_restrict pixel)
{
  if (image->channel_map[AlphaPixelChannel].traits == UndefinedPixelTrait)
    return(OpaqueAlpha);
  return(pixel[image->channel_map[AlphaPixelChannel].offset]);
}
static inline PixelTrait GetPixelAlphaTraits(
const Image *magick_restrict image)
{
return(image->channel_map[AlphaPixelChannel].traits);
}
static inline Quantum GetPixelb(const Image *magick_restrict image,
const Quantum *magick_restrict pixel)
{
return(pixel[image->channel_map[bPixelChannel].offset]);
}
static inline Quantum GetPixelBlack(const Image *magick_restrict image,
const Quantum *magick_restrict pixel)
{
if (image->channel_map[BlackPixelChannel].traits == UndefinedPixelTrait)
return((Quantum) 0);
return(pixel[image->channel_map[BlackPixelChannel].offset]);
}
static inline PixelTrait GetPixelBlackTraits(
const Image *magick_restrict image)
{
return(image->channel_map[BlackPixelChannel].traits);
}
static inline Quantum GetPixelBlue(const Image *magick_restrict image,
const Quantum *magick_restrict pixel)
{
return(pixel[image->channel_map[BluePixelChannel].offset]);
}
static inline PixelTrait GetPixelBlueTraits(const Image *magick_restrict image)
{
return(image->channel_map[BluePixelChannel].traits);
}
static inline Quantum GetPixelCb(const Image *magick_restrict image,
const Quantum *magick_restrict pixel)
{
return(pixel[image->channel_map[CbPixelChannel].offset]);
}
static inline PixelTrait GetPixelCbTraits(const Image *magick_restrict image)
{
return(image->channel_map[CbPixelChannel].traits);
}
/*
  Return the value of the specified pixel channel, or 0 when that channel
  is undefined for this image.

  Security fix (CWE-119): the trait lookup now indexes channel_map directly
  by the channel id.  The previous double indirection,
  channel_map[channel_map[channel].offset].traits, dereferenced an offset
  that can be out of range for an undefined channel, producing an
  out-of-bounds read of channel_map.
*/
static inline Quantum GetPixelChannel(const Image *magick_restrict image,
  const PixelChannel channel,const Quantum *magick_restrict pixel)
{
  if (image->channel_map[channel].traits == UndefinedPixelTrait)
    return((Quantum) 0);
  return(pixel[image->channel_map[channel].offset]);
}
static inline PixelChannel GetPixelChannelChannel(
const Image *magick_restrict image,const ssize_t offset)
{
return(image->channel_map[offset].channel);
}
static inline ssize_t GetPixelChannelOffset(const Image *magick_restrict image,
const PixelChannel channel)
{
return(image->channel_map[channel].offset);
}
static inline PixelTrait GetPixelChannelTraits(
const Image *magick_restrict image,const PixelChannel channel)
{
return(image->channel_map[channel].traits);
}
static inline size_t GetPixelChannels(const Image *magick_restrict image)
{
return(image->number_channels);
}
static inline Quantum GetPixelCompositeMask(const Image *magick_restrict image,
const Quantum *magick_restrict pixel)
{
if (image->channel_map[CompositeMaskPixelChannel].traits == UndefinedPixelTrait)
return((Quantum) QuantumRange);
return(pixel[image->channel_map[CompositeMaskPixelChannel].offset]);
}
static inline Quantum GetPixelCr(const Image *magick_restrict image,
const Quantum *magick_restrict pixel)
{
return(pixel[image->channel_map[CrPixelChannel].offset]);
}
static inline PixelTrait GetPixelCrTraits(const Image *magick_restrict image)
{
return(image->channel_map[CrPixelChannel].traits);
}
static inline Quantum GetPixelCyan(const Image *magick_restrict image,
const Quantum *magick_restrict pixel)
{
return(pixel[image->channel_map[CyanPixelChannel].offset]);
}
static inline PixelTrait GetPixelCyanTraits(const Image *magick_restrict image)
{
return(image->channel_map[CyanPixelChannel].traits);
}
static inline Quantum GetPixelGray(const Image *magick_restrict image,
const Quantum *magick_restrict pixel)
{
return(pixel[image->channel_map[GrayPixelChannel].offset]);
}
static inline PixelTrait GetPixelGrayTraits(const Image *magick_restrict image)
{
return(image->channel_map[GrayPixelChannel].traits);
}
static inline Quantum GetPixelGreen(const Image *magick_restrict image,
const Quantum *magick_restrict pixel)
{
return(pixel[image->channel_map[GreenPixelChannel].offset]);
}
static inline PixelTrait GetPixelGreenTraits(
const Image *magick_restrict image)
{
return(image->channel_map[GreenPixelChannel].traits);
}
static inline Quantum GetPixelIndex(const Image *magick_restrict image,
const Quantum *magick_restrict pixel)
{
if (image->channel_map[IndexPixelChannel].traits == UndefinedPixelTrait)
return((Quantum) 0);
return(pixel[image->channel_map[IndexPixelChannel].offset]);
}
static inline PixelTrait GetPixelIndexTraits(
const Image *magick_restrict image)
{
return(image->channel_map[IndexPixelChannel].traits);
}
/*
  Return one component of a PixelInfo selected by channel id.  Only the six
  channels stored explicitly in PixelInfo are meaningful; any other channel
  yields 0.0.
*/
static inline MagickRealType GetPixelInfoChannel(
  const PixelInfo *magick_restrict pixel_info,const PixelChannel channel)
{
  if (channel == RedPixelChannel)
    return(pixel_info->red);
  if (channel == GreenPixelChannel)
    return(pixel_info->green);
  if (channel == BluePixelChannel)
    return(pixel_info->blue);
  if (channel == BlackPixelChannel)
    return(pixel_info->black);
  if (channel == AlphaPixelChannel)
    return(pixel_info->alpha);
  if (channel == IndexPixelChannel)
    return(pixel_info->index);
  return((MagickRealType) 0.0);
}
/*
  Return 1/x for perceptible x.  When |x| falls below MagickEpsilon the
  result saturates to sign(x)/MagickEpsilon instead of overflowing or
  dividing by zero (x < 0.0 selects the negative sign; zero is positive).
*/
static inline double PerceptibleReciprocal(const double x)
{
  double
    sign;

  sign=1.0;
  if (x < 0.0)
    sign=(-1.0);
  if ((sign*x) < MagickEpsilon)
    return(sign/MagickEpsilon);
  return(1.0/x);
}
/*
  Return the luma (gamma-encoded weighted RGB sum; weights approximate the
  Rec. 709 coefficients) of a PixelInfo.  sRGB components are already
  gamma-encoded and are summed directly; any other colorspace is first
  encoded via EncodePixelGamma.
*/
static inline MagickRealType GetPixelInfoLuma(
  const PixelInfo *magick_restrict pixel)
{
  MagickRealType
    intensity;

  if (pixel->colorspace == sRGBColorspace)
    {
      intensity=(MagickRealType) (0.212656f*pixel->red+0.715158f*pixel->green+
        0.072186f*pixel->blue);
      return(intensity);
    }
  intensity=(MagickRealType) (0.212656f*EncodePixelGamma(pixel->red)+
    0.715158f*EncodePixelGamma(pixel->green)+
    0.072186f*EncodePixelGamma(pixel->blue));
  return(intensity);
}
/*
  Return the linear-light luminance of a PixelInfo.  Mirror image of
  GetPixelInfoLuma: sRGB components are gamma-DECODED before the weighted
  sum; components in any other colorspace are summed as-is (assumed already
  linear).
*/
static inline MagickRealType GetPixelInfoLuminance(
  const PixelInfo *magick_restrict pixel)
{
  MagickRealType
    intensity;

  if (pixel->colorspace != sRGBColorspace)
    {
      intensity=(MagickRealType) (0.212656f*pixel->red+0.715158f*pixel->green+
        0.072186f*pixel->blue);
      return(intensity);
    }
  intensity=(MagickRealType) (0.212656f*DecodePixelGamma(pixel->red)+
    0.715158f*DecodePixelGamma(pixel->green)+
    0.072186f*DecodePixelGamma(pixel->blue));
  return(intensity);
}
static inline Quantum GetPixelL(const Image *magick_restrict image,
const Quantum *magick_restrict pixel)
{
return(pixel[image->channel_map[LPixelChannel].offset]);
}
static inline ssize_t GetPixelLabel(const Image *magick_restrict image,
const Quantum *magick_restrict pixel)
{
return((ssize_t) pixel[image->channel_map[LabelPixelChannel].offset]);
}
static inline MagickRealType GetPixelLuma(const Image *magick_restrict image,
const Quantum *magick_restrict pixel)
{
MagickRealType
intensity;
intensity=(MagickRealType) (
0.212656f*pixel[image->channel_map[RedPixelChannel].offset]+
0.715158f*pixel[image->channel_map[GreenPixelChannel].offset]+
0.072186f*pixel[image->channel_map[BluePixelChannel].offset]);
return(intensity);
}
static inline MagickRealType GetPixelLuminance(
const Image *magick_restrict image,const Quantum *magick_restrict pixel)
{
MagickRealType
intensity;
if (image->colorspace != sRGBColorspace)
{
intensity=(MagickRealType) (
0.212656f*pixel[image->channel_map[RedPixelChannel].offset]+
0.715158f*pixel[image->channel_map[GreenPixelChannel].offset]+
0.072186f*pixel[image->channel_map[BluePixelChannel].offset]);
return(intensity);
}
intensity=(MagickRealType) (0.212656f*DecodePixelGamma((MagickRealType)
pixel[image->channel_map[RedPixelChannel].offset])+0.715158f*
DecodePixelGamma((MagickRealType)
pixel[image->channel_map[GreenPixelChannel].offset])+0.072186f*
DecodePixelGamma((MagickRealType)
pixel[image->channel_map[BluePixelChannel].offset]));
return(intensity);
}
static inline Quantum GetPixelMagenta(const Image *magick_restrict image,
const Quantum *magick_restrict pixel)
{
return(pixel[image->channel_map[MagentaPixelChannel].offset]);
}
static inline PixelTrait GetPixelMagentaTraits(
const Image *magick_restrict image)
{
return(image->channel_map[MagentaPixelChannel].traits);
}
static inline Quantum GetPixelReadMask(const Image *magick_restrict image,
const Quantum *magick_restrict pixel)
{
if (image->channel_map[ReadMaskPixelChannel].traits == UndefinedPixelTrait)
return((Quantum) QuantumRange);
return(pixel[image->channel_map[ReadMaskPixelChannel].offset]);
}
static inline Quantum GetPixelWriteMask(const Image *magick_restrict image,
const Quantum *magick_restrict pixel)
{
if (image->channel_map[WriteMaskPixelChannel].traits == UndefinedPixelTrait)
return((Quantum) QuantumRange);
return(pixel[image->channel_map[WriteMaskPixelChannel].offset]);
}
static inline PixelTrait GetPixelReadMaskTraits(
const Image *magick_restrict image)
{
return(image->channel_map[ReadMaskPixelChannel].traits);
}
static inline size_t GetPixelMetaChannels(const Image *magick_restrict image)
{
return(image->number_meta_channels);
}
static inline size_t GetPixelMetacontentExtent(
const Image *magick_restrict image)
{
return(image->metacontent_extent);
}
static inline Quantum GetPixelOpacity(const Image *magick_restrict image,
const Quantum *magick_restrict pixel)
{
if (image->channel_map[AlphaPixelChannel].traits != BlendPixelTrait)
return(QuantumRange-OpaqueAlpha);
return(QuantumRange-pixel[image->channel_map[AlphaPixelChannel].offset]);
}
static inline Quantum GetPixelRed(const Image *magick_restrict image,
const Quantum *magick_restrict pixel)
{
return(pixel[image->channel_map[RedPixelChannel].offset]);
}
static inline PixelTrait GetPixelRedTraits(const Image *magick_restrict image)
{
return(image->channel_map[RedPixelChannel].traits);
}
/*
  Convert a raw Quantum pixel into a PixelInfo.  The PixelInfo is first
  zeroed and given safe defaults (DirectClass, sRGB, quantum depth, opaque,
  no alpha trait); image metadata then overrides those defaults, and when a
  pixel is also supplied its channel values are copied in.  Black, alpha,
  and index are copied only when the corresponding channel is defined; a
  defined alpha channel additionally switches alpha_trait to
  BlendPixelTrait.  Both image and pixel may be NULL.
*/
static inline void GetPixelInfoPixel(const Image *magick_restrict image,
  const Quantum *magick_restrict pixel,PixelInfo *magick_restrict pixel_info)
{
  (void) ResetMagickMemory(pixel_info,0,sizeof(*pixel_info));
  pixel_info->storage_class=DirectClass;
  pixel_info->colorspace=sRGBColorspace;
  pixel_info->depth=MAGICKCORE_QUANTUM_DEPTH;
  pixel_info->alpha_trait=UndefinedPixelTrait;
  pixel_info->alpha=(MagickRealType) OpaqueAlpha;
  if (image != (Image *) NULL)
    {
      pixel_info->storage_class=image->storage_class;
      pixel_info->colorspace=image->colorspace;
      pixel_info->fuzz=image->fuzz;
      pixel_info->depth=image->depth;
      pixel_info->alpha_trait=image->alpha_trait;
      if (pixel != (Quantum *) NULL)
        {
          pixel_info->red=(MagickRealType)
            pixel[image->channel_map[RedPixelChannel].offset];
          pixel_info->green=(MagickRealType)
            pixel[image->channel_map[GreenPixelChannel].offset];
          pixel_info->blue=(MagickRealType)
            pixel[image->channel_map[BluePixelChannel].offset];
          if (image->channel_map[BlackPixelChannel].traits != UndefinedPixelTrait)
            pixel_info->black=(MagickRealType)
              pixel[image->channel_map[BlackPixelChannel].offset];
          if (image->channel_map[AlphaPixelChannel].traits != UndefinedPixelTrait)
            {
              pixel_info->alpha=(MagickRealType)
                pixel[image->channel_map[AlphaPixelChannel].offset];
              pixel_info->alpha_trait=BlendPixelTrait;
            }
          if (image->channel_map[IndexPixelChannel].traits != UndefinedPixelTrait)
            pixel_info->index=(MagickRealType)
              pixel[image->channel_map[IndexPixelChannel].offset];
        }
    }
}
static inline PixelTrait GetPixelTraits(const Image *magick_restrict image,
const PixelChannel channel)
{
return(image->channel_map[channel].traits);
}
static inline Quantum GetPixelY(const Image *magick_restrict image,
const Quantum *magick_restrict pixel)
{
return(pixel[image->channel_map[YPixelChannel].offset]);
}
static inline PixelTrait GetPixelYTraits(const Image *magick_restrict image)
{
return(image->channel_map[YPixelChannel].traits);
}
static inline Quantum GetPixelYellow(const Image *magick_restrict image,
const Quantum *magick_restrict pixel)
{
return(pixel[image->channel_map[YellowPixelChannel].offset]);
}
static inline PixelTrait GetPixelYellowTraits(
const Image *magick_restrict image)
{
return(image->channel_map[YellowPixelChannel].traits);
}
/*
  Return the absolute value of a real-valued pixel component.
*/
static inline MagickRealType AbsolutePixelValue(const MagickRealType x)
{
  if (x < 0.0f)
    return(-x);
  return(x);
}
/*
  Return MagickTrue when the pixel survives a round-trip through the reduced
  depth described by range unchanged: quantize to [0, range] (rounding by
  +0.5), scale back to [0, QuantumRange], and compare.  range == 0 trivially
  passes.  Without HDRI support the final scale also rounds to nearest by
  adding 0.5 before the integral cast.
*/
static inline MagickBooleanType IsPixelAtDepth(const Quantum pixel,
  const QuantumAny range)
{
  Quantum
    quantum;

  if (range == 0)
    return(MagickTrue);
#if !defined(MAGICKCORE_HDRI_SUPPORT)
  quantum=(Quantum) (((MagickRealType) QuantumRange*((QuantumAny)
    (((MagickRealType) range*pixel)/QuantumRange+0.5)))/range+0.5);
#else
  quantum=(Quantum) (((MagickRealType) QuantumRange*((QuantumAny)
    (((MagickRealType) range*pixel)/QuantumRange+0.5)))/range);
#endif
  return(pixel == quantum ? MagickTrue : MagickFalse);
}
/*
  Return MagickTrue when raw pixel p (laid out per image's channel map)
  matches color q within MagickEpsilon per channel.  The comparison order is
  semantic: alpha first, then an early accept when both sides are fully
  transparent (color components are irrelevant for transparent pixels),
  then red/green/blue, and black only for CMYK images.  A missing alpha
  trait on either side is treated as fully opaque.
*/
static inline MagickBooleanType IsPixelEquivalent(
  const Image *magick_restrict image,const Quantum *magick_restrict p,
  const PixelInfo *magick_restrict q)
{
  MagickRealType
    alpha,
    beta,
    color;

  color=(MagickRealType) p[image->channel_map[AlphaPixelChannel].offset];
  alpha=image->alpha_trait == UndefinedPixelTrait ? (MagickRealType)
    OpaqueAlpha : color;
  beta=q->alpha_trait == UndefinedPixelTrait ? (MagickRealType) OpaqueAlpha :
    q->alpha;
  if (AbsolutePixelValue(alpha-beta) >= MagickEpsilon)
    return(MagickFalse);
  if ((AbsolutePixelValue(alpha-TransparentAlpha) < MagickEpsilon) ||
      (AbsolutePixelValue(beta-TransparentAlpha) < MagickEpsilon))
    return(MagickTrue);  /* no color component if pixel is transparent */
  color=(MagickRealType) p[image->channel_map[RedPixelChannel].offset];
  if (AbsolutePixelValue(color-q->red) >= MagickEpsilon)
    return(MagickFalse);
  color=(MagickRealType) p[image->channel_map[GreenPixelChannel].offset];
  if (AbsolutePixelValue(color-q->green) >= MagickEpsilon)
    return(MagickFalse);
  color=(MagickRealType) p[image->channel_map[BluePixelChannel].offset];
  if (AbsolutePixelValue(color-q->blue) >= MagickEpsilon)
    return(MagickFalse);
  if (image->colorspace == CMYKColorspace)
    {
      color=(MagickRealType) p[image->channel_map[BlackPixelChannel].offset];
      if (AbsolutePixelValue(color-q->black) >= MagickEpsilon)
        return(MagickFalse);
    }
  return(MagickTrue);
}
/*
  Return MagickTrue when the pixel is gray, i.e. its red, green, and blue
  components agree within MagickEpsilon.
*/
static inline MagickBooleanType IsPixelGray(const Image *magick_restrict image,
  const Quantum *magick_restrict pixel)
{
  MagickRealType
    blue,
    green,
    red;

  red=(MagickRealType) pixel[image->channel_map[RedPixelChannel].offset];
  green=(MagickRealType) pixel[image->channel_map[GreenPixelChannel].offset];
  blue=(MagickRealType) pixel[image->channel_map[BluePixelChannel].offset];
  if ((AbsolutePixelValue(red-green) >= MagickEpsilon) ||
      (AbsolutePixelValue(green-blue) >= MagickEpsilon))
    return(MagickFalse);
  return(MagickTrue);
}
/*
  Return MagickTrue when two PixelInfo colors match within MagickEpsilon per
  channel.  Same ordering contract as IsPixelEquivalent: alpha first, early
  accept when both sides are fully transparent, then red/green/blue, and
  black only when p is CMYK.  An undefined alpha trait on either side is
  treated as fully opaque.
*/
static inline MagickBooleanType IsPixelInfoEquivalent(
  const PixelInfo *magick_restrict p,const PixelInfo *magick_restrict q)
{
  MagickRealType
    alpha,
    beta;

  alpha=p->alpha_trait == UndefinedPixelTrait ? (MagickRealType) OpaqueAlpha :
    p->alpha;
  beta=q->alpha_trait == UndefinedPixelTrait ? (MagickRealType) OpaqueAlpha :
    q->alpha;
  if (AbsolutePixelValue(alpha-beta) >= MagickEpsilon)
    return(MagickFalse);
  if ((AbsolutePixelValue(alpha-TransparentAlpha) < MagickEpsilon) ||
      (AbsolutePixelValue(beta-TransparentAlpha) < MagickEpsilon))
    return(MagickTrue);  /* no color component if pixel is transparent */
  if (AbsolutePixelValue(p->red-q->red) >= MagickEpsilon)
    return(MagickFalse);
  if (AbsolutePixelValue(p->green-q->green) >= MagickEpsilon)
    return(MagickFalse);
  if (AbsolutePixelValue(p->blue-q->blue) >= MagickEpsilon)
    return(MagickFalse);
  if (p->colorspace == CMYKColorspace)
    {
      if (AbsolutePixelValue(p->black-q->black) >= MagickEpsilon)
        return(MagickFalse);
    }
  return(MagickTrue);
}
static inline MagickBooleanType IsPixelMonochrome(
const Image *magick_restrict image,const Quantum *magick_restrict pixel)
{
MagickRealType
green_blue,
red,
red_green;
red=(MagickRealType) pixel[image->channel_map[RedPixelChannel].offset];
if ((AbsolutePixelValue(red) >= MagickEpsilon) &&
(AbsolutePixelValue(red-QuantumRange) >= MagickEpsilon))
return(MagickFalse);
red_green=(MagickRealType) pixel[image->channel_map[RedPixelChannel].offset]-
pixel[image->channel_map[GreenPixelChannel].offset];
green_blue=(MagickRealType)
pixel[image->channel_map[GreenPixelChannel].offset]-
pixel[image->channel_map[BluePixelChannel].offset];
if ((AbsolutePixelValue(red_green) < MagickEpsilon) &&
(AbsolutePixelValue(green_blue) < MagickEpsilon))
return(MagickTrue);
return(MagickFalse);
}
static inline MagickBooleanType IsPixelInfoGray(
const PixelInfo *magick_restrict pixel)
{
if ((AbsolutePixelValue(pixel->red-pixel->green) < MagickEpsilon) &&
(AbsolutePixelValue(pixel->green-pixel->blue) < MagickEpsilon))
return(MagickTrue);
return(MagickFalse);
}
static inline MagickBooleanType IsPixelInfoMonochrome(
const PixelInfo *magick_restrict pixel_info)
{
MagickRealType
green_blue,
red_green;
if ((AbsolutePixelValue(pixel_info->red) >= MagickEpsilon) ||
(AbsolutePixelValue(pixel_info->red-QuantumRange) >= MagickEpsilon))
return(MagickFalse);
red_green=pixel_info->red-pixel_info->green;
green_blue=pixel_info->green-pixel_info->blue;
if ((AbsolutePixelValue(red_green) < MagickEpsilon) &&
(AbsolutePixelValue(green_blue) < MagickEpsilon))
return(MagickTrue);
return(MagickFalse);
}
static inline void SetPixela(const Image *magick_restrict image,
const Quantum a,Quantum *magick_restrict pixel)
{
if (image->channel_map[aPixelChannel].traits != UndefinedPixelTrait)
pixel[image->channel_map[aPixelChannel].offset]=a;
}
static inline void SetPixelAlpha(const Image *magick_restrict image,
const Quantum alpha,Quantum *magick_restrict pixel)
{
if (image->channel_map[AlphaPixelChannel].traits != UndefinedPixelTrait)
pixel[image->channel_map[AlphaPixelChannel].offset]=alpha;
}
static inline void SetPixelAlphaTraits(Image *image,const PixelTrait traits)
{
image->channel_map[AlphaPixelChannel].traits=traits;
}
static inline void SetPixelb(const Image *magick_restrict image,
const Quantum b,Quantum *magick_restrict pixel)
{
if (image->channel_map[bPixelChannel].traits != UndefinedPixelTrait)
pixel[image->channel_map[bPixelChannel].offset]=b;
}
/*
  Initialize a raw pixel to the image background color: zero every channel,
  then store red/green/blue, plus black and alpha only when those channels
  are defined.  A background color with an undefined alpha trait yields
  OpaqueAlpha.
  NOTE(review): the name is misspelled ("Backgound") but is public API and
  must be kept for source compatibility.
*/
static inline void SetPixelBackgoundColor(const Image *magick_restrict image,
  Quantum *magick_restrict pixel)
{
  ssize_t
    i;

  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
    pixel[i]=(Quantum) 0;
  pixel[image->channel_map[RedPixelChannel].offset]=
    ClampToQuantum(image->background_color.red);
  pixel[image->channel_map[GreenPixelChannel].offset]=
    ClampToQuantum(image->background_color.green);
  pixel[image->channel_map[BluePixelChannel].offset]=
    ClampToQuantum(image->background_color.blue);
  if (image->channel_map[BlackPixelChannel].traits != UndefinedPixelTrait)
    pixel[image->channel_map[BlackPixelChannel].offset]=
      ClampToQuantum(image->background_color.black);
  if (image->channel_map[AlphaPixelChannel].traits != UndefinedPixelTrait)
    pixel[image->channel_map[AlphaPixelChannel].offset]=
      image->background_color.alpha_trait == UndefinedPixelTrait ? OpaqueAlpha :
      ClampToQuantum(image->background_color.alpha);
}
static inline void SetPixelBlack(const Image *magick_restrict image,
const Quantum black,Quantum *magick_restrict pixel)
{
if (image->channel_map[BlackPixelChannel].traits != UndefinedPixelTrait)
pixel[image->channel_map[BlackPixelChannel].offset]=black;
}
static inline void SetPixelBlackTraits(Image *image,const PixelTrait traits)
{
image->channel_map[BlackPixelChannel].traits=traits;
}
static inline void SetPixelBlue(const Image *magick_restrict image,
const Quantum blue,Quantum *magick_restrict pixel)
{
pixel[image->channel_map[BluePixelChannel].offset]=blue;
}
static inline void SetPixelBlueTraits(Image *image,const PixelTrait traits)
{
image->channel_map[BluePixelChannel].traits=traits;
}
static inline void SetPixelCb(const Image *magick_restrict image,
const Quantum cb,Quantum *magick_restrict pixel)
{
pixel[image->channel_map[CbPixelChannel].offset]=cb;
}
static inline void SetPixelCbTraits(Image *image,const PixelTrait traits)
{
image->channel_map[CbPixelChannel].traits=traits;
}
static inline void SetPixelChannel(const Image *magick_restrict image,
const PixelChannel channel,const Quantum quantum,
Quantum *magick_restrict pixel)
{
if (image->channel_map[channel].traits != UndefinedPixelTrait)
pixel[image->channel_map[channel].offset]=quantum;
}
/*
  Bind a pixel channel to a slot in the channel map: record the channel id
  at the given offset, and the offset plus traits under the channel id.
  The asserts bound both indices to MaxPixelChannels and are the only guard
  against out-of-range writes into channel_map — they vanish under NDEBUG.
*/
static inline void SetPixelChannelAttributes(
  const Image *magick_restrict image,const PixelChannel channel,
  const PixelTrait traits,const ssize_t offset)
{
  assert((ssize_t) channel < MaxPixelChannels);
  assert(offset < MaxPixelChannels);
  image->channel_map[offset].channel=channel;
  image->channel_map[channel].offset=offset;
  image->channel_map[channel].traits=traits;
}
static inline void SetPixelChannelChannel(const Image *magick_restrict image,
const PixelChannel channel,const ssize_t offset)
{
image->channel_map[offset].channel=channel;
image->channel_map[channel].offset=offset;
}
static inline void SetPixelChannels(Image *image,const size_t number_channels)
{
image->number_channels=number_channels;
}
static inline void SetPixelChannelTraits(Image *image,
const PixelChannel channel,const PixelTrait traits)
{
image->channel_map[channel].traits=traits;
}
static inline void SetPixelCompositeMask(const Image *magick_restrict image,
const Quantum mask,Quantum *magick_restrict pixel)
{
if (image->channel_map[CompositeMaskPixelChannel].traits != UndefinedPixelTrait)
pixel[image->channel_map[CompositeMaskPixelChannel].offset]=mask;
}
static inline void SetPixelCr(const Image *magick_restrict image,
const Quantum cr,Quantum *magick_restrict pixel)
{
pixel[image->channel_map[CrPixelChannel].offset]=cr;
}
static inline void SetPixelCrTraits(Image *image,const PixelTrait traits)
{
image->channel_map[CrPixelChannel].traits=traits;
}
static inline void SetPixelCyan(const Image *magick_restrict image,
const Quantum cyan,Quantum *magick_restrict pixel)
{
pixel[image->channel_map[CyanPixelChannel].offset]=cyan;
}
static inline void SetPixelGray(const Image *magick_restrict image,
const Quantum gray,Quantum *magick_restrict pixel)
{
pixel[image->channel_map[GrayPixelChannel].offset]=gray;
}
static inline void SetPixelGrayTraits(Image *image,const PixelTrait traits)
{
image->channel_map[GrayPixelChannel].traits=traits;
}
static inline void SetPixelGreen(const Image *magick_restrict image,
const Quantum green,Quantum *magick_restrict pixel)
{
pixel[image->channel_map[GreenPixelChannel].offset]=green;
}
static inline void SetPixelGreenTraits(Image *image,const PixelTrait traits)
{
image->channel_map[GreenPixelChannel].traits=traits;
}
static inline void SetPixelIndex(const Image *magick_restrict image,
const Quantum index,Quantum *magick_restrict pixel)
{
if (image->channel_map[IndexPixelChannel].traits != UndefinedPixelTrait)
pixel[image->channel_map[IndexPixelChannel].offset]=index;
}
static inline void SetPixelIndexTraits(Image *image,const PixelTrait traits)
{
image->channel_map[IndexPixelChannel].traits=traits;
}
static inline void SetPixelViaPixelInfo(const Image *magick_restrict image,
const PixelInfo *magick_restrict pixel_info,Quantum *magick_restrict pixel)
{
pixel[image->channel_map[RedPixelChannel].offset]=
ClampToQuantum(pixel_info->red);
pixel[image->channel_map[GreenPixelChannel].offset]=
ClampToQuantum(pixel_info->green);
pixel[image->channel_map[BluePixelChannel].offset]=
ClampToQuantum(pixel_info->blue);
if (image->channel_map[BlackPixelChannel].traits != UndefinedPixelTrait)
pixel[image->channel_map[BlackPixelChannel].offset]=
ClampToQuantum(pixel_info->black);
if (image->channel_map[AlphaPixelChannel].traits != UndefinedPixelTrait)
pixel[image->channel_map[AlphaPixelChannel].offset]=
pixel_info->alpha_trait == UndefinedPixelTrait ? OpaqueAlpha :
ClampToQuantum(pixel_info->alpha);
}
static inline void SetPixelL(const Image *magick_restrict image,const Quantum L,
Quantum *magick_restrict pixel)
{
if (image->channel_map[LPixelChannel].traits != UndefinedPixelTrait)
pixel[image->channel_map[LPixelChannel].offset]=L;
}
static inline void SetPixelMagenta(const Image *magick_restrict image,
const Quantum magenta,Quantum *magick_restrict pixel)
{
pixel[image->channel_map[MagentaPixelChannel].offset]=magenta;
}
static inline void SetPixelMagentaTraits(Image *image,const PixelTrait traits)
{
image->channel_map[MagentaPixelChannel].traits=traits;
}
static inline void SetPixelReadMask(const Image *magick_restrict image,
const Quantum mask,Quantum *magick_restrict pixel)
{
if (image->channel_map[ReadMaskPixelChannel].traits != UndefinedPixelTrait)
pixel[image->channel_map[ReadMaskPixelChannel].offset]=mask;
}
static inline void SetPixelWriteMask(const Image *magick_restrict image,
const Quantum mask,Quantum *magick_restrict pixel)
{
if (image->channel_map[WriteMaskPixelChannel].traits != UndefinedPixelTrait)
pixel[image->channel_map[WriteMaskPixelChannel].offset]=mask;
}
static inline void SetPixelMetacontentExtent(Image *image,const size_t extent)
{
image->metacontent_extent=extent;
}
static inline void SetPixelOpacity(const Image *magick_restrict image,
const Quantum alpha,Quantum *magick_restrict pixel)
{
if (image->channel_map[AlphaPixelChannel].traits != UndefinedPixelTrait)
pixel[image->channel_map[AlphaPixelChannel].offset]=QuantumRange-alpha;
}
static inline void SetPixelRed(const Image *magick_restrict image,
const Quantum red,Quantum *magick_restrict pixel)
{
pixel[image->channel_map[RedPixelChannel].offset]=red;
}
static inline void SetPixelRedTraits(Image *image,const PixelTrait traits)
{
image->channel_map[RedPixelChannel].traits=traits;
}
static inline void SetPixelYellow(const Image *magick_restrict image,
const Quantum yellow,Quantum *magick_restrict pixel)
{
pixel[image->channel_map[YellowPixelChannel].offset]=yellow;
}
static inline void SetPixelYellowTraits(Image *image,const PixelTrait traits)
{
image->channel_map[YellowPixelChannel].traits=traits;
}
static inline void SetPixelY(const Image *magick_restrict image,
const Quantum y,Quantum *magick_restrict pixel)
{
pixel[image->channel_map[YPixelChannel].offset]=y;
}
static inline void SetPixelYTraits(Image *image,const PixelTrait traits)
{
image->channel_map[YPixelChannel].traits=traits;
}
#if defined(__cplusplus) || defined(c_plusplus)
}
#endif
#endif
|
/*
Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization
dedicated to making software imaging solutions freely available.
You may not use this file except in compliance with the License. You may
obtain a copy of the License at
https://imagemagick.org/script/license.php
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
MagickCore pixel accessor methods.
*/
#ifndef MAGICKCORE_PIXEL_ACCESSOR_H
#define MAGICKCORE_PIXEL_ACCESSOR_H
#include <assert.h>
#include "MagickCore/cache.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/color.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/gem.h"
#include "MagickCore/image.h"
#include "MagickCore/memory_.h"
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
#undef index
static inline Quantum ClampPixel(const MagickRealType pixel)
{
if (pixel < 0.0f)
return((Quantum) 0);
if (pixel >= (MagickRealType) QuantumRange)
return((Quantum) QuantumRange);
#if !defined(MAGICKCORE_HDRI_SUPPORT)
return((Quantum) (pixel+0.5f));
#else
return((Quantum) pixel);
#endif
}
static inline Quantum GetPixela(const Image *magick_restrict image,
const Quantum *magick_restrict pixel)
{
return(pixel[image->channel_map[aPixelChannel].offset]);
}
static inline Quantum GetPixelAlpha(const Image *magick_restrict image,
const Quantum *magick_restrict pixel)
{
if (image->channel_map[AlphaPixelChannel].traits == UndefinedPixelTrait)
return(OpaqueAlpha);
return(pixel[image->channel_map[AlphaPixelChannel].offset]);
}
static inline PixelTrait GetPixelAlphaTraits(
const Image *magick_restrict image)
{
return(image->channel_map[AlphaPixelChannel].traits);
}
static inline Quantum GetPixelb(const Image *magick_restrict image,
const Quantum *magick_restrict pixel)
{
return(pixel[image->channel_map[bPixelChannel].offset]);
}
static inline Quantum GetPixelBlack(const Image *magick_restrict image,
const Quantum *magick_restrict pixel)
{
if (image->channel_map[BlackPixelChannel].traits == UndefinedPixelTrait)
return((Quantum) 0);
return(pixel[image->channel_map[BlackPixelChannel].offset]);
}
static inline PixelTrait GetPixelBlackTraits(
const Image *magick_restrict image)
{
return(image->channel_map[BlackPixelChannel].traits);
}
static inline Quantum GetPixelBlue(const Image *magick_restrict image,
const Quantum *magick_restrict pixel)
{
return(pixel[image->channel_map[BluePixelChannel].offset]);
}
static inline PixelTrait GetPixelBlueTraits(const Image *magick_restrict image)
{
return(image->channel_map[BluePixelChannel].traits);
}
static inline Quantum GetPixelCb(const Image *magick_restrict image,
const Quantum *magick_restrict pixel)
{
return(pixel[image->channel_map[CbPixelChannel].offset]);
}
static inline PixelTrait GetPixelCbTraits(const Image *magick_restrict image)
{
return(image->channel_map[CbPixelChannel].traits);
}
/*
  Return the value of the specified pixel channel, or 0 when the channel is
  undefined for this image.  The trait test indexes channel_map directly by
  the channel id — the bounds-safe form; indirecting through the stored
  offset first could read past channel_map for undefined channels.
*/
static inline Quantum GetPixelChannel(const Image *magick_restrict image,
  const PixelChannel channel,const Quantum *magick_restrict pixel)
{
  if (image->channel_map[channel].traits == UndefinedPixelTrait)
    return((Quantum) 0);
  return(pixel[image->channel_map[channel].offset]);
}
/* Logical channel stored at pixel-array position `offset'. */
static inline PixelChannel GetPixelChannelChannel(
  const Image *magick_restrict image,const ssize_t offset)
{
  return(image->channel_map[offset].channel);
}
/* Pixel-array position of logical channel `channel'. */
static inline ssize_t GetPixelChannelOffset(const Image *magick_restrict image,
  const PixelChannel channel)
{
  return(image->channel_map[channel].offset);
}
/* Traits (copy/update/blend flags) of logical channel `channel'. */
static inline PixelTrait GetPixelChannelTraits(
  const Image *magick_restrict image,const PixelChannel channel)
{
  return(image->channel_map[channel].traits);
}
/* Number of channels stored per pixel in this image. */
static inline size_t GetPixelChannels(const Image *magick_restrict image)
{
  return(image->number_channels);
}
/* Composite-mask sample; QuantumRange (no masking) when the channel is
   not defined for this image. */
static inline Quantum GetPixelCompositeMask(const Image *magick_restrict image,
  const Quantum *magick_restrict pixel)
{
  if (image->channel_map[CompositeMaskPixelChannel].traits == UndefinedPixelTrait)
    return((Quantum) QuantumRange);
  return(pixel[image->channel_map[CompositeMaskPixelChannel].offset]);
}
/* Cr (red-difference chroma) sample of the pixel. */
static inline Quantum GetPixelCr(const Image *magick_restrict image,
  const Quantum *magick_restrict pixel)
{
  return(pixel[image->channel_map[CrPixelChannel].offset]);
}
/* Traits of the Cr channel. */
static inline PixelTrait GetPixelCrTraits(const Image *magick_restrict image)
{
  return(image->channel_map[CrPixelChannel].traits);
}
/* Cyan sample of the pixel. */
static inline Quantum GetPixelCyan(const Image *magick_restrict image,
  const Quantum *magick_restrict pixel)
{
  return(pixel[image->channel_map[CyanPixelChannel].offset]);
}
/* Traits of the cyan channel. */
static inline PixelTrait GetPixelCyanTraits(const Image *magick_restrict image)
{
  return(image->channel_map[CyanPixelChannel].traits);
}
/* Gray sample of the pixel. */
static inline Quantum GetPixelGray(const Image *magick_restrict image,
  const Quantum *magick_restrict pixel)
{
  return(pixel[image->channel_map[GrayPixelChannel].offset]);
}
/* Traits of the gray channel. */
static inline PixelTrait GetPixelGrayTraits(const Image *magick_restrict image)
{
  return(image->channel_map[GrayPixelChannel].traits);
}
/* Green sample of the pixel. */
static inline Quantum GetPixelGreen(const Image *magick_restrict image,
  const Quantum *magick_restrict pixel)
{
  return(pixel[image->channel_map[GreenPixelChannel].offset]);
}
/* Traits of the green channel. */
static inline PixelTrait GetPixelGreenTraits(
  const Image *magick_restrict image)
{
  return(image->channel_map[GreenPixelChannel].traits);
}
/* Colormap-index sample; 0 when the image has no index channel. */
static inline Quantum GetPixelIndex(const Image *magick_restrict image,
  const Quantum *magick_restrict pixel)
{
  if (image->channel_map[IndexPixelChannel].traits == UndefinedPixelTrait)
    return((Quantum) 0);
  return(pixel[image->channel_map[IndexPixelChannel].offset]);
}
/* Traits of the index channel. */
static inline PixelTrait GetPixelIndexTraits(
  const Image *magick_restrict image)
{
  return(image->channel_map[IndexPixelChannel].traits);
}
/*
  Return the requested component of a PixelInfo.  A PixelInfo carries only
  red/green/blue/black/alpha/index; any other channel yields 0.0.
*/
static inline MagickRealType GetPixelInfoChannel(
  const PixelInfo *magick_restrict pixel_info,const PixelChannel channel)
{
  if (channel == RedPixelChannel)
    return(pixel_info->red);
  if (channel == GreenPixelChannel)
    return(pixel_info->green);
  if (channel == BluePixelChannel)
    return(pixel_info->blue);
  if (channel == BlackPixelChannel)
    return(pixel_info->black);
  if (channel == AlphaPixelChannel)
    return(pixel_info->alpha);
  if (channel == IndexPixelChannel)
    return(pixel_info->index);
  return((MagickRealType) 0.0);
}
/*
  Return 1/x where x is perceptible (not unlimited or infinitesimal):
  values whose magnitude is below MagickEpsilon are clamped to
  +/-MagickEpsilon before inversion, so the result is always finite.
*/
static inline double PerceptibleReciprocal(const double x)
{
  double
    sign;
  sign=1.0;
  if (x < 0.0)
    sign=(-1.0);
  if ((sign*x) < MagickEpsilon)
    return(sign/MagickEpsilon);
  return(1.0/x);
}
/*
  Luma of a PixelInfo using Rec. 709-style weights (0.2127R 0.7152G
  0.0722B).  Components already in sRGB are weighted as-is; otherwise they
  are gamma-encoded first, so luma is always computed on non-linear RGB.
*/
static inline MagickRealType GetPixelInfoLuma(
  const PixelInfo *magick_restrict pixel)
{
  MagickRealType
    intensity;
  if (pixel->colorspace == sRGBColorspace)
    {
      intensity=(MagickRealType) (0.212656f*pixel->red+0.715158f*pixel->green+
        0.072186f*pixel->blue);
      return(intensity);
    }
  intensity=(MagickRealType) (0.212656f*EncodePixelGamma(pixel->red)+
    0.715158f*EncodePixelGamma(pixel->green)+
    0.072186f*EncodePixelGamma(pixel->blue));
  return(intensity);
}
/*
  Relative luminance of a PixelInfo: sRGB components are linearized with
  DecodePixelGamma before weighting; other colorspaces are assumed linear
  already.  (Mirror image of GetPixelInfoLuma.)
*/
static inline MagickRealType GetPixelInfoLuminance(
  const PixelInfo *magick_restrict pixel)
{
  MagickRealType
    intensity;
  if (pixel->colorspace != sRGBColorspace)
    {
      intensity=(MagickRealType) (0.212656f*pixel->red+0.715158f*pixel->green+
        0.072186f*pixel->blue);
      return(intensity);
    }
  intensity=(MagickRealType) (0.212656f*DecodePixelGamma(pixel->red)+
    0.715158f*DecodePixelGamma(pixel->green)+
    0.072186f*DecodePixelGamma(pixel->blue));
  return(intensity);
}
/* L (lightness) sample of the pixel. */
static inline Quantum GetPixelL(const Image *magick_restrict image,
  const Quantum *magick_restrict pixel)
{
  return(pixel[image->channel_map[LPixelChannel].offset]);
}
/* Label sample of the pixel, widened to a signed index. */
static inline ssize_t GetPixelLabel(const Image *magick_restrict image,
  const Quantum *magick_restrict pixel)
{
  return((ssize_t) pixel[image->channel_map[LabelPixelChannel].offset]);
}
/*
  Luma of a raw pixel: Rec. 709-style weighting of the stored RGB samples
  with no gamma conversion (samples are used exactly as stored).
*/
static inline MagickRealType GetPixelLuma(const Image *magick_restrict image,
  const Quantum *magick_restrict pixel)
{
  MagickRealType
    intensity;
  intensity=(MagickRealType) (
    0.212656f*pixel[image->channel_map[RedPixelChannel].offset]+
    0.715158f*pixel[image->channel_map[GreenPixelChannel].offset]+
    0.072186f*pixel[image->channel_map[BluePixelChannel].offset]);
  return(intensity);
}
/*
  Relative luminance of a raw pixel.  sRGB-encoded pixels are first
  linearized with DecodePixelGamma; pixels in any other colorspace are
  assumed linear and weighted directly.
*/
static inline MagickRealType GetPixelLuminance(
  const Image *magick_restrict image,const Quantum *magick_restrict pixel)
{
  MagickRealType
    intensity;
  if (image->colorspace != sRGBColorspace)
    {
      intensity=(MagickRealType) (
        0.212656f*pixel[image->channel_map[RedPixelChannel].offset]+
        0.715158f*pixel[image->channel_map[GreenPixelChannel].offset]+
        0.072186f*pixel[image->channel_map[BluePixelChannel].offset]);
      return(intensity);
    }
  intensity=(MagickRealType) (0.212656f*DecodePixelGamma((MagickRealType)
    pixel[image->channel_map[RedPixelChannel].offset])+0.715158f*
    DecodePixelGamma((MagickRealType)
    pixel[image->channel_map[GreenPixelChannel].offset])+0.072186f*
    DecodePixelGamma((MagickRealType)
    pixel[image->channel_map[BluePixelChannel].offset]));
  return(intensity);
}
/* Magenta sample of the pixel. */
static inline Quantum GetPixelMagenta(const Image *magick_restrict image,
  const Quantum *magick_restrict pixel)
{
  return(pixel[image->channel_map[MagentaPixelChannel].offset]);
}
/* Traits of the magenta channel. */
static inline PixelTrait GetPixelMagentaTraits(
  const Image *magick_restrict image)
{
  return(image->channel_map[MagentaPixelChannel].traits);
}
/* Read-mask sample; QuantumRange (fully readable) when undefined. */
static inline Quantum GetPixelReadMask(const Image *magick_restrict image,
  const Quantum *magick_restrict pixel)
{
  if (image->channel_map[ReadMaskPixelChannel].traits == UndefinedPixelTrait)
    return((Quantum) QuantumRange);
  return(pixel[image->channel_map[ReadMaskPixelChannel].offset]);
}
/* Write-mask sample; QuantumRange (fully writable) when undefined. */
static inline Quantum GetPixelWriteMask(const Image *magick_restrict image,
  const Quantum *magick_restrict pixel)
{
  if (image->channel_map[WriteMaskPixelChannel].traits == UndefinedPixelTrait)
    return((Quantum) QuantumRange);
  return(pixel[image->channel_map[WriteMaskPixelChannel].offset]);
}
/* Traits of the read-mask channel. */
static inline PixelTrait GetPixelReadMaskTraits(
  const Image *magick_restrict image)
{
  return(image->channel_map[ReadMaskPixelChannel].traits);
}
/* Number of meta (non-color) channels per pixel. */
static inline size_t GetPixelMetaChannels(const Image *magick_restrict image)
{
  return(image->number_meta_channels);
}
/* Bytes of metacontent stored with each pixel. */
static inline size_t GetPixelMetacontentExtent(
  const Image *magick_restrict image)
{
  return(image->metacontent_extent);
}
/*
  Legacy opacity, i.e. QuantumRange-alpha.  When the alpha channel does
  not blend, the pixel is treated as opaque (QuantumRange-OpaqueAlpha).
*/
static inline Quantum GetPixelOpacity(const Image *magick_restrict image,
  const Quantum *magick_restrict pixel)
{
  if (image->channel_map[AlphaPixelChannel].traits != BlendPixelTrait)
    return(QuantumRange-OpaqueAlpha);
  return(QuantumRange-pixel[image->channel_map[AlphaPixelChannel].offset]);
}
/* Red sample of the pixel. */
static inline Quantum GetPixelRed(const Image *magick_restrict image,
  const Quantum *magick_restrict pixel)
{
  return(pixel[image->channel_map[RedPixelChannel].offset]);
}
/* Traits of the red channel. */
static inline PixelTrait GetPixelRedTraits(const Image *magick_restrict image)
{
  return(image->channel_map[RedPixelChannel].traits);
}
/*
  Populate pixel_info from a raw pixel of image.  With image == NULL the
  result is the default pixel: DirectClass, sRGB, quantum depth, opaque.
  With a non-NULL image the image attributes (class, colorspace, fuzz,
  depth, alpha trait) are copied; with a non-NULL pixel the color samples
  are copied too.  Black, alpha, and index are transferred only when the
  image defines those channels, and a defined alpha channel forces
  alpha_trait to BlendPixelTrait.
*/
static inline void GetPixelInfoPixel(const Image *magick_restrict image,
  const Quantum *magick_restrict pixel,PixelInfo *magick_restrict pixel_info)
{
  /* Zero everything first so unset members are well defined. */
  (void) ResetMagickMemory(pixel_info,0,sizeof(*pixel_info));
  pixel_info->storage_class=DirectClass;
  pixel_info->colorspace=sRGBColorspace;
  pixel_info->depth=MAGICKCORE_QUANTUM_DEPTH;
  pixel_info->alpha_trait=UndefinedPixelTrait;
  pixel_info->alpha=(MagickRealType) OpaqueAlpha;
  if (image != (Image *) NULL)
    {
      pixel_info->storage_class=image->storage_class;
      pixel_info->colorspace=image->colorspace;
      pixel_info->fuzz=image->fuzz;
      pixel_info->depth=image->depth;
      pixel_info->alpha_trait=image->alpha_trait;
      if (pixel != (Quantum *) NULL)
        {
          pixel_info->red=(MagickRealType)
            pixel[image->channel_map[RedPixelChannel].offset];
          pixel_info->green=(MagickRealType)
            pixel[image->channel_map[GreenPixelChannel].offset];
          pixel_info->blue=(MagickRealType)
            pixel[image->channel_map[BluePixelChannel].offset];
          if (image->channel_map[BlackPixelChannel].traits != UndefinedPixelTrait)
            pixel_info->black=(MagickRealType)
              pixel[image->channel_map[BlackPixelChannel].offset];
          if (image->channel_map[AlphaPixelChannel].traits != UndefinedPixelTrait)
            {
              pixel_info->alpha=(MagickRealType)
                pixel[image->channel_map[AlphaPixelChannel].offset];
              pixel_info->alpha_trait=BlendPixelTrait;
            }
          if (image->channel_map[IndexPixelChannel].traits != UndefinedPixelTrait)
            pixel_info->index=(MagickRealType)
              pixel[image->channel_map[IndexPixelChannel].offset];
        }
    }
}
/* Traits of an arbitrary logical channel. */
static inline PixelTrait GetPixelTraits(const Image *magick_restrict image,
  const PixelChannel channel)
{
  return(image->channel_map[channel].traits);
}
/* Y (luma) sample of the pixel. */
static inline Quantum GetPixelY(const Image *magick_restrict image,
  const Quantum *magick_restrict pixel)
{
  return(pixel[image->channel_map[YPixelChannel].offset]);
}
/* Traits of the Y channel. */
static inline PixelTrait GetPixelYTraits(const Image *magick_restrict image)
{
  return(image->channel_map[YPixelChannel].traits);
}
/* Yellow sample of the pixel. */
static inline Quantum GetPixelYellow(const Image *magick_restrict image,
  const Quantum *magick_restrict pixel)
{
  return(pixel[image->channel_map[YellowPixelChannel].offset]);
}
/* Traits of the yellow channel. */
static inline PixelTrait GetPixelYellowTraits(
  const Image *magick_restrict image)
{
  return(image->channel_map[YellowPixelChannel].traits);
}
/* Absolute value for MagickRealType (branch instead of fabs()). */
static inline MagickRealType AbsolutePixelValue(const MagickRealType x)
{
  return(x < 0.0f ? -x : x);
}
/*
  Return MagickTrue when quantizing `pixel' into `range' levels and
  rescaling back to QuantumRange reproduces the pixel exactly, i.e. the
  pixel is already representable at that reduced depth.  range == 0 is
  trivially true.  The non-HDRI path rounds the final quotient back to an
  integral Quantum; the HDRI path keeps the fractional value.
*/
static inline MagickBooleanType IsPixelAtDepth(const Quantum pixel,
  const QuantumAny range)
{
  Quantum
    quantum;
  if (range == 0)
    return(MagickTrue);
#if !defined(MAGICKCORE_HDRI_SUPPORT)
  quantum=(Quantum) (((MagickRealType) QuantumRange*((QuantumAny)
    (((MagickRealType) range*pixel)/QuantumRange+0.5)))/range+0.5);
#else
  quantum=(Quantum) (((MagickRealType) QuantumRange*((QuantumAny)
    (((MagickRealType) range*pixel)/QuantumRange+0.5)))/range);
#endif
  return(pixel == quantum ? MagickTrue : MagickFalse);
}
/*
  Compare a raw pixel against a PixelInfo, component-wise within
  MagickEpsilon.  An image/pixel without an alpha trait is treated as
  opaque; when both pixels are fully transparent they compare equal
  regardless of color.  Black is compared only for CMYK images.
*/
static inline MagickBooleanType IsPixelEquivalent(
  const Image *magick_restrict image,const Quantum *magick_restrict p,
  const PixelInfo *magick_restrict q)
{
  MagickRealType
    alpha,
    beta,
    color;
  color=(MagickRealType) p[image->channel_map[AlphaPixelChannel].offset];
  alpha=image->alpha_trait == UndefinedPixelTrait ? (MagickRealType)
    OpaqueAlpha : color;
  beta=q->alpha_trait == UndefinedPixelTrait ? (MagickRealType) OpaqueAlpha :
    q->alpha;
  if (AbsolutePixelValue(alpha-beta) >= MagickEpsilon)
    return(MagickFalse);
  if ((AbsolutePixelValue(alpha-TransparentAlpha) < MagickEpsilon) ||
      (AbsolutePixelValue(beta-TransparentAlpha) < MagickEpsilon))
    return(MagickTrue);  /* no color component if pixel is transparent */
  color=(MagickRealType) p[image->channel_map[RedPixelChannel].offset];
  if (AbsolutePixelValue(color-q->red) >= MagickEpsilon)
    return(MagickFalse);
  color=(MagickRealType) p[image->channel_map[GreenPixelChannel].offset];
  if (AbsolutePixelValue(color-q->green) >= MagickEpsilon)
    return(MagickFalse);
  color=(MagickRealType) p[image->channel_map[BluePixelChannel].offset];
  if (AbsolutePixelValue(color-q->blue) >= MagickEpsilon)
    return(MagickFalse);
  if (image->colorspace == CMYKColorspace)
    {
      color=(MagickRealType) p[image->channel_map[BlackPixelChannel].offset];
      if (AbsolutePixelValue(color-q->black) >= MagickEpsilon)
        return(MagickFalse);
    }
  return(MagickTrue);
}
/*
  Return MagickTrue when the pixel is gray, i.e. its red, green, and blue
  samples agree within MagickEpsilon.
*/
static inline MagickBooleanType IsPixelGray(const Image *magick_restrict image,
  const Quantum *magick_restrict pixel)
{
  MagickRealType
    blue,
    green,
    red;
  red=(MagickRealType) pixel[image->channel_map[RedPixelChannel].offset];
  green=(MagickRealType) pixel[image->channel_map[GreenPixelChannel].offset];
  blue=(MagickRealType) pixel[image->channel_map[BluePixelChannel].offset];
  if (AbsolutePixelValue(red-green) >= MagickEpsilon)
    return(MagickFalse);
  if (AbsolutePixelValue(green-blue) >= MagickEpsilon)
    return(MagickFalse);
  return(MagickTrue);
}
/*
  Compare two PixelInfo values component-wise within MagickEpsilon.
  A pixel without an alpha trait is treated as opaque; when both pixels
  are fully transparent they compare equal regardless of color.  Black is
  compared only when p is CMYK.
*/
static inline MagickBooleanType IsPixelInfoEquivalent(
  const PixelInfo *magick_restrict p,const PixelInfo *magick_restrict q)
{
  MagickRealType
    alpha,
    beta;
  alpha=p->alpha_trait == UndefinedPixelTrait ? (MagickRealType) OpaqueAlpha :
    p->alpha;
  beta=q->alpha_trait == UndefinedPixelTrait ? (MagickRealType) OpaqueAlpha :
    q->alpha;
  if (AbsolutePixelValue(alpha-beta) >= MagickEpsilon)
    return(MagickFalse);
  if ((AbsolutePixelValue(alpha-TransparentAlpha) < MagickEpsilon) ||
      (AbsolutePixelValue(beta-TransparentAlpha) < MagickEpsilon))
    return(MagickTrue);  /* no color component if pixel is transparent */
  if (AbsolutePixelValue(p->red-q->red) >= MagickEpsilon)
    return(MagickFalse);
  if (AbsolutePixelValue(p->green-q->green) >= MagickEpsilon)
    return(MagickFalse);
  if (AbsolutePixelValue(p->blue-q->blue) >= MagickEpsilon)
    return(MagickFalse);
  if (p->colorspace == CMYKColorspace)
    {
      if (AbsolutePixelValue(p->black-q->black) >= MagickEpsilon)
        return(MagickFalse);
    }
  return(MagickTrue);
}
/*
  Return MagickTrue when the raw pixel is bilevel: red is (within
  MagickEpsilon) either 0 or QuantumRange, and green and blue match red.
*/
static inline MagickBooleanType IsPixelMonochrome(
  const Image *magick_restrict image,const Quantum *magick_restrict pixel)
{
  MagickRealType
    green_blue,
    red,
    red_green;
  /* red must be at one of the two extremes; && rejects everything else */
  red=(MagickRealType) pixel[image->channel_map[RedPixelChannel].offset];
  if ((AbsolutePixelValue(red) >= MagickEpsilon) &&
      (AbsolutePixelValue(red-QuantumRange) >= MagickEpsilon))
    return(MagickFalse);
  red_green=(MagickRealType) pixel[image->channel_map[RedPixelChannel].offset]-
    pixel[image->channel_map[GreenPixelChannel].offset];
  green_blue=(MagickRealType)
    pixel[image->channel_map[GreenPixelChannel].offset]-
    pixel[image->channel_map[BluePixelChannel].offset];
  if ((AbsolutePixelValue(red_green) < MagickEpsilon) &&
      (AbsolutePixelValue(green_blue) < MagickEpsilon))
    return(MagickTrue);
  return(MagickFalse);
}
/*
  Return MagickTrue when the PixelInfo is gray: red, green, and blue agree
  within MagickEpsilon.
*/
static inline MagickBooleanType IsPixelInfoGray(
  const PixelInfo *magick_restrict pixel)
{
  if (AbsolutePixelValue(pixel->red-pixel->green) >= MagickEpsilon)
    return(MagickFalse);
  if (AbsolutePixelValue(pixel->green-pixel->blue) >= MagickEpsilon)
    return(MagickFalse);
  return(MagickTrue);
}
/*
  Return MagickTrue when the PixelInfo is bilevel: red is (within
  MagickEpsilon) either 0 or QuantumRange, and green and blue match red.
  BUG FIX: the extreme-value guard previously combined its two tests with
  ||.  Since red can never be within epsilon of both 0 and QuantumRange,
  that condition was true for every input and the function always returned
  MagickFalse.  Use && so red is rejected only when it is at *neither*
  extreme — mirroring IsPixelMonochrome above.
*/
static inline MagickBooleanType IsPixelInfoMonochrome(
  const PixelInfo *magick_restrict pixel_info)
{
  MagickRealType
    green_blue,
    red_green;
  if ((AbsolutePixelValue(pixel_info->red) >= MagickEpsilon) &&
      (AbsolutePixelValue(pixel_info->red-QuantumRange) >= MagickEpsilon))
    return(MagickFalse);
  red_green=pixel_info->red-pixel_info->green;
  green_blue=pixel_info->green-pixel_info->blue;
  if ((AbsolutePixelValue(red_green) < MagickEpsilon) &&
      (AbsolutePixelValue(green_blue) < MagickEpsilon))
    return(MagickTrue);
  return(MagickFalse);
}
/* Set the `a' channel sample (presumably CIELab a* — confirm) when the
   channel is defined; otherwise a no-op. */
static inline void SetPixela(const Image *magick_restrict image,
  const Quantum a,Quantum *magick_restrict pixel)
{
  if (image->channel_map[aPixelChannel].traits != UndefinedPixelTrait)
    pixel[image->channel_map[aPixelChannel].offset]=a;
}
/* Set the alpha sample when the channel is defined; otherwise a no-op. */
static inline void SetPixelAlpha(const Image *magick_restrict image,
  const Quantum alpha,Quantum *magick_restrict pixel)
{
  if (image->channel_map[AlphaPixelChannel].traits != UndefinedPixelTrait)
    pixel[image->channel_map[AlphaPixelChannel].offset]=alpha;
}
/* Set the traits of the alpha channel. */
static inline void SetPixelAlphaTraits(Image *image,const PixelTrait traits)
{
  image->channel_map[AlphaPixelChannel].traits=traits;
}
/* Set the `b' channel sample (presumably CIELab b* — confirm) when the
   channel is defined; otherwise a no-op. */
static inline void SetPixelb(const Image *magick_restrict image,
  const Quantum b,Quantum *magick_restrict pixel)
{
  if (image->channel_map[bPixelChannel].traits != UndefinedPixelTrait)
    pixel[image->channel_map[bPixelChannel].offset]=b;
}
/*
  Initialize every channel of `pixel' to the image background color.
  All channels are zeroed first (so meta channels end up 0), then red,
  green, and blue are set; black and alpha are set only when those
  channels are defined.  An alpha-less background color yields OpaqueAlpha.
  (The historic "Backgound" misspelling is kept for API compatibility.)
*/
static inline void SetPixelBackgoundColor(const Image *magick_restrict image,
  Quantum *magick_restrict pixel)
{
  ssize_t
    i;
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
    pixel[i]=(Quantum) 0;
  pixel[image->channel_map[RedPixelChannel].offset]=
    ClampToQuantum(image->background_color.red);
  pixel[image->channel_map[GreenPixelChannel].offset]=
    ClampToQuantum(image->background_color.green);
  pixel[image->channel_map[BluePixelChannel].offset]=
    ClampToQuantum(image->background_color.blue);
  if (image->channel_map[BlackPixelChannel].traits != UndefinedPixelTrait)
    pixel[image->channel_map[BlackPixelChannel].offset]=
      ClampToQuantum(image->background_color.black);
  if (image->channel_map[AlphaPixelChannel].traits != UndefinedPixelTrait)
    pixel[image->channel_map[AlphaPixelChannel].offset]=
      image->background_color.alpha_trait == UndefinedPixelTrait ? OpaqueAlpha :
      ClampToQuantum(image->background_color.alpha);
}
/* Set the black (K) sample when the channel is defined; otherwise no-op. */
static inline void SetPixelBlack(const Image *magick_restrict image,
  const Quantum black,Quantum *magick_restrict pixel)
{
  if (image->channel_map[BlackPixelChannel].traits != UndefinedPixelTrait)
    pixel[image->channel_map[BlackPixelChannel].offset]=black;
}
/* Set the traits of the black channel. */
static inline void SetPixelBlackTraits(Image *image,const PixelTrait traits)
{
  image->channel_map[BlackPixelChannel].traits=traits;
}
/* Set the blue sample unconditionally (no trait check, unlike black). */
static inline void SetPixelBlue(const Image *magick_restrict image,
  const Quantum blue,Quantum *magick_restrict pixel)
{
  pixel[image->channel_map[BluePixelChannel].offset]=blue;
}
/* Set the traits of the blue channel. */
static inline void SetPixelBlueTraits(Image *image,const PixelTrait traits)
{
  image->channel_map[BluePixelChannel].traits=traits;
}
/* Set the Cb (blue-difference chroma) sample. */
static inline void SetPixelCb(const Image *magick_restrict image,
  const Quantum cb,Quantum *magick_restrict pixel)
{
  pixel[image->channel_map[CbPixelChannel].offset]=cb;
}
/* Set the traits of the Cb channel. */
static inline void SetPixelCbTraits(Image *image,const PixelTrait traits)
{
  image->channel_map[CbPixelChannel].traits=traits;
}
/* Set an arbitrary channel's sample when that channel is defined. */
static inline void SetPixelChannel(const Image *magick_restrict image,
  const PixelChannel channel,const Quantum quantum,
  Quantum *magick_restrict pixel)
{
  if (image->channel_map[channel].traits != UndefinedPixelTrait)
    pixel[image->channel_map[channel].offset]=quantum;
}
/*
  Bind logical channel `channel' to pixel-array position `offset',
  updating the reverse map (offset -> channel) and the forward map
  (channel -> offset, traits).  Both indices are bounds-asserted against
  MaxPixelChannels.
*/
static inline void SetPixelChannelAttributes(
  const Image *magick_restrict image,const PixelChannel channel,
  const PixelTrait traits,const ssize_t offset)
{
  assert((ssize_t) channel < MaxPixelChannels);
  assert(offset < MaxPixelChannels);
  image->channel_map[offset].channel=channel;
  image->channel_map[channel].offset=offset;
  image->channel_map[channel].traits=traits;
}
/*
  As SetPixelChannelAttributes but leaves traits untouched.
  NOTE(review): no bounds asserts here, unlike the function above —
  callers must guarantee channel and offset are < MaxPixelChannels.
*/
static inline void SetPixelChannelChannel(const Image *magick_restrict image,
  const PixelChannel channel,const ssize_t offset)
{
  image->channel_map[offset].channel=channel;
  image->channel_map[channel].offset=offset;
}
/* Set the per-pixel channel count of the image. */
static inline void SetPixelChannels(Image *image,const size_t number_channels)
{
  image->number_channels=number_channels;
}
/* Set the traits of an arbitrary logical channel. */
static inline void SetPixelChannelTraits(Image *image,
  const PixelChannel channel,const PixelTrait traits)
{
  image->channel_map[channel].traits=traits;
}
/* Set the composite-mask sample when the channel is defined. */
static inline void SetPixelCompositeMask(const Image *magick_restrict image,
  const Quantum mask,Quantum *magick_restrict pixel)
{
  if (image->channel_map[CompositeMaskPixelChannel].traits != UndefinedPixelTrait)
    pixel[image->channel_map[CompositeMaskPixelChannel].offset]=mask;
}
/* Set the Cr (red-difference chroma) sample. */
static inline void SetPixelCr(const Image *magick_restrict image,
  const Quantum cr,Quantum *magick_restrict pixel)
{
  pixel[image->channel_map[CrPixelChannel].offset]=cr;
}
/* Set the traits of the Cr channel. */
static inline void SetPixelCrTraits(Image *image,const PixelTrait traits)
{
  image->channel_map[CrPixelChannel].traits=traits;
}
/* Set the cyan sample. */
static inline void SetPixelCyan(const Image *magick_restrict image,
  const Quantum cyan,Quantum *magick_restrict pixel)
{
  pixel[image->channel_map[CyanPixelChannel].offset]=cyan;
}
/* Set the gray sample. */
static inline void SetPixelGray(const Image *magick_restrict image,
  const Quantum gray,Quantum *magick_restrict pixel)
{
  pixel[image->channel_map[GrayPixelChannel].offset]=gray;
}
/* Set the traits of the gray channel. */
static inline void SetPixelGrayTraits(Image *image,const PixelTrait traits)
{
  image->channel_map[GrayPixelChannel].traits=traits;
}
/* Set the green sample. */
static inline void SetPixelGreen(const Image *magick_restrict image,
  const Quantum green,Quantum *magick_restrict pixel)
{
  pixel[image->channel_map[GreenPixelChannel].offset]=green;
}
/* Set the traits of the green channel. */
static inline void SetPixelGreenTraits(Image *image,const PixelTrait traits)
{
  image->channel_map[GreenPixelChannel].traits=traits;
}
/* Set the colormap-index sample when the channel is defined. */
static inline void SetPixelIndex(const Image *magick_restrict image,
  const Quantum index,Quantum *magick_restrict pixel)
{
  if (image->channel_map[IndexPixelChannel].traits != UndefinedPixelTrait)
    pixel[image->channel_map[IndexPixelChannel].offset]=index;
}
/* Set the traits of the index channel. */
static inline void SetPixelIndexTraits(Image *image,const PixelTrait traits)
{
  image->channel_map[IndexPixelChannel].traits=traits;
}
/*
  Copy a PixelInfo into a raw pixel.  Red, green, and blue are always
  written (clamped to Quantum range); black and alpha only when the image
  defines those channels.  A PixelInfo without an alpha trait writes
  OpaqueAlpha.  Meta channels of `pixel' are left untouched.
*/
static inline void SetPixelViaPixelInfo(const Image *magick_restrict image,
  const PixelInfo *magick_restrict pixel_info,Quantum *magick_restrict pixel)
{
  pixel[image->channel_map[RedPixelChannel].offset]=
    ClampToQuantum(pixel_info->red);
  pixel[image->channel_map[GreenPixelChannel].offset]=
    ClampToQuantum(pixel_info->green);
  pixel[image->channel_map[BluePixelChannel].offset]=
    ClampToQuantum(pixel_info->blue);
  if (image->channel_map[BlackPixelChannel].traits != UndefinedPixelTrait)
    pixel[image->channel_map[BlackPixelChannel].offset]=
      ClampToQuantum(pixel_info->black);
  if (image->channel_map[AlphaPixelChannel].traits != UndefinedPixelTrait)
    pixel[image->channel_map[AlphaPixelChannel].offset]=
      pixel_info->alpha_trait == UndefinedPixelTrait ? OpaqueAlpha :
      ClampToQuantum(pixel_info->alpha);
}
/* Set the L (lightness) sample when the channel is defined. */
static inline void SetPixelL(const Image *magick_restrict image,const Quantum L,
  Quantum *magick_restrict pixel)
{
  if (image->channel_map[LPixelChannel].traits != UndefinedPixelTrait)
    pixel[image->channel_map[LPixelChannel].offset]=L;
}
/* Set the magenta sample. */
static inline void SetPixelMagenta(const Image *magick_restrict image,
  const Quantum magenta,Quantum *magick_restrict pixel)
{
  pixel[image->channel_map[MagentaPixelChannel].offset]=magenta;
}
/* Set the traits of the magenta channel. */
static inline void SetPixelMagentaTraits(Image *image,const PixelTrait traits)
{
  image->channel_map[MagentaPixelChannel].traits=traits;
}
/* Set the read-mask sample when the channel is defined. */
static inline void SetPixelReadMask(const Image *magick_restrict image,
  const Quantum mask,Quantum *magick_restrict pixel)
{
  if (image->channel_map[ReadMaskPixelChannel].traits != UndefinedPixelTrait)
    pixel[image->channel_map[ReadMaskPixelChannel].offset]=mask;
}
/* Set the write-mask sample when the channel is defined. */
static inline void SetPixelWriteMask(const Image *magick_restrict image,
  const Quantum mask,Quantum *magick_restrict pixel)
{
  if (image->channel_map[WriteMaskPixelChannel].traits != UndefinedPixelTrait)
    pixel[image->channel_map[WriteMaskPixelChannel].offset]=mask;
}
/* Set the per-pixel metacontent size in bytes. */
static inline void SetPixelMetacontentExtent(Image *image,const size_t extent)
{
  image->metacontent_extent=extent;
}
/* Store legacy opacity: alpha channel receives QuantumRange-alpha. */
static inline void SetPixelOpacity(const Image *magick_restrict image,
  const Quantum alpha,Quantum *magick_restrict pixel)
{
  if (image->channel_map[AlphaPixelChannel].traits != UndefinedPixelTrait)
    pixel[image->channel_map[AlphaPixelChannel].offset]=QuantumRange-alpha;
}
/* Set the red sample. */
static inline void SetPixelRed(const Image *magick_restrict image,
  const Quantum red,Quantum *magick_restrict pixel)
{
  pixel[image->channel_map[RedPixelChannel].offset]=red;
}
/* Set the traits of the red channel. */
static inline void SetPixelRedTraits(Image *image,const PixelTrait traits)
{
  image->channel_map[RedPixelChannel].traits=traits;
}
/* Set the yellow sample. */
static inline void SetPixelYellow(const Image *magick_restrict image,
  const Quantum yellow,Quantum *magick_restrict pixel)
{
  pixel[image->channel_map[YellowPixelChannel].offset]=yellow;
}
/* Set the traits of the yellow channel. */
static inline void SetPixelYellowTraits(Image *image,const PixelTrait traits)
{
  image->channel_map[YellowPixelChannel].traits=traits;
}
/* Set the Y (luma) sample. */
static inline void SetPixelY(const Image *magick_restrict image,
  const Quantum y,Quantum *magick_restrict pixel)
{
  pixel[image->channel_map[YPixelChannel].offset]=y;
}
/* Set the traits of the Y channel. */
static inline void SetPixelYTraits(Image *image,const PixelTrait traits)
{
  image->channel_map[YPixelChannel].traits=traits;
}
#if defined(__cplusplus) || defined(c_plusplus)
}
#endif
#endif
|
927_1
|
crossvul
|
h
|
CWE-119
|
Improper Restriction of Operations within the Bounds of a Memory Buffer - Certain languages allow direct addressing of memory locations and do not automatically ensure that these locations are valid for the memory buffer that is being referenced.
|
xml
|
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE locale [
<!ELEMENT locale (exception)>
<!ELEMENT exception (ANY)+>
<!ELEMENT warning (message)+>
<!ELEMENT error (message)+>
<!ELEMENT fatalerror (message)+>
<!ELEMENT message (#PCDATA)>
<!ATTLIST locale name CDATA #REQUIRED>
<!ATTLIST message name CDATA #REQUIRED>
]>
<locale name="english">
<exception>
<blob>
<error>
<message name="UnableToOpenBlob">
unable to open image
</message>
<message name="UnableToOpenFile">
unable to open file
</message>
<message name="UnableToReadBlob">
unable to read blob
</message>
<message name="UnableToWriteBlob">
unable to write blob
</message>
<message name="UnrecognizedImageFormat">
unrecognized image format
</message>
<message name="ZeroLengthBlobNotPermitted">
zero-length blob not permitted
</message>
</error>
</blob>
<cache>
<error>
<message name="CacheResourcesExhausted">
cache resources exhausted
</message>
<message name="IncompatibleAPI">
incompatible API
</message>
<message name="NoPixelsDefinedInCache">
no pixels defined in cache
</message>
<message name="PixelCacheIsNotOpen">
pixel cache is not open
</message>
<message name="PixelsAreNotAuthentic">
pixels are not authentic
</message>
<message name="UnableToCloneCache">
unable to clone cache
</message>
<message name="UnableToExtendCache">
unable to extend cache
</message>
<message name="UnableToGetCacheNexus">
unable to get cache nexus
</message>
<message name="UnableToOpenPixelCache">
unable to open pixel cache
</message>
<message name="UnableToPersistPixelCache">
unable to persist pixel cache
</message>
<message name="UnableToReadPixelCache">
unable to read pixel cache
</message>
<message name="UnableToWritePixelCache">
unable to write pixel cache
</message>
</error>
<fatalerror>
<message name="UnableToAcquireCacheView">
unable to acquire cache view
</message>
<message name="UnableToExtendPixelCache">
      unable to extend pixel cache
</message>
</fatalerror>
</cache>
<coder>
<error>
<message name="ColormapTypeNotSupported">
colormap type not supported
</message>
<message name="ColorspaceModelIsNotSupported">
colorspace model is not supported
</message>
<message name="CompressNotSupported">
compression not supported
</message>
<message name="DataEncodingSchemeIsNotSupported">
data encoding scheme is not supported
</message>
<message name="DataStorageTypeIsNotSupported">
data storage type is not supported
</message>
<message name="DeltaPNGNotSupported">
delta-PNG is not supported
</message>
<message name="EncryptedWPGImageFileNotSupported">
encrypted WPG image file not supported
</message>
<message name="FractalCompressNotSupported">
fractal compression not supported
</message>
<message name="ImageColumnOrRowSizeIsNotSupported">
image column or row size is not supported
</message>
<message name="ImageDoesNotHaveAClipMask">
image does not have a clip mask
</message>
<message name="ImageDoesNotHaveAAlphaChannel">
      image does not have an alpha channel
</message>
<message name="ImageDoesNotHaveAThumbnail">
image does not have a EXIF thumbnail
</message>
<message name="ImageIsNotTiled">
image is not tiled
</message>
<message name="IrregularChannelGeometryNotSupported">
irregular channel geometry not supported
</message>
<message name="JNGCompressNotSupported">
JNG compression not supported
</message>
<message name="JPEGCompressNotSupported">
JPEG compression not supported
</message>
<message name="JPEGEmbeddingFailed">
JPEG embedding failed
</message>
<message name="LocationTypeIsNotSupported">
location type is not supported
</message>
<message name="MapStorageTypeIsNotSupported">
map storage type is not supported
</message>
<message name="MultidimensionalMatricesAreNotSupported">
multi-dimensional matrices are not supported
</message>
<message name="MultipleRecordListNotSupported">
multiple record list not supported
</message>
<message name="NoBitmapOnClipboard">
no bitmap on clipboard
</message>
<message name="NoAPP1DataIsAvailable">
no APP1 data is available
</message>
<message name="No8BIMDataIsAvailable">
no 8BIM data is available
</message>
<message name="NoColorProfileIsAvailable">
no color profile is available
</message>
<message name="NoDataReturned">
no data returned
</message>
<message name="NoImageVectorGraphics">
no image vector graphics; unable to generate SVG
</message>
<message name="NoIPTCProfileAvailable">
no IPTC profile available
</message>
<message name="NumberOfImagesIsNotSupported">
number of images is not supported
</message>
<message name="OnlyContinuousTonePictureSupported">
only continuous tone picture supported
</message>
<message name="OnlyLevelZerofilesSupported">
      only level zero files supported
</message>
<message name="PNGCompressNotSupported">
PNG compression not supported
</message>
<message name="RLECompressNotSupported">
RLE compression not supported
</message>
<message name="UnableToCopyProfile">
unable to copy profile
</message>
<message name="UnableToCreateBitmap">
unable to create bitmap
</message>
<message name="UnableToCreateADC">
unable to create a DC
</message>
<message name="UnableToDecompressImage">
unable to decompress image
</message>
<message name="UnableToWriteMPEGParameters">
unable to write MPEG parameters
</message>
<message name="UnableToZipCompressImage">
unable to zip-compress image
</message>
<message name="ZIPCompressNotSupported">
ZIP compression not supported
</message>
</error>
<warning>
<message name="LosslessToLossyJPEGConversion">
lossless to lossy JPEG conversion
</message>
</warning>
</coder>
<configure>
<error>
<message name="IncludeElementNestedTooDeeply">
include element nested too deeply
</message>
</error>
<warning>
<message name="UnableToOpenConfigureFile">
unable to access configure file
</message>
<message name="UnableToOpenModuleFile">
unable to open module file
</message>
</warning>
</configure>
<corrupt>
<image>
<error>
<message name="AnErrorHasOccurredReadingFromFile">
an error has occurred reading from file
</message>
<message name="AnErrorHasOccurredWritingToFile">
an error has occurred writing to file
</message>
<message name="CipherSupportNotEnabled">
cipher support not enabled
</message>
<message name="ColormapExceeds256Colors">
colormap exceeded 256 colors
</message>
<message name="CorruptImage">
corrupt image
</message>
<message name="FileFormatVersionMismatch">
file format version mismatch
</message>
<message name="ImageDepthNotSupported">
image depth not supported
</message>
<message name="ImageFileDoesNotContainAnyImageData">
image file does not contain any image data
</message>
<message name="ImageTypeNotSupported">
image type not supported
</message>
<message name="ImproperImageHeader">
improper image header
</message>
<message name="InsufficientImageDataInFile">
insufficient image data in file
</message>
<message name="InvalidColormapIndex">
invalid colormap index
</message>
<message name="InvalidPixel">
invalid pixel
</message>
<message name="LengthAndFilesizeDoNotMatch">
length and filesize do not match
</message>
<message name="MaximumChannelsExceeded">
maximum channels exceeded
</message>
<message name="MissingImageChannel">
missing image channel
</message>
<message name="NegativeOrZeroImageSize">
negative or zero image size
</message>
<message name="NonOS2HeaderSizeError">
non OS2 BMP header size less than 40
</message>
<message name="NotEnoughPixelData">
not enough pixel data
</message>
<message name="NotEnoughTiles">
not enough tiles found in level
</message>
<message name="TooMuchImageDataInFile">
too much image data in file
</message>
<message name="StaticPlanesValueNotEqualToOne">
static planes value not equal to 1
</message>
<message name="UnableToReadExtensionBlock">
unable to read extension block
</message>
<message name="UnableToReadImageHeader">
unable to read image header
</message>
<message name="UnableToReadImageData">
unable to read image data
</message>
<message name="UnableToRunlengthDecodeImage">
unable to runlength decode image
</message>
<message name="UnableToUncompressImage">
unable to uncompress image
</message>
<message name="UnexpectedEndOfFile">
unexpected end-of-file
</message>
<message name="UnexpectedSamplingFactor">
unexpected sampling factor
</message>
<message name="UnknownPatternType">
unknown pattern type
</message>
<message name="UnrecognizedAlphaChannelType">
        unrecognized alpha channel type
</message>
<message name="UnrecognizedBitsPerPixel">
unrecognized bits per pixel
</message>
<message name="UnrecognizedImageCompression">
unrecognized compression
</message>
<message name="UnrecognizedNumberOfColors">
unrecognized number of colors
</message>
</error>
<fatalerror>
<message name="UnableToPersistKey">
unable to persist key
</message>
</fatalerror>
<warning>
<message name="InsufficientImageDataInFile">
insufficient image data in file
</message>
<message name="LengthAndFilesizeDoNotMatch">
length and filesize do not match
</message>
<message name="SkipToSyncByte">
corrupt PCD image, skipping to sync byte
</message>
</warning>
</image>
</corrupt>
<delegate>
<error>
<message name="DelegateFailed">
delegate failed
</message>
<message name="FailedToComputeOutputSize">
failed to compute output size
</message>
<message name="FailedToRenderFile">
failed to render file
</message>
<message name="FailedToScanFile">
failed to scan file
</message>
<message name="NoTagFound">
no tag found
</message>
<message name="PCLDelegateFailed">
PCL delegate failed
</message>
<message name="PostscriptDelegateFailed">
Postscript delegate failed
</message>
<message name="UnableToCreateImage">
unable to create image
</message>
<message name="UnableToDecodeImageFile">
unable to decode image file
</message>
<message name="UnableToEncodeImageFile">
unable to encode image file
</message>
<message name="UnableToInitializeFPXLibrary">
unable to initialize FPX library
</message>
<message name="UnableToInitializeWMFLibrary">
unable to initialize WMF library
</message>
<message name="UnableToManageJP2Stream">
unable to manage JP2 stream
</message>
<message name="UnableToReadAspectRatio">
unable to read aspect ratio
</message>
<message name="UnableToReadSummaryInfo">
unable to read summary info
</message>
<message name="UnableToSetAffineMatrix">
unable to set affine matrix
</message>
<message name="UnableToSetAspectRatio">
unable to set aspect ratio
</message>
<message name="UnableToSetColorTwist">
unable to set color twist
</message>
<message name="UnableToSetContrast">
unable to set contrast
</message>
<message name="UnableToSetFilteringValue">
unable to set filtering value
</message>
<message name="UnableToSetImageTitle">
unable to set image title
</message>
<message name="UnableToSetJPEGLevel">
unable to set JPEG level
</message>
<message name="UnableToSetRegionOfInterest">
unable to set region of interest
</message>
<message name="UnableToSetSummaryInfo">
unable to set summary info
</message>
<message name="UnableToWriteSVGFormat">
unable to write SVG format
</message>
<message name="XPSDelegateFailed">
XPS delegate failed
</message>
</error>
</delegate>
<draw>
<error>
<message name="AlreadyPushingPatternDefinition">
already pushing pattern definition
</message>
<message name="NonconformingDrawingPrimitiveDefinition">
non-conforming drawing primitive definition
</message>
<message name="NotARelativeURL">
not a relative URL
</message>
<message name="NotCurrentlyPushingPatternDefinition">
not currently pushing pattern definition
</message>
<message name="SegmentStackOverflow">
segment stack overflow
</message>
<message name="TooManyBezierCoordinates">
too many bezier coordinates
</message>
<message name="UnableToPrint">
unable to print
</message>
<message name="UnbalancedGraphicContextPushPop">
unbalanced graphic context push-pop
</message>
<message name="URLNotFound">
URL not found
</message>
</error>
</draw>
<file>
<open>
<error>
<message name="AnErrorHasOccurredReadingFromFile">
an error has occurred reading from file
</message>
<message name="UnableToCreateTemporaryFile">
unable to create temporary file
</message>
<message name="UnableToOpenFile">
unable to open file
</message>
<message name="UnableToWriteFile">
unable to write file
</message>
</error>
</open>
</file>
<image>
<error>
<message name="AngleIsDiscontinuous">
angle is discontinuous
</message>
<message name="ColormappedImageRequired">
colormapped image required
</message>
<message name="ColorSeparatedImageRequired">
color separated image required
</message>
<message name="ColorspaceColorProfileMismatch">
color profile operates on another colorspace
</message>
<message name="ImageDepthNotSupported">
image depth not supported
</message>
<message name="ImageMorphologyDiffers">
image morphology differs
</message>
<message name="ImageSequenceRequired">
image sequence is required
</message>
<message name="ImageSizeDiffers">
image size differs
</message>
<message name="ImagesTooDissimilar">
images too dissimilar
</message>
<message name="LeftAndRightImageSizesDiffer">
left and right image sizes differ
</message>
<message name="NegativeOrZeroImageSize">
negative or zero image size
</message>
<message name="NoImagesWereFound">
no images were found
</message>
<message name="NoImagesWereLoaded">
no images were loaded
</message>
<message name="TooManyClusters">
        too many clusters
</message>
<message name="UnableToCreateColorTransform">
unable to create color transform
</message>
<message name="WidthOrHeightExceedsLimit">
width or height exceeds limit
</message>
</error>
<warning>
<message name="AssociateProfile">
associate profile with image, a source and destination color profile required for transform
</message>
</warning>
</image>
<filter>
<error>
<message name="FilterFailed">
filter failed
</message>
</error>
</filter>
<missing>
<delegate>
<error>
<message name="DelegateLibrarySupportNotBuiltIn">
delegate library support not built-in
</message>
<message name="NoDecodeDelegateForThisImageFormat">
no decode delegate for this image format
</message>
<message name="NoEncodeDelegateForThisImageFormat">
no encode delegate for this image format
</message>
</error>
<warning>
<message name="DelegateLibrarySupportNotBuiltIn">
delegate library support not built-in
</message>
<message name="FreeTypeLibraryIsNotAvailable">
FreeType library is not available
</message>
<message name="LCMSLibraryIsNotAvailable">
LCMS color profile library is not available
</message>
<message name="NoEncodeDelegateForThisImageFormat">
no encode delegate for this image format
</message>
</warning>
</delegate>
</missing>
<module>
<error>
<message name="ImageCoderSignatureMismatch">
image coder signature mismatch
</message>
<message name="ImageFilterSignatureMismatch">
image filter signature mismatch
</message>
<message name="UnableToLoadModule">
unable to load module
</message>
<message name="UnableToRegisterImageFormat">
unable to register image format
</message>
</error>
<fatalerror>
<message name="UnableToInitializeModuleLoader">
unable to initialize module loader
</message>
</fatalerror>
<warning>
<message name="UnableToCloseModule">
unable to close module
</message>
</warning>
</module>
<option>
<error>
<message name="ClutImageRequired">
color lookup table image required
</message>
<message name="CompositeImageRequired">
composite image required
</message>
<message name="DivideByZero">
divide by zero
</message>
<message name="FrameIsLessThanImageSize">
frame is less than image size
</message>
<message name="GeometryDimensionsAreZero">
geometry dimensions are zero
</message>
<message name="GeometryDoesNotContainImage">
geometry does not contain image
</message>
<message name="ImagesAreNotTheSameSize">
images are not the same size
</message>
<message name="ImageSizeMustExceedBevelWidth">
size must exceed bevel width
</message>
<message name="ImageWidthsOrHeightsDiffer">
image widths or heights differ
</message>
<message name="InvalidArgument">
invalid argument for option
</message>
<message name="InvalidGeometry">
invalid geometry
</message>
<message name="KernelWidthMustBeAnOddNumber">
kernel width must be an odd number
</message>
<message name="MapImageRequired">
map image required
</message>
<message name="MissingArgument">
option requires an argument
</message>
<message name="MissingAnImageFilename">
missing an image filename
</message>
<message name="MissingExpression">
missing expression
</message>
<message name="MissingNullSeparator">
missing Null Image List Separator
</message>
<message name="MustSpecifyAnImageName">
must specify an image name
</message>
<message name="MustSpecifyImageSize">
must specify image size
</message>
<message name="NoBlobDefined">
no Binary Large OBjects defined
</message>
<message name="NoClipPathDefined">
no clip path defined
</message>
<message name="NoImagesDefined">
no images defined
</message>
<message name="NoImageVectorGraphics">
no image vector graphics
</message>
<message name="NoSuchImage">
no such image
</message>
<message name="NoSuchOption">
no such option
</message>
<message name="NonZeroWidthAndHeightRequired">
non-zero width and height required
</message>
<message name="NotEnoughParameters">
not enough parameters
</message>
<message name="ParenthesisNestedTooDeeply">
parenthesis nested too deeply
</message>
<message name="ReferenceImageRequired">
reference image required
</message>
<message name="ReferenceIsNotMyType">
reference is not my type
</message>
<message name="SteganoImageRequired">
stegano image required
</message>
<message name="StereoImageRequired">
stereo image required
</message>
<message name="SubimageSpecificationReturnsNoImages">
subimage specification returns no images
</message>
<message name="UnableToAccessPath">
unable to access file path
</message>
<message name="UnableToOpenFile">
unable to open file
</message>
<message name="UnableToParseExpression">
unable to parse expression
</message>
<message name="UnableToParseKernel">
unable to parse kernel string
</message>
<message name="UnbalancedBraces">
unbalanced braces
</message>
<message name="UnbalancedParenthesis">
unbalanced parenthesis
</message>
<message name="UnrecognizedAttribute">
unrecognized attribute
</message>
<message name="UnrecognizedChannelType">
unrecognized channel type
</message>
<message name="UnrecognizedColor">
unrecognized color
</message>
<message name="UnrecognizedColorspace">
unrecognized image colorspace
</message>
<message name="UnrecognizedComposeOperator">
unrecognized compose operator
</message>
<message name="UnrecognizedCompressType">
unrecognized compress type
</message>
<message name="UnrecognizedDirectionType">
unrecognized direction type
</message>
<message name="UnrecognizedDisposeMethod">
unrecognized dispose method
</message>
<message name="UnrecognizedDistortMethod">
unrecognized distortion method
</message>
<message name="UnrecognizedDitherMethod">
unrecognized dither method
</message>
<message name="UnrecognizedEndianType">
unrecognized endian type
</message>
<message name="UnrecognizedElement">
unrecognized element
</message>
<message name="UnrecognizedEvaluateOperator">
unrecognized evaluate operator
</message>
<message name="UnrecognizedEventType">
unrecognized event type
</message>
<message name="UnrecognizedFunction">
unrecognized function
</message>
<message name="UnrecognizedGravityType">
unrecognized gravity type
</message>
<message name="UnrecognizedImageCompression">
unrecognized image compression
</message>
<message name="UnrecognizedImageFilter">
unrecognized image filter
</message>
<message name="UnrecognizedImageFormat">
unrecognized image format
</message>
<message name="UnrecognizedImageMode">
unrecognized image mode
</message>
<message name="UnrecognizedImageOrientation">
unrecognized image orientation
</message>
<message name="UnrecognizedImageType">
unrecognized image type
</message>
<message name="UnrecognizedIntensityMethod">
unrecognized intensity method
</message>
<message name="UnrecognizedIntentType">
unrecognized intent type
</message>
<message name="UnrecognizedInterlaceType">
unrecognized interlace type
</message>
<message name="UnrecognizedInterpolateMethod">
unrecognized interpolate method
</message>
<message name="UnrecognizedKernelType">
unrecognized kernel type
</message>
<message name="UnrecognizedListType">
unrecognized list type
</message>
<message name="UnrecognizedMetricType">
unrecognized metric type
</message>
<message name="UnrecognizedModeType">
unrecognized mode type
</message>
<message name="UnrecognizedMorphologyMethod">
unrecognized morphology method
</message>
<message name="UnrecognizedOption">
unrecognized option
</message>
<message name="UnrecognizedPerlMagickMethod">
unrecognized PerlMagick method
</message>
<message name="UnrecognizedPixelMap">
unrecognized pixel map
</message>
<message name="UnrecognizedPreviewType">
unrecognized preview type
</message>
<message name="UnrecognizedResourceType">
unrecognized resource type
</message>
<message name="UnrecognizedSparseColorMethod">
unrecognized sparse color method
</message>
<message name="UnrecognizedStorageType">
unrecognized storage type
</message>
<message name="UnrecognizedStretchType">
unrecognized stretch type
</message>
<message name="UnrecognizedType">
unrecognized type
</message>
<message name="UnrecognizedUnitsType">
unrecognized units type
</message>
<message name="UnrecognizedValidateType">
unrecognized validate type
</message>
<message name="UnrecognizedVirtualPixelMethod">
unrecognized virtual pixel method
</message>
<message name="XmlInvalidAttribute">
XML invalid attribute
</message>
<message name="XmlInvalidContent">
XML invalid content
</message>
<message name="XmlMissingAttribute">
XML missing required attribute
</message>
<message name="XmlMissingContent">
XML missing required content
</message>
<message name="XmlMissingElement">
XML missing required element
</message>
</error>
<fatalerror>
<message name="FilenameTruncated">
image filename truncated
</message>
<message name="MissingAnImageFilename">
missing an image filename
</message>
<message name="UnrecognizedColormapType">
unrecognized colormap type
</message>
<message name="UnrecognizedColorspaceType">
unrecognized colorspace type
</message>
<message name="UnrecognizedEndianType">
unrecognized endian type
</message>
<message name="UnrecognizedImageCompressionType">
unrecognized compression type
</message>
<message name="UnrecognizedImageType">
unrecognized image type
</message>
<message name="UnrecognizedInterlaceType">
unrecognized interlace type
</message>
<message name="UnrecognizedListType">
unrecognized list type
</message>
<message name="UnrecognizedDisposeMethod">
unrecognized dispose method
</message>
<message name="UnrecognizedOption">
unrecognized option
</message>
<message name="UnrecognizedResourceType">
unrecognized resource type
</message>
<message name="UnrecognizedVirtualPixelMethod">
unrecognized virtual pixel method
</message>
</fatalerror>
<warning>
<message name="GeometryDoesNotContainImage">
geometry does not contain image
</message>
<message name="NoSuchElement">
no such element in list
</message>
<message name="UnknownImageProperty">
unknown image property
</message>
<message name="UnrecognizedColor">
unrecognized color
</message>
<message name="ZeroTimeAnimation">
animation only contains zero time delays
</message>
</warning>
</option>
<policy>
<error>
<message name="NotAuthorized">
not authorized
</message>
</error>
</policy>
<registry>
<error>
<message name="UnableToGetRegistryID">
unable to get registry ID
</message>
<message name="UnableToSetRegistry">
unable to set registry
</message>
</error>
</registry>
<resource>
<limit>
<error>
<message name="PixelCacheAllocationFailed">
pixel cache allocation failed
</message>
<message name="MemoryAllocationFailed">
memory allocation failed
</message>
<message name="TooManyObjects">
too many objects
</message>
<message name="UnableToAcquireString">
unable to acquire string
</message>
<message name="UnableToAllocateColormap">
unable to allocate colormap
</message>
<message name="UnableToConvertFont">
unable to convert font
</message>
<message name="UnableToCreateColormap">
unable to create colormap
</message>
<message name="UnableToDitherImage">
unable to dither image
</message>
<message name="UnableToClonePackageInfo">
unable to clone package info
</message>
<message name="UnableToGetPackageInfo">
unable to get package info
</message>
</error>
<fatalerror>
<message name="TimeLimitExceeded">
time limit exceeded
</message>
<message name="UnableToAllocateDashPattern">
unable to allocate dash pattern
</message>
<message name="UnableToAllocateDerivatives">
          unable to allocate derivatives
</message>
<message name="UnableToAllocateGammaMap">
unable to allocate gamma map
</message>
<message name="UnableToAllocateImage">
unable to allocate image
</message>
<message name="UnableToAllocateImagePixels">
unable to allocate image pixels
</message>
<message name="UnableToDestroySemaphore">
unable to destroy semaphore
</message>
<message name="UnableToInstantiateSemaphore">
unable to instantiate semaphore
</message>
<message name="UnableToAcquireString">
unable to allocate string
</message>
<message name="MemoryAllocationFailed">
Memory allocation failed
</message>
<message name="UnableToConcatenateString">
unable to concatenate string
</message>
<message name="UnableToConvertText">
unable to convert text
</message>
<message name="UnableToCreateColormap">
unable to create colormap
</message>
<message name="UnableToCloneImage">
unable to clone image
</message>
<message name="UnableToDisplayImage">
unable to display image
</message>
<message name="UnableToEscapeString">
unable to escape string
</message>
<message name="UnableToInterpretMSLImage">
unable to interpret MSL image
</message>
<message name="UnableToLockSemaphore">
unable to lock semaphore
</message>
<message name="UnableToUnlockSemaphore">
unable to unlock semaphore
</message>
</fatalerror>
<warning>
<message name="MemoryAllocationFailed">
memory allocation failed
</message>
</warning>
</limit>
</resource>
<type>
<error>
<message name="FontSubstitutionRequired">
font substitution required
</message>
<message name="UnableToGetTypeMetrics">
unable to get type metrics
</message>
<message name="UnableToInitializeFreetypeLibrary">
unable to initialize freetype library
</message>
<message name="UnableToReadFont">
unable to read font
</message>
<message name="UnrecognizedFontEncoding">
unrecognized font encoding
</message>
</error>
<warning>
<message name="UnableToReadFont">
unable to read font
</message>
</warning>
</type>
<stream>
<error>
<message name="ImageDoesNotContainTheStreamGeometry">
image does not contain the stream geometry
</message>
<message name="NoStreamHandlerIsDefined">
no stream handler is defined
</message>
<message name="PixelCacheIsNotOpen">
pixel cache is not open
</message>
</error>
</stream>
<wand>
<error>
<message name="InvalidColormapIndex">
invalid colormap index
</message>
<message name="ZeroRegionSize">
zero region size
</message>
<message name="UnableToOpenFile">
unable to open file
</message>
<message name="QuantumDepthMismatch">
wand quantum depth does not match that of the core API
</message>
<message name="ContainsNoImages">
wand contains no images
</message>
<message name="ContainsNoIterators">
wand contains no iterators
</message>
</error>
</wand>
<xserver>
<error>
<message name="ColorIsNotKnownToServer">
color is not known to server
</message>
<message name="NoWindowWithSpecifiedIDExists">
no window with specified ID exists
</message>
<message name="StandardColormapIsNotInitialized">
standard Colormap is not initialized
</message>
<message name="UnableToConnectToRemoteDisplay">
unable to connect to remote display
</message>
<message name="UnableToCreateBitmap">
unable to create bitmap
</message>
<message name="UnableToCreateColormap">
unable to create colormap
</message>
<message name="UnableToCreatePixmap">
unable to create pixmap
</message>
<message name="UnableToCreateProperty">
unable to create property
</message>
<message name="UnableToCreateStandardColormap">
unable to create standard colormap
</message>
<message name="UnableToDisplayImageInfo">
unable to display image info
</message>
<message name="UnableToGetProperty">
unable to get property
</message>
<message name="UnableToGetStandardColormap">
unable to get Standard Colormap
</message>
<message name="UnableToGetVisual">
unable to get visual
</message>
<message name="UnableToGrabMouse">
unable to grab mouse
</message>
<message name="UnableToLoadFont">
unable to load font
</message>
<message name="UnableToMatchVisualToStandardColormap">
unable to match visual to Standard Colormap
</message>
<message name="UnableToOpenXServer">
unable to open X server
</message>
<message name="UnableToReadXWindowAttributes">
unable to read X window attributes
</message>
<message name="UnableToReadXWindowImage">
unable to read X window image
</message>
<message name="UnrecognizedColormapType">
unrecognized colormap type
</message>
<message name="UnrecognizedGravityType">
unrecognized gravity type
</message>
<message name="UnrecognizedVisualSpecifier">
unrecognized visual specifier
</message>
</error>
<fatalerror>
<message name="UnableToCreateCursor">
unable to create X cursor
</message>
<message name="UnableToCreateGraphicContext">
unable to create graphic context
</message>
<message name="UnableToCreateStandardColormap">
unable to create standard colormap
</message>
<message name="UnableToCreateTextProperty">
unable to create text property
</message>
<message name="UnableToCreateXWindow">
unable to create X window
</message>
<message name="UnableToCreateXImage">
unable to create X image
</message>
<message name="UnableToCreateXPixmap">
unable to create X pixmap
</message>
<message name="UnableToDisplayImage">
unable to display image
</message>
<message name="UnableToGetVisual">
unable to get visual
</message>
<message name="UnableToGetPixelInfo">
unable to get pixel info
</message>
<message name="UnableToLoadFont">
unable to load font
</message>
<message name="UnableToMakeXWindow">
unable to make X window
</message>
<message name="UnableToOpenXServer">
unable to open X server
</message>
<message name="UnableToViewFonts">
unable to view fonts
</message>
</fatalerror>
<warning>
<message name="UsingDefaultVisual">
using default visual
</message>
<message name="UnableToGetVisual">
unable to get visual
</message>
</warning>
</xserver>
</exception>
<monitor>
<AddNoise>
<message name="Image">
add noise to image
</message>
</AddNoise>
<Append>
<message name="Image">
append image sequence
</message>
</Append>
<assign>
<message name="Image">
assign image colors
</message>
</assign>
<Average>
<message name="Image">
average image sequence
</message>
</Average>
<Chop>
<message name="Image">
chop image
</message>
</Chop>
<Classify>
<message name="Image">
classify image colors
</message>
</Classify>
<ColorReplace>
<message name="Image">
replace color in image
</message>
</ColorReplace>
<Colorize>
<message name="Image">
colorize image
</message>
</Colorize>
<Combine>
<message name="Image">
combine image
</message>
</Combine>
<ContrastStretch>
<message name="Image">
contrast-stretch image
</message>
</ContrastStretch>
<Convolve>
<message name="Image">
convolve image
</message>
</Convolve>
<Crop>
<message name="Image">
crop image
</message>
</Crop>
<Decode>
<message name="Image">
decode image
</message>
</Decode>
<Despeckle>
<message name="Image">
despeckle image
</message>
</Despeckle>
<Distort>
<message name="Image">
distort image
</message>
</Distort>
<Dither>
<message name="Image">
dither image colors
</message>
</Dither>
<DullContrast>
<message name="Image">
dull image contrast
</message>
</DullContrast>
<Encode>
<message name="Image">
encode image
</message>
</Encode>
<Equalize>
<message name="Image">
equalize image
</message>
</Equalize>
<Flip>
<message name="Image">
flip image
</message>
</Flip>
<Flop>
<message name="Image">
flop image
</message>
</Flop>
<Frame>
<message name="Image">
add frame to image
</message>
</Frame>
<Fx>
<message name="Image">
fx image
</message>
</Fx>
<GammaCorrect>
<message name="Image">
gamma correct image
</message>
</GammaCorrect>
<Histogram>
<message name="Image">
compute image histogram
</message>
</Histogram>
<Implode>
<message name="Image">
implode image
</message>
</Implode>
<Level>
<message name="Image">
level image
</message>
</Level>
<Load>
<message name="Image">
load image
</message>
<message name="Images">
load images
</message>
</Load>
<Magnfiy>
<message name="Image">
        magnify image
</message>
</Magnfiy>
<MedianFilter>
<message name="Image">
filter image with neighborhood ranking
</message>
</MedianFilter>
<Minify>
<message name="Image">
minify image
</message>
</Minify>
<Modulate>
<message name="Image">
modulate image
</message>
</Modulate>
<Mogrify>
<message name="Image">
mogrify image
</message>
</Mogrify>
<Montage>
<message name="Image">
montage image
</message>
</Montage>
<Morph>
<message name="Image">
morph image sequence
</message>
</Morph>
<Mosaic>
<message name="Image">
mosaic image
</message>
</Mosaic>
<Negate>
<message name="Image">
negate image
</message>
</Negate>
<OilPaint>
<message name="Image">
oil paint image
</message>
</OilPaint>
<Opaque>
<message name="Image">
set opaque color in image
</message>
</Opaque>
<Plasma>
<message name="Image">
plasma image
</message>
</Plasma>
<Preview>
<message name="Image">
preview image
</message>
</Preview>
<Raise>
<message name="Image">
raise image
</message>
</Raise>
<Recolor>
<message name="Image">
recolor color image
</message>
</Recolor>
<Reduce>
<message name="Image">
reduce image colors
</message>
</Reduce>
<ReduceNoise>
<message name="Image">
reduce the image noise
</message>
</ReduceNoise>
<Render>
<message name="Image">
render image
</message>
</Render>
<Resize>
<message name="Image">
resize image
</message>
</Resize>
<RGBTransform>
<message name="Image">
RGB transform image
</message>
</RGBTransform>
<Roll>
<message name="Image">
roll image
</message>
</Roll>
<Rotate>
<message name="Image">
rotate image
</message>
</Rotate>
<Sample>
<message name="Image">
sample image
</message>
</Sample>
<Save>
<message name="Image">
save image
</message>
<message name="Images">
save images
</message>
</Save>
<Scale>
<message name="Image">
scale image
</message>
</Scale>
<Segment>
<message name="Image">
segment image
</message>
</Segment>
<Separate>
<message name="Image">
extract a channel from image
</message>
</Separate>
<SepiaTone>
<message name="Image">
sepia-tone image
</message>
</SepiaTone>
<Shade>
<message name="Image">
shade image
</message>
</Shade>
<Sharpen>
<message name="Image">
sharpen image
</message>
</Sharpen>
<SharpenContrast>
<message name="Image">
sharpen image contrast
</message>
</SharpenContrast>
<SigmoidalContrast>
<message name="Image">
sigmoidal contrast image
</message>
</SigmoidalContrast>
<Solarize>
<message name="Image">
solarize image
</message>
</Solarize>
<Splice>
<message name="Image">
splice image
</message>
</Splice>
<Spread>
<message name="Image">
spread image
</message>
</Spread>
<Stegano>
<message name="Image">
stegano image
</message>
</Stegano>
<Stereo>
<message name="Image">
stereo image
</message>
</Stereo>
<Swirl>
<message name="Image">
swirl image
</message>
</Swirl>
<Texture>
<message name="Image">
texture image
</message>
</Texture>
<Threshold>
<message name="Image">
threshold image
</message>
</Threshold>
<Tile>
<message name="Image">
tile image
</message>
</Tile>
<Tint>
<message name="Image">
tint image
</message>
</Tint>
<TransformRGB>
<message name="Image">
transform RGB image
</message>
</TransformRGB>
<Transparent>
<message name="Image">
set transparent color in image
</message>
</Transparent>
<Wave>
<message name="Image">
wave image
</message>
</Wave>
<Write>
<message name="Image">
write image
</message>
</Write>
<XShear>
<message name="Image">
x shear image
</message>
</XShear>
<YShear>
<message name="Image">
y shear image
</message>
</YShear>
</monitor>
</locale>
|
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE locale [
<!ELEMENT locale (exception)>
<!ELEMENT exception (ANY)+>
<!ELEMENT warning (message)+>
<!ELEMENT error (message)+>
<!ELEMENT fatalerror (message)+>
<!ELEMENT message (#PCDATA)>
<!ATTLIST locale name CDATA #REQUIRED>
<!ATTLIST message name CDATA #REQUIRED>
]>
<locale name="english">
<exception>
<blob>
<error>
<message name="UnableToOpenBlob">
unable to open image
</message>
<message name="UnableToOpenFile">
unable to open file
</message>
<message name="UnableToReadBlob">
unable to read blob
</message>
<message name="UnableToWriteBlob">
unable to write blob
</message>
<message name="UnrecognizedImageFormat">
unrecognized image format
</message>
<message name="ZeroLengthBlobNotPermitted">
zero-length blob not permitted
</message>
</error>
</blob>
<cache>
<error>
<message name="CacheResourcesExhausted">
cache resources exhausted
</message>
<message name="IncompatibleAPI">
incompatible API
</message>
<message name="NoPixelsDefinedInCache">
no pixels defined in cache
</message>
<message name="PixelCacheIsNotOpen">
pixel cache is not open
</message>
<message name="PixelsAreNotAuthentic">
pixels are not authentic
</message>
<message name="UnableToCloneCache">
unable to clone cache
</message>
<message name="UnableToExtendCache">
unable to extend cache
</message>
<message name="UnableToGetCacheNexus">
unable to get cache nexus
</message>
<message name="UnableToOpenPixelCache">
unable to open pixel cache
</message>
<message name="UnableToPersistPixelCache">
unable to persist pixel cache
</message>
<message name="UnableToReadPixelCache">
unable to read pixel cache
</message>
<message name="UnableToWritePixelCache">
unable to write pixel cache
</message>
</error>
<fatalerror>
<message name="UnableToAcquireCacheView">
unable to acquire cache view
</message>
<message name="UnableToExtendPixelCache">
        unable to extend pixel cache
</message>
</fatalerror>
</cache>
<coder>
<error>
<message name="ColormapTypeNotSupported">
colormap type not supported
</message>
<message name="ColorspaceModelIsNotSupported">
colorspace model is not supported
</message>
<message name="CompressNotSupported">
compression not supported
</message>
<message name="DataEncodingSchemeIsNotSupported">
data encoding scheme is not supported
</message>
<message name="DataStorageTypeIsNotSupported">
data storage type is not supported
</message>
<message name="DeltaPNGNotSupported">
delta-PNG is not supported
</message>
<message name="EncryptedWPGImageFileNotSupported">
encrypted WPG image file not supported
</message>
<message name="FractalCompressNotSupported">
fractal compression not supported
</message>
<message name="ImageColumnOrRowSizeIsNotSupported">
image column or row size is not supported
</message>
<message name="ImageDoesNotHaveAClipMask">
image does not have a clip mask
</message>
<message name="ImageDoesNotHaveAAlphaChannel">
        image does not have an alpha channel
</message>
<message name="ImageDoesNotHaveAThumbnail">
image does not have a EXIF thumbnail
</message>
<message name="ImageIsNotTiled">
image is not tiled
</message>
<message name="IrregularChannelGeometryNotSupported">
irregular channel geometry not supported
</message>
<message name="JNGCompressNotSupported">
JNG compression not supported
</message>
<message name="JPEGCompressNotSupported">
JPEG compression not supported
</message>
<message name="JPEGEmbeddingFailed">
JPEG embedding failed
</message>
<message name="LocationTypeIsNotSupported">
location type is not supported
</message>
<message name="MapStorageTypeIsNotSupported">
map storage type is not supported
</message>
<message name="MultidimensionalMatricesAreNotSupported">
multi-dimensional matrices are not supported
</message>
<message name="MultipleRecordListNotSupported">
multiple record list not supported
</message>
<message name="NoBitmapOnClipboard">
no bitmap on clipboard
</message>
<message name="NoAPP1DataIsAvailable">
no APP1 data is available
</message>
<message name="No8BIMDataIsAvailable">
no 8BIM data is available
</message>
<message name="NoColorProfileIsAvailable">
no color profile is available
</message>
<message name="NoDataReturned">
no data returned
</message>
<message name="NoImageVectorGraphics">
no image vector graphics; unable to generate SVG
</message>
<message name="NoIPTCProfileAvailable">
no IPTC profile available
</message>
<message name="NumberOfImagesIsNotSupported">
number of images is not supported
</message>
<message name="OnlyContinuousTonePictureSupported">
only continuous tone picture supported
</message>
<message name="OnlyLevelZerofilesSupported">
        only level zero files supported
</message>
<message name="PNGCompressNotSupported">
PNG compression not supported
</message>
<message name="RLECompressNotSupported">
RLE compression not supported
</message>
<message name="UnableToCopyProfile">
unable to copy profile
</message>
<message name="UnableToCreateBitmap">
unable to create bitmap
</message>
<message name="UnableToCreateADC">
unable to create a DC
</message>
<message name="UnableToDecompressImage">
unable to decompress image
</message>
<message name="UnableToWriteMPEGParameters">
unable to write MPEG parameters
</message>
<message name="UnableToZipCompressImage">
unable to zip-compress image
</message>
<message name="ZIPCompressNotSupported">
ZIP compression not supported
</message>
</error>
<warning>
<message name="ExifProfileSizeExceedsLimit">
exif profile size exceeds limit and will be truncated
</message>
<message name="LosslessToLossyJPEGConversion">
lossless to lossy JPEG conversion
</message>
</warning>
</coder>
<configure>
<error>
<message name="IncludeElementNestedTooDeeply">
include element nested too deeply
</message>
</error>
<warning>
<message name="UnableToOpenConfigureFile">
unable to access configure file
</message>
<message name="UnableToOpenModuleFile">
unable to open module file
</message>
</warning>
</configure>
<corrupt>
<image>
<error>
<message name="AnErrorHasOccurredReadingFromFile">
an error has occurred reading from file
</message>
<message name="AnErrorHasOccurredWritingToFile">
an error has occurred writing to file
</message>
<message name="CipherSupportNotEnabled">
cipher support not enabled
</message>
<message name="ColormapExceeds256Colors">
colormap exceeded 256 colors
</message>
<message name="CorruptImage">
corrupt image
</message>
<message name="FileFormatVersionMismatch">
file format version mismatch
</message>
<message name="ImageDepthNotSupported">
image depth not supported
</message>
<message name="ImageFileDoesNotContainAnyImageData">
image file does not contain any image data
</message>
<message name="ImageTypeNotSupported">
image type not supported
</message>
<message name="ImproperImageHeader">
improper image header
</message>
<message name="InsufficientImageDataInFile">
insufficient image data in file
</message>
<message name="InvalidColormapIndex">
invalid colormap index
</message>
<message name="InvalidPixel">
invalid pixel
</message>
<message name="LengthAndFilesizeDoNotMatch">
length and filesize do not match
</message>
<message name="MaximumChannelsExceeded">
maximum channels exceeded
</message>
<message name="MissingImageChannel">
missing image channel
</message>
<message name="NegativeOrZeroImageSize">
negative or zero image size
</message>
<message name="NonOS2HeaderSizeError">
non OS2 BMP header size less than 40
</message>
<message name="NotEnoughPixelData">
not enough pixel data
</message>
<message name="NotEnoughTiles">
not enough tiles found in level
</message>
<message name="TooMuchImageDataInFile">
too much image data in file
</message>
<message name="StaticPlanesValueNotEqualToOne">
static planes value not equal to 1
</message>
<message name="UnableToReadExtensionBlock">
unable to read extension block
</message>
<message name="UnableToReadImageHeader">
unable to read image header
</message>
<message name="UnableToReadImageData">
unable to read image data
</message>
<message name="UnableToRunlengthDecodeImage">
unable to runlength decode image
</message>
<message name="UnableToUncompressImage">
unable to uncompress image
</message>
<message name="UnexpectedEndOfFile">
unexpected end-of-file
</message>
<message name="UnexpectedSamplingFactor">
unexpected sampling factor
</message>
<message name="UnknownPatternType">
unknown pattern type
</message>
<message name="UnrecognizedAlphaChannelType">
unrecognized alpha channel type
</message>
<message name="UnrecognizedBitsPerPixel">
unrecognized bits per pixel
</message>
<message name="UnrecognizedImageCompression">
unrecognized compression
</message>
<message name="UnrecognizedNumberOfColors">
unrecognized number of colors
</message>
</error>
<fatalerror>
<message name="UnableToPersistKey">
unable to persist key
</message>
</fatalerror>
<warning>
<message name="InsufficientImageDataInFile">
insufficient image data in file
</message>
<message name="LengthAndFilesizeDoNotMatch">
length and filesize do not match
</message>
<message name="SkipToSyncByte">
corrupt PCD image, skipping to sync byte
</message>
</warning>
</image>
</corrupt>
<delegate>
<error>
<message name="DelegateFailed">
delegate failed
</message>
<message name="FailedToComputeOutputSize">
failed to compute output size
</message>
<message name="FailedToRenderFile">
failed to render file
</message>
<message name="FailedToScanFile">
failed to scan file
</message>
<message name="NoTagFound">
no tag found
</message>
<message name="PCLDelegateFailed">
PCL delegate failed
</message>
<message name="PostscriptDelegateFailed">
Postscript delegate failed
</message>
<message name="UnableToCreateImage">
unable to create image
</message>
<message name="UnableToDecodeImageFile">
unable to decode image file
</message>
<message name="UnableToEncodeImageFile">
unable to encode image file
</message>
<message name="UnableToInitializeFPXLibrary">
unable to initialize FPX library
</message>
<message name="UnableToInitializeWMFLibrary">
unable to initialize WMF library
</message>
<message name="UnableToManageJP2Stream">
unable to manage JP2 stream
</message>
<message name="UnableToReadAspectRatio">
unable to read aspect ratio
</message>
<message name="UnableToReadSummaryInfo">
unable to read summary info
</message>
<message name="UnableToSetAffineMatrix">
unable to set affine matrix
</message>
<message name="UnableToSetAspectRatio">
unable to set aspect ratio
</message>
<message name="UnableToSetColorTwist">
unable to set color twist
</message>
<message name="UnableToSetContrast">
unable to set contrast
</message>
<message name="UnableToSetFilteringValue">
unable to set filtering value
</message>
<message name="UnableToSetImageTitle">
unable to set image title
</message>
<message name="UnableToSetJPEGLevel">
unable to set JPEG level
</message>
<message name="UnableToSetRegionOfInterest">
unable to set region of interest
</message>
<message name="UnableToSetSummaryInfo">
unable to set summary info
</message>
<message name="UnableToWriteSVGFormat">
unable to write SVG format
</message>
<message name="XPSDelegateFailed">
XPS delegate failed
</message>
</error>
</delegate>
<draw>
<error>
<message name="AlreadyPushingPatternDefinition">
already pushing pattern definition
</message>
<message name="NonconformingDrawingPrimitiveDefinition">
non-conforming drawing primitive definition
</message>
<message name="NotARelativeURL">
not a relative URL
</message>
<message name="NotCurrentlyPushingPatternDefinition">
not currently pushing pattern definition
</message>
<message name="SegmentStackOverflow">
segment stack overflow
</message>
<message name="TooManyBezierCoordinates">
too many bezier coordinates
</message>
<message name="UnableToPrint">
unable to print
</message>
<message name="UnbalancedGraphicContextPushPop">
unbalanced graphic context push-pop
</message>
<message name="URLNotFound">
URL not found
</message>
</error>
</draw>
<file>
<open>
<error>
<message name="AnErrorHasOccurredReadingFromFile">
an error has occurred reading from file
</message>
<message name="UnableToCreateTemporaryFile">
unable to create temporary file
</message>
<message name="UnableToOpenFile">
unable to open file
</message>
<message name="UnableToWriteFile">
unable to write file
</message>
</error>
</open>
</file>
<image>
<error>
<message name="AngleIsDiscontinuous">
angle is discontinuous
</message>
<message name="ColormappedImageRequired">
colormapped image required
</message>
<message name="ColorSeparatedImageRequired">
color separated image required
</message>
<message name="ColorspaceColorProfileMismatch">
color profile operates on another colorspace
</message>
<message name="ImageDepthNotSupported">
image depth not supported
</message>
<message name="ImageMorphologyDiffers">
image morphology differs
</message>
<message name="ImageSequenceRequired">
image sequence is required
</message>
<message name="ImageSizeDiffers">
image size differs
</message>
<message name="ImagesTooDissimilar">
images too dissimilar
</message>
<message name="LeftAndRightImageSizesDiffer">
left and right image sizes differ
</message>
<message name="NegativeOrZeroImageSize">
negative or zero image size
</message>
<message name="NoImagesWereFound">
no images were found
</message>
<message name="NoImagesWereLoaded">
no images were loaded
</message>
<message name="TooManyClusters">
too many clusters
</message>
<message name="UnableToCreateColorTransform">
unable to create color transform
</message>
<message name="WidthOrHeightExceedsLimit">
width or height exceeds limit
</message>
</error>
<warning>
<message name="AssociateProfile">
associate profile with image, a source and destination color profile required for transform
</message>
</warning>
</image>
<filter>
<error>
<message name="FilterFailed">
filter failed
</message>
</error>
</filter>
<missing>
<delegate>
<error>
<message name="DelegateLibrarySupportNotBuiltIn">
delegate library support not built-in
</message>
<message name="NoDecodeDelegateForThisImageFormat">
no decode delegate for this image format
</message>
<message name="NoEncodeDelegateForThisImageFormat">
no encode delegate for this image format
</message>
</error>
<warning>
<message name="DelegateLibrarySupportNotBuiltIn">
delegate library support not built-in
</message>
<message name="FreeTypeLibraryIsNotAvailable">
FreeType library is not available
</message>
<message name="LCMSLibraryIsNotAvailable">
LCMS color profile library is not available
</message>
<message name="NoEncodeDelegateForThisImageFormat">
no encode delegate for this image format
</message>
</warning>
</delegate>
</missing>
<module>
<error>
<message name="ImageCoderSignatureMismatch">
image coder signature mismatch
</message>
<message name="ImageFilterSignatureMismatch">
image filter signature mismatch
</message>
<message name="UnableToLoadModule">
unable to load module
</message>
<message name="UnableToRegisterImageFormat">
unable to register image format
</message>
</error>
<fatalerror>
<message name="UnableToInitializeModuleLoader">
unable to initialize module loader
</message>
</fatalerror>
<warning>
<message name="UnableToCloseModule">
unable to close module
</message>
</warning>
</module>
<option>
<error>
<message name="ClutImageRequired">
color lookup table image required
</message>
<message name="CompositeImageRequired">
composite image required
</message>
<message name="DivideByZero">
divide by zero
</message>
<message name="FrameIsLessThanImageSize">
frame is less than image size
</message>
<message name="GeometryDimensionsAreZero">
geometry dimensions are zero
</message>
<message name="GeometryDoesNotContainImage">
geometry does not contain image
</message>
<message name="ImagesAreNotTheSameSize">
images are not the same size
</message>
<message name="ImageSizeMustExceedBevelWidth">
size must exceed bevel width
</message>
<message name="ImageWidthsOrHeightsDiffer">
image widths or heights differ
</message>
<message name="InvalidArgument">
invalid argument for option
</message>
<message name="InvalidGeometry">
invalid geometry
</message>
<message name="KernelWidthMustBeAnOddNumber">
kernel width must be an odd number
</message>
<message name="MapImageRequired">
map image required
</message>
<message name="MissingArgument">
option requires an argument
</message>
<message name="MissingAnImageFilename">
missing an image filename
</message>
<message name="MissingExpression">
missing expression
</message>
<message name="MissingNullSeparator">
missing Null Image List Separator
</message>
<message name="MustSpecifyAnImageName">
must specify an image name
</message>
<message name="MustSpecifyImageSize">
must specify image size
</message>
<message name="NoBlobDefined">
no Binary Large OBjects defined
</message>
<message name="NoClipPathDefined">
no clip path defined
</message>
<message name="NoImagesDefined">
no images defined
</message>
<message name="NoImageVectorGraphics">
no image vector graphics
</message>
<message name="NoSuchImage">
no such image
</message>
<message name="NoSuchOption">
no such option
</message>
<message name="NonZeroWidthAndHeightRequired">
non-zero width and height required
</message>
<message name="NotEnoughParameters">
not enough parameters
</message>
<message name="ParenthesisNestedTooDeeply">
parenthesis nested too deeply
</message>
<message name="ReferenceImageRequired">
reference image required
</message>
<message name="ReferenceIsNotMyType">
reference is not my type
</message>
<message name="SteganoImageRequired">
stegano image required
</message>
<message name="StereoImageRequired">
stereo image required
</message>
<message name="SubimageSpecificationReturnsNoImages">
subimage specification returns no images
</message>
<message name="UnableToAccessPath">
unable to access file path
</message>
<message name="UnableToOpenFile">
unable to open file
</message>
<message name="UnableToParseExpression">
unable to parse expression
</message>
<message name="UnableToParseKernel">
unable to parse kernel string
</message>
<message name="UnbalancedBraces">
unbalanced braces
</message>
<message name="UnbalancedParenthesis">
unbalanced parenthesis
</message>
<message name="UnrecognizedAttribute">
unrecognized attribute
</message>
<message name="UnrecognizedChannelType">
unrecognized channel type
</message>
<message name="UnrecognizedColor">
unrecognized color
</message>
<message name="UnrecognizedColorspace">
unrecognized image colorspace
</message>
<message name="UnrecognizedComposeOperator">
unrecognized compose operator
</message>
<message name="UnrecognizedCompressType">
unrecognized compress type
</message>
<message name="UnrecognizedDirectionType">
unrecognized direction type
</message>
<message name="UnrecognizedDisposeMethod">
unrecognized dispose method
</message>
<message name="UnrecognizedDistortMethod">
unrecognized distortion method
</message>
<message name="UnrecognizedDitherMethod">
unrecognized dither method
</message>
<message name="UnrecognizedEndianType">
unrecognized endian type
</message>
<message name="UnrecognizedElement">
unrecognized element
</message>
<message name="UnrecognizedEvaluateOperator">
unrecognized evaluate operator
</message>
<message name="UnrecognizedEventType">
unrecognized event type
</message>
<message name="UnrecognizedFunction">
unrecognized function
</message>
<message name="UnrecognizedGravityType">
unrecognized gravity type
</message>
<message name="UnrecognizedImageCompression">
unrecognized image compression
</message>
<message name="UnrecognizedImageFilter">
unrecognized image filter
</message>
<message name="UnrecognizedImageFormat">
unrecognized image format
</message>
<message name="UnrecognizedImageMode">
unrecognized image mode
</message>
<message name="UnrecognizedImageOrientation">
unrecognized image orientation
</message>
<message name="UnrecognizedImageType">
unrecognized image type
</message>
<message name="UnrecognizedIntensityMethod">
unrecognized intensity method
</message>
<message name="UnrecognizedIntentType">
unrecognized intent type
</message>
<message name="UnrecognizedInterlaceType">
unrecognized interlace type
</message>
<message name="UnrecognizedInterpolateMethod">
unrecognized interpolate method
</message>
<message name="UnrecognizedKernelType">
unrecognized kernel type
</message>
<message name="UnrecognizedListType">
unrecognized list type
</message>
<message name="UnrecognizedMetricType">
unrecognized metric type
</message>
<message name="UnrecognizedModeType">
unrecognized mode type
</message>
<message name="UnrecognizedMorphologyMethod">
unrecognized morphology method
</message>
<message name="UnrecognizedOption">
unrecognized option
</message>
<message name="UnrecognizedPerlMagickMethod">
unrecognized PerlMagick method
</message>
<message name="UnrecognizedPixelMap">
unrecognized pixel map
</message>
<message name="UnrecognizedPreviewType">
unrecognized preview type
</message>
<message name="UnrecognizedResourceType">
unrecognized resource type
</message>
<message name="UnrecognizedSparseColorMethod">
unrecognized sparse color method
</message>
<message name="UnrecognizedStorageType">
unrecognized storage type
</message>
<message name="UnrecognizedStretchType">
unrecognized stretch type
</message>
<message name="UnrecognizedType">
unrecognized type
</message>
<message name="UnrecognizedUnitsType">
unrecognized units type
</message>
<message name="UnrecognizedValidateType">
unrecognized validate type
</message>
<message name="UnrecognizedVirtualPixelMethod">
unrecognized virtual pixel method
</message>
<message name="XmlInvalidAttribute">
XML invalid attribute
</message>
<message name="XmlInvalidContent">
XML invalid content
</message>
<message name="XmlMissingAttribute">
XML missing required attribute
</message>
<message name="XmlMissingContent">
XML missing required content
</message>
<message name="XmlMissingElement">
XML missing required element
</message>
</error>
<fatalerror>
<message name="FilenameTruncated">
image filename truncated
</message>
<message name="MissingAnImageFilename">
missing an image filename
</message>
<message name="UnrecognizedColormapType">
unrecognized colormap type
</message>
<message name="UnrecognizedColorspaceType">
unrecognized colorspace type
</message>
<message name="UnrecognizedEndianType">
unrecognized endian type
</message>
<message name="UnrecognizedImageCompressionType">
unrecognized compression type
</message>
<message name="UnrecognizedImageType">
unrecognized image type
</message>
<message name="UnrecognizedInterlaceType">
unrecognized interlace type
</message>
<message name="UnrecognizedListType">
unrecognized list type
</message>
<message name="UnrecognizedDisposeMethod">
unrecognized dispose method
</message>
<message name="UnrecognizedOption">
unrecognized option
</message>
<message name="UnrecognizedResourceType">
unrecognized resource type
</message>
<message name="UnrecognizedVirtualPixelMethod">
unrecognized virtual pixel method
</message>
</fatalerror>
<warning>
<message name="GeometryDoesNotContainImage">
geometry does not contain image
</message>
<message name="NoSuchElement">
no such element in list
</message>
<message name="UnknownImageProperty">
unknown image property
</message>
<message name="UnrecognizedColor">
unrecognized color
</message>
<message name="ZeroTimeAnimation">
animation only contains zero time delays
</message>
</warning>
</option>
<policy>
<error>
<message name="NotAuthorized">
not authorized
</message>
</error>
</policy>
<registry>
<error>
<message name="UnableToGetRegistryID">
unable to get registry ID
</message>
<message name="UnableToSetRegistry">
unable to set registry
</message>
</error>
</registry>
<resource>
<limit>
<error>
<message name="PixelCacheAllocationFailed">
pixel cache allocation failed
</message>
<message name="MemoryAllocationFailed">
memory allocation failed
</message>
<message name="TooManyObjects">
too many objects
</message>
<message name="UnableToAcquireString">
unable to acquire string
</message>
<message name="UnableToAllocateColormap">
unable to allocate colormap
</message>
<message name="UnableToConvertFont">
unable to convert font
</message>
<message name="UnableToCreateColormap">
unable to create colormap
</message>
<message name="UnableToDitherImage">
unable to dither image
</message>
<message name="UnableToClonePackageInfo">
unable to clone package info
</message>
<message name="UnableToGetPackageInfo">
unable to get package info
</message>
</error>
<fatalerror>
<message name="TimeLimitExceeded">
time limit exceeded
</message>
<message name="UnableToAllocateDashPattern">
unable to allocate dash pattern
</message>
<message name="UnableToAllocateDerivatives">
unable to allocate derivatives
</message>
<message name="UnableToAllocateGammaMap">
unable to allocate gamma map
</message>
<message name="UnableToAllocateImage">
unable to allocate image
</message>
<message name="UnableToAllocateImagePixels">
unable to allocate image pixels
</message>
<message name="UnableToDestroySemaphore">
unable to destroy semaphore
</message>
<message name="UnableToInstantiateSemaphore">
unable to instantiate semaphore
</message>
<message name="UnableToAcquireString">
unable to allocate string
</message>
<message name="MemoryAllocationFailed">
memory allocation failed
</message>
<message name="UnableToConcatenateString">
unable to concatenate string
</message>
<message name="UnableToConvertText">
unable to convert text
</message>
<message name="UnableToCreateColormap">
unable to create colormap
</message>
<message name="UnableToCloneImage">
unable to clone image
</message>
<message name="UnableToDisplayImage">
unable to display image
</message>
<message name="UnableToEscapeString">
unable to escape string
</message>
<message name="UnableToInterpretMSLImage">
unable to interpret MSL image
</message>
<message name="UnableToLockSemaphore">
unable to lock semaphore
</message>
<message name="UnableToUnlockSemaphore">
unable to unlock semaphore
</message>
</fatalerror>
<warning>
<message name="MemoryAllocationFailed">
memory allocation failed
</message>
</warning>
</limit>
</resource>
<type>
<error>
<message name="FontSubstitutionRequired">
font substitution required
</message>
<message name="UnableToGetTypeMetrics">
unable to get type metrics
</message>
<message name="UnableToInitializeFreetypeLibrary">
unable to initialize freetype library
</message>
<message name="UnableToReadFont">
unable to read font
</message>
<message name="UnrecognizedFontEncoding">
unrecognized font encoding
</message>
</error>
<warning>
<message name="UnableToReadFont">
unable to read font
</message>
</warning>
</type>
<stream>
<error>
<message name="ImageDoesNotContainTheStreamGeometry">
image does not contain the stream geometry
</message>
<message name="NoStreamHandlerIsDefined">
no stream handler is defined
</message>
<message name="PixelCacheIsNotOpen">
pixel cache is not open
</message>
</error>
</stream>
<wand>
<error>
<message name="InvalidColormapIndex">
invalid colormap index
</message>
<message name="ZeroRegionSize">
zero region size
</message>
<message name="UnableToOpenFile">
unable to open file
</message>
<message name="QuantumDepthMismatch">
wand quantum depth does not match that of the core API
</message>
<message name="ContainsNoImages">
wand contains no images
</message>
<message name="ContainsNoIterators">
wand contains no iterators
</message>
</error>
</wand>
<xserver>
<error>
<message name="ColorIsNotKnownToServer">
color is not known to server
</message>
<message name="NoWindowWithSpecifiedIDExists">
no window with specified ID exists
</message>
<message name="StandardColormapIsNotInitialized">
standard Colormap is not initialized
</message>
<message name="UnableToConnectToRemoteDisplay">
unable to connect to remote display
</message>
<message name="UnableToCreateBitmap">
unable to create bitmap
</message>
<message name="UnableToCreateColormap">
unable to create colormap
</message>
<message name="UnableToCreatePixmap">
unable to create pixmap
</message>
<message name="UnableToCreateProperty">
unable to create property
</message>
<message name="UnableToCreateStandardColormap">
unable to create standard colormap
</message>
<message name="UnableToDisplayImageInfo">
unable to display image info
</message>
<message name="UnableToGetProperty">
unable to get property
</message>
<message name="UnableToGetStandardColormap">
unable to get Standard Colormap
</message>
<message name="UnableToGetVisual">
unable to get visual
</message>
<message name="UnableToGrabMouse">
unable to grab mouse
</message>
<message name="UnableToLoadFont">
unable to load font
</message>
<message name="UnableToMatchVisualToStandardColormap">
unable to match visual to Standard Colormap
</message>
<message name="UnableToOpenXServer">
unable to open X server
</message>
<message name="UnableToReadXWindowAttributes">
unable to read X window attributes
</message>
<message name="UnableToReadXWindowImage">
unable to read X window image
</message>
<message name="UnrecognizedColormapType">
unrecognized colormap type
</message>
<message name="UnrecognizedGravityType">
unrecognized gravity type
</message>
<message name="UnrecognizedVisualSpecifier">
unrecognized visual specifier
</message>
</error>
<fatalerror>
<message name="UnableToCreateCursor">
unable to create X cursor
</message>
<message name="UnableToCreateGraphicContext">
unable to create graphic context
</message>
<message name="UnableToCreateStandardColormap">
unable to create standard colormap
</message>
<message name="UnableToCreateTextProperty">
unable to create text property
</message>
<message name="UnableToCreateXWindow">
unable to create X window
</message>
<message name="UnableToCreateXImage">
unable to create X image
</message>
<message name="UnableToCreateXPixmap">
unable to create X pixmap
</message>
<message name="UnableToDisplayImage">
unable to display image
</message>
<message name="UnableToGetVisual">
unable to get visual
</message>
<message name="UnableToGetPixelInfo">
unable to get pixel info
</message>
<message name="UnableToLoadFont">
unable to load font
</message>
<message name="UnableToMakeXWindow">
unable to make X window
</message>
<message name="UnableToOpenXServer">
unable to open X server
</message>
<message name="UnableToViewFonts">
unable to view fonts
</message>
</fatalerror>
<warning>
<message name="UsingDefaultVisual">
using default visual
</message>
<message name="UnableToGetVisual">
unable to get visual
</message>
</warning>
</xserver>
</exception>
<monitor>
<AddNoise>
<message name="Image">
add noise to image
</message>
</AddNoise>
<Append>
<message name="Image">
append image sequence
</message>
</Append>
<assign>
<message name="Image">
assign image colors
</message>
</assign>
<Average>
<message name="Image">
average image sequence
</message>
</Average>
<Chop>
<message name="Image">
chop image
</message>
</Chop>
<Classify>
<message name="Image">
classify image colors
</message>
</Classify>
<ColorReplace>
<message name="Image">
replace color in image
</message>
</ColorReplace>
<Colorize>
<message name="Image">
colorize image
</message>
</Colorize>
<Combine>
<message name="Image">
combine image
</message>
</Combine>
<ContrastStretch>
<message name="Image">
contrast-stretch image
</message>
</ContrastStretch>
<Convolve>
<message name="Image">
convolve image
</message>
</Convolve>
<Crop>
<message name="Image">
crop image
</message>
</Crop>
<Decode>
<message name="Image">
decode image
</message>
</Decode>
<Despeckle>
<message name="Image">
despeckle image
</message>
</Despeckle>
<Distort>
<message name="Image">
distort image
</message>
</Distort>
<Dither>
<message name="Image">
dither image colors
</message>
</Dither>
<DullContrast>
<message name="Image">
dull image contrast
</message>
</DullContrast>
<Encode>
<message name="Image">
encode image
</message>
</Encode>
<Equalize>
<message name="Image">
equalize image
</message>
</Equalize>
<Flip>
<message name="Image">
flip image
</message>
</Flip>
<Flop>
<message name="Image">
flop image
</message>
</Flop>
<Frame>
<message name="Image">
add frame to image
</message>
</Frame>
<Fx>
<message name="Image">
fx image
</message>
</Fx>
<GammaCorrect>
<message name="Image">
gamma correct image
</message>
</GammaCorrect>
<Histogram>
<message name="Image">
compute image histogram
</message>
</Histogram>
<Implode>
<message name="Image">
implode image
</message>
</Implode>
<Level>
<message name="Image">
level image
</message>
</Level>
<Load>
<message name="Image">
load image
</message>
<message name="Images">
load images
</message>
</Load>
<Magnfiy>
<message name="Image">
magnify image
</message>
</Magnfiy>
<MedianFilter>
<message name="Image">
filter image with neighborhood ranking
</message>
</MedianFilter>
<Minify>
<message name="Image">
minify image
</message>
</Minify>
<Modulate>
<message name="Image">
modulate image
</message>
</Modulate>
<Mogrify>
<message name="Image">
mogrify image
</message>
</Mogrify>
<Montage>
<message name="Image">
montage image
</message>
</Montage>
<Morph>
<message name="Image">
morph image sequence
</message>
</Morph>
<Mosaic>
<message name="Image">
mosaic image
</message>
</Mosaic>
<Negate>
<message name="Image">
negate image
</message>
</Negate>
<OilPaint>
<message name="Image">
oil paint image
</message>
</OilPaint>
<Opaque>
<message name="Image">
set opaque color in image
</message>
</Opaque>
<Plasma>
<message name="Image">
plasma image
</message>
</Plasma>
<Preview>
<message name="Image">
preview image
</message>
</Preview>
<Raise>
<message name="Image">
raise image
</message>
</Raise>
<Recolor>
<message name="Image">
recolor color image
</message>
</Recolor>
<Reduce>
<message name="Image">
reduce image colors
</message>
</Reduce>
<ReduceNoise>
<message name="Image">
reduce the image noise
</message>
</ReduceNoise>
<Render>
<message name="Image">
render image
</message>
</Render>
<Resize>
<message name="Image">
resize image
</message>
</Resize>
<RGBTransform>
<message name="Image">
RGB transform image
</message>
</RGBTransform>
<Roll>
<message name="Image">
roll image
</message>
</Roll>
<Rotate>
<message name="Image">
rotate image
</message>
</Rotate>
<Sample>
<message name="Image">
sample image
</message>
</Sample>
<Save>
<message name="Image">
save image
</message>
<message name="Images">
save images
</message>
</Save>
<Scale>
<message name="Image">
scale image
</message>
</Scale>
<Segment>
<message name="Image">
segment image
</message>
</Segment>
<Separate>
<message name="Image">
extract a channel from image
</message>
</Separate>
<SepiaTone>
<message name="Image">
sepia-tone image
</message>
</SepiaTone>
<Shade>
<message name="Image">
shade image
</message>
</Shade>
<Sharpen>
<message name="Image">
sharpen image
</message>
</Sharpen>
<SharpenContrast>
<message name="Image">
sharpen image contrast
</message>
</SharpenContrast>
<SigmoidalContrast>
<message name="Image">
sigmoidal contrast image
</message>
</SigmoidalContrast>
<Solarize>
<message name="Image">
solarize image
</message>
</Solarize>
<Splice>
<message name="Image">
splice image
</message>
</Splice>
<Spread>
<message name="Image">
spread image
</message>
</Spread>
<Stegano>
<message name="Image">
stegano image
</message>
</Stegano>
<Stereo>
<message name="Image">
stereo image
</message>
</Stereo>
<Swirl>
<message name="Image">
swirl image
</message>
</Swirl>
<Texture>
<message name="Image">
texture image
</message>
</Texture>
<Threshold>
<message name="Image">
threshold image
</message>
</Threshold>
<Tile>
<message name="Image">
tile image
</message>
</Tile>
<Tint>
<message name="Image">
tint image
</message>
</Tint>
<TransformRGB>
<message name="Image">
transform RGB image
</message>
</TransformRGB>
<Transparent>
<message name="Image">
set transparent color in image
</message>
</Transparent>
<Wave>
<message name="Image">
wave image
</message>
</Wave>
<Write>
<message name="Image">
write image
</message>
</Write>
<XShear>
<message name="Image">
x shear image
</message>
</XShear>
<YShear>
<message name="Image">
y shear image
</message>
</YShear>
</monitor>
</locale>
|
4777_1
|
crossvul
|
xml
|
CWE-119
|
Improper Restriction of Operations within the Bounds of a Memory Buffer - Certain languages allow direct addressing of memory locations and do not automatically ensure that these locations are valid for the memory buffer that is being referenced.
|
shell
|
#!/bin/sh
# sys/rebuild.sh -- rebuild selected radare2 subsystems in place.
# Walk up the directory tree until the repository root (the directory
# containing sys/rebuild.sh) is found.
while : ; do
	if [ -f sys/rebuild.sh ]; then
		break
	fi
	cd ..
	if [ "`pwd`" = / ]; then
		echo "Cant find sys/rebuild.sh"
		exit 1
	fi
done

# Rebuild "$1" from scratch: clean, then parallel build.
Rebuild() {
	cd "$1" || exit 1
	make clean
	make -j8 || exit 1
	cd -
}

# Build "$1" incrementally (no clean step).
Build() {
	cd "$1" || exit 1
	make -j8 || exit 1
	cd -
}

# Rebuild the debugger libs and the radare2 binary, sign it for iOS,
# and optionally scp it to the device at $IOSIP.
RebuildIOSDebug() {
	Rebuild libr/debug
	# Rebuild libr/util
	# Rebuild libr/core
	Rebuild binr/radare2
	make -C binr/radare2 ios-sign
	if [ -n "${IOSIP}" ]; then
		scp binr/radare2/radare2 root@"${IOSIP}:."
	else
		echo "Set IOSIP environment variable to scp the radare2 program"
	fi
}

# Rebuild the Java class parser and the libraries that consume it.
RebuildJava() {
	Rebuild shlr/java
	Rebuild libr/asm
	Rebuild libr/bin
	Rebuild libr/core
}

# Rebuild the bundled capstone disassembler and its consumers.
RebuildCapstone() {
	Rebuild shlr/capstone
	Rebuild libr/asm
	Rebuild libr/anal
}

# Rebuild the sdb key/value store and libr/util.
RebuildSdb() {
	Rebuild shlr/sdb
	Rebuild libr/util
}

# Rebuild the grub filesystem code and libr/fs.
RebuildFs() {
	Rebuild shlr/grub
	Rebuild libr/fs
}

# Rebuild libr/bin and libr/core.
RebuildBin() {
	Rebuild libr/bin
	Rebuild libr/core
}

# Rebuild the gdb remote protocol support and its consumers.
RebuildGdb() {
	Rebuild shlr/gdb
	Rebuild libr/io
	Rebuild libr/debug
}

case "$1" in
fs) RebuildFs ; ;;
bin) RebuildBin ; ;;
gdb) RebuildGdb ; ;;
sdb) RebuildSdb ; ;;
# NOTE(review): RebuildSpp is not defined anywhere in this script -- confirm
# whether it should be added (e.g. "Rebuild shlr/spp") or this case dropped.
spp) RebuildSpp ; ;;
java) RebuildJava ; ;;
iosdbg) RebuildIOSDebug ; ;;
capstone|cs) RebuildCapstone ; ;;
*)
	echo "Usage: sys/rebuild.sh [bin|capstone|cs|fs|gdb|iosdbg|java|sdb|spp]"
	;;
esac
|
#!/bin/sh
# sys/rebuild.sh -- rebuild selected radare2 subsystems in place.
# Walk up the directory tree until the repository root (the directory
# containing sys/rebuild.sh) is found.
while : ; do
	if [ -f sys/rebuild.sh ]; then
		break
	fi
	cd ..
	if [ "`pwd`" = / ]; then
		echo "Cant find sys/rebuild.sh"
		exit 1
	fi
done

# Rebuild "$1" from scratch: clean, then parallel build.
Rebuild() {
	cd "$1" || exit 1
	make clean
	make -j8 || exit 1
	cd -
}

# Build "$1" incrementally (no clean step).
Build() {
	cd "$1" || exit 1
	make -j8 || exit 1
	cd -
}

# Rebuild the debugger libs and the radare2 binary, sign it for iOS,
# and optionally scp it to the device at $IOSIP.
RebuildIOSDebug() {
	Rebuild libr/debug
	# Rebuild libr/util
	# Rebuild libr/core
	Rebuild binr/radare2
	make -C binr/radare2 ios-sign
	if [ -n "${IOSIP}" ]; then
		scp binr/radare2/radare2 root@"${IOSIP}:."
	else
		echo "Set IOSIP environment variable to scp the radare2 program"
	fi
}

# Rebuild the Java class parser and the libraries that consume it.
RebuildJava() {
	Rebuild shlr/java
	Rebuild libr/asm
	Rebuild libr/bin
	Rebuild libr/core
}

# Rebuild the bundled capstone disassembler and its consumers.
RebuildCapstone() {
	Rebuild shlr/capstone
	Rebuild libr/asm
	Rebuild libr/anal
}

# Rebuild the sdb key/value store and libr/util.
RebuildSdb() {
	Rebuild shlr/sdb
	Rebuild libr/util
}

# Rebuild the grub filesystem code and libr/fs.
RebuildFs() {
	Rebuild shlr/grub
	Rebuild libr/fs
}

# Rebuild libr/bin and libr/core.
RebuildBin() {
	Rebuild libr/bin
	Rebuild libr/core
}

# Rebuild the gdb remote protocol support and its consumers.
RebuildGdb() {
	Rebuild shlr/gdb
	Rebuild libr/io
	Rebuild libr/debug
}

case "$1" in
grub|fs) RebuildFs ; ;;
bin) RebuildBin ; ;;
gdb) RebuildGdb ; ;;
sdb) RebuildSdb ; ;;
# NOTE(review): RebuildSpp is not defined anywhere in this script -- confirm
# whether it should be added (e.g. "Rebuild shlr/spp") or this case dropped.
spp) RebuildSpp ; ;;
java) RebuildJava ; ;;
iosdbg) RebuildIOSDebug ; ;;
capstone|cs) RebuildCapstone ; ;;
*)
	echo "Usage: sys/rebuild.sh [bin|capstone|cs|fs|gdb|grub|iosdbg|java|sdb|spp]"
	;;
esac
|
3407_4
|
crossvul
|
sh
|
CWE-119
|
Improper Restriction of Operations within the Bounds of a Memory Buffer - Certain languages allow direct addressing of memory locations and do not automatically ensure that these locations are valid for the memory buffer that is being referenced.
|
scala
|
/*
* Copyright 2009-2010 WorldWide Conferencing, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.liftweb
package json
/** JSON parser.
*/
object JsonParser {
import java.io._
class ParseException(message: String, cause: Exception) extends Exception(message, cause)
/** Parsed tokens from low level pull parser.
*/
sealed abstract class Token
case object OpenObj extends Token
case object CloseObj extends Token
case class FieldStart(name: String) extends Token
case object End extends Token
case class StringVal(value: String) extends Token
case class IntVal(value: BigInt) extends Token
case class DoubleVal(value: Double) extends Token
case class BoolVal(value: Boolean) extends Token
case object NullVal extends Token
case object OpenArr extends Token
case object CloseArr extends Token
/** Return parsed JSON.
* @throws ParseException is thrown if parsing fails
*/
def parse(s: String): JValue = parse(new Buffer(new StringReader(s), false))
/** Return parsed JSON.
* @param closeAutomatically true (default) if the Reader is automatically closed on EOF
* @throws ParseException is thrown if parsing fails
*/
def parse(s: Reader, closeAutomatically: Boolean = true): JValue =
parse(new Buffer(s, closeAutomatically))
/** Return parsed JSON.
*/
def parseOpt(s: String): Option[JValue] =
try { parse(s).toOpt } catch { case e: Exception => None }
/** Return parsed JSON.
* @param closeAutomatically true (default) if the Reader is automatically closed on EOF
*/
def parseOpt(s: Reader, closeAutomatically: Boolean = true): Option[JValue] =
try { parse(s, closeAutomatically).toOpt } catch { case e: Exception => None }
/** Parse in pull parsing style.
* Use <code>p.nextToken</code> to parse tokens one by one from a string.
* @see net.liftweb.json.JsonParser.Token
*/
def parse[A](s: String, p: Parser => A): A = parse(new StringReader(s), p)
/** Parse in pull parsing style.
* Use <code>p.nextToken</code> to parse tokens one by one from a stream.
* The Reader must be closed when parsing is stopped.
* @see net.liftweb.json.JsonParser.Token
*/
def parse[A](s: Reader, p: Parser => A): A = p(new Parser(new Buffer(s, false)))
private def parse(buf: Buffer): JValue = {
try {
astParser(new Parser(buf))
} catch {
case e: ParseException => throw e
case e: Exception => throw new ParseException("parsing failed", e)
} finally { buf.release }
}
private[json] def unquote(string: String): String =
unquote(new JsonParser.Buffer(new java.io.StringReader(string), false))
private[json] def unquote(buf: JsonParser.Buffer): String = {
def unquote0(buf: JsonParser.Buffer, base: String): String = {
val s = new java.lang.StringBuilder(base)
var c = '\\'
while (c != '"') {
if (c == '\\') {
buf.next match {
case '"' => s.append('"')
case '\\' => s.append('\\')
case '/' => s.append('/')
case 'b' => s.append('\b')
case 'f' => s.append('\f')
case 'n' => s.append('\n')
case 'r' => s.append('\r')
case 't' => s.append('\t')
case 'u' =>
val chars = Array(buf.next, buf.next, buf.next, buf.next)
val codePoint = Integer.parseInt(new String(chars), 16)
s.appendCodePoint(codePoint)
case _ => s.append('\\')
}
} else s.append(c)
c = buf.next
}
s.toString
}
buf.eofIsFailure = true
buf.mark
var c = buf.next
while (c != '"') {
if (c == '\\') {
val s = unquote0(buf, buf.substring)
buf.eofIsFailure = false
return s
}
c = buf.next
}
buf.eofIsFailure = false
buf.substring
}
// FIXME fail fast to prevent infinite loop, see
// http://www.exploringbinary.com/java-hangs-when-converting-2-2250738585072012e-308/
private val BrokenDouble = BigDecimal("2.2250738585072012e-308")
private[json] def parseDouble(s: String) = {
val d = BigDecimal(s)
if (d == BrokenDouble) sys.error("Error parsing 2.2250738585072012e-308")
else d.doubleValue
}
private val astParser = (p: Parser) => {
val vals = new ValStack(p)
var token: Token = null
var root: Option[JValue] = None
// This is a slightly faster way to correct order of fields and arrays than using 'map'.
def reverse(v: JValue): JValue = v match {
case JObject(l) => JObject(l.map(reverse).asInstanceOf[List[JField]].reverse)
case JArray(l) => JArray(l.map(reverse).reverse)
case JField(name, value) => JField(name, reverse(value))
case x => x
}
def closeBlock(v: JValue) {
vals.peekOption match {
case Some(f: JField) =>
val field = vals.pop(classOf[JField])
val newField = JField(field.name, v)
val obj = vals.peek(classOf[JObject])
vals.replace(JObject(newField :: obj.obj))
case Some(o: JObject) => v match {
case x: JField => vals.replace(JObject(x :: o.obj))
case _ => p.fail("expected field but got " + v)
}
case Some(a: JArray) => vals.replace(JArray(v :: a.arr))
case Some(x) => p.fail("expected field, array or object but got " + x)
case None => root = Some(reverse(v))
}
}
def newValue(v: JValue) {
if (!vals.isEmpty)
vals.peek(classOf[JValue]) match {
case f: JField =>
vals.pop(classOf[JField])
val newField = JField(f.name, v)
val obj = vals.peek(classOf[JObject])
vals.replace(JObject(newField :: obj.obj))
case a: JArray => vals.replace(JArray(v :: a.arr))
case _ => p.fail("expected field or array")
}
else {
vals.push(v)
root = Some(v)
}
}
do {
token = p.nextToken
token match {
case OpenObj => vals.push(JObject(Nil))
case FieldStart(name) => vals.push(JField(name, null))
case StringVal(x) => newValue(JString(x))
case IntVal(x) => newValue(JInt(x))
case DoubleVal(x) => newValue(JDouble(x))
case BoolVal(x) => newValue(JBool(x))
case NullVal => newValue(JNull)
case CloseObj => closeBlock(vals.pop(classOf[JValue]))
case OpenArr => vals.push(JArray(Nil))
case CloseArr => closeBlock(vals.pop(classOf[JArray]))
case End =>
}
} while (token != End)
root getOrElse JNothing
}
private val EOF = (-1).asInstanceOf[Char]
private class ValStack(parser: Parser) {
import java.util.LinkedList
private[this] val stack = new LinkedList[JValue]()
def pop[A <: JValue](expectedType: Class[A]) = convert(stack.poll, expectedType)
def push(v: JValue) = stack.addFirst(v)
def peek[A <: JValue](expectedType: Class[A]) = convert(stack.peek, expectedType)
def replace[A <: JValue](newTop: JValue) = stack.set(0, newTop)
private def convert[A <: JValue](x: JValue, expectedType: Class[A]): A = {
if (x == null) parser.fail("expected object or array")
try { x.asInstanceOf[A] } catch { case _: ClassCastException => parser.fail("unexpected " + x) }
}
def peekOption = if (stack isEmpty) None else Some(stack.peek)
def isEmpty = stack.isEmpty
}
class Parser(buf: Buffer) {
import java.util.LinkedList
private[this] val blocks = new LinkedList[BlockMode]()
private[this] var fieldNameMode = true
def fail(msg: String) = throw new ParseException(msg + "\nNear: " + buf.near, null)
/** Parse next Token from stream.
*/
def nextToken: Token = {
def isDelimiter(c: Char) = c == ' ' || c == '\n' || c == ',' || c == '\r' || c == '\t' || c == '}' || c == ']'
def parseString: String =
try {
unquote(buf)
} catch {
case p: ParseException => throw p
case _: Exception => fail("unexpected string end")
}
def parseValue(first: Char) = {
var wasInt = true
var doubleVal = false
val s = new StringBuilder
s.append(first)
while (wasInt) {
val c = buf.next
if (c == EOF) {
wasInt = false
} else if (c == '.' || c == 'e' || c == 'E') {
doubleVal = true
s.append(c)
} else if (!(Character.isDigit(c) || c == '.' || c == 'e' || c == 'E' || c == '-')) {
wasInt = false
buf.back
} else s.append(c)
}
val value = s.toString
if (doubleVal) DoubleVal(parseDouble(value))
else IntVal(BigInt(value))
}
while (true) {
buf.next match {
case c if EOF == c =>
buf.automaticClose
return End
case '{' =>
blocks.addFirst(OBJECT)
fieldNameMode = true
return OpenObj
case '}' =>
blocks.poll
return CloseObj
case '"' =>
if (fieldNameMode && blocks.peek == OBJECT) return FieldStart(parseString)
else {
fieldNameMode = true
return StringVal(parseString)
}
case 't' =>
fieldNameMode = true
if (buf.next == 'r' && buf.next == 'u' && buf.next == 'e') {
return BoolVal(true)
}
fail("expected boolean")
case 'f' =>
fieldNameMode = true
if (buf.next == 'a' && buf.next == 'l' && buf.next == 's' && buf.next == 'e') {
return BoolVal(false)
}
fail("expected boolean")
case 'n' =>
fieldNameMode = true
if (buf.next == 'u' && buf.next == 'l' && buf.next == 'l') {
return NullVal
}
fail("expected null")
case ':' =>
if (blocks.peek == ARRAY) fail("Colon in an invalid position")
fieldNameMode = false
case '[' =>
blocks.addFirst(ARRAY)
return OpenArr
case ']' =>
fieldNameMode = true
blocks.poll
return CloseArr
case c if Character.isDigit(c) || c == '-' =>
fieldNameMode = true
return parseValue(c)
case c if isDelimiter(c) =>
case c => fail("unknown token " + c)
}
}
buf.automaticClose
End
}
sealed abstract class BlockMode
case object ARRAY extends BlockMode
case object OBJECT extends BlockMode
}
/* Buffer used to parse JSON.
* Buffer is divided to one or more segments (preallocated in Segments pool).
*/
private[json] class Buffer(in: Reader, closeAutomatically: Boolean) {
var offset = 0
var curMark = -1
var curMarkSegment = -1
var eofIsFailure = false
private[this] var segments: List[Segment] = List(Segments.apply())
private[this] var segment: Array[Char] = segments.head.seg
private[this] var cur = 0 // Pointer which points current parsing location
private[this] var curSegmentIdx = 0 // Pointer which points current segment
def mark = { curMark = cur; curMarkSegment = curSegmentIdx }
def back = cur = cur-1
def next: Char = {
if (cur >= offset && read < 0) {
if (eofIsFailure) throw new ParseException("unexpected eof", null) else EOF
} else {
val c = segment(cur)
cur += 1
c
}
}
def substring = {
if (curSegmentIdx == curMarkSegment) new String(segment, curMark, cur-curMark-1)
else { // slower path for case when string is in two or more segments
var parts: List[(Int, Int, Array[Char])] = Nil
var i = curSegmentIdx
while (i >= curMarkSegment) {
val s = segments(i).seg
val start = if (i == curMarkSegment) curMark else 0
val end = if (i == curSegmentIdx) cur else s.length+1
parts = (start, end, s) :: parts
i = i-1
}
val len = parts.map(p => p._2 - p._1 - 1).foldLeft(0)(_ + _)
val chars = new Array[Char](len)
i = 0
var pos = 0
while (i < parts.size) {
val (start, end, b) = parts(i)
val partLen = end-start-1
System.arraycopy(b, start, chars, pos, partLen)
pos = pos + partLen
i = i+1
}
new String(chars)
}
}
def near = new String(segment, (cur-20) max 0, (cur + 1) min Segments.segmentSize)
def release = segments.foreach(Segments.release)
private[JsonParser] def automaticClose = if (closeAutomatically) in.close
private[this] def read = {
if (offset >= segment.length) {
val newSegment = Segments.apply()
offset = 0
segment = newSegment.seg
segments = segments ::: List(newSegment)
curSegmentIdx = segments.length - 1
}
val length = in.read(segment, offset, segment.length-offset)
if (length != -1) {
cur = offset
offset += length
length
} else -1
}
}
/* A pool of preallocated char arrays.
*/
private[json] object Segments {
import java.util.concurrent.ArrayBlockingQueue
import java.util.concurrent.atomic.AtomicInteger
private[json] var segmentSize = 1000
private[this] val maxNumOfSegments = 10000
private[this] var segmentCount = new AtomicInteger(0)
private[this] val segments = new ArrayBlockingQueue[Segment](maxNumOfSegments)
private[json] def clear = segments.clear
def apply(): Segment = {
val s = acquire
// Give back a disposable segment if pool is exhausted.
if (s != null) s else DisposableSegment(new Array(segmentSize))
}
private[this] def acquire: Segment = {
val curCount = segmentCount.get
val createNew =
if (segments.size == 0 && curCount < maxNumOfSegments)
segmentCount.compareAndSet(curCount, curCount + 1)
else false
if (createNew) RecycledSegment(new Array(segmentSize)) else segments.poll
}
def release(s: Segment) = s match {
case _: RecycledSegment => segments.offer(s)
case _ =>
}
}
sealed trait Segment {
val seg: Array[Char]
}
case class RecycledSegment(seg: Array[Char]) extends Segment
case class DisposableSegment(seg: Array[Char]) extends Segment
}
|
/*
* Copyright 2009-2010 WorldWide Conferencing, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.liftweb
package json
/** JSON parser.
*/
object JsonParser {
import java.io._
class ParseException(message: String, cause: Exception) extends Exception(message, cause)
/** Parsed tokens from low level pull parser.
*/
sealed abstract class Token
case object OpenObj extends Token
case object CloseObj extends Token
case class FieldStart(name: String) extends Token
case object End extends Token
case class StringVal(value: String) extends Token
case class IntVal(value: BigInt) extends Token
case class DoubleVal(value: Double) extends Token
case class BoolVal(value: Boolean) extends Token
case object NullVal extends Token
case object OpenArr extends Token
case object CloseArr extends Token
/** Return parsed JSON.
* @throws ParseException is thrown if parsing fails
*/
def parse(s: String): JValue = parse(new Buffer(new StringReader(s), false))
/** Return parsed JSON.
* @param closeAutomatically true (default) if the Reader is automatically closed on EOF
* @throws ParseException is thrown if parsing fails
*/
def parse(s: Reader, closeAutomatically: Boolean = true): JValue =
parse(new Buffer(s, closeAutomatically))
/** Return parsed JSON.
*/
def parseOpt(s: String): Option[JValue] =
try { parse(s).toOpt } catch { case e: Exception => None }
/** Return parsed JSON.
* @param closeAutomatically true (default) if the Reader is automatically closed on EOF
*/
def parseOpt(s: Reader, closeAutomatically: Boolean = true): Option[JValue] =
try { parse(s, closeAutomatically).toOpt } catch { case e: Exception => None }
/** Parse in pull parsing style.
* Use <code>p.nextToken</code> to parse tokens one by one from a string.
* @see net.liftweb.json.JsonParser.Token
*/
def parse[A](s: String, p: Parser => A): A = parse(new StringReader(s), p)
/** Parse in pull parsing style.
* Use <code>p.nextToken</code> to parse tokens one by one from a stream.
* The Reader must be closed when parsing is stopped.
* @see net.liftweb.json.JsonParser.Token
*/
def parse[A](s: Reader, p: Parser => A): A = p(new Parser(new Buffer(s, false)))
private def parse(buf: Buffer): JValue = {
try {
astParser(new Parser(buf))
} catch {
case e: ParseException => throw e
case e: Exception => throw new ParseException("parsing failed", e)
} finally { buf.release }
}
private[json] def unquote(string: String): String =
unquote(new JsonParser.Buffer(new java.io.StringReader(string), false))
private[json] def unquote(buf: JsonParser.Buffer): String = {
def unquote0(buf: JsonParser.Buffer, base: String): String = {
val s = new java.lang.StringBuilder(base)
var c = '\\'
while (c != '"') {
if (c == '\\') {
buf.next match {
case '"' => s.append('"')
case '\\' => s.append('\\')
case '/' => s.append('/')
case 'b' => s.append('\b')
case 'f' => s.append('\f')
case 'n' => s.append('\n')
case 'r' => s.append('\r')
case 't' => s.append('\t')
case 'u' =>
val chars = Array(buf.next, buf.next, buf.next, buf.next)
val codePoint = Integer.parseInt(new String(chars), 16)
s.appendCodePoint(codePoint)
case _ => s.append('\\')
}
} else s.append(c)
c = buf.next
}
s.toString
}
buf.eofIsFailure = true
buf.mark
var c = buf.next
while (c != '"') {
if (c == '\\') {
val s = unquote0(buf, buf.substring)
buf.eofIsFailure = false
return s
}
c = buf.next
}
buf.eofIsFailure = false
buf.substring
}
// FIXME fail fast to prevent infinite loop, see
// http://www.exploringbinary.com/java-hangs-when-converting-2-2250738585072012e-308/
private val BrokenDouble = BigDecimal("2.2250738585072012e-308")
private[json] def parseDouble(s: String) = {
val d = BigDecimal(s)
if (d == BrokenDouble) sys.error("Error parsing 2.2250738585072012e-308")
else d.doubleValue
}
private val astParser = (p: Parser) => {
val vals = new ValStack(p)
var token: Token = null
var root: Option[JValue] = None
// This is a slightly faster way to correct order of fields and arrays than using 'map'.
def reverse(v: JValue): JValue = v match {
case JObject(l) => JObject(l.map(reverse).asInstanceOf[List[JField]].reverse)
case JArray(l) => JArray(l.map(reverse).reverse)
case JField(name, value) => JField(name, reverse(value))
case x => x
}
def closeBlock(v: JValue) {
vals.peekOption match {
case Some(f: JField) =>
val field = vals.pop(classOf[JField])
val newField = JField(field.name, v)
val obj = vals.peek(classOf[JObject])
vals.replace(JObject(newField :: obj.obj))
case Some(o: JObject) => v match {
case x: JField => vals.replace(JObject(x :: o.obj))
case _ => p.fail("expected field but got " + v)
}
case Some(a: JArray) => vals.replace(JArray(v :: a.arr))
case Some(x) => p.fail("expected field, array or object but got " + x)
case None => root = Some(reverse(v))
}
}
def newValue(v: JValue) {
if (!vals.isEmpty)
vals.peek(classOf[JValue]) match {
case f: JField =>
vals.pop(classOf[JField])
val newField = JField(f.name, v)
val obj = vals.peek(classOf[JObject])
vals.replace(JObject(newField :: obj.obj))
case a: JArray => vals.replace(JArray(v :: a.arr))
case _ => p.fail("expected field or array")
}
else {
vals.push(v)
root = Some(v)
}
}
do {
token = p.nextToken
token match {
case OpenObj => vals.push(JObject(Nil))
case FieldStart(name) => vals.push(JField(name, null))
case StringVal(x) => newValue(JString(x))
case IntVal(x) => newValue(JInt(x))
case DoubleVal(x) => newValue(JDouble(x))
case BoolVal(x) => newValue(JBool(x))
case NullVal => newValue(JNull)
case CloseObj => closeBlock(vals.pop(classOf[JValue]))
case OpenArr => vals.push(JArray(Nil))
case CloseArr => closeBlock(vals.pop(classOf[JArray]))
case End =>
}
} while (token != End)
root getOrElse JNothing
}
private val EOF = (-1).asInstanceOf[Char]
private class ValStack(parser: Parser) {
import java.util.LinkedList
private[this] val stack = new LinkedList[JValue]()
def pop[A <: JValue](expectedType: Class[A]) = convert(stack.poll, expectedType)
def push(v: JValue) = stack.addFirst(v)
def peek[A <: JValue](expectedType: Class[A]) = convert(stack.peek, expectedType)
def replace[A <: JValue](newTop: JValue) = stack.set(0, newTop)
private def convert[A <: JValue](x: JValue, expectedType: Class[A]): A = {
if (x == null) parser.fail("expected object or array")
try { x.asInstanceOf[A] } catch { case _: ClassCastException => parser.fail("unexpected " + x) }
}
def peekOption = if (stack isEmpty) None else Some(stack.peek)
def isEmpty = stack.isEmpty
}
class Parser(buf: Buffer) {
import java.util.LinkedList
private[this] val blocks = new LinkedList[BlockMode]()
private[this] var fieldNameMode = true
def fail(msg: String) = throw new ParseException(msg + "\nNear: " + buf.near, null)
/** Parse next Token from stream.
*/
def nextToken: Token = {
def isDelimiter(c: Char) = c == ' ' || c == '\n' || c == ',' || c == '\r' || c == '\t' || c == '}' || c == ']'
def parseString: String =
try {
unquote(buf)
} catch {
case p: ParseException => throw p
case _: Exception => fail("unexpected string end")
}
def parseValue(first: Char) = {
var wasInt = true
var doubleVal = false
val s = new StringBuilder
s.append(first)
while (wasInt) {
val c = buf.next
if (c == EOF) {
wasInt = false
} else if (c == '.' || c == 'e' || c == 'E') {
doubleVal = true
s.append(c)
} else if (!(Character.isDigit(c) || c == '.' || c == 'e' || c == 'E' || c == '-')) {
wasInt = false
buf.back
} else s.append(c)
}
val value = s.toString
if (doubleVal) DoubleVal(parseDouble(value))
else IntVal(BigInt(value))
}
while (true) {
buf.next match {
case c if EOF == c =>
buf.automaticClose
return End
case '{' =>
blocks.addFirst(OBJECT)
fieldNameMode = true
return OpenObj
case '}' =>
blocks.poll
return CloseObj
case '"' =>
if (fieldNameMode && blocks.peek == OBJECT) return FieldStart(parseString)
else {
fieldNameMode = true
return StringVal(parseString)
}
case 't' =>
fieldNameMode = true
if (buf.next == 'r' && buf.next == 'u' && buf.next == 'e') {
return BoolVal(true)
}
fail("expected boolean")
case 'f' =>
fieldNameMode = true
if (buf.next == 'a' && buf.next == 'l' && buf.next == 's' && buf.next == 'e') {
return BoolVal(false)
}
fail("expected boolean")
case 'n' =>
fieldNameMode = true
if (buf.next == 'u' && buf.next == 'l' && buf.next == 'l') {
return NullVal
}
fail("expected null")
case ':' =>
if (blocks.peek == ARRAY) fail("Colon in an invalid position")
fieldNameMode = false
case '[' =>
blocks.addFirst(ARRAY)
return OpenArr
case ']' =>
fieldNameMode = true
blocks.poll
return CloseArr
case c if Character.isDigit(c) || c == '-' =>
fieldNameMode = true
return parseValue(c)
case c if isDelimiter(c) =>
case c => fail("unknown token " + c)
}
}
buf.automaticClose
End
}
sealed abstract class BlockMode
case object ARRAY extends BlockMode
case object OBJECT extends BlockMode
}
/* Buffer used to parse JSON.
* Buffer is divided to one or more segments (preallocated in Segments pool).
*/
private[json] class Buffer(in: Reader, closeAutomatically: Boolean) {
var offset = 0
var curMark = -1
var curMarkSegment = -1
var eofIsFailure = false
private[this] var segments: List[Segment] = List(Segments.apply())
private[this] var segment: Array[Char] = segments.head.seg
private[this] var cur = 0 // Pointer which points current parsing location
private[this] var curSegmentIdx = 0 // Pointer which points current segment
def mark = { curMark = cur; curMarkSegment = curSegmentIdx }
def back = cur = cur-1
def next: Char = {
if (cur >= offset && read < 0) {
if (eofIsFailure) throw new ParseException("unexpected eof", null) else EOF
} else {
val c = segment(cur)
cur += 1
c
}
}
def substring = {
if (curSegmentIdx == curMarkSegment) new String(segment, curMark, cur-curMark-1)
else { // slower path for case when string is in two or more segments
var parts: List[(Int, Int, Array[Char])] = Nil
var i = curSegmentIdx
while (i >= curMarkSegment) {
val s = segments(i).seg
val start = if (i == curMarkSegment) curMark else 0
val end = if (i == curSegmentIdx) cur else s.length+1
parts = (start, end, s) :: parts
i = i-1
}
val len = parts.map(p => p._2 - p._1 - 1).foldLeft(0)(_ + _)
val chars = new Array[Char](len)
i = 0
var pos = 0
while (i < parts.size) {
val (start, end, b) = parts(i)
val partLen = end-start-1
System.arraycopy(b, start, chars, pos, partLen)
pos = pos + partLen
i = i+1
}
new String(chars)
}
}
def near = {
val start = (cur - 20) max 0
val len = ((cur + 1) min Segments.segmentSize) - start
new String(segment, start, len)
}
def release = segments.foreach(Segments.release)
private[JsonParser] def automaticClose = if (closeAutomatically) in.close
private[this] def read = {
if (offset >= segment.length) {
val newSegment = Segments.apply()
offset = 0
segment = newSegment.seg
segments = segments ::: List(newSegment)
curSegmentIdx = segments.length - 1
}
val length = in.read(segment, offset, segment.length-offset)
if (length != -1) {
cur = offset
offset += length
length
} else -1
}
}
/* A pool of preallocated char arrays.
*/
private[json] object Segments {
import java.util.concurrent.ArrayBlockingQueue
import java.util.concurrent.atomic.AtomicInteger
private[json] var segmentSize = 1000
private[this] val maxNumOfSegments = 10000
private[this] var segmentCount = new AtomicInteger(0)
private[this] val segments = new ArrayBlockingQueue[Segment](maxNumOfSegments)
private[json] def clear = segments.clear
def apply(): Segment = {
val s = acquire
// Give back a disposable segment if pool is exhausted.
if (s != null) s else DisposableSegment(new Array(segmentSize))
}
private[this] def acquire: Segment = {
val curCount = segmentCount.get
val createNew =
if (segments.size == 0 && curCount < maxNumOfSegments)
segmentCount.compareAndSet(curCount, curCount + 1)
else false
if (createNew) RecycledSegment(new Array(segmentSize)) else segments.poll
}
def release(s: Segment) = s match {
case _: RecycledSegment => segments.offer(s)
case _ =>
}
}
sealed trait Segment {
val seg: Array[Char]
}
case class RecycledSegment(seg: Array[Char]) extends Segment
case class DisposableSegment(seg: Array[Char]) extends Segment
}
|
5702_0
|
crossvul
|
scala
|
CWE-119
|
Improper Restriction of Operations within the Bounds of a Memory Buffer - Certain languages allow direct addressing of memory locations and do not automatically ensure that these locations are valid for the memory buffer that is being referenced.
|
scala
|
/*
* Copyright 2009-2011 WorldWide Conferencing, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.liftweb
package json
import org.specs2.mutable.Specification
import org.specs2.ScalaCheck
import org.scalacheck.{Arbitrary, Gen}
import org.scalacheck.Prop._
/**
* System under specification for JSON Parser.
*/
object JsonParserSpec extends Specification with JValueGen with ScalaCheck {
"JSON Parser Specification".title
"Any valid json can be parsed" in {
val parsing = (json: JValue) => { parse(Printer.pretty(render(json))); true }
check(forAll(genJValue)(parsing))
}
"Buffer size does not change parsing result" in {
val bufSize = Gen.choose(2, 64)
val parsing = (x: JValue, s1: Int, s2: Int) => { parseVal(x, s1) == parseVal(x, s2) }
check(forAll(genObject, bufSize, bufSize)(parsing))
}
"Parsing is thread safe" in {
import java.util.concurrent._
val json = Examples.person
val executor = Executors.newFixedThreadPool(100)
val results = (0 to 100).map(_ =>
executor.submit(new Callable[JValue] { def call = parse(json) })).toList.map(_.get)
results.zip(results.tail).forall(pair => pair._1 == pair._2) mustEqual true
}
"All valid string escape characters can be parsed" in {
parse("[\"abc\\\"\\\\\\/\\b\\f\\n\\r\\t\\u00a0\"]") must_== JArray(JString("abc\"\\/\b\f\n\r\t\u00a0")::Nil)
}
"Unclosed string literal fails parsing" in {
parseOpt("{\"foo\":\"sd") mustEqual None
parseOpt("{\"foo\":\"sd}") mustEqual None
}
"The EOF has reached when the Reader returns EOF" in {
class StingyReader(s: String) extends java.io.StringReader(s) {
override def read(cbuf: Array[Char], off: Int, len: Int): Int = {
val c = read()
if (c == -1) -1
else {
cbuf(off) = c.toChar
1
}
}
}
val json = JsonParser.parse(new StingyReader(""" ["hello"] """))
json mustEqual JArray(JString("hello") :: Nil)
}
implicit def arbJValue: Arbitrary[JValue] = Arbitrary(genObject)
private def parseVal(json: JValue, bufSize: Int) = {
val existingSize = JsonParser.Segments.segmentSize
try {
JsonParser.Segments.segmentSize = bufSize
JsonParser.Segments.clear
JsonParser.parse(compact(render(json)))
} finally {
JsonParser.Segments.segmentSize = existingSize
}
}
}
|
/*
* Copyright 2009-2011 WorldWide Conferencing, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.liftweb
package json
import org.specs2.mutable.Specification
import org.specs2.ScalaCheck
import org.scalacheck.{Arbitrary, Gen}
import org.scalacheck.Prop._
/**
* System under specification for JSON Parser.
*/
object JsonParserSpec extends Specification with JValueGen with ScalaCheck {
private def parseBadThing(): String = try {
parse("""{"user":"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"<}""")
"x" * 1000
} catch {
case e: Throwable => e.getMessage
}
"JSON Parser Specification".title
"Any valid json can be parsed" in {
val parsing = (json: JValue) => { parse(Printer.pretty(render(json))); true }
check(forAll(genJValue)(parsing))
}
"Buffer size does not change parsing result" in {
val bufSize = Gen.choose(2, 64)
val parsing = (x: JValue, s1: Int, s2: Int) => { parseVal(x, s1) == parseVal(x, s2) }
check(forAll(genObject, bufSize, bufSize)(parsing))
}
"Parsing is thread safe" in {
import java.util.concurrent._
val json = Examples.person
val executor = Executors.newFixedThreadPool(100)
val results = (0 to 100).map(_ =>
executor.submit(new Callable[JValue] { def call = parse(json) })).toList.map(_.get)
results.zip(results.tail).forall(pair => pair._1 == pair._2) mustEqual true
}
"All valid string escape characters can be parsed" in {
parse("[\"abc\\\"\\\\\\/\\b\\f\\n\\r\\t\\u00a0\"]") must_== JArray(JString("abc\"\\/\b\f\n\r\t\u00a0")::Nil)
}
"Parser does not bleed prior results" in {
parse("""{"a": "now is the time for all good men to come to the aid of their dog and eat dog food with other dogs and bark and woof and do dog things. now is the time for all good men to come to the aid of their dog and eat dog food with other dogs and bark and woof and do dog things. now is the time for all good men to come to the aid of their dog and eat dog food with other dogs and bark and woof and do dog things. now is the time for all good men to come to the aid of their dog and eat dog food with other dogs and bark and woof and do dog things. now is the time for all good men to come to the aid of their dog and eat dog food with other dogs and bark and woof and do dog things. now is the time for all good men to come to the aid of their dog and eat dog food with other dogs and bark and woof and do dog things. now is the time for all good men to come to the aid of their dog and eat dog food with other dogs and bark and woof and do dog things.now is the time for all good men to come to the aid of their dog and eat dog food with other dogs and bark and woof and do dog things"}""")
val msg = parseBadThing()
msg.length must be_<=(50)
}
"Unclosed string literal fails parsing" in {
parseOpt("{\"foo\":\"sd") mustEqual None
parseOpt("{\"foo\":\"sd}") mustEqual None
}
"The EOF has reached when the Reader returns EOF" in {
class StingyReader(s: String) extends java.io.StringReader(s) {
override def read(cbuf: Array[Char], off: Int, len: Int): Int = {
val c = read()
if (c == -1) -1
else {
cbuf(off) = c.toChar
1
}
}
}
val json = JsonParser.parse(new StingyReader(""" ["hello"] """))
json mustEqual JArray(JString("hello") :: Nil)
}
implicit def arbJValue: Arbitrary[JValue] = Arbitrary(genObject)
private def parseVal(json: JValue, bufSize: Int) = {
val existingSize = JsonParser.Segments.segmentSize
try {
JsonParser.Segments.segmentSize = bufSize
JsonParser.Segments.clear
JsonParser.parse(compact(render(json)))
} finally {
JsonParser.Segments.segmentSize = existingSize
}
}
}
|
5702_1
|
crossvul
|
scala
|
CWE-119
|
Improper Restriction of Operations within the Bounds of a Memory Buffer - Certain languages allow direct addressing of memory locations and do not automatically ensure that these locations are valid for the memory buffer that is being referenced.
|
cpp
|
/*
* Copyright (c) 2018-present, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <fizz/record/PlaintextRecordLayer.h>
#include <folly/String.h>
namespace fizz {
using ContentTypeType = typename std::underlying_type<ContentType>::type;
using ProtocolVersionType =
typename std::underlying_type<ProtocolVersion>::type;
static constexpr uint16_t kMaxPlaintextRecordSize = 0x4000; // 16k
static constexpr size_t kPlaintextHeaderSize =
sizeof(ContentType) + sizeof(ProtocolVersion) + sizeof(uint16_t);
folly::Optional<TLSMessage> PlaintextReadRecordLayer::read(
folly::IOBufQueue& buf) {
while (true) {
folly::io::Cursor cursor(buf.front());
if (buf.empty() || !cursor.canAdvance(kPlaintextHeaderSize)) {
return folly::none;
}
TLSMessage msg;
msg.type = static_cast<ContentType>(cursor.readBE<ContentTypeType>());
if (skipEncryptedRecords_) {
if (msg.type == ContentType::application_data) {
cursor.skip(sizeof(ProtocolVersion));
auto length = cursor.readBE<uint16_t>();
if (buf.chainLength() < (cursor - buf.front()) + length) {
return folly::none;
}
length +=
sizeof(ContentType) + sizeof(ProtocolVersion) + sizeof(uint16_t);
buf.trimStart(length);
continue;
} else if (msg.type != ContentType::change_cipher_spec) {
skipEncryptedRecords_ = false;
}
}
switch (msg.type) {
case ContentType::handshake:
case ContentType::alert:
break;
case ContentType::change_cipher_spec:
break;
default:
throw std::runtime_error(folly::to<std::string>(
"received plaintext content type ",
static_cast<ContentTypeType>(msg.type),
", header: ",
folly::hexlify(buf.splitAtMost(10)->coalesce())));
}
receivedRecordVersion_ =
static_cast<ProtocolVersion>(cursor.readBE<ProtocolVersionType>());
auto length = cursor.readBE<uint16_t>();
if (length > kMaxPlaintextRecordSize) {
throw std::runtime_error("received too long plaintext record");
}
if (length == 0) {
throw std::runtime_error("received empty plaintext record");
}
if (buf.chainLength() < (cursor - buf.front()) + length) {
return folly::none;
}
cursor.clone(msg.fragment, length);
buf.trimStart(cursor - buf.front());
if (msg.type == ContentType::change_cipher_spec) {
msg.fragment->coalesce();
if (msg.fragment->length() == 1 && *msg.fragment->data() == 0x01) {
continue;
} else {
throw FizzException(
"received ccs", AlertDescription::illegal_parameter);
}
}
return std::move(msg);
}
}
EncryptionLevel PlaintextReadRecordLayer::getEncryptionLevel() const {
return EncryptionLevel::Plaintext;
}
TLSContent PlaintextWriteRecordLayer::write(TLSMessage&& msg) const {
return write(std::move(msg), ProtocolVersion::tls_1_2);
}
TLSContent PlaintextWriteRecordLayer::writeInitialClientHello(
Buf encodedClientHello) const {
return write(
TLSMessage{ContentType::handshake, std::move(encodedClientHello)},
ProtocolVersion::tls_1_0);
}
TLSContent PlaintextWriteRecordLayer::write(
TLSMessage msg,
ProtocolVersion recordVersion) const {
if (msg.type == ContentType::application_data) {
throw std::runtime_error("refusing to send plaintext application data");
}
auto fragment = std::move(msg.fragment);
folly::io::Cursor cursor(fragment.get());
std::unique_ptr<folly::IOBuf> data;
while (!cursor.isAtEnd()) {
Buf thisFragment;
auto len = cursor.cloneAtMost(thisFragment, kMaxPlaintextRecordSize);
auto header = folly::IOBuf::create(kPlaintextHeaderSize);
folly::io::Appender appender(header.get(), kPlaintextHeaderSize);
appender.writeBE(static_cast<ContentTypeType>(msg.type));
appender.writeBE(static_cast<ProtocolVersionType>(recordVersion));
appender.writeBE<uint16_t>(len);
if (!data) {
data = std::move(header);
} else {
data->prependChain(std::move(header));
}
data->prependChain(std::move(thisFragment));
}
TLSContent content;
content.data = std::move(data);
content.contentType = msg.type;
content.encryptionLevel = EncryptionLevel::Plaintext;
return content;
}
EncryptionLevel PlaintextWriteRecordLayer::getEncryptionLevel() const {
return EncryptionLevel::Plaintext;
}
} // namespace fizz
|
/*
* Copyright (c) 2018-present, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <fizz/record/PlaintextRecordLayer.h>
#include <folly/String.h>
namespace fizz {
using ContentTypeType = typename std::underlying_type<ContentType>::type;
using ProtocolVersionType =
typename std::underlying_type<ProtocolVersion>::type;
static constexpr uint16_t kMaxPlaintextRecordSize = 0x4000; // 16k
static constexpr size_t kPlaintextHeaderSize =
sizeof(ContentType) + sizeof(ProtocolVersion) + sizeof(uint16_t);
folly::Optional<TLSMessage> PlaintextReadRecordLayer::read(
folly::IOBufQueue& buf) {
while (true) {
folly::io::Cursor cursor(buf.front());
if (buf.empty() || !cursor.canAdvance(kPlaintextHeaderSize)) {
return folly::none;
}
TLSMessage msg;
msg.type = static_cast<ContentType>(cursor.readBE<ContentTypeType>());
if (skipEncryptedRecords_) {
if (msg.type == ContentType::application_data) {
cursor.skip(sizeof(ProtocolVersion));
auto length = cursor.readBE<uint16_t>();
if (buf.chainLength() < (cursor - buf.front()) + length) {
return folly::none;
}
buf.trimStart(static_cast<size_t>(kPlaintextHeaderSize) + length);
continue;
} else if (msg.type != ContentType::change_cipher_spec) {
skipEncryptedRecords_ = false;
}
}
switch (msg.type) {
case ContentType::handshake:
case ContentType::alert:
break;
case ContentType::change_cipher_spec:
break;
default:
throw std::runtime_error(folly::to<std::string>(
"received plaintext content type ",
static_cast<ContentTypeType>(msg.type),
", header: ",
folly::hexlify(buf.splitAtMost(10)->coalesce())));
}
receivedRecordVersion_ =
static_cast<ProtocolVersion>(cursor.readBE<ProtocolVersionType>());
auto length = cursor.readBE<uint16_t>();
if (length > kMaxPlaintextRecordSize) {
throw std::runtime_error("received too long plaintext record");
}
if (length == 0) {
throw std::runtime_error("received empty plaintext record");
}
if (buf.chainLength() < (cursor - buf.front()) + length) {
return folly::none;
}
cursor.clone(msg.fragment, length);
buf.trimStart(cursor - buf.front());
if (msg.type == ContentType::change_cipher_spec) {
msg.fragment->coalesce();
if (msg.fragment->length() == 1 && *msg.fragment->data() == 0x01) {
continue;
} else {
throw FizzException(
"received ccs", AlertDescription::illegal_parameter);
}
}
return std::move(msg);
}
}
EncryptionLevel PlaintextReadRecordLayer::getEncryptionLevel() const {
return EncryptionLevel::Plaintext;
}
TLSContent PlaintextWriteRecordLayer::write(TLSMessage&& msg) const {
return write(std::move(msg), ProtocolVersion::tls_1_2);
}
TLSContent PlaintextWriteRecordLayer::writeInitialClientHello(
Buf encodedClientHello) const {
return write(
TLSMessage{ContentType::handshake, std::move(encodedClientHello)},
ProtocolVersion::tls_1_0);
}
TLSContent PlaintextWriteRecordLayer::write(
TLSMessage msg,
ProtocolVersion recordVersion) const {
if (msg.type == ContentType::application_data) {
throw std::runtime_error("refusing to send plaintext application data");
}
auto fragment = std::move(msg.fragment);
folly::io::Cursor cursor(fragment.get());
std::unique_ptr<folly::IOBuf> data;
while (!cursor.isAtEnd()) {
Buf thisFragment;
auto len = cursor.cloneAtMost(thisFragment, kMaxPlaintextRecordSize);
auto header = folly::IOBuf::create(kPlaintextHeaderSize);
folly::io::Appender appender(header.get(), kPlaintextHeaderSize);
appender.writeBE(static_cast<ContentTypeType>(msg.type));
appender.writeBE(static_cast<ProtocolVersionType>(recordVersion));
appender.writeBE<uint16_t>(len);
if (!data) {
data = std::move(header);
} else {
data->prependChain(std::move(header));
}
data->prependChain(std::move(thisFragment));
}
TLSContent content;
content.data = std::move(data);
content.contentType = msg.type;
content.encryptionLevel = EncryptionLevel::Plaintext;
return content;
}
EncryptionLevel PlaintextWriteRecordLayer::getEncryptionLevel() const {
return EncryptionLevel::Plaintext;
}
} // namespace fizz
|
1382_0
|
crossvul
|
cpp
|
CWE-119
|
Improper Restriction of Operations within the Bounds of a Memory Buffer - Certain languages allow direct addressing of memory locations and do not automatically ensure that these locations are valid for the memory buffer that is being referenced.
|
cpp
|
/*
* Copyright (c) 2018-present, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <folly/portability/GMock.h>
#include <folly/portability/GTest.h>
#include <fizz/record/PlaintextRecordLayer.h>
#include <folly/String.h>
using namespace folly;
using namespace folly::io;
using testing::_;
using namespace testing;
namespace fizz {
namespace test {
class PlaintextRecordTest : public testing::Test {
protected:
PlaintextReadRecordLayer read_;
PlaintextWriteRecordLayer write_;
IOBufQueue queue_{IOBufQueue::cacheChainLength()};
IOBufEqualTo eq_;
Buf getBuf(const std::string& hex) {
auto data = unhexlify(hex);
return IOBuf::copyBuffer(data.data(), data.size());
}
void addToQueue(const std::string& hex) {
queue_.append(getBuf(hex));
}
void expectSame(const Buf& buf, const std::string& hex) {
auto str = buf->moveToFbString().toStdString();
EXPECT_EQ(hexlify(str), hex);
}
};
TEST_F(PlaintextRecordTest, TestReadEmpty) {
EXPECT_FALSE(read_.read(queue_).hasValue());
}
TEST_F(PlaintextRecordTest, TestReadHandshake) {
addToQueue("16030100050123456789");
auto msg = read_.read(queue_);
EXPECT_EQ(msg->type, ContentType::handshake);
expectSame(msg->fragment, "0123456789");
EXPECT_TRUE(queue_.empty());
}
TEST_F(PlaintextRecordTest, TestReadAlert) {
addToQueue("15030100050123456789");
auto msg = read_.read(queue_);
EXPECT_EQ(msg->type, ContentType::alert);
expectSame(msg->fragment, "0123456789");
EXPECT_TRUE(queue_.empty());
}
TEST_F(PlaintextRecordTest, TestReadAppData) {
addToQueue("17030100050123456789");
EXPECT_ANY_THROW(read_.read(queue_));
}
TEST_F(PlaintextRecordTest, TestWaitForData) {
addToQueue("160301000512345678");
EXPECT_FALSE(read_.read(queue_).hasValue());
EXPECT_EQ(queue_.chainLength(), 9);
}
TEST_F(PlaintextRecordTest, TestWaitForHeader) {
addToQueue("16030102");
EXPECT_FALSE(read_.read(queue_).hasValue());
EXPECT_EQ(queue_.chainLength(), 4);
}
TEST_F(PlaintextRecordTest, TestMaxSize) {
addToQueue("1603014000");
EXPECT_FALSE(read_.read(queue_).hasValue());
EXPECT_EQ(queue_.chainLength(), 5);
}
TEST_F(PlaintextRecordTest, TestOverSize) {
addToQueue("1603014001");
EXPECT_ANY_THROW(read_.read(queue_));
}
TEST_F(PlaintextRecordTest, TestEmpty) {
addToQueue("1603010000aa");
EXPECT_ANY_THROW(read_.read(queue_));
}
TEST_F(PlaintextRecordTest, TestDataRemaining) {
addToQueue("16030100050123456789160301");
auto msg = read_.read(queue_);
EXPECT_EQ(msg->type, ContentType::handshake);
expectSame(msg->fragment, "0123456789");
EXPECT_EQ(queue_.chainLength(), 3);
expectSame(queue_.move(), "160301");
}
TEST_F(PlaintextRecordTest, TestSkipAndWait) {
read_.setSkipEncryptedRecords(true);
addToQueue("17030100050123456789");
EXPECT_FALSE(read_.read(queue_).hasValue());
EXPECT_TRUE(queue_.empty());
}
TEST_F(PlaintextRecordTest, TestWaitBeforeSkip) {
read_.setSkipEncryptedRecords(true);
addToQueue("170301000501234567");
EXPECT_FALSE(read_.read(queue_).hasValue());
expectSame(queue_.move(), "170301000501234567");
}
TEST_F(PlaintextRecordTest, TestSkipAndRead) {
read_.setSkipEncryptedRecords(true);
addToQueue("170301000501234567891703010005012345678916030100050123456789");
auto msg = read_.read(queue_);
EXPECT_EQ(msg->type, ContentType::handshake);
expectSame(msg->fragment, "0123456789");
EXPECT_TRUE(queue_.empty());
}
TEST_F(PlaintextRecordTest, TestWriteHandshake) {
TLSMessage msg{ContentType::handshake, getBuf("1234567890")};
auto buf = write_.write(std::move(msg));
expectSame(buf.data, "16030300051234567890");
}
TEST_F(PlaintextRecordTest, TestWriteClientHello) {
auto buf = write_.writeInitialClientHello(getBuf("1234567890"));
expectSame(buf.data, "16030100051234567890");
}
TEST_F(PlaintextRecordTest, TestWriteAppData) {
TLSMessage msg{ContentType::application_data};
EXPECT_ANY_THROW(write_.write(std::move(msg)));
}
TEST_F(PlaintextRecordTest, TestFragmentedWrite) {
TLSMessage msg{ContentType::handshake, IOBuf::create(0)};
auto buf = IOBuf::create(0x4010);
buf->append(0x4010);
memset(buf->writableData(), 0x1, buf->length());
msg.fragment->prependChain(std::move(buf));
auto write = write_.write(std::move(msg));
TLSMessage msg1{ContentType::handshake, IOBuf::create(0)};
buf = IOBuf::create(0x4000);
buf->append(0x4000);
memset(buf->writableData(), 0x1, buf->length());
msg1.fragment->prependChain(std::move(buf));
auto write1 = write_.write(std::move(msg1));
TLSMessage msg2{ContentType::handshake, IOBuf::create(0)};
buf = IOBuf::create(0x10);
buf->append(0x10);
memset(buf->writableData(), 0x1, buf->length());
msg2.fragment->prependChain(std::move(buf));
auto write2 = write_.write(std::move(msg2));
write1.data->prependChain(std::move(write2.data));
IOBufEqualTo eq;
EXPECT_TRUE(eq(write.data, write1.data));
}
} // namespace test
} // namespace fizz
|
/*
* Copyright (c) 2018-present, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <folly/portability/GMock.h>
#include <folly/portability/GTest.h>
#include <fizz/record/PlaintextRecordLayer.h>
#include <folly/String.h>
using namespace folly;
using namespace folly::io;
using testing::_;
using namespace testing;
namespace fizz {
namespace test {
class PlaintextRecordTest : public testing::Test {
protected:
PlaintextReadRecordLayer read_;
PlaintextWriteRecordLayer write_;
IOBufQueue queue_{IOBufQueue::cacheChainLength()};
IOBufEqualTo eq_;
Buf getBuf(const std::string& hex) {
auto data = unhexlify(hex);
return IOBuf::copyBuffer(data.data(), data.size());
}
void addToQueue(const std::string& hex) {
queue_.append(getBuf(hex));
}
void expectSame(const Buf& buf, const std::string& hex) {
auto str = buf->moveToFbString().toStdString();
EXPECT_EQ(hexlify(str), hex);
}
};
TEST_F(PlaintextRecordTest, TestReadEmpty) {
EXPECT_FALSE(read_.read(queue_).hasValue());
}
TEST_F(PlaintextRecordTest, TestReadHandshake) {
addToQueue("16030100050123456789");
auto msg = read_.read(queue_);
EXPECT_EQ(msg->type, ContentType::handshake);
expectSame(msg->fragment, "0123456789");
EXPECT_TRUE(queue_.empty());
}
TEST_F(PlaintextRecordTest, TestReadAlert) {
addToQueue("15030100050123456789");
auto msg = read_.read(queue_);
EXPECT_EQ(msg->type, ContentType::alert);
expectSame(msg->fragment, "0123456789");
EXPECT_TRUE(queue_.empty());
}
TEST_F(PlaintextRecordTest, TestReadAppData) {
addToQueue("17030100050123456789");
EXPECT_ANY_THROW(read_.read(queue_));
}
TEST_F(PlaintextRecordTest, TestWaitForData) {
addToQueue("160301000512345678");
EXPECT_FALSE(read_.read(queue_).hasValue());
EXPECT_EQ(queue_.chainLength(), 9);
}
TEST_F(PlaintextRecordTest, TestWaitForHeader) {
addToQueue("16030102");
EXPECT_FALSE(read_.read(queue_).hasValue());
EXPECT_EQ(queue_.chainLength(), 4);
}
TEST_F(PlaintextRecordTest, TestMaxSize) {
addToQueue("1603014000");
EXPECT_FALSE(read_.read(queue_).hasValue());
EXPECT_EQ(queue_.chainLength(), 5);
}
TEST_F(PlaintextRecordTest, TestOverSize) {
addToQueue("1603014001");
EXPECT_ANY_THROW(read_.read(queue_));
}
TEST_F(PlaintextRecordTest, TestEmpty) {
addToQueue("1603010000aa");
EXPECT_ANY_THROW(read_.read(queue_));
}
TEST_F(PlaintextRecordTest, TestDataRemaining) {
addToQueue("16030100050123456789160301");
auto msg = read_.read(queue_);
EXPECT_EQ(msg->type, ContentType::handshake);
expectSame(msg->fragment, "0123456789");
EXPECT_EQ(queue_.chainLength(), 3);
expectSame(queue_.move(), "160301");
}
TEST_F(PlaintextRecordTest, TestSkipAndWait) {
read_.setSkipEncryptedRecords(true);
addToQueue("17030100050123456789");
EXPECT_FALSE(read_.read(queue_).hasValue());
EXPECT_TRUE(queue_.empty());
}
TEST_F(PlaintextRecordTest, TestSkipOversizedRecord) {
read_.setSkipEncryptedRecords(true);
addToQueue("170301fffb");
auto longBuf = IOBuf::create(0xfffb);
longBuf->append(0xfffb);
queue_.append(std::move(longBuf));
EXPECT_FALSE(read_.read(queue_).hasValue());
EXPECT_TRUE(queue_.empty());
}
TEST_F(PlaintextRecordTest, TestWaitBeforeSkip) {
read_.setSkipEncryptedRecords(true);
addToQueue("170301000501234567");
EXPECT_FALSE(read_.read(queue_).hasValue());
expectSame(queue_.move(), "170301000501234567");
}
TEST_F(PlaintextRecordTest, TestSkipAndRead) {
read_.setSkipEncryptedRecords(true);
addToQueue("170301000501234567891703010005012345678916030100050123456789");
auto msg = read_.read(queue_);
EXPECT_EQ(msg->type, ContentType::handshake);
expectSame(msg->fragment, "0123456789");
EXPECT_TRUE(queue_.empty());
}
TEST_F(PlaintextRecordTest, TestWriteHandshake) {
TLSMessage msg{ContentType::handshake, getBuf("1234567890")};
auto buf = write_.write(std::move(msg));
expectSame(buf.data, "16030300051234567890");
}
TEST_F(PlaintextRecordTest, TestWriteClientHello) {
auto buf = write_.writeInitialClientHello(getBuf("1234567890"));
expectSame(buf.data, "16030100051234567890");
}
TEST_F(PlaintextRecordTest, TestWriteAppData) {
TLSMessage msg{ContentType::application_data};
EXPECT_ANY_THROW(write_.write(std::move(msg)));
}
TEST_F(PlaintextRecordTest, TestFragmentedWrite) {
TLSMessage msg{ContentType::handshake, IOBuf::create(0)};
auto buf = IOBuf::create(0x4010);
buf->append(0x4010);
memset(buf->writableData(), 0x1, buf->length());
msg.fragment->prependChain(std::move(buf));
auto write = write_.write(std::move(msg));
TLSMessage msg1{ContentType::handshake, IOBuf::create(0)};
buf = IOBuf::create(0x4000);
buf->append(0x4000);
memset(buf->writableData(), 0x1, buf->length());
msg1.fragment->prependChain(std::move(buf));
auto write1 = write_.write(std::move(msg1));
TLSMessage msg2{ContentType::handshake, IOBuf::create(0)};
buf = IOBuf::create(0x10);
buf->append(0x10);
memset(buf->writableData(), 0x1, buf->length());
msg2.fragment->prependChain(std::move(buf));
auto write2 = write_.write(std::move(msg2));
write1.data->prependChain(std::move(write2.data));
IOBufEqualTo eq;
EXPECT_TRUE(eq(write.data, write1.data));
}
} // namespace test
} // namespace fizz
|
1382_1
|
crossvul
|
cpp
|
CWE-119
|
Improper Restriction of Operations within the Bounds of a Memory Buffer - Certain languages allow direct addressing of memory locations and do not automatically ensure that these locations are valid for the memory buffer that is being referenced.
|
cpp
|
/*
* Copyright 2017-present Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <wangle/codec/LineBasedFrameDecoder.h>
namespace wangle {
using folly::io::Cursor;
using folly::IOBuf;
using folly::IOBufQueue;
LineBasedFrameDecoder::LineBasedFrameDecoder(uint32_t maxLength,
bool stripDelimiter,
TerminatorType terminatorType)
: maxLength_(maxLength)
, stripDelimiter_(stripDelimiter)
, terminatorType_(terminatorType) {}
bool LineBasedFrameDecoder::decode(Context* ctx,
IOBufQueue& buf,
std::unique_ptr<IOBuf>& result,
size_t&) {
int64_t eol = findEndOfLine(buf);
if (!discarding_) {
if (eol >= 0) {
Cursor c(buf.front());
c += eol;
auto delimLength = c.read<char>() == '\r' ? 2 : 1;
if (eol > maxLength_) {
buf.split(eol + delimLength);
fail(ctx, folly::to<std::string>(eol));
return false;
}
std::unique_ptr<folly::IOBuf> frame;
if (stripDelimiter_) {
frame = buf.split(eol);
buf.trimStart(delimLength);
} else {
frame = buf.split(eol + delimLength);
}
result = std::move(frame);
return true;
} else {
auto len = buf.chainLength();
if (len > maxLength_) {
discardedBytes_ = len;
buf.trimStart(len);
discarding_ = true;
fail(ctx, "over " + folly::to<std::string>(len));
}
return false;
}
} else {
if (eol >= 0) {
Cursor c(buf.front());
c += eol;
auto delimLength = c.read<char>() == '\r' ? 2 : 1;
buf.trimStart(eol + delimLength);
discardedBytes_ = 0;
discarding_ = false;
} else {
discardedBytes_ = buf.chainLength();
buf.move();
}
return false;
}
}
void LineBasedFrameDecoder::fail(Context* ctx, std::string len) {
ctx->fireReadException(
folly::make_exception_wrapper<std::runtime_error>(
"frame length" + len +
" exeeds max " + folly::to<std::string>(maxLength_)));
}
int64_t LineBasedFrameDecoder::findEndOfLine(IOBufQueue& buf) {
Cursor c(buf.front());
for (uint32_t i = 0; i < maxLength_ && i < buf.chainLength(); i++) {
auto b = c.read<char>();
if (b == '\n' && terminatorType_ != TerminatorType::CARRIAGENEWLINE) {
return i;
} else if (terminatorType_ != TerminatorType::NEWLINE &&
b == '\r' && !c.isAtEnd() && c.read<char>() == '\n') {
return i;
}
}
return -1;
}
} // namespace wangle
|
/*
* Copyright 2017-present Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <wangle/codec/LineBasedFrameDecoder.h>
namespace wangle {
using folly::io::Cursor;
using folly::IOBuf;
using folly::IOBufQueue;
LineBasedFrameDecoder::LineBasedFrameDecoder(uint32_t maxLength,
bool stripDelimiter,
TerminatorType terminatorType)
: maxLength_(maxLength)
, stripDelimiter_(stripDelimiter)
, terminatorType_(terminatorType) {}
bool LineBasedFrameDecoder::decode(Context* ctx,
IOBufQueue& buf,
std::unique_ptr<IOBuf>& result,
size_t&) {
int64_t eol = findEndOfLine(buf);
if (!discarding_) {
if (eol >= 0) {
Cursor c(buf.front());
c += eol;
auto delimLength = c.read<char>() == '\r' ? 2 : 1;
if (eol > maxLength_) {
buf.split(eol + delimLength);
fail(ctx, folly::to<std::string>(eol));
return false;
}
std::unique_ptr<folly::IOBuf> frame;
if (stripDelimiter_) {
frame = buf.split(eol);
buf.trimStart(delimLength);
} else {
frame = buf.split(eol + delimLength);
}
result = std::move(frame);
return true;
} else {
auto len = buf.chainLength();
if (len > maxLength_) {
discardedBytes_ = len;
buf.trimStart(len);
discarding_ = true;
fail(ctx, "over " + folly::to<std::string>(len));
}
return false;
}
} else {
if (eol >= 0) {
Cursor c(buf.front());
c += eol;
auto delimLength = c.read<char>() == '\r' ? 2 : 1;
buf.trimStart(eol + delimLength);
discardedBytes_ = 0;
discarding_ = false;
} else {
discardedBytes_ = buf.chainLength();
buf.move();
}
return false;
}
}
void LineBasedFrameDecoder::fail(Context* ctx, std::string len) {
ctx->fireReadException(
folly::make_exception_wrapper<std::runtime_error>(
"frame length" + len +
" exeeds max " + folly::to<std::string>(maxLength_)));
}
int64_t LineBasedFrameDecoder::findEndOfLine(IOBufQueue& buf) {
Cursor c(buf.front());
for (uint32_t i = 0; i < maxLength_ && i < buf.chainLength(); i++) {
auto b = c.read<char>();
if (b == '\n' && terminatorType_ != TerminatorType::CARRIAGENEWLINE) {
return i;
} else if (
terminatorType_ != TerminatorType::NEWLINE && b == '\r' &&
!c.isAtEnd() && *c.peekBytes().data() == '\n') {
return i;
}
}
return -1;
}
} // namespace wangle
|
1384_0
|
crossvul
|
cpp
|
CWE-119
|
Improper Restriction of Operations within the Bounds of a Memory Buffer - Certain languages allow direct addressing of memory locations and do not automatically ensure that these locations are valid for the memory buffer that is being referenced.
|
cpp
|
/*
* Copyright 2017-present Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <folly/portability/GTest.h>
#include <wangle/codec/FixedLengthFrameDecoder.h>
#include <wangle/codec/LengthFieldBasedFrameDecoder.h>
#include <wangle/codec/LengthFieldPrepender.h>
#include <wangle/codec/LineBasedFrameDecoder.h>
#include <wangle/codec/test/CodecTestUtils.h>
using namespace folly;
using namespace wangle;
using namespace folly::io;
namespace {
auto createZeroedBuffer(size_t size) {
auto ret = IOBuf::create(size);
ret->append(size);
std::memset(ret->writableData(), 0x00, size);
return ret;
}
}
TEST(FixedLengthFrameDecoder, FailWhenLengthFieldEndOffset) {
auto pipeline = Pipeline<IOBufQueue&, std::unique_ptr<IOBuf>>::create();
int called = 0;
(*pipeline)
.addBack(FixedLengthFrameDecoder(10))
.addBack(test::FrameTester([&](std::unique_ptr<IOBuf> buf) {
auto sz = buf->computeChainDataLength();
called++;
EXPECT_EQ(sz, 10);
}))
.finalize();
auto buf3 = createZeroedBuffer(3);
auto buf11 = createZeroedBuffer(11);
auto buf16 = createZeroedBuffer(16);
IOBufQueue q(IOBufQueue::cacheChainLength());
q.append(std::move(buf3));
pipeline->read(q);
EXPECT_EQ(called, 0);
q.append(std::move(buf11));
pipeline->read(q);
EXPECT_EQ(called, 1);
q.append(std::move(buf16));
pipeline->read(q);
EXPECT_EQ(called, 3);
}
TEST(LengthFieldFramePipeline, SimpleTest) {
auto pipeline = Pipeline<IOBufQueue&, std::unique_ptr<IOBuf>>::create();
int called = 0;
(*pipeline)
.addBack(test::BytesReflector())
.addBack(LengthFieldPrepender())
.addBack(LengthFieldBasedFrameDecoder())
.addBack(test::FrameTester([&](std::unique_ptr<IOBuf> buf) {
auto sz = buf->computeChainDataLength();
called++;
EXPECT_EQ(sz, 2);
}))
.finalize();
auto buf = createZeroedBuffer(2);
pipeline->write(std::move(buf));
EXPECT_EQ(called, 1);
}
TEST(LengthFieldFramePipeline, LittleEndian) {
auto pipeline = Pipeline<IOBufQueue&, std::unique_ptr<IOBuf>>::create();
int called = 0;
(*pipeline)
.addBack(test::BytesReflector())
.addBack(LengthFieldBasedFrameDecoder(4, 100, 0, 0, 4, false))
.addBack(test::FrameTester([&](std::unique_ptr<IOBuf> buf) {
auto sz = buf->computeChainDataLength();
called++;
EXPECT_EQ(sz, 1);
}))
.addBack(LengthFieldPrepender(4, 0, false, false))
.finalize();
auto buf = createZeroedBuffer(1);
pipeline->write(std::move(buf));
EXPECT_EQ(called, 1);
}
TEST(LengthFieldFrameDecoder, Simple) {
auto pipeline = Pipeline<IOBufQueue&, std::unique_ptr<IOBuf>>::create();
int called = 0;
(*pipeline)
.addBack(LengthFieldBasedFrameDecoder())
.addBack(test::FrameTester([&](std::unique_ptr<IOBuf> buf) {
auto sz = buf->computeChainDataLength();
called++;
EXPECT_EQ(sz, 1);
}))
.finalize();
auto bufFrame = createZeroedBuffer(4);
RWPrivateCursor c(bufFrame.get());
c.writeBE((uint32_t)1);
auto bufData = createZeroedBuffer(1);
IOBufQueue q(IOBufQueue::cacheChainLength());
q.append(std::move(bufFrame));
pipeline->read(q);
EXPECT_EQ(called, 0);
q.append(std::move(bufData));
pipeline->read(q);
EXPECT_EQ(called, 1);
}
TEST(LengthFieldFrameDecoder, NoStrip) {
auto pipeline = Pipeline<IOBufQueue&, std::unique_ptr<IOBuf>>::create();
int called = 0;
(*pipeline)
.addBack(LengthFieldBasedFrameDecoder(2, 10, 0, 0, 0))
.addBack(test::FrameTester([&](std::unique_ptr<IOBuf> buf) {
auto sz = buf->computeChainDataLength();
called++;
EXPECT_EQ(sz, 3);
}))
.finalize();
auto bufFrame = createZeroedBuffer(2);
RWPrivateCursor c(bufFrame.get());
c.writeBE((uint16_t)1);
auto bufData = createZeroedBuffer(1);
IOBufQueue q(IOBufQueue::cacheChainLength());
q.append(std::move(bufFrame));
pipeline->read(q);
EXPECT_EQ(called, 0);
q.append(std::move(bufData));
pipeline->read(q);
EXPECT_EQ(called, 1);
}
TEST(LengthFieldFrameDecoder, Adjustment) {
auto pipeline = Pipeline<IOBufQueue&, std::unique_ptr<IOBuf>>::create();
int called = 0;
(*pipeline)
.addBack(LengthFieldBasedFrameDecoder(2, 10, 0, -2, 0))
.addBack(test::FrameTester([&](std::unique_ptr<IOBuf> buf) {
auto sz = buf->computeChainDataLength();
called++;
EXPECT_EQ(sz, 3);
}))
.finalize();
auto bufFrame = createZeroedBuffer(2);
RWPrivateCursor c(bufFrame.get());
c.writeBE((uint16_t)3); // includes frame size
auto bufData = createZeroedBuffer(1);
IOBufQueue q(IOBufQueue::cacheChainLength());
q.append(std::move(bufFrame));
pipeline->read(q);
EXPECT_EQ(called, 0);
q.append(std::move(bufData));
pipeline->read(q);
EXPECT_EQ(called, 1);
}
TEST(LengthFieldFrameDecoder, PreHeader) {
auto pipeline = Pipeline<IOBufQueue&, std::unique_ptr<IOBuf>>::create();
int called = 0;
(*pipeline)
.addBack(LengthFieldBasedFrameDecoder(2, 10, 2, 0, 0))
.addBack(test::FrameTester([&](std::unique_ptr<IOBuf> buf) {
auto sz = buf->computeChainDataLength();
called++;
EXPECT_EQ(sz, 5);
}))
.finalize();
auto bufFrame = createZeroedBuffer(4);
RWPrivateCursor c(bufFrame.get());
c.write((uint16_t)100); // header
c.writeBE((uint16_t)1); // frame size
auto bufData = createZeroedBuffer(1);
IOBufQueue q(IOBufQueue::cacheChainLength());
q.append(std::move(bufFrame));
pipeline->read(q);
EXPECT_EQ(called, 0);
q.append(std::move(bufData));
pipeline->read(q);
EXPECT_EQ(called, 1);
}
TEST(LengthFieldFrameDecoder, PostHeader) {
auto pipeline = Pipeline<IOBufQueue&, std::unique_ptr<IOBuf>>::create();
int called = 0;
(*pipeline)
.addBack(LengthFieldBasedFrameDecoder(2, 10, 0, 2, 0))
.addBack(test::FrameTester([&](std::unique_ptr<IOBuf> buf) {
auto sz = buf->computeChainDataLength();
called++;
EXPECT_EQ(sz, 5);
}))
.finalize();
auto bufFrame = createZeroedBuffer(4);
RWPrivateCursor c(bufFrame.get());
c.writeBE((uint16_t)1); // frame size
c.write((uint16_t)100); // header
auto bufData = createZeroedBuffer(1);
IOBufQueue q(IOBufQueue::cacheChainLength());
q.append(std::move(bufFrame));
pipeline->read(q);
EXPECT_EQ(called, 0);
q.append(std::move(bufData));
pipeline->read(q);
EXPECT_EQ(called, 1);
}
TEST(LengthFieldFrameDecoderStrip, PrePostHeader) {
auto pipeline = Pipeline<IOBufQueue&, std::unique_ptr<IOBuf>>::create();
int called = 0;
(*pipeline)
.addBack(LengthFieldBasedFrameDecoder(2, 10, 2, 2, 4))
.addBack(test::FrameTester([&](std::unique_ptr<IOBuf> buf) {
auto sz = buf->computeChainDataLength();
called++;
EXPECT_EQ(sz, 3);
}))
.finalize();
auto bufFrame = createZeroedBuffer(6);
RWPrivateCursor c(bufFrame.get());
c.write((uint16_t)100); // pre header
c.writeBE((uint16_t)1); // frame size
c.write((uint16_t)100); // post header
auto bufData = createZeroedBuffer(1);
IOBufQueue q(IOBufQueue::cacheChainLength());
q.append(std::move(bufFrame));
pipeline->read(q);
EXPECT_EQ(called, 0);
q.append(std::move(bufData));
pipeline->read(q);
EXPECT_EQ(called, 1);
}
TEST(LengthFieldFrameDecoder, StripPrePostHeaderFrameInclHeader) {
auto pipeline = Pipeline<IOBufQueue&, std::unique_ptr<IOBuf>>::create();
int called = 0;
(*pipeline)
.addBack(LengthFieldBasedFrameDecoder(2, 10, 2, -2, 4))
.addBack(test::FrameTester([&](std::unique_ptr<IOBuf> buf) {
auto sz = buf->computeChainDataLength();
called++;
EXPECT_EQ(sz, 3);
}))
.finalize();
auto bufFrame = createZeroedBuffer(6);
RWPrivateCursor c(bufFrame.get());
c.write((uint16_t)100); // pre header
c.writeBE((uint16_t)5); // frame size
c.write((uint16_t)100); // post header
auto bufData = createZeroedBuffer(1);
IOBufQueue q(IOBufQueue::cacheChainLength());
q.append(std::move(bufFrame));
pipeline->read(q);
EXPECT_EQ(called, 0);
q.append(std::move(bufData));
pipeline->read(q);
EXPECT_EQ(called, 1);
}
TEST(LengthFieldFrameDecoder, FailTestLengthFieldEndOffset) {
auto pipeline = Pipeline<IOBufQueue&, std::unique_ptr<IOBuf>>::create();
int called = 0;
(*pipeline)
.addBack(LengthFieldBasedFrameDecoder(4, 10, 4, -2, 4))
.addBack(test::FrameTester([&](std::unique_ptr<IOBuf> buf) {
ASSERT_EQ(nullptr, buf);
called++;
}))
.finalize();
auto bufFrame = createZeroedBuffer(8);
RWPrivateCursor c(bufFrame.get());
c.writeBE((uint32_t)0); // frame size
c.write((uint32_t)0); // crap
IOBufQueue q(IOBufQueue::cacheChainLength());
q.append(std::move(bufFrame));
pipeline->read(q);
EXPECT_EQ(called, 1);
}
TEST(LengthFieldFrameDecoder, FailTestLengthFieldFrameSize) {
auto pipeline = Pipeline<IOBufQueue&, std::unique_ptr<IOBuf>>::create();
int called = 0;
(*pipeline)
.addBack(LengthFieldBasedFrameDecoder(4, 10, 0, 0, 4))
.addBack(test::FrameTester([&](std::unique_ptr<IOBuf> buf) {
ASSERT_EQ(nullptr, buf);
called++;
}))
.finalize();
auto bufFrame = createZeroedBuffer(16);
RWPrivateCursor c(bufFrame.get());
c.writeBE((uint32_t)12); // frame size
c.write((uint32_t)0); // nothing
c.write((uint32_t)0); // nothing
c.write((uint32_t)0); // nothing
IOBufQueue q(IOBufQueue::cacheChainLength());
q.append(std::move(bufFrame));
pipeline->read(q);
EXPECT_EQ(called, 1);
}
TEST(LengthFieldFrameDecoder, FailTestLengthFieldInitialBytes) {
auto pipeline = Pipeline<IOBufQueue&, std::unique_ptr<IOBuf>>::create();
int called = 0;
(*pipeline)
.addBack(LengthFieldBasedFrameDecoder(4, 10, 0, 0, 10))
.addBack(test::FrameTester([&](std::unique_ptr<IOBuf> buf) {
ASSERT_EQ(nullptr, buf);
called++;
}))
.finalize();
auto bufFrame = createZeroedBuffer(16);
RWPrivateCursor c(bufFrame.get());
c.writeBE((uint32_t)4); // frame size
c.write((uint32_t)0); // nothing
c.write((uint32_t)0); // nothing
c.write((uint32_t)0); // nothing
IOBufQueue q(IOBufQueue::cacheChainLength());
q.append(std::move(bufFrame));
pipeline->read(q);
EXPECT_EQ(called, 1);
}
TEST(LengthFieldFrameDecoder, FailTestNotEnoughBytes) {
auto pipeline = Pipeline<IOBufQueue&, std::unique_ptr<IOBuf>>::create();
int called = 0;
(*pipeline)
.addBack(LengthFieldBasedFrameDecoder(4, 10, 0, 0, 0))
.addBack(test::FrameTester([&](std::unique_ptr<IOBuf> buf) {
ASSERT_EQ(nullptr, buf);
called++;
}))
.finalize();
auto bufFrame = createZeroedBuffer(16);
RWPrivateCursor c(bufFrame.get());
c.writeBE((uint32_t)7); // frame size - 1 byte too large (7 > 10 - 4)
c.write((uint32_t)0); // nothing
c.write((uint32_t)0); // nothing
c.write((uint32_t)0); // nothing
IOBufQueue q(IOBufQueue::cacheChainLength());
q.append(std::move(bufFrame));
pipeline->read(q);
EXPECT_EQ(called, 1);
}
TEST(LineBasedFrameDecoder, Simple) {
auto pipeline = Pipeline<IOBufQueue&, std::unique_ptr<IOBuf>>::create();
int called = 0;
(*pipeline)
.addBack(LineBasedFrameDecoder(10))
.addBack(test::FrameTester([&](std::unique_ptr<IOBuf> buf) {
auto sz = buf->computeChainDataLength();
called++;
EXPECT_EQ(sz, 3);
}))
.finalize();
auto buf = createZeroedBuffer(3);
IOBufQueue q(IOBufQueue::cacheChainLength());
q.append(std::move(buf));
pipeline->read(q);
EXPECT_EQ(called, 0);
buf = createZeroedBuffer(1);
RWPrivateCursor c(buf.get());
c.write<char>('\n');
q.append(std::move(buf));
pipeline->read(q);
EXPECT_EQ(called, 1);
buf = createZeroedBuffer(4);
RWPrivateCursor c1(buf.get());
c1.write(' ');
c1.write(' ');
c1.write(' ');
c1.write('\r');
q.append(std::move(buf));
pipeline->read(q);
EXPECT_EQ(called, 1);
buf = createZeroedBuffer(1);
RWPrivateCursor c2(buf.get());
c2.write('\n');
q.append(std::move(buf));
pipeline->read(q);
EXPECT_EQ(called, 2);
}
TEST(LineBasedFrameDecoder, SaveDelimiter) {
auto pipeline = Pipeline<IOBufQueue&, std::unique_ptr<IOBuf>>::create();
int called = 0;
(*pipeline)
.addBack(LineBasedFrameDecoder(10, false))
.addBack(test::FrameTester([&](std::unique_ptr<IOBuf> buf) {
auto sz = buf->computeChainDataLength();
called++;
EXPECT_EQ(sz, 4);
}))
.finalize();
auto buf = createZeroedBuffer(3);
IOBufQueue q(IOBufQueue::cacheChainLength());
q.append(std::move(buf));
pipeline->read(q);
EXPECT_EQ(called, 0);
buf = createZeroedBuffer(1);
RWPrivateCursor c(buf.get());
c.write<char>('\n');
q.append(std::move(buf));
pipeline->read(q);
EXPECT_EQ(called, 1);
buf = createZeroedBuffer(3);
RWPrivateCursor c1(buf.get());
c1.write(' ');
c1.write(' ');
c1.write('\r');
q.append(std::move(buf));
pipeline->read(q);
EXPECT_EQ(called, 1);
buf = createZeroedBuffer(1);
RWPrivateCursor c2(buf.get());
c2.write('\n');
q.append(std::move(buf));
pipeline->read(q);
EXPECT_EQ(called, 2);
}
TEST(LineBasedFrameDecoder, Fail) {
auto pipeline = Pipeline<IOBufQueue&, std::unique_ptr<IOBuf>>::create();
int called = 0;
(*pipeline)
.addBack(LineBasedFrameDecoder(10))
.addBack(test::FrameTester([&](std::unique_ptr<IOBuf> buf) {
ASSERT_EQ(nullptr, buf);
called++;
}))
.finalize();
auto buf = createZeroedBuffer(11);
IOBufQueue q(IOBufQueue::cacheChainLength());
q.append(std::move(buf));
pipeline->read(q);
EXPECT_EQ(called, 1);
buf = createZeroedBuffer(1);
q.append(std::move(buf));
pipeline->read(q);
EXPECT_EQ(called, 1);
buf = createZeroedBuffer(2);
RWPrivateCursor c(buf.get());
c.write(' ');
c.write<char>('\n');
q.append(std::move(buf));
pipeline->read(q);
EXPECT_EQ(called, 1);
buf = createZeroedBuffer(12);
RWPrivateCursor c2(buf.get());
for (int i = 0; i < 11; i++) {
c2.write(' ');
}
c2.write<char>('\n');
q.append(std::move(buf));
pipeline->read(q);
EXPECT_EQ(called, 2);
}
TEST(LineBasedFrameDecoder, NewLineOnly) {
auto pipeline = Pipeline<IOBufQueue&, std::unique_ptr<IOBuf>>::create();
int called = 0;
(*pipeline)
.addBack(LineBasedFrameDecoder(
10, true, LineBasedFrameDecoder::TerminatorType::NEWLINE))
.addBack(test::FrameTester([&](std::unique_ptr<IOBuf> buf) {
auto sz = buf->computeChainDataLength();
called++;
EXPECT_EQ(sz, 1);
}))
.finalize();
auto buf = createZeroedBuffer(2);
RWPrivateCursor c(buf.get());
c.write<char>('\r');
c.write<char>('\n');
IOBufQueue q(IOBufQueue::cacheChainLength());
q.append(std::move(buf));
pipeline->read(q);
EXPECT_EQ(called, 1);
}
TEST(LineBasedFrameDecoder, CarriageNewLineOnly) {
auto pipeline = Pipeline<IOBufQueue&, std::unique_ptr<IOBuf>>::create();
int called = 0;
(*pipeline)
.addBack(LineBasedFrameDecoder(
10, true, LineBasedFrameDecoder::TerminatorType::CARRIAGENEWLINE))
.addBack(test::FrameTester([&](std::unique_ptr<IOBuf> buf) {
auto sz = buf->computeChainDataLength();
called++;
EXPECT_EQ(sz, 1);
}))
.finalize();
auto buf = createZeroedBuffer(3);
RWPrivateCursor c(buf.get());
c.write<char>('\n');
c.write<char>('\r');
c.write<char>('\n');
IOBufQueue q(IOBufQueue::cacheChainLength());
q.append(std::move(buf));
pipeline->read(q);
EXPECT_EQ(called, 1);
}
|
/*
* Copyright 2017-present Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <folly/portability/GTest.h>
#include <wangle/codec/FixedLengthFrameDecoder.h>
#include <wangle/codec/LengthFieldBasedFrameDecoder.h>
#include <wangle/codec/LengthFieldPrepender.h>
#include <wangle/codec/LineBasedFrameDecoder.h>
#include <wangle/codec/test/CodecTestUtils.h>
using namespace folly;
using namespace wangle;
using namespace folly::io;
namespace {
auto createZeroedBuffer(size_t size) {
auto ret = IOBuf::create(size);
ret->append(size);
std::memset(ret->writableData(), 0x00, size);
return ret;
}
}
TEST(FixedLengthFrameDecoder, FailWhenLengthFieldEndOffset) {
auto pipeline = Pipeline<IOBufQueue&, std::unique_ptr<IOBuf>>::create();
int called = 0;
(*pipeline)
.addBack(FixedLengthFrameDecoder(10))
.addBack(test::FrameTester([&](std::unique_ptr<IOBuf> buf) {
auto sz = buf->computeChainDataLength();
called++;
EXPECT_EQ(sz, 10);
}))
.finalize();
auto buf3 = createZeroedBuffer(3);
auto buf11 = createZeroedBuffer(11);
auto buf16 = createZeroedBuffer(16);
IOBufQueue q(IOBufQueue::cacheChainLength());
q.append(std::move(buf3));
pipeline->read(q);
EXPECT_EQ(called, 0);
q.append(std::move(buf11));
pipeline->read(q);
EXPECT_EQ(called, 1);
q.append(std::move(buf16));
pipeline->read(q);
EXPECT_EQ(called, 3);
}
TEST(LengthFieldFramePipeline, SimpleTest) {
auto pipeline = Pipeline<IOBufQueue&, std::unique_ptr<IOBuf>>::create();
int called = 0;
(*pipeline)
.addBack(test::BytesReflector())
.addBack(LengthFieldPrepender())
.addBack(LengthFieldBasedFrameDecoder())
.addBack(test::FrameTester([&](std::unique_ptr<IOBuf> buf) {
auto sz = buf->computeChainDataLength();
called++;
EXPECT_EQ(sz, 2);
}))
.finalize();
auto buf = createZeroedBuffer(2);
pipeline->write(std::move(buf));
EXPECT_EQ(called, 1);
}
TEST(LengthFieldFramePipeline, LittleEndian) {
auto pipeline = Pipeline<IOBufQueue&, std::unique_ptr<IOBuf>>::create();
int called = 0;
(*pipeline)
.addBack(test::BytesReflector())
.addBack(LengthFieldBasedFrameDecoder(4, 100, 0, 0, 4, false))
.addBack(test::FrameTester([&](std::unique_ptr<IOBuf> buf) {
auto sz = buf->computeChainDataLength();
called++;
EXPECT_EQ(sz, 1);
}))
.addBack(LengthFieldPrepender(4, 0, false, false))
.finalize();
auto buf = createZeroedBuffer(1);
pipeline->write(std::move(buf));
EXPECT_EQ(called, 1);
}
TEST(LengthFieldFrameDecoder, Simple) {
auto pipeline = Pipeline<IOBufQueue&, std::unique_ptr<IOBuf>>::create();
int called = 0;
(*pipeline)
.addBack(LengthFieldBasedFrameDecoder())
.addBack(test::FrameTester([&](std::unique_ptr<IOBuf> buf) {
auto sz = buf->computeChainDataLength();
called++;
EXPECT_EQ(sz, 1);
}))
.finalize();
auto bufFrame = createZeroedBuffer(4);
RWPrivateCursor c(bufFrame.get());
c.writeBE((uint32_t)1);
auto bufData = createZeroedBuffer(1);
IOBufQueue q(IOBufQueue::cacheChainLength());
q.append(std::move(bufFrame));
pipeline->read(q);
EXPECT_EQ(called, 0);
q.append(std::move(bufData));
pipeline->read(q);
EXPECT_EQ(called, 1);
}
TEST(LengthFieldFrameDecoder, NoStrip) {
auto pipeline = Pipeline<IOBufQueue&, std::unique_ptr<IOBuf>>::create();
int called = 0;
(*pipeline)
.addBack(LengthFieldBasedFrameDecoder(2, 10, 0, 0, 0))
.addBack(test::FrameTester([&](std::unique_ptr<IOBuf> buf) {
auto sz = buf->computeChainDataLength();
called++;
EXPECT_EQ(sz, 3);
}))
.finalize();
auto bufFrame = createZeroedBuffer(2);
RWPrivateCursor c(bufFrame.get());
c.writeBE((uint16_t)1);
auto bufData = createZeroedBuffer(1);
IOBufQueue q(IOBufQueue::cacheChainLength());
q.append(std::move(bufFrame));
pipeline->read(q);
EXPECT_EQ(called, 0);
q.append(std::move(bufData));
pipeline->read(q);
EXPECT_EQ(called, 1);
}
TEST(LengthFieldFrameDecoder, Adjustment) {
auto pipeline = Pipeline<IOBufQueue&, std::unique_ptr<IOBuf>>::create();
int called = 0;
(*pipeline)
.addBack(LengthFieldBasedFrameDecoder(2, 10, 0, -2, 0))
.addBack(test::FrameTester([&](std::unique_ptr<IOBuf> buf) {
auto sz = buf->computeChainDataLength();
called++;
EXPECT_EQ(sz, 3);
}))
.finalize();
auto bufFrame = createZeroedBuffer(2);
RWPrivateCursor c(bufFrame.get());
c.writeBE((uint16_t)3); // includes frame size
auto bufData = createZeroedBuffer(1);
IOBufQueue q(IOBufQueue::cacheChainLength());
q.append(std::move(bufFrame));
pipeline->read(q);
EXPECT_EQ(called, 0);
q.append(std::move(bufData));
pipeline->read(q);
EXPECT_EQ(called, 1);
}
TEST(LengthFieldFrameDecoder, PreHeader) {
auto pipeline = Pipeline<IOBufQueue&, std::unique_ptr<IOBuf>>::create();
int called = 0;
(*pipeline)
.addBack(LengthFieldBasedFrameDecoder(2, 10, 2, 0, 0))
.addBack(test::FrameTester([&](std::unique_ptr<IOBuf> buf) {
auto sz = buf->computeChainDataLength();
called++;
EXPECT_EQ(sz, 5);
}))
.finalize();
auto bufFrame = createZeroedBuffer(4);
RWPrivateCursor c(bufFrame.get());
c.write((uint16_t)100); // header
c.writeBE((uint16_t)1); // frame size
auto bufData = createZeroedBuffer(1);
IOBufQueue q(IOBufQueue::cacheChainLength());
q.append(std::move(bufFrame));
pipeline->read(q);
EXPECT_EQ(called, 0);
q.append(std::move(bufData));
pipeline->read(q);
EXPECT_EQ(called, 1);
}
TEST(LengthFieldFrameDecoder, PostHeader) {
auto pipeline = Pipeline<IOBufQueue&, std::unique_ptr<IOBuf>>::create();
int called = 0;
(*pipeline)
.addBack(LengthFieldBasedFrameDecoder(2, 10, 0, 2, 0))
.addBack(test::FrameTester([&](std::unique_ptr<IOBuf> buf) {
auto sz = buf->computeChainDataLength();
called++;
EXPECT_EQ(sz, 5);
}))
.finalize();
auto bufFrame = createZeroedBuffer(4);
RWPrivateCursor c(bufFrame.get());
c.writeBE((uint16_t)1); // frame size
c.write((uint16_t)100); // header
auto bufData = createZeroedBuffer(1);
IOBufQueue q(IOBufQueue::cacheChainLength());
q.append(std::move(bufFrame));
pipeline->read(q);
EXPECT_EQ(called, 0);
q.append(std::move(bufData));
pipeline->read(q);
EXPECT_EQ(called, 1);
}
TEST(LengthFieldFrameDecoderStrip, PrePostHeader) {
auto pipeline = Pipeline<IOBufQueue&, std::unique_ptr<IOBuf>>::create();
int called = 0;
(*pipeline)
.addBack(LengthFieldBasedFrameDecoder(2, 10, 2, 2, 4))
.addBack(test::FrameTester([&](std::unique_ptr<IOBuf> buf) {
auto sz = buf->computeChainDataLength();
called++;
EXPECT_EQ(sz, 3);
}))
.finalize();
auto bufFrame = createZeroedBuffer(6);
RWPrivateCursor c(bufFrame.get());
c.write((uint16_t)100); // pre header
c.writeBE((uint16_t)1); // frame size
c.write((uint16_t)100); // post header
auto bufData = createZeroedBuffer(1);
IOBufQueue q(IOBufQueue::cacheChainLength());
q.append(std::move(bufFrame));
pipeline->read(q);
EXPECT_EQ(called, 0);
q.append(std::move(bufData));
pipeline->read(q);
EXPECT_EQ(called, 1);
}
TEST(LengthFieldFrameDecoder, StripPrePostHeaderFrameInclHeader) {
auto pipeline = Pipeline<IOBufQueue&, std::unique_ptr<IOBuf>>::create();
int called = 0;
(*pipeline)
.addBack(LengthFieldBasedFrameDecoder(2, 10, 2, -2, 4))
.addBack(test::FrameTester([&](std::unique_ptr<IOBuf> buf) {
auto sz = buf->computeChainDataLength();
called++;
EXPECT_EQ(sz, 3);
}))
.finalize();
auto bufFrame = createZeroedBuffer(6);
RWPrivateCursor c(bufFrame.get());
c.write((uint16_t)100); // pre header
c.writeBE((uint16_t)5); // frame size
c.write((uint16_t)100); // post header
auto bufData = createZeroedBuffer(1);
IOBufQueue q(IOBufQueue::cacheChainLength());
q.append(std::move(bufFrame));
pipeline->read(q);
EXPECT_EQ(called, 0);
q.append(std::move(bufData));
pipeline->read(q);
EXPECT_EQ(called, 1);
}
TEST(LengthFieldFrameDecoder, FailTestLengthFieldEndOffset) {
auto pipeline = Pipeline<IOBufQueue&, std::unique_ptr<IOBuf>>::create();
int called = 0;
(*pipeline)
.addBack(LengthFieldBasedFrameDecoder(4, 10, 4, -2, 4))
.addBack(test::FrameTester([&](std::unique_ptr<IOBuf> buf) {
ASSERT_EQ(nullptr, buf);
called++;
}))
.finalize();
auto bufFrame = createZeroedBuffer(8);
RWPrivateCursor c(bufFrame.get());
c.writeBE((uint32_t)0); // frame size
c.write((uint32_t)0); // crap
IOBufQueue q(IOBufQueue::cacheChainLength());
q.append(std::move(bufFrame));
pipeline->read(q);
EXPECT_EQ(called, 1);
}
TEST(LengthFieldFrameDecoder, FailTestLengthFieldFrameSize) {
auto pipeline = Pipeline<IOBufQueue&, std::unique_ptr<IOBuf>>::create();
int called = 0;
(*pipeline)
.addBack(LengthFieldBasedFrameDecoder(4, 10, 0, 0, 4))
.addBack(test::FrameTester([&](std::unique_ptr<IOBuf> buf) {
ASSERT_EQ(nullptr, buf);
called++;
}))
.finalize();
auto bufFrame = createZeroedBuffer(16);
RWPrivateCursor c(bufFrame.get());
c.writeBE((uint32_t)12); // frame size
c.write((uint32_t)0); // nothing
c.write((uint32_t)0); // nothing
c.write((uint32_t)0); // nothing
IOBufQueue q(IOBufQueue::cacheChainLength());
q.append(std::move(bufFrame));
pipeline->read(q);
EXPECT_EQ(called, 1);
}
TEST(LengthFieldFrameDecoder, FailTestLengthFieldInitialBytes) {
auto pipeline = Pipeline<IOBufQueue&, std::unique_ptr<IOBuf>>::create();
int called = 0;
(*pipeline)
.addBack(LengthFieldBasedFrameDecoder(4, 10, 0, 0, 10))
.addBack(test::FrameTester([&](std::unique_ptr<IOBuf> buf) {
ASSERT_EQ(nullptr, buf);
called++;
}))
.finalize();
auto bufFrame = createZeroedBuffer(16);
RWPrivateCursor c(bufFrame.get());
c.writeBE((uint32_t)4); // frame size
c.write((uint32_t)0); // nothing
c.write((uint32_t)0); // nothing
c.write((uint32_t)0); // nothing
IOBufQueue q(IOBufQueue::cacheChainLength());
q.append(std::move(bufFrame));
pipeline->read(q);
EXPECT_EQ(called, 1);
}
TEST(LengthFieldFrameDecoder, FailTestNotEnoughBytes) {
auto pipeline = Pipeline<IOBufQueue&, std::unique_ptr<IOBuf>>::create();
int called = 0;
(*pipeline)
.addBack(LengthFieldBasedFrameDecoder(4, 10, 0, 0, 0))
.addBack(test::FrameTester([&](std::unique_ptr<IOBuf> buf) {
ASSERT_EQ(nullptr, buf);
called++;
}))
.finalize();
auto bufFrame = createZeroedBuffer(16);
RWPrivateCursor c(bufFrame.get());
c.writeBE((uint32_t)7); // frame size - 1 byte too large (7 > 10 - 4)
c.write((uint32_t)0); // nothing
c.write((uint32_t)0); // nothing
c.write((uint32_t)0); // nothing
IOBufQueue q(IOBufQueue::cacheChainLength());
q.append(std::move(bufFrame));
pipeline->read(q);
EXPECT_EQ(called, 1);
}
TEST(LineBasedFrameDecoder, Simple) {
auto pipeline = Pipeline<IOBufQueue&, std::unique_ptr<IOBuf>>::create();
int called = 0;
(*pipeline)
.addBack(LineBasedFrameDecoder(10))
.addBack(test::FrameTester([&](std::unique_ptr<IOBuf> buf) {
auto sz = buf->computeChainDataLength();
called++;
EXPECT_EQ(sz, 3);
}))
.finalize();
auto buf = createZeroedBuffer(3);
IOBufQueue q(IOBufQueue::cacheChainLength());
q.append(std::move(buf));
pipeline->read(q);
EXPECT_EQ(called, 0);
buf = createZeroedBuffer(1);
RWPrivateCursor c(buf.get());
c.write<char>('\n');
q.append(std::move(buf));
pipeline->read(q);
EXPECT_EQ(called, 1);
buf = createZeroedBuffer(4);
RWPrivateCursor c1(buf.get());
c1.write(' ');
c1.write(' ');
c1.write(' ');
c1.write('\r');
q.append(std::move(buf));
pipeline->read(q);
EXPECT_EQ(called, 1);
buf = createZeroedBuffer(1);
RWPrivateCursor c2(buf.get());
c2.write('\n');
q.append(std::move(buf));
pipeline->read(q);
EXPECT_EQ(called, 2);
}
TEST(LineBasedFrameDecoder, SaveDelimiter) {
auto pipeline = Pipeline<IOBufQueue&, std::unique_ptr<IOBuf>>::create();
int called = 0;
(*pipeline)
.addBack(LineBasedFrameDecoder(10, false))
.addBack(test::FrameTester([&](std::unique_ptr<IOBuf> buf) {
auto sz = buf->computeChainDataLength();
called++;
EXPECT_EQ(sz, 4);
}))
.finalize();
auto buf = createZeroedBuffer(3);
IOBufQueue q(IOBufQueue::cacheChainLength());
q.append(std::move(buf));
pipeline->read(q);
EXPECT_EQ(called, 0);
buf = createZeroedBuffer(1);
RWPrivateCursor c(buf.get());
c.write<char>('\n');
q.append(std::move(buf));
pipeline->read(q);
EXPECT_EQ(called, 1);
buf = createZeroedBuffer(3);
RWPrivateCursor c1(buf.get());
c1.write(' ');
c1.write(' ');
c1.write('\r');
q.append(std::move(buf));
pipeline->read(q);
EXPECT_EQ(called, 1);
buf = createZeroedBuffer(1);
RWPrivateCursor c2(buf.get());
c2.write('\n');
q.append(std::move(buf));
pipeline->read(q);
EXPECT_EQ(called, 2);
}
TEST(LineBasedFrameDecoder, Fail) {
auto pipeline = Pipeline<IOBufQueue&, std::unique_ptr<IOBuf>>::create();
int called = 0;
(*pipeline)
.addBack(LineBasedFrameDecoder(10))
.addBack(test::FrameTester([&](std::unique_ptr<IOBuf> buf) {
ASSERT_EQ(nullptr, buf);
called++;
}))
.finalize();
auto buf = createZeroedBuffer(11);
IOBufQueue q(IOBufQueue::cacheChainLength());
q.append(std::move(buf));
pipeline->read(q);
EXPECT_EQ(called, 1);
buf = createZeroedBuffer(1);
q.append(std::move(buf));
pipeline->read(q);
EXPECT_EQ(called, 1);
buf = createZeroedBuffer(2);
RWPrivateCursor c(buf.get());
c.write(' ');
c.write<char>('\n');
q.append(std::move(buf));
pipeline->read(q);
EXPECT_EQ(called, 1);
buf = createZeroedBuffer(12);
RWPrivateCursor c2(buf.get());
for (int i = 0; i < 11; i++) {
c2.write(' ');
}
c2.write<char>('\n');
q.append(std::move(buf));
pipeline->read(q);
EXPECT_EQ(called, 2);
}
TEST(LineBasedFrameDecoder, NewLineOnly) {
auto pipeline = Pipeline<IOBufQueue&, std::unique_ptr<IOBuf>>::create();
int called = 0;
(*pipeline)
.addBack(LineBasedFrameDecoder(
10, true, LineBasedFrameDecoder::TerminatorType::NEWLINE))
.addBack(test::FrameTester([&](std::unique_ptr<IOBuf> buf) {
auto sz = buf->computeChainDataLength();
called++;
EXPECT_EQ(sz, 1);
}))
.finalize();
auto buf = createZeroedBuffer(2);
RWPrivateCursor c(buf.get());
c.write<char>('\r');
c.write<char>('\n');
IOBufQueue q(IOBufQueue::cacheChainLength());
q.append(std::move(buf));
pipeline->read(q);
EXPECT_EQ(called, 1);
}
TEST(LineBasedFrameDecoder, CarriageNewLineOnly) {
auto pipeline = Pipeline<IOBufQueue&, std::unique_ptr<IOBuf>>::create();
int called = 0;
(*pipeline)
.addBack(LineBasedFrameDecoder(
10, true, LineBasedFrameDecoder::TerminatorType::CARRIAGENEWLINE))
.addBack(test::FrameTester([&](std::unique_ptr<IOBuf> buf) {
auto sz = buf->computeChainDataLength();
called++;
EXPECT_EQ(sz, 1);
}))
.finalize();
auto buf = createZeroedBuffer(3);
RWPrivateCursor c(buf.get());
c.write<char>('\n');
c.write<char>('\r');
c.write<char>('\n');
IOBufQueue q(IOBufQueue::cacheChainLength());
q.append(std::move(buf));
pipeline->read(q);
EXPECT_EQ(called, 1);
}
TEST(LineBasedFrameDecoder, CarriageOnly) {
auto pipeline = Pipeline<IOBufQueue&, std::unique_ptr<IOBuf>>::create();
(*pipeline)
.addBack(LineBasedFrameDecoder(
10, true, LineBasedFrameDecoder::TerminatorType::CARRIAGENEWLINE))
.addBack(test::FrameTester([&](std::unique_ptr<IOBuf>) { FAIL(); }))
.finalize();
IOBufQueue q(IOBufQueue::cacheChainLength());
q.append(IOBuf::copyBuffer("\raa"));
pipeline->read(q);
}
TEST(LineBasedFrameDecoder, DoubleCarriage) {
auto pipeline = Pipeline<IOBufQueue&, std::unique_ptr<IOBuf>>::create();
int called = 0;
(*pipeline)
.addBack(LineBasedFrameDecoder(
10, true, LineBasedFrameDecoder::TerminatorType::CARRIAGENEWLINE))
.addBack(test::FrameTester([&](std::unique_ptr<IOBuf> buf) {
auto sz = buf->computeChainDataLength();
called++;
EXPECT_EQ(sz, 1);
}))
.finalize();
IOBufQueue q(IOBufQueue::cacheChainLength());
q.append(IOBuf::copyBuffer("\r\r\na\r\n"));
pipeline->read(q);
EXPECT_EQ(called, 2);
}
|
1384_1
|
crossvul
|
cpp
|
CWE-119
|
Improper Restriction of Operations within the Bounds of a Memory Buffer - Certain languages allow direct addressing of memory locations and do not automatically ensure that these locations are valid for the memory buffer that is being referenced.
|
cpp
|
// Copyright Benoit Blanchon 2014-2015
// MIT License
//
// Arduino JSON library
// https://github.com/bblanchon/ArduinoJson
#include "../../include/ArduinoJson/Internals/QuotedString.hpp"
using namespace ArduinoJson::Internals;
// How to escape special chars:
// specialChars[2*i+1] => the special char
// specialChars[2*i] => the char to use instead
static const char specialChars[] = "\"\"\\\\b\bf\fn\nr\rt\t";
static inline char getSpecialChar(char c) {
// Optimized for code size on a 8-bit AVR
const char *p = specialChars;
while (p[0] && p[1] != c) {
p += 2;
}
return p[0];
}
static inline size_t printCharTo(char c, Print &p) {
char specialChar = getSpecialChar(c);
return specialChar ? p.write('\\') + p.write(specialChar) : p.write(c);
}
size_t QuotedString::printTo(const char *s, Print &p) {
if (!s) return p.print("null");
size_t n = p.write('\"');
while (*s) {
n += printCharTo(*s++, p);
}
return n + p.write('\"');
}
static char unescapeChar(char c) {
// Optimized for code size on a 8-bit AVR
const char *p = specialChars + 4;
for (;;) {
if (p[0] == '\0') return c;
if (p[0] == c) return p[1];
p += 2;
}
}
static inline bool isQuote(char c) { return c == '\"' || c == '\''; }
char *QuotedString::extractFrom(char *input, char **endPtr) {
char firstChar = *input;
if (!isQuote(firstChar)) {
// must start with a quote
return NULL;
}
char stopChar = firstChar; // closing quote is the same as opening quote
char *startPtr = input + 1; // skip the quote
char *readPtr = startPtr;
char *writePtr = startPtr;
char c;
for (;;) {
c = *readPtr++;
if (c == '\0') {
// premature ending
return NULL;
}
if (c == stopChar) {
// closing quote
break;
}
if (c == '\\') {
// replace char
c = unescapeChar(*readPtr++);
}
*writePtr++ = c;
}
// end the string here
*writePtr = '\0';
// update end ptr
*endPtr = readPtr;
return startPtr;
}
|
// Copyright Benoit Blanchon 2014-2015
// MIT License
//
// Arduino JSON library
// https://github.com/bblanchon/ArduinoJson
#include "../../include/ArduinoJson/Internals/QuotedString.hpp"
using namespace ArduinoJson::Internals;
// How to escape special chars:
// specialChars[2*i+1] => the special char
// specialChars[2*i] => the char to use instead
static const char specialChars[] = "\"\"\\\\b\bf\fn\nr\rt\t";
static inline char getSpecialChar(char c) {
// Optimized for code size on a 8-bit AVR
const char *p = specialChars;
while (p[0] && p[1] != c) {
p += 2;
}
return p[0];
}
static inline size_t printCharTo(char c, Print &p) {
char specialChar = getSpecialChar(c);
return specialChar ? p.write('\\') + p.write(specialChar) : p.write(c);
}
size_t QuotedString::printTo(const char *s, Print &p) {
if (!s) return p.print("null");
size_t n = p.write('\"');
while (*s) {
n += printCharTo(*s++, p);
}
return n + p.write('\"');
}
static char unescapeChar(char c) {
// Optimized for code size on a 8-bit AVR
const char *p = specialChars + 4;
for (;;) {
if (p[0] == '\0') return c;
if (p[0] == c) return p[1];
p += 2;
}
}
static inline bool isQuote(char c) { return c == '\"' || c == '\''; }
char *QuotedString::extractFrom(char *input, char **endPtr) {
char *startPtr = input + 1; // skip the quote
char *readPtr = startPtr;
char *writePtr = startPtr;
char c;
char firstChar = *input;
char stopChar = firstChar; // closing quote is the same as opening quote
if (!isQuote(firstChar)) goto ERROR_OPENING_QUOTE_MISSING;
for (;;) {
c = *readPtr++;
if (c == '\0') goto ERROR_CLOSING_QUOTE_MISSING;
if (c == stopChar) goto SUCCESS;
if (c == '\\') {
// replace char
c = unescapeChar(*readPtr++);
if (c == '\0') goto ERROR_ESCAPE_SEQUENCE_INTERRUPTED;
}
*writePtr++ = c;
}
SUCCESS:
// end the string here
*writePtr = '\0';
// update end ptr
*endPtr = readPtr;
// return pointer to unquoted string
return startPtr;
ERROR_OPENING_QUOTE_MISSING:
ERROR_CLOSING_QUOTE_MISSING:
ERROR_ESCAPE_SEQUENCE_INTERRUPTED:
return NULL;
}
|
1636_1
|
crossvul
|
cpp
|
CWE-119
|
Improper Restriction of Operations within the Bounds of a Memory Buffer - Certain languages allow direct addressing of memory locations and do not automatically ensure that these locations are valid for the memory buffer that is being referenced.
|
cpp
|
// Copyright Benoit Blanchon 2014-2015
// MIT License
//
// Arduino JSON library
// https://github.com/bblanchon/ArduinoJson
#include <gtest/gtest.h>
#include <ArduinoJson/Internals/QuotedString.hpp>
using namespace ArduinoJson::Internals;
class QuotedString_ExtractFrom_Tests : public testing::Test {
protected:
void whenInputIs(const char *json) {
strcpy(_jsonString, json);
_result = QuotedString::extractFrom(_jsonString, &_trailing);
}
void resultMustBe(const char *expected) { EXPECT_STREQ(expected, _result); }
void trailingMustBe(const char *expected) {
EXPECT_STREQ(expected, _trailing);
}
private:
char _jsonString[256];
char *_result;
char *_trailing;
};
TEST_F(QuotedString_ExtractFrom_Tests, EmptyDoubleQuotedString) {
whenInputIs("\"\"");
resultMustBe("");
trailingMustBe("");
}
TEST_F(QuotedString_ExtractFrom_Tests, NoQuotes) {
whenInputIs("hello world");
resultMustBe(0);
}
TEST_F(QuotedString_ExtractFrom_Tests, MissingClosingQuote) {
whenInputIs("\"hello world");
resultMustBe(0);
}
TEST_F(QuotedString_ExtractFrom_Tests, EmptySingleQuotedString) {
whenInputIs("''");
resultMustBe("");
trailingMustBe("");
}
TEST_F(QuotedString_ExtractFrom_Tests, SimpleDoubleQuotedString) {
whenInputIs("\"hello world\"");
resultMustBe("hello world");
trailingMustBe("");
}
TEST_F(QuotedString_ExtractFrom_Tests, DoubleQuotedStringWithTrailing) {
whenInputIs("\"hello\" world");
resultMustBe("hello");
trailingMustBe(" world");
}
TEST_F(QuotedString_ExtractFrom_Tests, SingleQuotedStringWithTrailing) {
whenInputIs("'hello' world");
resultMustBe("hello");
trailingMustBe(" world");
}
TEST_F(QuotedString_ExtractFrom_Tests, CurlyBraces) {
whenInputIs("\"{hello:world}\"");
resultMustBe("{hello:world}");
}
TEST_F(QuotedString_ExtractFrom_Tests, SquareBraquets) {
whenInputIs("\"[hello,world]\"");
resultMustBe("[hello,world]");
}
TEST_F(QuotedString_ExtractFrom_Tests, EscapedDoubleQuote) {
whenInputIs("\"hello \\\"world\\\"\"");
resultMustBe("hello \"world\"");
}
TEST_F(QuotedString_ExtractFrom_Tests, EscapedSingleQuote) {
whenInputIs("\"hello \\\'world\\\'\"");
resultMustBe("hello 'world'");
}
TEST_F(QuotedString_ExtractFrom_Tests, EscapedSolidus) {
whenInputIs("\"hello \\/world\\/\"");
resultMustBe("hello /world/");
}
TEST_F(QuotedString_ExtractFrom_Tests, EscapedReverseSolidus) {
whenInputIs("\"hello \\\\world\\\\\"");
resultMustBe("hello \\world\\");
}
TEST_F(QuotedString_ExtractFrom_Tests, EscapedBackspace) {
whenInputIs("\"hello \\bworld\\b\"");
resultMustBe("hello \bworld\b");
}
TEST_F(QuotedString_ExtractFrom_Tests, EscapedFormfeed) {
whenInputIs("\"hello \\fworld\\f\"");
resultMustBe("hello \fworld\f");
}
TEST_F(QuotedString_ExtractFrom_Tests, EscapedNewline) {
whenInputIs("\"hello \\nworld\\n\"");
resultMustBe("hello \nworld\n");
}
TEST_F(QuotedString_ExtractFrom_Tests, EscapedCarriageReturn) {
whenInputIs("\"hello \\rworld\\r\"");
resultMustBe("hello \rworld\r");
}
TEST_F(QuotedString_ExtractFrom_Tests, EscapedTab) {
whenInputIs("\"hello \\tworld\\t\"");
resultMustBe("hello \tworld\t");
}
TEST_F(QuotedString_ExtractFrom_Tests, AllEscapedCharsTogether) {
whenInputIs("\"1\\\"2\\\\3\\/4\\b5\\f6\\n7\\r8\\t9\"");
resultMustBe("1\"2\\3/4\b5\f6\n7\r8\t9");
}
|
// Copyright Benoit Blanchon 2014-2015
// MIT License
//
// Arduino JSON library
// https://github.com/bblanchon/ArduinoJson
#include <gtest/gtest.h>
#include <ArduinoJson/Internals/QuotedString.hpp>
using namespace ArduinoJson::Internals;
class QuotedString_ExtractFrom_Tests : public testing::Test {
protected:
void whenInputIs(const char *json) {
strcpy(_jsonString, json);
_result = QuotedString::extractFrom(_jsonString, &_trailing);
}
void whenInputIs(const char *json, size_t len) {
memcpy(_jsonString, json, len);
_result = QuotedString::extractFrom(_jsonString, &_trailing);
}
void resultMustBe(const char *expected) { EXPECT_STREQ(expected, _result); }
void trailingMustBe(const char *expected) {
EXPECT_STREQ(expected, _trailing);
}
private:
char _jsonString[256];
char *_result;
char *_trailing;
};
TEST_F(QuotedString_ExtractFrom_Tests, EmptyDoubleQuotedString) {
whenInputIs("\"\"");
resultMustBe("");
trailingMustBe("");
}
TEST_F(QuotedString_ExtractFrom_Tests, NoQuotes) {
whenInputIs("hello world");
resultMustBe(0);
}
TEST_F(QuotedString_ExtractFrom_Tests, MissingClosingQuote) {
whenInputIs("\"hello world");
resultMustBe(0);
}
TEST_F(QuotedString_ExtractFrom_Tests, EmptySingleQuotedString) {
whenInputIs("''");
resultMustBe("");
trailingMustBe("");
}
TEST_F(QuotedString_ExtractFrom_Tests, SimpleDoubleQuotedString) {
whenInputIs("\"hello world\"");
resultMustBe("hello world");
trailingMustBe("");
}
TEST_F(QuotedString_ExtractFrom_Tests, DoubleQuotedStringWithTrailing) {
whenInputIs("\"hello\" world");
resultMustBe("hello");
trailingMustBe(" world");
}
TEST_F(QuotedString_ExtractFrom_Tests, SingleQuotedStringWithTrailing) {
whenInputIs("'hello' world");
resultMustBe("hello");
trailingMustBe(" world");
}
TEST_F(QuotedString_ExtractFrom_Tests, CurlyBraces) {
whenInputIs("\"{hello:world}\"");
resultMustBe("{hello:world}");
}
TEST_F(QuotedString_ExtractFrom_Tests, SquareBraquets) {
whenInputIs("\"[hello,world]\"");
resultMustBe("[hello,world]");
}
TEST_F(QuotedString_ExtractFrom_Tests, EscapedDoubleQuote) {
whenInputIs("\"hello \\\"world\\\"\"");
resultMustBe("hello \"world\"");
}
TEST_F(QuotedString_ExtractFrom_Tests, EscapedSingleQuote) {
whenInputIs("\"hello \\\'world\\\'\"");
resultMustBe("hello 'world'");
}
TEST_F(QuotedString_ExtractFrom_Tests, EscapedSolidus) {
whenInputIs("\"hello \\/world\\/\"");
resultMustBe("hello /world/");
}
TEST_F(QuotedString_ExtractFrom_Tests, EscapedReverseSolidus) {
whenInputIs("\"hello \\\\world\\\\\"");
resultMustBe("hello \\world\\");
}
TEST_F(QuotedString_ExtractFrom_Tests, EscapedBackspace) {
whenInputIs("\"hello \\bworld\\b\"");
resultMustBe("hello \bworld\b");
}
TEST_F(QuotedString_ExtractFrom_Tests, EscapedFormfeed) {
whenInputIs("\"hello \\fworld\\f\"");
resultMustBe("hello \fworld\f");
}
TEST_F(QuotedString_ExtractFrom_Tests, EscapedNewline) {
whenInputIs("\"hello \\nworld\\n\"");
resultMustBe("hello \nworld\n");
}
TEST_F(QuotedString_ExtractFrom_Tests, EscapedCarriageReturn) {
whenInputIs("\"hello \\rworld\\r\"");
resultMustBe("hello \rworld\r");
}
TEST_F(QuotedString_ExtractFrom_Tests, EscapedTab) {
whenInputIs("\"hello \\tworld\\t\"");
resultMustBe("hello \tworld\t");
}
TEST_F(QuotedString_ExtractFrom_Tests, AllEscapedCharsTogether) {
whenInputIs("\"1\\\"2\\\\3\\/4\\b5\\f6\\n7\\r8\\t9\"");
resultMustBe("1\"2\\3/4\b5\f6\n7\r8\t9");
}
TEST_F(QuotedString_ExtractFrom_Tests, UnterminatedEscapeSequence) {
whenInputIs("\"\\\0\"", 4);
resultMustBe(0);
}
|
1636_2
|
crossvul
|
cpp
|
CWE-119
|
Improper Restriction of Operations within the Bounds of a Memory Buffer - Certain languages allow direct addressing of memory locations and do not automatically ensure that these locations are valid for the memory buffer that is being referenced.
|
cpp
|
/*
Copyright (c) 2008-2012, Arvid Norberg
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the distribution.
* Neither the name of the author nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
#include "lazy_entry.hpp"
#include <cstring>
#define __STDC_FORMAT_MACROS
#include <inttypes.h>
namespace
{
const int lazy_entry_grow_factor = 150; // percent
const int lazy_entry_dict_init = 5;
const int lazy_entry_list_init = 5;
}
namespace libtorrent
{
#define TORRENT_FAIL_BDECODE(code) \
{ \
ec = make_error_code(code); \
while (!stack.empty()) { \
top = stack.back(); \
if (top->type() == lazy_entry::dict_t || top->type() == lazy_entry::list_t) top->pop(); \
stack.pop_back(); \
} \
if (error_pos) *error_pos = start - orig_start; \
return -1; \
}
bool is_digit(char c) { return c >= '0' && c <= '9'; }
bool is_print(char c) { return c >= 32 && c < 127; }
// fills in 'val' with what the string between start and the
// first occurance of the delimiter is interpreted as an int.
// return the pointer to the delimiter, or 0 if there is a
// parse error. val should be initialized to zero
char const* parse_int(char const* start, char const* end, char delimiter, boost::int64_t& val)
{
while (start < end && *start != delimiter)
{
if (!is_digit(*start)) { return 0; }
val *= 10;
val += *start - '0';
++start;
}
return start;
}
char const* find_char(char const* start, char const* end, char delimiter)
{
while (start < end && *start != delimiter) ++start;
return start;
}
// return 0 = success
int lazy_bdecode(char const* start, char const* end, lazy_entry& ret
, error_code& ec, int* error_pos, int depth_limit, int item_limit)
{
char const* const orig_start = start;
ret.clear();
if (start == end) return 0;
std::vector<lazy_entry*> stack;
stack.push_back(&ret);
while (start < end)
{
if (stack.empty()) break; // done!
lazy_entry* top = stack.back();
if (int(stack.size()) > depth_limit) TORRENT_FAIL_BDECODE(bdecode_errors::depth_exceeded);
if (start >= end) TORRENT_FAIL_BDECODE(bdecode_errors::unexpected_eof);
char t = *start;
++start;
if (start >= end && t != 'e') TORRENT_FAIL_BDECODE(bdecode_errors::unexpected_eof);
switch (top->type())
{
case lazy_entry::dict_t:
{
if (t == 'e')
{
top->set_end(start);
stack.pop_back();
continue;
}
if (!is_digit(t)) TORRENT_FAIL_BDECODE(bdecode_errors::expected_string);
boost::int64_t len = t - '0';
start = parse_int(start, end, ':', len);
if (start == 0 || start + len + 3 > end || *start != ':')
TORRENT_FAIL_BDECODE(bdecode_errors::expected_colon);
++start;
if (start == end) TORRENT_FAIL_BDECODE(bdecode_errors::unexpected_eof);
lazy_entry* ent = top->dict_append(start);
if (ent == 0) TORRENT_FAIL_BDECODE(boost::system::errc::not_enough_memory);
start += len;
if (start >= end) TORRENT_FAIL_BDECODE(bdecode_errors::unexpected_eof);
stack.push_back(ent);
t = *start;
++start;
break;
}
case lazy_entry::list_t:
{
if (t == 'e')
{
top->set_end(start);
stack.pop_back();
continue;
}
lazy_entry* ent = top->list_append();
if (ent == 0) TORRENT_FAIL_BDECODE(boost::system::errc::not_enough_memory);
stack.push_back(ent);
break;
}
default: break;
}
--item_limit;
if (item_limit <= 0) TORRENT_FAIL_BDECODE(bdecode_errors::limit_exceeded);
top = stack.back();
switch (t)
{
case 'd':
top->construct_dict(start - 1);
continue;
case 'l':
top->construct_list(start - 1);
continue;
case 'i':
{
char const* int_start = start;
start = find_char(start, end, 'e');
top->construct_int(int_start, start - int_start);
if (start == end) TORRENT_FAIL_BDECODE(bdecode_errors::unexpected_eof);
TORRENT_ASSERT(*start == 'e');
++start;
stack.pop_back();
continue;
}
default:
{
if (!is_digit(t))
TORRENT_FAIL_BDECODE(bdecode_errors::expected_value);
boost::int64_t len = t - '0';
start = parse_int(start, end, ':', len);
if (start == 0 || start + len + 1 > end || *start != ':')
TORRENT_FAIL_BDECODE(bdecode_errors::expected_colon);
++start;
top->construct_string(start, int(len));
stack.pop_back();
start += len;
continue;
}
}
return 0;
}
return 0;
}
boost::int64_t lazy_entry::int_value() const
{
TORRENT_ASSERT(m_type == int_t);
boost::int64_t val = 0;
bool negative = false;
if (*m_data.start == '-') negative = true;
parse_int(negative?m_data.start+1:m_data.start, m_data.start + m_size, 'e', val);
if (negative) val = -val;
return val;
}
lazy_entry* lazy_entry::dict_append(char const* name)
{
TORRENT_ASSERT(m_type == dict_t);
TORRENT_ASSERT(m_size <= m_capacity);
if (m_capacity == 0)
{
int capacity = lazy_entry_dict_init;
m_data.dict = new (std::nothrow) lazy_dict_entry[capacity];
if (m_data.dict == 0) return 0;
m_capacity = capacity;
}
else if (m_size == m_capacity)
{
int capacity = m_capacity * lazy_entry_grow_factor / 100;
lazy_dict_entry* tmp = new (std::nothrow) lazy_dict_entry[capacity];
if (tmp == 0) return 0;
std::memcpy(tmp, m_data.dict, sizeof(lazy_dict_entry) * m_size);
for (int i = 0; i < int(m_size); ++i) m_data.dict[i].val.release();
delete[] m_data.dict;
m_data.dict = tmp;
m_capacity = capacity;
}
TORRENT_ASSERT(m_size < m_capacity);
lazy_dict_entry& ret = m_data.dict[m_size++];
ret.name = name;
return &ret.val;
}
void lazy_entry::pop()
{
if (m_size > 0) --m_size;
}
namespace
{
// the number of decimal digits needed
// to represent the given value
int num_digits(int val)
{
int ret = 1;
while (val >= 10)
{
++ret;
val /= 10;
}
return ret;
}
}
void lazy_entry::construct_string(char const* start, int length)
{
TORRENT_ASSERT(m_type == none_t);
m_type = string_t;
m_data.start = start;
m_size = length;
m_begin = start - 1 - num_digits(length);
m_len = start - m_begin + length;
}
namespace
{
// str1 is null-terminated
// str2 is not, str2 is len2 chars
bool string_equal(char const* str1, char const* str2, int len2)
{
while (len2 > 0)
{
if (*str1 != *str2) return false;
if (*str1 == 0) return false;
++str1;
++str2;
--len2;
}
return *str1 == 0;
}
}
std::pair<std::string, lazy_entry const*> lazy_entry::dict_at(int i) const
{
TORRENT_ASSERT(m_type == dict_t);
TORRENT_ASSERT(i < int(m_size));
lazy_dict_entry const& e = m_data.dict[i];
return std::make_pair(std::string(e.name, e.val.m_begin - e.name), &e.val);
}
std::string lazy_entry::dict_find_string_value(char const* name) const
{
lazy_entry const* e = dict_find(name);
if (e == 0 || e->type() != lazy_entry::string_t) return std::string();
return e->string_value();
}
pascal_string lazy_entry::dict_find_pstr(char const* name) const
{
lazy_entry const* e = dict_find(name);
if (e == 0 || e->type() != lazy_entry::string_t) return pascal_string(0, 0);
return e->string_pstr();
}
lazy_entry const* lazy_entry::dict_find_string(char const* name) const
{
lazy_entry const* e = dict_find(name);
if (e == 0 || e->type() != lazy_entry::string_t) return 0;
return e;
}
lazy_entry const* lazy_entry::dict_find_int(char const* name) const
{
lazy_entry const* e = dict_find(name);
if (e == 0 || e->type() != lazy_entry::int_t) return 0;
return e;
}
boost::int64_t lazy_entry::dict_find_int_value(char const* name, boost::int64_t default_val) const
{
lazy_entry const* e = dict_find(name);
if (e == 0 || e->type() != lazy_entry::int_t) return default_val;
return e->int_value();
}
lazy_entry const* lazy_entry::dict_find_dict(char const* name) const
{
lazy_entry const* e = dict_find(name);
if (e == 0 || e->type() != lazy_entry::dict_t) return 0;
return e;
}
lazy_entry const* lazy_entry::dict_find_list(char const* name) const
{
lazy_entry const* e = dict_find(name);
if (e == 0 || e->type() != lazy_entry::list_t) return 0;
return e;
}
lazy_entry* lazy_entry::dict_find(char const* name)
{
TORRENT_ASSERT(m_type == dict_t);
for (int i = 0; i < int(m_size); ++i)
{
lazy_dict_entry& e = m_data.dict[i];
if (string_equal(name, e.name, e.val.m_begin - e.name))
return &e.val;
}
return 0;
}
lazy_entry* lazy_entry::list_append()
{
TORRENT_ASSERT(m_type == list_t);
TORRENT_ASSERT(m_size <= m_capacity);
if (m_capacity == 0)
{
int capacity = lazy_entry_list_init;
m_data.list = new (std::nothrow) lazy_entry[capacity];
if (m_data.list == 0) return 0;
m_capacity = capacity;
}
else if (m_size == m_capacity)
{
int capacity = m_capacity * lazy_entry_grow_factor / 100;
lazy_entry* tmp = new (std::nothrow) lazy_entry[capacity];
if (tmp == 0) return 0;
std::memcpy(tmp, m_data.list, sizeof(lazy_entry) * m_size);
for (int i = 0; i < int(m_size); ++i) m_data.list[i].release();
delete[] m_data.list;
m_data.list = tmp;
m_capacity = capacity;
}
TORRENT_ASSERT(m_size < m_capacity);
return m_data.list + (m_size++);
}
std::string lazy_entry::list_string_value_at(int i) const
{
lazy_entry const* e = list_at(i);
if (e == 0 || e->type() != lazy_entry::string_t) return std::string();
return e->string_value();
}
pascal_string lazy_entry::list_pstr_at(int i) const
{
lazy_entry const* e = list_at(i);
if (e == 0 || e->type() != lazy_entry::string_t) return pascal_string(0, 0);
return e->string_pstr();
}
boost::int64_t lazy_entry::list_int_value_at(int i, boost::int64_t default_val) const
{
lazy_entry const* e = list_at(i);
if (e == 0 || e->type() != lazy_entry::int_t) return default_val;
return e->int_value();
}
void lazy_entry::clear()
{
switch (m_type)
{
case list_t: delete[] m_data.list; break;
case dict_t: delete[] m_data.dict; break;
default: break;
}
m_data.start = 0;
m_size = 0;
m_capacity = 0;
m_type = none_t;
}
std::pair<char const*, int> lazy_entry::data_section() const
{
typedef std::pair<char const*, int> return_t;
return return_t(m_begin, m_len);
}
int line_longer_than(lazy_entry const& e, int limit)
{
int line_len = 0;
switch (e.type())
{
case lazy_entry::list_t:
line_len += 4;
if (line_len > limit) return -1;
for (int i = 0; i < e.list_size(); ++i)
{
int ret = line_longer_than(*e.list_at(i), limit - line_len);
if (ret == -1) return -1;
line_len += ret + 2;
}
break;
case lazy_entry::dict_t:
line_len += 4;
if (line_len > limit) return -1;
for (int i = 0; i < e.dict_size(); ++i)
{
line_len += 4 + e.dict_at(i).first.size();
if (line_len > limit) return -1;
int ret = line_longer_than(*e.dict_at(i).second, limit - line_len);
if (ret == -1) return -1;
line_len += ret + 1;
}
break;
case lazy_entry::string_t:
line_len += 3 + e.string_length();
break;
case lazy_entry::int_t:
{
boost::int64_t val = e.int_value();
while (val > 0)
{
++line_len;
val /= 10;
}
line_len += 2;
}
break;
case lazy_entry::none_t:
line_len += 4;
break;
}
if (line_len > limit) return -1;
return line_len;
}
std::string print_entry(lazy_entry const& e, bool single_line, int indent)
{
char indent_str[200];
memset(indent_str, ' ', 200);
indent_str[0] = ',';
indent_str[1] = '\n';
indent_str[199] = 0;
if (indent < 197 && indent >= 0) indent_str[indent+2] = 0;
std::string ret;
switch (e.type())
{
case lazy_entry::none_t: return "none";
case lazy_entry::int_t:
{
char str[100];
snprintf(str, sizeof(str), "%" PRId64, e.int_value());
return str;
}
case lazy_entry::string_t:
{
bool printable = true;
char const* str = e.string_ptr();
for (int i = 0; i < e.string_length(); ++i)
{
using namespace std;
if (is_print((unsigned char)str[i])) continue;
printable = false;
break;
}
ret += "'";
if (printable)
{
ret += e.string_value();
ret += "'";
return ret;
}
for (int i = 0; i < e.string_length(); ++i)
{
char tmp[5];
snprintf(tmp, sizeof(tmp), "%02x", (unsigned char)str[i]);
ret += tmp;
}
ret += "'";
return ret;
}
case lazy_entry::list_t:
{
ret += '[';
bool one_liner = line_longer_than(e, 200) != -1 || single_line;
if (!one_liner) ret += indent_str + 1;
for (int i = 0; i < e.list_size(); ++i)
{
if (i == 0 && one_liner) ret += " ";
ret += print_entry(*e.list_at(i), single_line, indent + 2);
if (i < e.list_size() - 1) ret += (one_liner?", ":indent_str);
else ret += (one_liner?" ":indent_str+1);
}
ret += "]";
return ret;
}
case lazy_entry::dict_t:
{
ret += "{";
bool one_liner = line_longer_than(e, 200) != -1 || single_line;
if (!one_liner) ret += indent_str+1;
for (int i = 0; i < e.dict_size(); ++i)
{
if (i == 0 && one_liner) ret += " ";
std::pair<std::string, lazy_entry const*> ent = e.dict_at(i);
ret += "'";
ret += ent.first;
ret += "': ";
ret += print_entry(*ent.second, single_line, indent + 2);
if (i < e.dict_size() - 1) ret += (one_liner?", ":indent_str);
else ret += (one_liner?" ":indent_str+1);
}
ret += "}";
return ret;
}
}
return ret;
}
struct bdecode_error_category : boost::system::error_category
{
virtual const char* name() const BOOST_SYSTEM_NOEXCEPT;
virtual std::string message(int ev) const BOOST_SYSTEM_NOEXCEPT;
virtual boost::system::error_condition default_error_condition(int ev) const BOOST_SYSTEM_NOEXCEPT
{ return boost::system::error_condition(ev, *this); }
};
const char* bdecode_error_category::name() const BOOST_SYSTEM_NOEXCEPT
{
return "bdecode error";
}
std::string bdecode_error_category::message(int ev) const BOOST_SYSTEM_NOEXCEPT
{
static char const* msgs[] =
{
"no error",
"expected string in bencoded string",
"expected colon in bencoded string",
"unexpected end of file in bencoded string",
"expected value (list, dict, int or string) in bencoded string",
"bencoded nesting depth exceeded",
"bencoded item count limit exceeded",
};
if (ev < 0 || ev >= int(sizeof(msgs)/sizeof(msgs[0])))
return "Unknown error";
return msgs[ev];
}
boost::system::error_category& get_bdecode_category()
{
static bdecode_error_category bdecode_category;
return bdecode_category;
}
};
|
/*
Copyright (c) 2008-2014, Arvid Norberg
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the distribution.
* Neither the name of the author nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
#include "lazy_entry.hpp"
#include <cstring>
#define __STDC_FORMAT_MACROS
#include <inttypes.h>
namespace
{
const int lazy_entry_grow_factor = 150; // percent
const int lazy_entry_dict_init = 5;
const int lazy_entry_list_init = 5;
}
namespace libtorrent
{
namespace
{
int fail(int* error_pos
, std::vector<lazy_entry*>& stack
, char const* start
, char const* orig_start)
{
while (!stack.empty()) {
lazy_entry* top = stack.back();
if (top->type() == lazy_entry::dict_t || top->type() == lazy_entry::list_t)
{
top->pop();
break;
}
stack.pop_back();
}
if (error_pos) *error_pos = start - orig_start;
return -1;
}
}
#define TORRENT_FAIL_BDECODE(code) do { ec = make_error_code(code); return fail(error_pos, stack, start, orig_start); } while (false)
namespace { bool numeric(char c) { return c >= '0' && c <= '9'; } }
// fills in 'val' with what the string between start and the
// first occurance of the delimiter is interpreted as an int.
// return the pointer to the delimiter, or 0 if there is a
// parse error. val should be initialized to zero
char const* parse_int(char const* start, char const* end, char delimiter
, boost::int64_t& val, bdecode_errors::error_code_enum& ec)
{
while (start < end && *start != delimiter)
{
if (!numeric(*start))
{
ec = bdecode_errors::expected_string;
return start;
}
if (val > INT64_MAX / 10)
{
ec = bdecode_errors::overflow;
return start;
}
val *= 10;
int digit = *start - '0';
if (val > INT64_MAX - digit)
{
ec = bdecode_errors::overflow;
return start;
}
val += digit;
++start;
}
if (*start != delimiter)
ec = bdecode_errors::expected_colon;
return start;
}
char const* find_char(char const* start, char const* end, char delimiter)
{
while (start < end && *start != delimiter) ++start;
return start;
}
// return 0 = success
int lazy_bdecode(char const* start, char const* end, lazy_entry& ret
, error_code& ec, int* error_pos, int depth_limit, int item_limit)
{
char const* const orig_start = start;
ret.clear();
if (start == end) return 0;
std::vector<lazy_entry*> stack;
stack.push_back(&ret);
while (start <= end)
{
if (stack.empty()) break; // done!
lazy_entry* top = stack.back();
if (int(stack.size()) > depth_limit) TORRENT_FAIL_BDECODE(bdecode_errors::depth_exceeded);
if (start >= end) TORRENT_FAIL_BDECODE(bdecode_errors::unexpected_eof);
char t = *start;
++start;
if (start >= end && t != 'e') TORRENT_FAIL_BDECODE(bdecode_errors::unexpected_eof);
switch (top->type())
{
case lazy_entry::dict_t:
{
if (t == 'e')
{
top->set_end(start);
stack.pop_back();
continue;
}
if (!numeric(t)) TORRENT_FAIL_BDECODE(bdecode_errors::expected_string);
boost::int64_t len = t - '0';
bdecode_errors::error_code_enum e = bdecode_errors::no_error;
start = parse_int(start, end, ':', len, e);
if (e)
TORRENT_FAIL_BDECODE(e);
if (start + len + 1 > end)
TORRENT_FAIL_BDECODE(bdecode_errors::unexpected_eof);
if (len < 0)
TORRENT_FAIL_BDECODE(bdecode_errors::overflow);
++start;
if (start == end) TORRENT_FAIL_BDECODE(bdecode_errors::unexpected_eof);
lazy_entry* ent = top->dict_append(start);
if (ent == 0) TORRENT_FAIL_BDECODE(boost::system::errc::not_enough_memory);
start += len;
if (start >= end) TORRENT_FAIL_BDECODE(bdecode_errors::unexpected_eof);
stack.push_back(ent);
t = *start;
++start;
break;
}
case lazy_entry::list_t:
{
if (t == 'e')
{
top->set_end(start);
stack.pop_back();
continue;
}
lazy_entry* ent = top->list_append();
if (ent == 0) TORRENT_FAIL_BDECODE(boost::system::errc::not_enough_memory);
stack.push_back(ent);
break;
}
default: break;
}
--item_limit;
if (item_limit <= 0) TORRENT_FAIL_BDECODE(bdecode_errors::limit_exceeded);
top = stack.back();
switch (t)
{
case 'd':
top->construct_dict(start - 1);
continue;
case 'l':
top->construct_list(start - 1);
continue;
case 'i':
{
char const* int_start = start;
start = find_char(start, end, 'e');
top->construct_int(int_start, start - int_start);
if (start == end) TORRENT_FAIL_BDECODE(bdecode_errors::unexpected_eof);
TORRENT_ASSERT(*start == 'e');
++start;
stack.pop_back();
continue;
}
default:
{
if (!numeric(t))
TORRENT_FAIL_BDECODE(bdecode_errors::expected_value);
boost::int64_t len = t - '0';
bdecode_errors::error_code_enum e = bdecode_errors::no_error;
start = parse_int(start, end, ':', len, e);
if (e)
TORRENT_FAIL_BDECODE(e);
if (start + len + 1 > end)
TORRENT_FAIL_BDECODE(bdecode_errors::unexpected_eof);
if (len < 0)
TORRENT_FAIL_BDECODE(bdecode_errors::overflow);
++start;
top->construct_string(start, int(len));
stack.pop_back();
start += len;
continue;
}
}
return 0;
}
return 0;
}
boost::int64_t lazy_entry::int_value() const
{
TORRENT_ASSERT(m_type == int_t);
boost::int64_t val = 0;
bool negative = false;
if (*m_data.start == '-') negative = true;
bdecode_errors::error_code_enum ec = bdecode_errors::no_error;
parse_int(m_data.start + negative
, m_data.start + m_size, 'e', val, ec);
if (ec) return 0;
if (negative) val = -val;
return val;
}
lazy_entry* lazy_entry::dict_append(char const* name)
{
TORRENT_ASSERT(m_type == dict_t);
TORRENT_ASSERT(m_size <= m_capacity);
if (m_capacity == 0)
{
int capacity = lazy_entry_dict_init;
m_data.dict = new (std::nothrow) lazy_dict_entry[capacity];
if (m_data.dict == 0) return 0;
m_capacity = capacity;
}
else if (m_size == m_capacity)
{
int capacity = m_capacity * lazy_entry_grow_factor / 100;
lazy_dict_entry* tmp = new (std::nothrow) lazy_dict_entry[capacity];
if (tmp == 0) return 0;
std::memcpy(tmp, m_data.dict, sizeof(lazy_dict_entry) * m_size);
for (int i = 0; i < int(m_size); ++i) m_data.dict[i].val.release();
delete[] m_data.dict;
m_data.dict = tmp;
m_capacity = capacity;
}
TORRENT_ASSERT(m_size < m_capacity);
lazy_dict_entry& ret = m_data.dict[m_size++];
ret.name = name;
return &ret.val;
}
void lazy_entry::pop()
{
if (m_size > 0) --m_size;
}
namespace
{
// the number of decimal digits needed
// to represent the given value
int num_digits(int val)
{
int ret = 1;
while (val >= 10)
{
++ret;
val /= 10;
}
return ret;
}
}
void lazy_entry::construct_string(char const* start, int length)
{
TORRENT_ASSERT(m_type == none_t);
m_type = string_t;
m_data.start = start;
m_size = length;
m_begin = start - 1 - num_digits(length);
m_len = start - m_begin + length;
}
namespace
{
// str1 is null-terminated
// str2 is not, str2 is len2 chars
bool string_equal(char const* str1, char const* str2, int len2)
{
while (len2 > 0)
{
if (*str1 != *str2) return false;
if (*str1 == 0) return false;
++str1;
++str2;
--len2;
}
return *str1 == 0;
}
}
std::pair<std::string, lazy_entry const*> lazy_entry::dict_at(int i) const
{
TORRENT_ASSERT(m_type == dict_t);
TORRENT_ASSERT(i < int(m_size));
lazy_dict_entry const& e = m_data.dict[i];
return std::make_pair(std::string(e.name, e.val.m_begin - e.name), &e.val);
}
std::string lazy_entry::dict_find_string_value(char const* name) const
{
lazy_entry const* e = dict_find(name);
if (e == 0 || e->type() != lazy_entry::string_t) return std::string();
return e->string_value();
}
pascal_string lazy_entry::dict_find_pstr(char const* name) const
{
lazy_entry const* e = dict_find(name);
if (e == 0 || e->type() != lazy_entry::string_t) return pascal_string(0, 0);
return e->string_pstr();
}
lazy_entry const* lazy_entry::dict_find_string(char const* name) const
{
lazy_entry const* e = dict_find(name);
if (e == 0 || e->type() != lazy_entry::string_t) return 0;
return e;
}
lazy_entry const* lazy_entry::dict_find_int(char const* name) const
{
lazy_entry const* e = dict_find(name);
if (e == 0 || e->type() != lazy_entry::int_t) return 0;
return e;
}
boost::int64_t lazy_entry::dict_find_int_value(char const* name, boost::int64_t default_val) const
{
lazy_entry const* e = dict_find(name);
if (e == 0 || e->type() != lazy_entry::int_t) return default_val;
return e->int_value();
}
lazy_entry const* lazy_entry::dict_find_dict(char const* name) const
{
lazy_entry const* e = dict_find(name);
if (e == 0 || e->type() != lazy_entry::dict_t) return 0;
return e;
}
lazy_entry const* lazy_entry::dict_find_dict(std::string const& name) const
{
lazy_entry const* e = dict_find(name);
if (e == 0 || e->type() != lazy_entry::dict_t) return 0;
return e;
}
lazy_entry const* lazy_entry::dict_find_list(char const* name) const
{
lazy_entry const* e = dict_find(name);
if (e == 0 || e->type() != lazy_entry::list_t) return 0;
return e;
}
lazy_entry* lazy_entry::dict_find(char const* name)
{
TORRENT_ASSERT(m_type == dict_t);
for (int i = 0; i < int(m_size); ++i)
{
lazy_dict_entry& e = m_data.dict[i];
if (string_equal(name, e.name, e.val.m_begin - e.name))
return &e.val;
}
return 0;
}
lazy_entry* lazy_entry::dict_find(std::string const& name)
{
TORRENT_ASSERT(m_type == dict_t);
for (int i = 0; i < int(m_size); ++i)
{
lazy_dict_entry& e = m_data.dict[i];
if (name.size() != e.val.m_begin - e.name) continue;
if (std::equal(name.begin(), name.end(), e.name))
return &e.val;
}
return 0;
}
lazy_entry* lazy_entry::list_append()
{
TORRENT_ASSERT(m_type == list_t);
TORRENT_ASSERT(m_size <= m_capacity);
if (m_capacity == 0)
{
int capacity = lazy_entry_list_init;
m_data.list = new (std::nothrow) lazy_entry[capacity];
if (m_data.list == 0) return 0;
m_capacity = capacity;
}
else if (m_size == m_capacity)
{
int capacity = m_capacity * lazy_entry_grow_factor / 100;
lazy_entry* tmp = new (std::nothrow) lazy_entry[capacity];
if (tmp == 0) return 0;
std::memcpy(tmp, m_data.list, sizeof(lazy_entry) * m_size);
for (int i = 0; i < int(m_size); ++i) m_data.list[i].release();
delete[] m_data.list;
m_data.list = tmp;
m_capacity = capacity;
}
TORRENT_ASSERT(m_size < m_capacity);
return m_data.list + (m_size++);
}
std::string lazy_entry::list_string_value_at(int i) const
{
lazy_entry const* e = list_at(i);
if (e == 0 || e->type() != lazy_entry::string_t) return std::string();
return e->string_value();
}
pascal_string lazy_entry::list_pstr_at(int i) const
{
lazy_entry const* e = list_at(i);
if (e == 0 || e->type() != lazy_entry::string_t) return pascal_string(0, 0);
return e->string_pstr();
}
boost::int64_t lazy_entry::list_int_value_at(int i, boost::int64_t default_val) const
{
lazy_entry const* e = list_at(i);
if (e == 0 || e->type() != lazy_entry::int_t) return default_val;
return e->int_value();
}
void lazy_entry::clear()
{
switch (m_type)
{
case list_t: delete[] m_data.list; break;
case dict_t: delete[] m_data.dict; break;
default: break;
}
m_data.start = 0;
m_size = 0;
m_capacity = 0;
m_type = none_t;
}
std::pair<char const*, int> lazy_entry::data_section() const
{
typedef std::pair<char const*, int> return_t;
return return_t(m_begin, m_len);
}
int line_longer_than(lazy_entry const& e, int limit)
{
int line_len = 0;
switch (e.type())
{
case lazy_entry::list_t:
line_len += 4;
if (line_len > limit) return -1;
for (int i = 0; i < e.list_size(); ++i)
{
int ret = line_longer_than(*e.list_at(i), limit - line_len);
if (ret == -1) return -1;
line_len += ret + 2;
}
break;
case lazy_entry::dict_t:
line_len += 4;
if (line_len > limit) return -1;
for (int i = 0; i < e.dict_size(); ++i)
{
line_len += 4 + e.dict_at(i).first.size();
if (line_len > limit) return -1;
int ret = line_longer_than(*e.dict_at(i).second, limit - line_len);
if (ret == -1) return -1;
line_len += ret + 1;
}
break;
case lazy_entry::string_t:
line_len += 3 + e.string_length();
break;
case lazy_entry::int_t:
{
boost::int64_t val = e.int_value();
while (val > 0)
{
++line_len;
val /= 10;
}
line_len += 2;
}
break;
case lazy_entry::none_t:
line_len += 4;
break;
}
if (line_len > limit) return -1;
return line_len;
}
std::string print_entry(lazy_entry const& e, bool single_line, int indent)
{
char indent_str[200];
memset(indent_str, ' ', 200);
indent_str[0] = ',';
indent_str[1] = '\n';
indent_str[199] = 0;
if (indent < 197 && indent >= 0) indent_str[indent+2] = 0;
std::string ret;
switch (e.type())
{
case lazy_entry::none_t: return "none";
case lazy_entry::int_t:
{
char str[100];
snprintf(str, sizeof(str), "%" PRId64, e.int_value());
return str;
}
case lazy_entry::string_t:
{
bool printable = true;
char const* str = e.string_ptr();
for (int i = 0; i < e.string_length(); ++i)
{
char c = str[i];
if (c >= 32 && c < 127) continue;
printable = false;
break;
}
ret += "'";
if (printable)
{
if (single_line && e.string_length() > 30)
{
ret.append(e.string_ptr(), 14);
ret += "...";
ret.append(e.string_ptr() + e.string_length()-14, 14);
}
else
ret.append(e.string_ptr(), e.string_length());
ret += "'";
return ret;
}
if (single_line && e.string_length() > 20)
{
for (int i = 0; i < 9; ++i)
{
char tmp[5];
snprintf(tmp, sizeof(tmp), "%02x", (unsigned char)str[i]);
ret += tmp;
}
ret += "...";
for (int i = e.string_length() - 9
, len(e.string_length()); i < len; ++i)
{
char tmp[5];
snprintf(tmp, sizeof(tmp), "%02x", (unsigned char)str[i]);
ret += tmp;
}
}
else
{
for (int i = 0; i < e.string_length(); ++i)
{
char tmp[5];
snprintf(tmp, sizeof(tmp), "%02x", (unsigned char)str[i]);
ret += tmp;
}
}
ret += "'";
return ret;
}
case lazy_entry::list_t:
{
ret += '[';
bool one_liner = line_longer_than(e, 200) != -1 || single_line;
if (!one_liner) ret += indent_str + 1;
for (int i = 0; i < e.list_size(); ++i)
{
if (i == 0 && one_liner) ret += " ";
ret += print_entry(*e.list_at(i), single_line, indent + 2);
if (i < e.list_size() - 1) ret += (one_liner?", ":indent_str);
else ret += (one_liner?" ":indent_str+1);
}
ret += "]";
return ret;
}
case lazy_entry::dict_t:
{
ret += "{";
bool one_liner = line_longer_than(e, 200) != -1 || single_line;
if (!one_liner) ret += indent_str+1;
for (int i = 0; i < e.dict_size(); ++i)
{
if (i == 0 && one_liner) ret += " ";
std::pair<std::string, lazy_entry const*> ent = e.dict_at(i);
ret += "'";
ret += ent.first;
ret += "': ";
ret += print_entry(*ent.second, single_line, indent + 2);
if (i < e.dict_size() - 1) ret += (one_liner?", ":indent_str);
else ret += (one_liner?" ":indent_str+1);
}
ret += "}";
return ret;
}
}
return ret;
}
	// boost.system error category for bdecode errors. Instances map the
	// bdecode error codes (see the msgs table in message()) to human
	// readable strings.
	struct bdecode_error_category : boost::system::error_category
	{
		virtual const char* name() const BOOST_SYSTEM_NOEXCEPT;
		virtual std::string message(int ev) const BOOST_SYSTEM_NOEXCEPT;
		// the default condition is simply the same value in this category
		virtual boost::system::error_condition default_error_condition(int ev) const BOOST_SYSTEM_NOEXCEPT
		{ return boost::system::error_condition(ev, *this); }
	};
	// Returns the identifying name of this error category, as required
	// by the boost::system::error_category interface.
	const char* bdecode_error_category::name() const BOOST_SYSTEM_NOEXCEPT
	{
		return "bdecode error";
	}
std::string bdecode_error_category::message(int ev) const BOOST_SYSTEM_NOEXCEPT
{
static char const* msgs[] =
{
"no error",
"expected string in bencoded string",
"expected colon in bencoded string",
"unexpected end of file in bencoded string",
"expected value (list, dict, int or string) in bencoded string",
"bencoded nesting depth exceeded",
"bencoded item count limit exceeded",
"integer overflow",
};
if (ev < 0 || ev >= int(sizeof(msgs)/sizeof(msgs[0])))
return "Unknown error";
return msgs[ev];
}
boost::system::error_category& get_bdecode_category()
{
static bdecode_error_category bdecode_category;
return bdecode_category;
}
namespace bdecode_errors
{
boost::system::error_code make_error_code(error_code_enum e)
{
return boost::system::error_code(e, get_bdecode_category());
}
}
};
|
2324_0
|
crossvul
|
cpp
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.